1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 A FORE Systems 200E-series driver for ATM on Linux. 4 Christophe Lizzi (lizzi@cnam.fr), October 1999-March 2003. 5 6 Based on the PCA-200E driver from Uwe Dannowski (Uwe.Dannowski@inf.tu-dresden.de). 7 8 This driver simultaneously supports PCA-200E and SBA-200E adapters 9 on i386, alpha (untested), powerpc, sparc and sparc64 architectures. 10 11 */ 12 13 14 #include <linux/kernel.h> 15 #include <linux/slab.h> 16 #include <linux/init.h> 17 #include <linux/capability.h> 18 #include <linux/interrupt.h> 19 #include <linux/bitops.h> 20 #include <linux/pci.h> 21 #include <linux/module.h> 22 #include <linux/atmdev.h> 23 #include <linux/sonet.h> 24 #include <linux/dma-mapping.h> 25 #include <linux/delay.h> 26 #include <linux/firmware.h> 27 #include <linux/pgtable.h> 28 #include <asm/io.h> 29 #include <asm/string.h> 30 #include <asm/page.h> 31 #include <asm/irq.h> 32 #include <asm/dma.h> 33 #include <asm/byteorder.h> 34 #include <linux/uaccess.h> 35 #include <linux/atomic.h> 36 37 #ifdef CONFIG_SBUS 38 #include <linux/of.h> 39 #include <linux/platform_device.h> 40 #include <asm/idprom.h> 41 #include <asm/openprom.h> 42 #include <asm/oplib.h> 43 #endif 44 45 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */ 46 #define FORE200E_USE_TASKLET 47 #endif 48 49 #if 0 /* enable the debugging code of the buffer supply queues */ 50 #define FORE200E_BSQ_DEBUG 51 #endif 52 53 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */ 54 #define FORE200E_52BYTE_AAL0_SDU 55 #endif 56 57 #include "fore200e.h" 58 #include "suni.h" 59 60 #define FORE200E_VERSION "0.3e" 61 62 #define FORE200E "fore200e: " 63 64 #if 0 /* override .config */ 65 #define CONFIG_ATM_FORE200E_DEBUG 1 66 #endif 67 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0) 68 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \ 69 printk(FORE200E format, ##args); } while (0) 70 #else 71 #define DPRINTK(level, format, args...) do {} while (0) 72 #endif 73 74 75 #define FORE200E_ALIGN(addr, alignment) \ 76 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr)) 77 78 #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type)) 79 80 #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ]) 81 82 #define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo)) 83 84 #if 1 85 #define ASSERT(expr) if (!(expr)) { \ 86 printk(FORE200E "assertion failed! %s[%d]: %s\n", \ 87 __func__, __LINE__, #expr); \ 88 panic(FORE200E "%s", __func__); \ 89 } 90 #else 91 #define ASSERT(expr) do {} while (0) 92 #endif 93 94 95 static const struct atmdev_ops fore200e_ops; 96 97 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen"); 98 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION); 99 100 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = { 101 { BUFFER_S1_NBR, BUFFER_L1_NBR }, 102 { BUFFER_S2_NBR, BUFFER_L2_NBR } 103 }; 104 105 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = { 106 { BUFFER_S1_SIZE, BUFFER_L1_SIZE }, 107 { BUFFER_S2_SIZE, BUFFER_L2_SIZE } 108 }; 109 110 111 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0) 112 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" }; 113 #endif 114 115 116 #if 0 /* currently unused */ 117 static int 118 fore200e_fore2atm_aal(enum fore200e_aal aal) 119 { 120 switch(aal) { 121 case FORE200E_AAL0: return ATM_AAL0; 122 case FORE200E_AAL34: return ATM_AAL34; 123 case FORE200E_AAL5: return ATM_AAL5; 124 } 125 126 return -EINVAL; 127 } 128 #endif 129 130 131 static enum fore200e_aal 132 fore200e_atm2fore_aal(int aal) 133 { 134 switch(aal) { 135 case ATM_AAL0: return FORE200E_AAL0; 136 case ATM_AAL34: return FORE200E_AAL34; 137 case ATM_AAL1: 138 case ATM_AAL2: 139 case ATM_AAL5: return FORE200E_AAL5; 140 } 141 142 return -EINVAL; 143 } 144 145 146 static char* 147 fore200e_irq_itoa(int irq) 148 { 149 static char str[8]; 150 sprintf(str, "%d", irq); 151 return str; 152 } 153 154 155 /* allocate and align a chunk of memory intended to hold the data behing exchanged 156 between the driver and the adapter (using streaming DVMA) */ 157 158 static int 159 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction) 160 { 161 unsigned long offset = 0; 162 163 if (alignment <= sizeof(int)) 164 alignment = 0; 165 166 chunk->alloc_size = size + alignment; 167 chunk->direction = direction; 168 169 chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL); 170 if (chunk->alloc_addr == NULL) 171 return -ENOMEM; 172 173 if (alignment > 0) 174 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment); 175 176 chunk->align_addr = chunk->alloc_addr + offset; 177 178 chunk->dma_addr = dma_map_single(fore200e->dev, chunk->align_addr, 179 size, direction); 180 if (dma_mapping_error(fore200e->dev, chunk->dma_addr)) { 181 kfree(chunk->alloc_addr); 182 return -ENOMEM; 183 } 184 return 0; 185 } 186 187 188 /* free a chunk of memory */ 189 190 static void 191 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk) 192 { 193 dma_unmap_single(fore200e->dev, chunk->dma_addr, chunk->dma_size, 194 chunk->direction); 195 kfree(chunk->alloc_addr); 196 } 197 198 /* 199 * Allocate a DMA consistent chunk of memory intended to act as a communication 200 * mechanism (to hold descriptors, status, queues, etc.) shared by the driver 201 * and the adapter. 202 */ 203 static int 204 fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk, 205 int size, int nbr, int alignment) 206 { 207 /* returned chunks are page-aligned */ 208 chunk->alloc_size = size * nbr; 209 chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size, 210 &chunk->dma_addr, GFP_KERNEL); 211 if (!chunk->alloc_addr) 212 return -ENOMEM; 213 chunk->align_addr = chunk->alloc_addr; 214 return 0; 215 } 216 217 /* 218 * Free a DMA consistent chunk of memory. 219 */ 220 static void 221 fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk) 222 { 223 dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr, 224 chunk->dma_addr); 225 } 226 227 static void 228 fore200e_spin(int msecs) 229 { 230 unsigned long timeout = jiffies + msecs_to_jiffies(msecs); 231 while (time_before(jiffies, timeout)); 232 } 233 234 235 static int 236 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs) 237 { 238 unsigned long timeout = jiffies + msecs_to_jiffies(msecs); 239 int ok; 240 241 mb(); 242 do { 243 if ((ok = (*addr == val)) || (*addr & STATUS_ERROR)) 244 break; 245 246 } while (time_before(jiffies, timeout)); 247 248 #if 1 249 if (!ok) { 250 printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n", 251 *addr, val); 252 } 253 #endif 254 255 return ok; 256 } 257 258 259 static int 260 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs) 261 { 262 unsigned long timeout = jiffies + msecs_to_jiffies(msecs); 263 int ok; 264 265 do { 266 if ((ok = (fore200e->bus->read(addr) == val))) 267 break; 268 269 } while (time_before(jiffies, timeout)); 270 271 #if 1 272 if (!ok) { 273 printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n", 274 fore200e->bus->read(addr), val); 275 } 276 #endif 277 278 return ok; 279 } 280 281 282 static void 283 fore200e_free_rx_buf(struct fore200e* fore200e) 284 { 285 int scheme, magn, nbr; 286 struct buffer* buffer; 287 288 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 289 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 290 291 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) { 292 293 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) { 294 295 struct chunk* data = &buffer[ nbr ].data; 296 297 if (data->alloc_addr != NULL) 298 fore200e_chunk_free(fore200e, data); 299 } 300 } 301 } 302 } 303 } 304 305 306 static void 307 fore200e_uninit_bs_queue(struct fore200e* fore200e) 308 { 309 int scheme, magn; 310 311 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 312 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 313 314 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status; 315 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block; 316 317 if (status->alloc_addr) 318 fore200e_dma_chunk_free(fore200e, status); 319 320 if (rbd_block->alloc_addr) 321 fore200e_dma_chunk_free(fore200e, rbd_block); 322 } 323 } 324 } 325 326 327 static int 328 fore200e_reset(struct fore200e* fore200e, int diag) 329 { 330 int ok; 331 332 fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET; 333 334 fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat); 335 336 fore200e->bus->reset(fore200e); 337 338 if (diag) { 339 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000); 340 if (ok == 0) { 341 342 printk(FORE200E "device %s self-test failed\n", fore200e->name); 343 return -ENODEV; 344 } 345 346 printk(FORE200E "device %s self-test passed\n", fore200e->name); 347 348 fore200e->state = FORE200E_STATE_RESET; 349 } 350 351 return 0; 352 } 353 354 355 static void 356 fore200e_shutdown(struct fore200e* fore200e) 357 { 358 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n", 359 fore200e->name, fore200e->phys_base, 360 fore200e_irq_itoa(fore200e->irq)); 361 362 if (fore200e->state > FORE200E_STATE_RESET) { 363 /* first, reset the board to prevent further interrupts or data transfers */ 364 fore200e_reset(fore200e, 0); 365 } 366 367 /* then, release all allocated resources */ 368 switch(fore200e->state) { 369 370 case FORE200E_STATE_COMPLETE: 371 kfree(fore200e->stats); 372 373 fallthrough; 374 case FORE200E_STATE_IRQ: 375 free_irq(fore200e->irq, fore200e->atm_dev); 376 377 fallthrough; 378 case FORE200E_STATE_ALLOC_BUF: 379 fore200e_free_rx_buf(fore200e); 380 381 fallthrough; 382 case FORE200E_STATE_INIT_BSQ: 383 fore200e_uninit_bs_queue(fore200e); 384 385 fallthrough; 386 case FORE200E_STATE_INIT_RXQ: 387 fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status); 388 fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd); 389 390 fallthrough; 391 case FORE200E_STATE_INIT_TXQ: 392 fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status); 393 fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd); 394 395 fallthrough; 396 case FORE200E_STATE_INIT_CMDQ: 397 fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status); 398 399 fallthrough; 400 case FORE200E_STATE_INITIALIZE: 401 /* nothing to do for that state */ 402 403 case FORE200E_STATE_START_FW: 404 /* nothing to do for that state */ 405 406 case FORE200E_STATE_RESET: 407 /* nothing to do for that state */ 408 409 case FORE200E_STATE_MAP: 410 fore200e->bus->unmap(fore200e); 411 412 fallthrough; 413 case FORE200E_STATE_CONFIGURE: 414 /* nothing to do for that state */ 415 416 case FORE200E_STATE_REGISTER: 417 /* XXX shouldn't we *start* by deregistering the device? */ 418 atm_dev_deregister(fore200e->atm_dev); 419 420 fallthrough; 421 case FORE200E_STATE_BLANK: 422 /* nothing to do for that state */ 423 break; 424 } 425 } 426 427 428 #ifdef CONFIG_PCI 429 430 static u32 fore200e_pca_read(volatile u32 __iomem *addr) 431 { 432 /* on big-endian hosts, the board is configured to convert 433 the endianess of slave RAM accesses */ 434 return le32_to_cpu(readl(addr)); 435 } 436 437 438 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr) 439 { 440 /* on big-endian hosts, the board is configured to convert 441 the endianess of slave RAM accesses */ 442 writel(cpu_to_le32(val), addr); 443 } 444 445 static int 446 fore200e_pca_irq_check(struct fore200e* fore200e) 447 { 448 /* this is a 1 bit register */ 449 int irq_posted = readl(fore200e->regs.pca.psr); 450 451 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2) 452 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) { 453 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number); 454 } 455 #endif 456 457 return irq_posted; 458 } 459 460 461 static void 462 fore200e_pca_irq_ack(struct fore200e* fore200e) 463 { 464 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr); 465 } 466 467 468 static void 469 fore200e_pca_reset(struct fore200e* fore200e) 470 { 471 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr); 472 fore200e_spin(10); 473 writel(0, fore200e->regs.pca.hcr); 474 } 475 476 477 static int fore200e_pca_map(struct fore200e* fore200e) 478 { 479 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name); 480 481 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH); 482 483 if (fore200e->virt_base == NULL) { 484 printk(FORE200E "can't map device %s\n", fore200e->name); 485 return -EFAULT; 486 } 487 488 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base); 489 490 /* gain access to the PCA specific registers */ 491 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET; 492 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET; 493 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET; 494 495 fore200e->state = FORE200E_STATE_MAP; 496 return 0; 497 } 498 499 500 static void 501 fore200e_pca_unmap(struct fore200e* fore200e) 502 { 503 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name); 504 505 if (fore200e->virt_base != NULL) 506 iounmap(fore200e->virt_base); 507 } 508 509 510 static int fore200e_pca_configure(struct fore200e *fore200e) 511 { 512 struct pci_dev *pci_dev = to_pci_dev(fore200e->dev); 513 u8 master_ctrl, latency; 514 515 DPRINTK(2, "device %s being configured\n", fore200e->name); 516 517 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) { 518 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n"); 519 return -EIO; 520 } 521 522 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl); 523 524 master_ctrl = master_ctrl 525 #if defined(__BIG_ENDIAN) 526 /* request the PCA board to convert the endianess of slave RAM accesses */ 527 | PCA200E_CTRL_CONVERT_ENDIAN 528 #endif 529 #if 0 530 | PCA200E_CTRL_DIS_CACHE_RD 531 | PCA200E_CTRL_DIS_WRT_INVAL 532 | PCA200E_CTRL_ENA_CONT_REQ_MODE 533 | PCA200E_CTRL_2_CACHE_WRT_INVAL 534 #endif 535 | PCA200E_CTRL_LARGE_PCI_BURSTS; 536 537 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl); 538 539 /* raise latency from 32 (default) to 192, as this seems to prevent NIC 540 lockups (under heavy rx loads) due to continuous 'FIFO OUT full' condition. 541 this may impact the performances of other PCI devices on the same bus, though */ 542 latency = 192; 543 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency); 544 545 fore200e->state = FORE200E_STATE_CONFIGURE; 546 return 0; 547 } 548 549 550 static int __init 551 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom) 552 { 553 struct host_cmdq* cmdq = &fore200e->host_cmdq; 554 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 555 struct prom_opcode opcode; 556 int ok; 557 u32 prom_dma; 558 559 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 560 561 opcode.opcode = OPCODE_GET_PROM; 562 opcode.pad = 0; 563 564 prom_dma = dma_map_single(fore200e->dev, prom, sizeof(struct prom_data), 565 DMA_FROM_DEVICE); 566 if (dma_mapping_error(fore200e->dev, prom_dma)) 567 return -ENOMEM; 568 569 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr); 570 571 *entry->status = STATUS_PENDING; 572 573 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode); 574 575 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 576 577 *entry->status = STATUS_FREE; 578 579 dma_unmap_single(fore200e->dev, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE); 580 581 if (ok == 0) { 582 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name); 583 return -EIO; 584 } 585 586 #if defined(__BIG_ENDIAN) 587 588 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) )) 589 590 /* MAC address is stored as little-endian */ 591 swap_here(&prom->mac_addr[0]); 592 swap_here(&prom->mac_addr[4]); 593 #endif 594 595 return 0; 596 } 597 598 599 static int 600 fore200e_pca_proc_read(struct fore200e* fore200e, char *page) 601 { 602 struct pci_dev *pci_dev = to_pci_dev(fore200e->dev); 603 604 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n", 605 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn)); 606 } 607 608 static const struct fore200e_bus fore200e_pci_ops = { 609 .model_name = "PCA-200E", 610 .proc_name = "pca200e", 611 .descr_alignment = 32, 612 .buffer_alignment = 4, 613 .status_alignment = 32, 614 .read = fore200e_pca_read, 615 .write = fore200e_pca_write, 616 .configure = fore200e_pca_configure, 617 .map = fore200e_pca_map, 618 .reset = fore200e_pca_reset, 619 .prom_read = fore200e_pca_prom_read, 620 .unmap = fore200e_pca_unmap, 621 .irq_check = fore200e_pca_irq_check, 622 .irq_ack = fore200e_pca_irq_ack, 623 .proc_read = fore200e_pca_proc_read, 624 }; 625 #endif /* CONFIG_PCI */ 626 627 #ifdef CONFIG_SBUS 628 629 static u32 fore200e_sba_read(volatile u32 __iomem *addr) 630 { 631 return sbus_readl(addr); 632 } 633 634 static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr) 635 { 636 sbus_writel(val, addr); 637 } 638 639 static void fore200e_sba_irq_enable(struct fore200e *fore200e) 640 { 641 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY; 642 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr); 643 } 644 645 static int fore200e_sba_irq_check(struct fore200e *fore200e) 646 { 647 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ; 648 } 649 650 static void fore200e_sba_irq_ack(struct fore200e *fore200e) 651 { 652 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY; 653 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr); 654 } 655 656 static void fore200e_sba_reset(struct fore200e *fore200e) 657 { 658 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr); 659 fore200e_spin(10); 660 fore200e->bus->write(0, fore200e->regs.sba.hcr); 661 } 662 663 static int __init fore200e_sba_map(struct fore200e *fore200e) 664 { 665 struct platform_device *op = to_platform_device(fore200e->dev); 666 unsigned int bursts; 667 668 /* gain access to the SBA specific registers */ 669 fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR"); 670 fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR"); 671 fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR"); 672 fore200e->virt_base = of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM"); 673 674 if (!fore200e->virt_base) { 675 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name); 676 return -EFAULT; 677 } 678 679 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base); 680 681 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */ 682 683 /* get the supported DVMA burst sizes */ 684 bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00); 685 686 if (sbus_can_dma_64bit()) 687 sbus_set_sbus64(&op->dev, bursts); 688 689 fore200e->state = FORE200E_STATE_MAP; 690 return 0; 691 } 692 693 static void fore200e_sba_unmap(struct fore200e *fore200e) 694 { 695 struct platform_device *op = to_platform_device(fore200e->dev); 696 697 of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH); 698 of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH); 699 of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH); 700 of_iounmap(&op->resource[3], fore200e->virt_base, SBA200E_RAM_LENGTH); 701 } 702 703 static int __init fore200e_sba_configure(struct fore200e *fore200e) 704 { 705 fore200e->state = FORE200E_STATE_CONFIGURE; 706 return 0; 707 } 708 709 static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom) 710 { 711 struct platform_device *op = to_platform_device(fore200e->dev); 712 const u8 *prop; 713 int len; 714 715 prop = of_get_property(op->dev.of_node, "madaddrlo2", &len); 716 if (!prop) 717 return -ENODEV; 718 memcpy(&prom->mac_addr[4], prop, 4); 719 720 prop = of_get_property(op->dev.of_node, "madaddrhi4", &len); 721 if (!prop) 722 return -ENODEV; 723 memcpy(&prom->mac_addr[2], prop, 4); 724 725 prom->serial_number = of_getintprop_default(op->dev.of_node, 726 "serialnumber", 0); 727 prom->hw_revision = of_getintprop_default(op->dev.of_node, 728 "promversion", 0); 729 730 return 0; 731 } 732 733 static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page) 734 { 735 struct platform_device *op = to_platform_device(fore200e->dev); 736 const struct linux_prom_registers *regs; 737 738 regs = of_get_property(op->dev.of_node, "reg", NULL); 739 740 return sprintf(page, " SBUS slot/device:\t\t%d/'%pOFn'\n", 741 (regs ? regs->which_io : 0), op->dev.of_node); 742 } 743 744 static const struct fore200e_bus fore200e_sbus_ops = { 745 .model_name = "SBA-200E", 746 .proc_name = "sba200e", 747 .descr_alignment = 32, 748 .buffer_alignment = 64, 749 .status_alignment = 32, 750 .read = fore200e_sba_read, 751 .write = fore200e_sba_write, 752 .configure = fore200e_sba_configure, 753 .map = fore200e_sba_map, 754 .reset = fore200e_sba_reset, 755 .prom_read = fore200e_sba_prom_read, 756 .unmap = fore200e_sba_unmap, 757 .irq_enable = fore200e_sba_irq_enable, 758 .irq_check = fore200e_sba_irq_check, 759 .irq_ack = fore200e_sba_irq_ack, 760 .proc_read = fore200e_sba_proc_read, 761 }; 762 #endif /* CONFIG_SBUS */ 763 764 static void 765 fore200e_tx_irq(struct fore200e* fore200e) 766 { 767 struct host_txq* txq = &fore200e->host_txq; 768 struct host_txq_entry* entry; 769 struct atm_vcc* vcc; 770 struct fore200e_vc_map* vc_map; 771 772 if (fore200e->host_txq.txing == 0) 773 return; 774 775 for (;;) { 776 777 entry = &txq->host_entry[ txq->tail ]; 778 779 if ((*entry->status & STATUS_COMPLETE) == 0) { 780 break; 781 } 782 783 DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n", 784 entry, txq->tail, entry->vc_map, entry->skb); 785 786 /* free copy of misaligned data */ 787 kfree(entry->data); 788 789 /* remove DMA mapping */ 790 dma_unmap_single(fore200e->dev, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length, 791 DMA_TO_DEVICE); 792 793 vc_map = entry->vc_map; 794 795 /* vcc closed since the time the entry was submitted for tx? */ 796 if ((vc_map->vcc == NULL) || 797 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) { 798 799 DPRINTK(1, "no ready vcc found for PDU sent on device %d\n", 800 fore200e->atm_dev->number); 801 802 dev_kfree_skb_any(entry->skb); 803 } 804 else { 805 ASSERT(vc_map->vcc); 806 807 /* vcc closed then immediately re-opened? */ 808 if (vc_map->incarn != entry->incarn) { 809 810 /* when a vcc is closed, some PDUs may be still pending in the tx queue. 811 if the same vcc is immediately re-opened, those pending PDUs must 812 not be popped after the completion of their emission, as they refer 813 to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc 814 would be decremented by the size of the (unrelated) skb, possibly 815 leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc. 816 we thus bind the tx entry to the current incarnation of the vcc 817 when the entry is submitted for tx. When the tx later completes, 818 if the incarnation number of the tx entry does not match the one 819 of the vcc, then this implies that the vcc has been closed then re-opened. 820 we thus just drop the skb here. */ 821 822 DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n", 823 fore200e->atm_dev->number); 824 825 dev_kfree_skb_any(entry->skb); 826 } 827 else { 828 vcc = vc_map->vcc; 829 ASSERT(vcc); 830 831 /* notify tx completion */ 832 if (vcc->pop) { 833 vcc->pop(vcc, entry->skb); 834 } 835 else { 836 dev_kfree_skb_any(entry->skb); 837 } 838 839 /* check error condition */ 840 if (*entry->status & STATUS_ERROR) 841 atomic_inc(&vcc->stats->tx_err); 842 else 843 atomic_inc(&vcc->stats->tx); 844 } 845 } 846 847 *entry->status = STATUS_FREE; 848 849 fore200e->host_txq.txing--; 850 851 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX); 852 } 853 } 854 855 856 #ifdef FORE200E_BSQ_DEBUG 857 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn) 858 { 859 struct buffer* buffer; 860 int count = 0; 861 862 buffer = bsq->freebuf; 863 while (buffer) { 864 865 if (buffer->supplied) { 866 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n", 867 where, scheme, magn, buffer->index); 868 } 869 870 if (buffer->magn != magn) { 871 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n", 872 where, scheme, magn, buffer->index, buffer->magn); 873 } 874 875 if (buffer->scheme != scheme) { 876 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n", 877 where, scheme, magn, buffer->index, buffer->scheme); 878 } 879 880 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) { 881 printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n", 882 where, scheme, magn, buffer->index); 883 } 884 885 count++; 886 buffer = buffer->next; 887 } 888 889 if (count != bsq->freebuf_count) { 890 printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n", 891 where, scheme, magn, count, bsq->freebuf_count); 892 } 893 return 0; 894 } 895 #endif 896 897 898 static void 899 fore200e_supply(struct fore200e* fore200e) 900 { 901 int scheme, magn, i; 902 903 struct host_bsq* bsq; 904 struct host_bsq_entry* entry; 905 struct buffer* buffer; 906 907 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 908 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 909 910 bsq = &fore200e->host_bsq[ scheme ][ magn ]; 911 912 #ifdef FORE200E_BSQ_DEBUG 913 bsq_audit(1, bsq, scheme, magn); 914 #endif 915 while (bsq->freebuf_count >= RBD_BLK_SIZE) { 916 917 DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n", 918 RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count); 919 920 entry = &bsq->host_entry[ bsq->head ]; 921 922 for (i = 0; i < RBD_BLK_SIZE; i++) { 923 924 /* take the first buffer in the free buffer list */ 925 buffer = bsq->freebuf; 926 if (!buffer) { 927 printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n", 928 scheme, magn, bsq->freebuf_count); 929 return; 930 } 931 bsq->freebuf = buffer->next; 932 933 #ifdef FORE200E_BSQ_DEBUG 934 if (buffer->supplied) 935 printk(FORE200E "queue %d.%d, buffer %lu already supplied\n", 936 scheme, magn, buffer->index); 937 buffer->supplied = 1; 938 #endif 939 entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr; 940 entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer); 941 } 942 943 FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS); 944 945 /* decrease accordingly the number of free rx buffers */ 946 bsq->freebuf_count -= RBD_BLK_SIZE; 947 948 *entry->status = STATUS_PENDING; 949 fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr); 950 } 951 } 952 } 953 } 954 955 956 static int 957 fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd) 958 { 959 struct sk_buff* skb; 960 struct buffer* buffer; 961 struct fore200e_vcc* fore200e_vcc; 962 int i, pdu_len = 0; 963 #ifdef FORE200E_52BYTE_AAL0_SDU 964 u32 cell_header = 0; 965 #endif 966 967 ASSERT(vcc); 968 969 fore200e_vcc = FORE200E_VCC(vcc); 970 ASSERT(fore200e_vcc); 971 972 #ifdef FORE200E_52BYTE_AAL0_SDU 973 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) { 974 975 cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) | 976 (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) | 977 (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) | 978 (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) | 979 rpd->atm_header.clp; 980 pdu_len = 4; 981 } 982 #endif 983 984 /* compute total PDU length */ 985 for (i = 0; i < rpd->nseg; i++) 986 pdu_len += rpd->rsd[ i ].length; 987 988 skb = alloc_skb(pdu_len, GFP_ATOMIC); 989 if (skb == NULL) { 990 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len); 991 992 atomic_inc(&vcc->stats->rx_drop); 993 return -ENOMEM; 994 } 995 996 __net_timestamp(skb); 997 998 #ifdef FORE200E_52BYTE_AAL0_SDU 999 if (cell_header) { 1000 *((u32*)skb_put(skb, 4)) = cell_header; 1001 } 1002 #endif 1003 1004 /* reassemble segments */ 1005 for (i = 0; i < rpd->nseg; i++) { 1006 1007 /* rebuild rx buffer address from rsd handle */ 1008 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle); 1009 1010 /* Make device DMA transfer visible to CPU. */ 1011 dma_sync_single_for_cpu(fore200e->dev, buffer->data.dma_addr, 1012 rpd->rsd[i].length, DMA_FROM_DEVICE); 1013 1014 skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length); 1015 1016 /* Now let the device get at it again. */ 1017 dma_sync_single_for_device(fore200e->dev, buffer->data.dma_addr, 1018 rpd->rsd[i].length, DMA_FROM_DEVICE); 1019 } 1020 1021 DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize); 1022 1023 if (pdu_len < fore200e_vcc->rx_min_pdu) 1024 fore200e_vcc->rx_min_pdu = pdu_len; 1025 if (pdu_len > fore200e_vcc->rx_max_pdu) 1026 fore200e_vcc->rx_max_pdu = pdu_len; 1027 fore200e_vcc->rx_pdu++; 1028 1029 /* push PDU */ 1030 if (atm_charge(vcc, skb->truesize) == 0) { 1031 1032 DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n", 1033 vcc->itf, vcc->vpi, vcc->vci); 1034 1035 dev_kfree_skb_any(skb); 1036 1037 atomic_inc(&vcc->stats->rx_drop); 1038 return -ENOMEM; 1039 } 1040 1041 vcc->push(vcc, skb); 1042 atomic_inc(&vcc->stats->rx); 1043 1044 return 0; 1045 } 1046 1047 1048 static void 1049 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd) 1050 { 1051 struct host_bsq* bsq; 1052 struct buffer* buffer; 1053 int i; 1054 1055 for (i = 0; i < rpd->nseg; i++) { 1056 1057 /* rebuild rx buffer address from rsd handle */ 1058 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle); 1059 1060 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ]; 1061 1062 #ifdef FORE200E_BSQ_DEBUG 1063 bsq_audit(2, bsq, buffer->scheme, buffer->magn); 1064 1065 if (buffer->supplied == 0) 1066 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n", 1067 buffer->scheme, buffer->magn, buffer->index); 1068 buffer->supplied = 0; 1069 #endif 1070 1071 /* re-insert the buffer into the free buffer list */ 1072 buffer->next = bsq->freebuf; 1073 bsq->freebuf = buffer; 1074 1075 /* then increment the number of free rx buffers */ 1076 bsq->freebuf_count++; 1077 } 1078 } 1079 1080 1081 static void 1082 fore200e_rx_irq(struct fore200e* fore200e) 1083 { 1084 struct host_rxq* rxq = &fore200e->host_rxq; 1085 struct host_rxq_entry* entry; 1086 struct atm_vcc* vcc; 1087 struct fore200e_vc_map* vc_map; 1088 1089 for (;;) { 1090 1091 entry = &rxq->host_entry[ rxq->head ]; 1092 1093 /* no more received PDUs */ 1094 if ((*entry->status & STATUS_COMPLETE) == 0) 1095 break; 1096 1097 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); 1098 1099 if ((vc_map->vcc == NULL) || 1100 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) { 1101 1102 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n", 1103 fore200e->atm_dev->number, 1104 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); 1105 } 1106 else { 1107 vcc = vc_map->vcc; 1108 ASSERT(vcc); 1109 1110 if ((*entry->status & STATUS_ERROR) == 0) { 1111 1112 fore200e_push_rpd(fore200e, vcc, entry->rpd); 1113 } 1114 else { 1115 DPRINTK(2, "damaged PDU on %d.%d.%d\n", 1116 fore200e->atm_dev->number, 1117 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); 1118 atomic_inc(&vcc->stats->rx_err); 1119 } 1120 } 1121 1122 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX); 1123 1124 fore200e_collect_rpd(fore200e, entry->rpd); 1125 1126 /* rewrite the rpd address to ack the received PDU */ 1127 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr); 1128 *entry->status = STATUS_FREE; 1129 1130 fore200e_supply(fore200e); 1131 } 1132 } 1133 1134 1135 #ifndef FORE200E_USE_TASKLET 1136 static void 1137 fore200e_irq(struct fore200e* fore200e) 1138 { 1139 unsigned long flags; 1140 1141 spin_lock_irqsave(&fore200e->q_lock, flags); 1142 fore200e_rx_irq(fore200e); 1143 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1144 1145 spin_lock_irqsave(&fore200e->q_lock, flags); 1146 fore200e_tx_irq(fore200e); 1147 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1148 } 1149 #endif 1150 1151 1152 static irqreturn_t 1153 fore200e_interrupt(int irq, void* dev) 1154 { 1155 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev); 1156 1157 if (fore200e->bus->irq_check(fore200e) == 0) { 1158 1159 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number); 1160 return IRQ_NONE; 1161 } 1162 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number); 1163 1164 #ifdef FORE200E_USE_TASKLET 1165 tasklet_schedule(&fore200e->tx_tasklet); 1166 tasklet_schedule(&fore200e->rx_tasklet); 1167 #else 1168 fore200e_irq(fore200e); 1169 #endif 1170 1171 fore200e->bus->irq_ack(fore200e); 1172 return IRQ_HANDLED; 1173 } 1174 1175 1176 #ifdef FORE200E_USE_TASKLET 1177 static void 1178 fore200e_tx_tasklet(unsigned long data) 1179 { 1180 struct fore200e* fore200e = (struct fore200e*) data; 1181 unsigned long flags; 1182 1183 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number); 1184 1185 spin_lock_irqsave(&fore200e->q_lock, flags); 1186 fore200e_tx_irq(fore200e); 1187 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1188 } 1189 1190 1191 static void 1192 fore200e_rx_tasklet(unsigned long data) 1193 { 1194 struct fore200e* fore200e = (struct fore200e*) data; 1195 unsigned long flags; 1196 1197 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number); 1198 1199 spin_lock_irqsave(&fore200e->q_lock, flags); 1200 fore200e_rx_irq((struct fore200e*) data); 1201 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1202 } 1203 #endif 1204 1205 1206 static int 1207 fore200e_select_scheme(struct atm_vcc* vcc) 1208 { 1209 /* fairly balance the VCs over (identical) buffer schemes */ 1210 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO; 1211 1212 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n", 1213 vcc->itf, vcc->vpi, vcc->vci, scheme); 1214 1215 return scheme; 1216 } 1217 1218 1219 static int 1220 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu) 1221 { 1222 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1223 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1224 struct activate_opcode activ_opcode; 1225 struct deactivate_opcode deactiv_opcode; 1226 struct vpvc vpvc; 1227 int ok; 1228 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal); 1229 1230 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1231 1232 if (activate) { 1233 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc); 1234 1235 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN; 1236 activ_opcode.aal = aal; 1237 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme; 1238 activ_opcode.pad = 0; 1239 } 1240 else { 1241 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN; 1242 deactiv_opcode.pad = 0; 1243 } 1244 1245 vpvc.vci = vcc->vci; 1246 vpvc.vpi = vcc->vpi; 1247 1248 *entry->status = STATUS_PENDING; 1249 1250 if (activate) { 1251 1252 #ifdef FORE200E_52BYTE_AAL0_SDU 1253 mtu = 48; 1254 #endif 1255 /* the MTU is not used by the cp, except in the case of AAL0 */ 1256 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu); 1257 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc); 1258 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode); 1259 } 1260 else { 1261 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc); 1262 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode); 1263 } 1264 1265 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1266 1267 *entry->status = STATUS_FREE; 1268 1269 if (ok == 0) { 1270 printk(FORE200E "unable to %s VC %d.%d.%d\n", 1271 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci); 1272 return -EIO; 1273 } 1274 1275 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci, 1276 activate ? "open" : "clos"); 1277 1278 return 0; 1279 } 1280 1281 1282 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */ 1283 1284 static void 1285 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate) 1286 { 1287 if (qos->txtp.max_pcr < ATM_OC3_PCR) { 1288 1289 /* compute the data cells to idle cells ratio from the tx PCR */ 1290 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR; 1291 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells; 1292 } 1293 else { 1294 /* disable rate control */ 1295 rate->data_cells = rate->idle_cells = 0; 1296 } 1297 } 1298 1299 1300 static int 1301 fore200e_open(struct atm_vcc *vcc) 1302 { 1303 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1304 struct fore200e_vcc* fore200e_vcc; 1305 struct fore200e_vc_map* vc_map; 1306 unsigned long flags; 1307 int vci = vcc->vci; 1308 short vpi = vcc->vpi; 1309 1310 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS)); 1311 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS)); 1312 1313 spin_lock_irqsave(&fore200e->q_lock, flags); 1314 1315 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci); 1316 if (vc_map->vcc) { 1317 1318 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1319 1320 printk(FORE200E "VC %d.%d.%d already in use\n", 1321 fore200e->atm_dev->number, vpi, vci); 1322 1323 return -EINVAL; 1324 } 1325 1326 vc_map->vcc = vcc; 1327 1328 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1329 1330 fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC); 1331 if (fore200e_vcc == NULL) { 1332 vc_map->vcc = NULL; 1333 return -ENOMEM; 1334 } 1335 1336 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; " 1337 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n", 1338 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 1339 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ], 1340 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu, 1341 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ], 1342 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu); 1343 1344 /* pseudo-CBR bandwidth requested? */ 1345 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1346 1347 mutex_lock(&fore200e->rate_mtx); 1348 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) { 1349 mutex_unlock(&fore200e->rate_mtx); 1350 1351 kfree(fore200e_vcc); 1352 vc_map->vcc = NULL; 1353 return -EAGAIN; 1354 } 1355 1356 /* reserve bandwidth */ 1357 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr; 1358 mutex_unlock(&fore200e->rate_mtx); 1359 } 1360 1361 vcc->itf = vcc->dev->number; 1362 1363 set_bit(ATM_VF_PARTIAL,&vcc->flags); 1364 set_bit(ATM_VF_ADDR, &vcc->flags); 1365 1366 vcc->dev_data = fore200e_vcc; 1367 1368 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) { 1369 1370 vc_map->vcc = NULL; 1371 1372 clear_bit(ATM_VF_ADDR, &vcc->flags); 1373 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1374 1375 vcc->dev_data = NULL; 1376 1377 mutex_lock(&fore200e->rate_mtx); 1378 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1379 mutex_unlock(&fore200e->rate_mtx); 1380 1381 kfree(fore200e_vcc); 1382 return -EINVAL; 1383 } 1384 1385 /* compute rate control parameters */ 1386 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1387 1388 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate); 1389 set_bit(ATM_VF_HASQOS, &vcc->flags); 1390 1391 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n", 1392 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 1393 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr, 1394 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells); 1395 } 1396 1397 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1; 1398 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0; 1399 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0; 1400 1401 /* new incarnation of the vcc */ 1402 vc_map->incarn = ++fore200e->incarn_count; 1403 1404 /* VC unusable before this flag is set */ 1405 set_bit(ATM_VF_READY, &vcc->flags); 1406 1407 return 0; 1408 } 1409 1410 1411 static void 1412 fore200e_close(struct atm_vcc* vcc) 1413 { 1414 struct fore200e_vcc* fore200e_vcc; 1415 struct fore200e* fore200e; 1416 struct fore200e_vc_map* vc_map; 1417 unsigned long flags; 1418 1419 ASSERT(vcc); 1420 fore200e = FORE200E_DEV(vcc->dev); 1421 1422 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS)); 1423 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS)); 1424 1425 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal)); 1426 1427 clear_bit(ATM_VF_READY, &vcc->flags); 1428 1429 fore200e_activate_vcin(fore200e, 0, vcc, 0); 1430 1431 spin_lock_irqsave(&fore200e->q_lock, flags); 1432 1433 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci); 1434 1435 /* the vc is no longer considered as "in use" by fore200e_open() */ 1436 vc_map->vcc = NULL; 1437 1438 vcc->itf = vcc->vci = vcc->vpi = 0; 1439 1440 fore200e_vcc = FORE200E_VCC(vcc); 1441 vcc->dev_data = NULL; 1442 1443 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1444 1445 /* release reserved bandwidth, if any */ 1446 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) { 1447 1448 mutex_lock(&fore200e->rate_mtx); 1449 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1450 mutex_unlock(&fore200e->rate_mtx); 1451 1452 clear_bit(ATM_VF_HASQOS, &vcc->flags); 1453 } 1454 1455 clear_bit(ATM_VF_ADDR, &vcc->flags); 1456 clear_bit(ATM_VF_PARTIAL,&vcc->flags); 1457 1458 ASSERT(fore200e_vcc); 1459 kfree(fore200e_vcc); 1460 } 1461 1462 1463 static int 1464 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb) 1465 { 1466 struct fore200e* fore200e; 1467 struct fore200e_vcc* fore200e_vcc; 1468 struct fore200e_vc_map* vc_map; 1469 struct host_txq* txq; 1470 struct host_txq_entry* entry; 1471 struct tpd* tpd; 1472 struct tpd_haddr tpd_haddr; 1473 int retry = CONFIG_ATM_FORE200E_TX_RETRY; 1474 int tx_copy = 0; 1475 int tx_len = skb->len; 1476 u32* cell_header = NULL; 1477 unsigned char* skb_data; 1478 int skb_len; 1479 unsigned char* data; 1480 unsigned long flags; 1481 1482 if (!vcc) 1483 return -EINVAL; 1484 1485 fore200e = FORE200E_DEV(vcc->dev); 1486 fore200e_vcc = FORE200E_VCC(vcc); 1487 1488 if (!fore200e) 1489 return -EINVAL; 1490 1491 txq = &fore200e->host_txq; 1492 if (!fore200e_vcc) 1493 return -EINVAL; 1494 1495 if (!test_bit(ATM_VF_READY, &vcc->flags)) { 1496 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vpi); 1497 dev_kfree_skb_any(skb); 1498 return -EINVAL; 1499 } 1500 1501 #ifdef FORE200E_52BYTE_AAL0_SDU 1502 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) { 1503 cell_header = (u32*) skb->data; 1504 skb_data = skb->data + 4; /* skip 4-byte cell header */ 1505 skb_len = tx_len = skb->len - 4; 1506 1507 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header); 1508 } 1509 else 1510 #endif 1511 { 1512 skb_data = skb->data; 1513 skb_len = skb->len; 1514 } 1515 1516 if (((unsigned long)skb_data) & 0x3) { 1517 1518 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name); 1519 tx_copy = 1; 1520 tx_len = skb_len; 1521 } 1522 1523 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) { 1524 1525 /* this simply NUKES the PCA board */ 1526 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name); 1527 tx_copy = 1; 1528 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD; 1529 } 1530 1531 if (tx_copy) { 1532 data = kmalloc(tx_len, GFP_ATOMIC); 1533 if (data == NULL) { 1534 if (vcc->pop) { 1535 vcc->pop(vcc, skb); 1536 } 1537 else { 1538 dev_kfree_skb_any(skb); 1539 } 1540 return -ENOMEM; 1541 } 1542 1543 memcpy(data, skb_data, skb_len); 1544 if (skb_len < tx_len) 1545 memset(data + skb_len, 0x00, tx_len - skb_len); 1546 } 1547 else { 1548 data = skb_data; 1549 } 1550 1551 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci); 1552 ASSERT(vc_map->vcc == vcc); 1553 1554 retry_here: 1555 1556 spin_lock_irqsave(&fore200e->q_lock, flags); 1557 1558 entry = &txq->host_entry[ txq->head ]; 1559 1560 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) { 1561 1562 /* try to free completed tx queue entries */ 1563 fore200e_tx_irq(fore200e); 1564 1565 if (*entry->status != STATUS_FREE) { 1566 1567 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1568 1569 /* retry once again? */ 1570 if (--retry > 0) { 1571 udelay(50); 1572 goto retry_here; 1573 } 1574 1575 atomic_inc(&vcc->stats->tx_err); 1576 1577 fore200e->tx_sat++; 1578 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n", 1579 fore200e->name, fore200e->cp_queues->heartbeat); 1580 if (vcc->pop) { 1581 vcc->pop(vcc, skb); 1582 } 1583 else { 1584 dev_kfree_skb_any(skb); 1585 } 1586 1587 if (tx_copy) 1588 kfree(data); 1589 1590 return -ENOBUFS; 1591 } 1592 } 1593 1594 entry->incarn = vc_map->incarn; 1595 entry->vc_map = vc_map; 1596 entry->skb = skb; 1597 entry->data = tx_copy ? data : NULL; 1598 1599 tpd = entry->tpd; 1600 tpd->tsd[ 0 ].buffer = dma_map_single(fore200e->dev, data, tx_len, 1601 DMA_TO_DEVICE); 1602 if (dma_mapping_error(fore200e->dev, tpd->tsd[0].buffer)) { 1603 if (tx_copy) 1604 kfree(data); 1605 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1606 return -ENOMEM; 1607 } 1608 tpd->tsd[ 0 ].length = tx_len; 1609 1610 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX); 1611 txq->txing++; 1612 1613 /* The dma_map call above implies a dma_sync so the device can use it, 1614 * thus no explicit dma_sync call is necessary here. 1615 */ 1616 1617 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n", 1618 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 1619 tpd->tsd[0].length, skb_len); 1620 1621 if (skb_len < fore200e_vcc->tx_min_pdu) 1622 fore200e_vcc->tx_min_pdu = skb_len; 1623 if (skb_len > fore200e_vcc->tx_max_pdu) 1624 fore200e_vcc->tx_max_pdu = skb_len; 1625 fore200e_vcc->tx_pdu++; 1626 1627 /* set tx rate control information */ 1628 tpd->rate.data_cells = fore200e_vcc->rate.data_cells; 1629 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells; 1630 1631 if (cell_header) { 1632 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP); 1633 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT; 1634 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT; 1635 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT; 1636 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT; 1637 } 1638 else { 1639 /* set the ATM header, common to all cells conveying the PDU */ 1640 tpd->atm_header.clp = 0; 1641 tpd->atm_header.plt = 0; 1642 tpd->atm_header.vci = vcc->vci; 1643 tpd->atm_header.vpi = vcc->vpi; 1644 tpd->atm_header.gfc = 0; 1645 } 1646 1647 tpd->spec.length = tx_len; 1648 tpd->spec.nseg = 1; 1649 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal); 1650 tpd->spec.intr = 1; 1651 1652 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32 byte blocks */ 1653 tpd_haddr.pad = 0; 1654 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */ 1655 1656 *entry->status = STATUS_PENDING; 1657 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr); 1658 1659 spin_unlock_irqrestore(&fore200e->q_lock, flags); 1660 1661 return 0; 1662 } 1663 1664 1665 static int 1666 fore200e_getstats(struct fore200e* fore200e) 1667 { 1668 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1669 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1670 struct stats_opcode opcode; 1671 int ok; 1672 u32 stats_dma_addr; 1673 1674 if (fore200e->stats == NULL) { 1675 fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL); 1676 if (fore200e->stats == NULL) 1677 return -ENOMEM; 1678 } 1679 1680 stats_dma_addr = dma_map_single(fore200e->dev, fore200e->stats, 1681 sizeof(struct stats), DMA_FROM_DEVICE); 1682 if (dma_mapping_error(fore200e->dev, stats_dma_addr)) 1683 return -ENOMEM; 1684 1685 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1686 1687 opcode.opcode = OPCODE_GET_STATS; 1688 opcode.pad = 0; 1689 1690 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr); 1691 1692 *entry->status = STATUS_PENDING; 1693 1694 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode); 1695 1696 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1697 1698 *entry->status = STATUS_FREE; 1699 1700 dma_unmap_single(fore200e->dev, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE); 1701 1702 if (ok == 0) { 1703 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name); 1704 return -EIO; 1705 } 1706 1707 return 0; 1708 } 1709 1710 #if 0 /* currently unused */ 1711 static int 1712 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs) 1713 { 1714 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1715 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1716 struct oc3_opcode opcode; 1717 int ok; 1718 u32 oc3_regs_dma_addr; 1719 1720 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE); 1721 1722 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1723 1724 opcode.opcode = OPCODE_GET_OC3; 1725 opcode.reg = 0; 1726 opcode.value = 0; 1727 opcode.mask = 0; 1728 1729 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr); 1730 1731 *entry->status = STATUS_PENDING; 1732 1733 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode); 1734 1735 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1736 1737 *entry->status = STATUS_FREE; 1738 1739 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE); 1740 1741 if (ok == 0) { 1742 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name); 1743 return -EIO; 1744 } 1745 1746 return 0; 1747 } 1748 #endif 1749 1750 1751 static int 1752 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask) 1753 { 1754 struct host_cmdq* cmdq = &fore200e->host_cmdq; 1755 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; 1756 struct oc3_opcode opcode; 1757 int ok; 1758 1759 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask); 1760 1761 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); 1762 1763 opcode.opcode = OPCODE_SET_OC3; 1764 opcode.reg = reg; 1765 opcode.value = value; 1766 opcode.mask = mask; 1767 1768 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr); 1769 1770 *entry->status = STATUS_PENDING; 1771 1772 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode); 1773 1774 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400); 1775 1776 *entry->status = STATUS_FREE; 1777 1778 if (ok == 0) { 1779 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name); 1780 return -EIO; 1781 } 1782 1783 return 0; 1784 } 1785 1786 1787 static int 1788 fore200e_setloop(struct fore200e* fore200e, int loop_mode) 1789 { 1790 u32 mct_value, mct_mask; 1791 int error; 1792 1793 if (!capable(CAP_NET_ADMIN)) 1794 return -EPERM; 1795 1796 switch (loop_mode) { 1797 1798 case ATM_LM_NONE: 1799 mct_value = 0; 1800 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE; 1801 break; 1802 1803 case ATM_LM_LOC_PHY: 1804 mct_value = mct_mask = SUNI_MCT_DLE; 1805 break; 1806 1807 case ATM_LM_RMT_PHY: 1808 mct_value = mct_mask = SUNI_MCT_LLE; 1809 break; 1810 1811 default: 1812 return -EINVAL; 1813 } 1814 1815 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask); 1816 if (error == 0) 1817 fore200e->loop_mode = loop_mode; 1818 1819 return error; 1820 } 1821 1822 1823 static int 1824 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg) 1825 { 1826 struct sonet_stats tmp; 1827 1828 if (fore200e_getstats(fore200e) < 0) 1829 return -EIO; 1830 1831 tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors); 1832 tmp.line_bip = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors); 1833 tmp.path_bip = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors); 1834 tmp.line_febe = be32_to_cpu(fore200e->stats->oc3.line_febe_errors); 1835 tmp.path_febe = be32_to_cpu(fore200e->stats->oc3.path_febe_errors); 1836 tmp.corr_hcs = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors); 1837 tmp.uncorr_hcs = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors); 1838 tmp.tx_cells = be32_to_cpu(fore200e->stats->aal0.cells_transmitted) + 1839 be32_to_cpu(fore200e->stats->aal34.cells_transmitted) + 1840 be32_to_cpu(fore200e->stats->aal5.cells_transmitted); 1841 tmp.rx_cells = be32_to_cpu(fore200e->stats->aal0.cells_received) + 1842 be32_to_cpu(fore200e->stats->aal34.cells_received) + 1843 be32_to_cpu(fore200e->stats->aal5.cells_received); 1844 1845 if (arg) 1846 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0; 1847 1848 return 0; 1849 } 1850 1851 1852 static int 1853 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg) 1854 { 1855 struct fore200e* fore200e = FORE200E_DEV(dev); 1856 1857 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg); 1858 1859 switch (cmd) { 1860 1861 case SONET_GETSTAT: 1862 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg); 1863 1864 case SONET_GETDIAG: 1865 return put_user(0, (int __user *)arg) ? -EFAULT : 0; 1866 1867 case ATM_SETLOOP: 1868 return fore200e_setloop(fore200e, (int)(unsigned long)arg); 1869 1870 case ATM_GETLOOP: 1871 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0; 1872 1873 case ATM_QUERYLOOP: 1874 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0; 1875 } 1876 1877 return -ENOSYS; /* not implemented */ 1878 } 1879 1880 1881 static int 1882 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags) 1883 { 1884 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc); 1885 struct fore200e* fore200e = FORE200E_DEV(vcc->dev); 1886 1887 if (!test_bit(ATM_VF_READY, &vcc->flags)) { 1888 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vpi); 1889 return -EINVAL; 1890 } 1891 1892 DPRINTK(2, "change_qos %d.%d.%d, " 1893 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; " 1894 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n" 1895 "available_cell_rate = %u", 1896 vcc->itf, vcc->vpi, vcc->vci, 1897 fore200e_traffic_class[ qos->txtp.traffic_class ], 1898 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu, 1899 fore200e_traffic_class[ qos->rxtp.traffic_class ], 1900 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu, 1901 flags, fore200e->available_cell_rate); 1902 1903 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) { 1904 1905 mutex_lock(&fore200e->rate_mtx); 1906 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) { 1907 mutex_unlock(&fore200e->rate_mtx); 1908 return -EAGAIN; 1909 } 1910 1911 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr; 1912 fore200e->available_cell_rate -= qos->txtp.max_pcr; 1913 1914 mutex_unlock(&fore200e->rate_mtx); 1915 1916 memcpy(&vcc->qos, qos, sizeof(struct atm_qos)); 1917 1918 /* update rate control parameters */ 1919 fore200e_rate_ctrl(qos, &fore200e_vcc->rate); 1920 1921 set_bit(ATM_VF_HASQOS, &vcc->flags); 1922 1923 return 0; 1924 } 1925 1926 return -EINVAL; 1927 } 1928 1929 1930 static int fore200e_irq_request(struct fore200e *fore200e) 1931 { 1932 if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) { 1933 1934 printk(FORE200E "unable to reserve IRQ %s for device %s\n", 1935 fore200e_irq_itoa(fore200e->irq), fore200e->name); 1936 return -EBUSY; 1937 } 1938 1939 printk(FORE200E "IRQ %s reserved for device %s\n", 1940 fore200e_irq_itoa(fore200e->irq), fore200e->name); 1941 1942 #ifdef FORE200E_USE_TASKLET 1943 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e); 1944 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e); 1945 #endif 1946 1947 fore200e->state = FORE200E_STATE_IRQ; 1948 return 0; 1949 } 1950 1951 1952 static int fore200e_get_esi(struct fore200e *fore200e) 1953 { 1954 struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL); 1955 int ok, i; 1956 1957 if (!prom) 1958 return -ENOMEM; 1959 1960 ok = fore200e->bus->prom_read(fore200e, prom); 1961 if (ok < 0) { 1962 kfree(prom); 1963 return -EBUSY; 1964 } 1965 1966 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n", 1967 fore200e->name, 1968 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */ 1969 prom->serial_number & 0xFFFF, &prom->mac_addr[2]); 1970 1971 for (i = 0; i < ESI_LEN; i++) { 1972 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ]; 1973 } 1974 1975 kfree(prom); 1976 1977 return 0; 1978 } 1979 1980 1981 static int fore200e_alloc_rx_buf(struct fore200e *fore200e) 1982 { 1983 int scheme, magn, nbr, size, i; 1984 1985 struct host_bsq* bsq; 1986 struct buffer* buffer; 1987 1988 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 1989 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 1990 1991 bsq = &fore200e->host_bsq[ scheme ][ magn ]; 1992 1993 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ]; 1994 size = fore200e_rx_buf_size[ scheme ][ magn ]; 1995 1996 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn); 1997 1998 /* allocate the array of receive buffers */ 1999 buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer), 2000 GFP_KERNEL); 2001 2002 if (buffer == NULL) 2003 return -ENOMEM; 2004 2005 bsq->freebuf = NULL; 2006 2007 for (i = 0; i < nbr; i++) { 2008 2009 buffer[ i ].scheme = scheme; 2010 buffer[ i ].magn = magn; 2011 #ifdef FORE200E_BSQ_DEBUG 2012 buffer[ i ].index = i; 2013 buffer[ i ].supplied = 0; 2014 #endif 2015 2016 /* allocate the receive buffer body */ 2017 if (fore200e_chunk_alloc(fore200e, 2018 &buffer[ i ].data, size, fore200e->bus->buffer_alignment, 2019 DMA_FROM_DEVICE) < 0) { 2020 2021 while (i > 0) 2022 fore200e_chunk_free(fore200e, &buffer[ --i ].data); 2023 kfree(buffer); 2024 2025 return -ENOMEM; 2026 } 2027 2028 /* insert the buffer into the free buffer list */ 2029 buffer[ i ].next = bsq->freebuf; 2030 bsq->freebuf = &buffer[ i ]; 2031 } 2032 /* all the buffers are free, initially */ 2033 bsq->freebuf_count = nbr; 2034 2035 #ifdef FORE200E_BSQ_DEBUG 2036 bsq_audit(3, bsq, scheme, magn); 2037 #endif 2038 } 2039 } 2040 2041 fore200e->state = FORE200E_STATE_ALLOC_BUF; 2042 return 0; 2043 } 2044 2045 2046 static int fore200e_init_bs_queue(struct fore200e *fore200e) 2047 { 2048 int scheme, magn, i; 2049 2050 struct host_bsq* bsq; 2051 struct cp_bsq_entry __iomem * cp_entry; 2052 2053 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) { 2054 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) { 2055 2056 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn); 2057 2058 bsq = &fore200e->host_bsq[ scheme ][ magn ]; 2059 2060 /* allocate and align the array of status words */ 2061 if (fore200e_dma_chunk_alloc(fore200e, 2062 &bsq->status, 2063 sizeof(enum status), 2064 QUEUE_SIZE_BS, 2065 fore200e->bus->status_alignment) < 0) { 2066 return -ENOMEM; 2067 } 2068 2069 /* allocate and align the array of receive buffer descriptors */ 2070 if (fore200e_dma_chunk_alloc(fore200e, 2071 &bsq->rbd_block, 2072 sizeof(struct rbd_block), 2073 QUEUE_SIZE_BS, 2074 fore200e->bus->descr_alignment) < 0) { 2075 2076 fore200e_dma_chunk_free(fore200e, &bsq->status); 2077 return -ENOMEM; 2078 } 2079 2080 /* get the base address of the cp resident buffer supply queue entries */ 2081 cp_entry = fore200e->virt_base + 2082 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]); 2083 2084 /* fill the host resident and cp resident buffer supply queue entries */ 2085 for (i = 0; i < QUEUE_SIZE_BS; i++) { 2086 2087 bsq->host_entry[ i ].status = 2088 FORE200E_INDEX(bsq->status.align_addr, enum status, i); 2089 bsq->host_entry[ i ].rbd_block = 2090 FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i); 2091 bsq->host_entry[ i ].rbd_block_dma = 2092 FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i); 2093 bsq->host_entry[ i ].cp_entry = &cp_entry[ i ]; 2094 2095 *bsq->host_entry[ i ].status = STATUS_FREE; 2096 2097 fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i), 2098 &cp_entry[ i ].status_haddr); 2099 } 2100 } 2101 } 2102 2103 fore200e->state = FORE200E_STATE_INIT_BSQ; 2104 return 0; 2105 } 2106 2107 2108 static int fore200e_init_rx_queue(struct fore200e *fore200e) 2109 { 2110 struct host_rxq* rxq = &fore200e->host_rxq; 2111 struct cp_rxq_entry __iomem * cp_entry; 2112 int i; 2113 2114 DPRINTK(2, "receive queue is being initialized\n"); 2115 2116 /* allocate and align the array of status words */ 2117 if (fore200e_dma_chunk_alloc(fore200e, 2118 &rxq->status, 2119 sizeof(enum status), 2120 QUEUE_SIZE_RX, 2121 fore200e->bus->status_alignment) < 0) { 2122 return -ENOMEM; 2123 } 2124 2125 /* allocate and align the array of receive PDU descriptors */ 2126 if (fore200e_dma_chunk_alloc(fore200e, 2127 &rxq->rpd, 2128 sizeof(struct rpd), 2129 QUEUE_SIZE_RX, 2130 fore200e->bus->descr_alignment) < 0) { 2131 2132 fore200e_dma_chunk_free(fore200e, &rxq->status); 2133 return -ENOMEM; 2134 } 2135 2136 /* get the base address of the cp resident rx queue entries */ 2137 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq); 2138 2139 /* fill the host resident and cp resident rx entries */ 2140 for (i=0; i < QUEUE_SIZE_RX; i++) { 2141 2142 rxq->host_entry[ i ].status = 2143 FORE200E_INDEX(rxq->status.align_addr, enum status, i); 2144 rxq->host_entry[ i ].rpd = 2145 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i); 2146 rxq->host_entry[ i ].rpd_dma = 2147 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i); 2148 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ]; 2149 2150 *rxq->host_entry[ i ].status = STATUS_FREE; 2151 2152 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i), 2153 &cp_entry[ i ].status_haddr); 2154 2155 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i), 2156 &cp_entry[ i ].rpd_haddr); 2157 } 2158 2159 /* set the head entry of the queue */ 2160 rxq->head = 0; 2161 2162 fore200e->state = FORE200E_STATE_INIT_RXQ; 2163 return 0; 2164 } 2165 2166 2167 static int fore200e_init_tx_queue(struct fore200e *fore200e) 2168 { 2169 struct host_txq* txq = &fore200e->host_txq; 2170 struct cp_txq_entry __iomem * cp_entry; 2171 int i; 2172 2173 DPRINTK(2, "transmit queue is being initialized\n"); 2174 2175 /* allocate and align the array of status words */ 2176 if (fore200e_dma_chunk_alloc(fore200e, 2177 &txq->status, 2178 sizeof(enum status), 2179 QUEUE_SIZE_TX, 2180 fore200e->bus->status_alignment) < 0) { 2181 return -ENOMEM; 2182 } 2183 2184 /* allocate and align the array of transmit PDU descriptors */ 2185 if (fore200e_dma_chunk_alloc(fore200e, 2186 &txq->tpd, 2187 sizeof(struct tpd), 2188 QUEUE_SIZE_TX, 2189 fore200e->bus->descr_alignment) < 0) { 2190 2191 fore200e_dma_chunk_free(fore200e, &txq->status); 2192 return -ENOMEM; 2193 } 2194 2195 /* get the base address of the cp resident tx queue entries */ 2196 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq); 2197 2198 /* fill the host resident and cp resident tx entries */ 2199 for (i=0; i < QUEUE_SIZE_TX; i++) { 2200 2201 txq->host_entry[ i ].status = 2202 FORE200E_INDEX(txq->status.align_addr, enum status, i); 2203 txq->host_entry[ i ].tpd = 2204 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i); 2205 txq->host_entry[ i ].tpd_dma = 2206 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i); 2207 txq->host_entry[ i ].cp_entry = &cp_entry[ i ]; 2208 2209 *txq->host_entry[ i ].status = STATUS_FREE; 2210 2211 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i), 2212 &cp_entry[ i ].status_haddr); 2213 2214 /* although there is a one-to-one mapping of tx queue entries and tpds, 2215 we do not write here the DMA (physical) base address of each tpd into 2216 the related cp resident entry, because the cp relies on this write 2217 operation to detect that a new pdu has been submitted for tx */ 2218 } 2219 2220 /* set the head and tail entries of the queue */ 2221 txq->head = 0; 2222 txq->tail = 0; 2223 2224 fore200e->state = FORE200E_STATE_INIT_TXQ; 2225 return 0; 2226 } 2227 2228 2229 static int fore200e_init_cmd_queue(struct fore200e *fore200e) 2230 { 2231 struct host_cmdq* cmdq = &fore200e->host_cmdq; 2232 struct cp_cmdq_entry __iomem * cp_entry; 2233 int i; 2234 2235 DPRINTK(2, "command queue is being initialized\n"); 2236 2237 /* allocate and align the array of status words */ 2238 if (fore200e_dma_chunk_alloc(fore200e, 2239 &cmdq->status, 2240 sizeof(enum status), 2241 QUEUE_SIZE_CMD, 2242 fore200e->bus->status_alignment) < 0) { 2243 return -ENOMEM; 2244 } 2245 2246 /* get the base address of the cp resident cmd queue entries */ 2247 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq); 2248 2249 /* fill the host resident and cp resident cmd entries */ 2250 for (i=0; i < QUEUE_SIZE_CMD; i++) { 2251 2252 cmdq->host_entry[ i ].status = 2253 FORE200E_INDEX(cmdq->status.align_addr, enum status, i); 2254 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ]; 2255 2256 *cmdq->host_entry[ i ].status = STATUS_FREE; 2257 2258 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i), 2259 &cp_entry[ i ].status_haddr); 2260 } 2261 2262 /* set the head entry of the queue */ 2263 cmdq->head = 0; 2264 2265 fore200e->state = FORE200E_STATE_INIT_CMDQ; 2266 return 0; 2267 } 2268 2269 2270 static void fore200e_param_bs_queue(struct fore200e *fore200e, 2271 enum buffer_scheme scheme, 2272 enum buffer_magn magn, int queue_length, 2273 int pool_size, int supply_blksize) 2274 { 2275 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ]; 2276 2277 fore200e->bus->write(queue_length, &bs_spec->queue_length); 2278 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size); 2279 fore200e->bus->write(pool_size, &bs_spec->pool_size); 2280 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize); 2281 } 2282 2283 2284 static int fore200e_initialize(struct fore200e *fore200e) 2285 { 2286 struct cp_queues __iomem * cpq; 2287 int ok, scheme, magn; 2288 2289 DPRINTK(2, "device %s being initialized\n", fore200e->name); 2290 2291 mutex_init(&fore200e->rate_mtx); 2292 spin_lock_init(&fore200e->q_lock); 2293 2294 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET; 2295 2296 /* enable cp to host interrupts */ 2297 fore200e->bus->write(1, &cpq->imask); 2298 2299 if (fore200e->bus->irq_enable) 2300 fore200e->bus->irq_enable(fore200e); 2301 2302 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect); 2303 2304 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len); 2305 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len); 2306 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len); 2307 2308 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension); 2309 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension); 2310 2311 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) 2312 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) 2313 fore200e_param_bs_queue(fore200e, scheme, magn, 2314 QUEUE_SIZE_BS, 2315 fore200e_rx_buf_nbr[ scheme ][ magn ], 2316 RBD_BLK_SIZE); 2317 2318 /* issue the initialize command */ 2319 fore200e->bus->write(STATUS_PENDING, &cpq->init.status); 2320 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode); 2321 2322 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000); 2323 if (ok == 0) { 2324 printk(FORE200E "device %s initialization failed\n", fore200e->name); 2325 return -ENODEV; 2326 } 2327 2328 printk(FORE200E "device %s initialized\n", fore200e->name); 2329 2330 fore200e->state = FORE200E_STATE_INITIALIZE; 2331 return 0; 2332 } 2333 2334 2335 static void fore200e_monitor_putc(struct fore200e *fore200e, char c) 2336 { 2337 struct cp_monitor __iomem * monitor = fore200e->cp_monitor; 2338 2339 #if 0 2340 printk("%c", c); 2341 #endif 2342 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send); 2343 } 2344 2345 2346 static int fore200e_monitor_getc(struct fore200e *fore200e) 2347 { 2348 struct cp_monitor __iomem * monitor = fore200e->cp_monitor; 2349 unsigned long timeout = jiffies + msecs_to_jiffies(50); 2350 int c; 2351 2352 while (time_before(jiffies, timeout)) { 2353 2354 c = (int) fore200e->bus->read(&monitor->soft_uart.recv); 2355 2356 if (c & FORE200E_CP_MONITOR_UART_AVAIL) { 2357 2358 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv); 2359 #if 0 2360 printk("%c", c & 0xFF); 2361 #endif 2362 return c & 0xFF; 2363 } 2364 } 2365 2366 return -1; 2367 } 2368 2369 2370 static void fore200e_monitor_puts(struct fore200e *fore200e, char *str) 2371 { 2372 while (*str) { 2373 2374 /* the i960 monitor doesn't accept any new character if it has something to say */ 2375 while (fore200e_monitor_getc(fore200e) >= 0); 2376 2377 fore200e_monitor_putc(fore200e, *str++); 2378 } 2379 2380 while (fore200e_monitor_getc(fore200e) >= 0); 2381 } 2382 2383 #ifdef __LITTLE_ENDIAN 2384 #define FW_EXT ".bin" 2385 #else 2386 #define FW_EXT "_ecd.bin2" 2387 #endif 2388 2389 static int fore200e_load_and_start_fw(struct fore200e *fore200e) 2390 { 2391 const struct firmware *firmware; 2392 const struct fw_header *fw_header; 2393 const __le32 *fw_data; 2394 u32 fw_size; 2395 u32 __iomem *load_addr; 2396 char buf[48]; 2397 int err; 2398 2399 sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT); 2400 if ((err = request_firmware(&firmware, buf, fore200e->dev)) < 0) { 2401 printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name); 2402 return err; 2403 } 2404 2405 fw_data = (const __le32 *)firmware->data; 2406 fw_size = firmware->size / sizeof(u32); 2407 fw_header = (const struct fw_header *)firmware->data; 2408 load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset); 2409 2410 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n", 2411 fore200e->name, load_addr, fw_size); 2412 2413 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) { 2414 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name); 2415 goto release; 2416 } 2417 2418 for (; fw_size--; fw_data++, load_addr++) 2419 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr); 2420 2421 DPRINTK(2, "device %s firmware being started\n", fore200e->name); 2422 2423 #if defined(__sparc_v9__) 2424 /* reported to be required by SBA cards on some sparc64 hosts */ 2425 fore200e_spin(100); 2426 #endif 2427 2428 sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset)); 2429 fore200e_monitor_puts(fore200e, buf); 2430 2431 if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) { 2432 printk(FORE200E "device %s firmware didn't start\n", fore200e->name); 2433 goto release; 2434 } 2435 2436 printk(FORE200E "device %s firmware started\n", fore200e->name); 2437 2438 fore200e->state = FORE200E_STATE_START_FW; 2439 err = 0; 2440 2441 release: 2442 release_firmware(firmware); 2443 return err; 2444 } 2445 2446 2447 static int fore200e_register(struct fore200e *fore200e, struct device *parent) 2448 { 2449 struct atm_dev* atm_dev; 2450 2451 DPRINTK(2, "device %s being registered\n", fore200e->name); 2452 2453 atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops, 2454 -1, NULL); 2455 if (atm_dev == NULL) { 2456 printk(FORE200E "unable to register device %s\n", fore200e->name); 2457 return -ENODEV; 2458 } 2459 2460 atm_dev->dev_data = fore200e; 2461 fore200e->atm_dev = atm_dev; 2462 2463 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS; 2464 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS; 2465 2466 fore200e->available_cell_rate = ATM_OC3_PCR; 2467 2468 fore200e->state = FORE200E_STATE_REGISTER; 2469 return 0; 2470 } 2471 2472 2473 static int fore200e_init(struct fore200e *fore200e, struct device *parent) 2474 { 2475 if (fore200e_register(fore200e, parent) < 0) 2476 return -ENODEV; 2477 2478 if (fore200e->bus->configure(fore200e) < 0) 2479 return -ENODEV; 2480 2481 if (fore200e->bus->map(fore200e) < 0) 2482 return -ENODEV; 2483 2484 if (fore200e_reset(fore200e, 1) < 0) 2485 return -ENODEV; 2486 2487 if (fore200e_load_and_start_fw(fore200e) < 0) 2488 return -ENODEV; 2489 2490 if (fore200e_initialize(fore200e) < 0) 2491 return -ENODEV; 2492 2493 if (fore200e_init_cmd_queue(fore200e) < 0) 2494 return -ENOMEM; 2495 2496 if (fore200e_init_tx_queue(fore200e) < 0) 2497 return -ENOMEM; 2498 2499 if (fore200e_init_rx_queue(fore200e) < 0) 2500 return -ENOMEM; 2501 2502 if (fore200e_init_bs_queue(fore200e) < 0) 2503 return -ENOMEM; 2504 2505 if (fore200e_alloc_rx_buf(fore200e) < 0) 2506 return -ENOMEM; 2507 2508 if (fore200e_get_esi(fore200e) < 0) 2509 return -EIO; 2510 2511 if (fore200e_irq_request(fore200e) < 0) 2512 return -EBUSY; 2513 2514 fore200e_supply(fore200e); 2515 2516 /* all done, board initialization is now complete */ 2517 fore200e->state = FORE200E_STATE_COMPLETE; 2518 return 0; 2519 } 2520 2521 #ifdef CONFIG_SBUS 2522 static int fore200e_sba_probe(struct platform_device *op) 2523 { 2524 struct fore200e *fore200e; 2525 static int index = 0; 2526 int err; 2527 2528 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); 2529 if (!fore200e) 2530 return -ENOMEM; 2531 2532 fore200e->bus = &fore200e_sbus_ops; 2533 fore200e->dev = &op->dev; 2534 fore200e->irq = op->archdata.irqs[0]; 2535 fore200e->phys_base = op->resource[0].start; 2536 2537 sprintf(fore200e->name, "SBA-200E-%d", index); 2538 2539 err = fore200e_init(fore200e, &op->dev); 2540 if (err < 0) { 2541 fore200e_shutdown(fore200e); 2542 kfree(fore200e); 2543 return err; 2544 } 2545 2546 index++; 2547 dev_set_drvdata(&op->dev, fore200e); 2548 2549 return 0; 2550 } 2551 2552 static void fore200e_sba_remove(struct platform_device *op) 2553 { 2554 struct fore200e *fore200e = dev_get_drvdata(&op->dev); 2555 2556 fore200e_shutdown(fore200e); 2557 kfree(fore200e); 2558 } 2559 2560 static const struct of_device_id fore200e_sba_match[] = { 2561 { 2562 .name = SBA200E_PROM_NAME, 2563 }, 2564 {}, 2565 }; 2566 MODULE_DEVICE_TABLE(of, fore200e_sba_match); 2567 2568 static struct platform_driver fore200e_sba_driver = { 2569 .driver = { 2570 .name = "fore_200e", 2571 .of_match_table = fore200e_sba_match, 2572 }, 2573 .probe = fore200e_sba_probe, 2574 .remove = fore200e_sba_remove, 2575 }; 2576 #endif 2577 2578 #ifdef CONFIG_PCI 2579 static int fore200e_pca_detect(struct pci_dev *pci_dev, 2580 const struct pci_device_id *pci_ent) 2581 { 2582 struct fore200e* fore200e; 2583 int err = 0; 2584 static int index = 0; 2585 2586 if (pci_enable_device(pci_dev)) { 2587 err = -EINVAL; 2588 goto out; 2589 } 2590 2591 if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) { 2592 err = -EINVAL; 2593 goto out; 2594 } 2595 2596 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); 2597 if (fore200e == NULL) { 2598 err = -ENOMEM; 2599 goto out_disable; 2600 } 2601 2602 fore200e->bus = &fore200e_pci_ops; 2603 fore200e->dev = &pci_dev->dev; 2604 fore200e->irq = pci_dev->irq; 2605 fore200e->phys_base = pci_resource_start(pci_dev, 0); 2606 2607 sprintf(fore200e->name, "PCA-200E-%d", index - 1); 2608 2609 pci_set_master(pci_dev); 2610 2611 printk(FORE200E "device PCA-200E found at 0x%lx, IRQ %s\n", 2612 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq)); 2613 2614 sprintf(fore200e->name, "PCA-200E-%d", index); 2615 2616 err = fore200e_init(fore200e, &pci_dev->dev); 2617 if (err < 0) { 2618 fore200e_shutdown(fore200e); 2619 goto out_free; 2620 } 2621 2622 ++index; 2623 pci_set_drvdata(pci_dev, fore200e); 2624 2625 out: 2626 return err; 2627 2628 out_free: 2629 kfree(fore200e); 2630 out_disable: 2631 pci_disable_device(pci_dev); 2632 goto out; 2633 } 2634 2635 2636 static void fore200e_pca_remove_one(struct pci_dev *pci_dev) 2637 { 2638 struct fore200e *fore200e; 2639 2640 fore200e = pci_get_drvdata(pci_dev); 2641 2642 fore200e_shutdown(fore200e); 2643 kfree(fore200e); 2644 pci_disable_device(pci_dev); 2645 } 2646 2647 2648 static const struct pci_device_id fore200e_pca_tbl[] = { 2649 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID }, 2650 { 0, } 2651 }; 2652 2653 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl); 2654 2655 static struct pci_driver fore200e_pca_driver = { 2656 .name = "fore_200e", 2657 .probe = fore200e_pca_detect, 2658 .remove = fore200e_pca_remove_one, 2659 .id_table = fore200e_pca_tbl, 2660 }; 2661 #endif 2662 2663 static int __init fore200e_module_init(void) 2664 { 2665 int err = 0; 2666 2667 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n"); 2668 2669 #ifdef CONFIG_SBUS 2670 err = platform_driver_register(&fore200e_sba_driver); 2671 if (err) 2672 return err; 2673 #endif 2674 2675 #ifdef CONFIG_PCI 2676 err = pci_register_driver(&fore200e_pca_driver); 2677 #endif 2678 2679 #ifdef CONFIG_SBUS 2680 if (err) 2681 platform_driver_unregister(&fore200e_sba_driver); 2682 #endif 2683 2684 return err; 2685 } 2686 2687 static void __exit fore200e_module_cleanup(void) 2688 { 2689 #ifdef CONFIG_PCI 2690 pci_unregister_driver(&fore200e_pca_driver); 2691 #endif 2692 #ifdef CONFIG_SBUS 2693 platform_driver_unregister(&fore200e_sba_driver); 2694 #endif 2695 } 2696 2697 static int 2698 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page) 2699 { 2700 struct fore200e* fore200e = FORE200E_DEV(dev); 2701 struct fore200e_vcc* fore200e_vcc; 2702 struct atm_vcc* vcc; 2703 int i, len, left = *pos; 2704 unsigned long flags; 2705 2706 if (!left--) { 2707 2708 if (fore200e_getstats(fore200e) < 0) 2709 return -EIO; 2710 2711 len = sprintf(page,"\n" 2712 " device:\n" 2713 " internal name:\t\t%s\n", fore200e->name); 2714 2715 /* print bus-specific information */ 2716 if (fore200e->bus->proc_read) 2717 len += fore200e->bus->proc_read(fore200e, page + len); 2718 2719 len += sprintf(page + len, 2720 " interrupt line:\t\t%s\n" 2721 " physical base address:\t0x%p\n" 2722 " virtual base address:\t0x%p\n" 2723 " factory address (ESI):\t%pM\n" 2724 " board serial number:\t\t%d\n\n", 2725 fore200e_irq_itoa(fore200e->irq), 2726 (void*)fore200e->phys_base, 2727 fore200e->virt_base, 2728 fore200e->esi, 2729 fore200e->esi[4] * 256 + fore200e->esi[5]); 2730 2731 return len; 2732 } 2733 2734 if (!left--) 2735 return sprintf(page, 2736 " free small bufs, scheme 1:\t%d\n" 2737 " free large bufs, scheme 1:\t%d\n" 2738 " free small bufs, scheme 2:\t%d\n" 2739 " free large bufs, scheme 2:\t%d\n", 2740 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count, 2741 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count, 2742 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count, 2743 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count); 2744 2745 if (!left--) { 2746 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat); 2747 2748 len = sprintf(page,"\n\n" 2749 " cell processor:\n" 2750 " heartbeat state:\t\t"); 2751 2752 if (hb >> 16 != 0xDEAD) 2753 len += sprintf(page + len, "0x%08x\n", hb); 2754 else 2755 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF); 2756 2757 return len; 2758 } 2759 2760 if (!left--) { 2761 static const char* media_name[] = { 2762 "unshielded twisted pair", 2763 "multimode optical fiber ST", 2764 "multimode optical fiber SC", 2765 "single-mode optical fiber ST", 2766 "single-mode optical fiber SC", 2767 "unknown" 2768 }; 2769 2770 static const char* oc3_mode[] = { 2771 "normal operation", 2772 "diagnostic loopback", 2773 "line loopback", 2774 "unknown" 2775 }; 2776 2777 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release); 2778 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release); 2779 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision); 2780 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type)); 2781 u32 oc3_index; 2782 2783 if (media_index > 4) 2784 media_index = 5; 2785 2786 switch (fore200e->loop_mode) { 2787 case ATM_LM_NONE: oc3_index = 0; 2788 break; 2789 case ATM_LM_LOC_PHY: oc3_index = 1; 2790 break; 2791 case ATM_LM_RMT_PHY: oc3_index = 2; 2792 break; 2793 default: oc3_index = 3; 2794 } 2795 2796 return sprintf(page, 2797 " firmware release:\t\t%d.%d.%d\n" 2798 " monitor release:\t\t%d.%d\n" 2799 " media type:\t\t\t%s\n" 2800 " OC-3 revision:\t\t0x%x\n" 2801 " OC-3 mode:\t\t\t%s", 2802 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24, 2803 mon960_release >> 16, mon960_release << 16 >> 16, 2804 media_name[ media_index ], 2805 oc3_revision, 2806 oc3_mode[ oc3_index ]); 2807 } 2808 2809 if (!left--) { 2810 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor; 2811 2812 return sprintf(page, 2813 "\n\n" 2814 " monitor:\n" 2815 " version number:\t\t%d\n" 2816 " boot status word:\t\t0x%08x\n", 2817 fore200e->bus->read(&cp_monitor->mon_version), 2818 fore200e->bus->read(&cp_monitor->bstat)); 2819 } 2820 2821 if (!left--) 2822 return sprintf(page, 2823 "\n" 2824 " device statistics:\n" 2825 " 4b5b:\n" 2826 " crc_header_errors:\t\t%10u\n" 2827 " framing_errors:\t\t%10u\n", 2828 be32_to_cpu(fore200e->stats->phy.crc_header_errors), 2829 be32_to_cpu(fore200e->stats->phy.framing_errors)); 2830 2831 if (!left--) 2832 return sprintf(page, "\n" 2833 " OC-3:\n" 2834 " section_bip8_errors:\t%10u\n" 2835 " path_bip8_errors:\t\t%10u\n" 2836 " line_bip24_errors:\t\t%10u\n" 2837 " line_febe_errors:\t\t%10u\n" 2838 " path_febe_errors:\t\t%10u\n" 2839 " corr_hcs_errors:\t\t%10u\n" 2840 " ucorr_hcs_errors:\t\t%10u\n", 2841 be32_to_cpu(fore200e->stats->oc3.section_bip8_errors), 2842 be32_to_cpu(fore200e->stats->oc3.path_bip8_errors), 2843 be32_to_cpu(fore200e->stats->oc3.line_bip24_errors), 2844 be32_to_cpu(fore200e->stats->oc3.line_febe_errors), 2845 be32_to_cpu(fore200e->stats->oc3.path_febe_errors), 2846 be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors), 2847 be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors)); 2848 2849 if (!left--) 2850 return sprintf(page,"\n" 2851 " ATM:\t\t\t\t cells\n" 2852 " TX:\t\t\t%10u\n" 2853 " RX:\t\t\t%10u\n" 2854 " vpi out of range:\t\t%10u\n" 2855 " vpi no conn:\t\t%10u\n" 2856 " vci out of range:\t\t%10u\n" 2857 " vci no conn:\t\t%10u\n", 2858 be32_to_cpu(fore200e->stats->atm.cells_transmitted), 2859 be32_to_cpu(fore200e->stats->atm.cells_received), 2860 be32_to_cpu(fore200e->stats->atm.vpi_bad_range), 2861 be32_to_cpu(fore200e->stats->atm.vpi_no_conn), 2862 be32_to_cpu(fore200e->stats->atm.vci_bad_range), 2863 be32_to_cpu(fore200e->stats->atm.vci_no_conn)); 2864 2865 if (!left--) 2866 return sprintf(page,"\n" 2867 " AAL0:\t\t\t cells\n" 2868 " TX:\t\t\t%10u\n" 2869 " RX:\t\t\t%10u\n" 2870 " dropped:\t\t\t%10u\n", 2871 be32_to_cpu(fore200e->stats->aal0.cells_transmitted), 2872 be32_to_cpu(fore200e->stats->aal0.cells_received), 2873 be32_to_cpu(fore200e->stats->aal0.cells_dropped)); 2874 2875 if (!left--) 2876 return sprintf(page,"\n" 2877 " AAL3/4:\n" 2878 " SAR sublayer:\t\t cells\n" 2879 " TX:\t\t\t%10u\n" 2880 " RX:\t\t\t%10u\n" 2881 " dropped:\t\t\t%10u\n" 2882 " CRC errors:\t\t%10u\n" 2883 " protocol errors:\t\t%10u\n\n" 2884 " CS sublayer:\t\t PDUs\n" 2885 " TX:\t\t\t%10u\n" 2886 " RX:\t\t\t%10u\n" 2887 " dropped:\t\t\t%10u\n" 2888 " protocol errors:\t\t%10u\n", 2889 be32_to_cpu(fore200e->stats->aal34.cells_transmitted), 2890 be32_to_cpu(fore200e->stats->aal34.cells_received), 2891 be32_to_cpu(fore200e->stats->aal34.cells_dropped), 2892 be32_to_cpu(fore200e->stats->aal34.cells_crc_errors), 2893 be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors), 2894 be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted), 2895 be32_to_cpu(fore200e->stats->aal34.cspdus_received), 2896 be32_to_cpu(fore200e->stats->aal34.cspdus_dropped), 2897 be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors)); 2898 2899 if (!left--) 2900 return sprintf(page,"\n" 2901 " AAL5:\n" 2902 " SAR sublayer:\t\t cells\n" 2903 " TX:\t\t\t%10u\n" 2904 " RX:\t\t\t%10u\n" 2905 " dropped:\t\t\t%10u\n" 2906 " congestions:\t\t%10u\n\n" 2907 " CS sublayer:\t\t PDUs\n" 2908 " TX:\t\t\t%10u\n" 2909 " RX:\t\t\t%10u\n" 2910 " dropped:\t\t\t%10u\n" 2911 " CRC errors:\t\t%10u\n" 2912 " protocol errors:\t\t%10u\n", 2913 be32_to_cpu(fore200e->stats->aal5.cells_transmitted), 2914 be32_to_cpu(fore200e->stats->aal5.cells_received), 2915 be32_to_cpu(fore200e->stats->aal5.cells_dropped), 2916 be32_to_cpu(fore200e->stats->aal5.congestion_experienced), 2917 be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted), 2918 be32_to_cpu(fore200e->stats->aal5.cspdus_received), 2919 be32_to_cpu(fore200e->stats->aal5.cspdus_dropped), 2920 be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors), 2921 be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors)); 2922 2923 if (!left--) 2924 return sprintf(page,"\n" 2925 " AUX:\t\t allocation failures\n" 2926 " small b1:\t\t\t%10u\n" 2927 " large b1:\t\t\t%10u\n" 2928 " small b2:\t\t\t%10u\n" 2929 " large b2:\t\t\t%10u\n" 2930 " RX PDUs:\t\t\t%10u\n" 2931 " TX PDUs:\t\t\t%10lu\n", 2932 be32_to_cpu(fore200e->stats->aux.small_b1_failed), 2933 be32_to_cpu(fore200e->stats->aux.large_b1_failed), 2934 be32_to_cpu(fore200e->stats->aux.small_b2_failed), 2935 be32_to_cpu(fore200e->stats->aux.large_b2_failed), 2936 be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed), 2937 fore200e->tx_sat); 2938 2939 if (!left--) 2940 return sprintf(page,"\n" 2941 " receive carrier:\t\t\t%s\n", 2942 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!"); 2943 2944 if (!left--) { 2945 return sprintf(page,"\n" 2946 " VCCs:\n address VPI VCI AAL " 2947 "TX PDUs TX min/max size RX PDUs RX min/max size\n"); 2948 } 2949 2950 for (i = 0; i < NBR_CONNECT; i++) { 2951 2952 vcc = fore200e->vc_map[i].vcc; 2953 2954 if (vcc == NULL) 2955 continue; 2956 2957 spin_lock_irqsave(&fore200e->q_lock, flags); 2958 2959 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) { 2960 2961 fore200e_vcc = FORE200E_VCC(vcc); 2962 ASSERT(fore200e_vcc); 2963 2964 len = sprintf(page, 2965 " %pK %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n", 2966 vcc, 2967 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal), 2968 fore200e_vcc->tx_pdu, 2969 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu, 2970 fore200e_vcc->tx_max_pdu, 2971 fore200e_vcc->rx_pdu, 2972 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu, 2973 fore200e_vcc->rx_max_pdu); 2974 2975 spin_unlock_irqrestore(&fore200e->q_lock, flags); 2976 return len; 2977 } 2978 2979 spin_unlock_irqrestore(&fore200e->q_lock, flags); 2980 } 2981 2982 return 0; 2983 } 2984 2985 module_init(fore200e_module_init); 2986 module_exit(fore200e_module_cleanup); 2987 2988 2989 static const struct atmdev_ops fore200e_ops = { 2990 .open = fore200e_open, 2991 .close = fore200e_close, 2992 .ioctl = fore200e_ioctl, 2993 .send = fore200e_send, 2994 .change_qos = fore200e_change_qos, 2995 .proc_read = fore200e_proc_read, 2996 .owner = THIS_MODULE 2997 }; 2998 2999 MODULE_LICENSE("GPL"); 3000 #ifdef CONFIG_PCI 3001 #ifdef __LITTLE_ENDIAN__ 3002 MODULE_FIRMWARE("pca200e.bin"); 3003 #else 3004 MODULE_FIRMWARE("pca200e_ecd.bin2"); 3005 #endif 3006 #endif /* CONFIG_PCI */ 3007 #ifdef CONFIG_SBUS 3008 MODULE_FIRMWARE("sba200e_ecd.bin2"); 3009 #endif 3010