/*
 * File Name:
 *   defxx.c
 *
 * Copyright Information:
 *   Copyright Digital Equipment Corporation 1996.
 *
 *   This software may be used and distributed according to the terms of
 *   the GNU General Public License, incorporated herein by reference.
 *
 * Abstract:
 *   A Linux device driver supporting the Digital Equipment Corporation
 *   FDDI TURBOchannel, EISA and PCI controller families.  Supported
 *   adapters include:
 *
 *		DEC FDDIcontroller/TURBOchannel (DEFTA)
 *		DEC FDDIcontroller/EISA         (DEFEA)
 *		DEC FDDIcontroller/PCI          (DEFPA)
 *
 * The original author:
 *   LVS	Lawrence V. Stefani <lstefani@yahoo.com>
 *
 * Maintainers:
 *   macro	Maciej W. Rozycki <macro@linux-mips.org>
 *
 * Credits:
 *   I'd like to thank Patricia Cross for helping me get started with
 *   Linux, David Davies for a lot of help upgrading and configuring
 *   my development system and for answering many OS and driver
 *   development questions, and Alan Cox for recommendations and
 *   integration help on getting FDDI support into Linux.  LVS
 *
 * Driver Architecture:
 *   The driver architecture is largely based on previous driver work
 *   for other operating systems.  The upper edge interface and
 *   functions were largely taken from existing Linux device drivers
 *   such as David Davies' DE4X5.C driver and Donald Becker's TULIP.C
 *   driver.
 *
 *   Adapter Probe -
 *      The driver scans for supported EISA adapters by reading the
 *      SLOT ID register for each EISA slot and making a match
 *      against the expected value.
 *
 *   Bus-Specific Initialization -
 *      This driver currently supports both EISA and PCI controller
 *      families.  While the custom DMA chip and FDDI logic is similar
 *      or identical, the bus logic is very different.  After
 *      initialization, the only bus-specific difference is in how the
 *      driver enables and disables interrupts.  Other than that, the
 *      run-time critical code behaves the same on both families.
 *      It's important to note that both adapter families are configured
 *      to I/O map, rather than memory map, the adapter registers.
 *
 *   Driver Open/Close -
 *      In the driver open routine, the driver ISR (interrupt service
 *      routine) is registered and the adapter is brought to an
 *      operational state.  In the driver close routine, the opposite
 *      occurs; the driver ISR is deregistered and the adapter is
 *      brought to a safe, but closed state.  Users may use consecutive
 *      commands to bring the adapter up and down as in the following
 *      example:
 *			ifconfig fddi0 up
 *			ifconfig fddi0 down
 *			ifconfig fddi0 up
 *
 *   Driver Shutdown -
 *      Apparently, there is no shutdown or halt routine support under
 *      Linux.  This routine would be called during "reboot" or
 *      "shutdown" to allow the driver to place the adapter in a safe
 *      state before a warm reboot occurs.  To be really safe, the user
 *      should close the adapter before shutdown (eg. ifconfig fddi0 down)
 *      to ensure that the adapter DMA engine is taken off-line.  However,
 *      the current driver code anticipates this problem and always issues
 *      a soft reset of the adapter at the beginning of driver initialization.
 *      A future driver enhancement in this area may occur in 2.1.X where
 *      Alan indicated that a shutdown handler may be implemented.
 *
 *   Interrupt Service Routine -
 *      The driver supports shared interrupts, so the ISR is registered for
 *      each board with the appropriate flag and the pointer to that board's
 *      device structure.  This provides the context during interrupt
 *      processing to support shared interrupts and multiple boards.
 *
 *      Interrupt enabling/disabling can occur at many levels.  At the host
 *      end, you can disable system interrupts, or disable interrupts at the
 *      PIC (on Intel systems).  Across the bus, both EISA and PCI adapters
 *      have a bus-logic chip interrupt enable/disable as well as a DMA
 *      controller interrupt enable/disable.
 *
 *      The driver currently enables and disables adapter interrupts at the
 *      bus-logic chip and assumes that Linux will take care of clearing or
 *      acknowledging any host-based interrupt chips.
 *
 *   Control Functions -
 *      Control functions are those used to support functions such as adding
 *      or deleting multicast addresses, enabling or disabling packet
 *      reception filters, or other custom/proprietary commands.  Presently,
 *      the driver supports the "get statistics", "set multicast list", and
 *      "set mac address" functions defined by Linux.  A list of possible
 *      enhancements includes:
 *
 *		- Custom ioctl interface for executing port interface commands
 *		- Custom ioctl interface for adding unicast addresses to
 *		  adapter CAM (to support bridge functions).
 *		- Custom ioctl interface for supporting firmware upgrades.
 *
 *   Hardware (port interface) Support Routines -
 *      The driver function names that start with "dfx_hw_" represent
 *      low-level port interface routines that are called frequently.  They
 *      include issuing a DMA or port control command to the adapter,
 *      resetting the adapter, or reading the adapter state.  Since the
 *      driver initialization and run-time code must make calls into the
 *      port interface, these routines were written to be as generic and
 *      usable as possible.
 *
 *   Receive Path -
 *      The adapter DMA engine supports a 256 entry receive descriptor block
 *      of which up to 255 entries can be used at any given time.  The
 *      architecture is a standard producer, consumer, completion model in
 *      which the driver "produces" receive buffers to the adapter, the
 *      adapter "consumes" the receive buffers by DMAing incoming packet data,
 *      and the driver "completes" the receive buffers by servicing the
 *      incoming packet, then "produces" a new buffer and starts the cycle
 *      again.  Receive buffers can be fragmented in up to 16 fragments
 *      (descriptor entries).  For simplicity, this driver posts
 *      single-fragment receive buffers of 4608 bytes, then allocates a
 *      sk_buff, copies the data, then reposts the buffer.  To reduce CPU
 *      utilization, a better approach would be to pass up the receive
 *      buffer (no extra copy) then allocate and post a replacement buffer.
 *      This is a performance enhancement that should be looked into at
 *      some point.
 *
 *   Transmit Path -
 *      Like the receive path, the adapter DMA engine supports a 256 entry
 *      transmit descriptor block of which up to 255 entries can be used at
 *      any given time.  Transmit buffers can be fragmented in up to 255
 *      fragments (descriptor entries).  This driver always posts one
 *      fragment per transmit packet request.
 *
 *      The fragment contains the entire packet from FC to end of data.
 *      Before posting the buffer to the adapter, the driver sets a three-byte
 *      packet request header (PRH) which is required by the Motorola MAC chip
 *      used on the adapters.  The PRH tells the MAC the type of token to
 *      receive/send, whether or not to generate and append the CRC, whether
 *      synchronous or asynchronous framing is used, etc.  Since the PRH
 *      definition is not necessarily consistent across all FDDI chipsets,
 *      the driver, rather than the common FDDI packet handler routines,
 *      sets these bytes.
 *
 *      To reduce the amount of descriptor fetches needed per transmit request,
 *      the driver takes advantage of the fact that there are at least three
 *      bytes available before the skb->data field on the outgoing transmit
 *      request.  This is guaranteed by having fddi_setup() in net_init.c set
 *      dev->hard_header_len to 24 bytes.  21 bytes accounts for the largest
 *      header in an 802.2 SNAP frame.  The other 3 bytes are the extra "pad"
 *      bytes which we'll use to store the PRH.
 *
 *      There's a subtle advantage to adding these pad bytes to the
 *      hard_header_len: it ensures that the data portion of the packet for
 *      an 802.2 SNAP frame is longword aligned.  Other FDDI driver
 *      implementations may not need the extra padding and can start copying
 *      or DMAing directly from the FC byte which starts at skb->data.  Should
 *      another driver implementation need ADDITIONAL padding, the net_init.c
 *      module should be updated and dev->hard_header_len should be increased.
 *      NOTE: To maintain the alignment on the data portion of the packet,
 *      dev->hard_header_len should always be evenly divisible by 4 and at
 *      least 24 bytes in size.
 *
 * Modification History:
 *	Date		Name	Description
 *	16-Aug-96	LVS	Created.
 *	20-Aug-96	LVS	Updated dfx_probe so that version information
 *				string is only displayed if 1 or more cards are
 *				found.  Changed dfx_rcv_queue_process to copy
 *				3 NULL bytes before FC to ensure that data is
 *				longword aligned in receive buffer.
 *	09-Sep-96	LVS	Updated dfx_ctl_set_multicast_list to enable
 *				LLC group promiscuous mode if multicast list
 *				is too large.  LLC individual/group promiscuous
 *				mode is now disabled if IFF_PROMISC flag not set.
 *				dfx_xmt_queue_pkt no longer checks for NULL skb
 *				on Alan Cox recommendation.  Added node address
 *				override support.
 *	12-Sep-96	LVS	Reset current address to factory address during
 *				device open.  Updated transmit path to post a
 *				single fragment which includes PRH->end of data.
 *	Mar 2000	AC	Did various cleanups for 2.3.x
 *	Jun 2000	jgarzik	PCI and resource alloc cleanups
 *	Jul 2000	tjeerd	Much cleanup and some bug fixes
 *	Sep 2000	tjeerd	Fix leak on unload, cosmetic code cleanup
 *	Feb 2001		Skb allocation fixes
 *	Feb 2001	davej	PCI enable cleanups.
 *	04 Aug 2003	macro	Converted to the DMA API.
 *	14 Aug 2004	macro	Fix device names reported.
 *	14 Jun 2005	macro	Use irqreturn_t.
 *	23 Oct 2006	macro	Big-endian host support.
 *	14 Dec 2006	macro	TURBOchannel support.
 */
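
/*
 * For reference, the 24-byte dev->hard_header_len described above can be
 * accounted for as follows (a sketch, assuming the usual FDDI 802.2 SNAP
 * layout):
 *
 *	 1 byte   FC (frame control)
 *	 6 bytes  destination MAC address
 *	 6 bytes  source MAC address
 *	 8 bytes  802.2 LLC/SNAP header	    (21 bytes: the largest header)
 *	 3 bytes  pad, reused for the PRH
 *	--------
 *	24 bytes  total, evenly divisible by 4, so the data portion that
 *		  follows the SNAP header stays longword aligned.
 */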

/* Include files */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/eisa.h>
#include <linux/errno.h>
#include <linux/fddidevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tc.h>

#include <asm/byteorder.h>
#include <asm/io.h>

#include "defxx.h"

/* Version information string should be updated prior to each new release!  */
#define DRV_NAME	"defxx"
#define DRV_VERSION	"v1.10"
#define DRV_RELDATE	"2006/12/14"

static char version[] __devinitdata =
	DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
	" Lawrence V. Stefani and others\n";

#define DYNAMIC_BUFFERS 1

#define SKBUFF_RX_COPYBREAK 200
/*
 * NEW_SKB_SIZE = PI_RCV_DATA_K_SIZE_MAX+128 to allow 128 byte
 * alignment for compatibility with old EISA boards.
 */
#define NEW_SKB_SIZE (PI_RCV_DATA_K_SIZE_MAX+128)

#ifdef CONFIG_PCI
#define DFX_BUS_PCI(dev) (dev->bus == &pci_bus_type)
#else
#define DFX_BUS_PCI(dev) 0
#endif

#ifdef CONFIG_EISA
#define DFX_BUS_EISA(dev) (dev->bus == &eisa_bus_type)
#else
#define DFX_BUS_EISA(dev) 0
#endif

#ifdef CONFIG_TC
#define DFX_BUS_TC(dev) (dev->bus == &tc_bus_type)
#else
#define DFX_BUS_TC(dev) 0
#endif

#ifdef CONFIG_DEFXX_MMIO
#define DFX_MMIO 1
#else
#define DFX_MMIO 0
#endif

/* Define module-wide (static) routines */

static void dfx_bus_init(struct net_device *dev);
static void dfx_bus_uninit(struct net_device *dev);
static void dfx_bus_config_check(DFX_board_t *bp);

static int dfx_driver_init(struct net_device *dev,
			   const char *print_name,
			   resource_size_t bar_start);
static int dfx_adap_init(DFX_board_t *bp, int get_buffers);

static int dfx_open(struct net_device *dev);
static int dfx_close(struct net_device *dev);

static void dfx_int_pr_halt_id(DFX_board_t *bp);
static void dfx_int_type_0_process(DFX_board_t *bp);
static void dfx_int_common(struct net_device *dev);
static irqreturn_t dfx_interrupt(int irq, void *dev_id);

static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev);
static void dfx_ctl_set_multicast_list(struct net_device *dev);
static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr);
static int dfx_ctl_update_cam(DFX_board_t *bp);
static int dfx_ctl_update_filters(DFX_board_t *bp);

static int dfx_hw_dma_cmd_req(DFX_board_t *bp);
static int dfx_hw_port_ctrl_req(DFX_board_t *bp, PI_UINT32 command,
				PI_UINT32 data_a, PI_UINT32 data_b,
				PI_UINT32 *host_data);
static void dfx_hw_adap_reset(DFX_board_t *bp, PI_UINT32 type);
static int dfx_hw_adap_state_rd(DFX_board_t *bp);
static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);

static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
static void dfx_rcv_queue_process(DFX_board_t *bp);
static void dfx_rcv_flush(DFX_board_t *bp);

static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
				     struct net_device *dev);
static int dfx_xmt_done(DFX_board_t *bp);
static void dfx_xmt_flush(DFX_board_t *bp);

/* Define module-wide (static) variables */

static struct pci_driver dfx_pci_driver;
static struct eisa_driver dfx_eisa_driver;
static struct tc_driver dfx_tc_driver;


/*
 * =======================
 * = dfx_port_write_long =
 * = dfx_port_read_long  =
 * =======================
 *
 * Overview:
 *   Routines for reading and writing values from/to adapter
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp     - pointer to board information
 *   offset - register offset from base I/O address
 *   data   - for dfx_port_write_long, this is a value to write;
 *            for dfx_port_read_long, this is a pointer to store
 *            the read value
 *
 * Functional Description:
 *   These routines perform the correct operation to read or write
 *   the adapter register.
 *
 *   EISA port block base addresses are based on the slot number in which the
 *   controller is installed.  For example, if the EISA controller is installed
 *   in slot 4, the port block base address is 0x4000.  If the controller is
 *   installed in slot 2, the port block base address is 0x2000, and so on.
 *   This port block can be used to access PDQ, ESIC, and DEFEA on-board
 *   registers using the register offsets defined in DEFXX.H.
 *
 *   PCI port block base addresses are assigned by the PCI BIOS or system
 *   firmware.  There is one 128 byte port block which can be accessed.  It
 *   allows for I/O mapping of both PDQ and PFI registers using the register
 *   offsets defined in DEFXX.H.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   bp->base is a valid base I/O address for this adapter.
 *   offset is a valid register offset for this adapter.
 *
 * Side Effects:
 *   Rather than produce macros for these functions, these routines
 *   are defined using "inline" to ensure that the compiler will
 *   generate inline code and not waste a procedure call and return.
 *   This provides all the benefits of macros, but with the
 *   advantage of strict data type checking.
 */

static inline void dfx_writel(DFX_board_t *bp, int offset, u32 data)
{
	writel(data, bp->base.mem + offset);
	mb();
}

static inline void dfx_outl(DFX_board_t *bp, int offset, u32 data)
{
	outl(data, bp->base.port + offset);
}

static void dfx_port_write_long(DFX_board_t *bp, int offset, u32 data)
{
	struct device __maybe_unused *bdev = bp->bus_dev;
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;

	if (dfx_use_mmio)
		dfx_writel(bp, offset, data);
	else
		dfx_outl(bp, offset, data);
}


static inline void dfx_readl(DFX_board_t *bp, int offset, u32 *data)
{
	mb();
	*data = readl(bp->base.mem + offset);
}

static inline void dfx_inl(DFX_board_t *bp, int offset, u32 *data)
{
	*data = inl(bp->base.port + offset);
}

static void dfx_port_read_long(DFX_board_t *bp, int offset, u32 *data)
{
	struct device __maybe_unused *bdev = bp->bus_dev;
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;

	if (dfx_use_mmio)
		dfx_readl(bp, offset, data);
	else
		dfx_inl(bp, offset, data);
}
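
/*
 * A minimal usage sketch for the accessors above (illustrative only; the
 * variable name is arbitrary).  This is essentially what dfx_int_common()
 * does further down to check for pending Type 0 interrupts:
 *
 *	u32 port_status;
 *
 *	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);
 *	if (port_status & PI_PSTATUS_M_TYPE_0_PENDING)
 *		dfx_int_type_0_process(bp);
 */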

/*
 * ================
 * = dfx_get_bars =
 * ================
 *
 * Overview:
 *   Retrieves the address range used to access control and status
 *   registers.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bdev      - pointer to device information
 *   bar_start - pointer to store the start address
 *   bar_len   - pointer to store the length of the area
 *
 * Assumptions:
 *   I am sure there are some.
 *
 * Side Effects:
 *   None
 */
static void dfx_get_bars(struct device *bdev,
			 resource_size_t *bar_start, resource_size_t *bar_len)
{
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;

	if (dfx_bus_pci) {
		int num = dfx_use_mmio ? 0 : 1;

		*bar_start = pci_resource_start(to_pci_dev(bdev), num);
		*bar_len = pci_resource_len(to_pci_dev(bdev), num);
	}
	if (dfx_bus_eisa) {
		unsigned long base_addr = to_eisa_device(bdev)->base_addr;
		resource_size_t bar;

		if (dfx_use_mmio) {
			bar = inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_2);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_1);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_CMP_0);
			bar <<= 16;
			*bar_start = bar;
			bar = inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_2);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_1);
			bar <<= 8;
			bar |= inb(base_addr + PI_ESIC_K_MEM_ADD_MASK_0);
			bar <<= 16;
			*bar_len = (bar | PI_MEM_ADD_MASK_M) + 1;
		} else {
			*bar_start = base_addr;
			*bar_len = PI_ESIC_K_CSR_IO_LEN;
		}
	}
	if (dfx_bus_tc) {
		*bar_start = to_tc_dev(bdev)->resource.start +
			     PI_TC_K_CSR_OFFSET;
		*bar_len = PI_TC_K_CSR_LEN;
	}
}

static const struct net_device_ops dfx_netdev_ops = {
	.ndo_open		= dfx_open,
	.ndo_stop		= dfx_close,
	.ndo_start_xmit		= dfx_xmt_queue_pkt,
	.ndo_get_stats		= dfx_ctl_get_stats,
	.ndo_set_rx_mode	= dfx_ctl_set_multicast_list,
	.ndo_set_mac_address	= dfx_ctl_set_mac_address,
};

/*
 * ================
 * = dfx_register =
 * ================
 *
 * Overview:
 *   Initializes a supported FDDI controller
 *
 * Returns:
 *   Condition code
 *
 * Arguments:
 *   bdev - pointer to device information
 *
 * Functional Description:
 *
 * Return Codes:
 *   0      - This device (fddi0, fddi1, etc) configured successfully
 *   -EBUSY - Failed to get resources, or dfx_driver_init failed.
 *
 * Assumptions:
 *   It compiles so it should work :-(  (PCI cards do :-)
 *
 * Side Effects:
 *   Device structures for FDDI adapters (fddi0, fddi1, etc) are
 *   initialized and the board resources are read and stored in
 *   the device structure.
 */
static int __devinit dfx_register(struct device *bdev)
{
	static int version_disp;
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
	const char *print_name = dev_name(bdev);
	struct net_device *dev;
	DFX_board_t *bp;			/* board pointer */
	resource_size_t bar_start = 0;		/* pointer to port */
	resource_size_t bar_len = 0;		/* resource length */
	int alloc_size;				/* total buffer size used */
	struct resource *region;
	int err = 0;

	if (!version_disp) {	/* display version info if adapter is found */
		version_disp = 1;	/* set display flag to TRUE so that */
		printk(version);	/* we only display this string ONCE */
	}

	dev = alloc_fddidev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR "%s: Unable to allocate fddidev, aborting\n",
		       print_name);
		return -ENOMEM;
	}

	/* Enable PCI device. */
	if (dfx_bus_pci && pci_enable_device(to_pci_dev(bdev))) {
		printk(KERN_ERR "%s: Cannot enable PCI device, aborting\n",
		       print_name);
		err = -ENODEV;
		goto err_out;
	}

	SET_NETDEV_DEV(dev, bdev);

	bp = netdev_priv(dev);
	bp->bus_dev = bdev;
	dev_set_drvdata(bdev, dev);

	dfx_get_bars(bdev, &bar_start, &bar_len);

	if (dfx_use_mmio)
		region = request_mem_region(bar_start, bar_len, print_name);
	else
		region = request_region(bar_start, bar_len, print_name);
	if (!region) {
		printk(KERN_ERR "%s: Cannot reserve I/O resource "
		       "0x%lx @ 0x%lx, aborting\n",
		       print_name, (long)bar_len, (long)bar_start);
		err = -EBUSY;
		goto err_out_disable;
	}

	/* Set up I/O base address. */
	if (dfx_use_mmio) {
		bp->base.mem = ioremap_nocache(bar_start, bar_len);
		if (!bp->base.mem) {
			printk(KERN_ERR "%s: Cannot map MMIO\n", print_name);
			err = -ENOMEM;
			goto err_out_region;
		}
	} else {
		bp->base.port = bar_start;
		dev->base_addr = bar_start;
	}

	/* Initialize new device structure */
	dev->netdev_ops = &dfx_netdev_ops;

	if (dfx_bus_pci)
		pci_set_master(to_pci_dev(bdev));

	if (dfx_driver_init(dev, print_name, bar_start) != DFX_K_SUCCESS) {
		err = -ENODEV;
		goto err_out_unmap;
	}

	err = register_netdev(dev);
	if (err)
		goto err_out_kfree;

	printk("%s: registered as %s\n", print_name, dev->name);
	return 0;

err_out_kfree:
	alloc_size = sizeof(PI_DESCR_BLOCK) +
		     PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
#ifndef DYNAMIC_BUFFERS
		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
#endif
		     sizeof(PI_CONSUMER_BLOCK) +
		     (PI_ALIGN_K_DESC_BLK - 1);
	if (bp->kmalloced)
		dma_free_coherent(bdev, alloc_size,
				  bp->kmalloced, bp->kmalloced_dma);

err_out_unmap:
	if (dfx_use_mmio)
		iounmap(bp->base.mem);

err_out_region:
	if (dfx_use_mmio)
		release_mem_region(bar_start, bar_len);
	else
		release_region(bar_start, bar_len);

err_out_disable:
	if (dfx_bus_pci)
		pci_disable_device(to_pci_dev(bdev));

err_out:
	free_netdev(dev);
	return err;
}


/*
 * ================
 * = dfx_bus_init =
 * ================
 *
 * Overview:
 *   Initializes the bus-specific controller logic.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   dev - pointer to device information
 *
 * Functional Description:
 *   Determine and save adapter IRQ in device table,
 *   then perform bus-specific logic initialization.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   bp->base has already been set with the proper
 *   base I/O address for this device.
 *
 * Side Effects:
 *   Interrupts are enabled at the adapter bus-specific logic.
 *   Note: Interrupts at the DMA engine (PDQ chip) are not
 *         enabled yet.
 */

static void __devinit dfx_bus_init(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);
	struct device *bdev = bp->bus_dev;
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
	u8 val;

	DBG_printk("In dfx_bus_init...\n");

	/* Initialize a pointer back to the net_device struct */
	bp->dev = dev;

	/* Initialize adapter based on bus type */

	if (dfx_bus_tc)
		dev->irq = to_tc_dev(bdev)->interrupt;
	if (dfx_bus_eisa) {
		unsigned long base_addr = to_eisa_device(bdev)->base_addr;

		/* Get the interrupt level from the ESIC chip.  */
		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
		val &= PI_CONFIG_STAT_0_M_IRQ;
		val >>= PI_CONFIG_STAT_0_V_IRQ;

		switch (val) {
		case PI_CONFIG_STAT_0_IRQ_K_9:
			dev->irq = 9;
			break;

		case PI_CONFIG_STAT_0_IRQ_K_10:
			dev->irq = 10;
			break;

		case PI_CONFIG_STAT_0_IRQ_K_11:
			dev->irq = 11;
			break;

		case PI_CONFIG_STAT_0_IRQ_K_15:
			dev->irq = 15;
			break;
		}

		/*
		 * Enable memory decoding (MEMCS0) and/or port decoding
		 * (IOCS1/IOCS0) as appropriate in Function Control
		 * Register.  One of the port chip selects seems to be
		 * used for the Burst Holdoff register, but this bit of
		 * documentation is missing and as yet it has not been
		 * determined which of the two.  This is also the reason
		 * the size of the decoded port range is twice as large
		 * as one required by the PDQ.
		 */

		/* Set the decode range of the board.  */
		val = ((bp->base.port >> 12) << PI_IO_CMP_V_SLOT);
		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_0_1);
		outb(0, base_addr + PI_ESIC_K_IO_ADD_CMP_0_0);
		outb(val, base_addr + PI_ESIC_K_IO_ADD_CMP_1_1);
		outb(0, base_addr + PI_ESIC_K_IO_ADD_CMP_1_0);
		val = PI_ESIC_K_CSR_IO_LEN - 1;
		outb((val >> 8) & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_0_1);
		outb(val & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_0_0);
		outb((val >> 8) & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_1_1);
		outb(val & 0xff, base_addr + PI_ESIC_K_IO_ADD_MASK_1_0);

		/* Enable the decoders. */
		val = PI_FUNCTION_CNTRL_M_IOCS1 | PI_FUNCTION_CNTRL_M_IOCS0;
		if (dfx_use_mmio)
			val |= PI_FUNCTION_CNTRL_M_MEMCS0;
		outb(val, base_addr + PI_ESIC_K_FUNCTION_CNTRL);

		/*
		 * Enable access to the rest of the module
		 * (including PDQ and packet memory).
		 */
		val = PI_SLOT_CNTRL_M_ENB;
		outb(val, base_addr + PI_ESIC_K_SLOT_CNTRL);

		/*
		 * Map PDQ registers into memory or port space.  This is
		 * done with a bit in the Burst Holdoff register.
		 */
		val = inb(base_addr + PI_DEFEA_K_BURST_HOLDOFF);
		if (dfx_use_mmio)
			val |= PI_BURST_HOLDOFF_V_MEM_MAP;
		else
			val &= ~PI_BURST_HOLDOFF_V_MEM_MAP;
		outb(val, base_addr + PI_DEFEA_K_BURST_HOLDOFF);

		/* Enable interrupts at EISA bus interface chip (ESIC) */
		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
		val |= PI_CONFIG_STAT_0_M_INT_ENB;
		outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
	}
	if (dfx_bus_pci) {
		struct pci_dev *pdev = to_pci_dev(bdev);

		/* Get the interrupt level from the PCI Configuration Table */

		dev->irq = pdev->irq;

		/* Check Latency Timer and set if less than minimal */

		pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &val);
		if (val < PFI_K_LAT_TIMER_MIN) {
			val = PFI_K_LAT_TIMER_DEF;
			pci_write_config_byte(pdev, PCI_LATENCY_TIMER, val);
		}

		/* Enable interrupts at PCI bus interface chip (PFI) */
		val = PFI_MODE_M_PDQ_INT_ENB | PFI_MODE_M_DMA_ENB;
		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, val);
	}
}

/*
 * ==================
 * = dfx_bus_uninit =
 * ==================
 *
 * Overview:
 *   Uninitializes the bus-specific controller logic.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   dev - pointer to device information
 *
 * Functional Description:
 *   Perform bus-specific logic uninitialization.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   bp->base has already been set with the proper
 *   base I/O address for this device.
 *
 * Side Effects:
 *   Interrupts are disabled at the adapter bus-specific logic.
 */

static void __devexit dfx_bus_uninit(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);
	struct device *bdev = bp->bus_dev;
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	u8 val;

	DBG_printk("In dfx_bus_uninit...\n");

	/* Uninitialize adapter based on bus type */

	if (dfx_bus_eisa) {
		unsigned long base_addr = to_eisa_device(bdev)->base_addr;

		/* Disable interrupts at EISA bus interface chip (ESIC) */
		val = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
		val &= ~PI_CONFIG_STAT_0_M_INT_ENB;
		outb(val, base_addr + PI_ESIC_K_IO_CONFIG_STAT_0);
	}
	if (dfx_bus_pci) {
		/* Disable interrupts at PCI bus interface chip (PFI) */
		dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 0);
	}
}


/*
 * ========================
 * = dfx_bus_config_check =
 * ========================
 *
 * Overview:
 *   Checks the configuration (burst size, full-duplex, etc.)  If any
 *   parameters are illegal, then this routine will set new defaults.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   For Revision 1 FDDI EISA, Revision 2 or later FDDI EISA with rev E or
 *   later PDQ, and all FDDI PCI controllers, all values are legal.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   dfx_adap_init has NOT been called yet so burst size and other items have
 *   not been set.
 *
 * Side Effects:
 *   None
 */

static void __devinit dfx_bus_config_check(DFX_board_t *bp)
{
	struct device __maybe_unused *bdev = bp->bus_dev;
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int status;		/* return code from adapter port control call */
	u32 host_data;		/* LW data returned from port control call */

	DBG_printk("In dfx_bus_config_check...\n");

	/* Configuration check only valid for EISA adapter */

	if (dfx_bus_eisa) {
		/*
		 * First check if revision 2 EISA controller.  Rev. 1 cards
		 * used PDQ revision B, so no workaround needed in this case.
		 * Rev. 3 cards used PDQ revision E, so no workaround needed
		 * in this case, either.  Only Rev. 2 cards used either Rev. D
		 * or E chips, so we must verify the chip revision on Rev. 2
		 * cards.
		 */
		if (to_eisa_device(bdev)->id.driver_data == DEFEA_PROD_ID_2) {
			/*
			 * Revision 2 FDDI EISA controller found,
			 * so let's check PDQ revision of adapter.
			 */
			status = dfx_hw_port_ctrl_req(bp,
						      PI_PCTRL_M_SUB_CMD,
						      PI_SUB_CMD_K_PDQ_REV_GET,
						      0,
						      &host_data);
			if ((status != DFX_K_SUCCESS) || (host_data == 2))
			{
				/*
				 * Either we couldn't determine the PDQ revision,
				 * or we determined that it is at revision D.
				 * In either case, we need to implement the
				 * workaround.
				 */

				/* Ensure that the burst size is set to 8 longwords or less */

				switch (bp->burst_size)
				{
				case PI_PDATA_B_DMA_BURST_SIZE_32:
				case PI_PDATA_B_DMA_BURST_SIZE_16:
					bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_8;
					break;

				default:
					break;
				}

				/* Ensure that full-duplex mode is not enabled */

				bp->full_duplex_enb = PI_SNMP_K_FALSE;
			}
		}
	}
}


/*
 * ===================
 * = dfx_driver_init =
 * ===================
 *
 * Overview:
 *   Initializes remaining adapter board structure information
 *   and makes sure adapter is in a safe state prior to dfx_open().
 *
 * Returns:
 *   Condition code
 *
 * Arguments:
 *   dev        - pointer to device information
 *   print_name - printable device name
 *
 * Functional Description:
 *   This function allocates additional resources such as the host memory
 *   blocks needed by the adapter (eg. descriptor and consumer blocks).
 *   Remaining bus initialization steps are also completed.  The adapter
 *   is also reset so that it is in the DMA_UNAVAILABLE state.  The OS
 *   must call dfx_open() to open the adapter and bring it on-line.
 *
 * Return Codes:
 *   DFX_K_SUCCESS - initialization succeeded
 *   DFX_K_FAILURE - initialization failed - could not allocate memory
 *                   or read adapter MAC address
 *
 * Assumptions:
 *   Memory allocated from pci_alloc_consistent() call is physically
 *   contiguous, locked memory.
 *
 * Side Effects:
 *   Adapter is reset and should be in DMA_UNAVAILABLE state before
 *   returning from this routine.
 */

static int __devinit dfx_driver_init(struct net_device *dev,
				     const char *print_name,
				     resource_size_t bar_start)
{
	DFX_board_t *bp = netdev_priv(dev);
	struct device *bdev = bp->bus_dev;
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_eisa = DFX_BUS_EISA(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
	int alloc_size;			/* total buffer size needed */
	char *top_v, *curr_v;		/* virtual addrs into memory block */
	dma_addr_t top_p, curr_p;	/* physical addrs into memory block */
	u32 data;			/* host data register value */
	__le32 le32;
	char *board_name = NULL;

	DBG_printk("In dfx_driver_init...\n");

	/* Initialize bus-specific hardware registers */

	dfx_bus_init(dev);

	/*
	 * Initialize default values for configurable parameters
	 *
	 * Note: All of these parameters are ones that a user may
	 *       want to customize.  It'd be nice to break these
	 *       out into Space.c or someplace else that's more
	 *       accessible/understandable than this file.
	 */

	bp->full_duplex_enb = PI_SNMP_K_FALSE;
	bp->req_ttrt = 8 * 12500;		/* 8ms in 80 nanosec units */
	bp->burst_size = PI_PDATA_B_DMA_BURST_SIZE_DEF;
	bp->rcv_bufs_to_post = RCV_BUFS_DEF;

	/*
	 * Ensure that HW configuration is OK
	 *
	 * Note: Depending on the hardware revision, we may need to modify
	 *       some of the configurable parameters to workaround hardware
	 *       limitations.  We'll perform this configuration check AFTER
	 *       setting the parameters to their default values.
	 */

	dfx_bus_config_check(bp);

	/* Disable PDQ interrupts first */

	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB,
			    PI_HOST_INT_K_DISABLE_ALL_INTS);

	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */

	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);

	/* Read the factory MAC address from the adapter then save it */

	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_LO, 0,
				 &data) != DFX_K_SUCCESS) {
		printk("%s: Could not read adapter factory MAC address!\n",
		       print_name);
		return DFX_K_FAILURE;
	}
	le32 = cpu_to_le32(data);
	memcpy(&bp->factory_mac_addr[0], &le32, sizeof(u32));

	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_MLA, PI_PDATA_A_MLA_K_HI, 0,
				 &data) != DFX_K_SUCCESS) {
		printk("%s: Could not read adapter factory MAC address!\n",
		       print_name);
		return DFX_K_FAILURE;
	}
	le32 = cpu_to_le32(data);
	memcpy(&bp->factory_mac_addr[4], &le32, sizeof(u16));

	/*
	 * Set current address to factory address
	 *
	 * Note: Node address override support is handled through
	 *       dfx_ctl_set_mac_address.
	 */

	memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
	if (dfx_bus_tc)
		board_name = "DEFTA";
	if (dfx_bus_eisa)
		board_name = "DEFEA";
	if (dfx_bus_pci)
		board_name = "DEFPA";
	pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
		print_name, board_name, dfx_use_mmio ? "" : "I/O ",
		(long long)bar_start, dev->irq, dev->dev_addr);

	/*
	 * Get memory for descriptor block, consumer block, and other buffers
	 * that need to be DMA read or written to by the adapter.
	 */

	alloc_size = sizeof(PI_DESCR_BLOCK) +
		     PI_CMD_REQ_K_SIZE_MAX +
		     PI_CMD_RSP_K_SIZE_MAX +
#ifndef DYNAMIC_BUFFERS
		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
#endif
		     sizeof(PI_CONSUMER_BLOCK) +
		     (PI_ALIGN_K_DESC_BLK - 1);
	bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
						   &bp->kmalloced_dma,
						   GFP_ATOMIC);
	if (top_v == NULL) {
		printk("%s: Could not allocate memory for host buffers "
		       "and structures!\n", print_name);
		return DFX_K_FAILURE;
	}
	memset(top_v, 0, alloc_size);	/* zero out memory before continuing */
	top_p = bp->kmalloced_dma;	/* get physical address of buffer */

	/*
	 * To guarantee the 8K alignment required for the descriptor block,
	 * 8K - 1 plus the amount of memory needed was allocated.  The
	 * physical address is now 8K aligned.  By carving up the memory in
	 * a specific order, we'll guarantee the alignment requirements for
	 * all other structures.
	 *
	 * Note: If the assumptions change regarding the non-paged, non-cached,
	 *       physically contiguous nature of the memory block or the address
	 *       alignments, then we'll need to implement a different algorithm
	 *       for allocating the needed memory.
	 */

	curr_p = ALIGN(top_p, PI_ALIGN_K_DESC_BLK);
	curr_v = top_v + (curr_p - top_p);

	/* Reserve space for descriptor block */

	bp->descr_block_virt = (PI_DESCR_BLOCK *) curr_v;
	bp->descr_block_phys = curr_p;
	curr_v += sizeof(PI_DESCR_BLOCK);
	curr_p += sizeof(PI_DESCR_BLOCK);

	/* Reserve space for command request buffer */

	bp->cmd_req_virt = (PI_DMA_CMD_REQ *) curr_v;
	bp->cmd_req_phys = curr_p;
	curr_v += PI_CMD_REQ_K_SIZE_MAX;
	curr_p += PI_CMD_REQ_K_SIZE_MAX;

	/* Reserve space for command response buffer */

	bp->cmd_rsp_virt = (PI_DMA_CMD_RSP *) curr_v;
	bp->cmd_rsp_phys = curr_p;
	curr_v += PI_CMD_RSP_K_SIZE_MAX;
	curr_p += PI_CMD_RSP_K_SIZE_MAX;

	/* Reserve space for the LLC host receive queue buffers */

	bp->rcv_block_virt = curr_v;
	bp->rcv_block_phys = curr_p;

#ifndef DYNAMIC_BUFFERS
	curr_v += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
	curr_p += (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX);
#endif

	/* Reserve space for the consumer block */

	bp->cons_block_virt = (PI_CONSUMER_BLOCK *) curr_v;
	bp->cons_block_phys = curr_p;

	/* Display virtual and physical addresses if debug driver */

	DBG_printk("%s: Descriptor block virt = %0lX, phys = %0X\n",
		   print_name,
		   (long)bp->descr_block_virt, bp->descr_block_phys);
	DBG_printk("%s: Command Request buffer virt = %0lX, phys = %0X\n",
		   print_name, (long)bp->cmd_req_virt, bp->cmd_req_phys);
	DBG_printk("%s: Command Response buffer virt = %0lX, phys = %0X\n",
		   print_name, (long)bp->cmd_rsp_virt, bp->cmd_rsp_phys);
	DBG_printk("%s: Receive buffer block virt = %0lX, phys = %0X\n",
		   print_name, (long)bp->rcv_block_virt, bp->rcv_block_phys);
	DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n",
		   print_name, (long)bp->cons_block_virt, bp->cons_block_phys);

	return DFX_K_SUCCESS;
}


/*
 * =================
 * = dfx_adap_init =
 * =================
 *
 * Overview:
 *   Brings the adapter to the link avail/link unavailable state.
 *
 * Returns:
 *   Condition code
 *
 * Arguments:
 *   bp          - pointer to board information
 *   get_buffers - non-zero if buffers to be allocated
 *
 * Functional Description:
 *   Issues the low-level firmware/hardware calls necessary to bring
 *   the adapter up, or to properly reset and restore adapter during
 *   run-time.
 *
 * Return Codes:
 *   DFX_K_SUCCESS - Adapter brought up successfully
 *   DFX_K_FAILURE - Adapter initialization failed
 *
 * Assumptions:
 *   bp->reset_type should be set to a valid reset type value before
 *   calling this routine.
 *
 * Side Effects:
 *   Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
 *   upon a successful return of this routine.
 */

static int dfx_adap_init(DFX_board_t *bp, int get_buffers)
{
	DBG_printk("In dfx_adap_init...\n");

	/* Disable PDQ interrupts first */

	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB,
			    PI_HOST_INT_K_DISABLE_ALL_INTS);

	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */

	if (dfx_hw_dma_uninit(bp, bp->reset_type) != DFX_K_SUCCESS)
	{
		printk("%s: Could not uninitialize/reset adapter!\n",
		       bp->dev->name);
		return DFX_K_FAILURE;
	}

	/*
	 * When the PDQ is reset, some false Type 0 interrupts may be pending,
	 * so we'll acknowledge all Type 0 interrupts now before continuing.
	 */

	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS,
			    PI_HOST_INT_K_ACK_ALL_TYPE_0);

	/*
	 * Clear Type 1 and Type 2 registers before going to DMA_AVAILABLE state
	 *
	 * Note: We only need to clear host copies of these registers.  The PDQ
	 *       reset takes care of the on-board register values.
	 */

	bp->cmd_req_reg.lword = 0;
	bp->cmd_rsp_reg.lword = 0;
	bp->rcv_xmt_reg.lword = 0;

	/* Clear consumer block before going to DMA_AVAILABLE state */

	memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));

	/* Initialize the DMA Burst Size */

	if (dfx_hw_port_ctrl_req(bp,
				 PI_PCTRL_M_SUB_CMD,
				 PI_SUB_CMD_K_BURST_SIZE_SET,
				 bp->burst_size,
				 NULL) != DFX_K_SUCCESS)
	{
		printk("%s: Could not set adapter burst size!\n",
		       bp->dev->name);
		return DFX_K_FAILURE;
	}

	/*
	 * Set base address of Consumer Block
	 *
	 * Assumption: 32-bit physical address of consumer block is 64 byte
	 *             aligned.  That is, bits 0-5 of the address must be zero.
	 */

	if (dfx_hw_port_ctrl_req(bp,
				 PI_PCTRL_M_CONS_BLOCK,
				 bp->cons_block_phys,
				 0,
				 NULL) != DFX_K_SUCCESS)
	{
		printk("%s: Could not set consumer block address!\n",
		       bp->dev->name);
		return DFX_K_FAILURE;
	}

	/*
	 * Set the base address of Descriptor Block and bring adapter
	 * to DMA_AVAILABLE state.
	 *
	 * Note: We also set the literal and data swapping requirements
	 *       in this command.
	 *
	 * Assumption: 32-bit physical address of descriptor block
	 *             is 8Kbyte aligned.
	 */
	if (dfx_hw_port_ctrl_req(bp, PI_PCTRL_M_INIT,
				 (u32)(bp->descr_block_phys |
				       PI_PDATA_A_INIT_M_BSWAP_INIT),
				 0, NULL) != DFX_K_SUCCESS) {
		printk("%s: Could not set descriptor block address!\n",
		       bp->dev->name);
		return DFX_K_FAILURE;
	}

	/* Set transmit flush timeout value */

	bp->cmd_req_virt->cmd_type = PI_CMD_K_CHARS_SET;
	bp->cmd_req_virt->char_set.item[0].item_code = PI_ITEM_K_FLUSH_TIME;
	bp->cmd_req_virt->char_set.item[0].value = 3;	/* 3 seconds */
	bp->cmd_req_virt->char_set.item[0].item_index = 0;
	bp->cmd_req_virt->char_set.item[1].item_code = PI_ITEM_K_EOL;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
	{
		printk("%s: DMA command request failed!\n", bp->dev->name);
		return DFX_K_FAILURE;
	}

	/* Set the initial values for eFDXEnable and MACTReq MIB objects */

	bp->cmd_req_virt->cmd_type = PI_CMD_K_SNMP_SET;
	bp->cmd_req_virt->snmp_set.item[0].item_code = PI_ITEM_K_FDX_ENB_DIS;
	bp->cmd_req_virt->snmp_set.item[0].value = bp->full_duplex_enb;
	bp->cmd_req_virt->snmp_set.item[0].item_index = 0;
	bp->cmd_req_virt->snmp_set.item[1].item_code = PI_ITEM_K_MAC_T_REQ;
	bp->cmd_req_virt->snmp_set.item[1].value = bp->req_ttrt;
	bp->cmd_req_virt->snmp_set.item[1].item_index = 0;
	bp->cmd_req_virt->snmp_set.item[2].item_code = PI_ITEM_K_EOL;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
	{
		printk("%s: DMA command request failed!\n", bp->dev->name);
		return DFX_K_FAILURE;
	}

	/* Initialize adapter CAM */

	if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
	{
		printk("%s: Adapter CAM update failed!\n", bp->dev->name);
		return DFX_K_FAILURE;
	}

	/* Initialize adapter filters */

	if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS)
	{
		printk("%s: Adapter filters update failed!\n", bp->dev->name);
		return DFX_K_FAILURE;
	}

	/*
	 * Remove any existing dynamic buffers (i.e. if the adapter is being
	 * reinitialized)
	 */

	if (get_buffers)
		dfx_rcv_flush(bp);

	/* Initialize receive descriptor block and produce buffers */

	if (dfx_rcv_init(bp, get_buffers))
	{
		printk("%s: Receive buffer allocation failed\n", bp->dev->name);
		if (get_buffers)
			dfx_rcv_flush(bp);
		return DFX_K_FAILURE;
	}

	/* Issue START command and bring adapter to LINK_(UN)AVAILABLE state */

	bp->cmd_req_virt->cmd_type = PI_CMD_K_START;
	if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS)
	{
		printk("%s: Start command failed\n", bp->dev->name);
		if (get_buffers)
			dfx_rcv_flush(bp);
		return DFX_K_FAILURE;
	}

	/* Initialization succeeded, reenable PDQ interrupts */

	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB,
			    PI_HOST_INT_K_ENABLE_DEF_INTS);
	return DFX_K_SUCCESS;
}


/*
 * ============
 * = dfx_open =
 * ============
 *
 * Overview:
 *   Opens the adapter
 *
 * Returns:
 *   Condition code
 *
 * Arguments:
 *   dev - pointer to device information
 *
 * Functional Description:
 *   This function brings the adapter to an operational state.
 *
 * Return Codes:
 *   0       - Adapter was successfully opened
 *   -EAGAIN - Could not register IRQ or adapter initialization failed
 *
 * Assumptions:
 *   This routine should only be called for a device that was
 *   initialized successfully.
 *
 * Side Effects:
 *   Adapter should be in LINK_AVAILABLE or LINK_UNAVAILABLE state
 *   if the open is successful.
 */

static int dfx_open(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);
	int ret;

	DBG_printk("In dfx_open...\n");

	/* Register IRQ - support shared interrupts by passing device ptr */

	ret = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED, dev->name,
			  dev);
	if (ret) {
		printk(KERN_ERR "%s: Requested IRQ %d is busy\n",
		       dev->name, dev->irq);
		return ret;
	}

	/*
	 * Set current address to factory MAC address
	 *
	 * Note: We've already done this step in dfx_driver_init.
	 *       However, it's possible that a user has set a node
	 *       address override, then closed and reopened the
	 *       adapter.  Unless we reset the device address field
	 *       now, we'll continue to use the existing modified
	 *       address.
	 */

	memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);

	/* Clear local unicast/multicast address tables and counts */

	memset(bp->uc_table, 0, sizeof(bp->uc_table));
	memset(bp->mc_table, 0, sizeof(bp->mc_table));
	bp->uc_count = 0;
	bp->mc_count = 0;

	/* Disable promiscuous filter settings */

	bp->ind_group_prom = PI_FSTATE_K_BLOCK;
	bp->group_prom = PI_FSTATE_K_BLOCK;

	spin_lock_init(&bp->lock);

	/* Reset and initialize adapter */

	bp->reset_type = PI_PDATA_A_RESET_M_SKIP_ST;	/* skip self-test */
	if (dfx_adap_init(bp, 1) != DFX_K_SUCCESS)
	{
		printk(KERN_ERR "%s: Adapter open failed!\n", dev->name);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Set device structure info */
	netif_start_queue(dev);
	return 0;
}


/*
 * =============
 * = dfx_close =
 * =============
 *
 * Overview:
 *   Closes the device/module.
 *
 * Returns:
 *   Condition code
 *
 * Arguments:
 *   dev - pointer to device information
 *
 * Functional Description:
 *   This routine closes the adapter and brings it to a safe state.
 *   The interrupt service routine is deregistered with the OS.
 *   The adapter can be opened again with another call to dfx_open().
 *
 * Return Codes:
 *   Always return 0.
 *
 * Assumptions:
 *   No further requests for this adapter are made after this routine is
 *   called.  dfx_open() can be called to reset and reinitialize the
 *   adapter.
 *
 * Side Effects:
 *   Adapter should be in DMA_UNAVAILABLE state upon completion of this
 *   routine.
 */

static int dfx_close(struct net_device *dev)
{
	DFX_board_t *bp = netdev_priv(dev);

	DBG_printk("In dfx_close...\n");

	/* Disable PDQ interrupts first */

	dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB,
			    PI_HOST_INT_K_DISABLE_ALL_INTS);

	/* Place adapter in DMA_UNAVAILABLE state by resetting adapter */

	(void) dfx_hw_dma_uninit(bp, PI_PDATA_A_RESET_M_SKIP_ST);

	/*
	 * Flush any pending transmit buffers
	 *
	 * Note: It's important that we flush the transmit buffers
	 *       BEFORE we clear our copy of the Type 2 register.
	 *       Otherwise, we'll have no idea how many buffers
	 *       we need to free.
	 */

	dfx_xmt_flush(bp);

	/*
	 * Clear Type 1 and Type 2 registers after adapter reset
	 *
	 * Note: Even though we're closing the adapter, it's
	 *       possible that an interrupt will occur after
	 *       dfx_close is called.  Without some assurance to
	 *       the contrary we want to make sure that we don't
	 *       process receive and transmit LLC frames and update
	 *       the Type 2 register with bad information.
	 */

	bp->cmd_req_reg.lword = 0;
	bp->cmd_rsp_reg.lword = 0;
	bp->rcv_xmt_reg.lword = 0;

	/* Clear consumer block for the same reason given above */

	memset(bp->cons_block_virt, 0, sizeof(PI_CONSUMER_BLOCK));

	/* Release all dynamically allocated skbs in the receive ring. */

	dfx_rcv_flush(bp);

	/* Clear device structure flags */

	netif_stop_queue(dev);

	/* Deregister (free) IRQ */

	free_irq(dev->irq, dev);

	return 0;
}


/*
 * ======================
 * = dfx_int_pr_halt_id =
 * ======================
 *
 * Overview:
 *   Displays halt id's in string form.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   Determine current halt id and display appropriate string.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   None
 *
 * Side Effects:
 *   None
 */

static void dfx_int_pr_halt_id(DFX_board_t *bp)
{
	PI_UINT32 port_status;		/* PDQ port status register value */
	PI_UINT32 halt_id;		/* PDQ port status halt ID */

	/* Read the latest port status */

	dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status);

	/* Display halt state transition information */

	halt_id = (port_status & PI_PSTATUS_M_HALT_ID) >> PI_PSTATUS_V_HALT_ID;
	switch (halt_id)
	{
	case PI_HALT_ID_K_SELFTEST_TIMEOUT:
		printk("%s: Halt ID: Selftest Timeout\n", bp->dev->name);
		break;

	case PI_HALT_ID_K_PARITY_ERROR:
		printk("%s: Halt ID: Host Bus Parity Error\n", bp->dev->name);
		break;

	case PI_HALT_ID_K_HOST_DIR_HALT:
		printk("%s: Halt ID: Host-Directed Halt\n", bp->dev->name);
		break;

	case PI_HALT_ID_K_SW_FAULT:
		printk("%s: Halt ID: Adapter Software Fault\n", bp->dev->name);
		break;

	case PI_HALT_ID_K_HW_FAULT:
		printk("%s: Halt ID: Adapter Hardware Fault\n", bp->dev->name);
		break;

	case PI_HALT_ID_K_PC_TRACE:
		printk("%s: Halt ID: FDDI Network PC Trace Path Test\n",
		       bp->dev->name);
		break;

	case PI_HALT_ID_K_DMA_ERROR:
		printk("%s: Halt ID: Adapter DMA Error\n", bp->dev->name);
		break;

	case PI_HALT_ID_K_IMAGE_CRC_ERROR:
		printk("%s: Halt ID: Firmware Image CRC Error\n",
		       bp->dev->name);
		break;

	case PI_HALT_ID_K_BUS_EXCEPTION:
		printk("%s: Halt ID: 68000 Bus Exception\n", bp->dev->name);
		break;

	default:
		printk("%s: Halt ID: Unknown (code = %X)\n", bp->dev->name,
		       halt_id);
		break;
	}
}


/*
 * ==========================
 * = dfx_int_type_0_process =
 * ==========================
 *
 * Overview:
 *   Processes Type 0 interrupts.
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bp - pointer to board information
 *
 * Functional Description:
 *   Processes all enabled Type 0 interrupts.
 *   If the reason for the interrupt is a serious fault on the adapter,
 *   then an error message is displayed and the adapter is reset.
 *
 *   One tricky potential timing window is the rapid succession of "link avail"
 *   "link unavail" state change interrupts.  The acknowledgement of the Type 0
 *   interrupt must be done before reading the state from the Port Status
 *   register.  This is true because a state change could occur after reading
 *   the data, but before acknowledging the interrupt.  If this state change
 *   does happen, it would be lost because the driver is using the old state,
 *   and it will never know about the new state because it subsequently
 *   acknowledges the state change interrupt.
 *
 *          INCORRECT                            CORRECT
 *      read type 0 int reasons             read type 0 int reasons
 *      read adapter state                  ack type 0 interrupts
 *      ack type 0 interrupts               read adapter state
 *      ... process interrupt ...           ... process interrupt ...
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   None
 *
 * Side Effects:
 *   An adapter reset may occur if the adapter has any Type 0 error interrupts
 *   or if the port status indicates that the adapter is halted.  The driver
 *   is responsible for reinitializing the adapter with the current CAM
 *   contents and adapter filter settings.
 */

static void dfx_int_type_0_process(DFX_board_t *bp)
{
	PI_UINT32 type_0_status;	/* Host Interrupt Type 0 register */
	PI_UINT32 state;		/* current adap state (from port status) */

	/*
	 * Read host interrupt Type 0 register to determine which Type 0
	 * interrupts are pending.  Immediately write it back out to clear
	 * those interrupts.
	 */

	dfx_port_read_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, &type_0_status);
	dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_0_STATUS, type_0_status);

	/* Check for Type 0 error interrupts */

	if (type_0_status & (PI_TYPE_0_STAT_M_NXM |
			     PI_TYPE_0_STAT_M_PM_PAR_ERR |
			     PI_TYPE_0_STAT_M_BUS_PAR_ERR))
	{
		/* Check for Non-Existent Memory error */

		if (type_0_status & PI_TYPE_0_STAT_M_NXM)
			printk("%s: Non-Existent Memory Access Error\n",
			       bp->dev->name);

		/* Check for Packet Memory Parity error */

		if (type_0_status & PI_TYPE_0_STAT_M_PM_PAR_ERR)
			printk("%s: Packet Memory Parity Error\n",
			       bp->dev->name);

		/* Check for Host Bus Parity error */

		if (type_0_status & PI_TYPE_0_STAT_M_BUS_PAR_ERR)
			printk("%s: Host Bus Parity Error\n", bp->dev->name);

		/* Reset adapter and bring it back on-line */

		bp->link_available = PI_K_FALSE;	/* link is no longer available */
		bp->reset_type = 0;			/* rerun on-board diagnostics */
		printk("%s: Resetting adapter...\n", bp->dev->name);
		if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
		{
			printk("%s: Adapter reset failed!  Disabling adapter interrupts.\n",
			       bp->dev->name);
			dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB,
					    PI_HOST_INT_K_DISABLE_ALL_INTS);
			return;
		}
		printk("%s: Adapter reset successful!\n", bp->dev->name);
		return;
	}

	/* Check for transmit flush interrupt */

	if (type_0_status & PI_TYPE_0_STAT_M_XMT_FLUSH)
	{
		/* Flush any pending xmt's and acknowledge the flush interrupt */

		bp->link_available = PI_K_FALSE;	/* link is no longer available */
		dfx_xmt_flush(bp);			/* flush any outstanding packets */
		(void) dfx_hw_port_ctrl_req(bp,
					    PI_PCTRL_M_XMT_DATA_FLUSH_DONE,
					    0,
					    0,
					    NULL);
	}

	/* Check for adapter state change */

	if (type_0_status & PI_TYPE_0_STAT_M_STATE_CHANGE)
	{
		/* Get latest adapter state */

		state = dfx_hw_adap_state_rd(bp);	/* get adapter state */
		if (state == PI_STATE_K_HALTED)
		{
			/*
			 * Adapter has transitioned to HALTED state, try to reset
			 * adapter to bring it back on-line.  If reset fails,
			 * leave the adapter in the broken state.
			 */

			printk("%s: Controller has transitioned to HALTED state!\n",
			       bp->dev->name);
			dfx_int_pr_halt_id(bp);		/* display halt id as string */

			/* Reset adapter and bring it back on-line */

			bp->link_available = PI_K_FALSE;	/* link is no longer available */
			bp->reset_type = 0;			/* rerun on-board diagnostics */
			printk("%s: Resetting adapter...\n", bp->dev->name);
			if (dfx_adap_init(bp, 0) != DFX_K_SUCCESS)
			{
				printk("%s: Adapter reset failed!  Disabling adapter interrupts.\n",
				       bp->dev->name);
				dfx_port_write_long(bp, PI_PDQ_K_REG_HOST_INT_ENB,
						    PI_HOST_INT_K_DISABLE_ALL_INTS);
				return;
			}
			printk("%s: Adapter reset successful!\n", bp->dev->name);
		}
		else if (state == PI_STATE_K_LINK_AVAIL)
		{
			bp->link_available = PI_K_TRUE;	/* set link available flag */
		}
	}
}


/*
 * ==================
 * = dfx_int_common =
 * ==================
 *
 * Overview:
 *   Interrupt service routine (ISR)
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   dev - pointer to device information
 *
 * Functional Description:
 *   This is the ISR which processes incoming adapter interrupts.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   This routine assumes PDQ interrupts have not been disabled.
 *   When interrupts are disabled at the PDQ, the Port Status register
 *   is automatically cleared.  This routine uses the Port Status
 *   register value to determine whether a Type 0 interrupt occurred,
 *   so it's important that adapter interrupts are not normally
 *   enabled/disabled at the PDQ.
 *
 *   It's vital that this routine is NOT reentered for the
 *   same board and that the OS is not in another section of
 *   code (eg. dfx_xmt_queue_pkt) for the same board on a
 *   different thread.
 *
 * Side Effects:
 *   Pending interrupts are serviced.  Depending on the type of
 *   interrupt, acknowledging and clearing the interrupt at the
 *   PDQ involves writing a register to clear the interrupt bit
 *   or updating completion indices.
1807 */ 1808 1809 static void dfx_int_common(struct net_device *dev) 1810 { 1811 DFX_board_t *bp = netdev_priv(dev); 1812 PI_UINT32 port_status; /* Port Status register */ 1813 1814 /* Process xmt interrupts - frequent case, so always call this routine */ 1815 1816 if(dfx_xmt_done(bp)) /* free consumed xmt packets */ 1817 netif_wake_queue(dev); 1818 1819 /* Process rcv interrupts - frequent case, so always call this routine */ 1820 1821 dfx_rcv_queue_process(bp); /* service received LLC frames */ 1822 1823 /* 1824 * Transmit and receive producer and completion indices are updated on the 1825 * adapter by writing to the Type 2 Producer register. Since the frequent 1826 * case is that we'll be processing either LLC transmit or receive buffers, 1827 * we'll optimize I/O writes by doing a single register write here. 1828 */ 1829 1830 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword); 1831 1832 /* Read PDQ Port Status register to find out which interrupts need processing */ 1833 1834 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status); 1835 1836 /* Process Type 0 interrupts (if any) - infrequent, so only call when needed */ 1837 1838 if (port_status & PI_PSTATUS_M_TYPE_0_PENDING) 1839 dfx_int_type_0_process(bp); /* process Type 0 interrupts */ 1840 } 1841 1842 1843 /* 1844 * ================= 1845 * = dfx_interrupt = 1846 * ================= 1847 * 1848 * Overview: 1849 * Interrupt processing routine 1850 * 1851 * Returns: 1852 * Whether a valid interrupt was seen. 1853 * 1854 * Arguments: 1855 * irq - interrupt vector 1856 * dev_id - pointer to device information 1857 * 1858 * Functional Description: 1859 * This routine calls the interrupt processing routine for this adapter. It 1860 * disables and reenables adapter interrupts, as appropriate. We can support 1861 * shared interrupts since the incoming dev_id pointer provides our device 1862 * structure context. 1863 * 1864 * Return Codes: 1865 * IRQ_HANDLED - an IRQ was handled. 1866 * IRQ_NONE - no IRQ was handled. 1867 * 1868 * Assumptions: 1869 * The interrupt acknowledgement at the hardware level (eg. ACKing the PIC 1870 * on Intel-based systems) is done by the operating system outside this 1871 * routine. 1872 * 1873 * System interrupts are enabled through this call. 1874 * 1875 * Side Effects: 1876 * Interrupts are disabled, then reenabled at the adapter. 
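 *
 *   Illustrative sketch only (the real registration is done in the driver
 *   open path and may differ in detail): for the dev_id context described
 *   here to work, the handler must be registered with the shared flag and
 *   the net_device pointer, along the lines of
 *
 *       err = request_irq(dev->irq, dfx_interrupt, IRQF_SHARED,
 *                         dev->name, dev);
 *
 *   Returning IRQ_NONE when none of this board's status bits are set is
 *   what allows the kernel to offer a shared interrupt to other handlers.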
1877 */ 1878 1879 static irqreturn_t dfx_interrupt(int irq, void *dev_id) 1880 { 1881 struct net_device *dev = dev_id; 1882 DFX_board_t *bp = netdev_priv(dev); 1883 struct device *bdev = bp->bus_dev; 1884 int dfx_bus_pci = DFX_BUS_PCI(bdev); 1885 int dfx_bus_eisa = DFX_BUS_EISA(bdev); 1886 int dfx_bus_tc = DFX_BUS_TC(bdev); 1887 1888 /* Service adapter interrupts */ 1889 1890 if (dfx_bus_pci) { 1891 u32 status; 1892 1893 dfx_port_read_long(bp, PFI_K_REG_STATUS, &status); 1894 if (!(status & PFI_STATUS_M_PDQ_INT)) 1895 return IRQ_NONE; 1896 1897 spin_lock(&bp->lock); 1898 1899 /* Disable PDQ-PFI interrupts at PFI */ 1900 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 1901 PFI_MODE_M_DMA_ENB); 1902 1903 /* Call interrupt service routine for this adapter */ 1904 dfx_int_common(dev); 1905 1906 /* Clear PDQ interrupt status bit and reenable interrupts */ 1907 dfx_port_write_long(bp, PFI_K_REG_STATUS, 1908 PFI_STATUS_M_PDQ_INT); 1909 dfx_port_write_long(bp, PFI_K_REG_MODE_CTRL, 1910 (PFI_MODE_M_PDQ_INT_ENB | 1911 PFI_MODE_M_DMA_ENB)); 1912 1913 spin_unlock(&bp->lock); 1914 } 1915 if (dfx_bus_eisa) { 1916 unsigned long base_addr = to_eisa_device(bdev)->base_addr; 1917 u8 status; 1918 1919 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); 1920 if (!(status & PI_CONFIG_STAT_0_M_PEND)) 1921 return IRQ_NONE; 1922 1923 spin_lock(&bp->lock); 1924 1925 /* Disable interrupts at the ESIC */ 1926 status &= ~PI_CONFIG_STAT_0_M_INT_ENB; 1927 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, status); 1928 1929 /* Call interrupt service routine for this adapter */ 1930 dfx_int_common(dev); 1931 1932 /* Reenable interrupts at the ESIC */ 1933 status = inb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0); 1934 status |= PI_CONFIG_STAT_0_M_INT_ENB; 1935 outb(base_addr + PI_ESIC_K_IO_CONFIG_STAT_0, status); 1936 1937 spin_unlock(&bp->lock); 1938 } 1939 if (dfx_bus_tc) { 1940 u32 status; 1941 1942 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &status); 1943 if (!(status & (PI_PSTATUS_M_RCV_DATA_PENDING | 1944 PI_PSTATUS_M_XMT_DATA_PENDING | 1945 PI_PSTATUS_M_SMT_HOST_PENDING | 1946 PI_PSTATUS_M_UNSOL_PENDING | 1947 PI_PSTATUS_M_CMD_RSP_PENDING | 1948 PI_PSTATUS_M_CMD_REQ_PENDING | 1949 PI_PSTATUS_M_TYPE_0_PENDING))) 1950 return IRQ_NONE; 1951 1952 spin_lock(&bp->lock); 1953 1954 /* Call interrupt service routine for this adapter */ 1955 dfx_int_common(dev); 1956 1957 spin_unlock(&bp->lock); 1958 } 1959 1960 return IRQ_HANDLED; 1961 } 1962 1963 1964 /* 1965 * ===================== 1966 * = dfx_ctl_get_stats = 1967 * ===================== 1968 * 1969 * Overview: 1970 * Get statistics for FDDI adapter 1971 * 1972 * Returns: 1973 * Pointer to FDDI statistics structure 1974 * 1975 * Arguments: 1976 * dev - pointer to device information 1977 * 1978 * Functional Description: 1979 * Gets current MIB objects from adapter, then 1980 * returns FDDI statistics structure as defined 1981 * in if_fddi.h. 1982 * 1983 * Note: Since the FDDI statistics structure is 1984 * still new and the device structure doesn't 1985 * have an FDDI-specific get statistics handler, 1986 * we'll return the FDDI statistics structure as 1987 * a pointer to an Ethernet statistics structure. 1988 * That way, at least the first part of the statistics 1989 * structure can be decoded properly, and it allows 1990 * "smart" applications to perform a second cast to 1991 * decode the FDDI-specific statistics. 1992 * 1993 * We'll have to pay attention to this routine as the 1994 * device structure becomes more mature and LAN media 1995 * independent. 
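 *
 *   Sketch of the "second cast" described above (illustrative, not part of
 *   the driver; assumes the FDDI statistics layout from if_fddi.h, which
 *   begins with the generic counters):
 *
 *       struct net_device_stats *ns = dfx_ctl_get_stats(dev);
 *       struct fddi_statistics *fs = (struct fddi_statistics *)ns;
 *
 *   after which fs->smt_station_id, fs->mac_frame_cts and friends can be
 *   read alongside the generic rx/tx counters.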
1996 * 1997 * Return Codes: 1998 * None 1999 * 2000 * Assumptions: 2001 * None 2002 * 2003 * Side Effects: 2004 * None 2005 */ 2006 2007 static struct net_device_stats *dfx_ctl_get_stats(struct net_device *dev) 2008 { 2009 DFX_board_t *bp = netdev_priv(dev); 2010 2011 /* Fill the bp->stats structure with driver-maintained counters */ 2012 2013 bp->stats.gen.rx_packets = bp->rcv_total_frames; 2014 bp->stats.gen.tx_packets = bp->xmt_total_frames; 2015 bp->stats.gen.rx_bytes = bp->rcv_total_bytes; 2016 bp->stats.gen.tx_bytes = bp->xmt_total_bytes; 2017 bp->stats.gen.rx_errors = bp->rcv_crc_errors + 2018 bp->rcv_frame_status_errors + 2019 bp->rcv_length_errors; 2020 bp->stats.gen.tx_errors = bp->xmt_length_errors; 2021 bp->stats.gen.rx_dropped = bp->rcv_discards; 2022 bp->stats.gen.tx_dropped = bp->xmt_discards; 2023 bp->stats.gen.multicast = bp->rcv_multicast_frames; 2024 bp->stats.gen.collisions = 0; /* always zero (0) for FDDI */ 2025 2026 /* Get FDDI SMT MIB objects */ 2027 2028 bp->cmd_req_virt->cmd_type = PI_CMD_K_SMT_MIB_GET; 2029 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 2030 return (struct net_device_stats *)&bp->stats; 2031 2032 /* Fill the bp->stats structure with the SMT MIB object values */ 2033 2034 memcpy(bp->stats.smt_station_id, &bp->cmd_rsp_virt->smt_mib_get.smt_station_id, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_station_id)); 2035 bp->stats.smt_op_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_op_version_id; 2036 bp->stats.smt_hi_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_hi_version_id; 2037 bp->stats.smt_lo_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_lo_version_id; 2038 memcpy(bp->stats.smt_user_data, &bp->cmd_rsp_virt->smt_mib_get.smt_user_data, sizeof(bp->cmd_rsp_virt->smt_mib_get.smt_user_data)); 2039 bp->stats.smt_mib_version_id = bp->cmd_rsp_virt->smt_mib_get.smt_mib_version_id; 2040 bp->stats.smt_mac_cts = bp->cmd_rsp_virt->smt_mib_get.smt_mac_ct; 2041 bp->stats.smt_non_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_non_master_ct; 2042 bp->stats.smt_master_cts = bp->cmd_rsp_virt->smt_mib_get.smt_master_ct; 2043 bp->stats.smt_available_paths = bp->cmd_rsp_virt->smt_mib_get.smt_available_paths; 2044 bp->stats.smt_config_capabilities = bp->cmd_rsp_virt->smt_mib_get.smt_config_capabilities; 2045 bp->stats.smt_config_policy = bp->cmd_rsp_virt->smt_mib_get.smt_config_policy; 2046 bp->stats.smt_connection_policy = bp->cmd_rsp_virt->smt_mib_get.smt_connection_policy; 2047 bp->stats.smt_t_notify = bp->cmd_rsp_virt->smt_mib_get.smt_t_notify; 2048 bp->stats.smt_stat_rpt_policy = bp->cmd_rsp_virt->smt_mib_get.smt_stat_rpt_policy; 2049 bp->stats.smt_trace_max_expiration = bp->cmd_rsp_virt->smt_mib_get.smt_trace_max_expiration; 2050 bp->stats.smt_bypass_present = bp->cmd_rsp_virt->smt_mib_get.smt_bypass_present; 2051 bp->stats.smt_ecm_state = bp->cmd_rsp_virt->smt_mib_get.smt_ecm_state; 2052 bp->stats.smt_cf_state = bp->cmd_rsp_virt->smt_mib_get.smt_cf_state; 2053 bp->stats.smt_remote_disconnect_flag = bp->cmd_rsp_virt->smt_mib_get.smt_remote_disconnect_flag; 2054 bp->stats.smt_station_status = bp->cmd_rsp_virt->smt_mib_get.smt_station_status; 2055 bp->stats.smt_peer_wrap_flag = bp->cmd_rsp_virt->smt_mib_get.smt_peer_wrap_flag; 2056 bp->stats.smt_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_msg_time_stamp.ls; 2057 bp->stats.smt_transition_time_stamp = bp->cmd_rsp_virt->smt_mib_get.smt_transition_time_stamp.ls; 2058 bp->stats.mac_frame_status_functions = bp->cmd_rsp_virt->smt_mib_get.mac_frame_status_functions; 2059 bp->stats.mac_t_max_capability = 
bp->cmd_rsp_virt->smt_mib_get.mac_t_max_capability; 2060 bp->stats.mac_tvx_capability = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_capability; 2061 bp->stats.mac_available_paths = bp->cmd_rsp_virt->smt_mib_get.mac_available_paths; 2062 bp->stats.mac_current_path = bp->cmd_rsp_virt->smt_mib_get.mac_current_path; 2063 memcpy(bp->stats.mac_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_upstream_nbr, FDDI_K_ALEN); 2064 memcpy(bp->stats.mac_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_downstream_nbr, FDDI_K_ALEN); 2065 memcpy(bp->stats.mac_old_upstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_upstream_nbr, FDDI_K_ALEN); 2066 memcpy(bp->stats.mac_old_downstream_nbr, &bp->cmd_rsp_virt->smt_mib_get.mac_old_downstream_nbr, FDDI_K_ALEN); 2067 bp->stats.mac_dup_address_test = bp->cmd_rsp_virt->smt_mib_get.mac_dup_address_test; 2068 bp->stats.mac_requested_paths = bp->cmd_rsp_virt->smt_mib_get.mac_requested_paths; 2069 bp->stats.mac_downstream_port_type = bp->cmd_rsp_virt->smt_mib_get.mac_downstream_port_type; 2070 memcpy(bp->stats.mac_smt_address, &bp->cmd_rsp_virt->smt_mib_get.mac_smt_address, FDDI_K_ALEN); 2071 bp->stats.mac_t_req = bp->cmd_rsp_virt->smt_mib_get.mac_t_req; 2072 bp->stats.mac_t_neg = bp->cmd_rsp_virt->smt_mib_get.mac_t_neg; 2073 bp->stats.mac_t_max = bp->cmd_rsp_virt->smt_mib_get.mac_t_max; 2074 bp->stats.mac_tvx_value = bp->cmd_rsp_virt->smt_mib_get.mac_tvx_value; 2075 bp->stats.mac_frame_error_threshold = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_threshold; 2076 bp->stats.mac_frame_error_ratio = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_ratio; 2077 bp->stats.mac_rmt_state = bp->cmd_rsp_virt->smt_mib_get.mac_rmt_state; 2078 bp->stats.mac_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_da_flag; 2079 bp->stats.mac_una_da_flag = bp->cmd_rsp_virt->smt_mib_get.mac_unda_flag; 2080 bp->stats.mac_frame_error_flag = bp->cmd_rsp_virt->smt_mib_get.mac_frame_error_flag; 2081 bp->stats.mac_ma_unitdata_available = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_available; 2082 bp->stats.mac_hardware_present = bp->cmd_rsp_virt->smt_mib_get.mac_hardware_present; 2083 bp->stats.mac_ma_unitdata_enable = bp->cmd_rsp_virt->smt_mib_get.mac_ma_unitdata_enable; 2084 bp->stats.path_tvx_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_tvx_lower_bound; 2085 bp->stats.path_t_max_lower_bound = bp->cmd_rsp_virt->smt_mib_get.path_t_max_lower_bound; 2086 bp->stats.path_max_t_req = bp->cmd_rsp_virt->smt_mib_get.path_max_t_req; 2087 memcpy(bp->stats.path_configuration, &bp->cmd_rsp_virt->smt_mib_get.path_configuration, sizeof(bp->cmd_rsp_virt->smt_mib_get.path_configuration)); 2088 bp->stats.port_my_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[0]; 2089 bp->stats.port_my_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_my_type[1]; 2090 bp->stats.port_neighbor_type[0] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[0]; 2091 bp->stats.port_neighbor_type[1] = bp->cmd_rsp_virt->smt_mib_get.port_neighbor_type[1]; 2092 bp->stats.port_connection_policies[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[0]; 2093 bp->stats.port_connection_policies[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_policies[1]; 2094 bp->stats.port_mac_indicated[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[0]; 2095 bp->stats.port_mac_indicated[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_indicated[1]; 2096 bp->stats.port_current_path[0] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[0]; 2097 bp->stats.port_current_path[1] = bp->cmd_rsp_virt->smt_mib_get.port_current_path[1]; 2098 
memcpy(&bp->stats.port_requested_paths[0*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[0], 3); 2099 memcpy(&bp->stats.port_requested_paths[1*3], &bp->cmd_rsp_virt->smt_mib_get.port_requested_paths[1], 3); 2100 bp->stats.port_mac_placement[0] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[0]; 2101 bp->stats.port_mac_placement[1] = bp->cmd_rsp_virt->smt_mib_get.port_mac_placement[1]; 2102 bp->stats.port_available_paths[0] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[0]; 2103 bp->stats.port_available_paths[1] = bp->cmd_rsp_virt->smt_mib_get.port_available_paths[1]; 2104 bp->stats.port_pmd_class[0] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[0]; 2105 bp->stats.port_pmd_class[1] = bp->cmd_rsp_virt->smt_mib_get.port_pmd_class[1]; 2106 bp->stats.port_connection_capabilities[0] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[0]; 2107 bp->stats.port_connection_capabilities[1] = bp->cmd_rsp_virt->smt_mib_get.port_connection_capabilities[1]; 2108 bp->stats.port_bs_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[0]; 2109 bp->stats.port_bs_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_bs_flag[1]; 2110 bp->stats.port_ler_estimate[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[0]; 2111 bp->stats.port_ler_estimate[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_estimate[1]; 2112 bp->stats.port_ler_cutoff[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[0]; 2113 bp->stats.port_ler_cutoff[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_cutoff[1]; 2114 bp->stats.port_ler_alarm[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[0]; 2115 bp->stats.port_ler_alarm[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_alarm[1]; 2116 bp->stats.port_connect_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[0]; 2117 bp->stats.port_connect_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_connect_state[1]; 2118 bp->stats.port_pcm_state[0] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[0]; 2119 bp->stats.port_pcm_state[1] = bp->cmd_rsp_virt->smt_mib_get.port_pcm_state[1]; 2120 bp->stats.port_pc_withhold[0] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[0]; 2121 bp->stats.port_pc_withhold[1] = bp->cmd_rsp_virt->smt_mib_get.port_pc_withhold[1]; 2122 bp->stats.port_ler_flag[0] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[0]; 2123 bp->stats.port_ler_flag[1] = bp->cmd_rsp_virt->smt_mib_get.port_ler_flag[1]; 2124 bp->stats.port_hardware_present[0] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[0]; 2125 bp->stats.port_hardware_present[1] = bp->cmd_rsp_virt->smt_mib_get.port_hardware_present[1]; 2126 2127 /* Get FDDI counters */ 2128 2129 bp->cmd_req_virt->cmd_type = PI_CMD_K_CNTRS_GET; 2130 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 2131 return (struct net_device_stats *)&bp->stats; 2132 2133 /* Fill the bp->stats structure with the FDDI counter values */ 2134 2135 bp->stats.mac_frame_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.frame_cnt.ls; 2136 bp->stats.mac_copied_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.copied_cnt.ls; 2137 bp->stats.mac_transmit_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.transmit_cnt.ls; 2138 bp->stats.mac_error_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.error_cnt.ls; 2139 bp->stats.mac_lost_cts = bp->cmd_rsp_virt->cntrs_get.cntrs.lost_cnt.ls; 2140 bp->stats.port_lct_fail_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[0].ls; 2141 bp->stats.port_lct_fail_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.lct_rejects[1].ls; 2142 bp->stats.port_lem_reject_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[0].ls; 2143 bp->stats.port_lem_reject_cts[1] = 
bp->cmd_rsp_virt->cntrs_get.cntrs.lem_rejects[1].ls; 2144 bp->stats.port_lem_cts[0] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[0].ls; 2145 bp->stats.port_lem_cts[1] = bp->cmd_rsp_virt->cntrs_get.cntrs.link_errors[1].ls; 2146 2147 return (struct net_device_stats *)&bp->stats; 2148 } 2149 2150 2151 /* 2152 * ============================== 2153 * = dfx_ctl_set_multicast_list = 2154 * ============================== 2155 * 2156 * Overview: 2157 * Enable/Disable LLC frame promiscuous mode reception 2158 * on the adapter and/or update multicast address table. 2159 * 2160 * Returns: 2161 * None 2162 * 2163 * Arguments: 2164 * dev - pointer to device information 2165 * 2166 * Functional Description: 2167 * This routine follows a fairly simple algorithm for setting the 2168 * adapter filters and CAM: 2169 * 2170 * if IFF_PROMISC flag is set 2171 * enable LLC individual/group promiscuous mode 2172 * else 2173 * disable LLC individual/group promiscuous mode 2174 * if number of incoming multicast addresses > 2175 * (CAM max size - number of unicast addresses in CAM) 2176 * enable LLC group promiscuous mode 2177 * set driver-maintained multicast address count to zero 2178 * else 2179 * disable LLC group promiscuous mode 2180 * set driver-maintained multicast address count to incoming count 2181 * update adapter CAM 2182 * update adapter filters 2183 * 2184 * Return Codes: 2185 * None 2186 * 2187 * Assumptions: 2188 * Multicast addresses are presented in canonical (LSB) format. 2189 * 2190 * Side Effects: 2191 * On-board adapter CAM and filters are updated. 2192 */ 2193 2194 static void dfx_ctl_set_multicast_list(struct net_device *dev) 2195 { 2196 DFX_board_t *bp = netdev_priv(dev); 2197 int i; /* used as index in for loop */ 2198 struct netdev_hw_addr *ha; 2199 2200 /* Enable LLC frame promiscuous mode, if necessary */ 2201 2202 if (dev->flags & IFF_PROMISC) 2203 bp->ind_group_prom = PI_FSTATE_K_PASS; /* Enable LLC ind/group prom mode */ 2204 2205 /* Else, update multicast address table */ 2206 2207 else 2208 { 2209 bp->ind_group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC ind/group prom mode */ 2210 /* 2211 * Check whether incoming multicast address count exceeds table size 2212 * 2213 * Note: The adapters utilize an on-board 64 entry CAM for 2214 * supporting perfect filtering of multicast packets 2215 * and bridge functions when adding unicast addresses. 2216 * There is no hash function available. To support 2217 * additional multicast addresses, the all multicast 2218 * filter (LLC group promiscuous mode) must be enabled. 2219 * 2220 * The firmware reserves two CAM entries for SMT-related 2221 * multicast addresses, which leaves 62 entries available. 2222 * The following code ensures that we're not being asked 2223 * to add more than 62 addresses to the CAM. If we are, 2224 * the driver will enable the all multicast filter. 2225 * Should the number of multicast addresses drop below 2226 * the high water mark, the filter will be disabled and 2227 * perfect filtering will be used. 
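 *
 *   Worked example, assuming PI_CMD_ADDR_FILTER_K_SIZE reflects the
 *   62-entry figure quoted above: with one unicast address in the CAM
 *   (bp->uc_count == 1) up to 61 multicast addresses can still be
 *   filtered perfectly; with 62 or more the test below,
 *
 *       netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count)
 *
 *   becomes true, so the driver falls back to LLC group promiscuous mode
 *   and keeps the multicast entries out of the CAM.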
2228 */ 2229 2230 if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count)) 2231 { 2232 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */ 2233 bp->mc_count = 0; /* Don't add mc addrs to CAM */ 2234 } 2235 else 2236 { 2237 bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */ 2238 bp->mc_count = netdev_mc_count(dev); /* Add mc addrs to CAM */ 2239 } 2240 2241 /* Copy addresses to multicast address table, then update adapter CAM */ 2242 2243 i = 0; 2244 netdev_for_each_mc_addr(ha, dev) 2245 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN], 2246 ha->addr, FDDI_K_ALEN); 2247 2248 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) 2249 { 2250 DBG_printk("%s: Could not update multicast address table!\n", dev->name); 2251 } 2252 else 2253 { 2254 DBG_printk("%s: Multicast address table updated! Added %d addresses.\n", dev->name, bp->mc_count); 2255 } 2256 } 2257 2258 /* Update adapter filters */ 2259 2260 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS) 2261 { 2262 DBG_printk("%s: Could not update adapter filters!\n", dev->name); 2263 } 2264 else 2265 { 2266 DBG_printk("%s: Adapter filters updated!\n", dev->name); 2267 } 2268 } 2269 2270 2271 /* 2272 * =========================== 2273 * = dfx_ctl_set_mac_address = 2274 * =========================== 2275 * 2276 * Overview: 2277 * Add node address override (unicast address) to adapter 2278 * CAM and update dev_addr field in device table. 2279 * 2280 * Returns: 2281 * None 2282 * 2283 * Arguments: 2284 * dev - pointer to device information 2285 * addr - pointer to sockaddr structure containing unicast address to add 2286 * 2287 * Functional Description: 2288 * The adapter supports node address overrides by adding one or more 2289 * unicast addresses to the adapter CAM. This is similar to adding 2290 * multicast addresses. In this routine we'll update the driver and 2291 * device structures with the new address, then update the adapter CAM 2292 * to ensure that the adapter will copy and strip frames destined and 2293 * sourced by that address. 2294 * 2295 * Return Codes: 2296 * Always returns zero. 2297 * 2298 * Assumptions: 2299 * The address pointed to by addr->sa_data is a valid unicast 2300 * address and is presented in canonical (LSB) format. 2301 * 2302 * Side Effects: 2303 * On-board adapter CAM is updated. On-board adapter filters 2304 * may be updated. 2305 */ 2306 2307 static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr) 2308 { 2309 struct sockaddr *p_sockaddr = (struct sockaddr *)addr; 2310 DFX_board_t *bp = netdev_priv(dev); 2311 2312 /* Copy unicast address to driver-maintained structs and update count */ 2313 2314 memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN); /* update device struct */ 2315 memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN); /* update driver struct */ 2316 bp->uc_count = 1; 2317 2318 /* 2319 * Verify we're not exceeding the CAM size by adding unicast address 2320 * 2321 * Note: It's possible that before entering this routine we've 2322 * already filled the CAM with 62 multicast addresses. 2323 * Since we need to place the node address override into 2324 * the CAM, we have to check to see that we're not 2325 * exceeding the CAM size. If we are, we have to enable 2326 * the LLC group (multicast) promiscuous mode filter as 2327 * in dfx_ctl_set_multicast_list. 
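 *
 *   Worked example (same 62-entry assumption as in the previous routine):
 *   if the CAM already holds 62 multicast addresses when the override
 *   arrives, bp->uc_count + bp->mc_count is 1 + 62 = 63, the check below
 *   trips, mc_count is cleared, and group promiscuous mode is enabled so
 *   the single unicast entry is guaranteed a CAM slot.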
2328 */ 2329 2330 if ((bp->uc_count + bp->mc_count) > PI_CMD_ADDR_FILTER_K_SIZE) 2331 { 2332 bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */ 2333 bp->mc_count = 0; /* Don't add mc addrs to CAM */ 2334 2335 /* Update adapter filters */ 2336 2337 if (dfx_ctl_update_filters(bp) != DFX_K_SUCCESS) 2338 { 2339 DBG_printk("%s: Could not update adapter filters!\n", dev->name); 2340 } 2341 else 2342 { 2343 DBG_printk("%s: Adapter filters updated!\n", dev->name); 2344 } 2345 } 2346 2347 /* Update adapter CAM with new unicast address */ 2348 2349 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) 2350 { 2351 DBG_printk("%s: Could not set new MAC address!\n", dev->name); 2352 } 2353 else 2354 { 2355 DBG_printk("%s: Adapter CAM updated with new MAC address\n", dev->name); 2356 } 2357 return 0; /* always return zero */ 2358 } 2359 2360 2361 /* 2362 * ====================== 2363 * = dfx_ctl_update_cam = 2364 * ====================== 2365 * 2366 * Overview: 2367 * Procedure to update adapter CAM (Content Addressable Memory) 2368 * with desired unicast and multicast address entries. 2369 * 2370 * Returns: 2371 * Condition code 2372 * 2373 * Arguments: 2374 * bp - pointer to board information 2375 * 2376 * Functional Description: 2377 * Updates adapter CAM with current contents of board structure 2378 * unicast and multicast address tables. Since there are only 62 2379 * free entries in CAM, this routine ensures that the command 2380 * request buffer is not overrun. 2381 * 2382 * Return Codes: 2383 * DFX_K_SUCCESS - Request succeeded 2384 * DFX_K_FAILURE - Request failed 2385 * 2386 * Assumptions: 2387 * All addresses being added (unicast and multicast) are in canonical 2388 * order. 2389 * 2390 * Side Effects: 2391 * On-board adapter CAM is updated. 2392 */ 2393 2394 static int dfx_ctl_update_cam(DFX_board_t *bp) 2395 { 2396 int i; /* used as index */ 2397 PI_LAN_ADDR *p_addr; /* pointer to CAM entry */ 2398 2399 /* 2400 * Fill in command request information 2401 * 2402 * Note: Even though both the unicast and multicast address 2403 * table entries are stored as contiguous 6 byte entries, 2404 * the firmware address filter set command expects each 2405 * entry to be two longwords (8 bytes total). We must be 2406 * careful to only copy the six bytes of each unicast and 2407 * multicast table entry into each command entry. This 2408 * is also why we must first clear the entire command 2409 * request buffer. 
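 *
 *   Layout sketch (address value is illustrative): after the memset below,
 *   copying the 6-byte address 08-00-2B-12-34-56 into one two-longword
 *   command entry leaves
 *
 *       entry bytes:  08 00 2B 12 34 56 00 00
 *
 *   i.e. the two pad bytes of every entry are guaranteed to be zero
 *   because the whole request buffer was cleared first.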
2410 */ 2411 2412 memset(bp->cmd_req_virt, 0, PI_CMD_REQ_K_SIZE_MAX); /* first clear buffer */ 2413 bp->cmd_req_virt->cmd_type = PI_CMD_K_ADDR_FILTER_SET; 2414 p_addr = &bp->cmd_req_virt->addr_filter_set.entry[0]; 2415 2416 /* Now add unicast addresses to command request buffer, if any */ 2417 2418 for (i=0; i < (int)bp->uc_count; i++) 2419 { 2420 if (i < PI_CMD_ADDR_FILTER_K_SIZE) 2421 { 2422 memcpy(p_addr, &bp->uc_table[i*FDDI_K_ALEN], FDDI_K_ALEN); 2423 p_addr++; /* point to next command entry */ 2424 } 2425 } 2426 2427 /* Now add multicast addresses to command request buffer, if any */ 2428 2429 for (i=0; i < (int)bp->mc_count; i++) 2430 { 2431 if ((i + bp->uc_count) < PI_CMD_ADDR_FILTER_K_SIZE) 2432 { 2433 memcpy(p_addr, &bp->mc_table[i*FDDI_K_ALEN], FDDI_K_ALEN); 2434 p_addr++; /* point to next command entry */ 2435 } 2436 } 2437 2438 /* Issue command to update adapter CAM, then return */ 2439 2440 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 2441 return DFX_K_FAILURE; 2442 return DFX_K_SUCCESS; 2443 } 2444 2445 2446 /* 2447 * ========================== 2448 * = dfx_ctl_update_filters = 2449 * ========================== 2450 * 2451 * Overview: 2452 * Procedure to update adapter filters with desired 2453 * filter settings. 2454 * 2455 * Returns: 2456 * Condition code 2457 * 2458 * Arguments: 2459 * bp - pointer to board information 2460 * 2461 * Functional Description: 2462 * Enables or disables filter using current filter settings. 2463 * 2464 * Return Codes: 2465 * DFX_K_SUCCESS - Request succeeded. 2466 * DFX_K_FAILURE - Request failed. 2467 * 2468 * Assumptions: 2469 * We must always pass up packets destined to the broadcast 2470 * address (FF-FF-FF-FF-FF-FF), so we'll always keep the 2471 * broadcast filter enabled. 2472 * 2473 * Side Effects: 2474 * On-board adapter filters are updated. 2475 */ 2476 2477 static int dfx_ctl_update_filters(DFX_board_t *bp) 2478 { 2479 int i = 0; /* used as index */ 2480 2481 /* Fill in command request information */ 2482 2483 bp->cmd_req_virt->cmd_type = PI_CMD_K_FILTERS_SET; 2484 2485 /* Initialize Broadcast filter - * ALWAYS ENABLED * */ 2486 2487 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_BROADCAST; 2488 bp->cmd_req_virt->filter_set.item[i++].value = PI_FSTATE_K_PASS; 2489 2490 /* Initialize LLC Individual/Group Promiscuous filter */ 2491 2492 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_IND_GROUP_PROM; 2493 bp->cmd_req_virt->filter_set.item[i++].value = bp->ind_group_prom; 2494 2495 /* Initialize LLC Group Promiscuous filter */ 2496 2497 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_GROUP_PROM; 2498 bp->cmd_req_virt->filter_set.item[i++].value = bp->group_prom; 2499 2500 /* Terminate the item code list */ 2501 2502 bp->cmd_req_virt->filter_set.item[i].item_code = PI_ITEM_K_EOL; 2503 2504 /* Issue command to update adapter filters, then return */ 2505 2506 if (dfx_hw_dma_cmd_req(bp) != DFX_K_SUCCESS) 2507 return DFX_K_FAILURE; 2508 return DFX_K_SUCCESS; 2509 } 2510 2511 2512 /* 2513 * ====================== 2514 * = dfx_hw_dma_cmd_req = 2515 * ====================== 2516 * 2517 * Overview: 2518 * Sends PDQ DMA command to adapter firmware 2519 * 2520 * Returns: 2521 * Condition code 2522 * 2523 * Arguments: 2524 * bp - pointer to board information 2525 * 2526 * Functional Description: 2527 * The command request and response buffers are posted to the adapter in the manner 2528 * described in the PDQ Port Specification: 2529 * 2530 * 1. Command Response Buffer is posted to adapter. 2531 * 2. 
Command Request Buffer is posted to adapter. 2532 * 3. Command Request consumer index is polled until it indicates that request 2533 * buffer has been DMA'd to adapter. 2534 * 4. Command Response consumer index is polled until it indicates that response 2535 * buffer has been DMA'd from adapter. 2536 * 2537 * This ordering ensures that a response buffer is already available for the firmware 2538 * to use once it's done processing the request buffer. 2539 * 2540 * Return Codes: 2541 * DFX_K_SUCCESS - DMA command succeeded 2542 * DFX_K_OUTSTATE - Adapter is NOT in proper state 2543 * DFX_K_HW_TIMEOUT - DMA command timed out 2544 * 2545 * Assumptions: 2546 * Command request buffer has already been filled with desired DMA command. 2547 * 2548 * Side Effects: 2549 * None 2550 */ 2551 2552 static int dfx_hw_dma_cmd_req(DFX_board_t *bp) 2553 { 2554 int status; /* adapter status */ 2555 int timeout_cnt; /* used in for loops */ 2556 2557 /* Make sure the adapter is in a state that we can issue the DMA command in */ 2558 2559 status = dfx_hw_adap_state_rd(bp); 2560 if ((status == PI_STATE_K_RESET) || 2561 (status == PI_STATE_K_HALTED) || 2562 (status == PI_STATE_K_DMA_UNAVAIL) || 2563 (status == PI_STATE_K_UPGRADE)) 2564 return DFX_K_OUTSTATE; 2565 2566 /* Put response buffer on the command response queue */ 2567 2568 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_0 = (u32) (PI_RCV_DESCR_M_SOP | 2569 ((PI_CMD_RSP_K_SIZE_MAX / PI_ALIGN_K_CMD_RSP_BUFF) << PI_RCV_DESCR_V_SEG_LEN)); 2570 bp->descr_block_virt->cmd_rsp[bp->cmd_rsp_reg.index.prod].long_1 = bp->cmd_rsp_phys; 2571 2572 /* Bump (and wrap) the producer index and write out to register */ 2573 2574 bp->cmd_rsp_reg.index.prod += 1; 2575 bp->cmd_rsp_reg.index.prod &= PI_CMD_RSP_K_NUM_ENTRIES-1; 2576 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword); 2577 2578 /* Put request buffer on the command request queue */ 2579 2580 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_0 = (u32) (PI_XMT_DESCR_M_SOP | 2581 PI_XMT_DESCR_M_EOP | (PI_CMD_REQ_K_SIZE_MAX << PI_XMT_DESCR_V_SEG_LEN)); 2582 bp->descr_block_virt->cmd_req[bp->cmd_req_reg.index.prod].long_1 = bp->cmd_req_phys; 2583 2584 /* Bump (and wrap) the producer index and write out to register */ 2585 2586 bp->cmd_req_reg.index.prod += 1; 2587 bp->cmd_req_reg.index.prod &= PI_CMD_REQ_K_NUM_ENTRIES-1; 2588 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword); 2589 2590 /* 2591 * Here we wait for the command request consumer index to be equal 2592 * to the producer, indicating that the adapter has DMAed the request. 2593 */ 2594 2595 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--) 2596 { 2597 if (bp->cmd_req_reg.index.prod == (u8)(bp->cons_block_virt->cmd_req)) 2598 break; 2599 udelay(100); /* wait for 100 microseconds */ 2600 } 2601 if (timeout_cnt == 0) 2602 return DFX_K_HW_TIMEOUT; 2603 2604 /* Bump (and wrap) the completion index and write out to register */ 2605 2606 bp->cmd_req_reg.index.comp += 1; 2607 bp->cmd_req_reg.index.comp &= PI_CMD_REQ_K_NUM_ENTRIES-1; 2608 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_REQ_PROD, bp->cmd_req_reg.lword); 2609 2610 /* 2611 * Here we wait for the command response consumer index to be equal 2612 * to the producer, indicating that the adapter has DMAed the response. 
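 *
 *   A note on the index arithmetic used throughout this routine: masking
 *   with (ring size - 1) implements the wrap-around and therefore relies
 *   on the ring sizes being powers of two.  For a hypothetical 16-entry
 *   ring,
 *
 *       prod = (prod + 1) & (16 - 1);    so 7 becomes 8, and 15 wraps to 0
 *
 *   which is exactly what the "bump (and wrap)" steps above and below do
 *   with PI_CMD_REQ_K_NUM_ENTRIES and PI_CMD_RSP_K_NUM_ENTRIES.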
2613 */ 2614 2615 for (timeout_cnt = 20000; timeout_cnt > 0; timeout_cnt--) 2616 { 2617 if (bp->cmd_rsp_reg.index.prod == (u8)(bp->cons_block_virt->cmd_rsp)) 2618 break; 2619 udelay(100); /* wait for 100 microseconds */ 2620 } 2621 if (timeout_cnt == 0) 2622 return DFX_K_HW_TIMEOUT; 2623 2624 /* Bump (and wrap) the completion index and write out to register */ 2625 2626 bp->cmd_rsp_reg.index.comp += 1; 2627 bp->cmd_rsp_reg.index.comp &= PI_CMD_RSP_K_NUM_ENTRIES-1; 2628 dfx_port_write_long(bp, PI_PDQ_K_REG_CMD_RSP_PROD, bp->cmd_rsp_reg.lword); 2629 return DFX_K_SUCCESS; 2630 } 2631 2632 2633 /* 2634 * ======================== 2635 * = dfx_hw_port_ctrl_req = 2636 * ======================== 2637 * 2638 * Overview: 2639 * Sends PDQ port control command to adapter firmware 2640 * 2641 * Returns: 2642 * Host data register value in host_data if ptr is not NULL 2643 * 2644 * Arguments: 2645 * bp - pointer to board information 2646 * command - port control command 2647 * data_a - port data A register value 2648 * data_b - port data B register value 2649 * host_data - ptr to host data register value 2650 * 2651 * Functional Description: 2652 * Send generic port control command to adapter by writing 2653 * to various PDQ port registers, then polling for completion. 2654 * 2655 * Return Codes: 2656 * DFX_K_SUCCESS - port control command succeeded 2657 * DFX_K_HW_TIMEOUT - port control command timed out 2658 * 2659 * Assumptions: 2660 * None 2661 * 2662 * Side Effects: 2663 * None 2664 */ 2665 2666 static int dfx_hw_port_ctrl_req( 2667 DFX_board_t *bp, 2668 PI_UINT32 command, 2669 PI_UINT32 data_a, 2670 PI_UINT32 data_b, 2671 PI_UINT32 *host_data 2672 ) 2673 2674 { 2675 PI_UINT32 port_cmd; /* Port Control command register value */ 2676 int timeout_cnt; /* used in for loops */ 2677 2678 /* Set Command Error bit in command longword */ 2679 2680 port_cmd = (PI_UINT32) (command | PI_PCTRL_M_CMD_ERROR); 2681 2682 /* Issue port command to the adapter */ 2683 2684 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, data_a); 2685 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_B, data_b); 2686 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_CTRL, port_cmd); 2687 2688 /* Now wait for command to complete */ 2689 2690 if (command == PI_PCTRL_M_BLAST_FLASH) 2691 timeout_cnt = 600000; /* set command timeout count to 60 seconds */ 2692 else 2693 timeout_cnt = 20000; /* set command timeout count to 2 seconds */ 2694 2695 for (; timeout_cnt > 0; timeout_cnt--) 2696 { 2697 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_CTRL, &port_cmd); 2698 if (!(port_cmd & PI_PCTRL_M_CMD_ERROR)) 2699 break; 2700 udelay(100); /* wait for 100 microseconds */ 2701 } 2702 if (timeout_cnt == 0) 2703 return DFX_K_HW_TIMEOUT; 2704 2705 /* 2706 * If the address of host_data is non-zero, assume caller has supplied a 2707 * non NULL pointer, and return the contents of the HOST_DATA register in 2708 * it. 2709 */ 2710 2711 if (host_data != NULL) 2712 dfx_port_read_long(bp, PI_PDQ_K_REG_HOST_DATA, host_data); 2713 return DFX_K_SUCCESS; 2714 } 2715 2716 2717 /* 2718 * ===================== 2719 * = dfx_hw_adap_reset = 2720 * ===================== 2721 * 2722 * Overview: 2723 * Resets adapter 2724 * 2725 * Returns: 2726 * None 2727 * 2728 * Arguments: 2729 * bp - pointer to board information 2730 * type - type of reset to perform 2731 * 2732 * Functional Description: 2733 * Issue soft reset to adapter by writing to PDQ Port Reset 2734 * register. Use incoming reset type to tell adapter what 2735 * kind of reset operation to perform. 
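 *
 *   Typical caller pattern (sketch only; dfx_hw_dma_uninit below is a real
 *   instance): issue the reset, then poll the adapter state until the
 *   expected post-reset state is reached or a timeout expires, e.g.
 *
 *       dfx_hw_adap_reset(bp, type);
 *       while (--timeout_cnt && dfx_hw_adap_state_rd(bp) != PI_STATE_K_DMA_UNAVAIL)
 *           udelay(100);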
2736 * 2737 * Return Codes: 2738 * None 2739 * 2740 * Assumptions: 2741 * This routine merely issues a soft reset to the adapter. 2742 * It is expected that after this routine returns, the caller 2743 * will appropriately poll the Port Status register for the 2744 * adapter to enter the proper state. 2745 * 2746 * Side Effects: 2747 * Internal adapter registers are cleared. 2748 */ 2749 2750 static void dfx_hw_adap_reset( 2751 DFX_board_t *bp, 2752 PI_UINT32 type 2753 ) 2754 2755 { 2756 /* Set Reset type and assert reset */ 2757 2758 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_DATA_A, type); /* tell adapter type of reset */ 2759 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, PI_RESET_M_ASSERT_RESET); 2760 2761 /* Wait for at least 1 Microsecond according to the spec. We wait 20 just to be safe */ 2762 2763 udelay(20); 2764 2765 /* Deassert reset */ 2766 2767 dfx_port_write_long(bp, PI_PDQ_K_REG_PORT_RESET, 0); 2768 } 2769 2770 2771 /* 2772 * ======================== 2773 * = dfx_hw_adap_state_rd = 2774 * ======================== 2775 * 2776 * Overview: 2777 * Returns current adapter state 2778 * 2779 * Returns: 2780 * Adapter state per PDQ Port Specification 2781 * 2782 * Arguments: 2783 * bp - pointer to board information 2784 * 2785 * Functional Description: 2786 * Reads PDQ Port Status register and returns adapter state. 2787 * 2788 * Return Codes: 2789 * None 2790 * 2791 * Assumptions: 2792 * None 2793 * 2794 * Side Effects: 2795 * None 2796 */ 2797 2798 static int dfx_hw_adap_state_rd(DFX_board_t *bp) 2799 { 2800 PI_UINT32 port_status; /* Port Status register value */ 2801 2802 dfx_port_read_long(bp, PI_PDQ_K_REG_PORT_STATUS, &port_status); 2803 return (port_status & PI_PSTATUS_M_STATE) >> PI_PSTATUS_V_STATE; 2804 } 2805 2806 2807 /* 2808 * ===================== 2809 * = dfx_hw_dma_uninit = 2810 * ===================== 2811 * 2812 * Overview: 2813 * Brings adapter to DMA_UNAVAILABLE state 2814 * 2815 * Returns: 2816 * Condition code 2817 * 2818 * Arguments: 2819 * bp - pointer to board information 2820 * type - type of reset to perform 2821 * 2822 * Functional Description: 2823 * Bring adapter to DMA_UNAVAILABLE state by performing the following: 2824 * 1. Set reset type bit in Port Data A Register then reset adapter. 2825 * 2. Check that adapter is in DMA_UNAVAILABLE state. 2826 * 2827 * Return Codes: 2828 * DFX_K_SUCCESS - adapter is in DMA_UNAVAILABLE state 2829 * DFX_K_HW_TIMEOUT - adapter did not reset properly 2830 * 2831 * Assumptions: 2832 * None 2833 * 2834 * Side Effects: 2835 * Internal adapter registers are cleared. 
2836 */ 2837 2838 static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type) 2839 { 2840 int timeout_cnt; /* used in for loops */ 2841 2842 /* Set reset type bit and reset adapter */ 2843 2844 dfx_hw_adap_reset(bp, type); 2845 2846 /* Now wait for adapter to enter DMA_UNAVAILABLE state */ 2847 2848 for (timeout_cnt = 100000; timeout_cnt > 0; timeout_cnt--) 2849 { 2850 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_DMA_UNAVAIL) 2851 break; 2852 udelay(100); /* wait for 100 microseconds */ 2853 } 2854 if (timeout_cnt == 0) 2855 return DFX_K_HW_TIMEOUT; 2856 return DFX_K_SUCCESS; 2857 } 2858 2859 /* 2860 * Align an sk_buff to a boundary power of 2 2861 * 2862 */ 2863 2864 static void my_skb_align(struct sk_buff *skb, int n) 2865 { 2866 unsigned long x = (unsigned long)skb->data; 2867 unsigned long v; 2868 2869 v = ALIGN(x, n); /* Where we want to be */ 2870 2871 skb_reserve(skb, v - x); 2872 } 2873 2874 2875 /* 2876 * ================ 2877 * = dfx_rcv_init = 2878 * ================ 2879 * 2880 * Overview: 2881 * Produces buffers to adapter LLC Host receive descriptor block 2882 * 2883 * Returns: 2884 * None 2885 * 2886 * Arguments: 2887 * bp - pointer to board information 2888 * get_buffers - non-zero if buffers to be allocated 2889 * 2890 * Functional Description: 2891 * This routine can be called during dfx_adap_init() or during an adapter 2892 * reset. It initializes the descriptor block and produces all allocated 2893 * LLC Host queue receive buffers. 2894 * 2895 * Return Codes: 2896 * Return 0 on success or -ENOMEM if buffer allocation failed (when using 2897 * dynamic buffer allocation). If the buffer allocation failed, the 2898 * already allocated buffers will not be released and the caller should do 2899 * this. 2900 * 2901 * Assumptions: 2902 * The PDQ has been reset and the adapter and driver maintained Type 2 2903 * register indices are cleared. 2904 * 2905 * Side Effects: 2906 * Receive buffers are posted to the adapter LLC queue and the adapter 2907 * is notified. 2908 */ 2909 2910 static int dfx_rcv_init(DFX_board_t *bp, int get_buffers) 2911 { 2912 int i, j; /* used in for loop */ 2913 2914 /* 2915 * Since each receive buffer is a single fragment of same length, initialize 2916 * first longword in each receive descriptor for entire LLC Host descriptor 2917 * block. Also initialize second longword in each receive descriptor with 2918 * physical address of receive buffer. We'll always allocate receive 2919 * buffers in powers of 2 so that we can easily fill the 256 entry descriptor 2920 * block and produce new receive buffers by simply updating the receive 2921 * producer index. 2922 * 2923 * Assumptions: 2924 * To support all shipping versions of PDQ, the receive buffer size 2925 * must be mod 128 in length and the physical address must be 128 byte 2926 * aligned. In other words, bits 0-6 of the length and address must 2927 * be zero for the following descriptor field entries to be correct on 2928 * all PDQ-based boards. We guaranteed both requirements during 2929 * driver initialization when we allocated memory for the receive buffers. 
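 *
 *   Worked example of the encoding written below (the buffer size is
 *   illustrative): for a 4608-byte receive buffer the segment length
 *   field holds 4608 / 128 = 36 units of PI_ALIGN_K_RCV_DATA_BUFF, and a
 *   physical address such as 0x00123480 satisfies the alignment rule
 *   because its low seven bits are zero.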
2930 */ 2931 2932 if (get_buffers) { 2933 #ifdef DYNAMIC_BUFFERS 2934 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++) 2935 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post) 2936 { 2937 struct sk_buff *newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE, GFP_NOIO); 2938 if (!newskb) 2939 return -ENOMEM; 2940 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP | 2941 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN)); 2942 /* 2943 * align to 128 bytes for compatibility with 2944 * the old EISA boards. 2945 */ 2946 2947 my_skb_align(newskb, 128); 2948 bp->descr_block_virt->rcv_data[i + j].long_1 = 2949 (u32)dma_map_single(bp->bus_dev, newskb->data, 2950 NEW_SKB_SIZE, 2951 DMA_FROM_DEVICE); 2952 /* 2953 * p_rcv_buff_va is only used inside the 2954 * kernel so we put the skb pointer here. 2955 */ 2956 bp->p_rcv_buff_va[i+j] = (char *) newskb; 2957 } 2958 #else 2959 for (i=0; i < (int)(bp->rcv_bufs_to_post); i++) 2960 for (j=0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post) 2961 { 2962 bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP | 2963 ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN)); 2964 bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX)); 2965 bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX)); 2966 } 2967 #endif 2968 } 2969 2970 /* Update receive producer and Type 2 register */ 2971 2972 bp->rcv_xmt_reg.index.rcv_prod = bp->rcv_bufs_to_post; 2973 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword); 2974 return 0; 2975 } 2976 2977 2978 /* 2979 * ========================= 2980 * = dfx_rcv_queue_process = 2981 * ========================= 2982 * 2983 * Overview: 2984 * Process received LLC frames. 2985 * 2986 * Returns: 2987 * None 2988 * 2989 * Arguments: 2990 * bp - pointer to board information 2991 * 2992 * Functional Description: 2993 * Received LLC frames are processed until there are no more consumed frames. 2994 * Once all frames are processed, the receive buffers are returned to the 2995 * adapter. Note that this algorithm fixes the length of time that can be spent 2996 * in this routine, because there are a fixed number of receive buffers to 2997 * process and buffers are not produced until this routine exits and returns 2998 * to the ISR. 
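 *
 *   When built with DYNAMIC_BUFFERS the body below also applies a
 *   copybreak policy, roughly:
 *
 *       if frame length > SKBUFF_RX_COPYBREAK and a replacement skb
 *               can be allocated:
 *           hand the original receive skb up and post the new one
 *       else:
 *           allocate a small skb and copy the frame into it
 *
 *   so large frames avoid the extra copy at the cost of mapping a fresh
 *   buffer into the ring.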
2999 * 3000 * Return Codes: 3001 * None 3002 * 3003 * Assumptions: 3004 * None 3005 * 3006 * Side Effects: 3007 * None 3008 */ 3009 3010 static void dfx_rcv_queue_process( 3011 DFX_board_t *bp 3012 ) 3013 3014 { 3015 PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */ 3016 char *p_buff; /* ptr to start of packet receive buffer (FMC descriptor) */ 3017 u32 descr, pkt_len; /* FMC descriptor field and packet length */ 3018 struct sk_buff *skb; /* pointer to a sk_buff to hold incoming packet data */ 3019 3020 /* Service all consumed LLC receive frames */ 3021 3022 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data); 3023 while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons) 3024 { 3025 /* Process any errors */ 3026 3027 int entry; 3028 3029 entry = bp->rcv_xmt_reg.index.rcv_comp; 3030 #ifdef DYNAMIC_BUFFERS 3031 p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data); 3032 #else 3033 p_buff = bp->p_rcv_buff_va[entry]; 3034 #endif 3035 memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32)); 3036 3037 if (descr & PI_FMC_DESCR_M_RCC_FLUSH) 3038 { 3039 if (descr & PI_FMC_DESCR_M_RCC_CRC) 3040 bp->rcv_crc_errors++; 3041 else 3042 bp->rcv_frame_status_errors++; 3043 } 3044 else 3045 { 3046 int rx_in_place = 0; 3047 3048 /* The frame was received without errors - verify packet length */ 3049 3050 pkt_len = (u32)((descr & PI_FMC_DESCR_M_LEN) >> PI_FMC_DESCR_V_LEN); 3051 pkt_len -= 4; /* subtract 4 byte CRC */ 3052 if (!IN_RANGE(pkt_len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN)) 3053 bp->rcv_length_errors++; 3054 else{ 3055 #ifdef DYNAMIC_BUFFERS 3056 if (pkt_len > SKBUFF_RX_COPYBREAK) { 3057 struct sk_buff *newskb; 3058 3059 newskb = dev_alloc_skb(NEW_SKB_SIZE); 3060 if (newskb){ 3061 rx_in_place = 1; 3062 3063 my_skb_align(newskb, 128); 3064 skb = (struct sk_buff *)bp->p_rcv_buff_va[entry]; 3065 dma_unmap_single(bp->bus_dev, 3066 bp->descr_block_virt->rcv_data[entry].long_1, 3067 NEW_SKB_SIZE, 3068 DMA_FROM_DEVICE); 3069 skb_reserve(skb, RCV_BUFF_K_PADDING); 3070 bp->p_rcv_buff_va[entry] = (char *)newskb; 3071 bp->descr_block_virt->rcv_data[entry].long_1 = 3072 (u32)dma_map_single(bp->bus_dev, 3073 newskb->data, 3074 NEW_SKB_SIZE, 3075 DMA_FROM_DEVICE); 3076 } else 3077 skb = NULL; 3078 } else 3079 #endif 3080 skb = dev_alloc_skb(pkt_len+3); /* alloc new buffer to pass up, add room for PRH */ 3081 if (skb == NULL) 3082 { 3083 printk("%s: Could not allocate receive buffer. Dropping packet.\n", bp->dev->name); 3084 bp->rcv_discards++; 3085 break; 3086 } 3087 else { 3088 #ifndef DYNAMIC_BUFFERS 3089 if (! rx_in_place) 3090 #endif 3091 { 3092 /* Receive buffer allocated, pass receive packet up */ 3093 3094 skb_copy_to_linear_data(skb, 3095 p_buff + RCV_BUFF_K_PADDING, 3096 pkt_len + 3); 3097 } 3098 3099 skb_reserve(skb,3); /* adjust data field so that it points to FC byte */ 3100 skb_put(skb, pkt_len); /* pass up packet length, NOT including CRC */ 3101 skb->protocol = fddi_type_trans(skb, bp->dev); 3102 bp->rcv_total_bytes += skb->len; 3103 netif_rx(skb); 3104 3105 /* Update the rcv counters */ 3106 bp->rcv_total_frames++; 3107 if (*(p_buff + RCV_BUFF_K_DA) & 0x01) 3108 bp->rcv_multicast_frames++; 3109 } 3110 } 3111 } 3112 3113 /* 3114 * Advance the producer (for recycling) and advance the completion 3115 * (for servicing received frames). Note that it is okay to 3116 * advance the producer without checking that it passes the 3117 * completion index because they are both advanced at the same 3118 * rate. 
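 *
 *   Concretely: the producer starts rcv_bufs_to_post entries ahead of the
 *   completion index (see dfx_rcv_init) and both are bumped once per
 *   serviced frame, so their distance stays constant as the indices wrap
 *   around the 256-entry descriptor block.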
3119 */ 3120 3121 bp->rcv_xmt_reg.index.rcv_prod += 1; 3122 bp->rcv_xmt_reg.index.rcv_comp += 1; 3123 } 3124 } 3125 3126 3127 /* 3128 * ===================== 3129 * = dfx_xmt_queue_pkt = 3130 * ===================== 3131 * 3132 * Overview: 3133 * Queues packets for transmission 3134 * 3135 * Returns: 3136 * Condition code 3137 * 3138 * Arguments: 3139 * skb - pointer to sk_buff to queue for transmission 3140 * dev - pointer to device information 3141 * 3142 * Functional Description: 3143 * Here we assume that an incoming skb transmit request 3144 * is contained in a single physically contiguous buffer 3145 * in which the virtual address of the start of packet 3146 * (skb->data) can be converted to a physical address 3147 * by using pci_map_single(). 3148 * 3149 * Since the adapter architecture requires a three byte 3150 * packet request header to prepend the start of packet, 3151 * we'll write the three byte field immediately prior to 3152 * the FC byte. This assumption is valid because we've 3153 * ensured that dev->hard_header_len includes three pad 3154 * bytes. By posting a single fragment to the adapter, 3155 * we'll reduce the number of descriptor fetches and 3156 * bus traffic needed to send the request. 3157 * 3158 * Also, we can't free the skb until after it's been DMA'd 3159 * out by the adapter, so we'll queue it in the driver and 3160 * return it in dfx_xmt_done. 3161 * 3162 * Return Codes: 3163 * 0 - driver queued packet, link is unavailable, or skbuff was bad 3164 * 1 - caller should requeue the sk_buff for later transmission 3165 * 3166 * Assumptions: 3167 * First and foremost, we assume the incoming skb pointer 3168 * is NOT NULL and is pointing to a valid sk_buff structure. 3169 * 3170 * The outgoing packet is complete, starting with the 3171 * frame control byte including the last byte of data, 3172 * but NOT including the 4 byte CRC. We'll let the 3173 * adapter hardware generate and append the CRC. 3174 * 3175 * The entire packet is stored in one physically 3176 * contiguous buffer which is not cached and whose 3177 * 32-bit physical address can be determined. 3178 * 3179 * It's vital that this routine is NOT reentered for the 3180 * same board and that the OS is not in another section of 3181 * code (eg. dfx_int_common) for the same board on a 3182 * different thread. 3183 * 3184 * Side Effects: 3185 * None 3186 */ 3187 3188 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb, 3189 struct net_device *dev) 3190 { 3191 DFX_board_t *bp = netdev_priv(dev); 3192 u8 prod; /* local transmit producer index */ 3193 PI_XMT_DESCR *p_xmt_descr; /* ptr to transmit descriptor block entry */ 3194 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */ 3195 unsigned long flags; 3196 3197 netif_stop_queue(dev); 3198 3199 /* 3200 * Verify that incoming transmit request is OK 3201 * 3202 * Note: The packet size check is consistent with other 3203 * Linux device drivers, although the correct packet 3204 * size should be verified before calling the 3205 * transmit routine. 
3206 */ 3207 3208 if (!IN_RANGE(skb->len, FDDI_K_LLC_ZLEN, FDDI_K_LLC_LEN)) 3209 { 3210 printk("%s: Invalid packet length - %u bytes\n", 3211 dev->name, skb->len); 3212 bp->xmt_length_errors++; /* bump error counter */ 3213 netif_wake_queue(dev); 3214 dev_kfree_skb(skb); 3215 return NETDEV_TX_OK; /* return "success" */ 3216 } 3217 /* 3218 * See if adapter link is available, if not, free buffer 3219 * 3220 * Note: If the link isn't available, free buffer and return 0 3221 * rather than tell the upper layer to requeue the packet. 3222 * The methodology here is that by the time the link 3223 * becomes available, the packet to be sent will be 3224 * fairly stale. By simply dropping the packet, the 3225 * higher layer protocols will eventually time out 3226 * waiting for response packets which it won't receive. 3227 */ 3228 3229 if (bp->link_available == PI_K_FALSE) 3230 { 3231 if (dfx_hw_adap_state_rd(bp) == PI_STATE_K_LINK_AVAIL) /* is link really available? */ 3232 bp->link_available = PI_K_TRUE; /* if so, set flag and continue */ 3233 else 3234 { 3235 bp->xmt_discards++; /* bump error counter */ 3236 dev_kfree_skb(skb); /* free sk_buff now */ 3237 netif_wake_queue(dev); 3238 return NETDEV_TX_OK; /* return "success" */ 3239 } 3240 } 3241 3242 spin_lock_irqsave(&bp->lock, flags); 3243 3244 /* Get the current producer and the next free xmt data descriptor */ 3245 3246 prod = bp->rcv_xmt_reg.index.xmt_prod; 3247 p_xmt_descr = &(bp->descr_block_virt->xmt_data[prod]); 3248 3249 /* 3250 * Get pointer to auxiliary queue entry to contain information 3251 * for this packet. 3252 * 3253 * Note: The current xmt producer index will become the 3254 * current xmt completion index when we complete this 3255 * packet later on. So, we'll get the pointer to the 3256 * next auxiliary queue entry now before we bump the 3257 * producer index. 3258 */ 3259 3260 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]); /* also bump producer index */ 3261 3262 /* Write the three PRH bytes immediately before the FC byte */ 3263 3264 skb_push(skb,3); 3265 skb->data[0] = DFX_PRH0_BYTE; /* these byte values are defined */ 3266 skb->data[1] = DFX_PRH1_BYTE; /* in the Motorola FDDI MAC chip */ 3267 skb->data[2] = DFX_PRH2_BYTE; /* specification */ 3268 3269 /* 3270 * Write the descriptor with buffer info and bump producer 3271 * 3272 * Note: Since we need to start DMA from the packet request 3273 * header, we'll add 3 bytes to the DMA buffer length, 3274 * and we'll determine the physical address of the 3275 * buffer from the PRH, not skb->data. 3276 * 3277 * Assumptions: 3278 * 1. Packet starts with the frame control (FC) byte 3279 * at skb->data. 3280 * 2. The 4-byte CRC is not appended to the buffer or 3281 * included in the length. 3282 * 3. Packet length (skb->len) is from FC to end of 3283 * data, inclusive. 3284 * 4. The packet length does not exceed the maximum 3285 * FDDI LLC frame length of 4491 bytes. 3286 * 5. The entire packet is contained in a physically 3287 * contiguous, non-cached, locked memory space 3288 * comprised of a single buffer pointed to by 3289 * skb->data. 3290 * 6. The physical address of the start of packet 3291 * can be determined from the virtual address 3292 * by using pci_map_single() and is only 32-bits 3293 * wide. 
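 *
 *   Putting the above together (sizes are illustrative): for a 1000-byte
 *   LLC frame handed in at skb->data, the skb_push(skb, 3) above makes
 *   skb->data point at the packet request header, skb->len becomes 1003,
 *   and the descriptor written below carries
 *
 *       long_0 = PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP |
 *                (1003 << PI_XMT_DESCR_V_SEG_LEN);
 *       long_1 = dma_map_single(bp->bus_dev, skb->data, 1003, DMA_TO_DEVICE);
 *
 *   i.e. the whole PRH + FC + data region goes out as one fragment.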
3294 */ 3295 3296 p_xmt_descr->long_0 = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN)); 3297 p_xmt_descr->long_1 = (u32)dma_map_single(bp->bus_dev, skb->data, 3298 skb->len, DMA_TO_DEVICE); 3299 3300 /* 3301 * Verify that descriptor is actually available 3302 * 3303 * Note: If descriptor isn't available, return 1 which tells 3304 * the upper layer to requeue the packet for later 3305 * transmission. 3306 * 3307 * We need to ensure that the producer never reaches the 3308 * completion, except to indicate that the queue is empty. 3309 */ 3310 3311 if (prod == bp->rcv_xmt_reg.index.xmt_comp) 3312 { 3313 skb_pull(skb,3); 3314 spin_unlock_irqrestore(&bp->lock, flags); 3315 return NETDEV_TX_BUSY; /* requeue packet for later */ 3316 } 3317 3318 /* 3319 * Save info for this packet for xmt done indication routine 3320 * 3321 * Normally, we'd save the producer index in the p_xmt_drv_descr 3322 * structure so that we'd have it handy when we complete this 3323 * packet later (in dfx_xmt_done). However, since the current 3324 * transmit architecture guarantees a single fragment for the 3325 * entire packet, we can simply bump the completion index by 3326 * one (1) for each completed packet. 3327 * 3328 * Note: If this assumption changes and we're presented with 3329 * an inconsistent number of transmit fragments for packet 3330 * data, we'll need to modify this code to save the current 3331 * transmit producer index. 3332 */ 3333 3334 p_xmt_drv_descr->p_skb = skb; 3335 3336 /* Update Type 2 register */ 3337 3338 bp->rcv_xmt_reg.index.xmt_prod = prod; 3339 dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword); 3340 spin_unlock_irqrestore(&bp->lock, flags); 3341 netif_wake_queue(dev); 3342 return NETDEV_TX_OK; /* packet queued to adapter */ 3343 } 3344 3345 3346 /* 3347 * ================ 3348 * = dfx_xmt_done = 3349 * ================ 3350 * 3351 * Overview: 3352 * Processes all frames that have been transmitted. 3353 * 3354 * Returns: 3355 * None 3356 * 3357 * Arguments: 3358 * bp - pointer to board information 3359 * 3360 * Functional Description: 3361 * For all consumed transmit descriptors that have not 3362 * yet been completed, we'll free the skb we were holding 3363 * onto using dev_kfree_skb and bump the appropriate 3364 * counters. 3365 * 3366 * Return Codes: 3367 * None 3368 * 3369 * Assumptions: 3370 * The Type 2 register is not updated in this routine. It is 3371 * assumed that it will be updated in the ISR when dfx_xmt_done 3372 * returns. 
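 *
 *   The resulting interaction with the ISR (sketch of the code already in
 *   dfx_int_common above):
 *
 *       if (dfx_xmt_done(bp))
 *           netif_wake_queue(dev);
 *       ...
 *       dfx_port_write_long(bp, PI_PDQ_K_REG_TYPE_2_PROD, bp->rcv_xmt_reg.lword);
 *
 *   so the Type 2 register is written once, after both transmit and
 *   receive completions have been folded into bp->rcv_xmt_reg.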
3373 * 3374 * Side Effects: 3375 * None 3376 */ 3377 3378 static int dfx_xmt_done(DFX_board_t *bp) 3379 { 3380 XMT_DRIVER_DESCR *p_xmt_drv_descr; /* ptr to transmit driver descriptor */ 3381 PI_TYPE_2_CONSUMER *p_type_2_cons; /* ptr to rcv/xmt consumer block register */ 3382 u8 comp; /* local transmit completion index */ 3383 int freed = 0; /* buffers freed */ 3384 3385 /* Service all consumed transmit frames */ 3386 3387 p_type_2_cons = (PI_TYPE_2_CONSUMER *)(&bp->cons_block_virt->xmt_rcv_data); 3388 while (bp->rcv_xmt_reg.index.xmt_comp != p_type_2_cons->index.xmt_cons) 3389 { 3390 /* Get pointer to the transmit driver descriptor block information */ 3391 3392 p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]); 3393 3394 /* Increment transmit counters */ 3395 3396 bp->xmt_total_frames++; 3397 bp->xmt_total_bytes += p_xmt_drv_descr->p_skb->len; 3398 3399 /* Return skb to operating system */ 3400 comp = bp->rcv_xmt_reg.index.xmt_comp; 3401 dma_unmap_single(bp->bus_dev, 3402 bp->descr_block_virt->xmt_data[comp].long_1, 3403 p_xmt_drv_descr->p_skb->len, 3404 DMA_TO_DEVICE); 3405 dev_kfree_skb_irq(p_xmt_drv_descr->p_skb); 3406 3407 /* 3408 * Move to start of next packet by updating completion index 3409 * 3410 * Here we assume that a transmit packet request is always 3411 * serviced by posting one fragment. We can therefore 3412 * simplify the completion code by incrementing the 3413 * completion index by one. This code will need to be 3414 * modified if this assumption changes. See comments 3415 * in dfx_xmt_queue_pkt for more details. 3416 */ 3417 3418 bp->rcv_xmt_reg.index.xmt_comp += 1; 3419 freed++; 3420 } 3421 return freed; 3422 } 3423 3424 3425 /* 3426 * ================= 3427 * = dfx_rcv_flush = 3428 * ================= 3429 * 3430 * Overview: 3431 * Remove all skb's in the receive ring. 3432 * 3433 * Returns: 3434 * None 3435 * 3436 * Arguments: 3437 * bp - pointer to board information 3438 * 3439 * Functional Description: 3440 * Free's all the dynamically allocated skb's that are 3441 * currently attached to the device receive ring. This 3442 * function is typically only used when the device is 3443 * initialized or reinitialized. 3444 * 3445 * Return Codes: 3446 * None 3447 * 3448 * Side Effects: 3449 * None 3450 */ 3451 #ifdef DYNAMIC_BUFFERS 3452 static void dfx_rcv_flush( DFX_board_t *bp ) 3453 { 3454 int i, j; 3455 3456 for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++) 3457 for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post) 3458 { 3459 struct sk_buff *skb; 3460 skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j]; 3461 if (skb) 3462 dev_kfree_skb(skb); 3463 bp->p_rcv_buff_va[i+j] = NULL; 3464 } 3465 3466 } 3467 #else 3468 static inline void dfx_rcv_flush( DFX_board_t *bp ) 3469 { 3470 } 3471 #endif /* DYNAMIC_BUFFERS */ 3472 3473 /* 3474 * ================= 3475 * = dfx_xmt_flush = 3476 * ================= 3477 * 3478 * Overview: 3479 * Processes all frames whether they've been transmitted 3480 * or not. 3481 * 3482 * Returns: 3483 * None 3484 * 3485 * Arguments: 3486 * bp - pointer to board information 3487 * 3488 * Functional Description: 3489 * For all produced transmit descriptors that have not 3490 * yet been completed, we'll free the skb we were holding 3491 * onto using dev_kfree_skb and bump the appropriate 3492 * counters. Of course, it's possible that some of 3493 * these transmit requests actually did go out, but we 3494 * won't make that distinction here. 
 *   Finally, we'll update the consumer index to match the producer.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   This routine does NOT update the Type 2 register. It
 *   is assumed that this routine is being called during a
 *   transmit flush interrupt, or a shutdown or close routine.
 *
 * Side Effects:
 *   None
 */

static void dfx_xmt_flush( DFX_board_t *bp )
{
	u32 prod_cons;				/* rcv/xmt consumer block longword */
	XMT_DRIVER_DESCR *p_xmt_drv_descr;	/* ptr to transmit driver descriptor */
	u8 comp;				/* local transmit completion index */

	/* Flush all outstanding transmit frames */

	while (bp->rcv_xmt_reg.index.xmt_comp != bp->rcv_xmt_reg.index.xmt_prod)
	{
		/* Get pointer to the transmit driver descriptor block information */

		p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[bp->rcv_xmt_reg.index.xmt_comp]);

		/* Return skb to operating system */
		comp = bp->rcv_xmt_reg.index.xmt_comp;
		dma_unmap_single(bp->bus_dev,
				 bp->descr_block_virt->xmt_data[comp].long_1,
				 p_xmt_drv_descr->p_skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb(p_xmt_drv_descr->p_skb);

		/* Increment transmit error counter */

		bp->xmt_discards++;

		/*
		 * Move to start of next packet by updating completion index
		 *
		 * Here we assume that a transmit packet request is always
		 * serviced by posting one fragment. We can therefore
		 * simplify the completion code by incrementing the
		 * completion index by one. This code will need to be
		 * modified if this assumption changes. See comments
		 * in dfx_xmt_queue_pkt for more details.
		 */

		bp->rcv_xmt_reg.index.xmt_comp += 1;
	}

	/* Update the transmit consumer index in the consumer block */

	prod_cons = (u32)(bp->cons_block_virt->xmt_rcv_data & ~PI_CONS_M_XMT_INDEX);
	prod_cons |= (u32)(bp->rcv_xmt_reg.index.xmt_prod << PI_CONS_V_XMT_INDEX);
	bp->cons_block_virt->xmt_rcv_data = prod_cons;
}

/*
 * ==================
 * = dfx_unregister =
 * ==================
 *
 * Overview:
 *   Shuts down an FDDI controller
 *
 * Returns:
 *   None
 *
 * Arguments:
 *   bdev - pointer to device information
 *
 * Functional Description:
 *   Unregisters the network interface and releases the resources
 *   claimed when the adapter was registered: the DMA-consistent
 *   descriptor memory and the I/O or memory region used for the
 *   adapter registers.
 *
 * Return Codes:
 *   None
 *
 * Assumptions:
 *   It compiles so it should work :-(  (PCI cards do :-)
 *
 * Side Effects:
 *   Device structures for FDDI adapters (fddi0, fddi1, etc) are
 *   freed.
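 *   For PCI adapters the device is also disabled again with
 *   pci_disable_device().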
 */
static void __devexit dfx_unregister(struct device *bdev)
{
	struct net_device *dev = dev_get_drvdata(bdev);
	DFX_board_t *bp = netdev_priv(dev);
	int dfx_bus_pci = DFX_BUS_PCI(bdev);
	int dfx_bus_tc = DFX_BUS_TC(bdev);
	int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
	resource_size_t bar_start = 0;		/* pointer to port */
	resource_size_t bar_len = 0;		/* resource length */
	int alloc_size;				/* total buffer size used */

	unregister_netdev(dev);

	alloc_size = sizeof(PI_DESCR_BLOCK) +
		     PI_CMD_REQ_K_SIZE_MAX + PI_CMD_RSP_K_SIZE_MAX +
#ifndef DYNAMIC_BUFFERS
		     (bp->rcv_bufs_to_post * PI_RCV_DATA_K_SIZE_MAX) +
#endif
		     sizeof(PI_CONSUMER_BLOCK) +
		     (PI_ALIGN_K_DESC_BLK - 1);
	if (bp->kmalloced)
		dma_free_coherent(bdev, alloc_size,
				  bp->kmalloced, bp->kmalloced_dma);

	dfx_bus_uninit(dev);

	dfx_get_bars(bdev, &bar_start, &bar_len);
	if (dfx_use_mmio) {
		iounmap(bp->base.mem);
		release_mem_region(bar_start, bar_len);
	} else
		release_region(bar_start, bar_len);

	if (dfx_bus_pci)
		pci_disable_device(to_pci_dev(bdev));

	free_netdev(dev);
}


static int __devinit __maybe_unused dfx_dev_register(struct device *);
static int __devexit __maybe_unused dfx_dev_unregister(struct device *);

#ifdef CONFIG_PCI
static int __devinit dfx_pci_register(struct pci_dev *,
				      const struct pci_device_id *);
static void __devexit dfx_pci_unregister(struct pci_dev *);

static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
	{ }
};
MODULE_DEVICE_TABLE(pci, dfx_pci_table);

static struct pci_driver dfx_pci_driver = {
	.name		= "defxx",
	.id_table	= dfx_pci_table,
	.probe		= dfx_pci_register,
	.remove		= __devexit_p(dfx_pci_unregister),
};

static __devinit int dfx_pci_register(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	return dfx_register(&pdev->dev);
}

static void __devexit dfx_pci_unregister(struct pci_dev *pdev)
{
	dfx_unregister(&pdev->dev);
}
#endif /* CONFIG_PCI */

#ifdef CONFIG_EISA
static struct eisa_device_id dfx_eisa_table[] = {
	{ "DEC3001", DEFEA_PROD_ID_1 },
	{ "DEC3002", DEFEA_PROD_ID_2 },
	{ "DEC3003", DEFEA_PROD_ID_3 },
	{ "DEC3004", DEFEA_PROD_ID_4 },
	{ }
};
MODULE_DEVICE_TABLE(eisa, dfx_eisa_table);

static struct eisa_driver dfx_eisa_driver = {
	.id_table	= dfx_eisa_table,
	.driver		= {
		.name	= "defxx",
		.bus	= &eisa_bus_type,
		.probe	= dfx_dev_register,
		.remove	= __devexit_p(dfx_dev_unregister),
	},
};
#endif /* CONFIG_EISA */

#ifdef CONFIG_TC
static struct tc_device_id const dfx_tc_table[] = {
	{ "DEC     ", "PMAF-FA " },
	{ "DEC     ", "PMAF-FD " },
	{ "DEC     ", "PMAF-FS " },
	{ "DEC     ", "PMAF-FU " },
	{ }
};
MODULE_DEVICE_TABLE(tc, dfx_tc_table);

static struct tc_driver dfx_tc_driver = {
	.id_table	= dfx_tc_table,
	.driver		= {
		.name	= "defxx",
		.bus	= &tc_bus_type,
		.probe	= dfx_dev_register,
		.remove	= __devexit_p(dfx_dev_unregister),
	},
};
#endif /* CONFIG_TC */

static int __devinit __maybe_unused dfx_dev_register(struct device *dev)
{
	int status;

	status = dfx_register(dev);
	if (!status)
		get_device(dev);

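	/* The device reference taken above is dropped again in
	 * dfx_dev_unregister().
	 */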
	return status;
}

static int __devexit __maybe_unused dfx_dev_unregister(struct device *dev)
{
	put_device(dev);
	dfx_unregister(dev);
	return 0;
}


static int __devinit dfx_init(void)
{
	int status;

	status = pci_register_driver(&dfx_pci_driver);
	if (!status)
		status = eisa_driver_register(&dfx_eisa_driver);
	if (!status)
		status = tc_register_driver(&dfx_tc_driver);
	return status;
}

static void __devexit dfx_cleanup(void)
{
	tc_unregister_driver(&dfx_tc_driver);
	eisa_driver_unregister(&dfx_eisa_driver);
	pci_unregister_driver(&dfx_pci_driver);
}

module_init(dfx_init);
module_exit(dfx_cleanup);
MODULE_AUTHOR("Lawrence V. Stefani");
MODULE_DESCRIPTION("DEC FDDIcontroller TC/EISA/PCI (DEFTA/DEFEA/DEFPA) driver "
		   DRV_VERSION " " DRV_RELDATE);
MODULE_LICENSE("GPL");