/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/prefetch.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k5"

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

#define TIMED_OUT_MSG							\
"Timed out waiting for management port to get free before issuing command\n"

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 * These are the known PHYs which are used.
 */
enum PHY_DEVICE_TYPE {
	PHY_TYPE_UNKNOWN = 0,
	PHY_VITESSE_VSC8211,
	PHY_AGERE_ET1011C,
	MAX_PHY_DEV_TYPES
};

struct PHY_DEVICE_INFO {
	const enum PHY_DEVICE_TYPE phyDevice;
	const u32 phyIdOUI;
	const u16 phyIdModel;
	const char *name;
};

static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};

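/*
 * Note on the semaphore register used by the helpers below: the lower
 * 16 bits hold the semaphore values and the upper 16 bits act as a
 * write mask, so a single writel() of (sem_mask | sem_bits) updates
 * only the masked bits.  Reading the register back and comparing
 * against (sem_mask >> 16) appears to be how ownership is confirmed.
 */
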
104 */ 105 static int ql_sem_spinlock(struct ql3_adapter *qdev, 106 u32 sem_mask, u32 sem_bits) 107 { 108 struct ql3xxx_port_registers __iomem *port_regs = 109 qdev->mem_map_registers; 110 u32 value; 111 unsigned int seconds = 3; 112 113 do { 114 writel((sem_mask | sem_bits), 115 &port_regs->CommonRegs.semaphoreReg); 116 value = readl(&port_regs->CommonRegs.semaphoreReg); 117 if ((value & (sem_mask >> 16)) == sem_bits) 118 return 0; 119 ssleep(1); 120 } while (--seconds); 121 return -1; 122 } 123 124 static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) 125 { 126 struct ql3xxx_port_registers __iomem *port_regs = 127 qdev->mem_map_registers; 128 writel(sem_mask, &port_regs->CommonRegs.semaphoreReg); 129 readl(&port_regs->CommonRegs.semaphoreReg); 130 } 131 132 static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) 133 { 134 struct ql3xxx_port_registers __iomem *port_regs = 135 qdev->mem_map_registers; 136 u32 value; 137 138 writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg); 139 value = readl(&port_regs->CommonRegs.semaphoreReg); 140 return ((value & (sem_mask >> 16)) == sem_bits); 141 } 142 143 /* 144 * Caller holds hw_lock. 145 */ 146 static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) 147 { 148 int i = 0; 149 150 while (i < 10) { 151 if (i) 152 ssleep(1); 153 154 if (ql_sem_lock(qdev, 155 QL_DRVR_SEM_MASK, 156 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) 157 * 2) << 1)) { 158 netdev_printk(KERN_DEBUG, qdev->ndev, 159 "driver lock acquired\n"); 160 return 1; 161 } 162 } 163 164 netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); 165 return 0; 166 } 167 168 static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) 169 { 170 struct ql3xxx_port_registers __iomem *port_regs = 171 qdev->mem_map_registers; 172 173 writel(((ISP_CONTROL_NP_MASK << 16) | page), 174 &port_regs->CommonRegs.ispControlStatus); 175 readl(&port_regs->CommonRegs.ispControlStatus); 176 qdev->current_page = page; 177 } 178 179 static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) 180 { 181 u32 value; 182 unsigned long hw_flags; 183 184 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 185 value = readl(reg); 186 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 187 188 return value; 189 } 190 191 static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg) 192 { 193 return readl(reg); 194 } 195 196 static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) 197 { 198 u32 value; 199 unsigned long hw_flags; 200 201 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 202 203 if (qdev->current_page != 0) 204 ql_set_register_page(qdev, 0); 205 value = readl(reg); 206 207 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 208 return value; 209 } 210 211 static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) 212 { 213 if (qdev->current_page != 0) 214 ql_set_register_page(qdev, 0); 215 return readl(reg); 216 } 217 218 static void ql_write_common_reg_l(struct ql3_adapter *qdev, 219 u32 __iomem *reg, u32 value) 220 { 221 unsigned long hw_flags; 222 223 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 224 writel(value, reg); 225 readl(reg); 226 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 227 } 228 229 static void ql_write_common_reg(struct ql3_adapter *qdev, 230 u32 __iomem *reg, u32 value) 231 { 232 writel(value, reg); 233 readl(reg); 234 } 235 236 static void ql_write_nvram_reg(struct ql3_adapter *qdev, 237 u32 __iomem *reg, u32 value) 238 { 239 writel(value, reg); 240 readl(reg); 241 
static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));
}

static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;

	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from the
			 * first buffer.
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
					   err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
				cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
				cpu_to_le32(MS_64BITS(map));
			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			dma_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;

	if (lrg_buf_cb != NULL) {
		qdev->lrg_buf_free_head = lrg_buf_cb->next;
		if (qdev->lrg_buf_free_head == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

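/*
 * The fm93c56a_* helpers below bit-bang what looks like a Microwire
 * style FM93C56A serial EEPROM through the serial port interface
 * register: assert chip select, clock out a start bit plus the
 * command and address bits on DO, then clock the data back in on DI,
 * one bit per CLK_RISE/CLK_FALL pair.
 */
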
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, spir,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit = (cmd & mask)
			? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/* If the bit changed, change the DO state to match */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match.
			 */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

479 */ 480 static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) 481 { 482 int i; 483 u32 data = 0; 484 u32 dataBit; 485 struct ql3xxx_port_registers __iomem *port_regs = 486 qdev->mem_map_registers; 487 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 488 489 /* Read the data bits */ 490 /* The first bit is a dummy. Clock right over it. */ 491 for (i = 0; i < dataBits; i++) { 492 ql_write_nvram_reg(qdev, spir, 493 ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 494 AUBURN_EEPROM_CLK_RISE); 495 ql_write_nvram_reg(qdev, spir, 496 ISP_NVRAM_MASK | qdev->eeprom_cmd_data | 497 AUBURN_EEPROM_CLK_FALL); 498 dataBit = (ql_read_common_reg(qdev, spir) & 499 AUBURN_EEPROM_DI_1) ? 1 : 0; 500 data = (data << 1) | dataBit; 501 } 502 *value = (u16)data; 503 } 504 505 /* 506 * Caller holds hw_lock. 507 */ 508 static void eeprom_readword(struct ql3_adapter *qdev, 509 u32 eepromAddr, unsigned short *value) 510 { 511 fm93c56a_select(qdev); 512 fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr); 513 fm93c56a_datain(qdev, value); 514 fm93c56a_deselect(qdev); 515 } 516 517 static void ql_set_mac_addr(struct net_device *ndev, u16 *addr) 518 { 519 __le16 *p = (__le16 *)ndev->dev_addr; 520 p[0] = cpu_to_le16(addr[0]); 521 p[1] = cpu_to_le16(addr[1]); 522 p[2] = cpu_to_le16(addr[2]); 523 } 524 525 static int ql_get_nvram_params(struct ql3_adapter *qdev) 526 { 527 u16 *pEEPROMData; 528 u16 checksum = 0; 529 u32 index; 530 unsigned long hw_flags; 531 532 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 533 534 pEEPROMData = (u16 *)&qdev->nvram_data; 535 qdev->eeprom_cmd_data = 0; 536 if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, 537 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 538 2) << 10)) { 539 pr_err("%s: Failed ql_sem_spinlock()\n", __func__); 540 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 541 return -1; 542 } 543 544 for (index = 0; index < EEPROM_SIZE; index++) { 545 eeprom_readword(qdev, index, pEEPROMData); 546 checksum += *pEEPROMData; 547 pEEPROMData++; 548 } 549 ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); 550 551 if (checksum != 0) { 552 netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n", 553 checksum); 554 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 555 return -1; 556 } 557 558 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 559 return checksum; 560 } 561 562 static const u32 PHYAddr[2] = { 563 PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS 564 }; 565 566 static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) 567 { 568 struct ql3xxx_port_registers __iomem *port_regs = 569 qdev->mem_map_registers; 570 u32 temp; 571 int count = 1000; 572 573 while (count) { 574 temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg); 575 if (!(temp & MAC_MII_STATUS_BSY)) 576 return 0; 577 udelay(10); 578 count--; 579 } 580 return -1; 581 } 582 583 static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) 584 { 585 struct ql3xxx_port_registers __iomem *port_regs = 586 qdev->mem_map_registers; 587 u32 scanControl; 588 589 if (qdev->numPorts > 1) { 590 /* Auto scan will cycle through multiple ports */ 591 scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC; 592 } else { 593 scanControl = MAC_MII_CONTROL_SC; 594 } 595 596 /* 597 * Scan register 1 of PHY/PETBI, 598 * Set up to scan both devices 599 * The autoscan starts from the first register, completes 600 * the last one before rolling over to the first 601 */ 602 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, 603 PHYAddr[0] | MII_SCAN_REGISTER); 604 605 ql_write_page0_reg(qdev, 
static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address.
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

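/*
 * In the Agere init below, registers 0x10/0x11 look like a vendor
 * debug address/data pair (note the "point to hidden reg" write),
 * and the magic values follow the Agere errata referenced in the
 * per-line comments.
 */
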
static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
				       u16 phyIdReg0, u16 phyIdReg1)
{
	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff)
		return result;

	if (phyIdReg1 == 0xffff)
		return result;

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			netdev_info(qdev->ndev, "Phy: %s\n",
				    PHY_DEVICES[i].name);
			result = PHY_DEVICES[i].phyDevice;
			break;
		}
	}

	return result;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;

		reg = (reg >> 8) & 3;
		break;
	}
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;

		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch (reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	}
	case PHY_VITESSE_VSC8211:
	default: {
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
	}
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}

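/*
 * PHY_Setup() identifies the PHY from the two MII ID registers.
 * Reading 0xffff at the default address appears to mean that no PHY
 * responded there, which is taken as the signature of an Agere part
 * sitting at an alternate MII address that still needs its PHY
 * address reprogrammed via phyAgereSpecificInit().
 */
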
static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16 reg1;
	u16 reg2;
	bool agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/* Determine the PHY we are using by reading the ID's */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
		return err;
	}

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
		return err;
	}

	/* Check if we have an Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/*
		 * Determine which MII address to use, based on the index
		 * of the card.
		 */
		if (qdev->mac_index == 0)
			miiAddr = MII_AGERE_ADDR_1;
		else
			miiAddr = MII_AGERE_ADDR_2;

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_1_REG after Agere detected\n");
			return err;
		}

		/* We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/*
	 * Determine the particular PHY we have on board to apply
	 * PHY-specific initializations.
	 */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		netdev_err(qdev->ndev, "PHY is unknown\n");
		return -EIO;
	}

	return 0;
}

/*
 * Caller holds hw_lock.  The MAC config registers use the same
 * upper-16-bit write-mask convention as the semaphore register:
 * (bit | (bit << 16)) sets a bit, (bit << 16) alone clears it.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;

	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
		return 1;
	}
	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
	return 0;
}

/*
 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
			     "not link master\n");
		return 0;
	}

	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
	return 1;
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	/* turn off external loopback */
	if (qdev->phyType == PHY_AGERE_ET1011C)
		ql_mii_write_reg(qdev, 0x13, 0x0000);

	if (qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/*
	 * Some HBAs in the field are set to 0 and they need to be
	 * reinterpreted with a default value.
	 */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)
		linkState = LS_UP;
	else
		linkState = LS_DOWN;

	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Configuring link\n");
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev, ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev, ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Enabling mac\n");
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		netif_info(qdev, link, qdev->ndev,
			   "Link is up at %d Mbps, %s duplex\n",
			   ql_get_link_speed(qdev),
			   ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Remote error detected. Calling ql_port_start()\n");
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

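/*
 * Link state is polled from the adapter timer rather than driven by
 * an interrupt: the work item below samples the port status, walks
 * the LS_DOWN/LS_UP transitions and re-arms the timer when done.
 */
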
static void ql_link_state_machine_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, link_state_work.work);

	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		netif_info(qdev, link, qdev->ndev,
			   "Reset in progress, skip processing link state\n");

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		/* Restart timer on 1 second interval. */
		mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER, &qdev->flags))
			ql_port_start(qdev);
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (curr_link_state == LS_UP) {
			netif_info(qdev, link, qdev->ndev, "Link is up\n");
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);

			qdev->port_link_state = LS_UP;
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up.
		 */
		if (curr_link_state == LS_DOWN) {
			netif_info(qdev, link, qdev->ndev, "Link is down\n");
			qdev->port_link_state = LS_DOWN;
		}
		if (ql_link_down_detect(qdev))
			qdev->port_link_state = LS_DOWN;
		break;
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/* Restart timer on 1 second interval. */
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset
 * so that the management interface clock speed can be set properly.
 * It would be better if we had a way to disable MDC until after the
 * PHY is out of reset, but we don't have that capability.
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
				   &port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

#define SUPPORTED_OPTICAL_MODES	(SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_FIBRE |		\
				 SUPPORTED_Autoneg)
#define SUPPORTED_TP_MODES	(SUPPORTED_10baseT_Half |	\
				 SUPPORTED_10baseT_Full |	\
				 SUPPORTED_100baseT_Half |	\
				 SUPPORTED_100baseT_Full |	\
				 SUPPORTED_1000baseT_Half |	\
				 SUPPORTED_1000baseT_Full |	\
				 SUPPORTED_Autoneg |		\
				 SUPPORTED_TP)

static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
		return SUPPORTED_OPTICAL_MODES;

	return SUPPORTED_TP_MODES;
}

static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE |
			     (qdev->mac_index) * 2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		ecmd->port = PORT_FIBRE;
	} else {
		ecmd->port = PORT_TP;
		ecmd->phy_address = qdev->PHYAddr;
	}
	ecmd->advertising = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev));
	ecmd->duplex = ql_get_full_dup(qdev);
	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
	strncpy(drvinfo->version, ql3xxx_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	qdev->msg_enable = value;
}

static void ql_get_pauseparam(struct net_device *ndev,
			      struct ethtool_pauseparam *pause)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;
	u32 reg;

	if (qdev->mac_index == 0)
		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
	else
		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);

	pause->autoneg = ql_get_auto_cfg_status(qdev);
	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
};

static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb =
				netdev_alloc_skb(qdev->ndev,
						 qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				netdev_printk(KERN_DEBUG, qdev->ndev,
					      "Failed netdev_alloc_skb()\n");
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * the first buffer.
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);

				err = pci_dma_mapping_error(qdev->pdev, map);
				if (err) {
					netdev_err(qdev->ndev,
						   "PCI mapping failed with error: %d\n",
						   err);
					dev_kfree_skb(lrg_buf_cb->skb);
					lrg_buf_cb->skb = NULL;
					break;
				}

				lrg_buf_cb->buf_phy_addr_low =
					cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
					cpu_to_le32(MS_64BITS(map));
				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				dma_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}

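/*
 * Note on the two producer-index helpers below: indexes are pushed to
 * the hardware in batches.  Each small-buffer queue entry appears to
 * cover eight small buffers (the release count steps down by 8 per
 * producer-index increment), and large buffers are likewise posted
 * eight addresses per queue element.
 */
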
/*
 * Caller holds hw_lock.
 */
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}
		wmb();
		writel(qdev->small_buf_q_producer_index,
		       &port_regs->CommonRegs.rxSmallQProducerIndex);
	}
}

/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs =
		qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8) &&
	    (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16) &&
		       (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
					ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
					lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
					lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index ==
			    qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}
		wmb();
		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}

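/*
 * TX completion path: unmap the first segment with pci_unmap_single()
 * and any remaining fragments with pci_unmap_page(), mirroring how
 * the send path mapped them, then return the slot to tx_count.
 */
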
static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	int retval = 0;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_warn(qdev->ndev,
			    "Frame too short but it was padded and sent\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		netdev_err(qdev->ndev,
			   "Frame too short to be legal, frame not sent\n");

		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
			   mac_rsp->transaction_id);

		qdev->ndev->stats.tx_errors++;
		retval = -EIO;
		goto invalid_seg_count;
	}

	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
			 dma_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	tx_cb->seg_count--;
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->ndev->stats.tx_packets++;
	qdev->ndev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}

static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb;

	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}

/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to buffer two.  Buffer two is then sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->ndev->stats.rx_packets++;
	qdev->ndev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
			 dma_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	netif_receive_skb(skb);
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}

*/ 2074 pci_unmap_single(qdev->pdev, 2075 dma_unmap_addr(lrg_buf_cb2, mapaddr), 2076 dma_unmap_len(lrg_buf_cb2, maplen), 2077 PCI_DMA_FROMDEVICE); 2078 prefetch(skb2->data); 2079 2080 skb_checksum_none_assert(skb2); 2081 if (qdev->device_id == QL3022_DEVICE_ID) { 2082 /* 2083 * Copy the ethhdr from first buffer to second. This 2084 * is necessary for 3022 IP completions. 2085 */ 2086 skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, 2087 skb_push(skb2, size), size); 2088 } else { 2089 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); 2090 if (checksum & 2091 (IB_IP_IOCB_RSP_3032_ICE | 2092 IB_IP_IOCB_RSP_3032_CE)) { 2093 netdev_err(ndev, 2094 "%s: Bad checksum for this %s packet, checksum = %x\n", 2095 __func__, 2096 ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? 2097 "TCP" : "UDP"), checksum); 2098 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || 2099 (checksum & IB_IP_IOCB_RSP_3032_UDP && 2100 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { 2101 skb2->ip_summed = CHECKSUM_UNNECESSARY; 2102 } 2103 } 2104 skb2->protocol = eth_type_trans(skb2, qdev->ndev); 2105 2106 netif_receive_skb(skb2); 2107 ndev->stats.rx_packets++; 2108 ndev->stats.rx_bytes += length; 2109 lrg_buf_cb2->skb = NULL; 2110 2111 if (qdev->device_id == QL3022_DEVICE_ID) 2112 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); 2113 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); 2114 } 2115 2116 static int ql_tx_rx_clean(struct ql3_adapter *qdev, 2117 int *tx_cleaned, int *rx_cleaned, int work_to_do) 2118 { 2119 struct net_rsp_iocb *net_rsp; 2120 struct net_device *ndev = qdev->ndev; 2121 int work_done = 0; 2122 2123 /* While there are entries in the completion queue. */ 2124 while ((le32_to_cpu(*(qdev->prsp_producer_index)) != 2125 qdev->rsp_consumer_index) && (work_done < work_to_do)) { 2126 2127 net_rsp = qdev->rsp_current; 2128 rmb(); 2129 /* 2130 * Fix 4032 chip's undocumented "feature" where bit-8 is set 2131 * if the inbound completion is for a VLAN. 
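 * Masking the opcode with 0x7f below simply clears that extra bit so the
 * switch statement can match the base opcode values.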
2132 */
2133 if (qdev->device_id == QL3032_DEVICE_ID)
2134 net_rsp->opcode &= 0x7f;
2135 switch (net_rsp->opcode) {
2136
2137 case OPCODE_OB_MAC_IOCB_FN0:
2138 case OPCODE_OB_MAC_IOCB_FN2:
2139 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
2140 net_rsp);
2141 (*tx_cleaned)++;
2142 break;
2143
2144 case OPCODE_IB_MAC_IOCB:
2145 case OPCODE_IB_3032_MAC_IOCB:
2146 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
2147 net_rsp);
2148 (*rx_cleaned)++;
2149 break;
2150
2151 case OPCODE_IB_IP_IOCB:
2152 case OPCODE_IB_3032_IP_IOCB:
2153 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
2154 net_rsp);
2155 (*rx_cleaned)++;
2156 break;
2157 default: {
2158 u32 *tmp = (u32 *)net_rsp;
2159 netdev_err(ndev,
2160 "Hit default case, not handled!\n"
2161 " dropping the packet, opcode = %x\n"
2162 "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
2163 net_rsp->opcode,
2164 (unsigned long int)tmp[0],
2165 (unsigned long int)tmp[1],
2166 (unsigned long int)tmp[2],
2167 (unsigned long int)tmp[3]);
2168 }
2169 }
2170
2171 qdev->rsp_consumer_index++;
2172
2173 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
2174 qdev->rsp_consumer_index = 0;
2175 qdev->rsp_current = qdev->rsp_q_virt_addr;
2176 } else {
2177 qdev->rsp_current++;
2178 }
2179
2180 work_done = *tx_cleaned + *rx_cleaned;
2181 }
2182
2183 return work_done;
2184 }
2185
2186 static int ql_poll(struct napi_struct *napi, int budget)
2187 {
2188 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
2189 int rx_cleaned = 0, tx_cleaned = 0;
2190 unsigned long hw_flags;
2191 struct ql3xxx_port_registers __iomem *port_regs =
2192 qdev->mem_map_registers;
2193
2194 ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget);
2195
2196 if (tx_cleaned + rx_cleaned != budget) {
2197 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2198 __napi_complete(napi);
2199 ql_update_small_bufq_prod_index(qdev);
2200 ql_update_lrg_bufq_prod_index(qdev);
2201 writel(qdev->rsp_consumer_index,
2202 &port_regs->CommonRegs.rspQConsumerIndex);
2203 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2204
2205 ql_enable_interrupts(qdev);
2206 }
2207 return tx_cleaned + rx_cleaned;
2208 }
2209
2210 static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
2211 {
2212
2213 struct net_device *ndev = dev_id;
2214 struct ql3_adapter *qdev = netdev_priv(ndev);
2215 struct ql3xxx_port_registers __iomem *port_regs =
2216 qdev->mem_map_registers;
2217 u32 value;
2218 int handled = 1;
2219 u32 var;
2220
2221 value = ql_read_common_reg_l(qdev,
2222 &port_regs->CommonRegs.ispControlStatus);
2223
2224 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
2225 spin_lock(&qdev->adapter_lock);
2226 netif_stop_queue(qdev->ndev);
2227 netif_carrier_off(qdev->ndev);
2228 ql_disable_interrupts(qdev);
2229 qdev->port_link_state = LS_DOWN;
2230 set_bit(QL_RESET_ACTIVE, &qdev->flags);
2231
2232 if (value & ISP_CONTROL_FE) {
2233 /*
2234 * Chip Fatal Error.
2235 */
2236 var =
2237 ql_read_page0_reg_l(qdev,
2238 &port_regs->PortFatalErrStatus);
2239 netdev_warn(ndev,
2240 "Resetting chip. PortFatalErrStatus register = 0x%x\n",
2241 var);
2242 set_bit(QL_RESET_START, &qdev->flags);
2243 } else {
2244 /*
2245 * Soft Reset Requested.
2246 */
2247 set_bit(QL_RESET_PER_SCSI, &qdev->flags);
2248 netdev_err(ndev,
2249 "Another function issued a reset to the chip.
ISR value = %x\n", 2250 value); 2251 } 2252 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); 2253 spin_unlock(&qdev->adapter_lock); 2254 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2255 ql_disable_interrupts(qdev); 2256 if (likely(napi_schedule_prep(&qdev->napi))) 2257 __napi_schedule(&qdev->napi); 2258 } else 2259 return IRQ_NONE; 2260 2261 return IRQ_RETVAL(handled); 2262 } 2263 2264 /* 2265 * Get the total number of segments needed for the given number of fragments. 2266 * This is necessary because outbound address lists (OAL) will be used when 2267 * more than two frags are given. Each address list has 5 addr/len pairs. 2268 * The 5th pair in each OAL is used to point to the next OAL if more frags 2269 * are coming. That is why the frags:segment count ratio is not linear. 2270 */ 2271 static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) 2272 { 2273 if (qdev->device_id == QL3022_DEVICE_ID) 2274 return 1; 2275 2276 if (frags <= 2) 2277 return frags + 1; 2278 else if (frags <= 6) 2279 return frags + 2; 2280 else if (frags <= 10) 2281 return frags + 3; 2282 else if (frags <= 14) 2283 return frags + 4; 2284 else if (frags <= 18) 2285 return frags + 5; 2286 return -1; 2287 } 2288 2289 static void ql_hw_csum_setup(const struct sk_buff *skb, 2290 struct ob_mac_iocb_req *mac_iocb_ptr) 2291 { 2292 const struct iphdr *ip = ip_hdr(skb); 2293 2294 mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); 2295 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2296 2297 if (ip->protocol == IPPROTO_TCP) { 2298 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | 2299 OB_3032MAC_IOCB_REQ_IC; 2300 } else { 2301 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | 2302 OB_3032MAC_IOCB_REQ_IC; 2303 } 2304 2305 } 2306 2307 /* 2308 * Map the buffers for this transmit. 2309 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. 2310 */ 2311 static int ql_send_map(struct ql3_adapter *qdev, 2312 struct ob_mac_iocb_req *mac_iocb_ptr, 2313 struct ql_tx_buf_cb *tx_cb, 2314 struct sk_buff *skb) 2315 { 2316 struct oal *oal; 2317 struct oal_entry *oal_entry; 2318 int len = skb_headlen(skb); 2319 dma_addr_t map; 2320 int err; 2321 int completed_segs, i; 2322 int seg_cnt, seg = 0; 2323 int frag_cnt = (int)skb_shinfo(skb)->nr_frags; 2324 2325 seg_cnt = tx_cb->seg_count; 2326 /* 2327 * Map the skb buffer first. 2328 */ 2329 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); 2330 2331 err = pci_dma_mapping_error(qdev->pdev, map); 2332 if (err) { 2333 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", 2334 err); 2335 2336 return NETDEV_TX_BUSY; 2337 } 2338 2339 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2340 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2341 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2342 oal_entry->len = cpu_to_le32(len); 2343 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2344 dma_unmap_len_set(&tx_cb->map[seg], maplen, len); 2345 seg++; 2346 2347 if (seg_cnt == 1) { 2348 /* Terminate the last segment. */ 2349 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2350 return NETDEV_TX_OK; 2351 } 2352 oal = tx_cb->oal; 2353 for (completed_segs = 0; 2354 completed_segs < frag_cnt; 2355 completed_segs++, seg++) { 2356 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; 2357 oal_entry++; 2358 /* 2359 * Check for continuation requirements. 2360 * It's strange but necessary. 2361 * Continuation entry points to outbound address list. 
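 * The IOCB itself carries three addr/len pairs and each OAL carries five;
 * the last pair of each (seg 2 in the IOCB, then seg 7, 12 and 17 in the
 * chained OALs) is consumed by the continuation pointer when more entries
 * still follow, which is exactly what the magic seg values below test for.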
2362 */
2363 if ((seg == 2 && seg_cnt > 3) ||
2364 (seg == 7 && seg_cnt > 8) ||
2365 (seg == 12 && seg_cnt > 13) ||
2366 (seg == 17 && seg_cnt > 18)) {
2367 map = pci_map_single(qdev->pdev, oal,
2368 sizeof(struct oal),
2369 PCI_DMA_TODEVICE);
2370
2371 err = pci_dma_mapping_error(qdev->pdev, map);
2372 if (err) {
2373 netdev_err(qdev->ndev,
2374 "PCI mapping outbound address list with error: %d\n",
2375 err);
2376 goto map_error;
2377 }
2378
2379 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2380 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2381 oal_entry->len = cpu_to_le32(sizeof(struct oal) |
2382 OAL_CONT_ENTRY);
2383 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2384 dma_unmap_len_set(&tx_cb->map[seg], maplen,
2385 sizeof(struct oal));
2386 oal_entry = (struct oal_entry *)oal;
2387 oal++;
2388 seg++;
2389 }
2390
2391 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
2392 DMA_TO_DEVICE);
2393
2394 err = dma_mapping_error(&qdev->pdev->dev, map);
2395 if (err) {
2396 netdev_err(qdev->ndev,
2397 "PCI mapping frags failed with error: %d\n",
2398 err);
2399 goto map_error;
2400 }
2401
2402 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2403 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2404 oal_entry->len = cpu_to_le32(skb_frag_size(frag));
2405 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2406 dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag));
2407 }
2408 /* Terminate the last segment. */
2409 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2410 return NETDEV_TX_OK;
2411
2412 map_error:
2413 /* A PCI mapping failed, so we must back out: walk the OALs and
2414 * associated pages that have already been mapped and unmap them
2415 * to clean up properly.
2416 */
2417
2418 seg = 1;
2419 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
2420 oal = tx_cb->oal;
2421 for (i = 0; i < completed_segs; i++, seg++) {
2422 oal_entry++;
2423
2424 /*
2425 * Check for continuation requirements.
2426 * It's strange but necessary.
2427 */
2428
2429 if ((seg == 2 && seg_cnt > 3) ||
2430 (seg == 7 && seg_cnt > 8) ||
2431 (seg == 12 && seg_cnt > 13) ||
2432 (seg == 17 && seg_cnt > 18)) {
2433 pci_unmap_single(qdev->pdev,
2434 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2435 dma_unmap_len(&tx_cb->map[seg], maplen),
2436 PCI_DMA_TODEVICE);
2437 oal++;
2438 seg++;
2439 }
2440
2441 pci_unmap_page(qdev->pdev,
2442 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2443 dma_unmap_len(&tx_cb->map[seg], maplen),
2444 PCI_DMA_TODEVICE);
2445 }
2446
2447 pci_unmap_single(qdev->pdev,
2448 dma_unmap_addr(&tx_cb->map[0], mapaddr),
2449 dma_unmap_len(&tx_cb->map[0], maplen),
2450 PCI_DMA_TODEVICE);
2451
2452 return NETDEV_TX_BUSY;
2453
2454 }
2455
2456 /*
2457 * The difference between 3022 and 3032 sends:
2458 * 3022 only supports a simple single segment transmission.
2459 * 3032 supports checksumming and scatter/gather lists (fragments).
2460 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
2461 * in the IOCB plus a chain of outbound address lists (OAL) that
2462 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
2463 * will be used to point to an OAL when more ALP entries are required.
2464 * The IOCB is always the top of the chain followed by one or more
2465 * OALs (when necessary).
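 * A worked example, consistent with ql_get_seg_count() above: a 3032 skb
 * with 5 fragments needs 7 segments. The headlen buffer and fragment 0
 * occupy the first two IOCB pairs, the third IOCB pair chains to an OAL,
 * and fragments 1-4 fill four of that OAL's five pairs:
 *
 *   IOCB: [head][frag0][-> OAL]    OAL: [frag1][frag2][frag3][frag4][unused]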
2466 */ 2467 static netdev_tx_t ql3xxx_send(struct sk_buff *skb, 2468 struct net_device *ndev) 2469 { 2470 struct ql3_adapter *qdev = netdev_priv(ndev); 2471 struct ql3xxx_port_registers __iomem *port_regs = 2472 qdev->mem_map_registers; 2473 struct ql_tx_buf_cb *tx_cb; 2474 u32 tot_len = skb->len; 2475 struct ob_mac_iocb_req *mac_iocb_ptr; 2476 2477 if (unlikely(atomic_read(&qdev->tx_count) < 2)) 2478 return NETDEV_TX_BUSY; 2479 2480 tx_cb = &qdev->tx_buf[qdev->req_producer_index]; 2481 tx_cb->seg_count = ql_get_seg_count(qdev, 2482 skb_shinfo(skb)->nr_frags); 2483 if (tx_cb->seg_count == -1) { 2484 netdev_err(ndev, "%s: invalid segment count!\n", __func__); 2485 return NETDEV_TX_OK; 2486 } 2487 2488 mac_iocb_ptr = tx_cb->queue_entry; 2489 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); 2490 mac_iocb_ptr->opcode = qdev->mac_ob_opcode; 2491 mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; 2492 mac_iocb_ptr->flags |= qdev->mb_bit_mask; 2493 mac_iocb_ptr->transaction_id = qdev->req_producer_index; 2494 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); 2495 tx_cb->skb = skb; 2496 if (qdev->device_id == QL3032_DEVICE_ID && 2497 skb->ip_summed == CHECKSUM_PARTIAL) 2498 ql_hw_csum_setup(skb, mac_iocb_ptr); 2499 2500 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { 2501 netdev_err(ndev, "%s: Could not map the segments!\n", __func__); 2502 return NETDEV_TX_BUSY; 2503 } 2504 2505 wmb(); 2506 qdev->req_producer_index++; 2507 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) 2508 qdev->req_producer_index = 0; 2509 wmb(); 2510 ql_write_common_reg_l(qdev, 2511 &port_regs->CommonRegs.reqQProducerIndex, 2512 qdev->req_producer_index); 2513 2514 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, 2515 "tx queued, slot %d, len %d\n", 2516 qdev->req_producer_index, skb->len); 2517 2518 atomic_dec(&qdev->tx_count); 2519 return NETDEV_TX_OK; 2520 } 2521 2522 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) 2523 { 2524 qdev->req_q_size = 2525 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); 2526 2527 qdev->req_q_virt_addr = 2528 pci_alloc_consistent(qdev->pdev, 2529 (size_t) qdev->req_q_size, 2530 &qdev->req_q_phy_addr); 2531 2532 if ((qdev->req_q_virt_addr == NULL) || 2533 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { 2534 netdev_err(qdev->ndev, "reqQ failed\n"); 2535 return -ENOMEM; 2536 } 2537 2538 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); 2539 2540 qdev->rsp_q_virt_addr = 2541 pci_alloc_consistent(qdev->pdev, 2542 (size_t) qdev->rsp_q_size, 2543 &qdev->rsp_q_phy_addr); 2544 2545 if ((qdev->rsp_q_virt_addr == NULL) || 2546 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { 2547 netdev_err(qdev->ndev, "rspQ allocation failed\n"); 2548 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, 2549 qdev->req_q_virt_addr, 2550 qdev->req_q_phy_addr); 2551 return -ENOMEM; 2552 } 2553 2554 set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2555 2556 return 0; 2557 } 2558 2559 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) 2560 { 2561 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { 2562 netdev_info(qdev->ndev, "Already done\n"); 2563 return; 2564 } 2565 2566 pci_free_consistent(qdev->pdev, 2567 qdev->req_q_size, 2568 qdev->req_q_virt_addr, qdev->req_q_phy_addr); 2569 2570 qdev->req_q_virt_addr = NULL; 2571 2572 pci_free_consistent(qdev->pdev, 2573 qdev->rsp_q_size, 2574 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); 2575 2576 qdev->rsp_q_virt_addr = NULL; 2577 2578 
clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
2579 }
2580
2581 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2582 {
2583 /* Create Large Buffer Queue */
2584 qdev->lrg_buf_q_size =
2585 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2586 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2587 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2588 else
2589 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2590
2591 qdev->lrg_buf =
2592 kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb),
2593 GFP_KERNEL);
2594 if (qdev->lrg_buf == NULL) {
2595 netdev_err(qdev->ndev, "qdev->lrg_buf alloc failed\n");
2596 return -ENOMEM;
2597 }
2598
2599 qdev->lrg_buf_q_alloc_virt_addr =
2600 pci_alloc_consistent(qdev->pdev,
2601 qdev->lrg_buf_q_alloc_size,
2602 &qdev->lrg_buf_q_alloc_phy_addr);
2603
2604 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2605 netdev_err(qdev->ndev, "lBufQ failed\n");
2606 return -ENOMEM;
2607 }
2608 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2609 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2610
2611 /* Create Small Buffer Queue */
2612 qdev->small_buf_q_size =
2613 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2614 if (qdev->small_buf_q_size < PAGE_SIZE)
2615 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2616 else
2617 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2618
2619 qdev->small_buf_q_alloc_virt_addr =
2620 pci_alloc_consistent(qdev->pdev,
2621 qdev->small_buf_q_alloc_size,
2622 &qdev->small_buf_q_alloc_phy_addr);
2623
2624 if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2625 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
2626 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2627 qdev->lrg_buf_q_alloc_virt_addr,
2628 qdev->lrg_buf_q_alloc_phy_addr);
2629 return -ENOMEM;
2630 }
2631
2632 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2633 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2634 set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2635 return 0;
2636 }
2637
2638 static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2639 {
2640 if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
2641 netdev_info(qdev->ndev, "Already done\n");
2642 return;
2643 }
2644 kfree(qdev->lrg_buf);
2645 pci_free_consistent(qdev->pdev,
2646 qdev->lrg_buf_q_alloc_size,
2647 qdev->lrg_buf_q_alloc_virt_addr,
2648 qdev->lrg_buf_q_alloc_phy_addr);
2649
2650 qdev->lrg_buf_q_virt_addr = NULL;
2651
2652 pci_free_consistent(qdev->pdev,
2653 qdev->small_buf_q_alloc_size,
2654 qdev->small_buf_q_alloc_virt_addr,
2655 qdev->small_buf_q_alloc_phy_addr);
2656
2657 qdev->small_buf_q_virt_addr = NULL;
2658
2659 clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
2660 }
2661
2662 static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2663 {
2664 int i;
2665 struct bufq_addr_element *small_buf_q_entry;
2666
2667 /* Currently we allocate one chunk of memory and use it for small buffers */
2668 qdev->small_buf_total_size =
2669 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2670 QL_SMALL_BUFFER_SIZE);
2671
2672 qdev->small_buf_virt_addr =
2673 pci_alloc_consistent(qdev->pdev,
2674 qdev->small_buf_total_size,
2675 &qdev->small_buf_phy_addr);
2676
2677 if (qdev->small_buf_virt_addr == NULL) {
2678 netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
2679 return -ENOMEM;
2680 }
2681
2682 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2683 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2684
2685 small_buf_q_entry =
qdev->small_buf_q_virt_addr; 2686 2687 /* Initialize the small buffer queue. */ 2688 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { 2689 small_buf_q_entry->addr_high = 2690 cpu_to_le32(qdev->small_buf_phy_addr_high); 2691 small_buf_q_entry->addr_low = 2692 cpu_to_le32(qdev->small_buf_phy_addr_low + 2693 (i * QL_SMALL_BUFFER_SIZE)); 2694 small_buf_q_entry++; 2695 } 2696 qdev->small_buf_index = 0; 2697 set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); 2698 return 0; 2699 } 2700 2701 static void ql_free_small_buffers(struct ql3_adapter *qdev) 2702 { 2703 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { 2704 netdev_info(qdev->ndev, "Already done\n"); 2705 return; 2706 } 2707 if (qdev->small_buf_virt_addr != NULL) { 2708 pci_free_consistent(qdev->pdev, 2709 qdev->small_buf_total_size, 2710 qdev->small_buf_virt_addr, 2711 qdev->small_buf_phy_addr); 2712 2713 qdev->small_buf_virt_addr = NULL; 2714 } 2715 } 2716 2717 static void ql_free_large_buffers(struct ql3_adapter *qdev) 2718 { 2719 int i = 0; 2720 struct ql_rcv_buf_cb *lrg_buf_cb; 2721 2722 for (i = 0; i < qdev->num_large_buffers; i++) { 2723 lrg_buf_cb = &qdev->lrg_buf[i]; 2724 if (lrg_buf_cb->skb) { 2725 dev_kfree_skb(lrg_buf_cb->skb); 2726 pci_unmap_single(qdev->pdev, 2727 dma_unmap_addr(lrg_buf_cb, mapaddr), 2728 dma_unmap_len(lrg_buf_cb, maplen), 2729 PCI_DMA_FROMDEVICE); 2730 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2731 } else { 2732 break; 2733 } 2734 } 2735 } 2736 2737 static void ql_init_large_buffers(struct ql3_adapter *qdev) 2738 { 2739 int i; 2740 struct ql_rcv_buf_cb *lrg_buf_cb; 2741 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; 2742 2743 for (i = 0; i < qdev->num_large_buffers; i++) { 2744 lrg_buf_cb = &qdev->lrg_buf[i]; 2745 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; 2746 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; 2747 buf_addr_ele++; 2748 } 2749 qdev->lrg_buf_index = 0; 2750 qdev->lrg_buf_skb_check = 0; 2751 } 2752 2753 static int ql_alloc_large_buffers(struct ql3_adapter *qdev) 2754 { 2755 int i; 2756 struct ql_rcv_buf_cb *lrg_buf_cb; 2757 struct sk_buff *skb; 2758 dma_addr_t map; 2759 int err; 2760 2761 for (i = 0; i < qdev->num_large_buffers; i++) { 2762 skb = netdev_alloc_skb(qdev->ndev, 2763 qdev->lrg_buffer_len); 2764 if (unlikely(!skb)) { 2765 /* Better luck next round */ 2766 netdev_err(qdev->ndev, 2767 "large buff alloc failed for %d bytes at index %d\n", 2768 qdev->lrg_buffer_len * 2, i); 2769 ql_free_large_buffers(qdev); 2770 return -ENOMEM; 2771 } else { 2772 2773 lrg_buf_cb = &qdev->lrg_buf[i]; 2774 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2775 lrg_buf_cb->index = i; 2776 lrg_buf_cb->skb = skb; 2777 /* 2778 * We save some space to copy the ethhdr from first 2779 * buffer 2780 */ 2781 skb_reserve(skb, QL_HEADER_SPACE); 2782 map = pci_map_single(qdev->pdev, 2783 skb->data, 2784 qdev->lrg_buffer_len - 2785 QL_HEADER_SPACE, 2786 PCI_DMA_FROMDEVICE); 2787 2788 err = pci_dma_mapping_error(qdev->pdev, map); 2789 if (err) { 2790 netdev_err(qdev->ndev, 2791 "PCI mapping failed with error: %d\n", 2792 err); 2793 ql_free_large_buffers(qdev); 2794 return -ENOMEM; 2795 } 2796 2797 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 2798 dma_unmap_len_set(lrg_buf_cb, maplen, 2799 qdev->lrg_buffer_len - 2800 QL_HEADER_SPACE); 2801 lrg_buf_cb->buf_phy_addr_low = 2802 cpu_to_le32(LS_64BITS(map)); 2803 lrg_buf_cb->buf_phy_addr_high = 2804 cpu_to_le32(MS_64BITS(map)); 2805 } 2806 } 2807 return 0; 2808 } 2809 2810 static void 
ql_free_send_free_list(struct ql3_adapter *qdev) 2811 { 2812 struct ql_tx_buf_cb *tx_cb; 2813 int i; 2814 2815 tx_cb = &qdev->tx_buf[0]; 2816 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2817 kfree(tx_cb->oal); 2818 tx_cb->oal = NULL; 2819 tx_cb++; 2820 } 2821 } 2822 2823 static int ql_create_send_free_list(struct ql3_adapter *qdev) 2824 { 2825 struct ql_tx_buf_cb *tx_cb; 2826 int i; 2827 struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; 2828 2829 /* Create free list of transmit buffers */ 2830 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2831 2832 tx_cb = &qdev->tx_buf[i]; 2833 tx_cb->skb = NULL; 2834 tx_cb->queue_entry = req_q_curr; 2835 req_q_curr++; 2836 tx_cb->oal = kmalloc(512, GFP_KERNEL); 2837 if (tx_cb->oal == NULL) 2838 return -1; 2839 } 2840 return 0; 2841 } 2842 2843 static int ql_alloc_mem_resources(struct ql3_adapter *qdev) 2844 { 2845 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { 2846 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; 2847 qdev->lrg_buffer_len = NORMAL_MTU_SIZE; 2848 } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { 2849 /* 2850 * Bigger buffers, so less of them. 2851 */ 2852 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; 2853 qdev->lrg_buffer_len = JUMBO_MTU_SIZE; 2854 } else { 2855 netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n", 2856 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); 2857 return -ENOMEM; 2858 } 2859 qdev->num_large_buffers = 2860 qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; 2861 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; 2862 qdev->max_frame_size = 2863 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; 2864 2865 /* 2866 * First allocate a page of shared memory and use it for shadow 2867 * locations of Network Request Queue Consumer Address Register and 2868 * Network Completion Queue Producer Index Register 2869 */ 2870 qdev->shadow_reg_virt_addr = 2871 pci_alloc_consistent(qdev->pdev, 2872 PAGE_SIZE, &qdev->shadow_reg_phy_addr); 2873 2874 if (qdev->shadow_reg_virt_addr != NULL) { 2875 qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; 2876 qdev->req_consumer_index_phy_addr_high = 2877 MS_64BITS(qdev->shadow_reg_phy_addr); 2878 qdev->req_consumer_index_phy_addr_low = 2879 LS_64BITS(qdev->shadow_reg_phy_addr); 2880 2881 qdev->prsp_producer_index = 2882 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); 2883 qdev->rsp_producer_index_phy_addr_high = 2884 qdev->req_consumer_index_phy_addr_high; 2885 qdev->rsp_producer_index_phy_addr_low = 2886 qdev->req_consumer_index_phy_addr_low + 8; 2887 } else { 2888 netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); 2889 return -ENOMEM; 2890 } 2891 2892 if (ql_alloc_net_req_rsp_queues(qdev) != 0) { 2893 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); 2894 goto err_req_rsp; 2895 } 2896 2897 if (ql_alloc_buffer_queues(qdev) != 0) { 2898 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); 2899 goto err_buffer_queues; 2900 } 2901 2902 if (ql_alloc_small_buffers(qdev) != 0) { 2903 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); 2904 goto err_small_buffers; 2905 } 2906 2907 if (ql_alloc_large_buffers(qdev) != 0) { 2908 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); 2909 goto err_small_buffers; 2910 } 2911 2912 /* Initialize the large buffer queue. 
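 * ql_init_large_buffers() below publishes the DMA address of every receive
 * buffer mapped in ql_alloc_large_buffers() into the queue's address
 * elements.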
*/
2913 ql_init_large_buffers(qdev);
2914 if (ql_create_send_free_list(qdev))
2915 goto err_free_list;
2916
2917 qdev->rsp_current = qdev->rsp_q_virt_addr;
2918
2919 return 0;
2920 err_free_list:
2921 ql_free_send_free_list(qdev);
2922 err_small_buffers:
2923 ql_free_buffer_queues(qdev);
2924 err_buffer_queues:
2925 ql_free_net_req_rsp_queues(qdev);
2926 err_req_rsp:
2927 pci_free_consistent(qdev->pdev,
2928 PAGE_SIZE,
2929 qdev->shadow_reg_virt_addr,
2930 qdev->shadow_reg_phy_addr);
2931
2932 return -ENOMEM;
2933 }
2934
2935 static void ql_free_mem_resources(struct ql3_adapter *qdev)
2936 {
2937 ql_free_send_free_list(qdev);
2938 ql_free_large_buffers(qdev);
2939 ql_free_small_buffers(qdev);
2940 ql_free_buffer_queues(qdev);
2941 ql_free_net_req_rsp_queues(qdev);
2942 if (qdev->shadow_reg_virt_addr != NULL) {
2943 pci_free_consistent(qdev->pdev,
2944 PAGE_SIZE,
2945 qdev->shadow_reg_virt_addr,
2946 qdev->shadow_reg_phy_addr);
2947 qdev->shadow_reg_virt_addr = NULL;
2948 }
2949 }
2950
2951 static int ql_init_misc_registers(struct ql3_adapter *qdev)
2952 {
2953 struct ql3xxx_local_ram_registers __iomem *local_ram =
2954 (void __iomem *)qdev->mem_map_registers;
2955
2956 if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
2957 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2958 2) << 4))
2959 return -1;
2960
2961 ql_write_page2_reg(qdev,
2962 &local_ram->bufletSize, qdev->nvram_data.bufletSize);
2963
2964 ql_write_page2_reg(qdev,
2965 &local_ram->maxBufletCount,
2966 qdev->nvram_data.bufletCount);
2967
2968 ql_write_page2_reg(qdev,
2969 &local_ram->freeBufletThresholdLow,
2970 (qdev->nvram_data.tcpWindowThreshold25 << 16) |
2971 (qdev->nvram_data.tcpWindowThreshold0));
2972
2973 ql_write_page2_reg(qdev,
2974 &local_ram->freeBufletThresholdHigh,
2975 qdev->nvram_data.tcpWindowThreshold50);
2976
2977 ql_write_page2_reg(qdev,
2978 &local_ram->ipHashTableBase,
2979 (qdev->nvram_data.ipHashTableBaseHi << 16) |
2980 qdev->nvram_data.ipHashTableBaseLo);
2981 ql_write_page2_reg(qdev,
2982 &local_ram->ipHashTableCount,
2983 qdev->nvram_data.ipHashTableSize);
2984 ql_write_page2_reg(qdev,
2985 &local_ram->tcpHashTableBase,
2986 (qdev->nvram_data.tcpHashTableBaseHi << 16) |
2987 qdev->nvram_data.tcpHashTableBaseLo);
2988 ql_write_page2_reg(qdev,
2989 &local_ram->tcpHashTableCount,
2990 qdev->nvram_data.tcpHashTableSize);
2991 ql_write_page2_reg(qdev,
2992 &local_ram->ncbBase,
2993 (qdev->nvram_data.ncbTableBaseHi << 16) |
2994 qdev->nvram_data.ncbTableBaseLo);
2995 ql_write_page2_reg(qdev,
2996 &local_ram->maxNcbCount,
2997 qdev->nvram_data.ncbTableSize);
2998 ql_write_page2_reg(qdev,
2999 &local_ram->drbBase,
3000 (qdev->nvram_data.drbTableBaseHi << 16) |
3001 qdev->nvram_data.drbTableBaseLo);
3002 ql_write_page2_reg(qdev,
3003 &local_ram->maxDrbCount,
3004 qdev->nvram_data.drbTableSize);
3005 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
3006 return 0;
3007 }
3008
3009 static int ql_adapter_initialize(struct ql3_adapter *qdev)
3010 {
3011 u32 value;
3012 struct ql3xxx_port_registers __iomem *port_regs =
3013 qdev->mem_map_registers;
3014 u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
3015 struct ql3xxx_host_memory_registers __iomem *hmem_regs =
3016 (void __iomem *)port_regs;
3017 u32 delay = 10;
3018 int status = 0;
3019 unsigned long hw_flags = 0;
3020
3021 if (ql_mii_setup(qdev))
3022 return -1;
3023
3024 /* Bring the PHY out of reset */
3025 ql_write_common_reg(qdev, spir,
3026 (ISP_SERIAL_PORT_IF_WE |
3027 (ISP_SERIAL_PORT_IF_WE << 16)));
3028 /* Give the PHY time to come out of
reset. */ 3029 mdelay(100); 3030 qdev->port_link_state = LS_DOWN; 3031 netif_carrier_off(qdev->ndev); 3032 3033 /* V2 chip fix for ARS-39168. */ 3034 ql_write_common_reg(qdev, spir, 3035 (ISP_SERIAL_PORT_IF_SDE | 3036 (ISP_SERIAL_PORT_IF_SDE << 16))); 3037 3038 /* Request Queue Registers */ 3039 *((u32 *)(qdev->preq_consumer_index)) = 0; 3040 atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); 3041 qdev->req_producer_index = 0; 3042 3043 ql_write_page1_reg(qdev, 3044 &hmem_regs->reqConsumerIndexAddrHigh, 3045 qdev->req_consumer_index_phy_addr_high); 3046 ql_write_page1_reg(qdev, 3047 &hmem_regs->reqConsumerIndexAddrLow, 3048 qdev->req_consumer_index_phy_addr_low); 3049 3050 ql_write_page1_reg(qdev, 3051 &hmem_regs->reqBaseAddrHigh, 3052 MS_64BITS(qdev->req_q_phy_addr)); 3053 ql_write_page1_reg(qdev, 3054 &hmem_regs->reqBaseAddrLow, 3055 LS_64BITS(qdev->req_q_phy_addr)); 3056 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); 3057 3058 /* Response Queue Registers */ 3059 *((__le16 *) (qdev->prsp_producer_index)) = 0; 3060 qdev->rsp_consumer_index = 0; 3061 qdev->rsp_current = qdev->rsp_q_virt_addr; 3062 3063 ql_write_page1_reg(qdev, 3064 &hmem_regs->rspProducerIndexAddrHigh, 3065 qdev->rsp_producer_index_phy_addr_high); 3066 3067 ql_write_page1_reg(qdev, 3068 &hmem_regs->rspProducerIndexAddrLow, 3069 qdev->rsp_producer_index_phy_addr_low); 3070 3071 ql_write_page1_reg(qdev, 3072 &hmem_regs->rspBaseAddrHigh, 3073 MS_64BITS(qdev->rsp_q_phy_addr)); 3074 3075 ql_write_page1_reg(qdev, 3076 &hmem_regs->rspBaseAddrLow, 3077 LS_64BITS(qdev->rsp_q_phy_addr)); 3078 3079 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); 3080 3081 /* Large Buffer Queue */ 3082 ql_write_page1_reg(qdev, 3083 &hmem_regs->rxLargeQBaseAddrHigh, 3084 MS_64BITS(qdev->lrg_buf_q_phy_addr)); 3085 3086 ql_write_page1_reg(qdev, 3087 &hmem_regs->rxLargeQBaseAddrLow, 3088 LS_64BITS(qdev->lrg_buf_q_phy_addr)); 3089 3090 ql_write_page1_reg(qdev, 3091 &hmem_regs->rxLargeQLength, 3092 qdev->num_lbufq_entries); 3093 3094 ql_write_page1_reg(qdev, 3095 &hmem_regs->rxLargeBufferLength, 3096 qdev->lrg_buffer_len); 3097 3098 /* Small Buffer Queue */ 3099 ql_write_page1_reg(qdev, 3100 &hmem_regs->rxSmallQBaseAddrHigh, 3101 MS_64BITS(qdev->small_buf_q_phy_addr)); 3102 3103 ql_write_page1_reg(qdev, 3104 &hmem_regs->rxSmallQBaseAddrLow, 3105 LS_64BITS(qdev->small_buf_q_phy_addr)); 3106 3107 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); 3108 ql_write_page1_reg(qdev, 3109 &hmem_regs->rxSmallBufferLength, 3110 QL_SMALL_BUFFER_SIZE); 3111 3112 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; 3113 qdev->small_buf_release_cnt = 8; 3114 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; 3115 qdev->lrg_buf_release_cnt = 8; 3116 qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; 3117 qdev->small_buf_index = 0; 3118 qdev->lrg_buf_index = 0; 3119 qdev->lrg_buf_free_count = 0; 3120 qdev->lrg_buf_free_head = NULL; 3121 qdev->lrg_buf_free_tail = NULL; 3122 3123 ql_write_common_reg(qdev, 3124 &port_regs->CommonRegs. 3125 rxSmallQProducerIndex, 3126 qdev->small_buf_q_producer_index); 3127 ql_write_common_reg(qdev, 3128 &port_regs->CommonRegs. 3129 rxLargeQProducerIndex, 3130 qdev->lrg_buf_q_producer_index); 3131 3132 /* 3133 * Find out if the chip has already been initialized. If it has, then 3134 * we skip some of the initialization. 
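 * portStatus reports PORT_STATUS_IC once configuration has completed, so
 * if the other network function has already brought the chip up, the
 * one-time setup below is skipped.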
3135 */ 3136 clear_bit(QL_LINK_MASTER, &qdev->flags); 3137 value = ql_read_page0_reg(qdev, &port_regs->portStatus); 3138 if ((value & PORT_STATUS_IC) == 0) { 3139 3140 /* Chip has not been configured yet, so let it rip. */ 3141 if (ql_init_misc_registers(qdev)) { 3142 status = -1; 3143 goto out; 3144 } 3145 3146 value = qdev->nvram_data.tcpMaxWindowSize; 3147 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); 3148 3149 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; 3150 3151 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, 3152 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) 3153 * 2) << 13)) { 3154 status = -1; 3155 goto out; 3156 } 3157 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); 3158 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, 3159 (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << 3160 16) | (INTERNAL_CHIP_SD | 3161 INTERNAL_CHIP_WE))); 3162 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); 3163 } 3164 3165 if (qdev->mac_index) 3166 ql_write_page0_reg(qdev, 3167 &port_regs->mac1MaxFrameLengthReg, 3168 qdev->max_frame_size); 3169 else 3170 ql_write_page0_reg(qdev, 3171 &port_regs->mac0MaxFrameLengthReg, 3172 qdev->max_frame_size); 3173 3174 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 3175 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 3176 2) << 7)) { 3177 status = -1; 3178 goto out; 3179 } 3180 3181 PHY_Setup(qdev); 3182 ql_init_scan_mode(qdev); 3183 ql_get_phy_owner(qdev); 3184 3185 /* Load the MAC Configuration */ 3186 3187 /* Program lower 32 bits of the MAC address */ 3188 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3189 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); 3190 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3191 ((qdev->ndev->dev_addr[2] << 24) 3192 | (qdev->ndev->dev_addr[3] << 16) 3193 | (qdev->ndev->dev_addr[4] << 8) 3194 | qdev->ndev->dev_addr[5])); 3195 3196 /* Program top 16 bits of the MAC address */ 3197 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3198 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); 3199 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3200 ((qdev->ndev->dev_addr[0] << 8) 3201 | qdev->ndev->dev_addr[1])); 3202 3203 /* Enable Primary MAC */ 3204 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3205 ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | 3206 MAC_ADDR_INDIRECT_PTR_REG_PE)); 3207 3208 /* Clear Primary and Secondary IP addresses */ 3209 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, 3210 ((IP_ADDR_INDEX_REG_MASK << 16) | 3211 (qdev->mac_index << 2))); 3212 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); 3213 3214 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, 3215 ((IP_ADDR_INDEX_REG_MASK << 16) | 3216 ((qdev->mac_index << 2) + 1))); 3217 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); 3218 3219 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 3220 3221 /* Indicate Configuration Complete */ 3222 ql_write_page0_reg(qdev, 3223 &port_regs->portControl, 3224 ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); 3225 3226 do { 3227 value = ql_read_page0_reg(qdev, &port_regs->portStatus); 3228 if (value & PORT_STATUS_IC) 3229 break; 3230 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3231 msleep(500); 3232 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3233 } while (--delay); 3234 3235 if (delay == 0) { 3236 netdev_err(qdev->ndev, "Hw Initialization timeout\n"); 3237 status = -1; 3238 goto out; 3239 } 3240 3241 /* Enable Ethernet Function */ 3242 if (qdev->device_id == QL3032_DEVICE_ID) { 3243 value = 3244 (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | 
3245 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
3246 QL3032_PORT_CONTROL_ET);
3247 ql_write_page0_reg(qdev, &port_regs->functionControl,
3248 ((value << 16) | value));
3249 } else {
3250 value =
3251 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
3252 PORT_CONTROL_HH);
3253 ql_write_page0_reg(qdev, &port_regs->portControl,
3254 ((value << 16) | value));
3255 }
3256
3257
3258 out:
3259 return status;
3260 }
3261
3262 /*
3263 * Caller holds hw_lock.
3264 */
3265 static int ql_adapter_reset(struct ql3_adapter *qdev)
3266 {
3267 struct ql3xxx_port_registers __iomem *port_regs =
3268 qdev->mem_map_registers;
3269 int status = 0;
3270 u16 value;
3271 int max_wait_time;
3272
3273 set_bit(QL_RESET_ACTIVE, &qdev->flags);
3274 clear_bit(QL_RESET_DONE, &qdev->flags);
3275
3276 /*
3277 * Issue soft reset to chip.
3278 */
3279 netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
3280 ql_write_common_reg(qdev,
3281 &port_regs->CommonRegs.ispControlStatus,
3282 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
3283
3284 /* Wait up to 5 seconds for the reset to complete. */
3285 netdev_printk(KERN_DEBUG, qdev->ndev,
3286 "Wait up to 5 seconds for reset to complete\n");
3287
3288 /* Wait until the firmware tells us the Soft Reset is done */
3289 max_wait_time = 5;
3290 do {
3291 value =
3292 ql_read_common_reg(qdev,
3293 &port_regs->CommonRegs.ispControlStatus);
3294 if ((value & ISP_CONTROL_SR) == 0)
3295 break;
3296
3297 ssleep(1);
3298 } while ((--max_wait_time));
3299
3300 /*
3301 * Also, make sure that the Network Reset Interrupt bit has been
3302 * cleared after the soft reset has taken place.
3303 */
3304 value =
3305 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3306 if (value & ISP_CONTROL_RI) {
3307 netdev_printk(KERN_DEBUG, qdev->ndev,
3308 "clearing RI after reset\n");
3309 ql_write_common_reg(qdev,
3310 &port_regs->CommonRegs.
3311 ispControlStatus,
3312 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3313 }
3314
3315 if (max_wait_time == 0) {
3316 /* Issue Force Soft Reset */
3317 ql_write_common_reg(qdev,
3318 &port_regs->CommonRegs.
3319 ispControlStatus,
3320 ((ISP_CONTROL_FSR << 16) |
3321 ISP_CONTROL_FSR));
3322 /*
3323 * Wait until the firmware tells us the Force Soft Reset is
3324 * done
3325 */
3326 max_wait_time = 5;
3327 do {
3328 value = ql_read_common_reg(qdev,
3329 &port_regs->CommonRegs.
3330 ispControlStatus); 3331 if ((value & ISP_CONTROL_FSR) == 0) 3332 break; 3333 ssleep(1); 3334 } while ((--max_wait_time)); 3335 } 3336 if (max_wait_time == 0) 3337 status = 1; 3338 3339 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3340 set_bit(QL_RESET_DONE, &qdev->flags); 3341 return status; 3342 } 3343 3344 static void ql_set_mac_info(struct ql3_adapter *qdev) 3345 { 3346 struct ql3xxx_port_registers __iomem *port_regs = 3347 qdev->mem_map_registers; 3348 u32 value, port_status; 3349 u8 func_number; 3350 3351 /* Get the function number */ 3352 value = 3353 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); 3354 func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); 3355 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); 3356 switch (value & ISP_CONTROL_FN_MASK) { 3357 case ISP_CONTROL_FN0_NET: 3358 qdev->mac_index = 0; 3359 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3360 qdev->mb_bit_mask = FN0_MA_BITS_MASK; 3361 qdev->PHYAddr = PORT0_PHY_ADDRESS; 3362 if (port_status & PORT_STATUS_SM0) 3363 set_bit(QL_LINK_OPTICAL, &qdev->flags); 3364 else 3365 clear_bit(QL_LINK_OPTICAL, &qdev->flags); 3366 break; 3367 3368 case ISP_CONTROL_FN1_NET: 3369 qdev->mac_index = 1; 3370 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3371 qdev->mb_bit_mask = FN1_MA_BITS_MASK; 3372 qdev->PHYAddr = PORT1_PHY_ADDRESS; 3373 if (port_status & PORT_STATUS_SM1) 3374 set_bit(QL_LINK_OPTICAL, &qdev->flags); 3375 else 3376 clear_bit(QL_LINK_OPTICAL, &qdev->flags); 3377 break; 3378 3379 case ISP_CONTROL_FN0_SCSI: 3380 case ISP_CONTROL_FN1_SCSI: 3381 default: 3382 netdev_printk(KERN_DEBUG, qdev->ndev, 3383 "Invalid function number, ispControlStatus = 0x%x\n", 3384 value); 3385 break; 3386 } 3387 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; 3388 } 3389 3390 static void ql_display_dev_info(struct net_device *ndev) 3391 { 3392 struct ql3_adapter *qdev = netdev_priv(ndev); 3393 struct pci_dev *pdev = qdev->pdev; 3394 3395 netdev_info(ndev, 3396 "%s Adapter %d RevisionID %d found %s on PCI slot %d\n", 3397 DRV_NAME, qdev->index, qdev->chip_rev_id, 3398 qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", 3399 qdev->pci_slot); 3400 netdev_info(ndev, "%s Interface\n", 3401 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); 3402 3403 /* 3404 * Print PCI bus width/type. 3405 */ 3406 netdev_info(ndev, "Bus interface is %s %s\n", 3407 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), 3408 ((qdev->pci_x) ? 
"PCI-X" : "PCI")); 3409 3410 netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", 3411 qdev->mem_map_registers); 3412 netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); 3413 3414 netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); 3415 } 3416 3417 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) 3418 { 3419 struct net_device *ndev = qdev->ndev; 3420 int retval = 0; 3421 3422 netif_stop_queue(ndev); 3423 netif_carrier_off(ndev); 3424 3425 clear_bit(QL_ADAPTER_UP, &qdev->flags); 3426 clear_bit(QL_LINK_MASTER, &qdev->flags); 3427 3428 ql_disable_interrupts(qdev); 3429 3430 free_irq(qdev->pdev->irq, ndev); 3431 3432 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3433 netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); 3434 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3435 pci_disable_msi(qdev->pdev); 3436 } 3437 3438 del_timer_sync(&qdev->adapter_timer); 3439 3440 napi_disable(&qdev->napi); 3441 3442 if (do_reset) { 3443 int soft_reset; 3444 unsigned long hw_flags; 3445 3446 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3447 if (ql_wait_for_drvr_lock(qdev)) { 3448 soft_reset = ql_adapter_reset(qdev); 3449 if (soft_reset) { 3450 netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", 3451 qdev->index); 3452 } 3453 netdev_err(ndev, 3454 "Releasing driver lock via chip reset\n"); 3455 } else { 3456 netdev_err(ndev, 3457 "Could not acquire driver lock to do reset!\n"); 3458 retval = -1; 3459 } 3460 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3461 } 3462 ql_free_mem_resources(qdev); 3463 return retval; 3464 } 3465 3466 static int ql_adapter_up(struct ql3_adapter *qdev) 3467 { 3468 struct net_device *ndev = qdev->ndev; 3469 int err; 3470 unsigned long irq_flags = IRQF_SHARED; 3471 unsigned long hw_flags; 3472 3473 if (ql_alloc_mem_resources(qdev)) { 3474 netdev_err(ndev, "Unable to allocate buffers\n"); 3475 return -ENOMEM; 3476 } 3477 3478 if (qdev->msi) { 3479 if (pci_enable_msi(qdev->pdev)) { 3480 netdev_err(ndev, 3481 "User requested MSI, but MSI failed to initialize. 
Continuing without MSI.\n"); 3482 qdev->msi = 0; 3483 } else { 3484 netdev_info(ndev, "MSI Enabled...\n"); 3485 set_bit(QL_MSI_ENABLED, &qdev->flags); 3486 irq_flags &= ~IRQF_SHARED; 3487 } 3488 } 3489 3490 err = request_irq(qdev->pdev->irq, ql3xxx_isr, 3491 irq_flags, ndev->name, ndev); 3492 if (err) { 3493 netdev_err(ndev, 3494 "Failed to reserve interrupt %d - already in use\n", 3495 qdev->pdev->irq); 3496 goto err_irq; 3497 } 3498 3499 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3500 3501 err = ql_wait_for_drvr_lock(qdev); 3502 if (err) { 3503 err = ql_adapter_initialize(qdev); 3504 if (err) { 3505 netdev_err(ndev, "Unable to initialize adapter\n"); 3506 goto err_init; 3507 } 3508 netdev_err(ndev, "Releasing driver lock\n"); 3509 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3510 } else { 3511 netdev_err(ndev, "Could not acquire driver lock\n"); 3512 goto err_lock; 3513 } 3514 3515 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3516 3517 set_bit(QL_ADAPTER_UP, &qdev->flags); 3518 3519 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); 3520 3521 napi_enable(&qdev->napi); 3522 ql_enable_interrupts(qdev); 3523 return 0; 3524 3525 err_init: 3526 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3527 err_lock: 3528 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3529 free_irq(qdev->pdev->irq, ndev); 3530 err_irq: 3531 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3532 netdev_info(ndev, "calling pci_disable_msi()\n"); 3533 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3534 pci_disable_msi(qdev->pdev); 3535 } 3536 return err; 3537 } 3538 3539 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) 3540 { 3541 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { 3542 netdev_err(qdev->ndev, 3543 "Driver up/down cycle failed, closing device\n"); 3544 rtnl_lock(); 3545 dev_close(qdev->ndev); 3546 rtnl_unlock(); 3547 return -1; 3548 } 3549 return 0; 3550 } 3551 3552 static int ql3xxx_close(struct net_device *ndev) 3553 { 3554 struct ql3_adapter *qdev = netdev_priv(ndev); 3555 3556 /* 3557 * Wait for device to recover from a reset. 3558 * (Rarely happens, but possible.) 
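 * QL_ADAPTER_UP is cleared on entry to ql_adapter_down() and set again
 * when ql_adapter_up() finishes, so this poll simply rides out any reset
 * cycle still in flight.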
*/
3560 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3561 msleep(50);
3562
3563 ql_adapter_down(qdev, QL_DO_RESET);
3564 return 0;
3565 }
3566
3567 static int ql3xxx_open(struct net_device *ndev)
3568 {
3569 struct ql3_adapter *qdev = netdev_priv(ndev);
3570 return ql_adapter_up(qdev);
3571 }
3572
3573 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3574 {
3575 struct ql3_adapter *qdev = netdev_priv(ndev);
3576 struct ql3xxx_port_registers __iomem *port_regs =
3577 qdev->mem_map_registers;
3578 struct sockaddr *addr = p;
3579 unsigned long hw_flags;
3580
3581 if (netif_running(ndev))
3582 return -EBUSY;
3583
3584 if (!is_valid_ether_addr(addr->sa_data))
3585 return -EADDRNOTAVAIL;
3586
3587 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3588
3589 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3590 /* Program lower 32 bits of the MAC address */
3591 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3592 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3593 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3594 ((ndev->dev_addr[2] << 24) | (ndev->
3595 dev_addr[3] << 16) |
3596 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
3597
3598 /* Program top 16 bits of the MAC address */
3599 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3600 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3601 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3602 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
3603 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3604
3605 return 0;
3606 }
3607
3608 static void ql3xxx_tx_timeout(struct net_device *ndev)
3609 {
3610 struct ql3_adapter *qdev = netdev_priv(ndev);
3611
3612 netdev_err(ndev, "Resetting...\n");
3613 /*
3614 * Stop the queues, we've got a problem.
3615 */
3616 netif_stop_queue(ndev);
3617
3618 /*
3619 * Wake up the worker to process this event.
3620 */
3621 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
3622 }
3623
3624 static void ql_reset_work(struct work_struct *work)
3625 {
3626 struct ql3_adapter *qdev =
3627 container_of(work, struct ql3_adapter, reset_work.work);
3628 struct net_device *ndev = qdev->ndev;
3629 u32 value;
3630 struct ql_tx_buf_cb *tx_cb;
3631 int max_wait_time, i;
3632 struct ql3xxx_port_registers __iomem *port_regs =
3633 qdev->mem_map_registers;
3634 unsigned long hw_flags;
3635
3636 if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) || test_bit(QL_RESET_START, &qdev->flags)) {
3637 clear_bit(QL_LINK_MASTER, &qdev->flags);
3638
3639 /*
3640 * Loop through the active list and return the skb.
3641 */
3642 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3643 int j;
3644 tx_cb = &qdev->tx_buf[i];
3645 if (tx_cb->skb) {
3646 netdev_printk(KERN_DEBUG, ndev,
3647 "Freeing lost SKB\n");
3648 pci_unmap_single(qdev->pdev,
3649 dma_unmap_addr(&tx_cb->map[0],
3650 mapaddr),
3651 dma_unmap_len(&tx_cb->map[0], maplen),
3652 PCI_DMA_TODEVICE);
3653 for (j = 1; j < tx_cb->seg_count; j++) {
3654 pci_unmap_page(qdev->pdev,
3655 dma_unmap_addr(&tx_cb->map[j],
3656 mapaddr),
3657 dma_unmap_len(&tx_cb->map[j],
3658 maplen),
3659 PCI_DMA_TODEVICE);
3660 }
3661 dev_kfree_skb(tx_cb->skb);
3662 tx_cb->skb = NULL;
3663 }
3664 }
3665
3666 netdev_err(ndev, "Clearing NRI after reset\n");
3667 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3668 ql_write_common_reg(qdev,
3669 &port_regs->CommonRegs.
3670 ispControlStatus,
3671 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3672 /*
3673 * Wait for the Soft Reset to Complete.
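 * The loop below polls ISP_CONTROL_SR for up to ten seconds, dropping
 * hw_lock around each one-second sleep so that the interrupt handler is
 * not locked out in the meantime.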
3674 */ 3675 max_wait_time = 10; 3676 do { 3677 value = ql_read_common_reg(qdev, 3678 &port_regs->CommonRegs. 3679 3680 ispControlStatus); 3681 if ((value & ISP_CONTROL_SR) == 0) { 3682 netdev_printk(KERN_DEBUG, ndev, 3683 "reset completed\n"); 3684 break; 3685 } 3686 3687 if (value & ISP_CONTROL_RI) { 3688 netdev_printk(KERN_DEBUG, ndev, 3689 "clearing NRI after reset\n"); 3690 ql_write_common_reg(qdev, 3691 &port_regs-> 3692 CommonRegs. 3693 ispControlStatus, 3694 ((ISP_CONTROL_RI << 3695 16) | ISP_CONTROL_RI)); 3696 } 3697 3698 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3699 ssleep(1); 3700 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3701 } while (--max_wait_time); 3702 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3703 3704 if (value & ISP_CONTROL_SR) { 3705 3706 /* 3707 * Set the reset flags and clear the board again. 3708 * Nothing else to do... 3709 */ 3710 netdev_err(ndev, 3711 "Timed out waiting for reset to complete\n"); 3712 netdev_err(ndev, "Do a reset\n"); 3713 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3714 clear_bit(QL_RESET_START, &qdev->flags); 3715 ql_cycle_adapter(qdev, QL_DO_RESET); 3716 return; 3717 } 3718 3719 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3720 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3721 clear_bit(QL_RESET_START, &qdev->flags); 3722 ql_cycle_adapter(qdev, QL_NO_RESET); 3723 } 3724 } 3725 3726 static void ql_tx_timeout_work(struct work_struct *work) 3727 { 3728 struct ql3_adapter *qdev = 3729 container_of(work, struct ql3_adapter, tx_timeout_work.work); 3730 3731 ql_cycle_adapter(qdev, QL_DO_RESET); 3732 } 3733 3734 static void ql_get_board_info(struct ql3_adapter *qdev) 3735 { 3736 struct ql3xxx_port_registers __iomem *port_regs = 3737 qdev->mem_map_registers; 3738 u32 value; 3739 3740 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); 3741 3742 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); 3743 if (value & PORT_STATUS_64) 3744 qdev->pci_width = 64; 3745 else 3746 qdev->pci_width = 32; 3747 if (value & PORT_STATUS_X) 3748 qdev->pci_x = 1; 3749 else 3750 qdev->pci_x = 0; 3751 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); 3752 } 3753 3754 static void ql3xxx_timer(unsigned long ptr) 3755 { 3756 struct ql3_adapter *qdev = (struct ql3_adapter *)ptr; 3757 queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); 3758 } 3759 3760 static const struct net_device_ops ql3xxx_netdev_ops = { 3761 .ndo_open = ql3xxx_open, 3762 .ndo_start_xmit = ql3xxx_send, 3763 .ndo_stop = ql3xxx_close, 3764 .ndo_change_mtu = eth_change_mtu, 3765 .ndo_validate_addr = eth_validate_addr, 3766 .ndo_set_mac_address = ql3xxx_set_mac_address, 3767 .ndo_tx_timeout = ql3xxx_tx_timeout, 3768 }; 3769 3770 static int __devinit ql3xxx_probe(struct pci_dev *pdev, 3771 const struct pci_device_id *pci_entry) 3772 { 3773 struct net_device *ndev = NULL; 3774 struct ql3_adapter *qdev = NULL; 3775 static int cards_found; 3776 int uninitialized_var(pci_using_dac), err; 3777 3778 err = pci_enable_device(pdev); 3779 if (err) { 3780 pr_err("%s cannot enable PCI device\n", pci_name(pdev)); 3781 goto err_out; 3782 } 3783 3784 err = pci_request_regions(pdev, DRV_NAME); 3785 if (err) { 3786 pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); 3787 goto err_out_disable_pdev; 3788 } 3789 3790 pci_set_master(pdev); 3791 3792 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3793 pci_using_dac = 1; 3794 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3795 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 3796 pci_using_dac = 0; 3797 
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3798 } 3799 3800 if (err) { 3801 pr_err("%s no usable DMA configuration\n", pci_name(pdev)); 3802 goto err_out_free_regions; 3803 } 3804 3805 ndev = alloc_etherdev(sizeof(struct ql3_adapter)); 3806 if (!ndev) { 3807 pr_err("%s could not alloc etherdev\n", pci_name(pdev)); 3808 err = -ENOMEM; 3809 goto err_out_free_regions; 3810 } 3811 3812 SET_NETDEV_DEV(ndev, &pdev->dev); 3813 3814 pci_set_drvdata(pdev, ndev); 3815 3816 qdev = netdev_priv(ndev); 3817 qdev->index = cards_found; 3818 qdev->ndev = ndev; 3819 qdev->pdev = pdev; 3820 qdev->device_id = pci_entry->device; 3821 qdev->port_link_state = LS_DOWN; 3822 if (msi) 3823 qdev->msi = 1; 3824 3825 qdev->msg_enable = netif_msg_init(debug, default_msg); 3826 3827 if (pci_using_dac) 3828 ndev->features |= NETIF_F_HIGHDMA; 3829 if (qdev->device_id == QL3032_DEVICE_ID) 3830 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3831 3832 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); 3833 if (!qdev->mem_map_registers) { 3834 pr_err("%s: cannot map device registers\n", pci_name(pdev)); 3835 err = -EIO; 3836 goto err_out_free_ndev; 3837 } 3838 3839 spin_lock_init(&qdev->adapter_lock); 3840 spin_lock_init(&qdev->hw_lock); 3841 3842 /* Set driver entry points */ 3843 ndev->netdev_ops = &ql3xxx_netdev_ops; 3844 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); 3845 ndev->watchdog_timeo = 5 * HZ; 3846 3847 netif_napi_add(ndev, &qdev->napi, ql_poll, 64); 3848 3849 ndev->irq = pdev->irq; 3850 3851 /* make sure the EEPROM is good */ 3852 if (ql_get_nvram_params(qdev)) { 3853 pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", 3854 __func__, qdev->index); 3855 err = -EIO; 3856 goto err_out_iounmap; 3857 } 3858 3859 ql_set_mac_info(qdev); 3860 3861 /* Validate and set parameters */ 3862 if (qdev->mac_index) { 3863 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; 3864 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); 3865 } else { 3866 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; 3867 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); 3868 } 3869 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); 3870 3871 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; 3872 3873 /* Record PCI bus information. */ 3874 ql_get_board_info(qdev); 3875 3876 /* 3877 * Set the Maximum Memory Read Byte Count value. We do this to handle 3878 * jumbo frames. 
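 * The config-space write below applies only when ql_get_board_info()
 * detected a PCI-X bus; offset 0x4e and value 0x0036 are retained
 * verbatim from the original driver.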
3879 */ 3880 if (qdev->pci_x) 3881 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); 3882 3883 err = register_netdev(ndev); 3884 if (err) { 3885 pr_err("%s: cannot register net device\n", pci_name(pdev)); 3886 goto err_out_iounmap; 3887 } 3888 3889 /* we're going to reset, so assume we have no link for now */ 3890 3891 netif_carrier_off(ndev); 3892 netif_stop_queue(ndev); 3893 3894 qdev->workqueue = create_singlethread_workqueue(ndev->name); 3895 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); 3896 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); 3897 INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); 3898 3899 init_timer(&qdev->adapter_timer); 3900 qdev->adapter_timer.function = ql3xxx_timer; 3901 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ 3902 qdev->adapter_timer.data = (unsigned long)qdev; 3903 3904 if (!cards_found) { 3905 pr_alert("%s\n", DRV_STRING); 3906 pr_alert("Driver name: %s, Version: %s\n", 3907 DRV_NAME, DRV_VERSION); 3908 } 3909 ql_display_dev_info(ndev); 3910 3911 cards_found++; 3912 return 0; 3913 3914 err_out_iounmap: 3915 iounmap(qdev->mem_map_registers); 3916 err_out_free_ndev: 3917 free_netdev(ndev); 3918 err_out_free_regions: 3919 pci_release_regions(pdev); 3920 err_out_disable_pdev: 3921 pci_disable_device(pdev); 3922 pci_set_drvdata(pdev, NULL); 3923 err_out: 3924 return err; 3925 } 3926 3927 static void __devexit ql3xxx_remove(struct pci_dev *pdev) 3928 { 3929 struct net_device *ndev = pci_get_drvdata(pdev); 3930 struct ql3_adapter *qdev = netdev_priv(ndev); 3931 3932 unregister_netdev(ndev); 3933 3934 ql_disable_interrupts(qdev); 3935 3936 if (qdev->workqueue) { 3937 cancel_delayed_work(&qdev->reset_work); 3938 cancel_delayed_work(&qdev->tx_timeout_work); 3939 destroy_workqueue(qdev->workqueue); 3940 qdev->workqueue = NULL; 3941 } 3942 3943 iounmap(qdev->mem_map_registers); 3944 pci_release_regions(pdev); 3945 pci_set_drvdata(pdev, NULL); 3946 free_netdev(ndev); 3947 } 3948 3949 static struct pci_driver ql3xxx_driver = { 3950 3951 .name = DRV_NAME, 3952 .id_table = ql3xxx_pci_tbl, 3953 .probe = ql3xxx_probe, 3954 .remove = __devexit_p(ql3xxx_remove), 3955 }; 3956 3957 static int __init ql3xxx_init_module(void) 3958 { 3959 return pci_register_driver(&ql3xxx_driver); 3960 } 3961 3962 static void __exit ql3xxx_exit(void) 3963 { 3964 pci_unregister_driver(&ql3xxx_driver); 3965 } 3966 3967 module_init(ql3xxx_init_module); 3968 module_exit(ql3xxx_exit); 3969