/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c)  2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/prefetch.h>

#include "qla3xxx.h"

#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k5"

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

#define TIMED_OUT_MSG							\
"Timed out waiting for management port to get free before issuing command\n"

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static const struct pci_device_id ql3xxx_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);

/*
 *  These are the known PHYs which are used
 */
enum PHY_DEVICE_TYPE {
	PHY_TYPE_UNKNOWN   = 0,
	PHY_VITESSE_VSC8211,
	PHY_AGERE_ET1011C,
	MAX_PHY_DEV_TYPES
};

struct PHY_DEVICE_INFO {
	const enum PHY_DEVICE_TYPE	phyDevice;
	const u32		phyIdOUI;
	const u16		phyIdModel;
	const char		*name;
};

static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
	{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	{PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	{PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
};


/*
 * Caller must take hw_lock.
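 *
 * Note: the semaphore register is written with (sem_mask | sem_bits);
 * the upper half of sem_mask appears to act as a write-enable for the
 * field being claimed, and the masked read-back below tells us whether
 * this function actually won the semaphore.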
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}

/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	while (i < 10) {
		if (i)
			ssleep(1);

		if (ql_sem_lock(qdev,
				QL_DRVR_SEM_MASK,
				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				 * 2) << 1)) {
			netdev_printk(KERN_DEBUG, qdev->ndev,
				      "driver lock acquired\n");
			return 1;
		}
		i++;
	}

	netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
	return 0;
}

static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}

static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
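	/*
	 * The read above flushes the posted write; the short delay that
	 * follows is presumably there to satisfy the serial EEPROM
	 * interface timing used by the fm93c56a_* bit-bang helpers below.
	 */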
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));

}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));

}

static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;
	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty  */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netdev_err(qdev->ndev,
					   "PCI mapping failed with error: %d\n",
					   err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
			dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			dma_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}

static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;

	if (lrg_buf_cb != NULL) {
		qdev->lrg_buf_free_head = lrg_buf_cb->next;
		if (qdev->lrg_buf_free_head == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

/*
 * Caller holds hw_lock.
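 *
 * The fm93c56a_* helpers below bit-bang the serial (Microwire-style)
 * NVRAM part through serialPortInterfaceReg: select raises chip select,
 * fm93c56a_cmd clocks out a start bit, the opcode and the address on DO,
 * and fm93c56a_datain clocks the 16 data bits back in on DI.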
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, spir,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
	ql_write_nvram_reg(qdev, spir,
			   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			    AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit = (cmd & mask)
			? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/* If the bit changed, change the DO state to match */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
			: AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev, spir,
					   (ISP_NVRAM_MASK |
					    qdev->eeprom_cmd_data | dataBit));
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_RISE));
		ql_write_nvram_reg(qdev, spir,
				   (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				    dataBit | AUBURN_EEPROM_CLK_FALL));
		eepromAddr = eepromAddr << 1;
	}
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	__iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it. */
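	/*
	 * Each pass pulses CLK high then low and samples DI after the
	 * falling edge; bits arrive MSB first and are shifted into 'data'.
	 */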
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev, spir,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit = (ql_read_common_reg(qdev, spir) &
			   AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16)data;
}

/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}

static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
	__le16 *p = (__le16 *)ndev->dev_addr;
	p[0] = cpu_to_le16(addr[0]);
	p[1] = cpu_to_le16(addr[1]);
	p[2] = cpu_to_le16(addr[2]);
}

static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 10)) {
		pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
			   checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}

static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI,
	 * Set up to scan both devices
	 * The autoscan starts from the first register, completes
	 * the last one before rolling over to the first
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

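/*
 * The MAC can poll ("scan") PHY register 1 on its own once scan mode is
 * enabled above.  ql_mii_disable_scan_mode() below pauses that polling
 * around manual MII accesses and reports whether it was active so the
 * caller can restore it afterwards.
 */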
static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 ret;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		ret = 1;
	} else {
		/* Scan is disabled */
		ret = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return ret;
}

static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   phyAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16) temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);

}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}

static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
				       u16 phyIdReg0, u16 phyIdReg1)
{
	enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
	u32 oui;
	u16 model;
	int i;

	if (phyIdReg0 == 0xffff)
		return result;

	if (phyIdReg1 == 0xffff)
		return result;

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			netdev_info(qdev->ndev, "Phy: %s\n",
				    PHY_DEVICES[i].name);
			result = PHY_DEVICES[i].phyDevice;
			break;
		}
	}

	return result;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
			return 0;

		reg = (reg >> 8) & 3;
		break;
	}
	default:
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;

		reg = (((reg & 0x18) >> 3) & 3);
	}

	switch (reg) {
	case 2:
		return SPEED_1000;
	case 1:
		return SPEED_100;
	case 0:
		return SPEED_10;
	default:
		return -1;
	}
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C: {
		if (ql_mii_read_reg(qdev, 0x1A, &reg))
			return 0;

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;
	}
	case PHY_VITESSE_VSC8211:
	default: {
		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
			return 0;
		return (reg & PHY_AUX_DUPLEX_STAT) != 0;
	}
	}
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}

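/*
 * PHY_Setup() identifies the PHY behind this port by reading the two
 * PHY ID registers.  Reads of 0xffff mean nothing answered at the
 * default MII address, which is taken as a sign of the Agere part at
 * its alternate address; the IDs are then re-read there and the
 * Agere-specific initialization is flagged as needed.
 */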
static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16   reg1;
	u16   reg2;
	bool  agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/*  Determine the PHY we are using by reading the IDs */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
		return err;
	}

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
		return err;
	}

	/*  Check if we have an Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/* Determine which MII address we should be using
		   determined by the index of the card */
		if (qdev->mac_index == 0)
			miiAddr = MII_AGERE_ADDR_1;
		else
			miiAddr = MII_AGERE_ADDR_2;

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev,
				   "Could not read from reg PHY_ID_0_REG after Agere detected\n");
			return err;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if (err != 0) {
			netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
			return err;
		}

		/*  We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/*  Determine the particular PHY we have on board to apply
	    PHY specific initializations */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		netdev_err(qdev->ndev, "PHY is unknown\n");
		return -EIO;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
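 *
 * As with the other ql_mac_cfg_* helpers above, the bits to change are
 * replicated into the upper half-word of the value written; the MAC
 * config registers appear to treat that upper half as a write-enable
 * mask, so only the pause bits are modified here.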
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
		return 1;
	}
	netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
	return 0;
}

/*
 *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}

static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return 1;
	}

	return 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
			     "not link master\n");
		return 0;
	}

	netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
	return 1;
}

static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C)
		ql_mii_write_reg(qdev, 0x13, 0x0000);
					/* turn off external loopback */

	if (qdev->mac_index == 0)
		portConfiguration =
			qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration =
			qdev->nvram_data.macCfg_port1.portConfiguration;

	/*  Some HBAs in the field are set to 0 and they need to
	    be reinterpreted with a default value */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
		if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
			reg |= PHY_GIG_ADV_1000F;
		else
			reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
		reg |= 1;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);
	PHY_Setup(qdev);
	ql_phy_start_neg_ex(qdev);
}

/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck)
		linkState = LS_UP;
	else
		linkState = LS_DOWN;

	return linkState;
}

static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7)) {
		netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			 2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Configuring link\n");
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed
					(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev,
					    ql_is_link_full_dup
					    (qdev));
			ql_mac_cfg_pause(qdev,
					 ql_is_neg_pause
					 (qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Enabling mac\n");
			ql_mac_enable(qdev, 1);
		}

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		netif_info(qdev, link, qdev->ndev,
			   "Link is up at %d Mbps, %s duplex\n",
			   ql_get_link_speed(qdev),
			   ql_is_link_full_dup(qdev) ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
				     "Remote error detected. Calling ql_port_start()\n");
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
1509 */ 1510 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1511 if (ql_port_start(qdev)) /* Restart port */ 1512 return -1; 1513 return 0; 1514 } 1515 } 1516 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1517 return 0; 1518 } 1519 1520 static void ql_link_state_machine_work(struct work_struct *work) 1521 { 1522 struct ql3_adapter *qdev = 1523 container_of(work, struct ql3_adapter, link_state_work.work); 1524 1525 u32 curr_link_state; 1526 unsigned long hw_flags; 1527 1528 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1529 1530 curr_link_state = ql_get_link_state(qdev); 1531 1532 if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) { 1533 netif_info(qdev, link, qdev->ndev, 1534 "Reset in progress, skip processing link state\n"); 1535 1536 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1537 1538 /* Restart timer on 2 second interval. */ 1539 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); 1540 1541 return; 1542 } 1543 1544 switch (qdev->port_link_state) { 1545 default: 1546 if (test_bit(QL_LINK_MASTER, &qdev->flags)) 1547 ql_port_start(qdev); 1548 qdev->port_link_state = LS_DOWN; 1549 /* Fall Through */ 1550 1551 case LS_DOWN: 1552 if (curr_link_state == LS_UP) { 1553 netif_info(qdev, link, qdev->ndev, "Link is up\n"); 1554 if (ql_is_auto_neg_complete(qdev)) 1555 ql_finish_auto_neg(qdev); 1556 1557 if (qdev->port_link_state == LS_UP) 1558 ql_link_down_detect_clear(qdev); 1559 1560 qdev->port_link_state = LS_UP; 1561 } 1562 break; 1563 1564 case LS_UP: 1565 /* 1566 * See if the link is currently down or went down and came 1567 * back up 1568 */ 1569 if (curr_link_state == LS_DOWN) { 1570 netif_info(qdev, link, qdev->ndev, "Link is down\n"); 1571 qdev->port_link_state = LS_DOWN; 1572 } 1573 if (ql_link_down_detect(qdev)) 1574 qdev->port_link_state = LS_DOWN; 1575 break; 1576 } 1577 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1578 1579 /* Restart timer on 2 second interval. */ 1580 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); 1581 } 1582 1583 /* 1584 * Caller must take hw_lock and QL_PHY_GIO_SEM. 1585 */ 1586 static void ql_get_phy_owner(struct ql3_adapter *qdev) 1587 { 1588 if (ql_this_adapter_controls_port(qdev)) 1589 set_bit(QL_LINK_MASTER, &qdev->flags); 1590 else 1591 clear_bit(QL_LINK_MASTER, &qdev->flags); 1592 } 1593 1594 /* 1595 * Caller must take hw_lock and QL_PHY_GIO_SEM. 1596 */ 1597 static void ql_init_scan_mode(struct ql3_adapter *qdev) 1598 { 1599 ql_mii_enable_scan_mode(qdev); 1600 1601 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { 1602 if (ql_this_adapter_controls_port(qdev)) 1603 ql_petbi_init_ex(qdev); 1604 } else { 1605 if (ql_this_adapter_controls_port(qdev)) 1606 ql_phy_init_ex(qdev); 1607 } 1608 } 1609 1610 /* 1611 * MII_Setup needs to be called before taking the PHY out of reset 1612 * so that the management interface clock speed can be set properly. 1613 * It would be better if we had a way to disable MDC until after the 1614 * PHY is out of reset, but we don't have that capability. 
1615 */ 1616 static int ql_mii_setup(struct ql3_adapter *qdev) 1617 { 1618 u32 reg; 1619 struct ql3xxx_port_registers __iomem *port_regs = 1620 qdev->mem_map_registers; 1621 1622 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1623 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 1624 2) << 7)) 1625 return -1; 1626 1627 if (qdev->device_id == QL3032_DEVICE_ID) 1628 ql_write_page0_reg(qdev, 1629 &port_regs->macMIIMgmtControlReg, 0x0f00000); 1630 1631 /* Divide 125MHz clock by 28 to meet PHY timing requirements */ 1632 reg = MAC_MII_CONTROL_CLK_SEL_DIV28; 1633 1634 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, 1635 reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16)); 1636 1637 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1638 return 0; 1639 } 1640 1641 #define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \ 1642 SUPPORTED_FIBRE | \ 1643 SUPPORTED_Autoneg) 1644 #define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \ 1645 SUPPORTED_10baseT_Full | \ 1646 SUPPORTED_100baseT_Half | \ 1647 SUPPORTED_100baseT_Full | \ 1648 SUPPORTED_1000baseT_Half | \ 1649 SUPPORTED_1000baseT_Full | \ 1650 SUPPORTED_Autoneg | \ 1651 SUPPORTED_TP) \ 1652 1653 static u32 ql_supported_modes(struct ql3_adapter *qdev) 1654 { 1655 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) 1656 return SUPPORTED_OPTICAL_MODES; 1657 1658 return SUPPORTED_TP_MODES; 1659 } 1660 1661 static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) 1662 { 1663 int status; 1664 unsigned long hw_flags; 1665 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1666 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1667 (QL_RESOURCE_BITS_BASE_CODE | 1668 (qdev->mac_index) * 2) << 7)) { 1669 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1670 return 0; 1671 } 1672 status = ql_is_auto_cfg(qdev); 1673 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1674 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1675 return status; 1676 } 1677 1678 static u32 ql_get_speed(struct ql3_adapter *qdev) 1679 { 1680 u32 status; 1681 unsigned long hw_flags; 1682 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1683 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1684 (QL_RESOURCE_BITS_BASE_CODE | 1685 (qdev->mac_index) * 2) << 7)) { 1686 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1687 return 0; 1688 } 1689 status = ql_get_link_speed(qdev); 1690 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1691 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1692 return status; 1693 } 1694 1695 static int ql_get_full_dup(struct ql3_adapter *qdev) 1696 { 1697 int status; 1698 unsigned long hw_flags; 1699 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 1700 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 1701 (QL_RESOURCE_BITS_BASE_CODE | 1702 (qdev->mac_index) * 2) << 7)) { 1703 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1704 return 0; 1705 } 1706 status = ql_is_link_full_dup(qdev); 1707 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 1708 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 1709 return status; 1710 } 1711 1712 static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) 1713 { 1714 struct ql3_adapter *qdev = netdev_priv(ndev); 1715 1716 ecmd->transceiver = XCVR_INTERNAL; 1717 ecmd->supported = ql_supported_modes(qdev); 1718 1719 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { 1720 ecmd->port = PORT_FIBRE; 1721 } else { 1722 ecmd->port = PORT_TP; 1723 ecmd->phy_address = qdev->PHYAddr; 1724 } 1725 ecmd->advertising = ql_supported_modes(qdev); 1726 ecmd->autoneg = ql_get_auto_cfg_status(qdev); 1727 ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev)); 1728 ecmd->duplex = 
ql_get_full_dup(qdev); 1729 return 0; 1730 } 1731 1732 static void ql_get_drvinfo(struct net_device *ndev, 1733 struct ethtool_drvinfo *drvinfo) 1734 { 1735 struct ql3_adapter *qdev = netdev_priv(ndev); 1736 strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver)); 1737 strlcpy(drvinfo->version, ql3xxx_driver_version, 1738 sizeof(drvinfo->version)); 1739 strlcpy(drvinfo->bus_info, pci_name(qdev->pdev), 1740 sizeof(drvinfo->bus_info)); 1741 drvinfo->regdump_len = 0; 1742 drvinfo->eedump_len = 0; 1743 } 1744 1745 static u32 ql_get_msglevel(struct net_device *ndev) 1746 { 1747 struct ql3_adapter *qdev = netdev_priv(ndev); 1748 return qdev->msg_enable; 1749 } 1750 1751 static void ql_set_msglevel(struct net_device *ndev, u32 value) 1752 { 1753 struct ql3_adapter *qdev = netdev_priv(ndev); 1754 qdev->msg_enable = value; 1755 } 1756 1757 static void ql_get_pauseparam(struct net_device *ndev, 1758 struct ethtool_pauseparam *pause) 1759 { 1760 struct ql3_adapter *qdev = netdev_priv(ndev); 1761 struct ql3xxx_port_registers __iomem *port_regs = 1762 qdev->mem_map_registers; 1763 1764 u32 reg; 1765 if (qdev->mac_index == 0) 1766 reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); 1767 else 1768 reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); 1769 1770 pause->autoneg = ql_get_auto_cfg_status(qdev); 1771 pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2; 1772 pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1; 1773 } 1774 1775 static const struct ethtool_ops ql3xxx_ethtool_ops = { 1776 .get_settings = ql_get_settings, 1777 .get_drvinfo = ql_get_drvinfo, 1778 .get_link = ethtool_op_get_link, 1779 .get_msglevel = ql_get_msglevel, 1780 .set_msglevel = ql_set_msglevel, 1781 .get_pauseparam = ql_get_pauseparam, 1782 }; 1783 1784 static int ql_populate_free_queue(struct ql3_adapter *qdev) 1785 { 1786 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; 1787 dma_addr_t map; 1788 int err; 1789 1790 while (lrg_buf_cb) { 1791 if (!lrg_buf_cb->skb) { 1792 lrg_buf_cb->skb = 1793 netdev_alloc_skb(qdev->ndev, 1794 qdev->lrg_buffer_len); 1795 if (unlikely(!lrg_buf_cb->skb)) { 1796 netdev_printk(KERN_DEBUG, qdev->ndev, 1797 "Failed netdev_alloc_skb()\n"); 1798 break; 1799 } else { 1800 /* 1801 * We save some space to copy the ethhdr from 1802 * first buffer 1803 */ 1804 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE); 1805 map = pci_map_single(qdev->pdev, 1806 lrg_buf_cb->skb->data, 1807 qdev->lrg_buffer_len - 1808 QL_HEADER_SPACE, 1809 PCI_DMA_FROMDEVICE); 1810 1811 err = pci_dma_mapping_error(qdev->pdev, map); 1812 if (err) { 1813 netdev_err(qdev->ndev, 1814 "PCI mapping failed with error: %d\n", 1815 err); 1816 dev_kfree_skb(lrg_buf_cb->skb); 1817 lrg_buf_cb->skb = NULL; 1818 break; 1819 } 1820 1821 1822 lrg_buf_cb->buf_phy_addr_low = 1823 cpu_to_le32(LS_64BITS(map)); 1824 lrg_buf_cb->buf_phy_addr_high = 1825 cpu_to_le32(MS_64BITS(map)); 1826 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 1827 dma_unmap_len_set(lrg_buf_cb, maplen, 1828 qdev->lrg_buffer_len - 1829 QL_HEADER_SPACE); 1830 --qdev->lrg_buf_skb_check; 1831 if (!qdev->lrg_buf_skb_check) 1832 return 1; 1833 } 1834 } 1835 lrg_buf_cb = lrg_buf_cb->next; 1836 } 1837 return 0; 1838 } 1839 1840 /* 1841 * Caller holds hw_lock. 
1842 */ 1843 static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) 1844 { 1845 struct ql3xxx_port_registers __iomem *port_regs = 1846 qdev->mem_map_registers; 1847 1848 if (qdev->small_buf_release_cnt >= 16) { 1849 while (qdev->small_buf_release_cnt >= 16) { 1850 qdev->small_buf_q_producer_index++; 1851 1852 if (qdev->small_buf_q_producer_index == 1853 NUM_SBUFQ_ENTRIES) 1854 qdev->small_buf_q_producer_index = 0; 1855 qdev->small_buf_release_cnt -= 8; 1856 } 1857 wmb(); 1858 writel(qdev->small_buf_q_producer_index, 1859 &port_regs->CommonRegs.rxSmallQProducerIndex); 1860 } 1861 } 1862 1863 /* 1864 * Caller holds hw_lock. 1865 */ 1866 static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) 1867 { 1868 struct bufq_addr_element *lrg_buf_q_ele; 1869 int i; 1870 struct ql_rcv_buf_cb *lrg_buf_cb; 1871 struct ql3xxx_port_registers __iomem *port_regs = 1872 qdev->mem_map_registers; 1873 1874 if ((qdev->lrg_buf_free_count >= 8) && 1875 (qdev->lrg_buf_release_cnt >= 16)) { 1876 1877 if (qdev->lrg_buf_skb_check) 1878 if (!ql_populate_free_queue(qdev)) 1879 return; 1880 1881 lrg_buf_q_ele = qdev->lrg_buf_next_free; 1882 1883 while ((qdev->lrg_buf_release_cnt >= 16) && 1884 (qdev->lrg_buf_free_count >= 8)) { 1885 1886 for (i = 0; i < 8; i++) { 1887 lrg_buf_cb = 1888 ql_get_from_lrg_buf_free_list(qdev); 1889 lrg_buf_q_ele->addr_high = 1890 lrg_buf_cb->buf_phy_addr_high; 1891 lrg_buf_q_ele->addr_low = 1892 lrg_buf_cb->buf_phy_addr_low; 1893 lrg_buf_q_ele++; 1894 1895 qdev->lrg_buf_release_cnt--; 1896 } 1897 1898 qdev->lrg_buf_q_producer_index++; 1899 1900 if (qdev->lrg_buf_q_producer_index == 1901 qdev->num_lbufq_entries) 1902 qdev->lrg_buf_q_producer_index = 0; 1903 1904 if (qdev->lrg_buf_q_producer_index == 1905 (qdev->num_lbufq_entries - 1)) { 1906 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr; 1907 } 1908 } 1909 wmb(); 1910 qdev->lrg_buf_next_free = lrg_buf_q_ele; 1911 writel(qdev->lrg_buf_q_producer_index, 1912 &port_regs->CommonRegs.rxLargeQProducerIndex); 1913 } 1914 } 1915 1916 static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, 1917 struct ob_mac_iocb_rsp *mac_rsp) 1918 { 1919 struct ql_tx_buf_cb *tx_cb; 1920 int i; 1921 1922 if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { 1923 netdev_warn(qdev->ndev, 1924 "Frame too short but it was padded and sent\n"); 1925 } 1926 1927 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; 1928 1929 /* Check the transmit response flags for any errors */ 1930 if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) { 1931 netdev_err(qdev->ndev, 1932 "Frame too short to be legal, frame not sent\n"); 1933 1934 qdev->ndev->stats.tx_errors++; 1935 goto frame_not_sent; 1936 } 1937 1938 if (tx_cb->seg_count == 0) { 1939 netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n", 1940 mac_rsp->transaction_id); 1941 1942 qdev->ndev->stats.tx_errors++; 1943 goto invalid_seg_count; 1944 } 1945 1946 pci_unmap_single(qdev->pdev, 1947 dma_unmap_addr(&tx_cb->map[0], mapaddr), 1948 dma_unmap_len(&tx_cb->map[0], maplen), 1949 PCI_DMA_TODEVICE); 1950 tx_cb->seg_count--; 1951 if (tx_cb->seg_count) { 1952 for (i = 1; i < tx_cb->seg_count; i++) { 1953 pci_unmap_page(qdev->pdev, 1954 dma_unmap_addr(&tx_cb->map[i], 1955 mapaddr), 1956 dma_unmap_len(&tx_cb->map[i], maplen), 1957 PCI_DMA_TODEVICE); 1958 } 1959 } 1960 qdev->ndev->stats.tx_packets++; 1961 qdev->ndev->stats.tx_bytes += tx_cb->skb->len; 1962 1963 frame_not_sent: 1964 dev_kfree_skb_irq(tx_cb->skb); 1965 tx_cb->skb = NULL; 1966 1967 invalid_seg_count: 1968 atomic_inc(&qdev->tx_count); 1969 } 1970 1971 static void 
ql_get_sbuf(struct ql3_adapter *qdev) 1972 { 1973 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS) 1974 qdev->small_buf_index = 0; 1975 qdev->small_buf_release_cnt++; 1976 } 1977 1978 static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev) 1979 { 1980 struct ql_rcv_buf_cb *lrg_buf_cb = NULL; 1981 lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index]; 1982 qdev->lrg_buf_release_cnt++; 1983 if (++qdev->lrg_buf_index == qdev->num_large_buffers) 1984 qdev->lrg_buf_index = 0; 1985 return lrg_buf_cb; 1986 } 1987 1988 /* 1989 * The difference between 3022 and 3032 for inbound completions: 1990 * 3022 uses two buffers per completion. The first buffer contains 1991 * (some) header info, the second the remainder of the headers plus 1992 * the data. For this chip we reserve some space at the top of the 1993 * receive buffer so that the header info in buffer one can be 1994 * prepended to the buffer two. Buffer two is the sent up while 1995 * buffer one is returned to the hardware to be reused. 1996 * 3032 receives all of it's data and headers in one buffer for a 1997 * simpler process. 3032 also supports checksum verification as 1998 * can be seen in ql_process_macip_rx_intr(). 1999 */ 2000 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, 2001 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr) 2002 { 2003 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; 2004 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; 2005 struct sk_buff *skb; 2006 u16 length = le16_to_cpu(ib_mac_rsp_ptr->length); 2007 2008 /* 2009 * Get the inbound address list (small buffer). 2010 */ 2011 ql_get_sbuf(qdev); 2012 2013 if (qdev->device_id == QL3022_DEVICE_ID) 2014 lrg_buf_cb1 = ql_get_lbuf(qdev); 2015 2016 /* start of second buffer */ 2017 lrg_buf_cb2 = ql_get_lbuf(qdev); 2018 skb = lrg_buf_cb2->skb; 2019 2020 qdev->ndev->stats.rx_packets++; 2021 qdev->ndev->stats.rx_bytes += length; 2022 2023 skb_put(skb, length); 2024 pci_unmap_single(qdev->pdev, 2025 dma_unmap_addr(lrg_buf_cb2, mapaddr), 2026 dma_unmap_len(lrg_buf_cb2, maplen), 2027 PCI_DMA_FROMDEVICE); 2028 prefetch(skb->data); 2029 skb_checksum_none_assert(skb); 2030 skb->protocol = eth_type_trans(skb, qdev->ndev); 2031 2032 netif_receive_skb(skb); 2033 lrg_buf_cb2->skb = NULL; 2034 2035 if (qdev->device_id == QL3022_DEVICE_ID) 2036 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); 2037 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); 2038 } 2039 2040 static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, 2041 struct ib_ip_iocb_rsp *ib_ip_rsp_ptr) 2042 { 2043 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL; 2044 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL; 2045 struct sk_buff *skb1 = NULL, *skb2; 2046 struct net_device *ndev = qdev->ndev; 2047 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length); 2048 u16 size = 0; 2049 2050 /* 2051 * Get the inbound address list (small buffer). 2052 */ 2053 2054 ql_get_sbuf(qdev); 2055 2056 if (qdev->device_id == QL3022_DEVICE_ID) { 2057 /* start of first buffer on 3022 */ 2058 lrg_buf_cb1 = ql_get_lbuf(qdev); 2059 skb1 = lrg_buf_cb1->skb; 2060 size = ETH_HLEN; 2061 if (*((u16 *) skb1->data) != 0xFFFF) 2062 size += VLAN_ETH_HLEN - ETH_HLEN; 2063 } 2064 2065 /* start of second buffer */ 2066 lrg_buf_cb2 = ql_get_lbuf(qdev); 2067 skb2 = lrg_buf_cb2->skb; 2068 2069 skb_put(skb2, length); /* Just the second buffer length here. 
*/ 2070 pci_unmap_single(qdev->pdev, 2071 dma_unmap_addr(lrg_buf_cb2, mapaddr), 2072 dma_unmap_len(lrg_buf_cb2, maplen), 2073 PCI_DMA_FROMDEVICE); 2074 prefetch(skb2->data); 2075 2076 skb_checksum_none_assert(skb2); 2077 if (qdev->device_id == QL3022_DEVICE_ID) { 2078 /* 2079 * Copy the ethhdr from first buffer to second. This 2080 * is necessary for 3022 IP completions. 2081 */ 2082 skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN, 2083 skb_push(skb2, size), size); 2084 } else { 2085 u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum); 2086 if (checksum & 2087 (IB_IP_IOCB_RSP_3032_ICE | 2088 IB_IP_IOCB_RSP_3032_CE)) { 2089 netdev_err(ndev, 2090 "%s: Bad checksum for this %s packet, checksum = %x\n", 2091 __func__, 2092 ((checksum & IB_IP_IOCB_RSP_3032_TCP) ? 2093 "TCP" : "UDP"), checksum); 2094 } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) || 2095 (checksum & IB_IP_IOCB_RSP_3032_UDP && 2096 !(checksum & IB_IP_IOCB_RSP_3032_NUC))) { 2097 skb2->ip_summed = CHECKSUM_UNNECESSARY; 2098 } 2099 } 2100 skb2->protocol = eth_type_trans(skb2, qdev->ndev); 2101 2102 netif_receive_skb(skb2); 2103 ndev->stats.rx_packets++; 2104 ndev->stats.rx_bytes += length; 2105 lrg_buf_cb2->skb = NULL; 2106 2107 if (qdev->device_id == QL3022_DEVICE_ID) 2108 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); 2109 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); 2110 } 2111 2112 static int ql_tx_rx_clean(struct ql3_adapter *qdev, 2113 int *tx_cleaned, int *rx_cleaned, int work_to_do) 2114 { 2115 struct net_rsp_iocb *net_rsp; 2116 struct net_device *ndev = qdev->ndev; 2117 int work_done = 0; 2118 2119 /* While there are entries in the completion queue. */ 2120 while ((le32_to_cpu(*(qdev->prsp_producer_index)) != 2121 qdev->rsp_consumer_index) && (work_done < work_to_do)) { 2122 2123 net_rsp = qdev->rsp_current; 2124 rmb(); 2125 /* 2126 * Fix 4032 chip's undocumented "feature" where bit-8 is set 2127 * if the inbound completion is for a VLAN. 
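 * Masking the opcode with 0x7f below clears that bit (0x80) so it
 * matches the OPCODE_* cases in the switch statement regardless of
 * whether the frame carried a VLAN tag.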
2128 */ 2129 if (qdev->device_id == QL3032_DEVICE_ID) 2130 net_rsp->opcode &= 0x7f; 2131 switch (net_rsp->opcode) { 2132 2133 case OPCODE_OB_MAC_IOCB_FN0: 2134 case OPCODE_OB_MAC_IOCB_FN2: 2135 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) 2136 net_rsp); 2137 (*tx_cleaned)++; 2138 break; 2139 2140 case OPCODE_IB_MAC_IOCB: 2141 case OPCODE_IB_3032_MAC_IOCB: 2142 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) 2143 net_rsp); 2144 (*rx_cleaned)++; 2145 break; 2146 2147 case OPCODE_IB_IP_IOCB: 2148 case OPCODE_IB_3032_IP_IOCB: 2149 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) 2150 net_rsp); 2151 (*rx_cleaned)++; 2152 break; 2153 default: { 2154 u32 *tmp = (u32 *)net_rsp; 2155 netdev_err(ndev, 2156 "Hit default case, not handled!\n" 2157 " dropping the packet, opcode = %x\n" 2158 "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", 2159 net_rsp->opcode, 2160 (unsigned long int)tmp[0], 2161 (unsigned long int)tmp[1], 2162 (unsigned long int)tmp[2], 2163 (unsigned long int)tmp[3]); 2164 } 2165 } 2166 2167 qdev->rsp_consumer_index++; 2168 2169 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { 2170 qdev->rsp_consumer_index = 0; 2171 qdev->rsp_current = qdev->rsp_q_virt_addr; 2172 } else { 2173 qdev->rsp_current++; 2174 } 2175 2176 work_done = *tx_cleaned + *rx_cleaned; 2177 } 2178 2179 return work_done; 2180 } 2181 2182 static int ql_poll(struct napi_struct *napi, int budget) 2183 { 2184 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); 2185 int rx_cleaned = 0, tx_cleaned = 0; 2186 unsigned long hw_flags; 2187 struct ql3xxx_port_registers __iomem *port_regs = 2188 qdev->mem_map_registers; 2189 2190 ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, budget); 2191 2192 if (tx_cleaned + rx_cleaned != budget) { 2193 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 2194 __napi_complete(napi); 2195 ql_update_small_bufq_prod_index(qdev); 2196 ql_update_lrg_bufq_prod_index(qdev); 2197 writel(qdev->rsp_consumer_index, 2198 &port_regs->CommonRegs.rspQConsumerIndex); 2199 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 2200 2201 ql_enable_interrupts(qdev); 2202 } 2203 return tx_cleaned + rx_cleaned; 2204 } 2205 2206 static irqreturn_t ql3xxx_isr(int irq, void *dev_id) 2207 { 2208 2209 struct net_device *ndev = dev_id; 2210 struct ql3_adapter *qdev = netdev_priv(ndev); 2211 struct ql3xxx_port_registers __iomem *port_regs = 2212 qdev->mem_map_registers; 2213 u32 value; 2214 int handled = 1; 2215 u32 var; 2216 2217 value = ql_read_common_reg_l(qdev, 2218 &port_regs->CommonRegs.ispControlStatus); 2219 2220 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) { 2221 spin_lock(&qdev->adapter_lock); 2222 netif_stop_queue(qdev->ndev); 2223 netif_carrier_off(qdev->ndev); 2224 ql_disable_interrupts(qdev); 2225 qdev->port_link_state = LS_DOWN; 2226 set_bit(QL_RESET_ACTIVE, &qdev->flags) ; 2227 2228 if (value & ISP_CONTROL_FE) { 2229 /* 2230 * Chip Fatal Error. 2231 */ 2232 var = 2233 ql_read_page0_reg_l(qdev, 2234 &port_regs->PortFatalErrStatus); 2235 netdev_warn(ndev, 2236 "Resetting chip. PortFatalErrStatus register = 0x%x\n", 2237 var); 2238 set_bit(QL_RESET_START, &qdev->flags) ; 2239 } else { 2240 /* 2241 * Soft Reset Requested. 2242 */ 2243 set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; 2244 netdev_err(ndev, 2245 "Another function issued a reset to the chip. 
ISR value = %x\n", 2246 value); 2247 } 2248 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); 2249 spin_unlock(&qdev->adapter_lock); 2250 } else if (value & ISP_IMR_DISABLE_CMPL_INT) { 2251 ql_disable_interrupts(qdev); 2252 if (likely(napi_schedule_prep(&qdev->napi))) 2253 __napi_schedule(&qdev->napi); 2254 } else 2255 return IRQ_NONE; 2256 2257 return IRQ_RETVAL(handled); 2258 } 2259 2260 /* 2261 * Get the total number of segments needed for the given number of fragments. 2262 * This is necessary because outbound address lists (OAL) will be used when 2263 * more than two frags are given. Each address list has 5 addr/len pairs. 2264 * The 5th pair in each OAL is used to point to the next OAL if more frags 2265 * are coming. That is why the frags:segment count ratio is not linear. 2266 */ 2267 static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) 2268 { 2269 if (qdev->device_id == QL3022_DEVICE_ID) 2270 return 1; 2271 2272 if (frags <= 2) 2273 return frags + 1; 2274 else if (frags <= 6) 2275 return frags + 2; 2276 else if (frags <= 10) 2277 return frags + 3; 2278 else if (frags <= 14) 2279 return frags + 4; 2280 else if (frags <= 18) 2281 return frags + 5; 2282 return -1; 2283 } 2284 2285 static void ql_hw_csum_setup(const struct sk_buff *skb, 2286 struct ob_mac_iocb_req *mac_iocb_ptr) 2287 { 2288 const struct iphdr *ip = ip_hdr(skb); 2289 2290 mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb); 2291 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2292 2293 if (ip->protocol == IPPROTO_TCP) { 2294 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC | 2295 OB_3032MAC_IOCB_REQ_IC; 2296 } else { 2297 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC | 2298 OB_3032MAC_IOCB_REQ_IC; 2299 } 2300 2301 } 2302 2303 /* 2304 * Map the buffers for this transmit. 2305 * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success. 2306 */ 2307 static int ql_send_map(struct ql3_adapter *qdev, 2308 struct ob_mac_iocb_req *mac_iocb_ptr, 2309 struct ql_tx_buf_cb *tx_cb, 2310 struct sk_buff *skb) 2311 { 2312 struct oal *oal; 2313 struct oal_entry *oal_entry; 2314 int len = skb_headlen(skb); 2315 dma_addr_t map; 2316 int err; 2317 int completed_segs, i; 2318 int seg_cnt, seg = 0; 2319 int frag_cnt = (int)skb_shinfo(skb)->nr_frags; 2320 2321 seg_cnt = tx_cb->seg_count; 2322 /* 2323 * Map the skb buffer first. 2324 */ 2325 map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); 2326 2327 err = pci_dma_mapping_error(qdev->pdev, map); 2328 if (err) { 2329 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", 2330 err); 2331 2332 return NETDEV_TX_BUSY; 2333 } 2334 2335 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2336 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2337 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2338 oal_entry->len = cpu_to_le32(len); 2339 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2340 dma_unmap_len_set(&tx_cb->map[seg], maplen, len); 2341 seg++; 2342 2343 if (seg_cnt == 1) { 2344 /* Terminate the last segment. */ 2345 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2346 return NETDEV_TX_OK; 2347 } 2348 oal = tx_cb->oal; 2349 for (completed_segs = 0; 2350 completed_segs < frag_cnt; 2351 completed_segs++, seg++) { 2352 skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs]; 2353 oal_entry++; 2354 /* 2355 * Check for continuation requirements. 2356 * It's strange but necessary. 2357 * Continuation entry points to outbound address list. 
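 * The IOCB itself carries three addr/len pairs (segs 0-2) and each
 * OAL carries five more; the last pair of the IOCB or of an OAL is
 * reused as a pointer to the next OAL when further entries are
 * needed, which is why the test below fires at seg 2, 7, 12 and 17.
 * Rough layout:
 *   IOCB: [ALP0][ALP1][ALP2 -> OAL0]
 *   OAL0: [ALP][ALP][ALP][ALP][ALP -> OAL1] ...
 * For example, a 6-fragment skb (seg_cnt = 8) uses seg 0 for the
 * linear data, seg 2 for the OAL pointer, and segs 1 and 3-7 for the
 * six fragments.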
2358 */ 2359 if ((seg == 2 && seg_cnt > 3) || 2360 (seg == 7 && seg_cnt > 8) || 2361 (seg == 12 && seg_cnt > 13) || 2362 (seg == 17 && seg_cnt > 18)) { 2363 map = pci_map_single(qdev->pdev, oal, 2364 sizeof(struct oal), 2365 PCI_DMA_TODEVICE); 2366 2367 err = pci_dma_mapping_error(qdev->pdev, map); 2368 if (err) { 2369 netdev_err(qdev->ndev, 2370 "PCI mapping outbound address list with error: %d\n", 2371 err); 2372 goto map_error; 2373 } 2374 2375 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2376 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2377 oal_entry->len = cpu_to_le32(sizeof(struct oal) | 2378 OAL_CONT_ENTRY); 2379 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2380 dma_unmap_len_set(&tx_cb->map[seg], maplen, 2381 sizeof(struct oal)); 2382 oal_entry = (struct oal_entry *)oal; 2383 oal++; 2384 seg++; 2385 } 2386 2387 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), 2388 DMA_TO_DEVICE); 2389 2390 err = dma_mapping_error(&qdev->pdev->dev, map); 2391 if (err) { 2392 netdev_err(qdev->ndev, 2393 "PCI mapping frags failed with error: %d\n", 2394 err); 2395 goto map_error; 2396 } 2397 2398 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2399 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2400 oal_entry->len = cpu_to_le32(skb_frag_size(frag)); 2401 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2402 dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); 2403 } 2404 /* Terminate the last segment. */ 2405 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY); 2406 return NETDEV_TX_OK; 2407 2408 map_error: 2409 /* A PCI mapping failed and now we will need to back out 2410 * We need to traverse through the oal's and associated pages which 2411 * have been mapped and now we must unmap them to clean up properly 2412 */ 2413 2414 seg = 1; 2415 oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low; 2416 oal = tx_cb->oal; 2417 for (i = 0; i < completed_segs; i++, seg++) { 2418 oal_entry++; 2419 2420 /* 2421 * Check for continuation requirements. 2422 * It's strange but necessary. 2423 */ 2424 2425 if ((seg == 2 && seg_cnt > 3) || 2426 (seg == 7 && seg_cnt > 8) || 2427 (seg == 12 && seg_cnt > 13) || 2428 (seg == 17 && seg_cnt > 18)) { 2429 pci_unmap_single(qdev->pdev, 2430 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2431 dma_unmap_len(&tx_cb->map[seg], maplen), 2432 PCI_DMA_TODEVICE); 2433 oal++; 2434 seg++; 2435 } 2436 2437 pci_unmap_page(qdev->pdev, 2438 dma_unmap_addr(&tx_cb->map[seg], mapaddr), 2439 dma_unmap_len(&tx_cb->map[seg], maplen), 2440 PCI_DMA_TODEVICE); 2441 } 2442 2443 pci_unmap_single(qdev->pdev, 2444 dma_unmap_addr(&tx_cb->map[0], mapaddr), 2445 dma_unmap_addr(&tx_cb->map[0], maplen), 2446 PCI_DMA_TODEVICE); 2447 2448 return NETDEV_TX_BUSY; 2449 2450 } 2451 2452 /* 2453 * The difference between 3022 and 3032 sends: 2454 * 3022 only supports a simple single segment transmission. 2455 * 3032 supports checksumming and scatter/gather lists (fragments). 2456 * The 3032 supports sglists by using the 3 addr/len pairs (ALP) 2457 * in the IOCB plus a chain of outbound address lists (OAL) that 2458 * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th) 2459 * will be used to point to an OAL when more ALP entries are required. 2460 * The IOCB is always the top of the chain followed by one or more 2461 * OALs (when necessary). 
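 * For the 3022, ql_get_seg_count() above returns 1, matching the
 * single-segment limitation described here, so ql_send_map() maps
 * only the linear skb data and terminates that single entry.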
2462 */ 2463 static netdev_tx_t ql3xxx_send(struct sk_buff *skb, 2464 struct net_device *ndev) 2465 { 2466 struct ql3_adapter *qdev = netdev_priv(ndev); 2467 struct ql3xxx_port_registers __iomem *port_regs = 2468 qdev->mem_map_registers; 2469 struct ql_tx_buf_cb *tx_cb; 2470 u32 tot_len = skb->len; 2471 struct ob_mac_iocb_req *mac_iocb_ptr; 2472 2473 if (unlikely(atomic_read(&qdev->tx_count) < 2)) 2474 return NETDEV_TX_BUSY; 2475 2476 tx_cb = &qdev->tx_buf[qdev->req_producer_index]; 2477 tx_cb->seg_count = ql_get_seg_count(qdev, 2478 skb_shinfo(skb)->nr_frags); 2479 if (tx_cb->seg_count == -1) { 2480 netdev_err(ndev, "%s: invalid segment count!\n", __func__); 2481 return NETDEV_TX_OK; 2482 } 2483 2484 mac_iocb_ptr = tx_cb->queue_entry; 2485 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req)); 2486 mac_iocb_ptr->opcode = qdev->mac_ob_opcode; 2487 mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X; 2488 mac_iocb_ptr->flags |= qdev->mb_bit_mask; 2489 mac_iocb_ptr->transaction_id = qdev->req_producer_index; 2490 mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len); 2491 tx_cb->skb = skb; 2492 if (qdev->device_id == QL3032_DEVICE_ID && 2493 skb->ip_summed == CHECKSUM_PARTIAL) 2494 ql_hw_csum_setup(skb, mac_iocb_ptr); 2495 2496 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { 2497 netdev_err(ndev, "%s: Could not map the segments!\n", __func__); 2498 return NETDEV_TX_BUSY; 2499 } 2500 2501 wmb(); 2502 qdev->req_producer_index++; 2503 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) 2504 qdev->req_producer_index = 0; 2505 wmb(); 2506 ql_write_common_reg_l(qdev, 2507 &port_regs->CommonRegs.reqQProducerIndex, 2508 qdev->req_producer_index); 2509 2510 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, 2511 "tx queued, slot %d, len %d\n", 2512 qdev->req_producer_index, skb->len); 2513 2514 atomic_dec(&qdev->tx_count); 2515 return NETDEV_TX_OK; 2516 } 2517 2518 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) 2519 { 2520 qdev->req_q_size = 2521 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); 2522 2523 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); 2524 2525 /* The barrier is required to ensure request and response queue 2526 * addr writes to the registers. 
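 * The queue start addresses are also checked below for natural
 * alignment: any physical-address bits within (queue size - 1) must
 * be clear, i.e. a reqQ or rspQ that is not aligned to its own size
 * (assuming the size is a power of two) is treated as an allocation
 * failure.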
2527 */ 2528 wmb(); 2529 2530 qdev->req_q_virt_addr = 2531 pci_alloc_consistent(qdev->pdev, 2532 (size_t) qdev->req_q_size, 2533 &qdev->req_q_phy_addr); 2534 2535 if ((qdev->req_q_virt_addr == NULL) || 2536 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { 2537 netdev_err(qdev->ndev, "reqQ failed\n"); 2538 return -ENOMEM; 2539 } 2540 2541 qdev->rsp_q_virt_addr = 2542 pci_alloc_consistent(qdev->pdev, 2543 (size_t) qdev->rsp_q_size, 2544 &qdev->rsp_q_phy_addr); 2545 2546 if ((qdev->rsp_q_virt_addr == NULL) || 2547 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { 2548 netdev_err(qdev->ndev, "rspQ allocation failed\n"); 2549 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size, 2550 qdev->req_q_virt_addr, 2551 qdev->req_q_phy_addr); 2552 return -ENOMEM; 2553 } 2554 2555 set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2556 2557 return 0; 2558 } 2559 2560 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) 2561 { 2562 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { 2563 netdev_info(qdev->ndev, "Already done\n"); 2564 return; 2565 } 2566 2567 pci_free_consistent(qdev->pdev, 2568 qdev->req_q_size, 2569 qdev->req_q_virt_addr, qdev->req_q_phy_addr); 2570 2571 qdev->req_q_virt_addr = NULL; 2572 2573 pci_free_consistent(qdev->pdev, 2574 qdev->rsp_q_size, 2575 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); 2576 2577 qdev->rsp_q_virt_addr = NULL; 2578 2579 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); 2580 } 2581 2582 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) 2583 { 2584 /* Create Large Buffer Queue */ 2585 qdev->lrg_buf_q_size = 2586 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); 2587 if (qdev->lrg_buf_q_size < PAGE_SIZE) 2588 qdev->lrg_buf_q_alloc_size = PAGE_SIZE; 2589 else 2590 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; 2591 2592 qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers, 2593 sizeof(struct ql_rcv_buf_cb), 2594 GFP_KERNEL); 2595 if (qdev->lrg_buf == NULL) 2596 return -ENOMEM; 2597 2598 qdev->lrg_buf_q_alloc_virt_addr = 2599 pci_alloc_consistent(qdev->pdev, 2600 qdev->lrg_buf_q_alloc_size, 2601 &qdev->lrg_buf_q_alloc_phy_addr); 2602 2603 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { 2604 netdev_err(qdev->ndev, "lBufQ failed\n"); 2605 return -ENOMEM; 2606 } 2607 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; 2608 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; 2609 2610 /* Create Small Buffer Queue */ 2611 qdev->small_buf_q_size = 2612 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry); 2613 if (qdev->small_buf_q_size < PAGE_SIZE) 2614 qdev->small_buf_q_alloc_size = PAGE_SIZE; 2615 else 2616 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; 2617 2618 qdev->small_buf_q_alloc_virt_addr = 2619 pci_alloc_consistent(qdev->pdev, 2620 qdev->small_buf_q_alloc_size, 2621 &qdev->small_buf_q_alloc_phy_addr); 2622 2623 if (qdev->small_buf_q_alloc_virt_addr == NULL) { 2624 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); 2625 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size, 2626 qdev->lrg_buf_q_alloc_virt_addr, 2627 qdev->lrg_buf_q_alloc_phy_addr); 2628 return -ENOMEM; 2629 } 2630 2631 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; 2632 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; 2633 set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); 2634 return 0; 2635 } 2636 2637 static void ql_free_buffer_queues(struct ql3_adapter *qdev) 2638 { 2639 if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { 2640 netdev_info(qdev->ndev, 
"Already done\n"); 2641 return; 2642 } 2643 kfree(qdev->lrg_buf); 2644 pci_free_consistent(qdev->pdev, 2645 qdev->lrg_buf_q_alloc_size, 2646 qdev->lrg_buf_q_alloc_virt_addr, 2647 qdev->lrg_buf_q_alloc_phy_addr); 2648 2649 qdev->lrg_buf_q_virt_addr = NULL; 2650 2651 pci_free_consistent(qdev->pdev, 2652 qdev->small_buf_q_alloc_size, 2653 qdev->small_buf_q_alloc_virt_addr, 2654 qdev->small_buf_q_alloc_phy_addr); 2655 2656 qdev->small_buf_q_virt_addr = NULL; 2657 2658 clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); 2659 } 2660 2661 static int ql_alloc_small_buffers(struct ql3_adapter *qdev) 2662 { 2663 int i; 2664 struct bufq_addr_element *small_buf_q_entry; 2665 2666 /* Currently we allocate on one of memory and use it for smallbuffers */ 2667 qdev->small_buf_total_size = 2668 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES * 2669 QL_SMALL_BUFFER_SIZE); 2670 2671 qdev->small_buf_virt_addr = 2672 pci_alloc_consistent(qdev->pdev, 2673 qdev->small_buf_total_size, 2674 &qdev->small_buf_phy_addr); 2675 2676 if (qdev->small_buf_virt_addr == NULL) { 2677 netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); 2678 return -ENOMEM; 2679 } 2680 2681 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); 2682 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); 2683 2684 small_buf_q_entry = qdev->small_buf_q_virt_addr; 2685 2686 /* Initialize the small buffer queue. */ 2687 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) { 2688 small_buf_q_entry->addr_high = 2689 cpu_to_le32(qdev->small_buf_phy_addr_high); 2690 small_buf_q_entry->addr_low = 2691 cpu_to_le32(qdev->small_buf_phy_addr_low + 2692 (i * QL_SMALL_BUFFER_SIZE)); 2693 small_buf_q_entry++; 2694 } 2695 qdev->small_buf_index = 0; 2696 set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); 2697 return 0; 2698 } 2699 2700 static void ql_free_small_buffers(struct ql3_adapter *qdev) 2701 { 2702 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { 2703 netdev_info(qdev->ndev, "Already done\n"); 2704 return; 2705 } 2706 if (qdev->small_buf_virt_addr != NULL) { 2707 pci_free_consistent(qdev->pdev, 2708 qdev->small_buf_total_size, 2709 qdev->small_buf_virt_addr, 2710 qdev->small_buf_phy_addr); 2711 2712 qdev->small_buf_virt_addr = NULL; 2713 } 2714 } 2715 2716 static void ql_free_large_buffers(struct ql3_adapter *qdev) 2717 { 2718 int i = 0; 2719 struct ql_rcv_buf_cb *lrg_buf_cb; 2720 2721 for (i = 0; i < qdev->num_large_buffers; i++) { 2722 lrg_buf_cb = &qdev->lrg_buf[i]; 2723 if (lrg_buf_cb->skb) { 2724 dev_kfree_skb(lrg_buf_cb->skb); 2725 pci_unmap_single(qdev->pdev, 2726 dma_unmap_addr(lrg_buf_cb, mapaddr), 2727 dma_unmap_len(lrg_buf_cb, maplen), 2728 PCI_DMA_FROMDEVICE); 2729 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2730 } else { 2731 break; 2732 } 2733 } 2734 } 2735 2736 static void ql_init_large_buffers(struct ql3_adapter *qdev) 2737 { 2738 int i; 2739 struct ql_rcv_buf_cb *lrg_buf_cb; 2740 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; 2741 2742 for (i = 0; i < qdev->num_large_buffers; i++) { 2743 lrg_buf_cb = &qdev->lrg_buf[i]; 2744 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high; 2745 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low; 2746 buf_addr_ele++; 2747 } 2748 qdev->lrg_buf_index = 0; 2749 qdev->lrg_buf_skb_check = 0; 2750 } 2751 2752 static int ql_alloc_large_buffers(struct ql3_adapter *qdev) 2753 { 2754 int i; 2755 struct ql_rcv_buf_cb *lrg_buf_cb; 2756 struct sk_buff *skb; 2757 dma_addr_t map; 2758 int err; 2759 2760 for (i = 0; i < 
qdev->num_large_buffers; i++) { 2761 skb = netdev_alloc_skb(qdev->ndev, 2762 qdev->lrg_buffer_len); 2763 if (unlikely(!skb)) { 2764 /* Better luck next round */ 2765 netdev_err(qdev->ndev, 2766 "large buff alloc failed for %d bytes at index %d\n", 2767 qdev->lrg_buffer_len * 2, i); 2768 ql_free_large_buffers(qdev); 2769 return -ENOMEM; 2770 } else { 2771 2772 lrg_buf_cb = &qdev->lrg_buf[i]; 2773 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2774 lrg_buf_cb->index = i; 2775 lrg_buf_cb->skb = skb; 2776 /* 2777 * We save some space to copy the ethhdr from first 2778 * buffer 2779 */ 2780 skb_reserve(skb, QL_HEADER_SPACE); 2781 map = pci_map_single(qdev->pdev, 2782 skb->data, 2783 qdev->lrg_buffer_len - 2784 QL_HEADER_SPACE, 2785 PCI_DMA_FROMDEVICE); 2786 2787 err = pci_dma_mapping_error(qdev->pdev, map); 2788 if (err) { 2789 netdev_err(qdev->ndev, 2790 "PCI mapping failed with error: %d\n", 2791 err); 2792 ql_free_large_buffers(qdev); 2793 return -ENOMEM; 2794 } 2795 2796 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); 2797 dma_unmap_len_set(lrg_buf_cb, maplen, 2798 qdev->lrg_buffer_len - 2799 QL_HEADER_SPACE); 2800 lrg_buf_cb->buf_phy_addr_low = 2801 cpu_to_le32(LS_64BITS(map)); 2802 lrg_buf_cb->buf_phy_addr_high = 2803 cpu_to_le32(MS_64BITS(map)); 2804 } 2805 } 2806 return 0; 2807 } 2808 2809 static void ql_free_send_free_list(struct ql3_adapter *qdev) 2810 { 2811 struct ql_tx_buf_cb *tx_cb; 2812 int i; 2813 2814 tx_cb = &qdev->tx_buf[0]; 2815 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2816 kfree(tx_cb->oal); 2817 tx_cb->oal = NULL; 2818 tx_cb++; 2819 } 2820 } 2821 2822 static int ql_create_send_free_list(struct ql3_adapter *qdev) 2823 { 2824 struct ql_tx_buf_cb *tx_cb; 2825 int i; 2826 struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; 2827 2828 /* Create free list of transmit buffers */ 2829 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 2830 2831 tx_cb = &qdev->tx_buf[i]; 2832 tx_cb->skb = NULL; 2833 tx_cb->queue_entry = req_q_curr; 2834 req_q_curr++; 2835 tx_cb->oal = kmalloc(512, GFP_KERNEL); 2836 if (tx_cb->oal == NULL) 2837 return -ENOMEM; 2838 } 2839 return 0; 2840 } 2841 2842 static int ql_alloc_mem_resources(struct ql3_adapter *qdev) 2843 { 2844 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { 2845 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; 2846 qdev->lrg_buffer_len = NORMAL_MTU_SIZE; 2847 } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { 2848 /* 2849 * Bigger buffers, so less of them. 2850 */ 2851 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; 2852 qdev->lrg_buffer_len = JUMBO_MTU_SIZE; 2853 } else { 2854 netdev_err(qdev->ndev, "Invalid mtu size: %d. 
Only %d and %d are accepted.\n", 2855 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); 2856 return -ENOMEM; 2857 } 2858 qdev->num_large_buffers = 2859 qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; 2860 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; 2861 qdev->max_frame_size = 2862 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; 2863 2864 /* 2865 * First allocate a page of shared memory and use it for shadow 2866 * locations of Network Request Queue Consumer Address Register and 2867 * Network Completion Queue Producer Index Register 2868 */ 2869 qdev->shadow_reg_virt_addr = 2870 pci_alloc_consistent(qdev->pdev, 2871 PAGE_SIZE, &qdev->shadow_reg_phy_addr); 2872 2873 if (qdev->shadow_reg_virt_addr != NULL) { 2874 qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; 2875 qdev->req_consumer_index_phy_addr_high = 2876 MS_64BITS(qdev->shadow_reg_phy_addr); 2877 qdev->req_consumer_index_phy_addr_low = 2878 LS_64BITS(qdev->shadow_reg_phy_addr); 2879 2880 qdev->prsp_producer_index = 2881 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); 2882 qdev->rsp_producer_index_phy_addr_high = 2883 qdev->req_consumer_index_phy_addr_high; 2884 qdev->rsp_producer_index_phy_addr_low = 2885 qdev->req_consumer_index_phy_addr_low + 8; 2886 } else { 2887 netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); 2888 return -ENOMEM; 2889 } 2890 2891 if (ql_alloc_net_req_rsp_queues(qdev) != 0) { 2892 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); 2893 goto err_req_rsp; 2894 } 2895 2896 if (ql_alloc_buffer_queues(qdev) != 0) { 2897 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); 2898 goto err_buffer_queues; 2899 } 2900 2901 if (ql_alloc_small_buffers(qdev) != 0) { 2902 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); 2903 goto err_small_buffers; 2904 } 2905 2906 if (ql_alloc_large_buffers(qdev) != 0) { 2907 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); 2908 goto err_small_buffers; 2909 } 2910 2911 /* Initialize the large buffer queue. 
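 * ql_init_large_buffers() copies the DMA address of every receive
 * buffer mapped in ql_alloc_large_buffers() into the large buffer
 * queue, one address element per buffer, for a total of
 * num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY elements.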
*/ 2912 ql_init_large_buffers(qdev); 2913 if (ql_create_send_free_list(qdev)) 2914 goto err_free_list; 2915 2916 qdev->rsp_current = qdev->rsp_q_virt_addr; 2917 2918 return 0; 2919 err_free_list: 2920 ql_free_send_free_list(qdev); 2921 err_small_buffers: 2922 ql_free_buffer_queues(qdev); 2923 err_buffer_queues: 2924 ql_free_net_req_rsp_queues(qdev); 2925 err_req_rsp: 2926 pci_free_consistent(qdev->pdev, 2927 PAGE_SIZE, 2928 qdev->shadow_reg_virt_addr, 2929 qdev->shadow_reg_phy_addr); 2930 2931 return -ENOMEM; 2932 } 2933 2934 static void ql_free_mem_resources(struct ql3_adapter *qdev) 2935 { 2936 ql_free_send_free_list(qdev); 2937 ql_free_large_buffers(qdev); 2938 ql_free_small_buffers(qdev); 2939 ql_free_buffer_queues(qdev); 2940 ql_free_net_req_rsp_queues(qdev); 2941 if (qdev->shadow_reg_virt_addr != NULL) { 2942 pci_free_consistent(qdev->pdev, 2943 PAGE_SIZE, 2944 qdev->shadow_reg_virt_addr, 2945 qdev->shadow_reg_phy_addr); 2946 qdev->shadow_reg_virt_addr = NULL; 2947 } 2948 } 2949 2950 static int ql_init_misc_registers(struct ql3_adapter *qdev) 2951 { 2952 struct ql3xxx_local_ram_registers __iomem *local_ram = 2953 (void __iomem *)qdev->mem_map_registers; 2954 2955 if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, 2956 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 2957 2) << 4)) 2958 return -1; 2959 2960 ql_write_page2_reg(qdev, 2961 &local_ram->bufletSize, qdev->nvram_data.bufletSize); 2962 2963 ql_write_page2_reg(qdev, 2964 &local_ram->maxBufletCount, 2965 qdev->nvram_data.bufletCount); 2966 2967 ql_write_page2_reg(qdev, 2968 &local_ram->freeBufletThresholdLow, 2969 (qdev->nvram_data.tcpWindowThreshold25 << 16) | 2970 (qdev->nvram_data.tcpWindowThreshold0)); 2971 2972 ql_write_page2_reg(qdev, 2973 &local_ram->freeBufletThresholdHigh, 2974 qdev->nvram_data.tcpWindowThreshold50); 2975 2976 ql_write_page2_reg(qdev, 2977 &local_ram->ipHashTableBase, 2978 (qdev->nvram_data.ipHashTableBaseHi << 16) | 2979 qdev->nvram_data.ipHashTableBaseLo); 2980 ql_write_page2_reg(qdev, 2981 &local_ram->ipHashTableCount, 2982 qdev->nvram_data.ipHashTableSize); 2983 ql_write_page2_reg(qdev, 2984 &local_ram->tcpHashTableBase, 2985 (qdev->nvram_data.tcpHashTableBaseHi << 16) | 2986 qdev->nvram_data.tcpHashTableBaseLo); 2987 ql_write_page2_reg(qdev, 2988 &local_ram->tcpHashTableCount, 2989 qdev->nvram_data.tcpHashTableSize); 2990 ql_write_page2_reg(qdev, 2991 &local_ram->ncbBase, 2992 (qdev->nvram_data.ncbTableBaseHi << 16) | 2993 qdev->nvram_data.ncbTableBaseLo); 2994 ql_write_page2_reg(qdev, 2995 &local_ram->maxNcbCount, 2996 qdev->nvram_data.ncbTableSize); 2997 ql_write_page2_reg(qdev, 2998 &local_ram->drbBase, 2999 (qdev->nvram_data.drbTableBaseHi << 16) | 3000 qdev->nvram_data.drbTableBaseLo); 3001 ql_write_page2_reg(qdev, 3002 &local_ram->maxDrbCount, 3003 qdev->nvram_data.drbTableSize); 3004 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); 3005 return 0; 3006 } 3007 3008 static int ql_adapter_initialize(struct ql3_adapter *qdev) 3009 { 3010 u32 value; 3011 struct ql3xxx_port_registers __iomem *port_regs = 3012 qdev->mem_map_registers; 3013 __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg; 3014 struct ql3xxx_host_memory_registers __iomem *hmem_regs = 3015 (void __iomem *)port_regs; 3016 u32 delay = 10; 3017 int status = 0; 3018 3019 if (ql_mii_setup(qdev)) 3020 return -1; 3021 3022 /* Bring out PHY out of reset */ 3023 ql_write_common_reg(qdev, spir, 3024 (ISP_SERIAL_PORT_IF_WE | 3025 (ISP_SERIAL_PORT_IF_WE << 16))); 3026 /* Give the PHY time to come out of reset. 
*/ 3027 mdelay(100); 3028 qdev->port_link_state = LS_DOWN; 3029 netif_carrier_off(qdev->ndev); 3030 3031 /* V2 chip fix for ARS-39168. */ 3032 ql_write_common_reg(qdev, spir, 3033 (ISP_SERIAL_PORT_IF_SDE | 3034 (ISP_SERIAL_PORT_IF_SDE << 16))); 3035 3036 /* Request Queue Registers */ 3037 *((u32 *)(qdev->preq_consumer_index)) = 0; 3038 atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); 3039 qdev->req_producer_index = 0; 3040 3041 ql_write_page1_reg(qdev, 3042 &hmem_regs->reqConsumerIndexAddrHigh, 3043 qdev->req_consumer_index_phy_addr_high); 3044 ql_write_page1_reg(qdev, 3045 &hmem_regs->reqConsumerIndexAddrLow, 3046 qdev->req_consumer_index_phy_addr_low); 3047 3048 ql_write_page1_reg(qdev, 3049 &hmem_regs->reqBaseAddrHigh, 3050 MS_64BITS(qdev->req_q_phy_addr)); 3051 ql_write_page1_reg(qdev, 3052 &hmem_regs->reqBaseAddrLow, 3053 LS_64BITS(qdev->req_q_phy_addr)); 3054 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); 3055 3056 /* Response Queue Registers */ 3057 *((__le16 *) (qdev->prsp_producer_index)) = 0; 3058 qdev->rsp_consumer_index = 0; 3059 qdev->rsp_current = qdev->rsp_q_virt_addr; 3060 3061 ql_write_page1_reg(qdev, 3062 &hmem_regs->rspProducerIndexAddrHigh, 3063 qdev->rsp_producer_index_phy_addr_high); 3064 3065 ql_write_page1_reg(qdev, 3066 &hmem_regs->rspProducerIndexAddrLow, 3067 qdev->rsp_producer_index_phy_addr_low); 3068 3069 ql_write_page1_reg(qdev, 3070 &hmem_regs->rspBaseAddrHigh, 3071 MS_64BITS(qdev->rsp_q_phy_addr)); 3072 3073 ql_write_page1_reg(qdev, 3074 &hmem_regs->rspBaseAddrLow, 3075 LS_64BITS(qdev->rsp_q_phy_addr)); 3076 3077 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); 3078 3079 /* Large Buffer Queue */ 3080 ql_write_page1_reg(qdev, 3081 &hmem_regs->rxLargeQBaseAddrHigh, 3082 MS_64BITS(qdev->lrg_buf_q_phy_addr)); 3083 3084 ql_write_page1_reg(qdev, 3085 &hmem_regs->rxLargeQBaseAddrLow, 3086 LS_64BITS(qdev->lrg_buf_q_phy_addr)); 3087 3088 ql_write_page1_reg(qdev, 3089 &hmem_regs->rxLargeQLength, 3090 qdev->num_lbufq_entries); 3091 3092 ql_write_page1_reg(qdev, 3093 &hmem_regs->rxLargeBufferLength, 3094 qdev->lrg_buffer_len); 3095 3096 /* Small Buffer Queue */ 3097 ql_write_page1_reg(qdev, 3098 &hmem_regs->rxSmallQBaseAddrHigh, 3099 MS_64BITS(qdev->small_buf_q_phy_addr)); 3100 3101 ql_write_page1_reg(qdev, 3102 &hmem_regs->rxSmallQBaseAddrLow, 3103 LS_64BITS(qdev->small_buf_q_phy_addr)); 3104 3105 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); 3106 ql_write_page1_reg(qdev, 3107 &hmem_regs->rxSmallBufferLength, 3108 QL_SMALL_BUFFER_SIZE); 3109 3110 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; 3111 qdev->small_buf_release_cnt = 8; 3112 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; 3113 qdev->lrg_buf_release_cnt = 8; 3114 qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; 3115 qdev->small_buf_index = 0; 3116 qdev->lrg_buf_index = 0; 3117 qdev->lrg_buf_free_count = 0; 3118 qdev->lrg_buf_free_head = NULL; 3119 qdev->lrg_buf_free_tail = NULL; 3120 3121 ql_write_common_reg(qdev, 3122 &port_regs->CommonRegs. 3123 rxSmallQProducerIndex, 3124 qdev->small_buf_q_producer_index); 3125 ql_write_common_reg(qdev, 3126 &port_regs->CommonRegs. 3127 rxLargeQProducerIndex, 3128 qdev->lrg_buf_q_producer_index); 3129 3130 /* 3131 * Find out if the chip has already been initialized. If it has, then 3132 * we skip some of the initialization. 
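 * The check below reads portStatus: when PORT_STATUS_IC is still
 * clear, the one-time setup (local RAM layout from NVRAM, external
 * hardware config) is performed; completion is announced further
 * down by writing PORT_CONTROL_CC and polling until PORT_STATUS_IC
 * is set.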
3133 */ 3134 clear_bit(QL_LINK_MASTER, &qdev->flags); 3135 value = ql_read_page0_reg(qdev, &port_regs->portStatus); 3136 if ((value & PORT_STATUS_IC) == 0) { 3137 3138 /* Chip has not been configured yet, so let it rip. */ 3139 if (ql_init_misc_registers(qdev)) { 3140 status = -1; 3141 goto out; 3142 } 3143 3144 value = qdev->nvram_data.tcpMaxWindowSize; 3145 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); 3146 3147 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; 3148 3149 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, 3150 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) 3151 * 2) << 13)) { 3152 status = -1; 3153 goto out; 3154 } 3155 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); 3156 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, 3157 (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) << 3158 16) | (INTERNAL_CHIP_SD | 3159 INTERNAL_CHIP_WE))); 3160 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); 3161 } 3162 3163 if (qdev->mac_index) 3164 ql_write_page0_reg(qdev, 3165 &port_regs->mac1MaxFrameLengthReg, 3166 qdev->max_frame_size); 3167 else 3168 ql_write_page0_reg(qdev, 3169 &port_regs->mac0MaxFrameLengthReg, 3170 qdev->max_frame_size); 3171 3172 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, 3173 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * 3174 2) << 7)) { 3175 status = -1; 3176 goto out; 3177 } 3178 3179 PHY_Setup(qdev); 3180 ql_init_scan_mode(qdev); 3181 ql_get_phy_owner(qdev); 3182 3183 /* Load the MAC Configuration */ 3184 3185 /* Program lower 32 bits of the MAC address */ 3186 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3187 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); 3188 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3189 ((qdev->ndev->dev_addr[2] << 24) 3190 | (qdev->ndev->dev_addr[3] << 16) 3191 | (qdev->ndev->dev_addr[4] << 8) 3192 | qdev->ndev->dev_addr[5])); 3193 3194 /* Program top 16 bits of the MAC address */ 3195 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3196 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); 3197 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3198 ((qdev->ndev->dev_addr[0] << 8) 3199 | qdev->ndev->dev_addr[1])); 3200 3201 /* Enable Primary MAC */ 3202 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3203 ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) | 3204 MAC_ADDR_INDIRECT_PTR_REG_PE)); 3205 3206 /* Clear Primary and Secondary IP addresses */ 3207 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, 3208 ((IP_ADDR_INDEX_REG_MASK << 16) | 3209 (qdev->mac_index << 2))); 3210 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); 3211 3212 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, 3213 ((IP_ADDR_INDEX_REG_MASK << 16) | 3214 ((qdev->mac_index << 2) + 1))); 3215 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); 3216 3217 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); 3218 3219 /* Indicate Configuration Complete */ 3220 ql_write_page0_reg(qdev, 3221 &port_regs->portControl, 3222 ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC)); 3223 3224 do { 3225 value = ql_read_page0_reg(qdev, &port_regs->portStatus); 3226 if (value & PORT_STATUS_IC) 3227 break; 3228 spin_unlock_irq(&qdev->hw_lock); 3229 msleep(500); 3230 spin_lock_irq(&qdev->hw_lock); 3231 } while (--delay); 3232 3233 if (delay == 0) { 3234 netdev_err(qdev->ndev, "Hw Initialization timeout\n"); 3235 status = -1; 3236 goto out; 3237 } 3238 3239 /* Enable Ethernet Function */ 3240 if (qdev->device_id == QL3032_DEVICE_ID) { 3241 value = 3242 (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE | 3243 QL3032_PORT_CONTROL_EIv6 | 
QL3032_PORT_CONTROL_EIv4 | 3244 QL3032_PORT_CONTROL_ET); 3245 ql_write_page0_reg(qdev, &port_regs->functionControl, 3246 ((value << 16) | value)); 3247 } else { 3248 value = 3249 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI | 3250 PORT_CONTROL_HH); 3251 ql_write_page0_reg(qdev, &port_regs->portControl, 3252 ((value << 16) | value)); 3253 } 3254 3255 3256 out: 3257 return status; 3258 } 3259 3260 /* 3261 * Caller holds hw_lock. 3262 */ 3263 static int ql_adapter_reset(struct ql3_adapter *qdev) 3264 { 3265 struct ql3xxx_port_registers __iomem *port_regs = 3266 qdev->mem_map_registers; 3267 int status = 0; 3268 u16 value; 3269 int max_wait_time; 3270 3271 set_bit(QL_RESET_ACTIVE, &qdev->flags); 3272 clear_bit(QL_RESET_DONE, &qdev->flags); 3273 3274 /* 3275 * Issue soft reset to chip. 3276 */ 3277 netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n"); 3278 ql_write_common_reg(qdev, 3279 &port_regs->CommonRegs.ispControlStatus, 3280 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR)); 3281 3282 /* Wait up to 5 seconds for the reset to complete. */ 3283 netdev_printk(KERN_DEBUG, qdev->ndev, 3284 "Wait up to 5 seconds for reset to complete\n"); 3285 3286 /* Wait until the firmware tells us the Soft Reset is done */ 3287 max_wait_time = 5; 3288 do { 3289 value = 3290 ql_read_common_reg(qdev, 3291 &port_regs->CommonRegs.ispControlStatus); 3292 if ((value & ISP_CONTROL_SR) == 0) 3293 break; 3294 3295 ssleep(1); 3296 } while ((--max_wait_time)); 3297 3298 /* 3299 * Also, make sure that the Network Reset Interrupt bit has been 3300 * cleared after the soft reset has taken place. 3301 */ 3302 value = 3303 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); 3304 if (value & ISP_CONTROL_RI) { 3305 netdev_printk(KERN_DEBUG, qdev->ndev, 3306 "clearing RI after reset\n"); 3307 ql_write_common_reg(qdev, 3308 &port_regs->CommonRegs. 3309 ispControlStatus, 3310 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); 3311 } 3312 3313 if (max_wait_time == 0) { 3314 /* Issue Force Soft Reset */ 3315 ql_write_common_reg(qdev, 3316 &port_regs->CommonRegs. 3317 ispControlStatus, 3318 ((ISP_CONTROL_FSR << 16) | 3319 ISP_CONTROL_FSR)); 3320 /* 3321 * Wait until the firmware tells us the Force Soft Reset is 3322 * done 3323 */ 3324 max_wait_time = 5; 3325 do { 3326 value = ql_read_common_reg(qdev, 3327 &port_regs->CommonRegs.
3328 ispControlStatus); 3329 if ((value & ISP_CONTROL_FSR) == 0) 3330 break; 3331 ssleep(1); 3332 } while ((--max_wait_time)); 3333 } 3334 if (max_wait_time == 0) 3335 status = 1; 3336 3337 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3338 set_bit(QL_RESET_DONE, &qdev->flags); 3339 return status; 3340 } 3341 3342 static void ql_set_mac_info(struct ql3_adapter *qdev) 3343 { 3344 struct ql3xxx_port_registers __iomem *port_regs = 3345 qdev->mem_map_registers; 3346 u32 value, port_status; 3347 u8 func_number; 3348 3349 /* Get the function number */ 3350 value = 3351 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); 3352 func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK); 3353 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); 3354 switch (value & ISP_CONTROL_FN_MASK) { 3355 case ISP_CONTROL_FN0_NET: 3356 qdev->mac_index = 0; 3357 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3358 qdev->mb_bit_mask = FN0_MA_BITS_MASK; 3359 qdev->PHYAddr = PORT0_PHY_ADDRESS; 3360 if (port_status & PORT_STATUS_SM0) 3361 set_bit(QL_LINK_OPTICAL, &qdev->flags); 3362 else 3363 clear_bit(QL_LINK_OPTICAL, &qdev->flags); 3364 break; 3365 3366 case ISP_CONTROL_FN1_NET: 3367 qdev->mac_index = 1; 3368 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; 3369 qdev->mb_bit_mask = FN1_MA_BITS_MASK; 3370 qdev->PHYAddr = PORT1_PHY_ADDRESS; 3371 if (port_status & PORT_STATUS_SM1) 3372 set_bit(QL_LINK_OPTICAL, &qdev->flags); 3373 else 3374 clear_bit(QL_LINK_OPTICAL, &qdev->flags); 3375 break; 3376 3377 case ISP_CONTROL_FN0_SCSI: 3378 case ISP_CONTROL_FN1_SCSI: 3379 default: 3380 netdev_printk(KERN_DEBUG, qdev->ndev, 3381 "Invalid function number, ispControlStatus = 0x%x\n", 3382 value); 3383 break; 3384 } 3385 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; 3386 } 3387 3388 static void ql_display_dev_info(struct net_device *ndev) 3389 { 3390 struct ql3_adapter *qdev = netdev_priv(ndev); 3391 struct pci_dev *pdev = qdev->pdev; 3392 3393 netdev_info(ndev, 3394 "%s Adapter %d RevisionID %d found %s on PCI slot %d\n", 3395 DRV_NAME, qdev->index, qdev->chip_rev_id, 3396 qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", 3397 qdev->pci_slot); 3398 netdev_info(ndev, "%s Interface\n", 3399 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); 3400 3401 /* 3402 * Print PCI bus width/type. 3403 */ 3404 netdev_info(ndev, "Bus interface is %s %s\n", 3405 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), 3406 ((qdev->pci_x) ? 
"PCI-X" : "PCI")); 3407 3408 netdev_info(ndev, "mem IO base address adjusted = 0x%p\n", 3409 qdev->mem_map_registers); 3410 netdev_info(ndev, "Interrupt number = %d\n", pdev->irq); 3411 3412 netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); 3413 } 3414 3415 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) 3416 { 3417 struct net_device *ndev = qdev->ndev; 3418 int retval = 0; 3419 3420 netif_stop_queue(ndev); 3421 netif_carrier_off(ndev); 3422 3423 clear_bit(QL_ADAPTER_UP, &qdev->flags); 3424 clear_bit(QL_LINK_MASTER, &qdev->flags); 3425 3426 ql_disable_interrupts(qdev); 3427 3428 free_irq(qdev->pdev->irq, ndev); 3429 3430 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3431 netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); 3432 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3433 pci_disable_msi(qdev->pdev); 3434 } 3435 3436 del_timer_sync(&qdev->adapter_timer); 3437 3438 napi_disable(&qdev->napi); 3439 3440 if (do_reset) { 3441 int soft_reset; 3442 unsigned long hw_flags; 3443 3444 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3445 if (ql_wait_for_drvr_lock(qdev)) { 3446 soft_reset = ql_adapter_reset(qdev); 3447 if (soft_reset) { 3448 netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n", 3449 qdev->index); 3450 } 3451 netdev_err(ndev, 3452 "Releasing driver lock via chip reset\n"); 3453 } else { 3454 netdev_err(ndev, 3455 "Could not acquire driver lock to do reset!\n"); 3456 retval = -1; 3457 } 3458 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3459 } 3460 ql_free_mem_resources(qdev); 3461 return retval; 3462 } 3463 3464 static int ql_adapter_up(struct ql3_adapter *qdev) 3465 { 3466 struct net_device *ndev = qdev->ndev; 3467 int err; 3468 unsigned long irq_flags = IRQF_SHARED; 3469 unsigned long hw_flags; 3470 3471 if (ql_alloc_mem_resources(qdev)) { 3472 netdev_err(ndev, "Unable to allocate buffers\n"); 3473 return -ENOMEM; 3474 } 3475 3476 if (qdev->msi) { 3477 if (pci_enable_msi(qdev->pdev)) { 3478 netdev_err(ndev, 3479 "User requested MSI, but MSI failed to initialize. 
Continuing without MSI.\n"); 3480 qdev->msi = 0; 3481 } else { 3482 netdev_info(ndev, "MSI Enabled...\n"); 3483 set_bit(QL_MSI_ENABLED, &qdev->flags); 3484 irq_flags &= ~IRQF_SHARED; 3485 } 3486 } 3487 3488 err = request_irq(qdev->pdev->irq, ql3xxx_isr, 3489 irq_flags, ndev->name, ndev); 3490 if (err) { 3491 netdev_err(ndev, 3492 "Failed to reserve interrupt %d - already in use\n", 3493 qdev->pdev->irq); 3494 goto err_irq; 3495 } 3496 3497 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3498 3499 err = ql_wait_for_drvr_lock(qdev); 3500 if (err) { 3501 err = ql_adapter_initialize(qdev); 3502 if (err) { 3503 netdev_err(ndev, "Unable to initialize adapter\n"); 3504 goto err_init; 3505 } 3506 netdev_err(ndev, "Releasing driver lock\n"); 3507 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3508 } else { 3509 netdev_err(ndev, "Could not acquire driver lock\n"); 3510 goto err_lock; 3511 } 3512 3513 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3514 3515 set_bit(QL_ADAPTER_UP, &qdev->flags); 3516 3517 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); 3518 3519 napi_enable(&qdev->napi); 3520 ql_enable_interrupts(qdev); 3521 return 0; 3522 3523 err_init: 3524 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); 3525 err_lock: 3526 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3527 free_irq(qdev->pdev->irq, ndev); 3528 err_irq: 3529 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { 3530 netdev_info(ndev, "calling pci_disable_msi()\n"); 3531 clear_bit(QL_MSI_ENABLED, &qdev->flags); 3532 pci_disable_msi(qdev->pdev); 3533 } 3534 return err; 3535 } 3536 3537 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) 3538 { 3539 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { 3540 netdev_err(qdev->ndev, 3541 "Driver up/down cycle failed, closing device\n"); 3542 rtnl_lock(); 3543 dev_close(qdev->ndev); 3544 rtnl_unlock(); 3545 return -1; 3546 } 3547 return 0; 3548 } 3549 3550 static int ql3xxx_close(struct net_device *ndev) 3551 { 3552 struct ql3_adapter *qdev = netdev_priv(ndev); 3553 3554 /* 3555 * Wait for device to recover from a reset. 3556 * (Rarely happens, but possible.) 
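 * The loop below simply polls QL_ADAPTER_UP every 50 ms, so a close
 * that arrives while the reset worker has the adapter down waits for
 * ql_cycle_adapter() to bring the interface back up before tearing
 * it down again.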
3557 */ 3558 while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) 3559 msleep(50); 3560 3561 ql_adapter_down(qdev, QL_DO_RESET); 3562 return 0; 3563 } 3564 3565 static int ql3xxx_open(struct net_device *ndev) 3566 { 3567 struct ql3_adapter *qdev = netdev_priv(ndev); 3568 return ql_adapter_up(qdev); 3569 } 3570 3571 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) 3572 { 3573 struct ql3_adapter *qdev = netdev_priv(ndev); 3574 struct ql3xxx_port_registers __iomem *port_regs = 3575 qdev->mem_map_registers; 3576 struct sockaddr *addr = p; 3577 unsigned long hw_flags; 3578 3579 if (netif_running(ndev)) 3580 return -EBUSY; 3581 3582 if (!is_valid_ether_addr(addr->sa_data)) 3583 return -EADDRNOTAVAIL; 3584 3585 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 3586 3587 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3588 /* Program lower 32 bits of the MAC address */ 3589 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3590 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16)); 3591 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3592 ((ndev->dev_addr[2] << 24) | (ndev-> 3593 dev_addr[3] << 16) | 3594 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5])); 3595 3596 /* Program top 16 bits of the MAC address */ 3597 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, 3598 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1)); 3599 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, 3600 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1])); 3601 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3602 3603 return 0; 3604 } 3605 3606 static void ql3xxx_tx_timeout(struct net_device *ndev) 3607 { 3608 struct ql3_adapter *qdev = netdev_priv(ndev); 3609 3610 netdev_err(ndev, "Resetting...\n"); 3611 /* 3612 * Stop the queues, we've got a problem. 3613 */ 3614 netif_stop_queue(ndev); 3615 3616 /* 3617 * Wake up the worker to process this event. 3618 */ 3619 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); 3620 } 3621 3622 static void ql_reset_work(struct work_struct *work) 3623 { 3624 struct ql3_adapter *qdev = 3625 container_of(work, struct ql3_adapter, reset_work.work); 3626 struct net_device *ndev = qdev->ndev; 3627 u32 value; 3628 struct ql_tx_buf_cb *tx_cb; 3629 int max_wait_time, i; 3630 struct ql3xxx_port_registers __iomem *port_regs = 3631 qdev->mem_map_registers; 3632 unsigned long hw_flags; 3633 3634 if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START), &qdev->flags)) { 3635 clear_bit(QL_LINK_MASTER, &qdev->flags); 3636 3637 /* 3638 * Loop through the active list and return the skb. 3639 */ 3640 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) { 3641 int j; 3642 tx_cb = &qdev->tx_buf[i]; 3643 if (tx_cb->skb) { 3644 netdev_printk(KERN_DEBUG, ndev, 3645 "Freeing lost SKB\n"); 3646 pci_unmap_single(qdev->pdev, 3647 dma_unmap_addr(&tx_cb->map[0], 3648 mapaddr), 3649 dma_unmap_len(&tx_cb->map[0], maplen), 3650 PCI_DMA_TODEVICE); 3651 for (j = 1; j < tx_cb->seg_count; j++) { 3652 pci_unmap_page(qdev->pdev, 3653 dma_unmap_addr(&tx_cb->map[j], 3654 mapaddr), 3655 dma_unmap_len(&tx_cb->map[j], 3656 maplen), 3657 PCI_DMA_TODEVICE); 3658 } 3659 dev_kfree_skb(tx_cb->skb); 3660 tx_cb->skb = NULL; 3661 } 3662 } 3663 3664 netdev_err(ndev, "Clearing NRI after reset\n"); 3665 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3666 ql_write_common_reg(qdev, 3667 &port_regs->CommonRegs. 3668 ispControlStatus, 3669 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI)); 3670 /* 3671 * Wait the for Soft Reset to Complete. 
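 * The loop below polls ispControlStatus once per second for up to
 * max_wait_time (10) seconds, re-clearing the reset interrupt bit if
 * it shows up again, and releases hw_lock around each ssleep() since
 * a spinlock cannot be held across a sleep.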
3672 */ 3673 max_wait_time = 10; 3674 do { 3675 value = ql_read_common_reg(qdev, 3676 &port_regs->CommonRegs. 3677 3678 ispControlStatus); 3679 if ((value & ISP_CONTROL_SR) == 0) { 3680 netdev_printk(KERN_DEBUG, ndev, 3681 "reset completed\n"); 3682 break; 3683 } 3684 3685 if (value & ISP_CONTROL_RI) { 3686 netdev_printk(KERN_DEBUG, ndev, 3687 "clearing NRI after reset\n"); 3688 ql_write_common_reg(qdev, 3689 &port_regs-> 3690 CommonRegs. 3691 ispControlStatus, 3692 ((ISP_CONTROL_RI << 3693 16) | ISP_CONTROL_RI)); 3694 } 3695 3696 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3697 ssleep(1); 3698 spin_lock_irqsave(&qdev->hw_lock, hw_flags); 3699 } while (--max_wait_time); 3700 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 3701 3702 if (value & ISP_CONTROL_SR) { 3703 3704 /* 3705 * Set the reset flags and clear the board again. 3706 * Nothing else to do... 3707 */ 3708 netdev_err(ndev, 3709 "Timed out waiting for reset to complete\n"); 3710 netdev_err(ndev, "Do a reset\n"); 3711 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3712 clear_bit(QL_RESET_START, &qdev->flags); 3713 ql_cycle_adapter(qdev, QL_DO_RESET); 3714 return; 3715 } 3716 3717 clear_bit(QL_RESET_ACTIVE, &qdev->flags); 3718 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); 3719 clear_bit(QL_RESET_START, &qdev->flags); 3720 ql_cycle_adapter(qdev, QL_NO_RESET); 3721 } 3722 } 3723 3724 static void ql_tx_timeout_work(struct work_struct *work) 3725 { 3726 struct ql3_adapter *qdev = 3727 container_of(work, struct ql3_adapter, tx_timeout_work.work); 3728 3729 ql_cycle_adapter(qdev, QL_DO_RESET); 3730 } 3731 3732 static void ql_get_board_info(struct ql3_adapter *qdev) 3733 { 3734 struct ql3xxx_port_registers __iomem *port_regs = 3735 qdev->mem_map_registers; 3736 u32 value; 3737 3738 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); 3739 3740 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); 3741 if (value & PORT_STATUS_64) 3742 qdev->pci_width = 64; 3743 else 3744 qdev->pci_width = 32; 3745 if (value & PORT_STATUS_X) 3746 qdev->pci_x = 1; 3747 else 3748 qdev->pci_x = 0; 3749 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); 3750 } 3751 3752 static void ql3xxx_timer(unsigned long ptr) 3753 { 3754 struct ql3_adapter *qdev = (struct ql3_adapter *)ptr; 3755 queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); 3756 } 3757 3758 static const struct net_device_ops ql3xxx_netdev_ops = { 3759 .ndo_open = ql3xxx_open, 3760 .ndo_start_xmit = ql3xxx_send, 3761 .ndo_stop = ql3xxx_close, 3762 .ndo_change_mtu = eth_change_mtu, 3763 .ndo_validate_addr = eth_validate_addr, 3764 .ndo_set_mac_address = ql3xxx_set_mac_address, 3765 .ndo_tx_timeout = ql3xxx_tx_timeout, 3766 }; 3767 3768 static int ql3xxx_probe(struct pci_dev *pdev, 3769 const struct pci_device_id *pci_entry) 3770 { 3771 struct net_device *ndev = NULL; 3772 struct ql3_adapter *qdev = NULL; 3773 static int cards_found; 3774 int uninitialized_var(pci_using_dac), err; 3775 3776 err = pci_enable_device(pdev); 3777 if (err) { 3778 pr_err("%s cannot enable PCI device\n", pci_name(pdev)); 3779 goto err_out; 3780 } 3781 3782 err = pci_request_regions(pdev, DRV_NAME); 3783 if (err) { 3784 pr_err("%s cannot obtain PCI resources\n", pci_name(pdev)); 3785 goto err_out_disable_pdev; 3786 } 3787 3788 pci_set_master(pdev); 3789 3790 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3791 pci_using_dac = 1; 3792 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3793 } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 3794 pci_using_dac = 0; 3795 err = 
pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3796 } 3797 3798 if (err) { 3799 pr_err("%s no usable DMA configuration\n", pci_name(pdev)); 3800 goto err_out_free_regions; 3801 } 3802 3803 ndev = alloc_etherdev(sizeof(struct ql3_adapter)); 3804 if (!ndev) { 3805 err = -ENOMEM; 3806 goto err_out_free_regions; 3807 } 3808 3809 SET_NETDEV_DEV(ndev, &pdev->dev); 3810 3811 pci_set_drvdata(pdev, ndev); 3812 3813 qdev = netdev_priv(ndev); 3814 qdev->index = cards_found; 3815 qdev->ndev = ndev; 3816 qdev->pdev = pdev; 3817 qdev->device_id = pci_entry->device; 3818 qdev->port_link_state = LS_DOWN; 3819 if (msi) 3820 qdev->msi = 1; 3821 3822 qdev->msg_enable = netif_msg_init(debug, default_msg); 3823 3824 if (pci_using_dac) 3825 ndev->features |= NETIF_F_HIGHDMA; 3826 if (qdev->device_id == QL3032_DEVICE_ID) 3827 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 3828 3829 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); 3830 if (!qdev->mem_map_registers) { 3831 pr_err("%s: cannot map device registers\n", pci_name(pdev)); 3832 err = -EIO; 3833 goto err_out_free_ndev; 3834 } 3835 3836 spin_lock_init(&qdev->adapter_lock); 3837 spin_lock_init(&qdev->hw_lock); 3838 3839 /* Set driver entry points */ 3840 ndev->netdev_ops = &ql3xxx_netdev_ops; 3841 ndev->ethtool_ops = &ql3xxx_ethtool_ops; 3842 ndev->watchdog_timeo = 5 * HZ; 3843 3844 netif_napi_add(ndev, &qdev->napi, ql_poll, 64); 3845 3846 ndev->irq = pdev->irq; 3847 3848 /* make sure the EEPROM is good */ 3849 if (ql_get_nvram_params(qdev)) { 3850 pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n", 3851 __func__, qdev->index); 3852 err = -EIO; 3853 goto err_out_iounmap; 3854 } 3855 3856 ql_set_mac_info(qdev); 3857 3858 /* Validate and set parameters */ 3859 if (qdev->mac_index) { 3860 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; 3861 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); 3862 } else { 3863 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; 3864 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); 3865 } 3866 3867 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; 3868 3869 /* Record PCI bus information. */ 3870 ql_get_board_info(qdev); 3871 3872 /* 3873 * Set the Maximum Memory Read Byte Count value. We do this to handle 3874 * jumbo frames. 
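 * The write below is only issued when the adapter is running in
 * PCI-X mode; config offset 0x4e is presumably this device's PCI-X
 * command register, which is where the maximum memory read byte
 * count field is defined.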
3875 */ 3876 if (qdev->pci_x) 3877 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036); 3878 3879 err = register_netdev(ndev); 3880 if (err) { 3881 pr_err("%s: cannot register net device\n", pci_name(pdev)); 3882 goto err_out_iounmap; 3883 } 3884 3885 /* we're going to reset, so assume we have no link for now */ 3886 3887 netif_carrier_off(ndev); 3888 netif_stop_queue(ndev); 3889 3890 qdev->workqueue = create_singlethread_workqueue(ndev->name); 3891 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); 3892 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); 3893 INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); 3894 3895 init_timer(&qdev->adapter_timer); 3896 qdev->adapter_timer.function = ql3xxx_timer; 3897 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ 3898 qdev->adapter_timer.data = (unsigned long)qdev; 3899 3900 if (!cards_found) { 3901 pr_alert("%s\n", DRV_STRING); 3902 pr_alert("Driver name: %s, Version: %s\n", 3903 DRV_NAME, DRV_VERSION); 3904 } 3905 ql_display_dev_info(ndev); 3906 3907 cards_found++; 3908 return 0; 3909 3910 err_out_iounmap: 3911 iounmap(qdev->mem_map_registers); 3912 err_out_free_ndev: 3913 free_netdev(ndev); 3914 err_out_free_regions: 3915 pci_release_regions(pdev); 3916 err_out_disable_pdev: 3917 pci_disable_device(pdev); 3918 err_out: 3919 return err; 3920 } 3921 3922 static void ql3xxx_remove(struct pci_dev *pdev) 3923 { 3924 struct net_device *ndev = pci_get_drvdata(pdev); 3925 struct ql3_adapter *qdev = netdev_priv(ndev); 3926 3927 unregister_netdev(ndev); 3928 3929 ql_disable_interrupts(qdev); 3930 3931 if (qdev->workqueue) { 3932 cancel_delayed_work(&qdev->reset_work); 3933 cancel_delayed_work(&qdev->tx_timeout_work); 3934 destroy_workqueue(qdev->workqueue); 3935 qdev->workqueue = NULL; 3936 } 3937 3938 iounmap(qdev->mem_map_registers); 3939 pci_release_regions(pdev); 3940 free_netdev(ndev); 3941 } 3942 3943 static struct pci_driver ql3xxx_driver = { 3944 3945 .name = DRV_NAME, 3946 .id_table = ql3xxx_pci_tbl, 3947 .probe = ql3xxx_probe, 3948 .remove = ql3xxx_remove, 3949 }; 3950 3951 module_pci_driver(ql3xxx_driver); 3952