Lines Matching refs:qdev
103 static int ql_sem_spinlock(struct ql3_adapter *qdev, in ql_sem_spinlock() argument
107 qdev->mem_map_registers; in ql_sem_spinlock()
122 static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask) in ql_sem_unlock() argument
125 qdev->mem_map_registers; in ql_sem_unlock()
130 static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits) in ql_sem_lock() argument
133 qdev->mem_map_registers; in ql_sem_lock()
144 static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) in ql_wait_for_drvr_lock() argument
149 if (ql_sem_lock(qdev, in ql_wait_for_drvr_lock()
151 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) in ql_wait_for_drvr_lock()
153 netdev_printk(KERN_DEBUG, qdev->ndev, in ql_wait_for_drvr_lock()
160 netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); in ql_wait_for_drvr_lock()
164 static void ql_set_register_page(struct ql3_adapter *qdev, u32 page) in ql_set_register_page() argument
167 qdev->mem_map_registers; in ql_set_register_page()
172 qdev->current_page = page; in ql_set_register_page()
175 static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) in ql_read_common_reg_l() argument
180 spin_lock_irqsave(&qdev->hw_lock, hw_flags); in ql_read_common_reg_l()
182 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_read_common_reg_l()
187 static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg) in ql_read_common_reg() argument
192 static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg) in ql_read_page0_reg_l() argument
197 spin_lock_irqsave(&qdev->hw_lock, hw_flags); in ql_read_page0_reg_l()
199 if (qdev->current_page != 0) in ql_read_page0_reg_l()
200 ql_set_register_page(qdev, 0); in ql_read_page0_reg_l()
203 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_read_page0_reg_l()
207 static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg) in ql_read_page0_reg() argument
209 if (qdev->current_page != 0) in ql_read_page0_reg()
210 ql_set_register_page(qdev, 0); in ql_read_page0_reg()
214 static void ql_write_common_reg_l(struct ql3_adapter *qdev, in ql_write_common_reg_l() argument
219 spin_lock_irqsave(&qdev->hw_lock, hw_flags); in ql_write_common_reg_l()
222 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_write_common_reg_l()
225 static void ql_write_common_reg(struct ql3_adapter *qdev, in ql_write_common_reg() argument
232 static void ql_write_nvram_reg(struct ql3_adapter *qdev, in ql_write_nvram_reg() argument
240 static void ql_write_page0_reg(struct ql3_adapter *qdev, in ql_write_page0_reg() argument
243 if (qdev->current_page != 0) in ql_write_page0_reg()
244 ql_set_register_page(qdev, 0); in ql_write_page0_reg()
252 static void ql_write_page1_reg(struct ql3_adapter *qdev, in ql_write_page1_reg() argument
255 if (qdev->current_page != 1) in ql_write_page1_reg()
256 ql_set_register_page(qdev, 1); in ql_write_page1_reg()
264 static void ql_write_page2_reg(struct ql3_adapter *qdev, in ql_write_page2_reg() argument
267 if (qdev->current_page != 2) in ql_write_page2_reg()
268 ql_set_register_page(qdev, 2); in ql_write_page2_reg()
273 static void ql_disable_interrupts(struct ql3_adapter *qdev) in ql_disable_interrupts() argument
276 qdev->mem_map_registers; in ql_disable_interrupts()
278 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, in ql_disable_interrupts()
283 static void ql_enable_interrupts(struct ql3_adapter *qdev) in ql_enable_interrupts() argument
286 qdev->mem_map_registers; in ql_enable_interrupts()
288 ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg, in ql_enable_interrupts()
293 static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev, in ql_release_to_lrg_buf_free_list() argument
300 if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */ in ql_release_to_lrg_buf_free_list()
301 qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb; in ql_release_to_lrg_buf_free_list()
303 qdev->lrg_buf_free_tail->next = lrg_buf_cb; in ql_release_to_lrg_buf_free_list()
304 qdev->lrg_buf_free_tail = lrg_buf_cb; in ql_release_to_lrg_buf_free_list()
308 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, in ql_release_to_lrg_buf_free_list()
309 qdev->lrg_buffer_len); in ql_release_to_lrg_buf_free_list()
311 qdev->lrg_buf_skb_check++; in ql_release_to_lrg_buf_free_list()
318 map = dma_map_single(&qdev->pdev->dev, in ql_release_to_lrg_buf_free_list()
320 qdev->lrg_buffer_len - QL_HEADER_SPACE, in ql_release_to_lrg_buf_free_list()
322 err = dma_mapping_error(&qdev->pdev->dev, map); in ql_release_to_lrg_buf_free_list()
324 netdev_err(qdev->ndev, in ql_release_to_lrg_buf_free_list()
330 qdev->lrg_buf_skb_check++; in ql_release_to_lrg_buf_free_list()
340 qdev->lrg_buffer_len - in ql_release_to_lrg_buf_free_list()
345 qdev->lrg_buf_free_count++; in ql_release_to_lrg_buf_free_list()
349 *qdev) in ql_get_from_lrg_buf_free_list()
351 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; in ql_get_from_lrg_buf_free_list()
354 qdev->lrg_buf_free_head = lrg_buf_cb->next; in ql_get_from_lrg_buf_free_list()
355 if (qdev->lrg_buf_free_head == NULL) in ql_get_from_lrg_buf_free_list()
356 qdev->lrg_buf_free_tail = NULL; in ql_get_from_lrg_buf_free_list()
357 qdev->lrg_buf_free_count--; in ql_get_from_lrg_buf_free_list()
366 static void fm93c56a_deselect(struct ql3_adapter *qdev);
367 static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
373 static void fm93c56a_select(struct ql3_adapter *qdev) in fm93c56a_select() argument
376 qdev->mem_map_registers; in fm93c56a_select()
379 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; in fm93c56a_select()
380 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); in fm93c56a_select()
386 static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr) in fm93c56a_cmd() argument
393 qdev->mem_map_registers; in fm93c56a_cmd()
397 ql_write_nvram_reg(qdev, spir, in fm93c56a_cmd()
398 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | in fm93c56a_cmd()
400 ql_write_nvram_reg(qdev, spir, in fm93c56a_cmd()
401 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | in fm93c56a_cmd()
403 ql_write_nvram_reg(qdev, spir, in fm93c56a_cmd()
404 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | in fm93c56a_cmd()
416 ql_write_nvram_reg(qdev, spir, in fm93c56a_cmd()
418 qdev->eeprom_cmd_data | dataBit)); in fm93c56a_cmd()
421 ql_write_nvram_reg(qdev, spir, in fm93c56a_cmd()
422 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | in fm93c56a_cmd()
424 ql_write_nvram_reg(qdev, spir, in fm93c56a_cmd()
425 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | in fm93c56a_cmd()
441 ql_write_nvram_reg(qdev, spir, in fm93c56a_cmd()
443 qdev->eeprom_cmd_data | dataBit)); in fm93c56a_cmd()
446 ql_write_nvram_reg(qdev, spir, in fm93c56a_cmd()
447 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | in fm93c56a_cmd()
449 ql_write_nvram_reg(qdev, spir, in fm93c56a_cmd()
450 (ISP_NVRAM_MASK | qdev->eeprom_cmd_data | in fm93c56a_cmd()
459 static void fm93c56a_deselect(struct ql3_adapter *qdev) in fm93c56a_deselect() argument
462 qdev->mem_map_registers; in fm93c56a_deselect()
465 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0; in fm93c56a_deselect()
466 ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); in fm93c56a_deselect()
472 static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value) in fm93c56a_datain() argument
478 qdev->mem_map_registers; in fm93c56a_datain()
484 ql_write_nvram_reg(qdev, spir, in fm93c56a_datain()
485 ISP_NVRAM_MASK | qdev->eeprom_cmd_data | in fm93c56a_datain()
487 ql_write_nvram_reg(qdev, spir, in fm93c56a_datain()
488 ISP_NVRAM_MASK | qdev->eeprom_cmd_data | in fm93c56a_datain()
490 dataBit = (ql_read_common_reg(qdev, spir) & in fm93c56a_datain()
500 static void eeprom_readword(struct ql3_adapter *qdev, in eeprom_readword() argument
503 fm93c56a_select(qdev); in eeprom_readword()
504 fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr); in eeprom_readword()
505 fm93c56a_datain(qdev, value); in eeprom_readword()
506 fm93c56a_deselect(qdev); in eeprom_readword()
519 static int ql_get_nvram_params(struct ql3_adapter *qdev) in ql_get_nvram_params() argument
526 spin_lock_irqsave(&qdev->hw_lock, hw_flags); in ql_get_nvram_params()
528 pEEPROMData = (u16 *)&qdev->nvram_data; in ql_get_nvram_params()
529 qdev->eeprom_cmd_data = 0; in ql_get_nvram_params()
530 if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK, in ql_get_nvram_params()
531 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * in ql_get_nvram_params()
534 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_get_nvram_params()
539 eeprom_readword(qdev, index, pEEPROMData); in ql_get_nvram_params()
543 ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK); in ql_get_nvram_params()
546 netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n", in ql_get_nvram_params()
548 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_get_nvram_params()
552 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_get_nvram_params()
560 static int ql_wait_for_mii_ready(struct ql3_adapter *qdev) in ql_wait_for_mii_ready() argument
563 qdev->mem_map_registers; in ql_wait_for_mii_ready()
568 temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg); in ql_wait_for_mii_ready()
577 static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev) in ql_mii_enable_scan_mode() argument
580 qdev->mem_map_registers; in ql_mii_enable_scan_mode()
583 if (qdev->numPorts > 1) { in ql_mii_enable_scan_mode()
596 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, in ql_mii_enable_scan_mode()
599 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, in ql_mii_enable_scan_mode()
604 static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev) in ql_mii_disable_scan_mode() argument
608 qdev->mem_map_registers; in ql_mii_disable_scan_mode()
611 if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) & in ql_mii_disable_scan_mode()
624 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, in ql_mii_disable_scan_mode()
627 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, in ql_mii_disable_scan_mode()
634 static int ql_mii_write_reg_ex(struct ql3_adapter *qdev, in ql_mii_write_reg_ex() argument
638 qdev->mem_map_registers; in ql_mii_write_reg_ex()
641 scanWasEnabled = ql_mii_disable_scan_mode(qdev); in ql_mii_write_reg_ex()
643 if (ql_wait_for_mii_ready(qdev)) { in ql_mii_write_reg_ex()
644 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); in ql_mii_write_reg_ex()
648 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, in ql_mii_write_reg_ex()
651 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); in ql_mii_write_reg_ex()
654 if (ql_wait_for_mii_ready(qdev)) { in ql_mii_write_reg_ex()
655 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); in ql_mii_write_reg_ex()
660 ql_mii_enable_scan_mode(qdev); in ql_mii_write_reg_ex()
665 static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr, in ql_mii_read_reg_ex() argument
669 qdev->mem_map_registers; in ql_mii_read_reg_ex()
673 scanWasEnabled = ql_mii_disable_scan_mode(qdev); in ql_mii_read_reg_ex()
675 if (ql_wait_for_mii_ready(qdev)) { in ql_mii_read_reg_ex()
676 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); in ql_mii_read_reg_ex()
680 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, in ql_mii_read_reg_ex()
683 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, in ql_mii_read_reg_ex()
686 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, in ql_mii_read_reg_ex()
690 if (ql_wait_for_mii_ready(qdev)) { in ql_mii_read_reg_ex()
691 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); in ql_mii_read_reg_ex()
695 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); in ql_mii_read_reg_ex()
699 ql_mii_enable_scan_mode(qdev); in ql_mii_read_reg_ex()
704 static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value) in ql_mii_write_reg() argument
707 qdev->mem_map_registers; in ql_mii_write_reg()
709 ql_mii_disable_scan_mode(qdev); in ql_mii_write_reg()
711 if (ql_wait_for_mii_ready(qdev)) { in ql_mii_write_reg()
712 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); in ql_mii_write_reg()
716 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, in ql_mii_write_reg()
717 qdev->PHYAddr | regAddr); in ql_mii_write_reg()
719 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value); in ql_mii_write_reg()
722 if (ql_wait_for_mii_ready(qdev)) { in ql_mii_write_reg()
723 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); in ql_mii_write_reg()
727 ql_mii_enable_scan_mode(qdev); in ql_mii_write_reg()
732 static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value) in ql_mii_read_reg() argument
736 qdev->mem_map_registers; in ql_mii_read_reg()
738 ql_mii_disable_scan_mode(qdev); in ql_mii_read_reg()
740 if (ql_wait_for_mii_ready(qdev)) { in ql_mii_read_reg()
741 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); in ql_mii_read_reg()
745 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg, in ql_mii_read_reg()
746 qdev->PHYAddr | regAddr); in ql_mii_read_reg()
748 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, in ql_mii_read_reg()
751 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, in ql_mii_read_reg()
755 if (ql_wait_for_mii_ready(qdev)) { in ql_mii_read_reg()
756 netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG); in ql_mii_read_reg()
760 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg); in ql_mii_read_reg()
763 ql_mii_enable_scan_mode(qdev); in ql_mii_read_reg()
768 static void ql_petbi_reset(struct ql3_adapter *qdev) in ql_petbi_reset() argument
770 ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET); in ql_petbi_reset()
773 static void ql_petbi_start_neg(struct ql3_adapter *qdev) in ql_petbi_start_neg() argument
778 ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg); in ql_petbi_start_neg()
780 ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg); in ql_petbi_start_neg()
782 ql_mii_write_reg(qdev, PETBI_NEG_ADVER, in ql_petbi_start_neg()
785 ql_mii_write_reg(qdev, PETBI_CONTROL_REG, in ql_petbi_start_neg()
791 static void ql_petbi_reset_ex(struct ql3_adapter *qdev) in ql_petbi_reset_ex() argument
793 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET, in ql_petbi_reset_ex()
794 PHYAddr[qdev->mac_index]); in ql_petbi_reset_ex()
797 static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev) in ql_petbi_start_neg_ex() argument
802 ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, in ql_petbi_start_neg_ex()
803 PHYAddr[qdev->mac_index]); in ql_petbi_start_neg_ex()
805 ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, in ql_petbi_start_neg_ex()
806 PHYAddr[qdev->mac_index]); in ql_petbi_start_neg_ex()
808 ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER, in ql_petbi_start_neg_ex()
810 PHYAddr[qdev->mac_index]); in ql_petbi_start_neg_ex()
812 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, in ql_petbi_start_neg_ex()
815 PHYAddr[qdev->mac_index]); in ql_petbi_start_neg_ex()
818 static void ql_petbi_init(struct ql3_adapter *qdev) in ql_petbi_init() argument
820 ql_petbi_reset(qdev); in ql_petbi_init()
821 ql_petbi_start_neg(qdev); in ql_petbi_init()
824 static void ql_petbi_init_ex(struct ql3_adapter *qdev) in ql_petbi_init_ex() argument
826 ql_petbi_reset_ex(qdev); in ql_petbi_init_ex()
827 ql_petbi_start_neg_ex(qdev); in ql_petbi_init_ex()
830 static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev) in ql_is_petbi_neg_pause() argument
834 if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0) in ql_is_petbi_neg_pause()
840 static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr) in phyAgereSpecificInit() argument
842 netdev_info(qdev->ndev, "enabling Agere specific PHY\n"); in phyAgereSpecificInit()
844 ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr); in phyAgereSpecificInit()
846 ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr); in phyAgereSpecificInit()
848 ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr); in phyAgereSpecificInit()
850 ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr); in phyAgereSpecificInit()
852 ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr); in phyAgereSpecificInit()
854 ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr); in phyAgereSpecificInit()
856 ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr); in phyAgereSpecificInit()
858 ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr); in phyAgereSpecificInit()
860 ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr); in phyAgereSpecificInit()
862 ql_mii_write_reg_ex(qdev, 0x11, in phyAgereSpecificInit()
863 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr); in phyAgereSpecificInit()
869 ql_mii_write_reg(qdev, 0x12, 0x840a); in phyAgereSpecificInit()
870 ql_mii_write_reg(qdev, 0x00, 0x1140); in phyAgereSpecificInit()
871 ql_mii_write_reg(qdev, 0x1c, 0xfaf0); in phyAgereSpecificInit()
874 static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev, in getPhyType() argument
897 netdev_info(qdev->ndev, "Phy: %s\n", in getPhyType()
907 static int ql_phy_get_speed(struct ql3_adapter *qdev) in ql_phy_get_speed() argument
911 switch (qdev->phyType) { in ql_phy_get_speed()
913 if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0) in ql_phy_get_speed()
920 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0) in ql_phy_get_speed()
938 static int ql_is_full_dup(struct ql3_adapter *qdev) in ql_is_full_dup() argument
942 switch (qdev->phyType) { in ql_is_full_dup()
944 if (ql_mii_read_reg(qdev, 0x1A, &reg)) in ql_is_full_dup()
951 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0) in ql_is_full_dup()
958 static int ql_is_phy_neg_pause(struct ql3_adapter *qdev) in ql_is_phy_neg_pause() argument
962 if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0) in ql_is_phy_neg_pause()
968 static int PHY_Setup(struct ql3_adapter *qdev) in PHY_Setup() argument
977 err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1); in PHY_Setup()
979 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n"); in PHY_Setup()
983 err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2); in PHY_Setup()
985 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n"); in PHY_Setup()
994 if (qdev->mac_index == 0) in PHY_Setup()
999 err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr); in PHY_Setup()
1001 netdev_err(qdev->ndev, in PHY_Setup()
1006 err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr); in PHY_Setup()
1008 netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n"); in PHY_Setup()
1018 qdev->phyType = getPhyType(qdev, reg1, reg2); in PHY_Setup()
1020 if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) { in PHY_Setup()
1022 phyAgereSpecificInit(qdev, miiAddr); in PHY_Setup()
1023 } else if (qdev->phyType == PHY_TYPE_UNKNOWN) { in PHY_Setup()
1024 netdev_err(qdev->ndev, "PHY is unknown\n"); in PHY_Setup()
1034 static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable) in ql_mac_enable() argument
1037 qdev->mem_map_registers; in ql_mac_enable()
1045 if (qdev->mac_index) in ql_mac_enable()
1046 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); in ql_mac_enable()
1048 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); in ql_mac_enable()
1054 static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable) in ql_mac_cfg_soft_reset() argument
1057 qdev->mem_map_registers; in ql_mac_cfg_soft_reset()
1065 if (qdev->mac_index) in ql_mac_cfg_soft_reset()
1066 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); in ql_mac_cfg_soft_reset()
1068 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); in ql_mac_cfg_soft_reset()
1074 static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable) in ql_mac_cfg_gig() argument
1077 qdev->mem_map_registers; in ql_mac_cfg_gig()
1085 if (qdev->mac_index) in ql_mac_cfg_gig()
1086 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); in ql_mac_cfg_gig()
1088 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); in ql_mac_cfg_gig()
1094 static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable) in ql_mac_cfg_full_dup() argument
1097 qdev->mem_map_registers; in ql_mac_cfg_full_dup()
1105 if (qdev->mac_index) in ql_mac_cfg_full_dup()
1106 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); in ql_mac_cfg_full_dup()
1108 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); in ql_mac_cfg_full_dup()
1114 static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable) in ql_mac_cfg_pause() argument
1117 qdev->mem_map_registers; in ql_mac_cfg_pause()
1127 if (qdev->mac_index) in ql_mac_cfg_pause()
1128 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value); in ql_mac_cfg_pause()
1130 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value); in ql_mac_cfg_pause()
1136 static int ql_is_fiber(struct ql3_adapter *qdev) in ql_is_fiber() argument
1139 qdev->mem_map_registers; in ql_is_fiber()
1143 switch (qdev->mac_index) { in ql_is_fiber()
1152 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); in ql_is_fiber()
1156 static int ql_is_auto_cfg(struct ql3_adapter *qdev) in ql_is_auto_cfg() argument
1159 ql_mii_read_reg(qdev, 0x00, &reg); in ql_is_auto_cfg()
1166 static int ql_is_auto_neg_complete(struct ql3_adapter *qdev) in ql_is_auto_neg_complete() argument
1169 qdev->mem_map_registers; in ql_is_auto_neg_complete()
1173 switch (qdev->mac_index) { in ql_is_auto_neg_complete()
1182 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); in ql_is_auto_neg_complete()
1184 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n"); in ql_is_auto_neg_complete()
1187 netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n"); in ql_is_auto_neg_complete()
1194 static int ql_is_neg_pause(struct ql3_adapter *qdev) in ql_is_neg_pause() argument
1196 if (ql_is_fiber(qdev)) in ql_is_neg_pause()
1197 return ql_is_petbi_neg_pause(qdev); in ql_is_neg_pause()
1199 return ql_is_phy_neg_pause(qdev); in ql_is_neg_pause()
1202 static int ql_auto_neg_error(struct ql3_adapter *qdev) in ql_auto_neg_error() argument
1205 qdev->mem_map_registers; in ql_auto_neg_error()
1209 switch (qdev->mac_index) { in ql_auto_neg_error()
1217 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); in ql_auto_neg_error()
1221 static u32 ql_get_link_speed(struct ql3_adapter *qdev) in ql_get_link_speed() argument
1223 if (ql_is_fiber(qdev)) in ql_get_link_speed()
1226 return ql_phy_get_speed(qdev); in ql_get_link_speed()
1229 static int ql_is_link_full_dup(struct ql3_adapter *qdev) in ql_is_link_full_dup() argument
1231 if (ql_is_fiber(qdev)) in ql_is_link_full_dup()
1234 return ql_is_full_dup(qdev); in ql_is_link_full_dup()
1240 static int ql_link_down_detect(struct ql3_adapter *qdev) in ql_link_down_detect() argument
1243 qdev->mem_map_registers; in ql_link_down_detect()
1247 switch (qdev->mac_index) { in ql_link_down_detect()
1257 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); in ql_link_down_detect()
1264 static int ql_link_down_detect_clear(struct ql3_adapter *qdev) in ql_link_down_detect_clear() argument
1267 qdev->mem_map_registers; in ql_link_down_detect_clear()
1269 switch (qdev->mac_index) { in ql_link_down_detect_clear()
1271 ql_write_common_reg(qdev, in ql_link_down_detect_clear()
1278 ql_write_common_reg(qdev, in ql_link_down_detect_clear()
1294 static int ql_this_adapter_controls_port(struct ql3_adapter *qdev) in ql_this_adapter_controls_port() argument
1297 qdev->mem_map_registers; in ql_this_adapter_controls_port()
1301 switch (qdev->mac_index) { in ql_this_adapter_controls_port()
1312 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); in ql_this_adapter_controls_port()
1314 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, in ql_this_adapter_controls_port()
1319 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n"); in ql_this_adapter_controls_port()
1323 static void ql_phy_reset_ex(struct ql3_adapter *qdev) in ql_phy_reset_ex() argument
1325 ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, in ql_phy_reset_ex()
1326 PHYAddr[qdev->mac_index]); in ql_phy_reset_ex()
1329 static void ql_phy_start_neg_ex(struct ql3_adapter *qdev) in ql_phy_start_neg_ex() argument
1334 if (qdev->phyType == PHY_AGERE_ET1011C) in ql_phy_start_neg_ex()
1335 ql_mii_write_reg(qdev, 0x13, 0x0000); in ql_phy_start_neg_ex()
1338 if (qdev->mac_index == 0) in ql_phy_start_neg_ex()
1340 qdev->nvram_data.macCfg_port0.portConfiguration; in ql_phy_start_neg_ex()
1343 qdev->nvram_data.macCfg_port1.portConfiguration; in ql_phy_start_neg_ex()
1351 ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg, in ql_phy_start_neg_ex()
1352 PHYAddr[qdev->mac_index]); in ql_phy_start_neg_ex()
1362 ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg, in ql_phy_start_neg_ex()
1363 PHYAddr[qdev->mac_index]); in ql_phy_start_neg_ex()
1366 ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg, in ql_phy_start_neg_ex()
1367 PHYAddr[qdev->mac_index]); in ql_phy_start_neg_ex()
1392 ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg, in ql_phy_start_neg_ex()
1393 PHYAddr[qdev->mac_index]); in ql_phy_start_neg_ex()
1395 ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]); in ql_phy_start_neg_ex()
1397 ql_mii_write_reg_ex(qdev, CONTROL_REG, in ql_phy_start_neg_ex()
1399 PHYAddr[qdev->mac_index]); in ql_phy_start_neg_ex()
1402 static void ql_phy_init_ex(struct ql3_adapter *qdev) in ql_phy_init_ex() argument
1404 ql_phy_reset_ex(qdev); in ql_phy_init_ex()
1405 PHY_Setup(qdev); in ql_phy_init_ex()
1406 ql_phy_start_neg_ex(qdev); in ql_phy_init_ex()
1412 static u32 ql_get_link_state(struct ql3_adapter *qdev) in ql_get_link_state() argument
1415 qdev->mem_map_registers; in ql_get_link_state()
1419 switch (qdev->mac_index) { in ql_get_link_state()
1428 temp = ql_read_page0_reg(qdev, &port_regs->portStatus); in ql_get_link_state()
1437 static int ql_port_start(struct ql3_adapter *qdev) in ql_port_start() argument
1439 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, in ql_port_start()
1440 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * in ql_port_start()
1442 netdev_err(qdev->ndev, "Could not get hw lock for GIO\n"); in ql_port_start()
1446 if (ql_is_fiber(qdev)) { in ql_port_start()
1447 ql_petbi_init(qdev); in ql_port_start()
1450 ql_phy_init_ex(qdev); in ql_port_start()
1453 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); in ql_port_start()
1457 static int ql_finish_auto_neg(struct ql3_adapter *qdev) in ql_finish_auto_neg() argument
1460 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, in ql_finish_auto_neg()
1461 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * in ql_finish_auto_neg()
1465 if (!ql_auto_neg_error(qdev)) { in ql_finish_auto_neg()
1466 if (test_bit(QL_LINK_MASTER, &qdev->flags)) { in ql_finish_auto_neg()
1468 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, in ql_finish_auto_neg()
1470 ql_mac_cfg_soft_reset(qdev, 1); in ql_finish_auto_neg()
1471 ql_mac_cfg_gig(qdev, in ql_finish_auto_neg()
1473 (qdev) == in ql_finish_auto_neg()
1475 ql_mac_cfg_full_dup(qdev, in ql_finish_auto_neg()
1477 (qdev)); in ql_finish_auto_neg()
1478 ql_mac_cfg_pause(qdev, in ql_finish_auto_neg()
1480 (qdev)); in ql_finish_auto_neg()
1481 ql_mac_cfg_soft_reset(qdev, 0); in ql_finish_auto_neg()
1484 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, in ql_finish_auto_neg()
1486 ql_mac_enable(qdev, 1); in ql_finish_auto_neg()
1489 qdev->port_link_state = LS_UP; in ql_finish_auto_neg()
1490 netif_start_queue(qdev->ndev); in ql_finish_auto_neg()
1491 netif_carrier_on(qdev->ndev); in ql_finish_auto_neg()
1492 netif_info(qdev, link, qdev->ndev, in ql_finish_auto_neg()
1494 ql_get_link_speed(qdev), in ql_finish_auto_neg()
1495 ql_is_link_full_dup(qdev) ? "full" : "half"); in ql_finish_auto_neg()
1499 if (test_bit(QL_LINK_MASTER, &qdev->flags)) { in ql_finish_auto_neg()
1500 netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, in ql_finish_auto_neg()
1506 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); in ql_finish_auto_neg()
1507 if (ql_port_start(qdev)) /* Restart port */ in ql_finish_auto_neg()
1512 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); in ql_finish_auto_neg()
1518 struct ql3_adapter *qdev = in ql_link_state_machine_work() local
1524 spin_lock_irqsave(&qdev->hw_lock, hw_flags); in ql_link_state_machine_work()
1526 curr_link_state = ql_get_link_state(qdev); in ql_link_state_machine_work()
1528 if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) { in ql_link_state_machine_work()
1529 netif_info(qdev, link, qdev->ndev, in ql_link_state_machine_work()
1532 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_link_state_machine_work()
1535 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); in ql_link_state_machine_work()
1540 switch (qdev->port_link_state) { in ql_link_state_machine_work()
1542 if (test_bit(QL_LINK_MASTER, &qdev->flags)) in ql_link_state_machine_work()
1543 ql_port_start(qdev); in ql_link_state_machine_work()
1544 qdev->port_link_state = LS_DOWN; in ql_link_state_machine_work()
1549 netif_info(qdev, link, qdev->ndev, "Link is up\n"); in ql_link_state_machine_work()
1550 if (ql_is_auto_neg_complete(qdev)) in ql_link_state_machine_work()
1551 ql_finish_auto_neg(qdev); in ql_link_state_machine_work()
1553 if (qdev->port_link_state == LS_UP) in ql_link_state_machine_work()
1554 ql_link_down_detect_clear(qdev); in ql_link_state_machine_work()
1556 qdev->port_link_state = LS_UP; in ql_link_state_machine_work()
1566 netif_info(qdev, link, qdev->ndev, "Link is down\n"); in ql_link_state_machine_work()
1567 qdev->port_link_state = LS_DOWN; in ql_link_state_machine_work()
1569 if (ql_link_down_detect(qdev)) in ql_link_state_machine_work()
1570 qdev->port_link_state = LS_DOWN; in ql_link_state_machine_work()
1573 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_link_state_machine_work()
1576 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); in ql_link_state_machine_work()
1582 static void ql_get_phy_owner(struct ql3_adapter *qdev) in ql_get_phy_owner() argument
1584 if (ql_this_adapter_controls_port(qdev)) in ql_get_phy_owner()
1585 set_bit(QL_LINK_MASTER, &qdev->flags); in ql_get_phy_owner()
1587 clear_bit(QL_LINK_MASTER, &qdev->flags); in ql_get_phy_owner()
1593 static void ql_init_scan_mode(struct ql3_adapter *qdev) in ql_init_scan_mode() argument
1595 ql_mii_enable_scan_mode(qdev); in ql_init_scan_mode()
1597 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { in ql_init_scan_mode()
1598 if (ql_this_adapter_controls_port(qdev)) in ql_init_scan_mode()
1599 ql_petbi_init_ex(qdev); in ql_init_scan_mode()
1601 if (ql_this_adapter_controls_port(qdev)) in ql_init_scan_mode()
1602 ql_phy_init_ex(qdev); in ql_init_scan_mode()
1612 static int ql_mii_setup(struct ql3_adapter *qdev) in ql_mii_setup() argument
1616 qdev->mem_map_registers; in ql_mii_setup()
1618 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, in ql_mii_setup()
1619 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * in ql_mii_setup()
1623 if (qdev->device_id == QL3032_DEVICE_ID) in ql_mii_setup()
1624 ql_write_page0_reg(qdev, in ql_mii_setup()
1630 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg, in ql_mii_setup()
1633 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); in ql_mii_setup()
1649 static u32 ql_supported_modes(struct ql3_adapter *qdev) in ql_supported_modes() argument
1651 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) in ql_supported_modes()
1657 static int ql_get_auto_cfg_status(struct ql3_adapter *qdev) in ql_get_auto_cfg_status() argument
1661 spin_lock_irqsave(&qdev->hw_lock, hw_flags); in ql_get_auto_cfg_status()
1662 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, in ql_get_auto_cfg_status()
1664 (qdev->mac_index) * 2) << 7)) { in ql_get_auto_cfg_status()
1665 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_get_auto_cfg_status()
1668 status = ql_is_auto_cfg(qdev); in ql_get_auto_cfg_status()
1669 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); in ql_get_auto_cfg_status()
1670 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_get_auto_cfg_status()
1674 static u32 ql_get_speed(struct ql3_adapter *qdev) in ql_get_speed() argument
1678 spin_lock_irqsave(&qdev->hw_lock, hw_flags); in ql_get_speed()
1679 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, in ql_get_speed()
1681 (qdev->mac_index) * 2) << 7)) { in ql_get_speed()
1682 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_get_speed()
1685 status = ql_get_link_speed(qdev); in ql_get_speed()
1686 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); in ql_get_speed()
1687 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_get_speed()
1691 static int ql_get_full_dup(struct ql3_adapter *qdev) in ql_get_full_dup() argument
1695 spin_lock_irqsave(&qdev->hw_lock, hw_flags); in ql_get_full_dup()
1696 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, in ql_get_full_dup()
1698 (qdev->mac_index) * 2) << 7)) { in ql_get_full_dup()
1699 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_get_full_dup()
1702 status = ql_is_link_full_dup(qdev); in ql_get_full_dup()
1703 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); in ql_get_full_dup()
1704 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_get_full_dup()
1711 struct ql3_adapter *qdev = netdev_priv(ndev); in ql_get_link_ksettings() local
1714 supported = ql_supported_modes(qdev); in ql_get_link_ksettings()
1716 if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) { in ql_get_link_ksettings()
1720 cmd->base.phy_address = qdev->PHYAddr; in ql_get_link_ksettings()
1722 advertising = ql_supported_modes(qdev); in ql_get_link_ksettings()
1723 cmd->base.autoneg = ql_get_auto_cfg_status(qdev); in ql_get_link_ksettings()
1724 cmd->base.speed = ql_get_speed(qdev); in ql_get_link_ksettings()
1725 cmd->base.duplex = ql_get_full_dup(qdev); in ql_get_link_ksettings()
1738 struct ql3_adapter *qdev = netdev_priv(ndev); in ql_get_drvinfo() local
1742 strscpy(drvinfo->bus_info, pci_name(qdev->pdev), in ql_get_drvinfo()
1748 struct ql3_adapter *qdev = netdev_priv(ndev); in ql_get_msglevel() local
1749 return qdev->msg_enable; in ql_get_msglevel()
1754 struct ql3_adapter *qdev = netdev_priv(ndev); in ql_set_msglevel() local
1755 qdev->msg_enable = value; in ql_set_msglevel()
1761 struct ql3_adapter *qdev = netdev_priv(ndev); in ql_get_pauseparam() local
1763 qdev->mem_map_registers; in ql_get_pauseparam()
1766 if (qdev->mac_index == 0) in ql_get_pauseparam()
1767 reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg); in ql_get_pauseparam()
1769 reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg); in ql_get_pauseparam()
1771 pause->autoneg = ql_get_auto_cfg_status(qdev); in ql_get_pauseparam()
1785 static int ql_populate_free_queue(struct ql3_adapter *qdev) in ql_populate_free_queue() argument
1787 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head; in ql_populate_free_queue()
1794 netdev_alloc_skb(qdev->ndev, in ql_populate_free_queue()
1795 qdev->lrg_buffer_len); in ql_populate_free_queue()
1797 netdev_printk(KERN_DEBUG, qdev->ndev, in ql_populate_free_queue()
1806 map = dma_map_single(&qdev->pdev->dev, in ql_populate_free_queue()
1808 qdev->lrg_buffer_len - QL_HEADER_SPACE, in ql_populate_free_queue()
1811 err = dma_mapping_error(&qdev->pdev->dev, map); in ql_populate_free_queue()
1813 netdev_err(qdev->ndev, in ql_populate_free_queue()
1828 qdev->lrg_buffer_len - in ql_populate_free_queue()
1830 --qdev->lrg_buf_skb_check; in ql_populate_free_queue()
1831 if (!qdev->lrg_buf_skb_check) in ql_populate_free_queue()
1843 static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev) in ql_update_small_bufq_prod_index() argument
1846 qdev->mem_map_registers; in ql_update_small_bufq_prod_index()
1848 if (qdev->small_buf_release_cnt >= 16) { in ql_update_small_bufq_prod_index()
1849 while (qdev->small_buf_release_cnt >= 16) { in ql_update_small_bufq_prod_index()
1850 qdev->small_buf_q_producer_index++; in ql_update_small_bufq_prod_index()
1852 if (qdev->small_buf_q_producer_index == in ql_update_small_bufq_prod_index()
1854 qdev->small_buf_q_producer_index = 0; in ql_update_small_bufq_prod_index()
1855 qdev->small_buf_release_cnt -= 8; in ql_update_small_bufq_prod_index()
1858 writel_relaxed(qdev->small_buf_q_producer_index, in ql_update_small_bufq_prod_index()
1866 static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev) in ql_update_lrg_bufq_prod_index() argument
1872 qdev->mem_map_registers; in ql_update_lrg_bufq_prod_index()
1874 if ((qdev->lrg_buf_free_count >= 8) && in ql_update_lrg_bufq_prod_index()
1875 (qdev->lrg_buf_release_cnt >= 16)) { in ql_update_lrg_bufq_prod_index()
1877 if (qdev->lrg_buf_skb_check) in ql_update_lrg_bufq_prod_index()
1878 if (!ql_populate_free_queue(qdev)) in ql_update_lrg_bufq_prod_index()
1881 lrg_buf_q_ele = qdev->lrg_buf_next_free; in ql_update_lrg_bufq_prod_index()
1883 while ((qdev->lrg_buf_release_cnt >= 16) && in ql_update_lrg_bufq_prod_index()
1884 (qdev->lrg_buf_free_count >= 8)) { in ql_update_lrg_bufq_prod_index()
1888 ql_get_from_lrg_buf_free_list(qdev); in ql_update_lrg_bufq_prod_index()
1895 qdev->lrg_buf_release_cnt--; in ql_update_lrg_bufq_prod_index()
1898 qdev->lrg_buf_q_producer_index++; in ql_update_lrg_bufq_prod_index()
1900 if (qdev->lrg_buf_q_producer_index == in ql_update_lrg_bufq_prod_index()
1901 qdev->num_lbufq_entries) in ql_update_lrg_bufq_prod_index()
1902 qdev->lrg_buf_q_producer_index = 0; in ql_update_lrg_bufq_prod_index()
1904 if (qdev->lrg_buf_q_producer_index == in ql_update_lrg_bufq_prod_index()
1905 (qdev->num_lbufq_entries - 1)) { in ql_update_lrg_bufq_prod_index()
1906 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr; in ql_update_lrg_bufq_prod_index()
1910 qdev->lrg_buf_next_free = lrg_buf_q_ele; in ql_update_lrg_bufq_prod_index()
1911 writel(qdev->lrg_buf_q_producer_index, in ql_update_lrg_bufq_prod_index()
1916 static void ql_process_mac_tx_intr(struct ql3_adapter *qdev, in ql_process_mac_tx_intr() argument
1923 netdev_warn(qdev->ndev, in ql_process_mac_tx_intr()
1927 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id]; in ql_process_mac_tx_intr()
1931 netdev_err(qdev->ndev, in ql_process_mac_tx_intr()
1934 qdev->ndev->stats.tx_errors++; in ql_process_mac_tx_intr()
1939 netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n", in ql_process_mac_tx_intr()
1942 qdev->ndev->stats.tx_errors++; in ql_process_mac_tx_intr()
1946 dma_unmap_single(&qdev->pdev->dev, in ql_process_mac_tx_intr()
1952 dma_unmap_page(&qdev->pdev->dev, in ql_process_mac_tx_intr()
1958 qdev->ndev->stats.tx_packets++; in ql_process_mac_tx_intr()
1959 qdev->ndev->stats.tx_bytes += tx_cb->skb->len; in ql_process_mac_tx_intr()
1966 atomic_inc(&qdev->tx_count); in ql_process_mac_tx_intr()
1969 static void ql_get_sbuf(struct ql3_adapter *qdev) in ql_get_sbuf() argument
1971 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS) in ql_get_sbuf()
1972 qdev->small_buf_index = 0; in ql_get_sbuf()
1973 qdev->small_buf_release_cnt++; in ql_get_sbuf()
1976 static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev) in ql_get_lbuf() argument
1979 lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index]; in ql_get_lbuf()
1980 qdev->lrg_buf_release_cnt++; in ql_get_lbuf()
1981 if (++qdev->lrg_buf_index == qdev->num_large_buffers) in ql_get_lbuf()
1982 qdev->lrg_buf_index = 0; in ql_get_lbuf()
1998 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev, in ql_process_mac_rx_intr() argument
2009 ql_get_sbuf(qdev); in ql_process_mac_rx_intr()
2011 if (qdev->device_id == QL3022_DEVICE_ID) in ql_process_mac_rx_intr()
2012 lrg_buf_cb1 = ql_get_lbuf(qdev); in ql_process_mac_rx_intr()
2015 lrg_buf_cb2 = ql_get_lbuf(qdev); in ql_process_mac_rx_intr()
2018 qdev->ndev->stats.rx_packets++; in ql_process_mac_rx_intr()
2019 qdev->ndev->stats.rx_bytes += length; in ql_process_mac_rx_intr()
2022 dma_unmap_single(&qdev->pdev->dev, in ql_process_mac_rx_intr()
2027 skb->protocol = eth_type_trans(skb, qdev->ndev); in ql_process_mac_rx_intr()
2029 napi_gro_receive(&qdev->napi, skb); in ql_process_mac_rx_intr()
2032 if (qdev->device_id == QL3022_DEVICE_ID) in ql_process_mac_rx_intr()
2033 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); in ql_process_mac_rx_intr()
2034 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); in ql_process_mac_rx_intr()
2037 static void ql_process_macip_rx_intr(struct ql3_adapter *qdev, in ql_process_macip_rx_intr() argument
2043 struct net_device *ndev = qdev->ndev; in ql_process_macip_rx_intr()
2051 ql_get_sbuf(qdev); in ql_process_macip_rx_intr()
2053 if (qdev->device_id == QL3022_DEVICE_ID) { in ql_process_macip_rx_intr()
2055 lrg_buf_cb1 = ql_get_lbuf(qdev); in ql_process_macip_rx_intr()
2063 lrg_buf_cb2 = ql_get_lbuf(qdev); in ql_process_macip_rx_intr()
2067 dma_unmap_single(&qdev->pdev->dev, in ql_process_macip_rx_intr()
2073 if (qdev->device_id == QL3022_DEVICE_ID) { in ql_process_macip_rx_intr()
2096 skb2->protocol = eth_type_trans(skb2, qdev->ndev); in ql_process_macip_rx_intr()
2098 napi_gro_receive(&qdev->napi, skb2); in ql_process_macip_rx_intr()
2103 if (qdev->device_id == QL3022_DEVICE_ID) in ql_process_macip_rx_intr()
2104 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1); in ql_process_macip_rx_intr()
2105 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2); in ql_process_macip_rx_intr()
2108 static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget) in ql_tx_rx_clean() argument
2111 struct net_device *ndev = qdev->ndev; in ql_tx_rx_clean()
2115 while ((le32_to_cpu(*(qdev->prsp_producer_index)) != in ql_tx_rx_clean()
2116 qdev->rsp_consumer_index) && (work_done < budget)) { in ql_tx_rx_clean()
2118 net_rsp = qdev->rsp_current; in ql_tx_rx_clean()
2124 if (qdev->device_id == QL3032_DEVICE_ID) in ql_tx_rx_clean()
2130 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *) in ql_tx_rx_clean()
2136 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *) in ql_tx_rx_clean()
2143 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *) in ql_tx_rx_clean()
2161 qdev->rsp_consumer_index++; in ql_tx_rx_clean()
2163 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) { in ql_tx_rx_clean()
2164 qdev->rsp_consumer_index = 0; in ql_tx_rx_clean()
2165 qdev->rsp_current = qdev->rsp_q_virt_addr; in ql_tx_rx_clean()
2167 qdev->rsp_current++; in ql_tx_rx_clean()
2177 struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi); in ql_poll() local
2179 qdev->mem_map_registers; in ql_poll()
2182 work_done = ql_tx_rx_clean(qdev, budget); in ql_poll()
2187 spin_lock_irqsave(&qdev->hw_lock, flags); in ql_poll()
2188 ql_update_small_bufq_prod_index(qdev); in ql_poll()
2189 ql_update_lrg_bufq_prod_index(qdev); in ql_poll()
2190 writel(qdev->rsp_consumer_index, in ql_poll()
2192 spin_unlock_irqrestore(&qdev->hw_lock, flags); in ql_poll()
2194 ql_enable_interrupts(qdev); in ql_poll()
2203 struct ql3_adapter *qdev = netdev_priv(ndev); in ql3xxx_isr() local
2205 qdev->mem_map_registers; in ql3xxx_isr()
2210 value = ql_read_common_reg_l(qdev, in ql3xxx_isr()
2214 spin_lock(&qdev->adapter_lock); in ql3xxx_isr()
2215 netif_stop_queue(qdev->ndev); in ql3xxx_isr()
2216 netif_carrier_off(qdev->ndev); in ql3xxx_isr()
2217 ql_disable_interrupts(qdev); in ql3xxx_isr()
2218 qdev->port_link_state = LS_DOWN; in ql3xxx_isr()
2219 set_bit(QL_RESET_ACTIVE, &qdev->flags) ; in ql3xxx_isr()
2226 ql_read_page0_reg_l(qdev, in ql3xxx_isr()
2231 set_bit(QL_RESET_START, &qdev->flags) ; in ql3xxx_isr()
2236 set_bit(QL_RESET_PER_SCSI, &qdev->flags) ; in ql3xxx_isr()
2241 queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0); in ql3xxx_isr()
2242 spin_unlock(&qdev->adapter_lock); in ql3xxx_isr()
2244 ql_disable_interrupts(qdev); in ql3xxx_isr()
2245 if (likely(napi_schedule_prep(&qdev->napi))) in ql3xxx_isr()
2246 __napi_schedule(&qdev->napi); in ql3xxx_isr()
2260 static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags) in ql_get_seg_count() argument
2262 if (qdev->device_id == QL3022_DEVICE_ID) in ql_get_seg_count()
2300 static int ql_send_map(struct ql3_adapter *qdev, in ql_send_map() argument
2318 map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE); in ql_send_map()
2320 err = dma_mapping_error(&qdev->pdev->dev, map); in ql_send_map()
2322 netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n", in ql_send_map()
2356 map = dma_map_single(&qdev->pdev->dev, oal, in ql_send_map()
2360 err = dma_mapping_error(&qdev->pdev->dev, map); in ql_send_map()
2362 netdev_err(qdev->ndev, in ql_send_map()
2380 map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), in ql_send_map()
2383 err = dma_mapping_error(&qdev->pdev->dev, map); in ql_send_map()
2385 netdev_err(qdev->ndev, in ql_send_map()
2422 dma_unmap_single(&qdev->pdev->dev, in ql_send_map()
2430 dma_unmap_page(&qdev->pdev->dev, in ql_send_map()
2436 dma_unmap_single(&qdev->pdev->dev, in ql_send_map()
2459 struct ql3_adapter *qdev = netdev_priv(ndev); in ql3xxx_send() local
2461 qdev->mem_map_registers; in ql3xxx_send()
2466 if (unlikely(atomic_read(&qdev->tx_count) < 2)) in ql3xxx_send()
2469 tx_cb = &qdev->tx_buf[qdev->req_producer_index]; in ql3xxx_send()
2470 tx_cb->seg_count = ql_get_seg_count(qdev, in ql3xxx_send()
2480 mac_iocb_ptr->opcode = qdev->mac_ob_opcode; in ql3xxx_send()
2482 mac_iocb_ptr->flags |= qdev->mb_bit_mask; in ql3xxx_send()
2483 mac_iocb_ptr->transaction_id = qdev->req_producer_index; in ql3xxx_send()
2486 if (qdev->device_id == QL3032_DEVICE_ID && in ql3xxx_send()
2490 if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) { in ql3xxx_send()
2496 qdev->req_producer_index++; in ql3xxx_send()
2497 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) in ql3xxx_send()
2498 qdev->req_producer_index = 0; in ql3xxx_send()
2500 ql_write_common_reg_l(qdev, in ql3xxx_send()
2502 qdev->req_producer_index); in ql3xxx_send()
2504 netif_printk(qdev, tx_queued, KERN_DEBUG, ndev, in ql3xxx_send()
2506 qdev->req_producer_index, skb->len); in ql3xxx_send()
2508 atomic_dec(&qdev->tx_count); in ql3xxx_send()
2512 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev) in ql_alloc_net_req_rsp_queues() argument
2514 qdev->req_q_size = in ql_alloc_net_req_rsp_queues()
2517 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb); in ql_alloc_net_req_rsp_queues()
2524 qdev->req_q_virt_addr = in ql_alloc_net_req_rsp_queues()
2525 dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size, in ql_alloc_net_req_rsp_queues()
2526 &qdev->req_q_phy_addr, GFP_KERNEL); in ql_alloc_net_req_rsp_queues()
2528 if ((qdev->req_q_virt_addr == NULL) || in ql_alloc_net_req_rsp_queues()
2529 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) { in ql_alloc_net_req_rsp_queues()
2530 netdev_err(qdev->ndev, "reqQ failed\n"); in ql_alloc_net_req_rsp_queues()
2534 qdev->rsp_q_virt_addr = in ql_alloc_net_req_rsp_queues()
2535 dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->rsp_q_size, in ql_alloc_net_req_rsp_queues()
2536 &qdev->rsp_q_phy_addr, GFP_KERNEL); in ql_alloc_net_req_rsp_queues()
2538 if ((qdev->rsp_q_virt_addr == NULL) || in ql_alloc_net_req_rsp_queues()
2539 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) { in ql_alloc_net_req_rsp_queues()
2540 netdev_err(qdev->ndev, "rspQ allocation failed\n"); in ql_alloc_net_req_rsp_queues()
2541 dma_free_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size, in ql_alloc_net_req_rsp_queues()
2542 qdev->req_q_virt_addr, qdev->req_q_phy_addr); in ql_alloc_net_req_rsp_queues()
2546 set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); in ql_alloc_net_req_rsp_queues()
2551 static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev) in ql_free_net_req_rsp_queues() argument
2553 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) { in ql_free_net_req_rsp_queues()
2554 netdev_info(qdev->ndev, "Already done\n"); in ql_free_net_req_rsp_queues()
2558 dma_free_coherent(&qdev->pdev->dev, qdev->req_q_size, in ql_free_net_req_rsp_queues()
2559 qdev->req_q_virt_addr, qdev->req_q_phy_addr); in ql_free_net_req_rsp_queues()
2561 qdev->req_q_virt_addr = NULL; in ql_free_net_req_rsp_queues()
2563 dma_free_coherent(&qdev->pdev->dev, qdev->rsp_q_size, in ql_free_net_req_rsp_queues()
2564 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr); in ql_free_net_req_rsp_queues()
2566 qdev->rsp_q_virt_addr = NULL; in ql_free_net_req_rsp_queues()
2568 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags); in ql_free_net_req_rsp_queues()
2571 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev) in ql_alloc_buffer_queues() argument
2574 qdev->lrg_buf_q_size = in ql_alloc_buffer_queues()
2575 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry); in ql_alloc_buffer_queues()
2576 if (qdev->lrg_buf_q_size < PAGE_SIZE) in ql_alloc_buffer_queues()
2577 qdev->lrg_buf_q_alloc_size = PAGE_SIZE; in ql_alloc_buffer_queues()
2579 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2; in ql_alloc_buffer_queues()
2581 qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers, in ql_alloc_buffer_queues()
2584 if (qdev->lrg_buf == NULL) in ql_alloc_buffer_queues()
2587 qdev->lrg_buf_q_alloc_virt_addr = in ql_alloc_buffer_queues()
2588 dma_alloc_coherent(&qdev->pdev->dev, in ql_alloc_buffer_queues()
2589 qdev->lrg_buf_q_alloc_size, in ql_alloc_buffer_queues()
2590 &qdev->lrg_buf_q_alloc_phy_addr, GFP_KERNEL); in ql_alloc_buffer_queues()
2592 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) { in ql_alloc_buffer_queues()
2593 netdev_err(qdev->ndev, "lBufQ failed\n"); in ql_alloc_buffer_queues()
2594 kfree(qdev->lrg_buf); in ql_alloc_buffer_queues()
2597 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr; in ql_alloc_buffer_queues()
2598 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr; in ql_alloc_buffer_queues()
2601 qdev->small_buf_q_size = in ql_alloc_buffer_queues()
2603 if (qdev->small_buf_q_size < PAGE_SIZE) in ql_alloc_buffer_queues()
2604 qdev->small_buf_q_alloc_size = PAGE_SIZE; in ql_alloc_buffer_queues()
2606 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2; in ql_alloc_buffer_queues()
2608 qdev->small_buf_q_alloc_virt_addr = in ql_alloc_buffer_queues()
2609 dma_alloc_coherent(&qdev->pdev->dev, in ql_alloc_buffer_queues()
2610 qdev->small_buf_q_alloc_size, in ql_alloc_buffer_queues()
2611 &qdev->small_buf_q_alloc_phy_addr, GFP_KERNEL); in ql_alloc_buffer_queues()
2613 if (qdev->small_buf_q_alloc_virt_addr == NULL) { in ql_alloc_buffer_queues()
2614 netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n"); in ql_alloc_buffer_queues()
2615 dma_free_coherent(&qdev->pdev->dev, in ql_alloc_buffer_queues()
2616 qdev->lrg_buf_q_alloc_size, in ql_alloc_buffer_queues()
2617 qdev->lrg_buf_q_alloc_virt_addr, in ql_alloc_buffer_queues()
2618 qdev->lrg_buf_q_alloc_phy_addr); in ql_alloc_buffer_queues()
2619 kfree(qdev->lrg_buf); in ql_alloc_buffer_queues()
2623 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr; in ql_alloc_buffer_queues()
2624 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr; in ql_alloc_buffer_queues()
2625 set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); in ql_alloc_buffer_queues()
2629 static void ql_free_buffer_queues(struct ql3_adapter *qdev) in ql_free_buffer_queues() argument
2631 if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) { in ql_free_buffer_queues()
2632 netdev_info(qdev->ndev, "Already done\n"); in ql_free_buffer_queues()
2635 kfree(qdev->lrg_buf); in ql_free_buffer_queues()
2636 dma_free_coherent(&qdev->pdev->dev, qdev->lrg_buf_q_alloc_size, in ql_free_buffer_queues()
2637 qdev->lrg_buf_q_alloc_virt_addr, in ql_free_buffer_queues()
2638 qdev->lrg_buf_q_alloc_phy_addr); in ql_free_buffer_queues()
2640 qdev->lrg_buf_q_virt_addr = NULL; in ql_free_buffer_queues()
2642 dma_free_coherent(&qdev->pdev->dev, qdev->small_buf_q_alloc_size, in ql_free_buffer_queues()
2643 qdev->small_buf_q_alloc_virt_addr, in ql_free_buffer_queues()
2644 qdev->small_buf_q_alloc_phy_addr); in ql_free_buffer_queues()
2646 qdev->small_buf_q_virt_addr = NULL; in ql_free_buffer_queues()
2648 clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags); in ql_free_buffer_queues()
2651 static int ql_alloc_small_buffers(struct ql3_adapter *qdev) in ql_alloc_small_buffers() argument
2657 qdev->small_buf_total_size = in ql_alloc_small_buffers()
2661 qdev->small_buf_virt_addr = in ql_alloc_small_buffers()
2662 dma_alloc_coherent(&qdev->pdev->dev, in ql_alloc_small_buffers()
2663 qdev->small_buf_total_size, in ql_alloc_small_buffers()
2664 &qdev->small_buf_phy_addr, GFP_KERNEL); in ql_alloc_small_buffers()
2666 if (qdev->small_buf_virt_addr == NULL) { in ql_alloc_small_buffers()
2667 netdev_err(qdev->ndev, "Failed to get small buffer memory\n"); in ql_alloc_small_buffers()
2671 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr); in ql_alloc_small_buffers()
2672 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr); in ql_alloc_small_buffers()
2674 small_buf_q_entry = qdev->small_buf_q_virt_addr; in ql_alloc_small_buffers()
2679 cpu_to_le32(qdev->small_buf_phy_addr_high); in ql_alloc_small_buffers()
2681 cpu_to_le32(qdev->small_buf_phy_addr_low + in ql_alloc_small_buffers()
2685 qdev->small_buf_index = 0; in ql_alloc_small_buffers()
2686 set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags); in ql_alloc_small_buffers()
2690 static void ql_free_small_buffers(struct ql3_adapter *qdev) in ql_free_small_buffers() argument
2692 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) { in ql_free_small_buffers()
2693 netdev_info(qdev->ndev, "Already done\n"); in ql_free_small_buffers()
2696 if (qdev->small_buf_virt_addr != NULL) { in ql_free_small_buffers()
2697 dma_free_coherent(&qdev->pdev->dev, in ql_free_small_buffers()
2698 qdev->small_buf_total_size, in ql_free_small_buffers()
2699 qdev->small_buf_virt_addr, in ql_free_small_buffers()
2700 qdev->small_buf_phy_addr); in ql_free_small_buffers()
2702 qdev->small_buf_virt_addr = NULL; in ql_free_small_buffers()
2706 static void ql_free_large_buffers(struct ql3_adapter *qdev) in ql_free_large_buffers() argument
2711 for (i = 0; i < qdev->num_large_buffers; i++) { in ql_free_large_buffers()
2712 lrg_buf_cb = &qdev->lrg_buf[i]; in ql_free_large_buffers()
2715 dma_unmap_single(&qdev->pdev->dev, in ql_free_large_buffers()
2726 static void ql_init_large_buffers(struct ql3_adapter *qdev) in ql_init_large_buffers() argument
2730 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr; in ql_init_large_buffers()
2732 for (i = 0; i < qdev->num_large_buffers; i++) { in ql_init_large_buffers()
2733 lrg_buf_cb = &qdev->lrg_buf[i]; in ql_init_large_buffers()
2738 qdev->lrg_buf_index = 0; in ql_init_large_buffers()
2739 qdev->lrg_buf_skb_check = 0; in ql_init_large_buffers()
2742 static int ql_alloc_large_buffers(struct ql3_adapter *qdev) in ql_alloc_large_buffers() argument
2750 for (i = 0; i < qdev->num_large_buffers; i++) { in ql_alloc_large_buffers()
2751 lrg_buf_cb = &qdev->lrg_buf[i]; in ql_alloc_large_buffers()
2754 skb = netdev_alloc_skb(qdev->ndev, in ql_alloc_large_buffers()
2755 qdev->lrg_buffer_len); in ql_alloc_large_buffers()
2758 netdev_err(qdev->ndev, in ql_alloc_large_buffers()
2760 qdev->lrg_buffer_len * 2, i); in ql_alloc_large_buffers()
2761 ql_free_large_buffers(qdev); in ql_alloc_large_buffers()
2770 map = dma_map_single(&qdev->pdev->dev, skb->data, in ql_alloc_large_buffers()
2771 qdev->lrg_buffer_len - QL_HEADER_SPACE, in ql_alloc_large_buffers()
2774 err = dma_mapping_error(&qdev->pdev->dev, map); in ql_alloc_large_buffers()
2776 netdev_err(qdev->ndev, in ql_alloc_large_buffers()
2780 ql_free_large_buffers(qdev); in ql_alloc_large_buffers()
2787 qdev->lrg_buffer_len - in ql_alloc_large_buffers()
2798 static void ql_free_send_free_list(struct ql3_adapter *qdev) in ql_free_send_free_list() argument
2803 tx_cb = &qdev->tx_buf[0]; in ql_free_send_free_list()
2811 static int ql_create_send_free_list(struct ql3_adapter *qdev) in ql_create_send_free_list() argument
2815 struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr; in ql_create_send_free_list()
2820 tx_cb = &qdev->tx_buf[i]; in ql_create_send_free_list()
2831 static int ql_alloc_mem_resources(struct ql3_adapter *qdev) in ql_alloc_mem_resources() argument
2833 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) { in ql_alloc_mem_resources()
2834 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES; in ql_alloc_mem_resources()
2835 qdev->lrg_buffer_len = NORMAL_MTU_SIZE; in ql_alloc_mem_resources()
2836 } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) { in ql_alloc_mem_resources()
2840 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES; in ql_alloc_mem_resources()
2841 qdev->lrg_buffer_len = JUMBO_MTU_SIZE; in ql_alloc_mem_resources()
2843 netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n", in ql_alloc_mem_resources()
2844 qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE); in ql_alloc_mem_resources()
2847 qdev->num_large_buffers = in ql_alloc_mem_resources()
2848 qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY; in ql_alloc_mem_resources()
2849 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE; in ql_alloc_mem_resources()
2850 qdev->max_frame_size = in ql_alloc_mem_resources()
2851 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE; in ql_alloc_mem_resources()
2858 qdev->shadow_reg_virt_addr = in ql_alloc_mem_resources()
2859 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE, in ql_alloc_mem_resources()
2860 &qdev->shadow_reg_phy_addr, GFP_KERNEL); in ql_alloc_mem_resources()
2862 if (qdev->shadow_reg_virt_addr != NULL) { in ql_alloc_mem_resources()
2863 qdev->preq_consumer_index = qdev->shadow_reg_virt_addr; in ql_alloc_mem_resources()
2864 qdev->req_consumer_index_phy_addr_high = in ql_alloc_mem_resources()
2865 MS_64BITS(qdev->shadow_reg_phy_addr); in ql_alloc_mem_resources()
2866 qdev->req_consumer_index_phy_addr_low = in ql_alloc_mem_resources()
2867 LS_64BITS(qdev->shadow_reg_phy_addr); in ql_alloc_mem_resources()
2869 qdev->prsp_producer_index = in ql_alloc_mem_resources()
2870 (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8); in ql_alloc_mem_resources()
2871 qdev->rsp_producer_index_phy_addr_high = in ql_alloc_mem_resources()
2872 qdev->req_consumer_index_phy_addr_high; in ql_alloc_mem_resources()
2873 qdev->rsp_producer_index_phy_addr_low = in ql_alloc_mem_resources()
2874 qdev->req_consumer_index_phy_addr_low + 8; in ql_alloc_mem_resources()
2876 netdev_err(qdev->ndev, "shadowReg Alloc failed\n"); in ql_alloc_mem_resources()
2880 if (ql_alloc_net_req_rsp_queues(qdev) != 0) { in ql_alloc_mem_resources()
2881 netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n"); in ql_alloc_mem_resources()
2885 if (ql_alloc_buffer_queues(qdev) != 0) { in ql_alloc_mem_resources()
2886 netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n"); in ql_alloc_mem_resources()
2890 if (ql_alloc_small_buffers(qdev) != 0) { in ql_alloc_mem_resources()
2891 netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n"); in ql_alloc_mem_resources()
2895 if (ql_alloc_large_buffers(qdev) != 0) { in ql_alloc_mem_resources()
2896 netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n"); in ql_alloc_mem_resources()
2901 ql_init_large_buffers(qdev); in ql_alloc_mem_resources()
2902 if (ql_create_send_free_list(qdev)) in ql_alloc_mem_resources()
2905 qdev->rsp_current = qdev->rsp_q_virt_addr; in ql_alloc_mem_resources()
2909 ql_free_send_free_list(qdev); in ql_alloc_mem_resources()
2911 ql_free_buffer_queues(qdev); in ql_alloc_mem_resources()
2913 ql_free_net_req_rsp_queues(qdev); in ql_alloc_mem_resources()
2915 dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE, in ql_alloc_mem_resources()
2916 qdev->shadow_reg_virt_addr, in ql_alloc_mem_resources()
2917 qdev->shadow_reg_phy_addr); in ql_alloc_mem_resources()
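
Taken as a whole, ql_alloc_mem_resources() above layers its allocations (shadow registers, request/response queues, buffer queues, small buffers, large buffers, send free list) and the error path at the end releases them in reverse before freeing the shadow-register page. A condensed, hedged sketch of that shape; the goto labels and their exact pairing are illustrative, only the helper names come from the listing:

/* Condensed sketch of the staged allocation and reverse-order unwind above. */
static int alloc_mem_resources_sketch(struct ql3_adapter *qdev)
{
	if (ql_alloc_net_req_rsp_queues(qdev) != 0)
		goto err_req_rsp;
	if (ql_alloc_buffer_queues(qdev) != 0)
		goto err_buffer_queues;
	if (ql_alloc_small_buffers(qdev) != 0)
		goto err_small_buffers;
	if (ql_alloc_large_buffers(qdev) != 0)
		goto err_small_buffers;	/* releases its own partial work on failure */

	ql_init_large_buffers(qdev);
	if (ql_create_send_free_list(qdev))
		goto err_free_list;

	qdev->rsp_current = qdev->rsp_q_virt_addr;
	return 0;

err_free_list:
	ql_free_send_free_list(qdev);
err_small_buffers:
	ql_free_buffer_queues(qdev);
err_buffer_queues:
	ql_free_net_req_rsp_queues(qdev);
err_req_rsp:
	dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
			  qdev->shadow_reg_virt_addr,
			  qdev->shadow_reg_phy_addr);
	return -ENOMEM;
}
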
2922 static void ql_free_mem_resources(struct ql3_adapter *qdev) in ql_free_mem_resources() argument
2924 ql_free_send_free_list(qdev); in ql_free_mem_resources()
2925 ql_free_large_buffers(qdev); in ql_free_mem_resources()
2926 ql_free_small_buffers(qdev); in ql_free_mem_resources()
2927 ql_free_buffer_queues(qdev); in ql_free_mem_resources()
2928 ql_free_net_req_rsp_queues(qdev); in ql_free_mem_resources()
2929 if (qdev->shadow_reg_virt_addr != NULL) { in ql_free_mem_resources()
2930 dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE, in ql_free_mem_resources()
2931 qdev->shadow_reg_virt_addr, in ql_free_mem_resources()
2932 qdev->shadow_reg_phy_addr); in ql_free_mem_resources()
2933 qdev->shadow_reg_virt_addr = NULL; in ql_free_mem_resources()
2937 static int ql_init_misc_registers(struct ql3_adapter *qdev) in ql_init_misc_registers() argument
2940 (void __iomem *)qdev->mem_map_registers; in ql_init_misc_registers()
2942 if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, in ql_init_misc_registers()
2943 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * in ql_init_misc_registers()
2947 ql_write_page2_reg(qdev, in ql_init_misc_registers()
2948 &local_ram->bufletSize, qdev->nvram_data.bufletSize); in ql_init_misc_registers()
2950 ql_write_page2_reg(qdev, in ql_init_misc_registers()
2952 qdev->nvram_data.bufletCount); in ql_init_misc_registers()
2954 ql_write_page2_reg(qdev, in ql_init_misc_registers()
2956 (qdev->nvram_data.tcpWindowThreshold25 << 16) | in ql_init_misc_registers()
2957 (qdev->nvram_data.tcpWindowThreshold0)); in ql_init_misc_registers()
2959 ql_write_page2_reg(qdev, in ql_init_misc_registers()
2961 qdev->nvram_data.tcpWindowThreshold50); in ql_init_misc_registers()
2963 ql_write_page2_reg(qdev, in ql_init_misc_registers()
2965 (qdev->nvram_data.ipHashTableBaseHi << 16) | in ql_init_misc_registers()
2966 qdev->nvram_data.ipHashTableBaseLo); in ql_init_misc_registers()
2967 ql_write_page2_reg(qdev, in ql_init_misc_registers()
2969 qdev->nvram_data.ipHashTableSize); in ql_init_misc_registers()
2970 ql_write_page2_reg(qdev, in ql_init_misc_registers()
2972 (qdev->nvram_data.tcpHashTableBaseHi << 16) | in ql_init_misc_registers()
2973 qdev->nvram_data.tcpHashTableBaseLo); in ql_init_misc_registers()
2974 ql_write_page2_reg(qdev, in ql_init_misc_registers()
2976 qdev->nvram_data.tcpHashTableSize); in ql_init_misc_registers()
2977 ql_write_page2_reg(qdev, in ql_init_misc_registers()
2979 (qdev->nvram_data.ncbTableBaseHi << 16) | in ql_init_misc_registers()
2980 qdev->nvram_data.ncbTableBaseLo); in ql_init_misc_registers()
2981 ql_write_page2_reg(qdev, in ql_init_misc_registers()
2983 qdev->nvram_data.ncbTableSize); in ql_init_misc_registers()
2984 ql_write_page2_reg(qdev, in ql_init_misc_registers()
2986 (qdev->nvram_data.drbTableBaseHi << 16) | in ql_init_misc_registers()
2987 qdev->nvram_data.drbTableBaseLo); in ql_init_misc_registers()
2988 ql_write_page2_reg(qdev, in ql_init_misc_registers()
2990 qdev->nvram_data.drbTableSize); in ql_init_misc_registers()
2991 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK); in ql_init_misc_registers()
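
ql_init_misc_registers() above shows the driver's recurring discipline for shared on-chip resources: acquire the hardware semaphore with ql_sem_spinlock(), program the page-2 registers from NVRAM-derived values, then release it with ql_sem_unlock(). A hedged sketch of that bracketing; the sem_bits argument is passed in rather than reproducing the exact bit expression, and the register writes are condensed to a comment:

/* Semaphore-bracketed register programming, as in ql_init_misc_registers(). */
static int program_local_ram_sketch(struct ql3_adapter *qdev, u32 sem_bits)
{
	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK, sem_bits))
		return -1;	/* semaphore not granted */

	/* ... ql_write_page2_reg(qdev, <local-RAM register>, value) calls,
	 * e.g. bufletSize/bufletCount and the TCP/IP table bases above ... */

	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
	return 0;
}
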
2995 static int ql_adapter_initialize(struct ql3_adapter *qdev) in ql_adapter_initialize() argument
2999 qdev->mem_map_registers; in ql_adapter_initialize()
3006 if (ql_mii_setup(qdev)) in ql_adapter_initialize()
3010 ql_write_common_reg(qdev, spir, in ql_adapter_initialize()
3015 qdev->port_link_state = LS_DOWN; in ql_adapter_initialize()
3016 netif_carrier_off(qdev->ndev); in ql_adapter_initialize()
3019 ql_write_common_reg(qdev, spir, in ql_adapter_initialize()
3024 *((u32 *)(qdev->preq_consumer_index)) = 0; in ql_adapter_initialize()
3025 atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES); in ql_adapter_initialize()
3026 qdev->req_producer_index = 0; in ql_adapter_initialize()
3028 ql_write_page1_reg(qdev, in ql_adapter_initialize()
3030 qdev->req_consumer_index_phy_addr_high); in ql_adapter_initialize()
3031 ql_write_page1_reg(qdev, in ql_adapter_initialize()
3033 qdev->req_consumer_index_phy_addr_low); in ql_adapter_initialize()
3035 ql_write_page1_reg(qdev, in ql_adapter_initialize()
3037 MS_64BITS(qdev->req_q_phy_addr)); in ql_adapter_initialize()
3038 ql_write_page1_reg(qdev, in ql_adapter_initialize()
3040 LS_64BITS(qdev->req_q_phy_addr)); in ql_adapter_initialize()
3041 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES); in ql_adapter_initialize()
3044 *((__le16 *) (qdev->prsp_producer_index)) = 0; in ql_adapter_initialize()
3045 qdev->rsp_consumer_index = 0; in ql_adapter_initialize()
3046 qdev->rsp_current = qdev->rsp_q_virt_addr; in ql_adapter_initialize()
3048 ql_write_page1_reg(qdev, in ql_adapter_initialize()
3050 qdev->rsp_producer_index_phy_addr_high); in ql_adapter_initialize()
3052 ql_write_page1_reg(qdev, in ql_adapter_initialize()
3054 qdev->rsp_producer_index_phy_addr_low); in ql_adapter_initialize()
3056 ql_write_page1_reg(qdev, in ql_adapter_initialize()
3058 MS_64BITS(qdev->rsp_q_phy_addr)); in ql_adapter_initialize()
3060 ql_write_page1_reg(qdev, in ql_adapter_initialize()
3062 LS_64BITS(qdev->rsp_q_phy_addr)); in ql_adapter_initialize()
3064 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES); in ql_adapter_initialize()
3067 ql_write_page1_reg(qdev, in ql_adapter_initialize()
3069 MS_64BITS(qdev->lrg_buf_q_phy_addr)); in ql_adapter_initialize()
3071 ql_write_page1_reg(qdev, in ql_adapter_initialize()
3073 LS_64BITS(qdev->lrg_buf_q_phy_addr)); in ql_adapter_initialize()
3075 ql_write_page1_reg(qdev, in ql_adapter_initialize()
3077 qdev->num_lbufq_entries); in ql_adapter_initialize()
3079 ql_write_page1_reg(qdev, in ql_adapter_initialize()
3081 qdev->lrg_buffer_len); in ql_adapter_initialize()
3084 ql_write_page1_reg(qdev, in ql_adapter_initialize()
3086 MS_64BITS(qdev->small_buf_q_phy_addr)); in ql_adapter_initialize()
3088 ql_write_page1_reg(qdev, in ql_adapter_initialize()
3090 LS_64BITS(qdev->small_buf_q_phy_addr)); in ql_adapter_initialize()
3092 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES); in ql_adapter_initialize()
3093 ql_write_page1_reg(qdev, in ql_adapter_initialize()
3097 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1; in ql_adapter_initialize()
3098 qdev->small_buf_release_cnt = 8; in ql_adapter_initialize()
3099 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1; in ql_adapter_initialize()
3100 qdev->lrg_buf_release_cnt = 8; in ql_adapter_initialize()
3101 qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr; in ql_adapter_initialize()
3102 qdev->small_buf_index = 0; in ql_adapter_initialize()
3103 qdev->lrg_buf_index = 0; in ql_adapter_initialize()
3104 qdev->lrg_buf_free_count = 0; in ql_adapter_initialize()
3105 qdev->lrg_buf_free_head = NULL; in ql_adapter_initialize()
3106 qdev->lrg_buf_free_tail = NULL; in ql_adapter_initialize()
3108 ql_write_common_reg(qdev, in ql_adapter_initialize()
3111 qdev->small_buf_q_producer_index); in ql_adapter_initialize()
3112 ql_write_common_reg(qdev, in ql_adapter_initialize()
3115 qdev->lrg_buf_q_producer_index); in ql_adapter_initialize()
3121 clear_bit(QL_LINK_MASTER, &qdev->flags); in ql_adapter_initialize()
3122 value = ql_read_page0_reg(qdev, &port_regs->portStatus); in ql_adapter_initialize()
3126 if (ql_init_misc_registers(qdev)) { in ql_adapter_initialize()
3131 value = qdev->nvram_data.tcpMaxWindowSize; in ql_adapter_initialize()
3132 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value); in ql_adapter_initialize()
3134 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig; in ql_adapter_initialize()
3136 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK, in ql_adapter_initialize()
3137 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) in ql_adapter_initialize()
3142 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value); in ql_adapter_initialize()
3143 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig, in ql_adapter_initialize()
3147 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK); in ql_adapter_initialize()
3150 if (qdev->mac_index) in ql_adapter_initialize()
3151 ql_write_page0_reg(qdev, in ql_adapter_initialize()
3153 qdev->max_frame_size); in ql_adapter_initialize()
3155 ql_write_page0_reg(qdev, in ql_adapter_initialize()
3157 qdev->max_frame_size); in ql_adapter_initialize()
3159 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK, in ql_adapter_initialize()
3160 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) * in ql_adapter_initialize()
3166 PHY_Setup(qdev); in ql_adapter_initialize()
3167 ql_init_scan_mode(qdev); in ql_adapter_initialize()
3168 ql_get_phy_owner(qdev); in ql_adapter_initialize()
3173 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, in ql_adapter_initialize()
3175 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, in ql_adapter_initialize()
3176 ((qdev->ndev->dev_addr[2] << 24) in ql_adapter_initialize()
3177 | (qdev->ndev->dev_addr[3] << 16) in ql_adapter_initialize()
3178 | (qdev->ndev->dev_addr[4] << 8) in ql_adapter_initialize()
3179 | qdev->ndev->dev_addr[5])); in ql_adapter_initialize()
3182 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, in ql_adapter_initialize()
3184 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, in ql_adapter_initialize()
3185 ((qdev->ndev->dev_addr[0] << 8) in ql_adapter_initialize()
3186 | qdev->ndev->dev_addr[1])); in ql_adapter_initialize()
3189 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, in ql_adapter_initialize()
3194 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, in ql_adapter_initialize()
3196 (qdev->mac_index << 2))); in ql_adapter_initialize()
3197 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); in ql_adapter_initialize()
3199 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg, in ql_adapter_initialize()
3201 ((qdev->mac_index << 2) + 1))); in ql_adapter_initialize()
3202 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0); in ql_adapter_initialize()
3204 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK); in ql_adapter_initialize()
3207 ql_write_page0_reg(qdev, in ql_adapter_initialize()
3212 value = ql_read_page0_reg(qdev, &port_regs->portStatus); in ql_adapter_initialize()
3215 spin_unlock_irq(&qdev->hw_lock); in ql_adapter_initialize()
3217 spin_lock_irq(&qdev->hw_lock); in ql_adapter_initialize()
3221 netdev_err(qdev->ndev, "Hw Initialization timeout\n"); in ql_adapter_initialize()
3227 if (qdev->device_id == QL3032_DEVICE_ID) { in ql_adapter_initialize()
3232 ql_write_page0_reg(qdev, &port_regs->functionControl, in ql_adapter_initialize()
3238 ql_write_page0_reg(qdev, &port_regs->portControl, in ql_adapter_initialize()
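
Near its end, ql_adapter_initialize() polls portStatus for the ready bits, dropping hw_lock around each sleep and reporting "Hw Initialization timeout" if they never come up. A hedged sketch of that bounded poll; the retry budget, sleep length, and ready_bits value are illustrative rather than the driver's exact numbers:

#include <linux/delay.h>

/* Bounded ready-poll sketch; assumes the caller holds qdev->hw_lock with
 * interrupts disabled, as ql_adapter_initialize() does at this point. */
static int wait_for_port_ready(struct ql3_adapter *qdev,
			       u32 __iomem *status_reg, u32 ready_bits)
{
	unsigned int retries = 10;	/* illustrative budget */
	u32 value;

	do {
		value = ql_read_page0_reg(qdev, status_reg);
		if ((value & ready_bits) == ready_bits)
			return 0;

		/* sleep without holding the register lock */
		spin_unlock_irq(&qdev->hw_lock);
		msleep(500);		/* illustrative delay */
		spin_lock_irq(&qdev->hw_lock);
	} while (--retries);

	netdev_err(qdev->ndev, "Hw Initialization timeout\n");
	return -1;
}
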
3250 static int ql_adapter_reset(struct ql3_adapter *qdev) in ql_adapter_reset() argument
3253 qdev->mem_map_registers; in ql_adapter_reset()
3258 set_bit(QL_RESET_ACTIVE, &qdev->flags); in ql_adapter_reset()
3259 clear_bit(QL_RESET_DONE, &qdev->flags); in ql_adapter_reset()
3264 netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n"); in ql_adapter_reset()
3265 ql_write_common_reg(qdev, in ql_adapter_reset()
3270 netdev_printk(KERN_DEBUG, qdev->ndev, in ql_adapter_reset()
3277 ql_read_common_reg(qdev, in ql_adapter_reset()
3290 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus); in ql_adapter_reset()
3292 netdev_printk(KERN_DEBUG, qdev->ndev, in ql_adapter_reset()
3294 ql_write_common_reg(qdev, in ql_adapter_reset()
3302 ql_write_common_reg(qdev, in ql_adapter_reset()
3313 value = ql_read_common_reg(qdev, in ql_adapter_reset()
3324 clear_bit(QL_RESET_ACTIVE, &qdev->flags); in ql_adapter_reset()
3325 set_bit(QL_RESET_DONE, &qdev->flags); in ql_adapter_reset()
3329 static void ql_set_mac_info(struct ql3_adapter *qdev) in ql_set_mac_info() argument
3332 qdev->mem_map_registers; in ql_set_mac_info()
3338 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus); in ql_set_mac_info()
3340 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus); in ql_set_mac_info()
3343 qdev->mac_index = 0; in ql_set_mac_info()
3344 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; in ql_set_mac_info()
3345 qdev->mb_bit_mask = FN0_MA_BITS_MASK; in ql_set_mac_info()
3346 qdev->PHYAddr = PORT0_PHY_ADDRESS; in ql_set_mac_info()
3348 set_bit(QL_LINK_OPTICAL, &qdev->flags); in ql_set_mac_info()
3350 clear_bit(QL_LINK_OPTICAL, &qdev->flags); in ql_set_mac_info()
3354 qdev->mac_index = 1; in ql_set_mac_info()
3355 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; in ql_set_mac_info()
3356 qdev->mb_bit_mask = FN1_MA_BITS_MASK; in ql_set_mac_info()
3357 qdev->PHYAddr = PORT1_PHY_ADDRESS; in ql_set_mac_info()
3359 set_bit(QL_LINK_OPTICAL, &qdev->flags); in ql_set_mac_info()
3361 clear_bit(QL_LINK_OPTICAL, &qdev->flags); in ql_set_mac_info()
3367 netdev_printk(KERN_DEBUG, qdev->ndev, in ql_set_mac_info()
3372 qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8; in ql_set_mac_info()
3377 struct ql3_adapter *qdev = netdev_priv(ndev); in ql_display_dev_info() local
3378 struct pci_dev *pdev = qdev->pdev; in ql_display_dev_info()
3382 DRV_NAME, qdev->index, qdev->chip_rev_id, in ql_display_dev_info()
3383 qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022", in ql_display_dev_info()
3384 qdev->pci_slot); in ql_display_dev_info()
3386 test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER"); in ql_display_dev_info()
3392 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"), in ql_display_dev_info()
3393 ((qdev->pci_x) ? "PCI-X" : "PCI")); in ql_display_dev_info()
3396 qdev->mem_map_registers); in ql_display_dev_info()
3399 netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr); in ql_display_dev_info()
3402 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset) in ql_adapter_down() argument
3404 struct net_device *ndev = qdev->ndev; in ql_adapter_down()
3410 clear_bit(QL_ADAPTER_UP, &qdev->flags); in ql_adapter_down()
3411 clear_bit(QL_LINK_MASTER, &qdev->flags); in ql_adapter_down()
3413 ql_disable_interrupts(qdev); in ql_adapter_down()
3415 free_irq(qdev->pdev->irq, ndev); in ql_adapter_down()
3417 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { in ql_adapter_down()
3418 netdev_info(qdev->ndev, "calling pci_disable_msi()\n"); in ql_adapter_down()
3419 clear_bit(QL_MSI_ENABLED, &qdev->flags); in ql_adapter_down()
3420 pci_disable_msi(qdev->pdev); in ql_adapter_down()
3423 del_timer_sync(&qdev->adapter_timer); in ql_adapter_down()
3425 napi_disable(&qdev->napi); in ql_adapter_down()
3431 spin_lock_irqsave(&qdev->hw_lock, hw_flags); in ql_adapter_down()
3432 if (ql_wait_for_drvr_lock(qdev)) { in ql_adapter_down()
3433 soft_reset = ql_adapter_reset(qdev); in ql_adapter_down()
3436 qdev->index); in ql_adapter_down()
3445 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_adapter_down()
3447 ql_free_mem_resources(qdev); in ql_adapter_down()
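
ql_adapter_down() above tears things down in a fixed order: clear the UP and LINK_MASTER flags, mask interrupts, release the IRQ, back MSI out if it was enabled, stop the timer and NAPI, optionally soft-reset the chip under hw_lock and the driver semaphore, and finally free the memory resources. A hedged ordering sketch with the locked reset region condensed to its happy path:

/* Ordering sketch of the shutdown path above; error logging elided. */
static void adapter_down_sketch(struct ql3_adapter *qdev, int do_reset)
{
	unsigned long hw_flags;

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	clear_bit(QL_LINK_MASTER, &qdev->flags);

	ql_disable_interrupts(qdev);
	free_irq(qdev->pdev->irq, qdev->ndev);

	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}

	del_timer_sync(&qdev->adapter_timer);
	napi_disable(&qdev->napi);

	if (do_reset) {
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		if (ql_wait_for_drvr_lock(qdev))
			ql_adapter_reset(qdev);
		/* else: the real function logs the failure */
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	}

	ql_free_mem_resources(qdev);
}
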
3451 static int ql_adapter_up(struct ql3_adapter *qdev) in ql_adapter_up() argument
3453 struct net_device *ndev = qdev->ndev; in ql_adapter_up()
3458 if (ql_alloc_mem_resources(qdev)) { in ql_adapter_up()
3463 if (qdev->msi) { in ql_adapter_up()
3464 if (pci_enable_msi(qdev->pdev)) { in ql_adapter_up()
3467 qdev->msi = 0; in ql_adapter_up()
3470 set_bit(QL_MSI_ENABLED, &qdev->flags); in ql_adapter_up()
3475 err = request_irq(qdev->pdev->irq, ql3xxx_isr, in ql_adapter_up()
3480 qdev->pdev->irq); in ql_adapter_up()
3484 spin_lock_irqsave(&qdev->hw_lock, hw_flags); in ql_adapter_up()
3486 if (!ql_wait_for_drvr_lock(qdev)) { in ql_adapter_up()
3492 err = ql_adapter_initialize(qdev); in ql_adapter_up()
3497 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); in ql_adapter_up()
3499 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_adapter_up()
3501 set_bit(QL_ADAPTER_UP, &qdev->flags); in ql_adapter_up()
3503 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1); in ql_adapter_up()
3505 napi_enable(&qdev->napi); in ql_adapter_up()
3506 ql_enable_interrupts(qdev); in ql_adapter_up()
3510 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK); in ql_adapter_up()
3512 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_adapter_up()
3513 free_irq(qdev->pdev->irq, ndev); in ql_adapter_up()
3515 if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) { in ql_adapter_up()
3517 clear_bit(QL_MSI_ENABLED, &qdev->flags); in ql_adapter_up()
3518 pci_disable_msi(qdev->pdev); in ql_adapter_up()
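
In ql_adapter_up(), the MSI step is best-effort: if pci_enable_msi() fails the driver logs it, clears qdev->msi, and falls back to legacy interrupts before calling request_irq(). A small hedged sketch of that fallback; the helper name and message text are illustrative:

/* Best-effort MSI enable as in ql_adapter_up(); falls back to INTx. */
static void try_enable_msi(struct ql3_adapter *qdev)
{
	if (!qdev->msi)
		return;

	if (pci_enable_msi(qdev->pdev)) {
		netdev_err(qdev->ndev,
			   "pci_enable_msi() failed, falling back to legacy interrupts\n");
		qdev->msi = 0;
	} else {
		set_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}
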
3523 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset) in ql_cycle_adapter() argument
3525 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) { in ql_cycle_adapter()
3526 netdev_err(qdev->ndev, in ql_cycle_adapter()
3529 dev_close(qdev->ndev); in ql_cycle_adapter()
3538 struct ql3_adapter *qdev = netdev_priv(ndev); in ql3xxx_close() local
3544 while (!test_bit(QL_ADAPTER_UP, &qdev->flags)) in ql3xxx_close()
3547 ql_adapter_down(qdev, QL_DO_RESET); in ql3xxx_close()
3553 struct ql3_adapter *qdev = netdev_priv(ndev); in ql3xxx_open() local
3554 return ql_adapter_up(qdev); in ql3xxx_open()
3559 struct ql3_adapter *qdev = netdev_priv(ndev); in ql3xxx_set_mac_address() local
3561 qdev->mem_map_registers; in ql3xxx_set_mac_address()
3573 spin_lock_irqsave(&qdev->hw_lock, hw_flags); in ql3xxx_set_mac_address()
3575 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, in ql3xxx_set_mac_address()
3577 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, in ql3xxx_set_mac_address()
3583 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg, in ql3xxx_set_mac_address()
3585 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg, in ql3xxx_set_mac_address()
3587 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql3xxx_set_mac_address()
3594 struct ql3_adapter *qdev = netdev_priv(ndev); in ql3xxx_tx_timeout() local
3605 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0); in ql3xxx_tx_timeout()
3610 struct ql3_adapter *qdev = in ql_reset_work() local
3612 struct net_device *ndev = qdev->ndev; in ql_reset_work()
3617 qdev->mem_map_registers; in ql_reset_work()
3620 if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) || in ql_reset_work()
3621 test_bit(QL_RESET_START, &qdev->flags)) { in ql_reset_work()
3622 clear_bit(QL_LINK_MASTER, &qdev->flags); in ql_reset_work()
3629 tx_cb = &qdev->tx_buf[i]; in ql_reset_work()
3633 dma_unmap_single(&qdev->pdev->dev, in ql_reset_work()
3638 dma_unmap_page(&qdev->pdev->dev, in ql_reset_work()
3649 spin_lock_irqsave(&qdev->hw_lock, hw_flags); in ql_reset_work()
3650 ql_write_common_reg(qdev, in ql_reset_work()
3659 value = ql_read_common_reg(qdev, in ql_reset_work()
3672 ql_write_common_reg(qdev, in ql_reset_work()
3680 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_reset_work()
3682 spin_lock_irqsave(&qdev->hw_lock, hw_flags); in ql_reset_work()
3684 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); in ql_reset_work()
3695 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); in ql_reset_work()
3696 clear_bit(QL_RESET_START, &qdev->flags); in ql_reset_work()
3697 ql_cycle_adapter(qdev, QL_DO_RESET); in ql_reset_work()
3701 clear_bit(QL_RESET_ACTIVE, &qdev->flags); in ql_reset_work()
3702 clear_bit(QL_RESET_PER_SCSI, &qdev->flags); in ql_reset_work()
3703 clear_bit(QL_RESET_START, &qdev->flags); in ql_reset_work()
3704 ql_cycle_adapter(qdev, QL_NO_RESET); in ql_reset_work()
3710 struct ql3_adapter *qdev = in ql_tx_timeout_work() local
3713 ql_cycle_adapter(qdev, QL_DO_RESET); in ql_tx_timeout_work()
3716 static void ql_get_board_info(struct ql3_adapter *qdev) in ql_get_board_info() argument
3719 qdev->mem_map_registers; in ql_get_board_info()
3722 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus); in ql_get_board_info()
3724 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12); in ql_get_board_info()
3726 qdev->pci_width = 64; in ql_get_board_info()
3728 qdev->pci_width = 32; in ql_get_board_info()
3730 qdev->pci_x = 1; in ql_get_board_info()
3732 qdev->pci_x = 0; in ql_get_board_info()
3733 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn); in ql_get_board_info()
3738 struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer); in ql3xxx_timer() local
3739 queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0); in ql3xxx_timer()
3755 struct ql3_adapter *qdev = NULL; in ql3xxx_probe() local
3789 qdev = netdev_priv(ndev); in ql3xxx_probe()
3790 qdev->index = cards_found; in ql3xxx_probe()
3791 qdev->ndev = ndev; in ql3xxx_probe()
3792 qdev->pdev = pdev; in ql3xxx_probe()
3793 qdev->device_id = pci_entry->device; in ql3xxx_probe()
3794 qdev->port_link_state = LS_DOWN; in ql3xxx_probe()
3796 qdev->msi = 1; in ql3xxx_probe()
3798 qdev->msg_enable = netif_msg_init(debug, default_msg); in ql3xxx_probe()
3801 if (qdev->device_id == QL3032_DEVICE_ID) in ql3xxx_probe()
3804 qdev->mem_map_registers = pci_ioremap_bar(pdev, 1); in ql3xxx_probe()
3805 if (!qdev->mem_map_registers) { in ql3xxx_probe()
3811 spin_lock_init(&qdev->adapter_lock); in ql3xxx_probe()
3812 spin_lock_init(&qdev->hw_lock); in ql3xxx_probe()
3819 netif_napi_add(ndev, &qdev->napi, ql_poll); in ql3xxx_probe()
3824 if (ql_get_nvram_params(qdev)) { in ql3xxx_probe()
3826 __func__, qdev->index); in ql3xxx_probe()
3831 ql_set_mac_info(qdev); in ql3xxx_probe()
3834 if (qdev->mac_index) { in ql3xxx_probe()
3835 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac ; in ql3xxx_probe()
3836 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress); in ql3xxx_probe()
3838 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac ; in ql3xxx_probe()
3839 ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress); in ql3xxx_probe()
3845 ql_get_board_info(qdev); in ql3xxx_probe()
3851 if (qdev->pci_x) in ql3xxx_probe()
3865 qdev->workqueue = create_singlethread_workqueue(ndev->name); in ql3xxx_probe()
3866 if (!qdev->workqueue) { in ql3xxx_probe()
3872 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work); in ql3xxx_probe()
3873 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); in ql3xxx_probe()
3874 INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); in ql3xxx_probe()
3876 timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0); in ql3xxx_probe()
3877 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ in ql3xxx_probe()
3890 iounmap(qdev->mem_map_registers); in ql3xxx_probe()
3904 struct ql3_adapter *qdev = netdev_priv(ndev); in ql3xxx_remove() local
3908 ql_disable_interrupts(qdev); in ql3xxx_remove()
3910 if (qdev->workqueue) { in ql3xxx_remove()
3911 cancel_delayed_work(&qdev->reset_work); in ql3xxx_remove()
3912 cancel_delayed_work(&qdev->tx_timeout_work); in ql3xxx_remove()
3913 destroy_workqueue(qdev->workqueue); in ql3xxx_remove()
3914 qdev->workqueue = NULL; in ql3xxx_remove()
3917 iounmap(qdev->mem_map_registers); in ql3xxx_remove()
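
ql3xxx_remove() closes the section with the usual unwind of probe: interrupts are masked, pending reset and tx-timeout work is cancelled before the workqueue is destroyed, and only then is the register mapping undone. A hedged ordering sketch; the unregister/free_netdev steps around it are not visible in this listing and are omitted:

/* Remove-path ordering sketch; mirrors only the lines shown above. */
static void remove_teardown_sketch(struct ql3_adapter *qdev)
{
	ql_disable_interrupts(qdev);

	if (qdev->workqueue) {
		cancel_delayed_work(&qdev->reset_work);
		cancel_delayed_work(&qdev->tx_timeout_work);
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	iounmap(qdev->mem_map_registers);
}
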