1 /*- 2 * Copyright 2021 Intel Corp 3 * Copyright 2021 Rubicon Communications, LLC (Netgate) 4 * SPDX-License-Identifier: BSD-3-Clause 5 */ 6 7 #include <sys/cdefs.h> 8 #include "igc_hw.h" 9 #include "igc_i225.h" 10 #include "igc_mac.h" 11 #include "igc_base.h" 12 13 /** 14 * igc_acquire_phy_base - Acquire rights to access PHY 15 * @hw: pointer to the HW structure 16 * 17 * Acquire access rights to the correct PHY. 18 **/ 19 s32 igc_acquire_phy_base(struct igc_hw *hw) 20 { 21 u16 mask = IGC_SWFW_PHY0_SM; 22 23 DEBUGFUNC("igc_acquire_phy_base"); 24 25 if (hw->bus.func == IGC_FUNC_1) 26 mask = IGC_SWFW_PHY1_SM; 27 28 return hw->mac.ops.acquire_swfw_sync(hw, mask); 29 } 30 31 /** 32 * igc_release_phy_base - Release rights to access PHY 33 * @hw: pointer to the HW structure 34 * 35 * A wrapper to release access rights to the correct PHY. 36 **/ 37 void igc_release_phy_base(struct igc_hw *hw) 38 { 39 u16 mask = IGC_SWFW_PHY0_SM; 40 41 DEBUGFUNC("igc_release_phy_base"); 42 43 if (hw->bus.func == IGC_FUNC_1) 44 mask = IGC_SWFW_PHY1_SM; 45 46 hw->mac.ops.release_swfw_sync(hw, mask); 47 } 48 49 /** 50 * igc_init_hw_base - Initialize hardware 51 * @hw: pointer to the HW structure 52 * 53 * This inits the hardware readying it for operation. 
54 **/ 55 s32 igc_init_hw_base(struct igc_hw *hw) 56 { 57 struct igc_mac_info *mac = &hw->mac; 58 s32 ret_val; 59 u16 i, rar_count = mac->rar_entry_count; 60 61 DEBUGFUNC("igc_init_hw_base"); 62 63 /* Setup the receive address */ 64 igc_init_rx_addrs_generic(hw, rar_count); 65 66 /* Zero out the Multicast HASH table */ 67 DEBUGOUT("Zeroing the MTA\n"); 68 for (i = 0; i < mac->mta_reg_count; i++) 69 IGC_WRITE_REG_ARRAY(hw, IGC_MTA, i, 0); 70 71 /* Zero out the Unicast HASH table */ 72 DEBUGOUT("Zeroing the UTA\n"); 73 for (i = 0; i < mac->uta_reg_count; i++) 74 IGC_WRITE_REG_ARRAY(hw, IGC_UTA, i, 0); 75 76 /* Setup link and flow control */ 77 ret_val = mac->ops.setup_link(hw); 78 /* 79 * Clear all of the statistics registers (clear on read). It is 80 * important that we do this after we have tried to establish link 81 * because the symbol error count will increment wildly if there 82 * is no link. 83 */ 84 igc_clear_hw_cntrs_base_generic(hw); 85 86 return ret_val; 87 } 88 89 /** 90 * igc_power_down_phy_copper_base - Remove link during PHY power down 91 * @hw: pointer to the HW structure 92 * 93 * In the case of a PHY power down to save power, or to turn off link during a 94 * driver unload, or wake on lan is not enabled, remove the link. 95 **/ 96 void igc_power_down_phy_copper_base(struct igc_hw *hw) 97 { 98 struct igc_phy_info *phy = &hw->phy; 99 100 if (!(phy->ops.check_reset_block)) 101 return; 102 103 /* If the management interface is not enabled, then power down */ 104 if (phy->ops.check_reset_block(hw)) 105 igc_power_down_phy_copper(hw); 106 107 return; 108 } 109 110 /** 111 * igc_rx_fifo_flush_base - Clean Rx FIFO after Rx enable 112 * @hw: pointer to the HW structure 113 * 114 * After Rx enable, if manageability is enabled then there is likely some 115 * bad data at the start of the FIFO and possibly in the DMA FIFO. This 116 * function clears the FIFOs and flushes any packets that came in as Rx was 117 * being enabled. 
 **/
void igc_rx_fifo_flush_base(struct igc_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	DEBUGFUNC("igc_rx_fifo_flush_base");

	/* disable IPv6 options as per hardware errata */
	rfctl = IGC_READ_REG(hw, IGC_RFCTL);
	rfctl |= IGC_RFCTL_IPV6_EX_DIS;
	IGC_WRITE_REG(hw, IGC_RFCTL, rfctl);

	/*
	 * Only flush when manageability (TCO packet reception) is enabled;
	 * otherwise there is no stale management traffic in the FIFO.
	 */
	if (!(IGC_READ_REG(hw, IGC_MANC) & IGC_MANC_RCV_TCO_EN))
		return;

	/*
	 * Disable all Rx queues, saving each queue's RXDCTL so it can be
	 * restored after the flush.  NOTE(review): the queue count is
	 * hard-coded to 4 here (matching rxdctl[4]) — presumably the
	 * number of Rx queues on this hardware; confirm against igc_hw.h.
	 */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = IGC_READ_REG(hw, IGC_RXDCTL(i));
		IGC_WRITE_REG(hw, IGC_RXDCTL(i),
		    rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msec_delay(1);
		rx_enabled = 0;
		/* OR all RXDCTLs together; ENABLE still set => not done */
		for (i = 0; i < 4; i++)
			rx_enabled |= IGC_READ_REG(hw, IGC_RXDCTL(i));
		if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		DEBUGOUT("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	IGC_WRITE_REG(hw, IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);

	/* Save current max frame size, then force it to 0 during the flush */
	rlpml = IGC_READ_REG(hw, IGC_RLPML);
	IGC_WRITE_REG(hw, IGC_RLPML, 0);

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
	temp_rctl |= IGC_RCTL_LPE;

	/* Write the reject-everything config, then re-enable Rx to drain */
	IGC_WRITE_REG(hw, IGC_RCTL, temp_rctl);
	IGC_WRITE_REG(hw, IGC_RCTL, temp_rctl | IGC_RCTL_EN);
	IGC_WRITE_FLUSH(hw);
	msec_delay(2);

	/* Enable Rx queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl[i]);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	IGC_WRITE_FLUSH(hw);

	/* Restore the saved max frame size and original RFCTL */
	IGC_WRITE_REG(hw, IGC_RLPML, rlpml);
	IGC_WRITE_REG(hw, IGC_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	IGC_READ_REG(hw, IGC_ROC);
	IGC_READ_REG(hw, IGC_RNBC);
	IGC_READ_REG(hw, IGC_MPC);
}