/*-
 * Copyright 2021 Intel Corp
 * Copyright 2021 Rubicon Communications, LLC (Netgate)
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "igc_hw.h"
#include "igc_i225.h"
#include "igc_mac.h"
#include "igc_base.h"

/**
 *  igc_acquire_phy_base - Acquire rights to access PHY
 *  @hw: pointer to the HW structure
 *
 *  Acquire access rights to the correct PHY.
 **/
s32 igc_acquire_phy_base(struct igc_hw *hw)
{
	u16 mask = IGC_SWFW_PHY0_SM;

	DEBUGFUNC("igc_acquire_phy_base");

	if (hw->bus.func == IGC_FUNC_1)
		mask = IGC_SWFW_PHY1_SM;

	return hw->mac.ops.acquire_swfw_sync(hw, mask);
}

/**
 *  igc_release_phy_base - Release rights to access PHY
 *  @hw: pointer to the HW structure
 *
 *  A wrapper to release access rights to the correct PHY.
 **/
void igc_release_phy_base(struct igc_hw *hw)
{
	u16 mask = IGC_SWFW_PHY0_SM;

	DEBUGFUNC("igc_release_phy_base");

	if (hw->bus.func == IGC_FUNC_1)
		mask = IGC_SWFW_PHY1_SM;

	hw->mac.ops.release_swfw_sync(hw, mask);
}

/**
 *  igc_init_hw_base - Initialize hardware
 *  @hw: pointer to the HW structure
 *
 *  This initializes the hardware, readying it for operation.
 **/
s32 igc_init_hw_base(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	DEBUGFUNC("igc_init_hw_base");

	/* Setup the receive address */
	igc_init_rx_addrs_generic(hw, rar_count);

	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	DEBUGOUT("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);
	/*
	 * Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igc_clear_hw_cntrs_base_generic(hw);

	return ret_val;
}

/**
 *  igc_power_down_phy_copper_base - Remove link during PHY power down
 *  @hw: pointer to the HW structure
 *
 *  Remove the link when the PHY is powered down to save power, when link is
 *  turned off during a driver unload, or when Wake on LAN is not enabled.
 **/
void igc_power_down_phy_copper_base(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;

	if (!(phy->ops.check_reset_block))
		return;

	/* If the management interface is not enabled, then power down */
	if (phy->ops.check_reset_block(hw))
		igc_power_down_phy_copper(hw);

	return;
}

/**
 *  igc_rx_fifo_flush_base - Clean Rx FIFO after Rx enable
 *  @hw: pointer to the HW structure
 *
 *  After Rx enable, if manageability is enabled then there is likely some
 *  bad data at the start of the FIFO and possibly in the DMA FIFO.  This
 *  function clears the FIFOs and flushes any packets that came in as Rx was
 *  being enabled.
 **/
void igc_rx_fifo_flush_base(struct igc_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	DEBUGFUNC("igc_rx_fifo_flush_base");

	/* disable IPv6 options as per hardware errata */
	rfctl = IGC_READ_REG(hw, IGC_RFCTL);
	rfctl |= IGC_RFCTL_IPV6_EX_DIS;
	IGC_WRITE_REG(hw, IGC_RFCTL, rfctl);

	if (!(IGC_READ_REG(hw, IGC_MANC) & IGC_MANC_RCV_TCO_EN))
		return;

	/* Disable all Rx queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = IGC_READ_REG(hw, IGC_RXDCTL(i));
		IGC_WRITE_REG(hw, IGC_RXDCTL(i),
		    rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msec_delay(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= IGC_READ_REG(hw, IGC_RXDCTL(i));
		if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		DEBUGOUT("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packet that was arriving while RCTL.EN was being set is
	 * flushed.
	 */
	IGC_WRITE_REG(hw, IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);

	rlpml = IGC_READ_REG(hw, IGC_RLPML);
	IGC_WRITE_REG(hw, IGC_RLPML, 0);

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
	temp_rctl |= IGC_RCTL_LPE;

	IGC_WRITE_REG(hw, IGC_RCTL, temp_rctl);
	IGC_WRITE_REG(hw, IGC_RCTL, temp_rctl | IGC_RCTL_EN);
	IGC_WRITE_FLUSH(hw);
	msec_delay(2);

	/* Enable Rx queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl[i]);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	IGC_WRITE_FLUSH(hw);

	IGC_WRITE_REG(hw, IGC_RLPML, rlpml);
	IGC_WRITE_REG(hw, IGC_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	IGC_READ_REG(hw, IGC_ROC);
	IGC_READ_REG(hw, IGC_RNBC);
	IGC_READ_REG(hw, IGC_MPC);
}
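
/*
 * Illustrative sketch only (not part of the upstream file): the base
 * routines above are normally consumed through the hw->mac.ops and
 * hw->phy.ops function-pointer tables rather than called directly.  The
 * wiring below shows that pattern; the helper name igc_example_init_ops
 * and the IGC_EXAMPLE_USAGE guard are hypothetical, and the ops member
 * names are assumed to match the definitions in igc_hw.h.
 */
#ifdef IGC_EXAMPLE_USAGE
static void
igc_example_init_ops(struct igc_hw *hw)
{
	/* PHY register access is bracketed by the SW/FW semaphore helpers. */
	hw->phy.ops.acquire = igc_acquire_phy_base;
	hw->phy.ops.release = igc_release_phy_base;

	/* Use the base implementations for hardware init and PHY power down. */
	hw->mac.ops.init_hw = igc_init_hw_base;
	hw->phy.ops.power_down = igc_power_down_phy_copper_base;
}
#endif /* IGC_EXAMPLE_USAGE */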