// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/delay.h>

#include "igc_hw.h"
#include "igc_i225.h"
#include "igc_mac.h"
#include "igc_base.h"
#include "igc.h"

/**
 * igc_reset_hw_base - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state.  This is a
 * function pointer entry point called by the API module.
 */
static s32 igc_reset_hw_base(struct igc_hw *hw)
{
	s32 ret_val;
	u32 ctrl;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igc_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(IGC_IMC, 0xffffffff);

	wr32(IGC_RCTL, 0);
	wr32(IGC_TCTL, IGC_TCTL_PSP);
	wrfl();

	usleep_range(10000, 20000);

	ctrl = rd32(IGC_CTRL);

	hw_dbg("Issuing a global reset to MAC\n");
	wr32(IGC_CTRL, ctrl | IGC_CTRL_RST);

	ret_val = igc_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error.  This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* Clear any pending interrupt events. */
	wr32(IGC_IMC, 0xffffffff);
	rd32(IGC_ICR);

	return ret_val;
}

/**
 * igc_init_nvm_params_base - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 */
static s32 igc_init_nvm_params_base(struct igc_hw *hw)
{
	struct igc_nvm_info *nvm = &hw->nvm;
	u32 eecd = rd32(IGC_EECD);
	u16 size;

	/* failed to read reg and got all F's */
	if (!(~eecd))
		return -ENXIO;

	size = FIELD_GET(IGC_EECD_SIZE_EX_MASK, eecd);

	/* Added to a constant, "size" becomes the left-shift value
	 * for setting word_size.
	 */
	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* Just in case size is out of range, cap it to the largest
	 * EEPROM size supported.
	 */
	if (size > 15)
		size = 15;

	nvm->type = igc_nvm_eeprom_spi;
	nvm->word_size = BIT(size);
	nvm->opcode_bits = 8;
	nvm->delay_usec = 1;

	nvm->page_size = eecd & IGC_EECD_ADDR_BITS ? 32 : 8;
	nvm->address_bits = eecd & IGC_EECD_ADDR_BITS ?
			16 : 8;

	if (nvm->word_size == BIT(15))
		nvm->page_size = 128;

	return 0;
}

/**
 * igc_setup_copper_link_base - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Configures the link for auto-negotiation or forced speed and duplex.
 * We then check for link; once link is established, collision distance
 * and flow control are configured.
 */
static s32 igc_setup_copper_link_base(struct igc_hw *hw)
{
	s32 ret_val = 0;
	u32 ctrl;

	ctrl = rd32(IGC_CTRL);
	ctrl |= IGC_CTRL_SLU;
	ctrl &= ~(IGC_CTRL_FRCSPD | IGC_CTRL_FRCDPX);
	wr32(IGC_CTRL, ctrl);

	ret_val = igc_setup_copper_link(hw);

	return ret_val;
}

/**
 * igc_init_mac_params_base - Init MAC func ptrs.
 * @hw: pointer to the HW structure
 */
static s32 igc_init_mac_params_base(struct igc_hw *hw)
{
	struct igc_dev_spec_base *dev_spec = &hw->dev_spec._base;
	struct igc_mac_info *mac = &hw->mac;

	/* Set mta register count */
	mac->mta_reg_count = 128;
	mac->rar_entry_count = IGC_RAR_ENTRIES;

	/* reset */
	mac->ops.reset_hw = igc_reset_hw_base;

	mac->ops.acquire_swfw_sync = igc_acquire_swfw_sync_i225;
	mac->ops.release_swfw_sync = igc_release_swfw_sync_i225;

	/* Allow a single clear of the SW semaphore on I225 */
	if (mac->type == igc_i225)
		dev_spec->clear_semaphore_once = true;

	/* physical interface link setup */
	mac->ops.setup_physical_interface = igc_setup_copper_link_base;

	return 0;
}

/**
 * igc_init_phy_params_base - Init PHY func ptrs.
 * @hw: pointer to the HW structure
 */
static s32 igc_init_phy_params_base(struct igc_hw *hw)
{
	struct igc_phy_info *phy = &hw->phy;
	s32 ret_val = 0;

	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
	phy->reset_delay_us = 100;

	/* set lan id */
	hw->bus.func = FIELD_GET(IGC_STATUS_FUNC_MASK, rd32(IGC_STATUS));

	/* Make sure the PHY is in a good state.  Several people have reported
	 * firmware leaving the PHY's page select register set to something
	 * other than the default of zero, which causes the PHY ID read to
	 * access something other than the intended register.
	 */
	ret_val = hw->phy.ops.reset(hw);
	if (ret_val) {
		hw_dbg("Error resetting the PHY\n");
		goto out;
	}

	ret_val = igc_get_phy_id(hw);
	if (ret_val)
		return ret_val;

	igc_check_for_copper_link(hw);

out:
	return ret_val;
}
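
/**
 * igc_get_invariants_base - Set device-specific invariants
 * @hw: pointer to the HW structure
 *
 * Identifies the MAC type from the PCI device ID, then initializes the
 * MAC, NVM and PHY parameters and function pointers for this device.
 */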
static s32 igc_get_invariants_base(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	s32 ret_val = 0;

	switch (hw->device_id) {
	case IGC_DEV_ID_I225_LM:
	case IGC_DEV_ID_I225_V:
	case IGC_DEV_ID_I225_I:
	case IGC_DEV_ID_I220_V:
	case IGC_DEV_ID_I225_K:
	case IGC_DEV_ID_I225_K2:
	case IGC_DEV_ID_I226_K:
	case IGC_DEV_ID_I225_LMVP:
	case IGC_DEV_ID_I226_LMVP:
	case IGC_DEV_ID_I225_IT:
	case IGC_DEV_ID_I226_LM:
	case IGC_DEV_ID_I226_V:
	case IGC_DEV_ID_I226_IT:
	case IGC_DEV_ID_I221_V:
	case IGC_DEV_ID_I226_BLANK_NVM:
	case IGC_DEV_ID_I225_BLANK_NVM:
		mac->type = igc_i225;
		break;
	default:
		return -IGC_ERR_MAC_INIT;
	}

	hw->phy.media_type = igc_media_type_copper;

	/* mac initialization and operations */
	ret_val = igc_init_mac_params_base(hw);
	if (ret_val)
		goto out;

	/* NVM initialization */
	ret_val = igc_init_nvm_params_base(hw);
	if (ret_val)
		goto out;
	switch (hw->mac.type) {
	case igc_i225:
		ret_val = igc_init_nvm_params_i225(hw);
		break;
	default:
		break;
	}

	/* setup PHY parameters */
	ret_val = igc_init_phy_params_base(hw);
	if (ret_val)
		goto out;

out:
	return ret_val;
}

/**
 * igc_acquire_phy_base - Acquire rights to access PHY
 * @hw: pointer to the HW structure
 *
 * Acquire access rights to the correct PHY.  This is a
 * function pointer entry point called by the API module.
 */
static s32 igc_acquire_phy_base(struct igc_hw *hw)
{
	u16 mask = IGC_SWFW_PHY0_SM;

	return hw->mac.ops.acquire_swfw_sync(hw, mask);
}

/**
 * igc_release_phy_base - Release rights to access PHY
 * @hw: pointer to the HW structure
 *
 * A wrapper to release access rights to the correct PHY.  This is a
 * function pointer entry point called by the API module.
 */
static void igc_release_phy_base(struct igc_hw *hw)
{
	u16 mask = IGC_SWFW_PHY0_SM;

	hw->mac.ops.release_swfw_sync(hw, mask);
}

/**
 * igc_init_hw_base - Initialize hardware
 * @hw: pointer to the HW structure
 *
 * This initializes the hardware, readying it for operation.
 */
static s32 igc_init_hw_base(struct igc_hw *hw)
{
	struct igc_mac_info *mac = &hw->mac;
	u16 i, rar_count = mac->rar_entry_count;
	s32 ret_val = 0;

	/* Setup the receive address */
	igc_init_rx_addrs(hw, rar_count);

	/* Zero out the Multicast HASH table */
	hw_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		array_wr32(IGC_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	hw_dbg("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		array_wr32(IGC_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = igc_setup_link(hw);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igc_clear_hw_cntrs_base(hw);

	return ret_val;
}

/**
 * igc_power_down_phy_copper_base - Remove link during PHY power down
 * @hw: pointer to the HW structure
 *
 * Remove the link when the PHY is powered down to save power, when link is
 * turned off during a driver unload, or when wake on LAN is not enabled.
 */
void igc_power_down_phy_copper_base(struct igc_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	if (!(igc_enable_mng_pass_thru(hw) || igc_check_reset_block(hw)))
		igc_power_down_phy_copper(hw);
}

/**
 * igc_rx_fifo_flush_base - Clean Rx FIFO after Rx enable
 * @hw: pointer to the HW structure
 *
 * After Rx enable, if manageability is enabled then there is likely some
 * bad data at the start of the FIFO and possibly in the DMA FIFO.  This
 * function clears the FIFOs and flushes any packets that came in while Rx
 * was being enabled.
 */
void igc_rx_fifo_flush_base(struct igc_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* disable IPv6 options as per hardware errata */
	rfctl = rd32(IGC_RFCTL);
	rfctl |= IGC_RFCTL_IPV6_EX_DIS;
	wr32(IGC_RFCTL, rfctl);

	if (!(rd32(IGC_MANC) & IGC_MANC_RCV_TCO_EN))
		return;

	/* Disable all Rx queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(IGC_RXDCTL(i));
		wr32(IGC_RXDCTL(i),
		     rxdctl[i] & ~IGC_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		usleep_range(1000, 2000);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(IGC_RXDCTL(i));
		if (!(rx_enabled & IGC_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected.  Set enable and wait 2ms so that
	 * any packets that came in while RCTL.EN was set are flushed.
	 */
	wr32(IGC_RFCTL, rfctl & ~IGC_RFCTL_LEF);

	rlpml = rd32(IGC_RLPML);
	wr32(IGC_RLPML, 0);

	rctl = rd32(IGC_RCTL);
	temp_rctl = rctl & ~(IGC_RCTL_EN | IGC_RCTL_SBP);
	temp_rctl |= IGC_RCTL_LPE;

	wr32(IGC_RCTL, temp_rctl);
	wr32(IGC_RCTL, temp_rctl | IGC_RCTL_EN);
	wrfl();
	usleep_range(2000, 3000);

	/* Enable Rx queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(IGC_RXDCTL(i), rxdctl[i]);
	wr32(IGC_RCTL, rctl);
	wrfl();

	wr32(IGC_RLPML, rlpml);
	wr32(IGC_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(IGC_ROC);
	rd32(IGC_RNBC);
	rd32(IGC_MPC);
}
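
/**
 * igc_is_device_id_i225 - Check whether the device is an I225 part
 * @hw: pointer to the HW structure
 *
 * Returns true when the PCI device ID matches one of the I225 SKUs
 * handled by this driver.
 */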
bool igc_is_device_id_i225(struct igc_hw *hw)
{
	switch (hw->device_id) {
	case IGC_DEV_ID_I225_LM:
	case IGC_DEV_ID_I225_V:
	case IGC_DEV_ID_I225_I:
	case IGC_DEV_ID_I225_K:
	case IGC_DEV_ID_I225_K2:
	case IGC_DEV_ID_I225_LMVP:
	case IGC_DEV_ID_I225_IT:
		return true;
	default:
		return false;
	}
}
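
/**
 * igc_is_device_id_i226 - Check whether the device is an I226 part
 * @hw: pointer to the HW structure
 *
 * Returns true when the PCI device ID matches one of the I226 SKUs
 * handled by this driver.
 */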
bool igc_is_device_id_i226(struct igc_hw *hw)
{
	switch (hw->device_id) {
	case IGC_DEV_ID_I226_LM:
	case IGC_DEV_ID_I226_V:
	case IGC_DEV_ID_I226_K:
	case IGC_DEV_ID_I226_IT:
		return true;
	default:
		return false;
	}
}

static struct igc_mac_operations igc_mac_ops_base = {
	.init_hw = igc_init_hw_base,
	.check_for_link = igc_check_for_copper_link,
	.rar_set = igc_rar_set,
	.read_mac_addr = igc_read_mac_addr,
	.get_speed_and_duplex = igc_get_speed_and_duplex_copper,
};

static const struct igc_phy_operations igc_phy_ops_base = {
	.acquire = igc_acquire_phy_base,
	.release = igc_release_phy_base,
	.reset = igc_phy_hw_reset,
	.read_reg = igc_read_phy_reg_gpy,
	.write_reg = igc_write_phy_reg_gpy,
};

const struct igc_info igc_base_info = {
	.get_invariants = igc_get_invariants_base,
	.mac_ops = &igc_mac_ops_base,
	.phy_ops = &igc_phy_ops_base,
};