1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 1999 - 2018 Intel Corporation. */ 3 4 #include <linux/pci.h> 5 #include <linux/delay.h> 6 #include <linux/sched.h> 7 8 #include "ixgbe.h" 9 #include "ixgbe_phy.h" 10 #include "ixgbe_mbx.h" 11 12 #define IXGBE_82599_MAX_TX_QUEUES 128 13 #define IXGBE_82599_MAX_RX_QUEUES 128 14 #define IXGBE_82599_RAR_ENTRIES 128 15 #define IXGBE_82599_MC_TBL_SIZE 128 16 #define IXGBE_82599_VFT_TBL_SIZE 128 17 #define IXGBE_82599_RX_PB_SIZE 512 18 19 static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 20 static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 21 static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 22 static void 23 ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *, ixgbe_link_speed); 24 static int ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, 25 ixgbe_link_speed speed, 26 bool autoneg_wait_to_complete); 27 static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); 28 static int ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 29 bool autoneg_wait_to_complete); 30 static int ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, 31 ixgbe_link_speed speed, 32 bool autoneg_wait_to_complete); 33 static int ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 34 ixgbe_link_speed speed, 35 bool autoneg_wait_to_complete); 36 static int ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); 37 static int ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, 38 u8 dev_addr, u8 *data); 39 static int ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, 40 u8 dev_addr, u8 data); 41 static int ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); 42 static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); 43 44 bool ixgbe_mng_enabled(struct ixgbe_hw *hw) 45 { 46 u32 fwsm, manc, factps; 47 48 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); 49 if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) 50 return false; 51 52 manc = 
IXGBE_READ_REG(hw, IXGBE_MANC); 53 if (!(manc & IXGBE_MANC_RCV_TCO_EN)) 54 return false; 55 56 factps = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); 57 if (factps & IXGBE_FACTPS_MNGCG) 58 return false; 59 60 return true; 61 } 62 63 static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) 64 { 65 struct ixgbe_mac_info *mac = &hw->mac; 66 67 /* enable the laser control functions for SFP+ fiber 68 * and MNG not enabled 69 */ 70 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && 71 !ixgbe_mng_enabled(hw)) { 72 mac->ops.disable_tx_laser = 73 &ixgbe_disable_tx_laser_multispeed_fiber; 74 mac->ops.enable_tx_laser = 75 &ixgbe_enable_tx_laser_multispeed_fiber; 76 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; 77 } else { 78 mac->ops.disable_tx_laser = NULL; 79 mac->ops.enable_tx_laser = NULL; 80 mac->ops.flap_tx_laser = NULL; 81 } 82 83 if (hw->phy.multispeed_fiber) { 84 /* Set up dual speed SFP+ support */ 85 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; 86 mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599; 87 mac->ops.set_rate_select_speed = 88 ixgbe_set_hard_rate_select_speed; 89 } else { 90 if ((mac->ops.get_media_type(hw) == 91 ixgbe_media_type_backplane) && 92 (hw->phy.smart_speed == ixgbe_smart_speed_auto || 93 hw->phy.smart_speed == ixgbe_smart_speed_on) && 94 !ixgbe_verify_lesm_fw_enabled_82599(hw)) 95 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; 96 else 97 mac->ops.setup_link = &ixgbe_setup_mac_link_82599; 98 } 99 } 100 101 static int ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) 102 { 103 u16 list_offset, data_offset, data_value; 104 int ret_val; 105 106 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { 107 ixgbe_init_mac_link_ops_82599(hw); 108 109 hw->phy.ops.reset = NULL; 110 111 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 112 &data_offset); 113 if (ret_val) 114 return ret_val; 115 116 /* PHY config will finish before releasing the semaphore */ 117 ret_val = 
hw->mac.ops.acquire_swfw_sync(hw, 118 IXGBE_GSSR_MAC_CSR_SM); 119 if (ret_val) 120 return -EBUSY; 121 122 if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) 123 goto setup_sfp_err; 124 while (data_value != 0xffff) { 125 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); 126 IXGBE_WRITE_FLUSH(hw); 127 if (hw->eeprom.ops.read(hw, ++data_offset, &data_value)) 128 goto setup_sfp_err; 129 } 130 131 /* Release the semaphore */ 132 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 133 /* 134 * Delay obtaining semaphore again to allow FW access, 135 * semaphore_delay is in ms usleep_range needs us. 136 */ 137 usleep_range(hw->eeprom.semaphore_delay * 1000, 138 hw->eeprom.semaphore_delay * 2000); 139 140 /* Restart DSP and set SFI mode */ 141 ret_val = hw->mac.ops.prot_autoc_write(hw, 142 hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL, 143 false); 144 145 if (ret_val) { 146 hw_dbg(hw, " sfp module setup not complete\n"); 147 return -EIO; 148 } 149 } 150 151 return 0; 152 153 setup_sfp_err: 154 /* Release the semaphore */ 155 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 156 /* Delay obtaining semaphore again to allow FW access, 157 * semaphore_delay is in ms usleep_range needs us. 158 */ 159 usleep_range(hw->eeprom.semaphore_delay * 1000, 160 hw->eeprom.semaphore_delay * 2000); 161 hw_err(hw, "eeprom read at offset %d failed\n", data_offset); 162 return -EIO; 163 } 164 165 /** 166 * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read 167 * @hw: pointer to hardware structure 168 * @locked: Return the if we locked for this read. 169 * @reg_val: Value we read from AUTOC 170 * 171 * For this part (82599) we need to wrap read-modify-writes with a possible 172 * FW/SW lock. It is assumed this lock will be freed with the next 173 * prot_autoc_write_82599(). Note, that locked can only be true in cases 174 * where this function doesn't return an error. 
175 **/ 176 static int prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, 177 u32 *reg_val) 178 { 179 int ret_val; 180 181 *locked = false; 182 /* If LESM is on then we need to hold the SW/FW semaphore. */ 183 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { 184 ret_val = hw->mac.ops.acquire_swfw_sync(hw, 185 IXGBE_GSSR_MAC_CSR_SM); 186 if (ret_val) 187 return -EBUSY; 188 189 *locked = true; 190 } 191 192 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); 193 return 0; 194 } 195 196 /** 197 * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write 198 * @hw: pointer to hardware structure 199 * @autoc: value to write to AUTOC 200 * @locked: bool to indicate whether the SW/FW lock was already taken by 201 * previous proc_autoc_read_82599. 202 * 203 * This part (82599) may need to hold a the SW/FW lock around all writes to 204 * AUTOC. Likewise after a write we need to do a pipeline reset. 205 **/ 206 static int prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) 207 { 208 int ret_val = 0; 209 210 /* Blocked by MNG FW so bail */ 211 if (ixgbe_check_reset_blocked(hw)) 212 goto out; 213 214 /* We only need to get the lock if: 215 * - We didn't do it already (in the read part of a read-modify-write) 216 * - LESM is enabled. 217 */ 218 if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) { 219 ret_val = hw->mac.ops.acquire_swfw_sync(hw, 220 IXGBE_GSSR_MAC_CSR_SM); 221 if (ret_val) 222 return -EBUSY; 223 224 locked = true; 225 } 226 227 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); 228 ret_val = ixgbe_reset_pipeline_82599(hw); 229 230 out: 231 /* Free the SW/FW semaphore as we either grabbed it here or 232 * already had it when this function was called. 
233 */ 234 if (locked) 235 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 236 237 return ret_val; 238 } 239 240 static int ixgbe_get_invariants_82599(struct ixgbe_hw *hw) 241 { 242 struct ixgbe_mac_info *mac = &hw->mac; 243 244 ixgbe_init_mac_link_ops_82599(hw); 245 246 mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; 247 mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; 248 mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; 249 mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; 250 mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; 251 mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; 252 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); 253 254 return 0; 255 } 256 257 /** 258 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init 259 * @hw: pointer to hardware structure 260 * 261 * Initialize any function pointers that were not able to be 262 * set during get_invariants because the PHY/SFP type was 263 * not known. Perform the SFP init if necessary. 264 * 265 **/ 266 static int ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) 267 { 268 struct ixgbe_mac_info *mac = &hw->mac; 269 struct ixgbe_phy_info *phy = &hw->phy; 270 int ret_val; 271 u32 esdp; 272 273 if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { 274 /* Store flag indicating I2C bus access control unit. 
*/ 275 hw->phy.qsfp_shared_i2c_bus = true; 276 277 /* Initialize access to QSFP+ I2C bus */ 278 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 279 esdp |= IXGBE_ESDP_SDP0_DIR; 280 esdp &= ~IXGBE_ESDP_SDP1_DIR; 281 esdp &= ~IXGBE_ESDP_SDP0; 282 esdp &= ~IXGBE_ESDP_SDP0_NATIVE; 283 esdp &= ~IXGBE_ESDP_SDP1_NATIVE; 284 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); 285 IXGBE_WRITE_FLUSH(hw); 286 287 phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599; 288 phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599; 289 } 290 291 /* Identify the PHY or SFP module */ 292 ret_val = phy->ops.identify(hw); 293 294 /* Setup function pointers based on detected SFP module and speeds */ 295 ixgbe_init_mac_link_ops_82599(hw); 296 297 /* If copper media, overwrite with copper function pointers */ 298 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 299 mac->ops.setup_link = &ixgbe_setup_copper_link_82599; 300 mac->ops.get_link_capabilities = 301 &ixgbe_get_copper_link_capabilities_generic; 302 } 303 304 /* Set necessary function pointers based on phy type */ 305 switch (hw->phy.type) { 306 case ixgbe_phy_tn: 307 phy->ops.check_link = &ixgbe_check_phy_link_tnx; 308 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; 309 break; 310 default: 311 break; 312 } 313 314 return ret_val; 315 } 316 317 /** 318 * ixgbe_get_link_capabilities_82599 - Determines link capabilities 319 * @hw: pointer to hardware structure 320 * @speed: pointer to link speed 321 * @autoneg: true when autoneg or autotry is enabled 322 * 323 * Determines the link capabilities by reading the AUTOC register. 
324 **/ 325 static int ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, 326 ixgbe_link_speed *speed, 327 bool *autoneg) 328 { 329 u32 autoc = 0; 330 331 /* Determine 1G link capabilities off of SFP+ type */ 332 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || 333 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || 334 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || 335 hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || 336 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || 337 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 || 338 hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 || 339 hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1) { 340 *speed = IXGBE_LINK_SPEED_1GB_FULL; 341 *autoneg = true; 342 return 0; 343 } 344 345 /* 346 * Determine link capabilities based on the stored value of AUTOC, 347 * which represents EEPROM defaults. If AUTOC value has not been 348 * stored, use the current register value. 349 */ 350 if (hw->mac.orig_link_settings_stored) 351 autoc = hw->mac.orig_autoc; 352 else 353 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 354 355 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 356 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: 357 *speed = IXGBE_LINK_SPEED_1GB_FULL; 358 *autoneg = false; 359 break; 360 361 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: 362 *speed = IXGBE_LINK_SPEED_10GB_FULL; 363 *autoneg = false; 364 break; 365 366 case IXGBE_AUTOC_LMS_1G_AN: 367 *speed = IXGBE_LINK_SPEED_1GB_FULL; 368 *autoneg = true; 369 break; 370 371 case IXGBE_AUTOC_LMS_10G_SERIAL: 372 *speed = IXGBE_LINK_SPEED_10GB_FULL; 373 *autoneg = false; 374 break; 375 376 case IXGBE_AUTOC_LMS_KX4_KX_KR: 377 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: 378 *speed = IXGBE_LINK_SPEED_UNKNOWN; 379 if (autoc & IXGBE_AUTOC_KR_SUPP) 380 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 381 if (autoc & IXGBE_AUTOC_KX4_SUPP) 382 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 383 if (autoc & IXGBE_AUTOC_KX_SUPP) 384 *speed |= IXGBE_LINK_SPEED_1GB_FULL; 385 *autoneg = true; 386 break; 387 388 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: 
389 *speed = IXGBE_LINK_SPEED_100_FULL; 390 if (autoc & IXGBE_AUTOC_KR_SUPP) 391 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 392 if (autoc & IXGBE_AUTOC_KX4_SUPP) 393 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 394 if (autoc & IXGBE_AUTOC_KX_SUPP) 395 *speed |= IXGBE_LINK_SPEED_1GB_FULL; 396 *autoneg = true; 397 break; 398 399 case IXGBE_AUTOC_LMS_SGMII_1G_100M: 400 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; 401 *autoneg = false; 402 break; 403 404 default: 405 return -EIO; 406 } 407 408 if (hw->phy.multispeed_fiber) { 409 *speed |= IXGBE_LINK_SPEED_10GB_FULL | 410 IXGBE_LINK_SPEED_1GB_FULL; 411 412 /* QSFP must not enable auto-negotiation */ 413 if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp) 414 *autoneg = false; 415 else 416 *autoneg = true; 417 } 418 419 return 0; 420 } 421 422 /** 423 * ixgbe_get_media_type_82599 - Get media type 424 * @hw: pointer to hardware structure 425 * 426 * Returns the media type (fiber, copper, backplane) 427 **/ 428 static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) 429 { 430 /* Detect if there is a copper PHY attached. 
*/ 431 switch (hw->phy.type) { 432 case ixgbe_phy_cu_unknown: 433 case ixgbe_phy_tn: 434 return ixgbe_media_type_copper; 435 436 default: 437 break; 438 } 439 440 switch (hw->device_id) { 441 case IXGBE_DEV_ID_82599_KX4: 442 case IXGBE_DEV_ID_82599_KX4_MEZZ: 443 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: 444 case IXGBE_DEV_ID_82599_KR: 445 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: 446 case IXGBE_DEV_ID_82599_XAUI_LOM: 447 /* Default device ID is mezzanine card KX/KX4 */ 448 return ixgbe_media_type_backplane; 449 450 case IXGBE_DEV_ID_82599_SFP: 451 case IXGBE_DEV_ID_82599_SFP_FCOE: 452 case IXGBE_DEV_ID_82599_SFP_EM: 453 case IXGBE_DEV_ID_82599_SFP_SF2: 454 case IXGBE_DEV_ID_82599_SFP_SF_QP: 455 case IXGBE_DEV_ID_82599EN_SFP: 456 return ixgbe_media_type_fiber; 457 458 case IXGBE_DEV_ID_82599_CX4: 459 return ixgbe_media_type_cx4; 460 461 case IXGBE_DEV_ID_82599_T3_LOM: 462 return ixgbe_media_type_copper; 463 464 case IXGBE_DEV_ID_82599_LS: 465 return ixgbe_media_type_fiber_lco; 466 467 case IXGBE_DEV_ID_82599_QSFP_SF_QP: 468 return ixgbe_media_type_fiber_qsfp; 469 470 default: 471 return ixgbe_media_type_unknown; 472 } 473 } 474 475 /** 476 * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3 477 * @hw: pointer to hardware structure 478 * 479 * Disables link, should be called during D3 power down sequence. 
480 * 481 **/ 482 static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) 483 { 484 u32 autoc2_reg; 485 u16 ee_ctrl_2 = 0; 486 487 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); 488 489 if (!ixgbe_mng_present(hw) && !hw->wol_enabled && 490 ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { 491 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 492 autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; 493 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); 494 } 495 } 496 497 /** 498 * ixgbe_start_mac_link_82599 - Setup MAC link settings 499 * @hw: pointer to hardware structure 500 * @autoneg_wait_to_complete: true when waiting for completion is needed 501 * 502 * Configures link settings based on values in the ixgbe_hw struct. 503 * Restarts the link. Performs autonegotiation if needed. 504 **/ 505 static int ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 506 bool autoneg_wait_to_complete) 507 { 508 bool got_lock = false; 509 int status = 0; 510 u32 autoc_reg; 511 u32 links_reg; 512 u32 i; 513 514 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { 515 status = hw->mac.ops.acquire_swfw_sync(hw, 516 IXGBE_GSSR_MAC_CSR_SM); 517 if (status) 518 return status; 519 520 got_lock = true; 521 } 522 523 /* Restart link */ 524 ixgbe_reset_pipeline_82599(hw); 525 526 if (got_lock) 527 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 528 529 /* Only poll for autoneg to complete if specified to do so */ 530 if (autoneg_wait_to_complete) { 531 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 532 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == 533 IXGBE_AUTOC_LMS_KX4_KX_KR || 534 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 535 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 536 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 537 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 538 links_reg = 0; /* Just in case Autoneg time = 0 */ 539 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { 540 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 541 if (links_reg & IXGBE_LINKS_KX_AN_COMP) 542 break; 543 msleep(100); 544 } 545 if (!(links_reg & 
IXGBE_LINKS_KX_AN_COMP)) { 546 status = -EIO; 547 hw_dbg(hw, "Autoneg did not complete.\n"); 548 } 549 } 550 } 551 552 /* Add delay to filter out noises during initial link setup */ 553 msleep(50); 554 555 return status; 556 } 557 558 /** 559 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser 560 * @hw: pointer to hardware structure 561 * 562 * The base drivers may require better control over SFP+ module 563 * PHY states. This includes selectively shutting down the Tx 564 * laser on the PHY, effectively halting physical link. 565 **/ 566 static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 567 { 568 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 569 570 /* Blocked by MNG FW so bail */ 571 if (ixgbe_check_reset_blocked(hw)) 572 return; 573 574 /* Disable tx laser; allow 100us to go dark per spec */ 575 esdp_reg |= IXGBE_ESDP_SDP3; 576 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 577 IXGBE_WRITE_FLUSH(hw); 578 udelay(100); 579 } 580 581 /** 582 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser 583 * @hw: pointer to hardware structure 584 * 585 * The base drivers may require better control over SFP+ module 586 * PHY states. This includes selectively turning on the Tx 587 * laser on the PHY, effectively starting physical link. 588 **/ 589 static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 590 { 591 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 592 593 /* Enable tx laser; allow 100ms to light up */ 594 esdp_reg &= ~IXGBE_ESDP_SDP3; 595 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 596 IXGBE_WRITE_FLUSH(hw); 597 msleep(100); 598 } 599 600 /** 601 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser 602 * @hw: pointer to hardware structure 603 * 604 * When the driver changes the link speeds that it can support, 605 * it sets autotry_restart to true to indicate that we need to 606 * initiate a new autotry session with the link partner. 
To do 607 * so, we set the speed then disable and re-enable the tx laser, to 608 * alert the link partner that it also needs to restart autotry on its 609 * end. This is consistent with true clause 37 autoneg, which also 610 * involves a loss of signal. 611 **/ 612 static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 613 { 614 /* Blocked by MNG FW so bail */ 615 if (ixgbe_check_reset_blocked(hw)) 616 return; 617 618 if (hw->mac.autotry_restart) { 619 ixgbe_disable_tx_laser_multispeed_fiber(hw); 620 ixgbe_enable_tx_laser_multispeed_fiber(hw); 621 hw->mac.autotry_restart = false; 622 } 623 } 624 625 /** 626 * ixgbe_set_hard_rate_select_speed - Set module link speed 627 * @hw: pointer to hardware structure 628 * @speed: link speed to set 629 * 630 * Set module link speed via RS0/RS1 rate select pins. 631 */ 632 static void 633 ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) 634 { 635 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 636 637 switch (speed) { 638 case IXGBE_LINK_SPEED_10GB_FULL: 639 esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); 640 break; 641 case IXGBE_LINK_SPEED_1GB_FULL: 642 esdp_reg &= ~IXGBE_ESDP_SDP5; 643 esdp_reg |= IXGBE_ESDP_SDP5_DIR; 644 break; 645 default: 646 hw_dbg(hw, "Invalid fixed module speed\n"); 647 return; 648 } 649 650 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 651 IXGBE_WRITE_FLUSH(hw); 652 } 653 654 /** 655 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed 656 * @hw: pointer to hardware structure 657 * @speed: new link speed 658 * @autoneg_wait_to_complete: true when waiting for completion is needed 659 * 660 * Implements the Intel SmartSpeed algorithm. 
661 **/ 662 static int ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, 663 ixgbe_link_speed speed, 664 bool autoneg_wait_to_complete) 665 { 666 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; 667 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 668 bool link_up = false; 669 int status = 0; 670 s32 i, j; 671 672 /* Set autoneg_advertised value based on input link speed */ 673 hw->phy.autoneg_advertised = 0; 674 675 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 676 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; 677 678 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 679 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 680 681 if (speed & IXGBE_LINK_SPEED_100_FULL) 682 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; 683 684 /* 685 * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the 686 * autoneg advertisement if link is unable to be established at the 687 * highest negotiated rate. This can sometimes happen due to integrity 688 * issues with the physical media connection. 689 */ 690 691 /* First, try to get link with full advertisement */ 692 hw->phy.smart_speed_active = false; 693 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { 694 status = ixgbe_setup_mac_link_82599(hw, speed, 695 autoneg_wait_to_complete); 696 if (status != 0) 697 goto out; 698 699 /* 700 * Wait for the controller to acquire link. Per IEEE 802.3ap, 701 * Section 73.10.2, we may have to wait up to 500ms if KR is 702 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per 703 * Table 9 in the AN MAS. 704 */ 705 for (i = 0; i < 5; i++) { 706 mdelay(100); 707 708 /* If we have link, just jump out */ 709 status = hw->mac.ops.check_link(hw, &link_speed, 710 &link_up, false); 711 if (status != 0) 712 goto out; 713 714 if (link_up) 715 goto out; 716 } 717 } 718 719 /* 720 * We didn't get link. If we advertised KR plus one of KX4/KX 721 * (or BX4/BX), then disable KR and try again. 
722 */ 723 if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || 724 ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) 725 goto out; 726 727 /* Turn SmartSpeed on to disable KR support */ 728 hw->phy.smart_speed_active = true; 729 status = ixgbe_setup_mac_link_82599(hw, speed, 730 autoneg_wait_to_complete); 731 if (status != 0) 732 goto out; 733 734 /* 735 * Wait for the controller to acquire link. 600ms will allow for 736 * the AN link_fail_inhibit_timer as well for multiple cycles of 737 * parallel detect, both 10g and 1g. This allows for the maximum 738 * connect attempts as defined in the AN MAS table 73-7. 739 */ 740 for (i = 0; i < 6; i++) { 741 mdelay(100); 742 743 /* If we have link, just jump out */ 744 status = hw->mac.ops.check_link(hw, &link_speed, 745 &link_up, false); 746 if (status != 0) 747 goto out; 748 749 if (link_up) 750 goto out; 751 } 752 753 /* We didn't get link. Turn SmartSpeed back off. */ 754 hw->phy.smart_speed_active = false; 755 status = ixgbe_setup_mac_link_82599(hw, speed, 756 autoneg_wait_to_complete); 757 758 out: 759 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) 760 hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n"); 761 return status; 762 } 763 764 /** 765 * ixgbe_setup_mac_link_82599 - Set MAC link speed 766 * @hw: pointer to hardware structure 767 * @speed: new link speed 768 * @autoneg_wait_to_complete: true when waiting for completion is needed 769 * 770 * Set the link speed in the AUTOC register and restarts link. 
771 **/ 772 static int ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, 773 ixgbe_link_speed speed, 774 bool autoneg_wait_to_complete) 775 { 776 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; 777 u32 pma_pmd_10g_serial, pma_pmd_1g, link_mode, links_reg, i; 778 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 779 bool autoneg = false; 780 int status; 781 782 /* holds the value of AUTOC register at this current point in time */ 783 u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 784 /* holds the cached value of AUTOC register */ 785 u32 orig_autoc = 0; 786 /* temporary variable used for comparison purposes */ 787 u32 autoc = current_autoc; 788 789 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; 790 791 /* Check to see if speed passed in is supported. */ 792 status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, 793 &autoneg); 794 if (status) 795 return status; 796 797 speed &= link_capabilities; 798 799 if (speed == IXGBE_LINK_SPEED_UNKNOWN) 800 return -EINVAL; 801 802 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ 803 if (hw->mac.orig_link_settings_stored) 804 orig_autoc = hw->mac.orig_autoc; 805 else 806 orig_autoc = autoc; 807 808 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; 809 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; 810 811 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 812 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 813 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 814 /* Set KX4/KX/KR support according to speed requested */ 815 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); 816 if (speed & IXGBE_LINK_SPEED_10GB_FULL) { 817 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) 818 autoc |= IXGBE_AUTOC_KX4_SUPP; 819 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && 820 (hw->phy.smart_speed_active == false)) 821 autoc |= IXGBE_AUTOC_KR_SUPP; 822 } 823 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 824 autoc |= IXGBE_AUTOC_KX_SUPP; 825 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && 826 
(link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || 827 link_mode == IXGBE_AUTOC_LMS_1G_AN)) { 828 /* Switch from 1G SFI to 10G SFI if requested */ 829 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && 830 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { 831 autoc &= ~IXGBE_AUTOC_LMS_MASK; 832 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; 833 } 834 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && 835 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { 836 /* Switch from 10G SFI to 1G SFI if requested */ 837 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && 838 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { 839 autoc &= ~IXGBE_AUTOC_LMS_MASK; 840 if (autoneg) 841 autoc |= IXGBE_AUTOC_LMS_1G_AN; 842 else 843 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; 844 } 845 } 846 847 if (autoc != current_autoc) { 848 /* Restart link */ 849 status = hw->mac.ops.prot_autoc_write(hw, autoc, false); 850 if (status) 851 return status; 852 853 /* Only poll for autoneg to complete if specified to do so */ 854 if (autoneg_wait_to_complete) { 855 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 856 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 857 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 858 links_reg = 0; /*Just in case Autoneg time=0*/ 859 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { 860 links_reg = 861 IXGBE_READ_REG(hw, IXGBE_LINKS); 862 if (links_reg & IXGBE_LINKS_KX_AN_COMP) 863 break; 864 msleep(100); 865 } 866 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 867 status = -EIO; 868 hw_dbg(hw, "Autoneg did not complete.\n"); 869 } 870 } 871 } 872 873 /* Add delay to filter out noises during initial link setup */ 874 msleep(50); 875 } 876 877 return status; 878 } 879 880 /** 881 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field 882 * @hw: pointer to hardware structure 883 * @speed: new link speed 884 * @autoneg_wait_to_complete: true if waiting is needed to complete 885 * 886 * Restarts link on PHY and MAC based on settings passed in. 
887 **/ 888 static int ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 889 ixgbe_link_speed speed, 890 bool autoneg_wait_to_complete) 891 { 892 int status; 893 894 /* Setup the PHY according to input speed */ 895 status = hw->phy.ops.setup_link_speed(hw, speed, 896 autoneg_wait_to_complete); 897 /* Set up MAC */ 898 ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); 899 900 return status; 901 } 902 903 /** 904 * ixgbe_reset_hw_82599 - Perform hardware reset 905 * @hw: pointer to hardware structure 906 * 907 * Resets the hardware by resetting the transmit and receive units, masks 908 * and clears all interrupts, perform a PHY reset, and perform a link (MAC) 909 * reset. 910 **/ 911 static int ixgbe_reset_hw_82599(struct ixgbe_hw *hw) 912 { 913 ixgbe_link_speed link_speed; 914 u32 ctrl, i, autoc, autoc2; 915 bool link_up = false; 916 u32 curr_lms; 917 int status; 918 919 /* Call adapter stop to disable tx/rx and clear interrupts */ 920 status = hw->mac.ops.stop_adapter(hw); 921 if (status) 922 return status; 923 924 /* flush pending Tx transactions */ 925 ixgbe_clear_tx_pending(hw); 926 927 /* PHY ops must be identified and initialized prior to reset */ 928 929 /* Identify PHY and related function pointers */ 930 status = hw->phy.ops.init(hw); 931 932 if (status == -EOPNOTSUPP) 933 return status; 934 935 /* Setup SFP module if there is one present. */ 936 if (hw->phy.sfp_setup_needed) { 937 status = hw->mac.ops.setup_sfp(hw); 938 hw->phy.sfp_setup_needed = false; 939 } 940 941 if (status == -EOPNOTSUPP) 942 return status; 943 944 /* Reset PHY */ 945 if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL) 946 hw->phy.ops.reset(hw); 947 948 /* remember AUTOC from before we reset */ 949 curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK; 950 951 mac_reset_top: 952 /* 953 * Issue global reset to the MAC. Needs to be SW reset if link is up. 954 * If link reset is used when link is up, it might reset the PHY when 955 * mng is using it. 
If link is down or the flag to force full link 956 * reset is set, then perform link reset. 957 */ 958 ctrl = IXGBE_CTRL_LNK_RST; 959 if (!hw->force_full_reset) { 960 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 961 if (link_up) 962 ctrl = IXGBE_CTRL_RST; 963 } 964 965 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); 966 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); 967 IXGBE_WRITE_FLUSH(hw); 968 usleep_range(1000, 1200); 969 970 /* Poll for reset bit to self-clear indicating reset is complete */ 971 for (i = 0; i < 10; i++) { 972 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 973 if (!(ctrl & IXGBE_CTRL_RST_MASK)) 974 break; 975 udelay(1); 976 } 977 978 if (ctrl & IXGBE_CTRL_RST_MASK) { 979 status = -EIO; 980 hw_dbg(hw, "Reset polling failed to complete.\n"); 981 } 982 983 msleep(50); 984 985 /* 986 * Double resets are required for recovery from certain error 987 * conditions. Between resets, it is necessary to stall to allow time 988 * for any pending HW events to complete. 989 */ 990 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { 991 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; 992 goto mac_reset_top; 993 } 994 995 /* 996 * Store the original AUTOC/AUTOC2 values if they have not been 997 * stored off yet. Otherwise restore the stored original 998 * values since the reset operation sets back to defaults. 
999 */ 1000 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 1001 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 1002 1003 /* Enable link if disabled in NVM */ 1004 if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) { 1005 autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; 1006 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); 1007 IXGBE_WRITE_FLUSH(hw); 1008 } 1009 1010 if (hw->mac.orig_link_settings_stored == false) { 1011 hw->mac.orig_autoc = autoc; 1012 hw->mac.orig_autoc2 = autoc2; 1013 hw->mac.orig_link_settings_stored = true; 1014 } else { 1015 1016 /* If MNG FW is running on a multi-speed device that 1017 * doesn't autoneg with out driver support we need to 1018 * leave LMS in the state it was before we MAC reset. 1019 * Likewise if we support WoL we don't want change the 1020 * LMS state either. 1021 */ 1022 if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) || 1023 hw->wol_enabled) 1024 hw->mac.orig_autoc = 1025 (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) | 1026 curr_lms; 1027 1028 if (autoc != hw->mac.orig_autoc) { 1029 status = hw->mac.ops.prot_autoc_write(hw, 1030 hw->mac.orig_autoc, 1031 false); 1032 if (status) 1033 return status; 1034 } 1035 1036 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != 1037 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { 1038 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; 1039 autoc2 |= (hw->mac.orig_autoc2 & 1040 IXGBE_AUTOC2_UPPER_MASK); 1041 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); 1042 } 1043 } 1044 1045 /* Store the permanent mac address */ 1046 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); 1047 1048 /* 1049 * Store MAC address from RAR0, clear receive address registers, and 1050 * clear the multicast table. Also reset num_rar_entries to 128, 1051 * since we modify this value when programming the SAN MAC address. 
 */
	hw->mac.num_rar_entries = IXGBE_82599_RAR_ENTRIES;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (is_valid_ether_addr(hw->mac.san_addr)) {
		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* clear VMDq pool/queue selection for this RAR */
		hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index,
				       IXGBE_CLEAR_VMDQ_ALL);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

	return status;
}

/**
 * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete
 * @hw: pointer to hardware structure
 * @fdircmd: current value of FDIRCMD register
 *
 * Polls FDIRCMD up to IXGBE_FDIRCMD_CMD_POLL times, 10 usec apart, waiting
 * for the command field to clear.  On return @fdircmd holds the last value
 * read, so callers can inspect the remaining status bits.
 *
 * Return: 0 once FDIRCMD.CMD reads back as idle, -EIO on poll timeout.
 */
static int ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd)
{
	int i;

	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		*fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			return 0;
		udelay(10);
	}

	return -EIO;
}

/**
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 *
 * Return: 0 on success, -EIO if a previous command is still pending or
 * the re-init does not report done within the poll window.
 **/
int ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	u32 fdircmd;
	int err;
	int i;

	/* clear INIT_DONE so we can detect completion of the re-init below */
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
 */
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		hw_dbg(hw, "Flow Director previous command did not complete, aborting table re-initialization.\n");
		return err;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * 82599 adapters flow director init flow cannot be restarted,
	 * Workaround 82599 silicon errata by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value.
	 * - write 1 to bit 8 of FDIRCMD register &
	 * - write 0 to bit 8 of FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	/* INIT_DONE was cleared above; hardware sets it again when ready */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		usleep_range(1000, 2000);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
		return -EIO;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return 0;
}

/**
 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register
 *
 * Programs the hash keys, writes FDIRCTRL, and polls for INIT_DONE.
 * A poll timeout is only logged; the caller is not notified.
 **/
static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	int i;

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *      10G: PBALLOC = 11b, timing is 60us
	 *       1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 * Multiply these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		usleep_range(1000, 2000);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		hw_dbg(hw, "Flow Director poll time exceeded!\n");
}

/**
 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *	      contains just the value of the Rx packet buffer allocation
 *
 * Return: always 0; poll timeouts inside ixgbe_fdir_enable_82599 are only
 * logged.
 **/
int ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	/*
	 * Continue setup of fdirctrl register bits:
	 *  Move the flexible bytes to use the ethertype - shift 6 words
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 filters are left
	 */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/*
write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return 0;
}

/**
 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *	      contains just the value of the Rx packet buffer allocation
 *
 * Return: always 0; poll timeouts inside ixgbe_fdir_enable_82599 are only
 * logged.
 **/
int ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	/*
	 * Continue setup of fdirctrl register bits:
	 *  Turn perfect match filtering on
	 *  Initialize the drop queue
	 *  Move the flexible bytes to use the ethertype - shift 6 words
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return 0;
}

/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15.
 *
 * NOTE: the macros reference the locals common_hash, bucket_hash, sig_hash,
 * lo_hash_dword and hi_hash_dword of the enclosing function.
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)

/**
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @input: input bitstream to compute the hash on
 * @common: compressed common input dword
 *
 * This function is almost identical to the function above but contains
 * several optimizations such as unwinding all of the loops, letting the
 * compiler work out all of the conditional ifs since the keys are static
 * defines, and computing two keys at once since the hashed dword stream
 * will be the same for both keys.
 *
 * Return: signature hash in the upper 16 bits combined with the bucket
 * hash in the lower 16 bits, both limited by IXGBE_ATR_HASH_MASK.
 **/
static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
					    union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = ntohl(input.dword);

	/* generate common hash dword */
	hi_hash_dword = ntohl(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bits of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}

/**
 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 * @hw: pointer to hardware structure
 * @input: unique input dword
 * @common: compressed common input dword
 * @queue: queue index to direct traffic to
 *
 * Note that the tunnel bit in input must not be set when the hardware
 * tunneling support does not exist.
 **/
int ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_hash_dword input,
					  union ixgbe_atr_hash_dword common,
					  u8 queue)
{
	u64 fdirhashcmd;
	u8 flow_type;
	bool tunnel;
	u32 fdircmd;

	/*
	 * Get the flow_type in order to program FDIRCMD properly
	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
	 */
	tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK);
	flow_type = input.formatted.flow_type &
		    (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1);
	switch (flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TCPV6:
	case IXGBE_ATR_FLOW_TYPE_UDPV6:
	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
		break;
	default:
		hw_dbg(hw, " Error on flow type input\n");
		return -EIO;
	}

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	if (tunnel)
		fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER;

	/*
	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
	 */
	fdirhashcmd = (u64)fdircmd << 32;
	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);

	hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);

	return 0;
}

/*
 * Bucket-hash-only variant of IXGBE_COMPUTE_SIG_HASH_ITERATION above;
 * references the locals bucket_hash, lo_hash_dword and hi_hash_dword of
 * the enclosing function.
 */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0)

/**
 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
 * @input: input bitstream to compute the hash on
 * @input_mask: mask for the input bitstream
 *
 * This function serves two main purposes.  First it applies the input_mask
 * to the atr_input resulting in a cleaned up atr_input data stream.
 * Secondly it computes the hash and stores it in the bkt_hash field at
 * the end of the input byte stream.  This way it will be available for
 * future use without needing to recompute the hash.
 *
 * Note: @input is modified in place (masked, and bkt_hash written).
 **/
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *input_mask)
{

	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0;
	__be32 hi_dword = 0;
	int i;

	/* Apply masks to input data */
	for (i = 0; i <= 10; i++)
		input->dword_stream[i] &= input_mask->dword_stream[i];

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = ntohl(input->dword_stream[0]);

	/* generate common hash dword */
	for (i = 1; i <= 10; i++)
		hi_dword ^= input->dword_stream[i];
	hi_hash_dword = ntohl(hi_dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bits of the key */
	for (i = 1; i <= 15; i++)
		IXGBE_COMPUTE_BKT_HASH_ITERATION(i);

	/*
	 * Limit hash to 13 bits since max bucket count is 8K.
	 * Store result at the end of the input stream.
	 */
	input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF);
}

/**
 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
 * @input_mask: mask to be bit swapped
 *
 * The source and destination port masks for flow director are bit swapped
 * in that bit 15 affects bit 0, 14 affects 1, 13 affects 2 etc.
In order to
 * generate a correctly swapped value we need to bit swap the mask and that
 * is what is accomplished by this function.
 *
 * Return: dst_port mask in the upper 16 bits and src_port mask in the
 * lower 16 bits, each bit-reversed within its 16-bit half.
 **/
static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
{
	u32 mask = ntohs(input_mask->formatted.dst_port);

	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
	mask |= ntohs(input_mask->formatted.src_port);
	/* classic bit reversal: swap bits, then pairs, nibbles, and bytes */
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}

/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))

#define IXGBE_STORE_AS_BE16(_value) __swab16(ntohs((_value)))

/**
 * ixgbe_fdir_set_input_mask_82599 - program the Flow Director input masks
 * @hw: pointer to hardware structure
 * @input_mask: masks for each filter field; all-ones means "match field",
 *	all-zeroes means "ignore field"; partial masks are mostly rejected
 *
 * Return: 0 on success, -EIO if an unsupported partial mask is supplied.
 */
int ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field.  Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		hw_dbg(hw, " bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		break;
	case 0x7F:
		break;
	default:
		hw_dbg(hw, " Error on vm pool mask\n");
		return -EIO;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		/* ports cannot be matched if the L4 protocol is masked out */
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			hw_dbg(hw, " Error on src/dst port mask\n");
			return -EIO;
		}
		break;
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		hw_dbg(hw, " Error on flow type mask\n");
		return -EIO;
	}

	/* VLAN mask: bits 15:13 are priority, bits 11:0 are VLAN ID */
	switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID */
		fdirm |= IXGBE_FDIRM_VLANID;
		fallthrough;
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only */
		fdirm |= IXGBE_FDIRM_VLANID;
		fallthrough;
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		hw_dbg(hw, " Error on VLAN mask\n");
		return -EIO;
	}

	switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes */
		fdirm |= IXGBE_FDIRM_FLEX;
		fallthrough;
	case 0xFFFF:
		break;
	default:
		hw_dbg(hw, " Error on flexible byte mask\n");
		return -EIO;
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

	/* write both the
same so that UDP and TCP use the same mask */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);

	/* also use it for SCTP */
	switch (hw->mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
		break;
	default:
		break;
	}

	/* store source and destination IP masks (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
			     ~input_mask->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
			     ~input_mask->formatted.dst_ip[0]);

	return 0;
}

/**
 * ixgbe_fdir_write_perfect_filter_82599 - program a perfect filter
 * @hw: pointer to hardware structure
 * @input: filter fields to program (bkt_hash must already be computed)
 * @soft_id: software index stored with the filter for later lookup/removal
 * @queue: Rx queue to direct matching traffic to; IXGBE_FDIR_DROP_QUEUE
 *	   drops matching packets instead
 *
 * Return: 0 on success, -EIO if the FDIRCMD add command does not complete.
 */
int ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;
	int err;

	/* currently IPv6 is not supported, must be programmed with 0 */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
			     input->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
			     input->formatted.src_ip[1]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
			     input->formatted.src_ip[2]);

	/* record the source address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);

	/* record the first 32 bits of the destination address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);

	/* record source and destination port (little-endian)*/
	fdirport = be16_to_cpu(input->formatted.dst_port);
	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
	fdirport |= be16_to_cpu(input->formatted.src_port);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= ntohs(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register */
	fdirhash = (__force u32)input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		hw_dbg(hw, "Flow Director command did not complete!\n");
		return err;
	}

	return 0;
}

/**
 * ixgbe_fdir_erase_perfect_filter_82599 - remove a perfect filter
 * @hw: pointer to hardware structure
 * @input: filter identity; bkt_hash must match the programmed filter
 * @soft_id: software index the filter was programmed with
 *
 * Queries the hardware for the filter and, only if present, issues the
 * remove-flow command.  Removing a non-existent filter is not an error.
 *
 * Return: 0 on success, -EIO if the query command does not complete.
 */
int ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd;
	int err;

	/* configure FDIRHASH register */
	fdirhash = (__force u32)input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd);
	if (err) {
		hw_dbg(hw, "Flow Director command did not complete!\n");
		return err;
	}

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return 0;
}

/**
 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Omer analog register specified.
 *
 * Return: always 0.
 **/
static int ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 core_ctl;

	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
			(reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	/* brief delay before reading back the analog value */
	udelay(10);
	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
	*val = (u8)core_ctl;

	return 0;
}

/**
 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: atlas register to write
 * @val: value to write
 *
 * Performs write operation to Omer analog register specified.
 *
 * Return: always 0.
 **/
static int ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32 core_ctl;

	core_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
	IXGBE_WRITE_FLUSH(hw);
	/* brief delay to let the write settle */
	udelay(10);

	return 0;
}

/**
 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function
 * and the Gen2 start_hw function.
 * Then performs revision-specific operations, if any.
 **/
static int ixgbe_start_hw_82599(struct ixgbe_hw *hw)
{
	int ret_val = 0;

	ret_val = ixgbe_start_hw_generic(hw);
	if (ret_val)
		return ret_val;

	ret_val = ixgbe_start_hw_gen2(hw);
	if (ret_val)
		return ret_val;

	/* We need to run link autotry after the driver loads */
	hw->mac.autotry_restart = true;

	return ixgbe_verify_fw_version_82599(hw);
}

/**
 * ixgbe_identify_phy_82599 - Get physical layer module
 * @hw: pointer to hardware structure
 *
 * Determines the physical layer module found on the current adapter.
 * If PHY already detected, maintains current PHY type in hw struct,
 * otherwise executes the PHY detection routine.
 *
 * Return: 0 on success (including "no PHY present"), -EOPNOTSUPP for an
 * unsupported SFP module, otherwise the detection error.
 **/
static int ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
{
	int status;

	/* Detect PHY if not unknown - returns success if already detected. */
	status = ixgbe_identify_phy_generic(hw);
	if (status) {
		/* 82599 10GBASE-T requires an external PHY */
		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
			return status;
		/* fall back to probing for an SFP/QSFP module */
		status = ixgbe_identify_module_generic(hw);
	}

	/* Set PHY type none if no PHY detected */
	if (hw->phy.type == ixgbe_phy_unknown) {
		hw->phy.type = ixgbe_phy_none;
		status = 0;
	}

	/* Return error if SFP module has been detected but is not supported */
	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		return -EOPNOTSUPP;

	return status;
}

/**
 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit for 82599
 *
 * Return: always 0.
 **/
static int ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
	/*
	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
	 * If traffic is incoming before we enable the Rx unit, it could hang
	 * the Rx DMA unit.  Therefore, make sure the security engine is
	 * completely disabled prior to enabling the Rx unit.
	 */
	hw->mac.ops.disable_rx_buff(hw);

	if (regval & IXGBE_RXCTRL_RXEN)
		hw->mac.ops.enable_rx(hw);
	else
		hw->mac.ops.disable_rx(hw);

	hw->mac.ops.enable_rx_buff(hw);

	return 0;
}

/**
 * ixgbe_verify_fw_version_82599 - verify fw version for 82599
 * @hw: pointer to hardware structure
 *
 * Verifies that the installed firmware version is 0.6 or higher
 * for SFI devices.  All 82599 SFI devices should have version 0.6 or higher.
 *
 * Return: 0 if the check passes (or is not applicable), -EACCES if the FW
 * is not present or if the FW version is not supported.
 **/
static int ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
	u16 fw_offset, fw_ptp_cfg_offset;
	int status = -EACCES;
	u16 fw_version = 0;
	u16 offset;

	/* firmware check is only necessary for SFI devices */
	if (hw->phy.media_type != ixgbe_media_type_fiber)
		return 0;

	/* get the offset to the Firmware Module block */
	offset = IXGBE_FW_PTR;
	if (hw->eeprom.ops.read(hw, offset, &fw_offset))
		goto fw_version_err;

	/* 0 / 0xFFFF indicate an unprogrammed or erased EEPROM pointer */
	if (fw_offset == 0 || fw_offset == 0xFFFF)
		return -EACCES;

	/* get the offset to the Pass Through Patch Configuration block */
	offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
	if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset))
		goto fw_version_err;

	if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF)
		return -EACCES;

	/* get the firmware version */
	offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
	if (hw->eeprom.ops.read(hw, offset, &fw_version))
		goto fw_version_err;

	if (fw_version > 0x5)
		status = 0;

	return status;

fw_version_err:
	hw_err(hw, "eeprom read at offset %d failed\n", offset);
	return -EACCES;
}

/**
 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
 * @hw: pointer to hardware structure
 *
 * Returns true if the LESM FW module is present and enabled.  Otherwise
 * returns false.  Smart Speed must be disabled if LESM FW module is enabled.
 **/
static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
	u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
	int status;

	/* get the offset to the Firmware Module block */
	status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);

	/* any EEPROM failure or unprogrammed pointer means "not enabled" */
	if (status || fw_offset == 0 || fw_offset == 0xFFFF)
		return false;

	/* get the offset to the LESM Parameters block */
	status = hw->eeprom.ops.read(hw, (fw_offset +
				     IXGBE_FW_LESM_PARAMETERS_PTR),
				     &fw_lesm_param_offset);

	if (status ||
	    fw_lesm_param_offset == 0 || fw_lesm_param_offset == 0xFFFF)
		return false;

	/* get the lesm state word */
	status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
				     IXGBE_FW_LESM_STATE_1),
				     &fw_lesm_state);

	if (!status && (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
		return true;

	return false;
}

/**
 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
 * fastest available method
 *
 * @hw: pointer to hardware structure
 * @offset: offset of word in EEPROM to read
 * @words: number of words
 * @data: word(s) read from the EEPROM
 *
 * Retrieves 16 bit word(s) read from EEPROM
 *
 * Return: result of the underlying EERD or bit-bang read.
 **/
static int ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
					  u16 words, u16 *data)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;

	/* If EEPROM is detected and can be addressed using 14 bits,
	 * use EERD otherwise use bit bang
	 */
	if (eeprom->type == ixgbe_eeprom_spi &&
	    offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)
		return ixgbe_read_eerd_buffer_generic(hw, offset, words, data);

	return ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, words,
							 data);
}

/**
 * ixgbe_read_eeprom_82599 - Read EEPROM word using
 * fastest available method
 *
 * @hw: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM
 *
 * Return: result of the underlying EERD or bit-bang read.
 **/
static int ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
				   u16 offset, u16 *data)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;

	/*
	 * If EEPROM is detected and can be addressed using 14 bits,
	 * use EERD otherwise use bit bang
	 */
	if (eeprom->type == ixgbe_eeprom_spi && offset <= IXGBE_EERD_MAX_ADDR)
		return ixgbe_read_eerd_generic(hw, offset, data);

	return ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
}

/**
 * ixgbe_reset_pipeline_82599 - perform pipeline reset
 *
 * @hw: pointer to hardware structure
 *
 * Reset pipeline by asserting Restart_AN together with LMS change to ensure
 * full pipeline reset.  Note - We must hold the SW/FW semaphore before writing
 * to AUTOC, so this function assumes the semaphore is held.
2011 **/ 2012 static int ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) 2013 { 2014 u32 i, autoc_reg, autoc2_reg; 2015 u32 anlp1_reg = 0; 2016 int ret_val; 2017 2018 /* Enable link if disabled in NVM */ 2019 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 2020 if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) { 2021 autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK; 2022 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); 2023 IXGBE_WRITE_FLUSH(hw); 2024 } 2025 2026 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2027 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2028 2029 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ 2030 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, 2031 autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); 2032 2033 /* Wait for AN to leave state 0 */ 2034 for (i = 0; i < 10; i++) { 2035 usleep_range(4000, 8000); 2036 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); 2037 if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) 2038 break; 2039 } 2040 2041 if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) { 2042 hw_dbg(hw, "auto negotiation not completed\n"); 2043 ret_val = -EIO; 2044 goto reset_pipeline_out; 2045 } 2046 2047 ret_val = 0; 2048 2049 reset_pipeline_out: 2050 /* Write AUTOC register with original LMS field and Restart_AN */ 2051 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2052 IXGBE_WRITE_FLUSH(hw); 2053 2054 return ret_val; 2055 } 2056 2057 /** 2058 * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C 2059 * @hw: pointer to hardware structure 2060 * @byte_offset: byte offset to read 2061 * @dev_addr: address to read from 2062 * @data: value read 2063 * 2064 * Performs byte read operation to SFP module's EEPROM over I2C interface at 2065 * a specified device address. 2066 **/ 2067 static int ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, 2068 u8 dev_addr, u8 *data) 2069 { 2070 s32 timeout = 200; 2071 int status; 2072 u32 esdp; 2073 2074 if (hw->phy.qsfp_shared_i2c_bus == true) { 2075 /* Acquire I2C bus ownership. 
*/ 2076 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 2077 esdp |= IXGBE_ESDP_SDP0; 2078 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); 2079 IXGBE_WRITE_FLUSH(hw); 2080 2081 while (timeout) { 2082 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 2083 if (esdp & IXGBE_ESDP_SDP1) 2084 break; 2085 2086 usleep_range(5000, 10000); 2087 timeout--; 2088 } 2089 2090 if (!timeout) { 2091 hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); 2092 status = -EIO; 2093 goto release_i2c_access; 2094 } 2095 } 2096 2097 status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data); 2098 2099 release_i2c_access: 2100 if (hw->phy.qsfp_shared_i2c_bus == true) { 2101 /* Release I2C bus ownership. */ 2102 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 2103 esdp &= ~IXGBE_ESDP_SDP0; 2104 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); 2105 IXGBE_WRITE_FLUSH(hw); 2106 } 2107 2108 return status; 2109 } 2110 2111 /** 2112 * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C 2113 * @hw: pointer to hardware structure 2114 * @byte_offset: byte offset to write 2115 * @dev_addr: address to write to 2116 * @data: value to write 2117 * 2118 * Performs byte write operation to SFP module's EEPROM over I2C interface at 2119 * a specified device address. 2120 **/ 2121 static int ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, 2122 u8 dev_addr, u8 data) 2123 { 2124 s32 timeout = 200; 2125 int status; 2126 u32 esdp; 2127 2128 if (hw->phy.qsfp_shared_i2c_bus == true) { 2129 /* Acquire I2C bus ownership. 
*/ 2130 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 2131 esdp |= IXGBE_ESDP_SDP0; 2132 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); 2133 IXGBE_WRITE_FLUSH(hw); 2134 2135 while (timeout) { 2136 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 2137 if (esdp & IXGBE_ESDP_SDP1) 2138 break; 2139 2140 usleep_range(5000, 10000); 2141 timeout--; 2142 } 2143 2144 if (!timeout) { 2145 hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); 2146 status = -EIO; 2147 goto release_i2c_access; 2148 } 2149 } 2150 2151 status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data); 2152 2153 release_i2c_access: 2154 if (hw->phy.qsfp_shared_i2c_bus == true) { 2155 /* Release I2C bus ownership. */ 2156 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); 2157 esdp &= ~IXGBE_ESDP_SDP0; 2158 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); 2159 IXGBE_WRITE_FLUSH(hw); 2160 } 2161 2162 return status; 2163 } 2164 2165 static const struct ixgbe_mac_operations mac_ops_82599 = { 2166 .init_hw = &ixgbe_init_hw_generic, 2167 .reset_hw = &ixgbe_reset_hw_82599, 2168 .start_hw = &ixgbe_start_hw_82599, 2169 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, 2170 .get_media_type = &ixgbe_get_media_type_82599, 2171 .enable_rx_dma = &ixgbe_enable_rx_dma_82599, 2172 .disable_rx_buff = &ixgbe_disable_rx_buff_generic, 2173 .enable_rx_buff = &ixgbe_enable_rx_buff_generic, 2174 .get_mac_addr = &ixgbe_get_mac_addr_generic, 2175 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, 2176 .get_device_caps = &ixgbe_get_device_caps_generic, 2177 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, 2178 .stop_adapter = &ixgbe_stop_adapter_generic, 2179 .get_bus_info = &ixgbe_get_bus_info_generic, 2180 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, 2181 .read_analog_reg8 = &ixgbe_read_analog_reg8_82599, 2182 .write_analog_reg8 = &ixgbe_write_analog_reg8_82599, 2183 .stop_link_on_d3 = &ixgbe_stop_mac_link_on_d3_82599, 2184 .setup_link = &ixgbe_setup_mac_link_82599, 2185 .set_rxpba = &ixgbe_set_rxpba_generic, 2186 .check_link = 
&ixgbe_check_mac_link_generic, 2187 .get_link_capabilities = &ixgbe_get_link_capabilities_82599, 2188 .led_on = &ixgbe_led_on_generic, 2189 .led_off = &ixgbe_led_off_generic, 2190 .init_led_link_act = ixgbe_init_led_link_act_generic, 2191 .blink_led_start = &ixgbe_blink_led_start_generic, 2192 .blink_led_stop = &ixgbe_blink_led_stop_generic, 2193 .set_rar = &ixgbe_set_rar_generic, 2194 .clear_rar = &ixgbe_clear_rar_generic, 2195 .set_vmdq = &ixgbe_set_vmdq_generic, 2196 .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, 2197 .clear_vmdq = &ixgbe_clear_vmdq_generic, 2198 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 2199 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 2200 .enable_mc = &ixgbe_enable_mc_generic, 2201 .disable_mc = &ixgbe_disable_mc_generic, 2202 .clear_vfta = &ixgbe_clear_vfta_generic, 2203 .set_vfta = &ixgbe_set_vfta_generic, 2204 .fc_enable = &ixgbe_fc_enable_generic, 2205 .setup_fc = ixgbe_setup_fc_generic, 2206 .fc_autoneg = ixgbe_fc_autoneg, 2207 .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, 2208 .init_uta_tables = &ixgbe_init_uta_tables_generic, 2209 .setup_sfp = &ixgbe_setup_sfp_modules_82599, 2210 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, 2211 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, 2212 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, 2213 .release_swfw_sync = &ixgbe_release_swfw_sync, 2214 .init_swfw_sync = NULL, 2215 .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic, 2216 .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, 2217 .prot_autoc_read = &prot_autoc_read_82599, 2218 .prot_autoc_write = &prot_autoc_write_82599, 2219 .enable_rx = &ixgbe_enable_rx_generic, 2220 .disable_rx = &ixgbe_disable_rx_generic, 2221 }; 2222 2223 static const struct ixgbe_eeprom_operations eeprom_ops_82599 = { 2224 .init_params = &ixgbe_init_eeprom_params_generic, 2225 .read = &ixgbe_read_eeprom_82599, 2226 .read_buffer = &ixgbe_read_eeprom_buffer_82599, 2227 .write = 
&ixgbe_write_eeprom_generic, 2228 .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic, 2229 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, 2230 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 2231 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 2232 }; 2233 2234 static const struct ixgbe_phy_operations phy_ops_82599 = { 2235 .identify = &ixgbe_identify_phy_82599, 2236 .identify_sfp = &ixgbe_identify_module_generic, 2237 .init = &ixgbe_init_phy_ops_82599, 2238 .reset = &ixgbe_reset_phy_generic, 2239 .read_reg = &ixgbe_read_phy_reg_generic, 2240 .write_reg = &ixgbe_write_phy_reg_generic, 2241 .setup_link = &ixgbe_setup_phy_link_generic, 2242 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, 2243 .read_i2c_byte = &ixgbe_read_i2c_byte_generic, 2244 .write_i2c_byte = &ixgbe_write_i2c_byte_generic, 2245 .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, 2246 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, 2247 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, 2248 .check_overtemp = &ixgbe_tn_check_overtemp, 2249 }; 2250 2251 const struct ixgbe_info ixgbe_82599_info = { 2252 .mac = ixgbe_mac_82599EB, 2253 .get_invariants = &ixgbe_get_invariants_82599, 2254 .mac_ops = &mac_ops_82599, 2255 .eeprom_ops = &eeprom_ops_82599, 2256 .phy_ops = &phy_ops_82599, 2257 .mbx_ops = &mbx_ops_generic, 2258 .mvals = ixgbe_mvals_8259X, 2259 }; 2260