/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at:
 * http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When using or redistributing this file, you may do so under the
 * License only. No other modification of this header is permitted.
 *
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
 */

/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/* IntelVersion: 1.217 scm_061610_003709 */

#include "ixgbe_type.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

/* Forward declarations for the 82599-specific routines in this file. */
s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
    ixgbe_link_speed *speed, bool *autoneg);
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
    ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
    ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete);
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
    bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
    ixgbe_link_speed speed, bool autoneg,
    bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
    ixgbe_link_speed speed, bool autoneg,
    bool autoneg_wait_to_complete);
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw);
void ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);

/*
 * ixgbe_init_mac_link_ops_82599 - Initialize link-related function pointers
 * @hw: pointer to hardware structure
 *
 * Selects setup_link and the Tx-laser handlers according to the detected
 * media: multispeed fiber gets the SFP+ dual-speed routines, SmartSpeed-
 * enabled backplane gets the SmartSpeed algorithm, everything else gets
 * the plain 82599 MAC link setup.
 */
void
ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");

	if (hw->phy.multispeed_fiber) {
		/* Set up dual speed SFP+ support */
		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
		mac->ops.disable_tx_laser =
		    &ixgbe_disable_tx_laser_multispeed_fiber;
		mac->ops.enable_tx_laser =
		    &ixgbe_enable_tx_laser_multispeed_fiber;
		mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
	} else {
		/* Non-fiber media has no Tx laser to control */
		mac->ops.disable_tx_laser = NULL;
		mac->ops.enable_tx_laser = NULL;
		mac->ops.flap_tx_laser = NULL;
		if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
		    hw->phy.smart_speed == ixgbe_smart_speed_on))
			mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
		else
			mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
	}
}

/*
 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 * @hw: pointer to hardware structure
 *
 * Initialize any function pointers that were not able to be
 * set during init_shared_code because the PHY/SFP type was
 * not known. Perform the SFP init if necessary.
 *
 */
s32
ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_init_phy_ops_82599");

	/* Identify the PHY or SFP module */
	ret_val = phy->ops.identify(hw);
	if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto init_phy_ops_out;

	/* Setup function pointers based on detected SFP module and speeds */
	ixgbe_init_mac_link_ops_82599(hw);
	/* A known SFP module needs no PHY reset hook */
	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
		hw->phy.ops.reset = NULL;

	/* If copper media, overwrite with copper function pointers */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
		mac->ops.get_link_capabilities =
		    &ixgbe_get_copper_link_capabilities_generic;
	}

	/* Set necessary function pointers based on phy type */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
		phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
		phy->ops.check_link = &ixgbe_check_phy_link_tnx;
		phy->ops.get_firmware_version =
		    &ixgbe_get_phy_firmware_version_tnx;
		break;
	case ixgbe_phy_aq:
		phy->ops.get_firmware_version =
		    &ixgbe_get_phy_firmware_version_generic;
		break;
	default:
		break;
	}

init_phy_ops_out:
	return (ret_val);
}

/*
 * ixgbe_setup_sfp_modules_82599 - Setup SFP module
 * @hw: pointer to hardware structure
 *
 * For a recognized SFP module, replays the module's init sequence from
 * the EEPROM into IXGBE_CORECTL (under the MAC-CSR software/firmware
 * semaphore) and then restarts autonegotiation to restart the DSP.
 */
s32
ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	/* list_offset is required by the helper but not used afterwards */
	u16 list_offset, data_offset, data_value;

	DEBUGFUNC("ixgbe_setup_sfp_modules_82599");

	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
		ixgbe_init_mac_link_ops_82599(hw);

		hw->phy.ops.reset = NULL;

		ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
		    &data_offset);

		if (ret_val != IXGBE_SUCCESS)
			goto setup_sfp_out;

		/* PHY config will finish before releasing the semaphore */
		ret_val = ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		if (ret_val != IXGBE_SUCCESS) {
			ret_val = IXGBE_ERR_SWFW_SYNC;
			goto setup_sfp_out;
		}

		/*
		 * Replay init words until the 0xffff terminator;
		 * data_offset is pre-incremented past the sequence header.
		 */
		hw->eeprom.ops.read(hw, ++data_offset, &data_value);
		while (data_value != 0xffff) {
			IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
			IXGBE_WRITE_FLUSH(hw);
			hw->eeprom.ops.read(hw, ++data_offset, &data_value);
		}
		/* Now restart DSP by setting Restart_AN */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
		    (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));

		/* Release the semaphore */
		ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
		/* Delay obtaining semaphore again to allow FW access */
		msec_delay(hw->eeprom.semaphore_delay);
	}

setup_sfp_out:
	return (ret_val);
}

/*
 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
 * @hw: pointer to hardware structure
 *
 * Initialize the function pointers and assign the MAC type for 82599.
 * Does not touch the hardware.
206 */ 207 208 s32 209 ixgbe_init_ops_82599(struct ixgbe_hw *hw) 210 { 211 struct ixgbe_mac_info *mac = &hw->mac; 212 struct ixgbe_phy_info *phy = &hw->phy; 213 s32 ret_val; 214 215 DEBUGFUNC("ixgbe_init_ops_82599"); 216 217 ret_val = ixgbe_init_phy_ops_generic(hw); 218 ret_val = ixgbe_init_ops_generic(hw); 219 220 /* PHY */ 221 phy->ops.identify = &ixgbe_identify_phy_82599; 222 phy->ops.init = &ixgbe_init_phy_ops_82599; 223 224 /* MAC */ 225 mac->ops.reset_hw = &ixgbe_reset_hw_82599; 226 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82599; 227 mac->ops.get_media_type = &ixgbe_get_media_type_82599; 228 mac->ops.get_supported_physical_layer = 229 &ixgbe_get_supported_physical_layer_82599; 230 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599; 231 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599; 232 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599; 233 mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599; 234 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82599; 235 mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic; 236 mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic; 237 mac->ops.get_device_caps = &ixgbe_get_device_caps_82599; 238 mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic; 239 mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic; 240 241 /* RAR, Multicast, VLAN */ 242 mac->ops.set_vmdq = &ixgbe_set_vmdq_generic; 243 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic; 244 mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic; 245 mac->rar_highwater = 1; 246 mac->ops.set_vfta = &ixgbe_set_vfta_generic; 247 mac->ops.clear_vfta = &ixgbe_clear_vfta_generic; 248 mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic; 249 mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599; 250 251 /* Link */ 252 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599; 253 mac->ops.check_link = &ixgbe_check_mac_link_generic; 254 ixgbe_init_mac_link_ops_82599(hw); 255 
256 mac->mcft_size = 128; 257 mac->vft_size = 128; 258 mac->num_rar_entries = 128; 259 mac->max_tx_queues = 128; 260 mac->max_rx_queues = 128; 261 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); 262 263 return (ret_val); 264 } 265 266 /* 267 * ixgbe_get_link_capabilities_82599 - Determines link capabilities 268 * @hw: pointer to hardware structure 269 * @speed: pointer to link speed 270 * @negotiation: true when autoneg or autotry is enabled 271 * 272 * Determines the link capabilities by reading the AUTOC register. 273 */ 274 s32 275 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, 276 ixgbe_link_speed *speed, bool *negotiation) 277 { 278 s32 status = IXGBE_SUCCESS; 279 u32 autoc = 0; 280 281 DEBUGFUNC("ixgbe_get_link_capabilities_82599"); 282 283 /* Check if 1G SFP module. */ 284 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || 285 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { 286 *speed = IXGBE_LINK_SPEED_1GB_FULL; 287 *negotiation = true; 288 goto out; 289 } 290 291 /* 292 * Determine link capabilities based on the stored value of AUTOC, 293 * which represents EEPROM defaults. If AUTOC value has not 294 * been stored, use the current register values. 
295 */ 296 if (hw->mac.orig_link_settings_stored) 297 autoc = hw->mac.orig_autoc; 298 else 299 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 300 301 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 302 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: 303 *speed = IXGBE_LINK_SPEED_1GB_FULL; 304 *negotiation = false; 305 break; 306 307 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: 308 *speed = IXGBE_LINK_SPEED_10GB_FULL; 309 *negotiation = false; 310 break; 311 312 case IXGBE_AUTOC_LMS_1G_AN: 313 *speed = IXGBE_LINK_SPEED_1GB_FULL; 314 *negotiation = true; 315 break; 316 317 case IXGBE_AUTOC_LMS_10G_SERIAL: 318 *speed = IXGBE_LINK_SPEED_10GB_FULL; 319 *negotiation = false; 320 break; 321 322 case IXGBE_AUTOC_LMS_KX4_KX_KR: 323 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: 324 *speed = IXGBE_LINK_SPEED_UNKNOWN; 325 if (autoc & IXGBE_AUTOC_KR_SUPP) 326 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 327 if (autoc & IXGBE_AUTOC_KX4_SUPP) 328 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 329 if (autoc & IXGBE_AUTOC_KX_SUPP) 330 *speed |= IXGBE_LINK_SPEED_1GB_FULL; 331 *negotiation = true; 332 break; 333 334 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: 335 *speed = IXGBE_LINK_SPEED_100_FULL; 336 if (autoc & IXGBE_AUTOC_KR_SUPP) 337 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 338 if (autoc & IXGBE_AUTOC_KX4_SUPP) 339 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 340 if (autoc & IXGBE_AUTOC_KX_SUPP) 341 *speed |= IXGBE_LINK_SPEED_1GB_FULL; 342 *negotiation = true; 343 break; 344 345 case IXGBE_AUTOC_LMS_SGMII_1G_100M: 346 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; 347 *negotiation = false; 348 break; 349 350 default: 351 status = IXGBE_ERR_LINK_SETUP; 352 goto out; 353 } 354 355 if (hw->phy.multispeed_fiber) { 356 *speed |= IXGBE_LINK_SPEED_10GB_FULL | 357 IXGBE_LINK_SPEED_1GB_FULL; 358 *negotiation = true; 359 } 360 361 out: 362 return (status); 363 } 364 365 /* 366 * ixgbe_get_media_type_82599 - Get media type 367 * @hw: pointer to hardware structure 368 * 369 * Returns the media type (fiber, copper, backplane) 370 */ 371 enum 
ixgbe_media_type 372 ixgbe_get_media_type_82599(struct ixgbe_hw *hw) 373 { 374 enum ixgbe_media_type media_type; 375 376 DEBUGFUNC("ixgbe_get_media_type_82599"); 377 378 /* Detect if there is a copper PHY attached. */ 379 if (hw->phy.type == ixgbe_phy_cu_unknown || 380 hw->phy.type == ixgbe_phy_tn || 381 hw->phy.type == ixgbe_phy_aq) { 382 media_type = ixgbe_media_type_copper; 383 goto out; 384 } 385 386 switch (hw->device_id) { 387 case IXGBE_DEV_ID_82599_KX4: 388 case IXGBE_DEV_ID_82599_KX4_MEZZ: 389 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: 390 case IXGBE_DEV_ID_82599_KR: 391 case IXGBE_DEV_ID_82599_XAUI_LOM: 392 /* Default device ID is mezzanine card KX/KX4 */ 393 media_type = ixgbe_media_type_backplane; 394 break; 395 case IXGBE_DEV_ID_82599_SFP: 396 case IXGBE_DEV_ID_82599_SFP_EM: 397 media_type = ixgbe_media_type_fiber; 398 break; 399 case IXGBE_DEV_ID_82599_CX4: 400 media_type = ixgbe_media_type_cx4; 401 break; 402 case IXGBE_DEV_ID_82599_T3_LOM: 403 media_type = ixgbe_media_type_copper; 404 break; 405 default: 406 media_type = ixgbe_media_type_unknown; 407 break; 408 } 409 out: 410 return (media_type); 411 } 412 413 /* 414 * ixgbe_start_mac_link_82599 - Setup MAC link settings 415 * @hw: pointer to hardware structure 416 * 417 * Configures link settings based on values in the ixgbe_hw struct. 418 * Restarts the link. Performs autonegotiation if needed. 
 */
s32
ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_start_mac_link_82599");

	/* Restart link */
	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);

	/* Only poll for autoneg to complete if specified to do so */
	if (autoneg_wait_to_complete) {
		/* Polling only applies to the backplane KX/KX4/KR modes */
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

	return (status);
}

/*
 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states. This includes selectively shutting down the Tx
 * laser on the PHY, effectively halting physical link.
 */
void
ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/*
	 * Disable tx laser; allow 100us to go dark per spec
	 */
	esdp_reg |= IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(100);
}

/*
 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
 * @hw: pointer to hardware structure
 *
 * The base drivers may require better control over SFP+ module
 * PHY states. This includes selectively turning on the Tx
 * laser on the PHY, effectively starting physical link.
 */
void
ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);

	/*
	 * Enable tx laser; allow 100ms to light up
	 */
	esdp_reg &= ~IXGBE_ESDP_SDP3;
	IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(100);
}

/*
 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
 * @hw: pointer to hardware structure
 *
 * When the driver changes the link speeds that it can support,
 * it sets autotry_restart to true to indicate that we need to
 * initiate a new autotry session with the link partner. To do
 * so, we set the speed then disable and re-enable the tx laser, to
 * alert the link partner that it also needs to restart autotry on its
 * end. This is consistent with true clause 37 autoneg, which also
 * involves a loss of signal.
 */
void
ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
	DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");

	/* Only flap when a new autotry session was requested */
	if (hw->mac.autotry_restart) {
		ixgbe_disable_tx_laser_multispeed_fiber(hw);
		ixgbe_enable_tx_laser_multispeed_fiber(hw);
		hw->mac.autotry_restart = false;
	}
}

/*
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: true if autonegotiation enabled
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 */
s32
ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
    ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	u32 speedcnt = 0;
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
	u32 i = 0;
	bool link_up = false;
	bool negotiation;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
	if (status != IXGBE_SUCCESS)
		return (status);

	speed &= link_speed;

	/*
	 * Try each speed one by one, highest priority first. We do this in
	 * software because 10gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
		if (status != IXGBE_SUCCESS)
			return (status);

		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set the module link speed (via the SDP5 rate-select pin) */
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(
		    hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg,
		    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return (status);

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/*
		 * Wait for the controller to acquire link. Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted. 82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
			    &link_up, false);
			if (status != IXGBE_SUCCESS)
				return (status);

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
		if (status != IXGBE_SUCCESS)
			return (status);

		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set the module link speed */
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
		IXGBE_WRITE_FLUSH(hw);

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(
		    hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg,
		    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return (status);

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
		if (status != IXGBE_SUCCESS)
			return (status);

		if (link_up)
			goto out;
	}

	/*
	 * We didn't get link. Configure back to the highest speed we tried,
	 * (if there was more than one). We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		/* Recursive call with only the single highest speed tried */
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
		    highest_link_speed, autoneg, autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return (status);
}

/*
 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: true if autonegotiation enabled
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Implements the Intel SmartSpeed algorithm.
 */
s32
ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
    ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = false;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate. This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = false;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
		    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link. Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
			    false);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link. If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = true;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
	    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link. 600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g. This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link. Turn SmartSpeed back off.
	 */
	hw->phy.smart_speed_active = false;
	status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
	    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
		    "from the maximum advertised\n");
	return (status);
}

/*
 * ixgbe_setup_mac_link_82599 - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: true if autonegotiation enabled
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the AUTOC register and restarts link.
 */
s32
ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
    ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 start_autoc = autoc;
	u32 orig_autoc = 0;
	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported.
	 */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status != IXGBE_SUCCESS)
		goto out;

	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	/*
	 * Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support
	 */
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
		/* KR stays disabled while SmartSpeed is active */
		if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
		    (hw->phy.smart_speed_active == false))
			autoc |= IXGBE_AUTOC_KR_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
	    (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
	    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
	    (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != start_autoc) {
		/* Restart link */
		autoc |= IXGBE_AUTOC_AN_RESTART;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /* Just in case Autoneg time=0 */
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					    IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
					    IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noises during initial link setup */
		msec_delay(50);
	}

out:
	return (status);
}

/*
 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: true if autonegotiation enabled
 * @autoneg_wait_to_complete: true if waiting is needed to complete
 *
 * Restarts link on PHY and MAC based on settings passed in.
 */
static s32
ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
    ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete)
{
	s32 status;

	DEBUGFUNC("ixgbe_setup_copper_link_82599");

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
	    autoneg_wait_to_complete);
	/* Set up MAC */
	(void) ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);

	return (status);
}
/*
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 * reset.
 */
s32
ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 ctrl;
	u32 i;
	u32 autoc;
	u32 autoc2;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		/* Only set up the module once per insertion */
		hw->phy.sfp_setup_needed = false;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY (skipped when the caller disabled PHY resets) */
	if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

	/*
	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests before reset
	 */
	(void) ixgbe_disable_pcie_master(hw);

mac_reset_top:
	/*
	 * Issue global reset to the MAC.  This needs to be a SW reset.
	 * If link reset is used, it might reset the MAC when mng is using it
	 */
	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
	IXGBE_WRITE_FLUSH(hw);

	/*
	 * Poll for reset bit to self-clear indicating reset is complete;
	 * gives the hardware up to ~10us (10 x 1us) to finish.
	 */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST)) {
			break;
		}
	}
	if (ctrl & IXGBE_CTRL_RST) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.  We use 1usec since that is
	 * what is needed for ixgbe_disable_pcie_master().  The second reset
	 * then clears out any effects of those events.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		/* Clear the flag first so we loop back at most once */
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		usec_delay(1);
		goto mac_reset_top;
	}

	msec_delay(50);

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = true;
	} else {
		if (autoc != hw->mac.orig_autoc) {
			/* Restart autoneg when restoring link settings */
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
			    IXGBE_AUTOC_AN_RESTART));
		}

		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			/* Only the upper AUTOC2 bits are restored */
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
			    IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
		    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
	    &hw->mac.wwpn_prefix);

reset_hw_out:
	return (status);
}

/*
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 */
s32
ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
		    IXGBE_FDIRCMD_CMD_MASK))
			break;
		usec_delay(10);
	}
	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
		DEBUGOUT("Flow Director previous command isn't complete, "
		    "aborting table re-initialization. \n");
		return (IXGBE_ERR_FDIR_REINIT_FAILED);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * 82599 adapters flow director init flow cannot be restarted,
	 * Workaround 82599 silicon errata by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value.
1112 * - write 1 to bit 8 of FDIRCMD register & 1113 * - write 0 to bit 8 of FDIRCMD register 1114 */ 1115 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1116 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | 1117 IXGBE_FDIRCMD_CLEARHT)); 1118 IXGBE_WRITE_FLUSH(hw); 1119 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, 1120 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & 1121 ~IXGBE_FDIRCMD_CLEARHT)); 1122 IXGBE_WRITE_FLUSH(hw); 1123 /* 1124 * Clear FDIR Hash register to clear any leftover hashes 1125 * waiting to be programmed. 1126 */ 1127 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00); 1128 IXGBE_WRITE_FLUSH(hw); 1129 1130 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1131 IXGBE_WRITE_FLUSH(hw); 1132 1133 /* Poll init-done after we write FDIRCTRL register */ 1134 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1135 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1136 IXGBE_FDIRCTRL_INIT_DONE) 1137 break; 1138 usec_delay(10); 1139 } 1140 if (i >= IXGBE_FDIR_INIT_DONE_POLL) { 1141 DEBUGOUT("Flow Director Signature poll time exceeded!\n"); 1142 return (IXGBE_ERR_FDIR_REINIT_FAILED); 1143 } 1144 1145 /* Clear FDIR statistics registers (read to clear) */ 1146 (void) IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); 1147 (void) IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); 1148 (void) IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 1149 (void) IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 1150 (void) IXGBE_READ_REG(hw, IXGBE_FDIRLEN); 1151 1152 return (IXGBE_SUCCESS); 1153 } 1154 1155 /* 1156 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters 1157 * @hw: pointer to hardware structure 1158 * @pballoc: which mode to allocate filters with 1159 */ 1160 s32 1161 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc) 1162 { 1163 u32 fdirctrl = 0; 1164 u32 pbsize; 1165 int i; 1166 1167 DEBUGFUNC("ixgbe_init_fdir_signature_82599"); 1168 1169 /* 1170 * Before enabling Flow Director, the Rx Packet Buffer size 1171 * must be reduced. The new value is the current size minus 1172 * flow director memory usage size. 
1173 */ 1174 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); 1175 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 1176 IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize); 1177 1178 /* 1179 * The defaults in the HW for RX PB 1-7 are not zero and so should be 1180 * intialized to zero for non DCB mode otherwise actual total RX PB 1181 * would be bigger than programmed and filter space would run into 1182 * the PB 0 region. 1183 */ 1184 for (i = 1; i < 8; i++) 1185 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); 1186 1187 /* Send interrupt when 64 filters are left */ 1188 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT; 1189 1190 /* Set the maximum length per hash bucket to 0xA filters */ 1191 fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT; 1192 1193 switch (pballoc) { 1194 case IXGBE_FDIR_PBALLOC_64K: 1195 /* 8k - 1 signature filters */ 1196 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; 1197 break; 1198 case IXGBE_FDIR_PBALLOC_128K: 1199 /* 16k - 1 signature filters */ 1200 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; 1201 break; 1202 case IXGBE_FDIR_PBALLOC_256K: 1203 /* 32k - 1 signature filters */ 1204 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; 1205 break; 1206 default: 1207 /* bad value */ 1208 return (IXGBE_ERR_CONFIG); 1209 }; 1210 1211 /* Move the flexible bytes to use the ethertype - shift 6 words */ 1212 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1213 1214 /* Prime the keys for hashing */ 1215 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, 1216 IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY)); 1217 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, 1218 IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY)); 1219 1220 /* 1221 * Poll init-done after we write the register. Estimated times: 1222 * 10G: PBALLOC = 11b, timing is 60us 1223 * 1G: PBALLOC = 11b, timing is 600us 1224 * 100M: PBALLOC = 11b, timing is 6ms 1225 * 1226 * Multiple these timings by 4 if under full Rx load 1227 * 1228 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for 1229 * 1 msec per poll time. 
If we're at line rate and drop to 100M, then 1230 * this might not finish in our poll time, but we can live with that 1231 * for now. 1232 */ 1233 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1234 IXGBE_WRITE_FLUSH(hw); 1235 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1236 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1237 IXGBE_FDIRCTRL_INIT_DONE) 1238 break; 1239 1240 msec_delay(1); 1241 } 1242 if (i >= IXGBE_FDIR_INIT_DONE_POLL) { 1243 DEBUGOUT("Flow Director Signature poll time exceeded!\n"); 1244 } 1245 1246 return (IXGBE_SUCCESS); 1247 } 1248 1249 /* 1250 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters 1251 * @hw: pointer to hardware structure 1252 * @pballoc: which mode to allocate filters with 1253 */ 1254 s32 1255 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) 1256 { 1257 u32 fdirctrl = 0; 1258 u32 pbsize; 1259 int i; 1260 1261 DEBUGFUNC("ixgbe_init_fdir_perfect_82599"); 1262 1263 /* 1264 * Before enabling Flow Director, the Rx Packet Buffer size 1265 * must be reduced. The new value is the current size minus 1266 * flow director memory usage size. 1267 */ 1268 1269 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); 1270 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 1271 IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize); 1272 1273 /* 1274 * The defaults in the HW for RX PB 1-7 are not zero and so should be 1275 * intialized to zero for non DCB mode otherwise actual total RX PB 1276 * would be bigger than programmed and filter space would run into 1277 * the PB 0 region. 
1278 */ 1279 for (i = 1; i < 8; i++) 1280 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); 1281 1282 /* Send interrupt when 64 filters are left */ 1283 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT; 1284 1285 /* Initialize the drop queue to Rx queue 127 */ 1286 fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT); 1287 1288 switch (pballoc) { 1289 case IXGBE_FDIR_PBALLOC_64K: 1290 /* 2k - 1 perfect filters */ 1291 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; 1292 break; 1293 case IXGBE_FDIR_PBALLOC_128K: 1294 /* 4k - 1 perfect filters */ 1295 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; 1296 break; 1297 case IXGBE_FDIR_PBALLOC_256K: 1298 /* 8k - 1 perfect filters */ 1299 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; 1300 break; 1301 default: 1302 /* bad value */ 1303 return (IXGBE_ERR_CONFIG); 1304 }; 1305 1306 /* Turn perfect match filtering on */ 1307 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH; 1308 fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; 1309 1310 /* Move the flexible bytes to use the ethertype - shift 6 words */ 1311 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1312 1313 /* Prime the keys for hashing */ 1314 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, 1315 IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY)); 1316 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, 1317 IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY)); 1318 1319 /* 1320 * Poll init-done after we write the register. Estimated times: 1321 * 10G: PBALLOC = 11b, timing is 60us 1322 * 1G: PBALLOC = 11b, timing is 600us 1323 * 100M: PBALLOC = 11b, timing is 6ms 1324 * 1325 * Multiple these timings by 4 if under full Rx load 1326 * 1327 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for 1328 * 1 msec per poll time. If we're at line rate and drop to 100M, then 1329 * this might not finish in our poll time, but we can live with that 1330 * for now. 
1331 */ 1332 1333 /* Set the maximum length per hash bucket to 0xA filters */ 1334 fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT); 1335 1336 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1337 IXGBE_WRITE_FLUSH(hw); 1338 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1339 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1340 IXGBE_FDIRCTRL_INIT_DONE) 1341 break; 1342 1343 msec_delay(1); 1344 } 1345 if (i >= IXGBE_FDIR_INIT_DONE_POLL) { 1346 DEBUGOUT("Flow Director Perfect poll time exceeded!\n"); 1347 } 1348 1349 return (IXGBE_SUCCESS); 1350 } 1351 1352 /* 1353 * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR 1354 * @stream: input bitstream to compute the hash on 1355 * @key: 32-bit hash key 1356 */ 1357 u16 1358 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key) 1359 { 1360 /* 1361 * The algorithm is as follows: 1362 * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350 1363 * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n] 1364 * and A[n] x B[n] is bitwise AND between same length strings 1365 * 1366 * K[n] is 16 bits, defined as: 1367 * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15] 1368 * for n modulo 32 < 15, K[n] = 1369 * K[(n % 32:0) | (31:31 - (14 - (n % 32)))] 1370 * 1371 * S[n] is 16 bits, defined as: 1372 * for n >= 15, S[n] = S[n:n - 15] 1373 * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))] 1374 * 1375 * To simplify for programming, the algorithm is implemented 1376 * in software this way: 1377 * 1378 * Key[31:0], Stream[335:0] 1379 * 1380 * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times 1381 * int_key[350:0] = tmp_key[351:1] 1382 * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321] 1383 * 1384 * hash[15:0] = 0; 1385 * for (i = 0; i < 351; i++) { 1386 * if (int_key[i]) 1387 * hash ^= int_stream[(i + 15):i]; 1388 * } 1389 */ 1390 1391 union { 1392 u64 fill[6]; 1393 u32 key[11]; 1394 u8 key_stream[44]; 1395 } tmp_key; 1396 1397 u8 *stream = (u8 *)atr_input; 
1398 u8 int_key[44]; /* upper-most bit unused */ 1399 u8 hash_str[46]; /* upper-most 2 bits unused */ 1400 u16 hash_result = 0; 1401 int i, j, k, h; 1402 1403 DEBUGFUNC("ixgbe_atr_compute_hash_82599"); 1404 1405 /* 1406 * Initialize the fill member to prevent warnings 1407 * on some compilers 1408 */ 1409 tmp_key.fill[0] = 0; 1410 1411 /* First load the temporary key stream */ 1412 for (i = 0; i < 6; i++) { 1413 u64 fillkey = ((u64)key << 32) | key; 1414 tmp_key.fill[i] = fillkey; 1415 } 1416 1417 /* 1418 * Set the interim key for the hashing. Bit 352 is unused, so we must 1419 * shift and compensate when building the key. 1420 */ 1421 int_key[0] = tmp_key.key_stream[0] >> 1; 1422 for (i = 1, j = 0; i < 44; i++) { 1423 unsigned int this_key = tmp_key.key_stream[j] << 7; 1424 j++; 1425 int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1)); 1426 } 1427 1428 /* 1429 * Set the interim bit string for the hashing. Bits 368 and 367 are 1430 * unused, so shift and compensate when building the string. 1431 */ 1432 hash_str[0] = (stream[40] & 0x7f) >> 1; 1433 for (i = 1, j = 40; i < 46; i++) { 1434 unsigned int this_str = stream[j] << 7; 1435 j++; 1436 if (j > 41) 1437 j = 0; 1438 hash_str[i] = (u8)(this_str | (stream[j] >> 1)); 1439 } 1440 1441 /* 1442 * Now compute the hash. i is the index into hash_str, j is into our 1443 * key stream, k is counting the number of bits, and h interates within 1444 * each byte. 1445 */ 1446 for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) { 1447 for (h = 0; h < 8 && k < 351; h++, k++) { 1448 if (int_key[j] & (1 << h)) { 1449 /* 1450 * Key bit is set, XOR in the current 16-bit 1451 * string. 
				 * Example of processing:
				 *   h = 0,
				 *	tmp = (hash_str[i - 2] & 0 << 16) |
				 *	    (hash_str[i - 1] & 0xff << 8) |
				 *	    (hash_str[i] & 0xff >> 0)
				 *	So tmp = hash_str[15 + k:k], since the
				 *	i + 2 clause rolls off the 16-bit value
				 *   h = 7,
				 *	tmp = (hash_str[i - 2] & 0x7f << 9) |
				 *	    (hash_str[i - 1] & 0xff << 1) |
				 *	    (hash_str[i] & 0x80 >> 7)
				 */
				int tmp = (hash_str[i] >> h);
				tmp |= (hash_str[i - 1] << (8 - h));
				tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
				    << (16 - h);
				hash_result ^= (u16)tmp;
			}
		}
	}

	return (hash_result);
}

/*
 * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
 * @input: input stream to modify
 * @vlan: the VLAN id to load
 */
s32
ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
{
	DEBUGFUNC("ixgbe_atr_set_vlan_id_82599");

	/* Stored little-endian: low byte first */
	input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
	input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
 * @input: input stream to modify
 * @src_addr: the IP address to load
 */
s32
ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
{
	DEBUGFUNC("ixgbe_atr_set_src_ipv4_82599");

	/* Stored little-endian: low byte first */
	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
	    (src_addr >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
	    (src_addr >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
 * @input: input stream to modify
 * @dst_addr: the IP address to load
 */
s32
ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
{
	DEBUGFUNC("ixgbe_atr_set_dst_ipv4_82599");

	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
	    (dst_addr >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
	    (dst_addr >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
 * @input: input stream to modify
 * @src_addr_1: the first 4 bytes of the IP address to load
 * @src_addr_2: the second 4 bytes of the IP address to load
 * @src_addr_3: the third 4 bytes of the IP address to load
 * @src_addr_4: the fourth 4 bytes of the IP address to load
 */
s32
ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
    u32 src_addr_1, u32 src_addr_2, u32 src_addr_3, u32 src_addr_4)
{
	DEBUGFUNC("ixgbe_atr_set_src_ipv6_82599");

	/* Words stored in reverse order; each word low byte first */
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
	    (src_addr_4 >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
	    (src_addr_4 >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;

	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
	    (src_addr_3 >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
	    (src_addr_3 >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;

	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
	    (src_addr_2 >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
	    (src_addr_2 >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;

	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
	    (src_addr_1 >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
	    (src_addr_1 >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
 * @input: input stream to modify
 * @dst_addr_1: the first 4 bytes of the IP address to load
 * @dst_addr_2: the second 4 bytes of the IP address to load
 * @dst_addr_3: the third 4 bytes of the IP address to load
 * @dst_addr_4: the fourth 4 bytes of the IP address to load
 */
s32
ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
    u32 dst_addr_1, u32 dst_addr_2, u32 dst_addr_3, u32 dst_addr_4)
{
	DEBUGFUNC("ixgbe_atr_set_dst_ipv6_82599");

	/* Same layout as ixgbe_atr_set_src_ipv6_82599 */
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
	    (dst_addr_4 >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
	    (dst_addr_4 >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;

	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
	    (dst_addr_3 >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
	    (dst_addr_3 >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;

	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
	    (dst_addr_2 >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
	    (dst_addr_2 >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;

	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
	    (dst_addr_1 >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
	    (dst_addr_1 >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_atr_set_src_port_82599 - Sets the source port
 * @input: input stream to modify
 * @src_port: the source port to load
 */
s32
ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
{
	DEBUGFUNC("ixgbe_atr_set_src_port_82599");

	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_atr_set_dst_port_82599 - Sets the destination port
 * @input: input stream to modify
 * @dst_port: the destination port to load
 */
s32
ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
{
	DEBUGFUNC("ixgbe_atr_set_dst_port_82599");

	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
 * @input: input stream to modify
 * @flex_byte: the flexible bytes to load
 */
s32
ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
{
	DEBUGFUNC("ixgbe_atr_set_flex_byte_82599");

	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool
 * @input: input stream to modify
 * @vm_pool: the Virtual Machine pool to load
 */
s32
ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool)
{
	DEBUGFUNC("ixgbe_atr_set_vm_pool_82599");

	input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
 * @input: input stream to modify
 * @l4type: the layer 4 type value to load
 */
s32
ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
{
	DEBUGFUNC("ixgbe_atr_set_l4type_82599");

	input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
 * @input: input stream to search
 * @vlan: the VLAN id to load
 */
s32
ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
{
	DEBUGFUNC("ixgbe_atr_get_vlan_id_82599");

	/* Reassemble from the little-endian byte stream */
	*vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
	*vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
 * @input: input stream to search
 * @src_addr: the IP address to load
 */
s32
ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr)
{
	DEBUGFUNC("ixgbe_atr_get_src_ipv4_82599");

	*src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;

	return (IXGBE_SUCCESS);
}

/*
 * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
 *
@input: input stream to search 1736 * @dst_addr: the IP address to load 1737 */ 1738 s32 1739 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr) 1740 { 1741 DEBUGFUNC("ixgbe_atr_get_dst_ipv4_82599"); 1742 1743 *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET]; 1744 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8; 1745 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16; 1746 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24; 1747 1748 return (IXGBE_SUCCESS); 1749 } 1750 1751 /* 1752 * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address 1753 * @input: input stream to search 1754 * @src_addr_1: the first 4 bytes of the IP address to load 1755 * @src_addr_2: the second 4 bytes of the IP address to load 1756 * @src_addr_3: the third 4 bytes of the IP address to load 1757 * @src_addr_4: the fourth 4 bytes of the IP address to load 1758 */ 1759 s32 1760 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, 1761 u32 *src_addr_1, u32 *src_addr_2, u32 *src_addr_3, u32 *src_addr_4) 1762 { 1763 DEBUGFUNC("ixgbe_atr_get_src_ipv6_82599"); 1764 1765 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12]; 1766 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8; 1767 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16; 1768 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24; 1769 1770 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8]; 1771 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8; 1772 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16; 1773 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24; 1774 1775 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4]; 1776 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8; 1777 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16; 1778 *src_addr_3 = 
input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24; 1779 1780 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET]; 1781 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8; 1782 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16; 1783 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24; 1784 1785 return (IXGBE_SUCCESS); 1786 } 1787 1788 /* 1789 * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address 1790 * @input: input stream to search 1791 * @dst_addr_1: the first 4 bytes of the IP address to load 1792 * @dst_addr_2: the second 4 bytes of the IP address to load 1793 * @dst_addr_3: the third 4 bytes of the IP address to load 1794 * @dst_addr_4: the fourth 4 bytes of the IP address to load 1795 */ 1796 s32 1797 ixgbe_atr_get_dst_ipv6_82599(struct ixgbe_atr_input *input, 1798 u32 *dst_addr_1, u32 *dst_addr_2, u32 *dst_addr_3, u32 *dst_addr_4) 1799 { 1800 DEBUGFUNC("ixgbe_atr_get_dst_ipv6_82599"); 1801 1802 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12]; 1803 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8; 1804 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16; 1805 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24; 1806 1807 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8]; 1808 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8; 1809 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16; 1810 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24; 1811 1812 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4]; 1813 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8; 1814 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16; 1815 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24; 1816 1817 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET]; 1818 *dst_addr_4 = 
input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8; 1819 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16; 1820 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24; 1821 1822 return (IXGBE_SUCCESS); 1823 } 1824 1825 /* 1826 * ixgbe_atr_get_src_port_82599 - Gets the source port 1827 * @input: input stream to modify 1828 * @src_port: the source port to load 1829 * 1830 * Even though the input is given in big-endian, the FDIRPORT registers 1831 * expect the ports to be programmed in little-endian. Hence the need to swap 1832 * endianness when retrieving the data. This can be confusing since the 1833 * internal hash engine expects it to be big-endian. 1834 */ 1835 s32 1836 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port) 1837 { 1838 DEBUGFUNC("ixgbe_atr_get_src_port_82599"); 1839 1840 *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8; 1841 *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1]; 1842 1843 return (IXGBE_SUCCESS); 1844 } 1845 1846 /* 1847 * ixgbe_atr_get_dst_port_82599 - Gets the destination port 1848 * @input: input stream to modify 1849 * @dst_port: the destination port to load 1850 * 1851 * Even though the input is given in big-endian, the FDIRPORT registers 1852 * expect the ports to be programmed in little-endian. Hence the need to swap 1853 * endianness when retrieving the data. This can be confusing since the 1854 * internal hash engine expects it to be big-endian. 
1855 */ 1856 s32 1857 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port) 1858 { 1859 DEBUGFUNC("ixgbe_atr_get_dst_port_82599"); 1860 1861 *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8; 1862 *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1]; 1863 1864 return (IXGBE_SUCCESS); 1865 } 1866 1867 /* 1868 * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes 1869 * @input: input stream to modify 1870 * @flex_bytes: the flexible bytes to load 1871 */ 1872 s32 1873 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte) 1874 { 1875 DEBUGFUNC("ixgbe_atr_get_flex_byte_82599"); 1876 1877 *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET]; 1878 *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8; 1879 1880 return (IXGBE_SUCCESS); 1881 } 1882 1883 /* 1884 * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool 1885 * @input: input stream to modify 1886 * @vm_pool: the Virtual Machine pool to load 1887 */ 1888 s32 1889 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool) 1890 { 1891 DEBUGFUNC("ixgbe_atr_get_vm_pool_82599"); 1892 1893 *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET]; 1894 1895 return (IXGBE_SUCCESS); 1896 } 1897 1898 /* 1899 * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type 1900 * @input: input stream to modify 1901 * @l4type: the layer 4 type value to load 1902 */ 1903 s32 1904 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type) 1905 { 1906 DEBUGFUNC("ixgbe_atr_get_l4type__82599"); 1907 1908 *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET]; 1909 1910 return (IXGBE_SUCCESS); 1911 } 1912 1913 /* 1914 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter 1915 * @hw: pointer to hardware structure 1916 * @stream: input bitstream 1917 * @queue: queue index to direct traffic to 1918 */ 1919 s32 1920 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 1921 struct ixgbe_atr_input 
*input, u8 queue) 1922 { 1923 u64 fdirhashcmd; 1924 u64 fdircmd; 1925 u32 fdirhash; 1926 u16 bucket_hash, sig_hash; 1927 u8 l4type; 1928 1929 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599"); 1930 1931 bucket_hash = ixgbe_atr_compute_hash_82599(input, 1932 IXGBE_ATR_BUCKET_HASH_KEY); 1933 1934 /* bucket_hash is only 15 bits */ 1935 bucket_hash &= IXGBE_ATR_HASH_MASK; 1936 1937 sig_hash = ixgbe_atr_compute_hash_82599(input, 1938 IXGBE_ATR_SIGNATURE_HASH_KEY); 1939 1940 /* Get the l4type in order to program FDIRCMD properly */ 1941 /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */ 1942 (void) ixgbe_atr_get_l4type_82599(input, &l4type); 1943 1944 /* 1945 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits 1946 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. 1947 */ 1948 fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; 1949 1950 fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1951 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN); 1952 1953 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 1954 case IXGBE_ATR_L4TYPE_TCP: 1955 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; 1956 break; 1957 case IXGBE_ATR_L4TYPE_UDP: 1958 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; 1959 break; 1960 case IXGBE_ATR_L4TYPE_SCTP: 1961 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; 1962 break; 1963 default: 1964 DEBUGOUT(" Error on l4type input\n"); 1965 return (IXGBE_ERR_CONFIG); 1966 } 1967 1968 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) 1969 fdircmd |= IXGBE_FDIRCMD_IPV6; 1970 1971 fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT); 1972 fdirhashcmd = ((fdircmd << 32) | fdirhash); 1973 1974 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, fdirhash & 0x7FFF7FFF); 1975 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); 1976 1977 return (IXGBE_SUCCESS); 1978 } 1979 1980 /* 1981 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter 1982 * @hw: pointer to hardware structure 1983 * @input: input bitstream 1984 * @input_masks: 
masks for the input bitstream 1985 * @soft_id: software index for the filters 1986 * @queue: queue index to direct traffic to 1987 * 1988 * Note that the caller to this function must lock before calling, since the 1989 * hardware writes must be protected from one another. 1990 */ 1991 s32 1992 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, 1993 struct ixgbe_atr_input *input, struct ixgbe_atr_input_masks *input_masks, 1994 u16 soft_id, u8 queue) 1995 { 1996 u32 fdircmd = 0; 1997 u32 fdirhash; 1998 u32 src_ipv4 = 0, dst_ipv4 = 0; 1999 u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4; 2000 u16 src_port, dst_port, vlan_id, flex_bytes; 2001 u16 bucket_hash; 2002 u8 l4type; 2003 u8 fdirm = 0; 2004 2005 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599"); 2006 2007 /* Get our input values */ 2008 (void) ixgbe_atr_get_l4type_82599(input, &l4type); 2009 2010 /* 2011 * Check l4type formatting, and bail out before we touch the hardware 2012 * if there's a configuration issue 2013 */ 2014 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 2015 case IXGBE_ATR_L4TYPE_TCP: 2016 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; 2017 break; 2018 case IXGBE_ATR_L4TYPE_UDP: 2019 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; 2020 break; 2021 case IXGBE_ATR_L4TYPE_SCTP: 2022 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; 2023 break; 2024 default: 2025 DEBUGOUT(" Error on l4type input\n"); 2026 return (IXGBE_ERR_CONFIG); 2027 } 2028 2029 bucket_hash = ixgbe_atr_compute_hash_82599(input, 2030 IXGBE_ATR_BUCKET_HASH_KEY); 2031 2032 /* bucket_hash is only 15 bits */ 2033 bucket_hash &= IXGBE_ATR_HASH_MASK; 2034 2035 (void) ixgbe_atr_get_vlan_id_82599(input, &vlan_id); 2036 (void) ixgbe_atr_get_src_port_82599(input, &src_port); 2037 (void) ixgbe_atr_get_dst_port_82599(input, &dst_port); 2038 (void) ixgbe_atr_get_flex_byte_82599(input, &flex_bytes); 2039 2040 fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; 2041 2042 /* Now figure out if we're IPv4 or IPv6 */ 2043 if (l4type & 
IXGBE_ATR_L4TYPE_IPV6_MASK) { 2044 /* IPv6 */ 2045 (void) ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, 2046 &src_ipv6_2, &src_ipv6_3, &src_ipv6_4); 2047 2048 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1); 2049 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2); 2050 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3); 2051 /* The last 4 bytes is the same register as IPv4 */ 2052 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4); 2053 2054 fdircmd |= IXGBE_FDIRCMD_IPV6; 2055 fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH; 2056 } else { 2057 /* IPv4 */ 2058 (void) ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4); 2059 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4); 2060 2061 } 2062 2063 (void) ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4); 2064 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4); 2065 2066 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id | 2067 (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT))); 2068 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port | 2069 (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT))); 2070 2071 /* 2072 * Program the relevant mask registers. If src/dst_port or src/dst_addr 2073 * are zero, then assume a full mask for that field. Also assume that 2074 * a VLAN of 0 is unspecified, so mask that out as well. L4type 2075 * cannot be masked out in this implementation. 2076 * 2077 * This also assumes IPv4 only. IPv6 masking isn't supported at this 2078 * point in time. 
2079 */ 2080 if (src_ipv4 == 0) 2081 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff); 2082 else 2083 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask); 2084 2085 if (dst_ipv4 == 0) 2086 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff); 2087 else 2088 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask); 2089 2090 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 2091 case IXGBE_ATR_L4TYPE_TCP: 2092 if (src_port == 0) 2093 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff); 2094 else 2095 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 2096 input_masks->src_port_mask); 2097 2098 if (dst_port == 0) 2099 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 2100 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) | 2101 0xffff0000)); 2102 else 2103 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 2104 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) | 2105 (input_masks->dst_port_mask << 16))); 2106 break; 2107 case IXGBE_ATR_L4TYPE_UDP: 2108 if (src_port == 0) 2109 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff); 2110 else 2111 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 2112 input_masks->src_port_mask); 2113 2114 if (dst_port == 0) 2115 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 2116 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) | 2117 0xffff0000)); 2118 else 2119 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 2120 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) | 2121 (input_masks->src_port_mask << 16))); 2122 break; 2123 default: 2124 /* this already would have failed above */ 2125 break; 2126 } 2127 2128 /* Program the last mask register, FDIRM */ 2129 if (input_masks->vlan_id_mask || !vlan_id) 2130 /* Mask both VLAN and VLANP - bits 0 and 1 */ 2131 fdirm |= (IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP); 2132 2133 if (input_masks->data_mask || !flex_bytes) 2134 /* Flex bytes need masking, so mask the whole thing - bit 4 */ 2135 fdirm |= IXGBE_FDIRM_FLEX; 2136 2137 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ 2138 fdirm |= (IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6); 2139 2140 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); 2141 2142 fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW; 2143 
fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE; 2144 fdircmd |= IXGBE_FDIRCMD_LAST; 2145 fdircmd |= IXGBE_FDIRCMD_QUEUE_EN; 2146 fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 2147 2148 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); 2149 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); 2150 2151 return (IXGBE_SUCCESS); 2152 } 2153 2154 /* 2155 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register 2156 * @hw: pointer to hardware structure 2157 * @reg: analog register to read 2158 * @val: read value 2159 * 2160 * Performs read operation to Omer analog register specified. 2161 */ 2162 s32 2163 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) 2164 { 2165 u32 core_ctl; 2166 2167 DEBUGFUNC("ixgbe_read_analog_reg8_82599"); 2168 2169 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | 2170 (reg << 8)); 2171 IXGBE_WRITE_FLUSH(hw); 2172 usec_delay(10); 2173 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); 2174 *val = (u8)core_ctl; 2175 2176 return (IXGBE_SUCCESS); 2177 } 2178 2179 /* 2180 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register 2181 * @hw: pointer to hardware structure 2182 * @reg: atlas register to write 2183 * @val: value to write 2184 * 2185 * Performs write operation to Omer analog register specified. 2186 */ 2187 s32 2188 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) 2189 { 2190 u32 core_ctl; 2191 2192 DEBUGFUNC("ixgbe_write_analog_reg8_82599"); 2193 2194 core_ctl = (reg << 8) | val; 2195 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); 2196 IXGBE_WRITE_FLUSH(hw); 2197 usec_delay(10); 2198 2199 return (IXGBE_SUCCESS); 2200 } 2201 2202 /* 2203 * ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx 2204 * @hw: pointer to hardware structure 2205 * 2206 * Starts the hardware using the generic start_hw function. 2207 * Then performs revision-specific operations: 2208 * Clears the rate limiter registers. 
2209 */ 2210 s32 2211 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw) 2212 { 2213 u32 i; 2214 u32 regval; 2215 s32 ret_val = IXGBE_SUCCESS; 2216 2217 DEBUGFUNC("ixgbe_start_hw_rev_1__82599"); 2218 2219 ret_val = ixgbe_start_hw_generic(hw); 2220 2221 /* Clear the rate limiters */ 2222 for (i = 0; i < hw->mac.max_tx_queues; i++) { 2223 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); 2224 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); 2225 } 2226 IXGBE_WRITE_FLUSH(hw); 2227 2228 /* Disable relaxed ordering */ 2229 for (i = 0; i < hw->mac.max_tx_queues; i++) { 2230 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); 2231 regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 2232 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); 2233 } 2234 2235 for (i = 0; i < hw->mac.max_rx_queues; i++) { 2236 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 2237 regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | 2238 IXGBE_DCA_RXCTRL_DESC_HSRO_EN); 2239 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); 2240 } 2241 2242 /* We need to run link autotry after the driver loads */ 2243 hw->mac.autotry_restart = true; 2244 2245 if (ret_val == IXGBE_SUCCESS) 2246 ret_val = ixgbe_verify_fw_version_82599(hw); 2247 2248 return (ret_val); 2249 } 2250 2251 /* 2252 * ixgbe_identify_phy_82599 - Get physical layer module 2253 * @hw: pointer to hardware structure 2254 * 2255 * Determines the physical layer module found on the current adapter. 2256 * If PHY already detected, maintains current PHY type in hw struct, 2257 * otherwise executes the PHY detection routine. 2258 */ 2259 s32 2260 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) 2261 { 2262 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 2263 2264 DEBUGFUNC("ixgbe_identify_phy_82599"); 2265 2266 /* Detect PHY if not unknown - returns success if already detected. 
*/ 2267 status = ixgbe_identify_phy_generic(hw); 2268 if (status != IXGBE_SUCCESS) 2269 status = ixgbe_identify_sfp_module_generic(hw); 2270 /* Set PHY type none if no PHY detected */ 2271 if (hw->phy.type == ixgbe_phy_unknown) { 2272 hw->phy.type = ixgbe_phy_none; 2273 status = IXGBE_SUCCESS; 2274 } 2275 2276 /* Return error if SFP module has been detected but is not supported */ 2277 if (hw->phy.type == ixgbe_phy_sfp_unsupported) 2278 status = IXGBE_ERR_SFP_NOT_SUPPORTED; 2279 2280 return (status); 2281 } 2282 2283 /* 2284 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type 2285 * @hw: pointer to hardware structure 2286 * 2287 * Determines physical layer capabilities of the current configuration. 2288 */ 2289 u32 2290 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) 2291 { 2292 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 2293 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2294 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 2295 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; 2296 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; 2297 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; 2298 u16 ext_ability = 0; 2299 u8 comp_codes_10g = 0; 2300 u8 comp_codes_1g = 0; 2301 2302 DEBUGFUNC("ixgbe_get_support_physical_layer_82599"); 2303 2304 hw->phy.ops.identify(hw); 2305 2306 if (hw->phy.type == ixgbe_phy_tn || 2307 hw->phy.type == ixgbe_phy_aq || 2308 hw->phy.type == ixgbe_phy_cu_unknown) { 2309 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, 2310 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); 2311 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) 2312 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; 2313 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) 2314 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; 2315 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) 2316 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; 2317 goto out; 2318 } 2319 2320 switch (autoc & 
IXGBE_AUTOC_LMS_MASK) { 2321 case IXGBE_AUTOC_LMS_1G_AN: 2322 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: 2323 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { 2324 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | 2325 IXGBE_PHYSICAL_LAYER_1000BASE_BX; 2326 goto out; 2327 } else { 2328 /* SFI mode so read SFP module */ 2329 goto sfp_check; 2330 } 2331 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: 2332 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) 2333 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; 2334 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) 2335 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; 2336 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) 2337 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; 2338 goto out; 2339 case IXGBE_AUTOC_LMS_10G_SERIAL: 2340 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { 2341 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; 2342 goto out; 2343 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) 2344 goto sfp_check; 2345 break; 2346 case IXGBE_AUTOC_LMS_KX4_KX_KR: 2347 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: 2348 if (autoc & IXGBE_AUTOC_KX_SUPP) 2349 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; 2350 if (autoc & IXGBE_AUTOC_KX4_SUPP) 2351 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; 2352 if (autoc & IXGBE_AUTOC_KR_SUPP) 2353 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; 2354 goto out; 2355 default: 2356 goto out; 2357 } 2358 2359 sfp_check: 2360 /* 2361 * SFP check must be done last since DA modules are sometimes used to 2362 * test KR mode - we need to id KR mode correctly before SFP module. 
2363 * Call identify_sfp because the pluggable module may have changed 2364 */ 2365 hw->phy.ops.identify_sfp(hw); 2366 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) 2367 goto out; 2368 2369 switch (hw->phy.type) { 2370 case ixgbe_phy_sfp_passive_tyco: 2371 case ixgbe_phy_sfp_passive_unknown: 2372 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; 2373 break; 2374 case ixgbe_phy_sfp_ftl_active: 2375 case ixgbe_phy_sfp_active_unknown: 2376 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; 2377 break; 2378 case ixgbe_phy_sfp_avago: 2379 case ixgbe_phy_sfp_ftl: 2380 case ixgbe_phy_sfp_intel: 2381 case ixgbe_phy_sfp_unknown: 2382 hw->phy.ops.read_i2c_eeprom(hw, 2383 IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); 2384 hw->phy.ops.read_i2c_eeprom(hw, 2385 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); 2386 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) 2387 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; 2388 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) 2389 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; 2390 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) 2391 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; 2392 break; 2393 default: 2394 break; 2395 } 2396 2397 out: 2398 return (physical_layer); 2399 } 2400 2401 /* 2402 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 2403 * @hw: pointer to hardware structure 2404 * @regval: register value to write to RXCTRL 2405 * 2406 * Enables the Rx DMA unit for 82599 2407 */ 2408 s32 2409 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) 2410 { 2411 #define IXGBE_MAX_SECRX_POLL 30 2412 int i; 2413 int secrxreg; 2414 2415 DEBUGFUNC("ixgbe_enable_rx_dma_82599"); 2416 2417 /* 2418 * Workaround for 82599 silicon errata when enabling the Rx datapath. 2419 * If traffic is incoming before we enable the Rx unit, it could hang 2420 * the Rx DMA unit. Therefore, make sure the security engine is 2421 * completely disabled prior to enabling the Rx unit. 
2422 */ 2423 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2424 secrxreg |= IXGBE_SECRXCTRL_RX_DIS; 2425 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2426 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { 2427 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); 2428 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) 2429 break; 2430 else 2431 /* Use interrupt-safe sleep just in case */ 2432 usec_delay(10); 2433 } 2434 2435 /* For informational purposes only */ 2436 if (i >= IXGBE_MAX_SECRX_POLL) 2437 DEBUGOUT("Rx unit being enabled before security " 2438 "path fully disabled. Continuing with init.\n"); 2439 2440 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); 2441 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); 2442 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; 2443 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); 2444 IXGBE_WRITE_FLUSH(hw); 2445 2446 return (IXGBE_SUCCESS); 2447 } 2448 2449 /* 2450 * ixgbe_get_device_caps_82599 - Get additional device capabilities 2451 * @hw: pointer to hardware structure 2452 * @device_caps: the EEPROM word with the extra device capabilities 2453 * 2454 * This function will read the EEPROM location for the device capabilities, 2455 * and return the word through device_caps. 2456 */ 2457 s32 2458 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps) 2459 { 2460 DEBUGFUNC("ixgbe_get_device_caps_82599"); 2461 2462 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); 2463 2464 return (IXGBE_SUCCESS); 2465 } 2466 2467 /* 2468 * ixgbe_verify_fw_version_82599 - verify fw version for 82599 2469 * @hw: pointer to hardware structure 2470 * 2471 * Verifies that installed the firmware version is 0.6 or higher 2472 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. 2473 * 2474 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or 2475 * if the FW version is not supported. 
2476 */ 2477 static s32 2478 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) 2479 { 2480 s32 status = IXGBE_ERR_EEPROM_VERSION; 2481 u16 fw_offset, fw_ptp_cfg_offset; 2482 u16 fw_version = 0; 2483 2484 DEBUGFUNC("ixgbe_verify_fw_version_82599"); 2485 2486 /* firmware check is only necessary for SFI devices */ 2487 if (hw->phy.media_type != ixgbe_media_type_fiber) { 2488 status = IXGBE_SUCCESS; 2489 goto fw_version_out; 2490 } 2491 2492 /* get the offset to the Firmware Module block */ 2493 hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); 2494 2495 if ((fw_offset == 0) || (fw_offset == 0xFFFF)) 2496 goto fw_version_out; 2497 2498 /* get the offset to the Pass Through Patch Configuration block */ 2499 hw->eeprom.ops.read(hw, (fw_offset + 2500 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), &fw_ptp_cfg_offset); 2501 2502 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) 2503 goto fw_version_out; 2504 2505 /* get the firmware version */ 2506 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4), 2507 &fw_version); 2508 2509 if (fw_version > 0x5) 2510 status = IXGBE_SUCCESS; 2511 2512 fw_version_out: 2513 return (status); 2514 } 2515 2516 /* 2517 * ixgbe_enable_relaxed_ordering_82599 - Enable relaxed ordering 2518 * @hw: pointer to hardware structure 2519 */ 2520 void 2521 ixgbe_enable_relaxed_ordering_82599(struct ixgbe_hw *hw) 2522 { 2523 u32 regval; 2524 u32 i; 2525 2526 DEBUGFUNC("ixgbe_enable_relaxed_ordering_82599"); 2527 2528 /* Enable relaxed ordering */ 2529 for (i = 0; i < hw->mac.max_tx_queues; i++) { 2530 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); 2531 regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 2532 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); 2533 } 2534 2535 for (i = 0; i < hw->mac.max_rx_queues; i++) { 2536 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); 2537 regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN | 2538 IXGBE_DCA_RXCTRL_DESC_HSRO_EN); 2539 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); 2540 } 2541 } 
2542