// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e.h"
#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
#include <linux/avf/virtchnl.h>

/**
 * i40e_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the mac type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 **/
i40e_status i40e_set_mac_type(struct i40e_hw *hw)
{
	i40e_status status = 0;

	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (hw->device_id) {
		case I40E_DEV_ID_SFP_XL710:
		case I40E_DEV_ID_QEMU:
		case I40E_DEV_ID_KX_B:
		case I40E_DEV_ID_KX_C:
		case I40E_DEV_ID_QSFP_A:
		case I40E_DEV_ID_QSFP_B:
		case I40E_DEV_ID_QSFP_C:
		case I40E_DEV_ID_10G_BASE_T:
		case I40E_DEV_ID_10G_BASE_T4:
		case I40E_DEV_ID_10G_B:
		case I40E_DEV_ID_10G_SFP:
		case I40E_DEV_ID_20G_KR2:
		case I40E_DEV_ID_20G_KR2_A:
		case I40E_DEV_ID_25G_B:
		case I40E_DEV_ID_25G_SFP28:
		case I40E_DEV_ID_X710_N3000:
		case I40E_DEV_ID_XXV710_N3000:
			hw->mac.type = I40E_MAC_XL710;
			break;
		case I40E_DEV_ID_KX_X722:
		case I40E_DEV_ID_QSFP_X722:
		case I40E_DEV_ID_SFP_X722:
		case I40E_DEV_ID_1G_BASE_T_X722:
		case I40E_DEV_ID_10G_BASE_T_X722:
		case I40E_DEV_ID_SFP_I_X722:
			hw->mac.type = I40E_MAC_X722;
			break;
		default:
			hw->mac.type = I40E_MAC_GENERIC;
			break;
		}
	} else {
		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
	       hw->mac.type, status);
	return status;
}

/**
 * i40e_aq_str - convert AQ err code to a string
 * @hw: pointer to the HW structure
 * @aq_err: the AQ error code to convert
 **/
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
{
	switch (aq_err) {
	case I40E_AQ_RC_OK:
		return "OK";
	case I40E_AQ_RC_EPERM:
		return "I40E_AQ_RC_EPERM";
	case I40E_AQ_RC_ENOENT:
		return "I40E_AQ_RC_ENOENT";
	case I40E_AQ_RC_ESRCH:
		return "I40E_AQ_RC_ESRCH";
	case I40E_AQ_RC_EINTR:
		return "I40E_AQ_RC_EINTR";
	case I40E_AQ_RC_EIO:
		return "I40E_AQ_RC_EIO";
	case I40E_AQ_RC_ENXIO:
		return "I40E_AQ_RC_ENXIO";
	case I40E_AQ_RC_E2BIG:
		return "I40E_AQ_RC_E2BIG";
	case I40E_AQ_RC_EAGAIN:
		return "I40E_AQ_RC_EAGAIN";
	case I40E_AQ_RC_ENOMEM:
		return "I40E_AQ_RC_ENOMEM";
	case I40E_AQ_RC_EACCES:
		return "I40E_AQ_RC_EACCES";
	case I40E_AQ_RC_EFAULT:
		return "I40E_AQ_RC_EFAULT";
	case I40E_AQ_RC_EBUSY:
		return "I40E_AQ_RC_EBUSY";
	case I40E_AQ_RC_EEXIST:
		return "I40E_AQ_RC_EEXIST";
	case I40E_AQ_RC_EINVAL:
		return "I40E_AQ_RC_EINVAL";
	case I40E_AQ_RC_ENOTTY:
		return "I40E_AQ_RC_ENOTTY";
	case I40E_AQ_RC_ENOSPC:
		return "I40E_AQ_RC_ENOSPC";
	case I40E_AQ_RC_ENOSYS:
		return "I40E_AQ_RC_ENOSYS";
	case I40E_AQ_RC_ERANGE:
		return "I40E_AQ_RC_ERANGE";
	case I40E_AQ_RC_EFLUSHED:
		return "I40E_AQ_RC_EFLUSHED";
	case I40E_AQ_RC_BAD_ADDR:
		return "I40E_AQ_RC_BAD_ADDR";
	case I40E_AQ_RC_EMODE:
		return "I40E_AQ_RC_EMODE";
	case I40E_AQ_RC_EFBIG:
		return "I40E_AQ_RC_EFBIG";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
	return hw->err_str;
}

/**
 * i40e_stat_str - convert status err code to a string
 * @hw: pointer to the HW structure
 * @stat_err: the status error code to convert
 **/
const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
{
	switch (stat_err) {
	case 0:
		return "OK";
	case I40E_ERR_NVM:
		return "I40E_ERR_NVM";
	case I40E_ERR_NVM_CHECKSUM:
		return "I40E_ERR_NVM_CHECKSUM";
	case I40E_ERR_PHY:
		return "I40E_ERR_PHY";
	case I40E_ERR_CONFIG:
		return "I40E_ERR_CONFIG";
	case I40E_ERR_PARAM:
		return "I40E_ERR_PARAM";
	case I40E_ERR_MAC_TYPE:
		return "I40E_ERR_MAC_TYPE";
	case I40E_ERR_UNKNOWN_PHY:
		return "I40E_ERR_UNKNOWN_PHY";
	case I40E_ERR_LINK_SETUP:
		return "I40E_ERR_LINK_SETUP";
	case I40E_ERR_ADAPTER_STOPPED:
		return "I40E_ERR_ADAPTER_STOPPED";
	case I40E_ERR_INVALID_MAC_ADDR:
		return "I40E_ERR_INVALID_MAC_ADDR";
	case I40E_ERR_DEVICE_NOT_SUPPORTED:
		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
	case I40E_ERR_MASTER_REQUESTS_PENDING:
		return "I40E_ERR_MASTER_REQUESTS_PENDING";
	case I40E_ERR_INVALID_LINK_SETTINGS:
		return "I40E_ERR_INVALID_LINK_SETTINGS";
	case I40E_ERR_AUTONEG_NOT_COMPLETE:
		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
	case I40E_ERR_RESET_FAILED:
		return "I40E_ERR_RESET_FAILED";
	case I40E_ERR_SWFW_SYNC:
		return "I40E_ERR_SWFW_SYNC";
	case I40E_ERR_NO_AVAILABLE_VSI:
		return "I40E_ERR_NO_AVAILABLE_VSI";
	case I40E_ERR_NO_MEMORY:
		return "I40E_ERR_NO_MEMORY";
	case I40E_ERR_BAD_PTR:
		return "I40E_ERR_BAD_PTR";
	case I40E_ERR_RING_FULL:
		return "I40E_ERR_RING_FULL";
	case I40E_ERR_INVALID_PD_ID:
		return "I40E_ERR_INVALID_PD_ID";
	case I40E_ERR_INVALID_QP_ID:
		return "I40E_ERR_INVALID_QP_ID";
	case I40E_ERR_INVALID_CQ_ID:
		return "I40E_ERR_INVALID_CQ_ID";
	case I40E_ERR_INVALID_CEQ_ID:
		return "I40E_ERR_INVALID_CEQ_ID";
	case I40E_ERR_INVALID_AEQ_ID:
		return "I40E_ERR_INVALID_AEQ_ID";
	case I40E_ERR_INVALID_SIZE:
		return "I40E_ERR_INVALID_SIZE";
	case I40E_ERR_INVALID_ARP_INDEX:
		return "I40E_ERR_INVALID_ARP_INDEX";
	case I40E_ERR_INVALID_FPM_FUNC_ID:
		return "I40E_ERR_INVALID_FPM_FUNC_ID";
	case I40E_ERR_QP_INVALID_MSG_SIZE:
		return "I40E_ERR_QP_INVALID_MSG_SIZE";
	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
	case I40E_ERR_INVALID_FRAG_COUNT:
		return "I40E_ERR_INVALID_FRAG_COUNT";
	case I40E_ERR_QUEUE_EMPTY:
		return "I40E_ERR_QUEUE_EMPTY";
	case I40E_ERR_INVALID_ALIGNMENT:
		return "I40E_ERR_INVALID_ALIGNMENT";
	case I40E_ERR_FLUSHED_QUEUE:
		return "I40E_ERR_FLUSHED_QUEUE";
	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
	case I40E_ERR_INVALID_IMM_DATA_SIZE:
		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
	case I40E_ERR_TIMEOUT:
		return "I40E_ERR_TIMEOUT";
	case I40E_ERR_OPCODE_MISMATCH:
		return "I40E_ERR_OPCODE_MISMATCH";
	case I40E_ERR_CQP_COMPL_ERROR:
		return "I40E_ERR_CQP_COMPL_ERROR";
	case I40E_ERR_INVALID_VF_ID:
		return "I40E_ERR_INVALID_VF_ID";
	case I40E_ERR_INVALID_HMCFN_ID:
		return "I40E_ERR_INVALID_HMCFN_ID";
	case I40E_ERR_BACKING_PAGE_ERROR:
		return "I40E_ERR_BACKING_PAGE_ERROR";
	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
	case I40E_ERR_INVALID_PBLE_INDEX:
		return "I40E_ERR_INVALID_PBLE_INDEX";
	case I40E_ERR_INVALID_SD_INDEX:
		return "I40E_ERR_INVALID_SD_INDEX";
	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
	case I40E_ERR_INVALID_SD_TYPE:
		return "I40E_ERR_INVALID_SD_TYPE";
	case I40E_ERR_MEMCPY_FAILED:
		return "I40E_ERR_MEMCPY_FAILED";
	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
	case I40E_ERR_SRQ_ENABLED:
		return "I40E_ERR_SRQ_ENABLED";
	case I40E_ERR_ADMIN_QUEUE_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_ERROR";
	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
	case I40E_ERR_BUF_TOO_SHORT:
		return "I40E_ERR_BUF_TOO_SHORT";
	case I40E_ERR_ADMIN_QUEUE_FULL:
		return "I40E_ERR_ADMIN_QUEUE_FULL";
	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
	case I40E_ERR_BAD_IWARP_CQE:
		return "I40E_ERR_BAD_IWARP_CQE";
	case I40E_ERR_NVM_BLANK_MODE:
		return "I40E_ERR_NVM_BLANK_MODE";
	case I40E_ERR_NOT_IMPLEMENTED:
		return "I40E_ERR_NOT_IMPLEMENTED";
	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
	case I40E_ERR_DIAG_TEST_FAILED:
		return "I40E_ERR_DIAG_TEST_FAILED";
	case I40E_ERR_NOT_READY:
		return "I40E_ERR_NOT_READY";
	case I40E_NOT_SUPPORTED:
		return "I40E_NOT_SUPPORTED";
	case I40E_ERR_FIRMWARE_API_VERSION:
		return "I40E_ERR_FIRMWARE_API_VERSION";
	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
	return hw->err_str;
}
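
/* Illustrative only (not part of the upstream file): callers typically pair
 * i40e_stat_str() with i40e_aq_str() when logging a failed admin queue
 * command, roughly like:
 *
 *	status = i40e_asq_send_command(hw, &desc, buf, buf_len, NULL);
 *	if (status)
 *		hw_dbg(hw, "AQ command failed, err %s aq_err %s\n",
 *		       i40e_stat_str(hw, status),
 *		       i40e_aq_str(hw, hw->aq.asq_last_status));
 *
 * Both helpers fall back to printing the raw numeric code via hw->err_str
 * when the value is not a known enumerator.
 */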

/**
 * i40e_debug_aq
 * @hw: pointer to the hw struct
 * @mask: debug mask
 * @desc: pointer to admin queue descriptor
 * @buffer: pointer to command buffer
 * @buf_len: max length of buffer
 *
 * Dumps debug log about adminq command with descriptor contents.
 **/
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
		   void *buffer, u16 buf_len)
{
	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
	u32 effective_mask = hw->debug_mask & mask;
	char prefix[27];
	u16 len;
	u8 *buf = (u8 *)buffer;

	if (!effective_mask || !desc)
		return;

	len = le16_to_cpu(aq_desc->datalen);

	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		   le16_to_cpu(aq_desc->opcode),
		   le16_to_cpu(aq_desc->flags),
		   le16_to_cpu(aq_desc->datalen),
		   le16_to_cpu(aq_desc->retval));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tcookie (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->cookie_high),
		   le32_to_cpu(aq_desc->cookie_low));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tparam (0,1) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.internal.param0),
		   le32_to_cpu(aq_desc->params.internal.param1));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\taddr (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.external.addr_high),
		   le32_to_cpu(aq_desc->params.external.addr_low));

	if (buffer && buf_len != 0 && len != 0 &&
	    (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		snprintf(prefix, sizeof(prefix),
			 "i40e %02x:%02x.%x: \t0x",
			 hw->bus.bus_id,
			 hw->bus.device,
			 hw->bus.func);

		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
			       16, 1, buf, len, false);
	}
}
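
/* Illustrative only: i40e_debug_aq() prints nothing unless the relevant bits
 * are also set in hw->debug_mask. A hypothetical caller wanting both the
 * descriptor and its data buffer dumped might do roughly:
 *
 *	hw->debug_mask |= I40E_DEBUG_AQ_DESCRIPTOR | I40E_DEBUG_AQ_DESC_BUFFER;
 *	...
 *	i40e_debug_aq(hw, I40E_DEBUG_AQ_DESCRIPTOR | I40E_DEBUG_AQ_DESC_BUFFER,
 *		      &desc, buf, buf_len);
 *
 * The buffer dump length is limited to the smaller of the descriptor's
 * datalen and buf_len.
 */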

/**
 * i40e_check_asq_alive
 * @hw: pointer to the hw struct
 *
 * Returns true if Queue is enabled else false.
 **/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
	if (hw->aq.asq.len)
		return !!(rd32(hw, hw->aq.asq.len) &
			  I40E_PF_ATQLEN_ATQENABLE_MASK);
	else
		return false;
}

/**
 * i40e_aq_queue_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well.
 **/
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
				   bool unloading)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_queue_shutdown *cmd =
		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_queue_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * i40e_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get or set RSS look up table
 **/
static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
					   u16 vsi_id, bool pf_lut,
					   u8 *lut, u16 lut_size,
					   bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_lut *cmd_resp =
		(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_lut);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_lut);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
		cpu_to_le16((u16)((vsi_id <<
				   I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
				  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);

	if (pf_lut)
		cmd_resp->flags |= cpu_to_le16((u16)
				((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
	else
		cmd_resp->flags |= cpu_to_le16((u16)
				((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
				  I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
				 I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));

	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * get the RSS lookup table, PF or VSI type
 **/
i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
				       false);
}

/**
 * i40e_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * set the RSS lookup table, PF or VSI type
 **/
i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}

/**
 * i40e_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get the RSS key per VSI
 **/
static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
					   u16 vsi_id,
					   struct i40e_aqc_get_set_rss_key_data *key,
					   bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_key *cmd_resp =
		(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_key);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_key);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
		cpu_to_le16((u16)((vsi_id <<
				   I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
				  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);

	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 **/
i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}

/**
 * i40e_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * set the RSS key per VSI
 **/
i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
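
/* Illustrative only: a PF driver that programs RSS through the admin queue
 * (rather than through registers) would normally set the hash key first and
 * then the lookup table for the same VSI, checking each return code. A
 * minimal sketch, where key/lut/lut_size are caller-provided and lut_size
 * depends on whether the PF-wide or per-VSI table is targeted:
 *
 *	status = i40e_aq_set_rss_key(hw, vsi_id, &key);
 *	if (!status)
 *		status = i40e_aq_set_rss_lut(hw, vsi_id, false, lut, lut_size);
 *
 * Passing pf_lut == true selects the PF-wide table instead of the per-VSI
 * table.
 */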

/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
 * hardware to a bit-field that can be used by SW to more easily determine the
 * packet type.
 *
 * Macros are used to shorten the table lines and make this table human
 * readable.
 *
 * We store the PTYPE in the top byte of the bit field - this is just so that
 * we can check that the table doesn't have a row missing, as the index into
 * the table should be the PTYPE.
 *
 * Typical work flow:
 *
 * IF NOT i40e_ptype_lookup[ptype].known
 * THEN
 *	Packet is unknown
 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
 *	Use the rest of the fields to look at the tunnels, inner protocols, etc
 * ELSE
 *	Use the enum i40e_rx_l2_ptype to decode the packet type
 * ENDIF
 */
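
/* Illustrative only: expressed in C, the workflow above looks roughly like
 * the following (field names as defined for struct i40e_rx_ptype_decoded in
 * i40e_type.h and filled in by the I40E_PTT() macro below):
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
 *
 *	if (!decoded.known)
 *		return;	(packet type is unknown)
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) {
 *		look at decoded.outer_ip_ver, decoded.tunnel_type,
 *		decoded.inner_prot, decoded.payload_layer, ...
 *	} else {
 *		decode via enum i40e_rx_l2_ptype
 *	}
 */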

/* macro to make the table lines short */
#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	{	PTYPE, \
		1, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
		I40E_RX_PTYPE_##OUTER_FRAG, \
		I40E_RX_PTYPE_TUNNEL_##T, \
		I40E_RX_PTYPE_TUNNEL_END_##TE, \
		I40E_RX_PTYPE_##TEF, \
		I40E_RX_PTYPE_INNER_PROT_##I, \
		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }

#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
	{ PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros make the table fit but are terse */
#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC

/* Lookup table mapping the HW PTYPE to the bit field for decoding */
struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
	/* L2 Packet types */
	I40E_PTT_UNUSED_ENTRY(0),
	I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
	I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(4),
	I40E_PTT_UNUSED_ENTRY(5),
	I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(8),
	I40E_PTT_UNUSED_ENTRY(9),
	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),

	/* Non Tunneled IPv4 */
	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(25),
	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv4 --> IPv4 */
	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(32),
	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> IPv6 */
	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(39),
	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT */
	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> IPv4 */
	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(47),
	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> IPv6 */
	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(54),
	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC */
	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(62),
	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(69),
	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC/VLAN */
	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(77),
	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(84),
	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* Non Tunneled IPv6 */
	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(91),
	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv6 --> IPv4 */
	I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(98),
	I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> IPv6 */
	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(105),
	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT */
	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> IPv4 */
	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(113),
	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> IPv6 */
	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(120),
	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC */
	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(128),
	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(135),
	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN */
	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(143),
	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(150),
	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* unused entries */
	I40E_PTT_UNUSED_ENTRY(154),
	I40E_PTT_UNUSED_ENTRY(155),
	I40E_PTT_UNUSED_ENTRY(156),
	I40E_PTT_UNUSED_ENTRY(157),
	I40E_PTT_UNUSED_ENTRY(158),
	I40E_PTT_UNUSED_ENTRY(159),

	I40E_PTT_UNUSED_ENTRY(160),
	I40E_PTT_UNUSED_ENTRY(161),
	I40E_PTT_UNUSED_ENTRY(162),
	I40E_PTT_UNUSED_ENTRY(163),
	I40E_PTT_UNUSED_ENTRY(164),
	I40E_PTT_UNUSED_ENTRY(165),
	I40E_PTT_UNUSED_ENTRY(166),
	I40E_PTT_UNUSED_ENTRY(167),
	I40E_PTT_UNUSED_ENTRY(168),
	I40E_PTT_UNUSED_ENTRY(169),

	I40E_PTT_UNUSED_ENTRY(170),
	I40E_PTT_UNUSED_ENTRY(171),
	I40E_PTT_UNUSED_ENTRY(172),
	I40E_PTT_UNUSED_ENTRY(173),
	I40E_PTT_UNUSED_ENTRY(174),
	I40E_PTT_UNUSED_ENTRY(175),
	I40E_PTT_UNUSED_ENTRY(176),
	I40E_PTT_UNUSED_ENTRY(177),
	I40E_PTT_UNUSED_ENTRY(178),
	I40E_PTT_UNUSED_ENTRY(179),

	I40E_PTT_UNUSED_ENTRY(180),
	I40E_PTT_UNUSED_ENTRY(181),
	I40E_PTT_UNUSED_ENTRY(182),
	I40E_PTT_UNUSED_ENTRY(183),
	I40E_PTT_UNUSED_ENTRY(184),
	I40E_PTT_UNUSED_ENTRY(185),
	I40E_PTT_UNUSED_ENTRY(186),
	I40E_PTT_UNUSED_ENTRY(187),
	I40E_PTT_UNUSED_ENTRY(188),
	I40E_PTT_UNUSED_ENTRY(189),

	I40E_PTT_UNUSED_ENTRY(190),
	I40E_PTT_UNUSED_ENTRY(191),
	I40E_PTT_UNUSED_ENTRY(192),
	I40E_PTT_UNUSED_ENTRY(193),
	I40E_PTT_UNUSED_ENTRY(194),
	I40E_PTT_UNUSED_ENTRY(195),
	I40E_PTT_UNUSED_ENTRY(196),
	I40E_PTT_UNUSED_ENTRY(197),
	I40E_PTT_UNUSED_ENTRY(198),
	I40E_PTT_UNUSED_ENTRY(199),

	I40E_PTT_UNUSED_ENTRY(200),
	I40E_PTT_UNUSED_ENTRY(201),
	I40E_PTT_UNUSED_ENTRY(202),
	I40E_PTT_UNUSED_ENTRY(203),
	I40E_PTT_UNUSED_ENTRY(204),
	I40E_PTT_UNUSED_ENTRY(205),
	I40E_PTT_UNUSED_ENTRY(206),
	I40E_PTT_UNUSED_ENTRY(207),
	I40E_PTT_UNUSED_ENTRY(208),
	I40E_PTT_UNUSED_ENTRY(209),

	I40E_PTT_UNUSED_ENTRY(210),
	I40E_PTT_UNUSED_ENTRY(211),
	I40E_PTT_UNUSED_ENTRY(212),
	I40E_PTT_UNUSED_ENTRY(213),
	I40E_PTT_UNUSED_ENTRY(214),
	I40E_PTT_UNUSED_ENTRY(215),
	I40E_PTT_UNUSED_ENTRY(216),
	I40E_PTT_UNUSED_ENTRY(217),
	I40E_PTT_UNUSED_ENTRY(218),
	I40E_PTT_UNUSED_ENTRY(219),

	I40E_PTT_UNUSED_ENTRY(220),
	I40E_PTT_UNUSED_ENTRY(221),
	I40E_PTT_UNUSED_ENTRY(222),
	I40E_PTT_UNUSED_ENTRY(223),
	I40E_PTT_UNUSED_ENTRY(224),
	I40E_PTT_UNUSED_ENTRY(225),
	I40E_PTT_UNUSED_ENTRY(226),
	I40E_PTT_UNUSED_ENTRY(227),
	I40E_PTT_UNUSED_ENTRY(228),
	I40E_PTT_UNUSED_ENTRY(229),

	I40E_PTT_UNUSED_ENTRY(230),
	I40E_PTT_UNUSED_ENTRY(231),
	I40E_PTT_UNUSED_ENTRY(232),
	I40E_PTT_UNUSED_ENTRY(233),
	I40E_PTT_UNUSED_ENTRY(234),
	I40E_PTT_UNUSED_ENTRY(235),
	I40E_PTT_UNUSED_ENTRY(236),
	I40E_PTT_UNUSED_ENTRY(237),
	I40E_PTT_UNUSED_ENTRY(238),
	I40E_PTT_UNUSED_ENTRY(239),

	I40E_PTT_UNUSED_ENTRY(240),
	I40E_PTT_UNUSED_ENTRY(241),
	I40E_PTT_UNUSED_ENTRY(242),
	I40E_PTT_UNUSED_ENTRY(243),
	I40E_PTT_UNUSED_ENTRY(244),
	I40E_PTT_UNUSED_ENTRY(245),
	I40E_PTT_UNUSED_ENTRY(246),
	I40E_PTT_UNUSED_ENTRY(247),
	I40E_PTT_UNUSED_ENTRY(248),
	I40E_PTT_UNUSED_ENTRY(249),

	I40E_PTT_UNUSED_ENTRY(250),
	I40E_PTT_UNUSED_ENTRY(251),
	I40E_PTT_UNUSED_ENTRY(252),
	I40E_PTT_UNUSED_ENTRY(253),
	I40E_PTT_UNUSED_ENTRY(254),
	I40E_PTT_UNUSED_ENTRY(255)
};

/**
 * i40e_init_shared_code - Initialize the shared code
 * @hw: pointer to hardware structure
 *
 * This assigns the MAC type and PHY code and inits the NVM.
 * Does not touch the hardware. This function must be called prior to any
 * other function in the shared code. The i40e_hw structure should be
 * memset to 0 prior to calling this function. The following fields in
 * hw structure should be filled in prior to calling this function:
 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
 * subsystem_vendor_id, and revision_id
 **/
i40e_status i40e_init_shared_code(struct i40e_hw *hw)
{
	i40e_status status = 0;
	u32 port, ari, func_rid;

	i40e_set_mac_type(hw);

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
	case I40E_MAC_X722:
		break;
	default:
		return I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw->phy.get_link_info = true;

	/* Determine port number and PF number */
	port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
					   >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
	hw->port = (u8)port;
	ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
						 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
	func_rid = rd32(hw, I40E_PF_FUNC_RID);
	if (ari)
		hw->pf_id = (u8)(func_rid & 0xff);
	else
		hw->pf_id = (u8)(func_rid & 0x7);

	if (hw->mac.type == I40E_MAC_X722)
		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	status = i40e_init_nvm(hw);
	return status;
}
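
/* Illustrative only: the call order described in the kernel-doc above looks
 * roughly like the following during probe, with the listed fields filled from
 * PCI config space before any other shared-code function is used:
 *
 *	memset(hw, 0, sizeof(*hw));
 *	hw->hw_addr = ioremap(...);
 *	hw->back = ...;				(driver-private pointer)
 *	hw->vendor_id = pdev->vendor;
 *	hw->device_id = pdev->device;
 *	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 *	hw->subsystem_device_id = pdev->subsystem_device;
 *	hw->revision_id = pdev->revision;
 *
 *	status = i40e_init_shared_code(hw);
 */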

/**
 * i40e_aq_mac_address_read - Retrieve the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: a return indicator of what addresses were added to the addr store
 * @addrs: the requestor's mac addr store
 * @cmd_details: pointer to command details structure or NULL
 **/
static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
				   u16 *flags,
				   struct i40e_aqc_mac_address_read_data *addrs,
				   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_read *cmd_data =
		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);

	status = i40e_asq_send_command(hw, &desc, addrs,
				       sizeof(*addrs), cmd_details);
	*flags = le16_to_cpu(cmd_data->command_flags);

	return status;
}

/**
 * i40e_aq_mac_address_write - Change the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: indicates which MAC to be written
 * @mac_addr: address to write
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
				      u16 flags, u8 *mac_addr,
				      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_write *cmd_data =
		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_mac_address_write);
	cmd_data->command_flags = cpu_to_le16(flags);
	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
					((u32)mac_addr[3] << 16) |
					((u32)mac_addr[4] << 8) |
					mac_addr[5]);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_get_mac_addr - get MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to MAC address
 *
 * Reads the adapter's MAC address from register
 **/
i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);

	if (flags & I40E_AQC_LAN_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.pf_lan_mac);

	return status;
}

/**
 * i40e_get_port_mac_addr - get Port MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to Port MAC address
 *
 * Reads the adapter's Port MAC address
 **/
i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
	if (status)
		return status;

	if (flags & I40E_AQC_PORT_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.port_mac);
	else
		status = I40E_ERR_INVALID_MAC_ADDR;

	return status;
}
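
/* Illustrative only: i40e_get_mac_addr() copies the LAN address out only when
 * firmware reports it valid, so callers normally validate the result before
 * using it, for example:
 *
 *	u8 mac[ETH_ALEN];
 *
 *	status = i40e_get_mac_addr(hw, mac);
 *	if (status || !is_valid_ether_addr(mac))
 *		treat the address as unusable / fall back
 */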

/**
 * i40e_pre_tx_queue_cfg - pre tx queue configure
 * @hw: pointer to the HW structure
 * @queue: target PF queue index
 * @enable: state change request
 *
 * Handles hw requirement to indicate intention to enable
 * or disable target queue.
 **/
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
{
	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
	u32 reg_block = 0;
	u32 reg_val;

	if (abs_queue_idx >= 128) {
		reg_block = abs_queue_idx / 128;
		abs_queue_idx %= 128;
	}

	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);

	if (enable)
		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
	else
		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
}

/**
 * i40e_read_pba_string - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
				 u32 pba_num_size)
{
	i40e_status status = 0;
	u16 pba_word = 0;
	u16 pba_size = 0;
	u16 pba_ptr = 0;
	u16 i = 0;

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
	if (status || (pba_word != 0xFAFA)) {
		hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block size.\n");
		return status;
	}

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * total size)
	 */
	pba_size--;
	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
		hw_dbg(hw, "Buffer too small for PBA data.\n");
		return I40E_ERR_PARAM;
	}

	for (i = 0; i < pba_size; i++) {
		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
		if (status) {
			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
			return status;
		}

		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
		pba_num[(i * 2) + 1] = pba_word & 0xFF;
	}
	pba_num[(pba_size * 2)] = '\0';

	return status;
}

/**
 * i40e_get_media_type - Gets media type
 * @hw: pointer to the hardware structure
 **/
static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
{
	enum i40e_media_type media;

	switch (hw->phy.link_info.phy_type) {
	case I40E_PHY_TYPE_10GBASE_SR:
	case I40E_PHY_TYPE_10GBASE_LR:
	case I40E_PHY_TYPE_1000BASE_SX:
	case I40E_PHY_TYPE_1000BASE_LX:
	case I40E_PHY_TYPE_40GBASE_SR4:
	case I40E_PHY_TYPE_40GBASE_LR4:
	case I40E_PHY_TYPE_25GBASE_LR:
	case I40E_PHY_TYPE_25GBASE_SR:
		media = I40E_MEDIA_TYPE_FIBER;
		break;
	case I40E_PHY_TYPE_100BASE_TX:
	case I40E_PHY_TYPE_1000BASE_T:
	case I40E_PHY_TYPE_2_5GBASE_T:
	case I40E_PHY_TYPE_5GBASE_T:
	case I40E_PHY_TYPE_10GBASE_T:
		media = I40E_MEDIA_TYPE_BASET;
		break;
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
	case I40E_PHY_TYPE_40GBASE_AOC:
	case I40E_PHY_TYPE_10GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_CR:
	case I40E_PHY_TYPE_25GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_ACC:
		media = I40E_MEDIA_TYPE_DA;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
	case I40E_PHY_TYPE_10GBASE_KX4:
	case I40E_PHY_TYPE_10GBASE_KR:
	case I40E_PHY_TYPE_40GBASE_KR4:
	case I40E_PHY_TYPE_20GBASE_KR2:
	case I40E_PHY_TYPE_25GBASE_KR:
		media = I40E_MEDIA_TYPE_BACKPLANE;
		break;
	case I40E_PHY_TYPE_SGMII:
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
	case I40E_PHY_TYPE_XLAUI:
	case I40E_PHY_TYPE_XLPPI:
	default:
		media = I40E_MEDIA_TYPE_UNKNOWN;
		break;
	}

	return media;
}

/**
 * i40e_poll_globr - Poll for Global Reset completion
 * @hw: pointer to the hardware structure
 * @retry_limit: how many times to retry before failure
 **/
static i40e_status i40e_poll_globr(struct i40e_hw *hw,
				   u32 retry_limit)
{
	u32 cnt, reg = 0;

	for (cnt = 0; cnt < retry_limit; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			return 0;
		msleep(100);
	}

	hw_dbg(hw, "Global reset failed.\n");
	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);

	return I40E_ERR_RESET_FAILED;
}

#define I40E_PF_RESET_WAIT_COUNT_A0	200
#define I40E_PF_RESET_WAIT_COUNT	200
/**
 * i40e_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * Assuming someone else has triggered a global reset,
 * assure the global reset is complete and then reset the PF
 **/
i40e_status i40e_pf_reset(struct i40e_hw *hw)
{
	u32 cnt = 0;
	u32 cnt1 = 0;
	u32 reg = 0;
	u32 grst_del;

	/* Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;

	/* It can take up to 15 secs for GRST steady state.
	 * Bump it to 16 secs max to be safe.
	 */
	grst_del = grst_del * 20;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		msleep(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		hw_dbg(hw, "Global reset polling failed to complete.\n");
		return I40E_ERR_RESET_FAILED;
	}

	/* Now Wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = rd32(hw, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
			break;
		}
		usleep_range(10000, 20000);
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		hw_dbg(hw, "wait for FW Reset complete timed out\n");
		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
		return I40E_ERR_RESET_FAILED;
	}

	/* If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (!cnt) {
		u32 reg2 = 0;

		if (hw->revision_id == 0)
			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
		else
			cnt = I40E_PF_RESET_WAIT_COUNT;
		reg = rd32(hw, I40E_PFGEN_CTRL);
		wr32(hw, I40E_PFGEN_CTRL,
		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
		for (; cnt; cnt--) {
			reg = rd32(hw, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
				break;
			usleep_range(1000, 2000);
		}
		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
			if (i40e_poll_globr(hw, grst_del))
				return I40E_ERR_RESET_FAILED;
		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			hw_dbg(hw, "PF reset polling failed to complete.\n");
			return I40E_ERR_RESET_FAILED;
		}
	}

	i40e_clear_pxe_mode(hw);

	return 0;
}

/**
 * i40e_clear_hw - clear out any left over hw state
 * @hw: pointer to the hw struct
 *
 * Clear queues and interrupts, typically called at init time,
 * but after the capabilities have been found so we know how many
 * queues and msix vectors have been allocated.
 **/
void i40e_clear_hw(struct i40e_hw *hw)
{
	u32 num_queues, base_queue;
	u32 num_pf_int;
	u32 num_vf_int;
	u32 num_vfs;
	u32 i, j;
	u32 val;
	u32 eol = 0x7ff;

	/* get number of interrupts, queues, and VFs */
	val = rd32(hw, I40E_GLPCI_CNF2);
	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;

	val = rd32(hw, I40E_PFLAN_QALLOC);
	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	val = rd32(hw, I40E_PF_VT_PFALLOC);
	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	wr32(hw, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		wr32(hw, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		wr32(hw, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		u32 abs_queue_idx = base_queue + i;
		u32 reg_block = 0;

		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
	udelay(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		wr32(hw, I40E_QINT_TQCTL(i), 0);
		wr32(hw, I40E_QTX_ENA(i), 0);
		wr32(hw, I40E_QINT_RQCTL(i), 0);
		wr32(hw, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	udelay(50);
}

/**
 * i40e_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 **/
void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
	u32 reg;

	if (i40e_check_asq_alive(hw))
		i40e_aq_clear_pxe_mode(hw, NULL);

	/* Clear single descriptor fetch/write-back mode */
	reg = rd32(hw, I40E_GLLAN_RCTL_0);

	if (hw->revision_id == 0) {
		/* As a work around clear PXE_MODE instead of setting it */
		wr32(hw, I40E_GLLAN_RCTL_0,
		     (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
	} else {
		wr32(hw, I40E_GLLAN_RCTL_0,
		     (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
	}
}

/**
 * i40e_led_is_mine - helper to find matching led
 * @hw: pointer to the hw struct
 * @idx: index into GPIO registers
 *
 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
 */
static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
{
	u32 gpio_val = 0;
	u32 port;

	if (!hw->func_caps.led[idx])
		return 0;

	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;

	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
	 * if it is not our port then ignore
	 */
	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
	    (port != hw->port))
		return 0;

	return gpio_val;
}

#define I40E_COMBINED_ACTIVITY	0xA
#define I40E_FILTER_ACTIVITY	0xE
#define I40E_LINK_ACTIVITY	0xC
#define I40E_MAC_ACTIVITY	0xD
#define I40E_LED0		22

/**
 * i40e_led_get - return current on/off mode
 * @hw: pointer to the hw struct
 *
 * The value returned is the 'mode' field as defined in the
 * GPIO register definitions: 0x0 = off, 0xf = on, and other
 * values are variations of possible behaviors relating to
 * blink, link, and wire.
 **/
u32 i40e_led_get(struct i40e_hw *hw)
{
	u32 mode = 0;
	int i;

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
			I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
		break;
	}

	return mode;
}

/**
 * i40e_led_set - set new on/off mode
 * @hw: pointer to the hw struct
 * @mode: 0=off, 0xf=on (else see manual for mode details)
 * @blink: true if the LED should blink when on, false if steady
 *
 * If this function is used to turn on the blink, it should also
 * be used to disable the blink when restoring the original state.
 **/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
	int i;

	if (mode & 0xfffffff0)
		hw_dbg(hw, "invalid mode passed in %X\n", mode);

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;
		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
		/* this & is a bit of paranoia, but serves as a range check */
		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);

		if (blink)
			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
		else
			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);

		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
		break;
	}
}
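
/* Illustrative only: an "identify adapter" sequence (ethtool-style) built on
 * the two helpers above would save the current mode, force the LED on with
 * blink, and later restore the saved mode with blink disabled, roughly:
 *
 *	orig_mode = i40e_led_get(hw);
 *	i40e_led_set(hw, 0xf, true);		(on, blinking)
 *	...
 *	i40e_led_set(hw, orig_mode, false);	(restore, blink off)
 *
 * Only the LED GPIO that i40e_led_is_mine() matches for this port is touched.
 */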

/* Admin command wrappers */

/**
 * i40e_aq_get_phy_capabilities
 * @hw: pointer to the hw struct
 * @abilities: structure for PHY capabilities to be filled
 * @qualified_modules: report Qualified Modules
 * @report_init: report init capabilities (active are default)
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the various PHY abilities supported on the Port.
 **/
i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
					 bool qualified_modules, bool report_init,
					 struct i40e_aq_get_phy_abilities_resp *abilities,
					 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	i40e_status status;
	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;

	if (!abilities)
		return I40E_ERR_PARAM;

	do {
		i40e_fill_default_direct_cmd_desc(&desc,
					       i40e_aqc_opc_get_phy_abilities);

		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		if (abilities_size > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

		if (qualified_modules)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);

		if (report_init)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);

		status = i40e_asq_send_command(hw, &desc, abilities,
					       abilities_size, cmd_details);

		switch (hw->aq.asq_last_status) {
		case I40E_AQ_RC_EIO:
			status = I40E_ERR_UNKNOWN_PHY;
			break;
		case I40E_AQ_RC_EAGAIN:
			usleep_range(1000, 2000);
			total_delay++;
			status = I40E_ERR_TIMEOUT;
			break;
		/* also covers I40E_AQ_RC_OK */
		default:
			break;
		}

	} while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
		 (total_delay < max_delay));

	if (status)
		return status;

	if (report_init) {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
		} else {
			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
			hw->phy.phy_types |=
					((u64)abilities->phy_type_ext << 32);
		}
	}

	return status;
}

/**
 * i40e_aq_set_phy_config
 * @hw: pointer to the hw struct
 * @config: structure with PHY configuration to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters
 * supported on the Port. One or more of the Set PHY config parameters may be
 * ignored in an MFP mode as the PF may not have the privilege to set some
 * of the PHY Config parameters. This status will be indicated by the
 * command response.
 **/
enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
				struct i40e_aq_set_phy_config *config,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_set_phy_config *cmd =
		(struct i40e_aq_set_phy_config *)&desc.params.raw;
	enum i40e_status_code status;

	if (!config)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_config);

	*cmd = *config;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}
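
/* Illustrative only: i40e_aq_set_phy_config() replaces the whole PHY
 * configuration, so callers follow a read-modify-write pattern: read the
 * current abilities, copy them into the config, change only the fields of
 * interest, then write the result back. i40e_set_fc_status() below is the
 * in-file example of this; a minimal sketch of the same pattern, where
 * <new flags> stands for whatever abilities bits the caller wants to change:
 *
 *	struct i40e_aq_get_phy_abilities_resp abilities;
 *	struct i40e_aq_set_phy_config config = {};
 *
 *	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
 *					      NULL);
 *	if (!status) {
 *		config.phy_type = abilities.phy_type;
 *		config.link_speed = abilities.link_speed;
 *		config.abilities = abilities.abilities | <new flags>;
 *		status = i40e_aq_set_phy_config(hw, &config, NULL);
 *	}
 */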
1707 **/ 1708 enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures, 1709 bool atomic_restart) 1710 { 1711 struct i40e_aq_get_phy_abilities_resp abilities; 1712 enum i40e_status_code status; 1713 1714 *aq_failures = 0x0; 1715 1716 /* Get the current phy config */ 1717 status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, 1718 NULL); 1719 if (status) { 1720 *aq_failures |= I40E_SET_FC_AQ_FAIL_GET; 1721 return status; 1722 } 1723 1724 status = i40e_set_fc_status(hw, &abilities, atomic_restart); 1725 if (status) 1726 *aq_failures |= I40E_SET_FC_AQ_FAIL_SET; 1727 1728 /* Update the link info */ 1729 status = i40e_update_link_info(hw); 1730 if (status) { 1731 /* Wait a little bit (on 40G cards it sometimes takes a really 1732 * long time for link to come back from the atomic reset) 1733 * and try once more 1734 */ 1735 msleep(1000); 1736 status = i40e_update_link_info(hw); 1737 } 1738 if (status) 1739 *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE; 1740 1741 return status; 1742 } 1743 1744 /** 1745 * i40e_aq_clear_pxe_mode 1746 * @hw: pointer to the hw struct 1747 * @cmd_details: pointer to command details structure or NULL 1748 * 1749 * Tell the firmware that the driver is taking over from PXE 1750 **/ 1751 i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, 1752 struct i40e_asq_cmd_details *cmd_details) 1753 { 1754 i40e_status status; 1755 struct i40e_aq_desc desc; 1756 struct i40e_aqc_clear_pxe *cmd = 1757 (struct i40e_aqc_clear_pxe *)&desc.params.raw; 1758 1759 i40e_fill_default_direct_cmd_desc(&desc, 1760 i40e_aqc_opc_clear_pxe_mode); 1761 1762 cmd->rx_cnt = 0x2; 1763 1764 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1765 1766 wr32(hw, I40E_GLLAN_RCTL_0, 0x1); 1767 1768 return status; 1769 } 1770 1771 /** 1772 * i40e_aq_set_link_restart_an 1773 * @hw: pointer to the hw struct 1774 * @enable_link: if true: enable link, if false: disable link 1775 * @cmd_details: pointer to command details structure or NULL 1776 * 1777 * Sets up the link and restarts the Auto-Negotiation over the link. 1778 **/ 1779 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, 1780 bool enable_link, 1781 struct i40e_asq_cmd_details *cmd_details) 1782 { 1783 struct i40e_aq_desc desc; 1784 struct i40e_aqc_set_link_restart_an *cmd = 1785 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw; 1786 i40e_status status; 1787 1788 i40e_fill_default_direct_cmd_desc(&desc, 1789 i40e_aqc_opc_set_link_restart_an); 1790 1791 cmd->command = I40E_AQ_PHY_RESTART_AN; 1792 if (enable_link) 1793 cmd->command |= I40E_AQ_PHY_LINK_ENABLE; 1794 else 1795 cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE; 1796 1797 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1798 1799 return status; 1800 } 1801 1802 /** 1803 * i40e_aq_get_link_info 1804 * @hw: pointer to the hw struct 1805 * @enable_lse: enable/disable LinkStatusEvent reporting 1806 * @link: pointer to link status structure - optional 1807 * @cmd_details: pointer to command details structure or NULL 1808 * 1809 * Returns the link status of the adapter. 
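 *
 * Minimal usage sketch (illustrative only, assuming an initialized @hw);
 * passing false for @enable_lse maps to I40E_AQ_LSE_DISABLE below, and
 * report_speed() is a stand-in for whatever the caller does with the data:
 *
 *	struct i40e_link_status link;
 *	i40e_status ret;
 *
 *	ret = i40e_aq_get_link_info(hw, false, &link, NULL);
 *	if (!ret && (link.link_info & I40E_AQ_LINK_UP))
 *		report_speed(link.link_speed);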
1810 **/ 1811 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, 1812 bool enable_lse, struct i40e_link_status *link, 1813 struct i40e_asq_cmd_details *cmd_details) 1814 { 1815 struct i40e_aq_desc desc; 1816 struct i40e_aqc_get_link_status *resp = 1817 (struct i40e_aqc_get_link_status *)&desc.params.raw; 1818 struct i40e_link_status *hw_link_info = &hw->phy.link_info; 1819 i40e_status status; 1820 bool tx_pause, rx_pause; 1821 u16 command_flags; 1822 1823 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 1824 1825 if (enable_lse) 1826 command_flags = I40E_AQ_LSE_ENABLE; 1827 else 1828 command_flags = I40E_AQ_LSE_DISABLE; 1829 resp->command_flags = cpu_to_le16(command_flags); 1830 1831 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1832 1833 if (status) 1834 goto aq_get_link_info_exit; 1835 1836 /* save off old link status information */ 1837 hw->phy.link_info_old = *hw_link_info; 1838 1839 /* update link status */ 1840 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; 1841 hw->phy.media_type = i40e_get_media_type(hw); 1842 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; 1843 hw_link_info->link_info = resp->link_info; 1844 hw_link_info->an_info = resp->an_info; 1845 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | 1846 I40E_AQ_CONFIG_FEC_RS_ENA); 1847 hw_link_info->ext_info = resp->ext_info; 1848 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; 1849 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); 1850 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; 1851 1852 /* update fc info */ 1853 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); 1854 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); 1855 if (tx_pause & rx_pause) 1856 hw->fc.current_mode = I40E_FC_FULL; 1857 else if (tx_pause) 1858 hw->fc.current_mode = I40E_FC_TX_PAUSE; 1859 else if (rx_pause) 1860 hw->fc.current_mode = I40E_FC_RX_PAUSE; 1861 else 1862 hw->fc.current_mode = I40E_FC_NONE; 1863 1864 if (resp->config & I40E_AQ_CONFIG_CRC_ENA) 1865 hw_link_info->crc_enable = true; 1866 else 1867 hw_link_info->crc_enable = false; 1868 1869 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED)) 1870 hw_link_info->lse_enable = true; 1871 else 1872 hw_link_info->lse_enable = false; 1873 1874 if ((hw->mac.type == I40E_MAC_XL710) && 1875 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && 1876 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) 1877 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; 1878 1879 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE && 1880 hw->mac.type != I40E_MAC_X722) { 1881 __le32 tmp; 1882 1883 memcpy(&tmp, resp->link_type, sizeof(tmp)); 1884 hw->phy.phy_types = le32_to_cpu(tmp); 1885 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); 1886 } 1887 1888 /* save link status information */ 1889 if (link) 1890 *link = *hw_link_info; 1891 1892 /* flag cleared so helper functions don't call AQ again */ 1893 hw->phy.get_link_info = false; 1894 1895 aq_get_link_info_exit: 1896 return status; 1897 } 1898 1899 /** 1900 * i40e_aq_set_phy_int_mask 1901 * @hw: pointer to the hw struct 1902 * @mask: interrupt mask to be set 1903 * @cmd_details: pointer to command details structure or NULL 1904 * 1905 * Set link interrupt mask. 
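 *
 * Minimal usage sketch (illustrative only); the I40E_AQ_EVENT_* bits that
 * make up @mask are defined in i40e_adminq_cmd.h:
 *
 *	i40e_status ret;
 *
 *	ret = i40e_aq_set_phy_int_mask(hw, ~I40E_AQ_EVENT_LINK_UPDOWN, NULL);
 *
 * The PF driver passes the complement of the events it cares about, which
 * suggests that a set bit masks the corresponding event off.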
1906 **/ 1907 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, 1908 u16 mask, 1909 struct i40e_asq_cmd_details *cmd_details) 1910 { 1911 struct i40e_aq_desc desc; 1912 struct i40e_aqc_set_phy_int_mask *cmd = 1913 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; 1914 i40e_status status; 1915 1916 i40e_fill_default_direct_cmd_desc(&desc, 1917 i40e_aqc_opc_set_phy_int_mask); 1918 1919 cmd->event_mask = cpu_to_le16(mask); 1920 1921 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1922 1923 return status; 1924 } 1925 1926 /** 1927 * i40e_aq_set_phy_debug 1928 * @hw: pointer to the hw struct 1929 * @cmd_flags: debug command flags 1930 * @cmd_details: pointer to command details structure or NULL 1931 * 1932 * Reset the external PHY. 1933 **/ 1934 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, 1935 struct i40e_asq_cmd_details *cmd_details) 1936 { 1937 struct i40e_aq_desc desc; 1938 struct i40e_aqc_set_phy_debug *cmd = 1939 (struct i40e_aqc_set_phy_debug *)&desc.params.raw; 1940 i40e_status status; 1941 1942 i40e_fill_default_direct_cmd_desc(&desc, 1943 i40e_aqc_opc_set_phy_debug); 1944 1945 cmd->command_flags = cmd_flags; 1946 1947 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1948 1949 return status; 1950 } 1951 1952 /** 1953 * i40e_aq_add_vsi 1954 * @hw: pointer to the hw struct 1955 * @vsi_ctx: pointer to a vsi context struct 1956 * @cmd_details: pointer to command details structure or NULL 1957 * 1958 * Add a VSI context to the hardware. 1959 **/ 1960 i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, 1961 struct i40e_vsi_context *vsi_ctx, 1962 struct i40e_asq_cmd_details *cmd_details) 1963 { 1964 struct i40e_aq_desc desc; 1965 struct i40e_aqc_add_get_update_vsi *cmd = 1966 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 1967 struct i40e_aqc_add_get_update_vsi_completion *resp = 1968 (struct i40e_aqc_add_get_update_vsi_completion *) 1969 &desc.params.raw; 1970 i40e_status status; 1971 1972 i40e_fill_default_direct_cmd_desc(&desc, 1973 i40e_aqc_opc_add_vsi); 1974 1975 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); 1976 cmd->connection_type = vsi_ctx->connection_type; 1977 cmd->vf_id = vsi_ctx->vf_num; 1978 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 1979 1980 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 1981 1982 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 1983 sizeof(vsi_ctx->info), cmd_details); 1984 1985 if (status) 1986 goto aq_add_vsi_exit; 1987 1988 vsi_ctx->seid = le16_to_cpu(resp->seid); 1989 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 1990 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 1991 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 1992 1993 aq_add_vsi_exit: 1994 return status; 1995 } 1996 1997 /** 1998 * i40e_aq_set_default_vsi 1999 * @hw: pointer to the hw struct 2000 * @seid: vsi number 2001 * @cmd_details: pointer to command details structure or NULL 2002 **/ 2003 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, 2004 u16 seid, 2005 struct i40e_asq_cmd_details *cmd_details) 2006 { 2007 struct i40e_aq_desc desc; 2008 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2009 (struct i40e_aqc_set_vsi_promiscuous_modes *) 2010 &desc.params.raw; 2011 i40e_status status; 2012 2013 i40e_fill_default_direct_cmd_desc(&desc, 2014 i40e_aqc_opc_set_vsi_promiscuous_modes); 2015 2016 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2017 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2018 cmd->seid = 
cpu_to_le16(seid); 2019 2020 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2021 2022 return status; 2023 } 2024 2025 /** 2026 * i40e_aq_clear_default_vsi 2027 * @hw: pointer to the hw struct 2028 * @seid: vsi number 2029 * @cmd_details: pointer to command details structure or NULL 2030 **/ 2031 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, 2032 u16 seid, 2033 struct i40e_asq_cmd_details *cmd_details) 2034 { 2035 struct i40e_aq_desc desc; 2036 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2037 (struct i40e_aqc_set_vsi_promiscuous_modes *) 2038 &desc.params.raw; 2039 i40e_status status; 2040 2041 i40e_fill_default_direct_cmd_desc(&desc, 2042 i40e_aqc_opc_set_vsi_promiscuous_modes); 2043 2044 cmd->promiscuous_flags = cpu_to_le16(0); 2045 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2046 cmd->seid = cpu_to_le16(seid); 2047 2048 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2049 2050 return status; 2051 } 2052 2053 /** 2054 * i40e_aq_set_vsi_unicast_promiscuous 2055 * @hw: pointer to the hw struct 2056 * @seid: vsi number 2057 * @set: set unicast promiscuous enable/disable 2058 * @cmd_details: pointer to command details structure or NULL 2059 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc 2060 **/ 2061 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, 2062 u16 seid, bool set, 2063 struct i40e_asq_cmd_details *cmd_details, 2064 bool rx_only_promisc) 2065 { 2066 struct i40e_aq_desc desc; 2067 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2068 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2069 i40e_status status; 2070 u16 flags = 0; 2071 2072 i40e_fill_default_direct_cmd_desc(&desc, 2073 i40e_aqc_opc_set_vsi_promiscuous_modes); 2074 2075 if (set) { 2076 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 2077 if (rx_only_promisc && 2078 (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || 2079 (hw->aq.api_maj_ver > 1))) 2080 flags |= I40E_AQC_SET_VSI_PROMISC_TX; 2081 } 2082 2083 cmd->promiscuous_flags = cpu_to_le16(flags); 2084 2085 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 2086 if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || 2087 (hw->aq.api_maj_ver > 1)) 2088 cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); 2089 2090 cmd->seid = cpu_to_le16(seid); 2091 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2092 2093 return status; 2094 } 2095 2096 /** 2097 * i40e_aq_set_vsi_multicast_promiscuous 2098 * @hw: pointer to the hw struct 2099 * @seid: vsi number 2100 * @set: set multicast promiscuous enable/disable 2101 * @cmd_details: pointer to command details structure or NULL 2102 **/ 2103 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, 2104 u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) 2105 { 2106 struct i40e_aq_desc desc; 2107 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2108 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2109 i40e_status status; 2110 u16 flags = 0; 2111 2112 i40e_fill_default_direct_cmd_desc(&desc, 2113 i40e_aqc_opc_set_vsi_promiscuous_modes); 2114 2115 if (set) 2116 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; 2117 2118 cmd->promiscuous_flags = cpu_to_le16(flags); 2119 2120 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); 2121 2122 cmd->seid = cpu_to_le16(seid); 2123 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2124 2125 return status; 2126 } 2127 2128 /** 2129 * 
i40e_aq_set_vsi_mc_promisc_on_vlan 2130 * @hw: pointer to the hw struct 2131 * @seid: vsi number 2132 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 2133 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag 2134 * @cmd_details: pointer to command details structure or NULL 2135 **/ 2136 enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, 2137 u16 seid, bool enable, 2138 u16 vid, 2139 struct i40e_asq_cmd_details *cmd_details) 2140 { 2141 struct i40e_aq_desc desc; 2142 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2143 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2144 enum i40e_status_code status; 2145 u16 flags = 0; 2146 2147 i40e_fill_default_direct_cmd_desc(&desc, 2148 i40e_aqc_opc_set_vsi_promiscuous_modes); 2149 2150 if (enable) 2151 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; 2152 2153 cmd->promiscuous_flags = cpu_to_le16(flags); 2154 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); 2155 cmd->seid = cpu_to_le16(seid); 2156 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2157 2158 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2159 2160 return status; 2161 } 2162 2163 /** 2164 * i40e_aq_set_vsi_uc_promisc_on_vlan 2165 * @hw: pointer to the hw struct 2166 * @seid: vsi number 2167 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 2168 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag 2169 * @cmd_details: pointer to command details structure or NULL 2170 **/ 2171 enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, 2172 u16 seid, bool enable, 2173 u16 vid, 2174 struct i40e_asq_cmd_details *cmd_details) 2175 { 2176 struct i40e_aq_desc desc; 2177 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2178 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2179 enum i40e_status_code status; 2180 u16 flags = 0; 2181 2182 i40e_fill_default_direct_cmd_desc(&desc, 2183 i40e_aqc_opc_set_vsi_promiscuous_modes); 2184 2185 if (enable) 2186 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 2187 2188 cmd->promiscuous_flags = cpu_to_le16(flags); 2189 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 2190 cmd->seid = cpu_to_le16(seid); 2191 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2192 2193 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2194 2195 return status; 2196 } 2197 2198 /** 2199 * i40e_aq_set_vsi_bc_promisc_on_vlan 2200 * @hw: pointer to the hw struct 2201 * @seid: vsi number 2202 * @enable: set broadcast promiscuous enable/disable for a given VLAN 2203 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag 2204 * @cmd_details: pointer to command details structure or NULL 2205 **/ 2206 i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw, 2207 u16 seid, bool enable, u16 vid, 2208 struct i40e_asq_cmd_details *cmd_details) 2209 { 2210 struct i40e_aq_desc desc; 2211 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2212 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2213 i40e_status status; 2214 u16 flags = 0; 2215 2216 i40e_fill_default_direct_cmd_desc(&desc, 2217 i40e_aqc_opc_set_vsi_promiscuous_modes); 2218 2219 if (enable) 2220 flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST; 2221 2222 cmd->promiscuous_flags = cpu_to_le16(flags); 2223 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2224 cmd->seid = cpu_to_le16(seid); 2225 cmd->vlan_tag = 
cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2226 2227 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2228 2229 return status; 2230 } 2231 2232 /** 2233 * i40e_aq_set_vsi_broadcast 2234 * @hw: pointer to the hw struct 2235 * @seid: vsi number 2236 * @set_filter: true to set filter, false to clear filter 2237 * @cmd_details: pointer to command details structure or NULL 2238 * 2239 * Set or clear the broadcast promiscuous flag (filter) for a given VSI. 2240 **/ 2241 i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, 2242 u16 seid, bool set_filter, 2243 struct i40e_asq_cmd_details *cmd_details) 2244 { 2245 struct i40e_aq_desc desc; 2246 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2247 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2248 i40e_status status; 2249 2250 i40e_fill_default_direct_cmd_desc(&desc, 2251 i40e_aqc_opc_set_vsi_promiscuous_modes); 2252 2253 if (set_filter) 2254 cmd->promiscuous_flags 2255 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2256 else 2257 cmd->promiscuous_flags 2258 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2259 2260 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2261 cmd->seid = cpu_to_le16(seid); 2262 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2263 2264 return status; 2265 } 2266 2267 /** 2268 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting 2269 * @hw: pointer to the hw struct 2270 * @seid: vsi number 2271 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 2272 * @cmd_details: pointer to command details structure or NULL 2273 **/ 2274 i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, 2275 u16 seid, bool enable, 2276 struct i40e_asq_cmd_details *cmd_details) 2277 { 2278 struct i40e_aq_desc desc; 2279 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2280 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2281 i40e_status status; 2282 u16 flags = 0; 2283 2284 i40e_fill_default_direct_cmd_desc(&desc, 2285 i40e_aqc_opc_set_vsi_promiscuous_modes); 2286 if (enable) 2287 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; 2288 2289 cmd->promiscuous_flags = cpu_to_le16(flags); 2290 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN); 2291 cmd->seid = cpu_to_le16(seid); 2292 2293 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2294 2295 return status; 2296 } 2297 2298 /** 2299 * i40e_get_vsi_params - get VSI configuration info 2300 * @hw: pointer to the hw struct 2301 * @vsi_ctx: pointer to a vsi context struct 2302 * @cmd_details: pointer to command details structure or NULL 2303 **/ 2304 i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, 2305 struct i40e_vsi_context *vsi_ctx, 2306 struct i40e_asq_cmd_details *cmd_details) 2307 { 2308 struct i40e_aq_desc desc; 2309 struct i40e_aqc_add_get_update_vsi *cmd = 2310 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2311 struct i40e_aqc_add_get_update_vsi_completion *resp = 2312 (struct i40e_aqc_add_get_update_vsi_completion *) 2313 &desc.params.raw; 2314 i40e_status status; 2315 2316 i40e_fill_default_direct_cmd_desc(&desc, 2317 i40e_aqc_opc_get_vsi_parameters); 2318 2319 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 2320 2321 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 2322 2323 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 2324 sizeof(vsi_ctx->info), NULL); 2325 2326 if (status) 2327 goto aq_get_vsi_params_exit; 2328 2329 vsi_ctx->seid = le16_to_cpu(resp->seid); 2330 vsi_ctx->vsi_number = 
le16_to_cpu(resp->vsi_number); 2331 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2332 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2333 2334 aq_get_vsi_params_exit: 2335 return status; 2336 } 2337 2338 /** 2339 * i40e_aq_update_vsi_params 2340 * @hw: pointer to the hw struct 2341 * @vsi_ctx: pointer to a vsi context struct 2342 * @cmd_details: pointer to command details structure or NULL 2343 * 2344 * Update a VSI context. 2345 **/ 2346 i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, 2347 struct i40e_vsi_context *vsi_ctx, 2348 struct i40e_asq_cmd_details *cmd_details) 2349 { 2350 struct i40e_aq_desc desc; 2351 struct i40e_aqc_add_get_update_vsi *cmd = 2352 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2353 struct i40e_aqc_add_get_update_vsi_completion *resp = 2354 (struct i40e_aqc_add_get_update_vsi_completion *) 2355 &desc.params.raw; 2356 i40e_status status; 2357 2358 i40e_fill_default_direct_cmd_desc(&desc, 2359 i40e_aqc_opc_update_vsi_parameters); 2360 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 2361 2362 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2363 2364 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 2365 sizeof(vsi_ctx->info), cmd_details); 2366 2367 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2368 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2369 2370 return status; 2371 } 2372 2373 /** 2374 * i40e_aq_get_switch_config 2375 * @hw: pointer to the hardware structure 2376 * @buf: pointer to the result buffer 2377 * @buf_size: length of input buffer 2378 * @start_seid: seid to start for the report, 0 == beginning 2379 * @cmd_details: pointer to command details structure or NULL 2380 * 2381 * Fill the buf with switch configuration returned from AdminQ command 2382 **/ 2383 i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, 2384 struct i40e_aqc_get_switch_config_resp *buf, 2385 u16 buf_size, u16 *start_seid, 2386 struct i40e_asq_cmd_details *cmd_details) 2387 { 2388 struct i40e_aq_desc desc; 2389 struct i40e_aqc_switch_seid *scfg = 2390 (struct i40e_aqc_switch_seid *)&desc.params.raw; 2391 i40e_status status; 2392 2393 i40e_fill_default_direct_cmd_desc(&desc, 2394 i40e_aqc_opc_get_switch_config); 2395 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 2396 if (buf_size > I40E_AQ_LARGE_BUF) 2397 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2398 scfg->seid = cpu_to_le16(*start_seid); 2399 2400 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details); 2401 *start_seid = le16_to_cpu(scfg->seid); 2402 2403 return status; 2404 } 2405 2406 /** 2407 * i40e_aq_set_switch_config 2408 * @hw: pointer to the hardware structure 2409 * @flags: bit flag values to set 2410 * @mode: cloud filter mode 2411 * @valid_flags: which bit flags to set 2412 * @mode: cloud filter mode 2413 * @cmd_details: pointer to command details structure or NULL 2414 * 2415 * Set switch configuration bits 2416 **/ 2417 enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, 2418 u16 flags, 2419 u16 valid_flags, u8 mode, 2420 struct i40e_asq_cmd_details *cmd_details) 2421 { 2422 struct i40e_aq_desc desc; 2423 struct i40e_aqc_set_switch_config *scfg = 2424 (struct i40e_aqc_set_switch_config *)&desc.params.raw; 2425 enum i40e_status_code status; 2426 2427 i40e_fill_default_direct_cmd_desc(&desc, 2428 i40e_aqc_opc_set_switch_config); 2429 scfg->flags = cpu_to_le16(flags); 2430 scfg->valid_flags = cpu_to_le16(valid_flags); 2431 scfg->mode = mode; 2432 if (hw->flags & 
I40E_HW_FLAG_802_1AD_CAPABLE) { 2433 scfg->switch_tag = cpu_to_le16(hw->switch_tag); 2434 scfg->first_tag = cpu_to_le16(hw->first_tag); 2435 scfg->second_tag = cpu_to_le16(hw->second_tag); 2436 } 2437 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2438 2439 return status; 2440 } 2441 2442 /** 2443 * i40e_aq_get_firmware_version 2444 * @hw: pointer to the hw struct 2445 * @fw_major_version: firmware major version 2446 * @fw_minor_version: firmware minor version 2447 * @fw_build: firmware build number 2448 * @api_major_version: major queue version 2449 * @api_minor_version: minor queue version 2450 * @cmd_details: pointer to command details structure or NULL 2451 * 2452 * Get the firmware version from the admin queue commands 2453 **/ 2454 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, 2455 u16 *fw_major_version, u16 *fw_minor_version, 2456 u32 *fw_build, 2457 u16 *api_major_version, u16 *api_minor_version, 2458 struct i40e_asq_cmd_details *cmd_details) 2459 { 2460 struct i40e_aq_desc desc; 2461 struct i40e_aqc_get_version *resp = 2462 (struct i40e_aqc_get_version *)&desc.params.raw; 2463 i40e_status status; 2464 2465 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); 2466 2467 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2468 2469 if (!status) { 2470 if (fw_major_version) 2471 *fw_major_version = le16_to_cpu(resp->fw_major); 2472 if (fw_minor_version) 2473 *fw_minor_version = le16_to_cpu(resp->fw_minor); 2474 if (fw_build) 2475 *fw_build = le32_to_cpu(resp->fw_build); 2476 if (api_major_version) 2477 *api_major_version = le16_to_cpu(resp->api_major); 2478 if (api_minor_version) 2479 *api_minor_version = le16_to_cpu(resp->api_minor); 2480 } 2481 2482 return status; 2483 } 2484 2485 /** 2486 * i40e_aq_send_driver_version 2487 * @hw: pointer to the hw struct 2488 * @dv: driver's major, minor version 2489 * @cmd_details: pointer to command details structure or NULL 2490 * 2491 * Send the driver version to the firmware 2492 **/ 2493 i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, 2494 struct i40e_driver_version *dv, 2495 struct i40e_asq_cmd_details *cmd_details) 2496 { 2497 struct i40e_aq_desc desc; 2498 struct i40e_aqc_driver_version *cmd = 2499 (struct i40e_aqc_driver_version *)&desc.params.raw; 2500 i40e_status status; 2501 u16 len; 2502 2503 if (dv == NULL) 2504 return I40E_ERR_PARAM; 2505 2506 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); 2507 2508 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 2509 cmd->driver_major_ver = dv->major_version; 2510 cmd->driver_minor_ver = dv->minor_version; 2511 cmd->driver_build_ver = dv->build_version; 2512 cmd->driver_subbuild_ver = dv->subbuild_version; 2513 2514 len = 0; 2515 while (len < sizeof(dv->driver_string) && 2516 (dv->driver_string[len] < 0x80) && 2517 dv->driver_string[len]) 2518 len++; 2519 status = i40e_asq_send_command(hw, &desc, dv->driver_string, 2520 len, cmd_details); 2521 2522 return status; 2523 } 2524 2525 /** 2526 * i40e_get_link_status - get status of the HW network link 2527 * @hw: pointer to the hw struct 2528 * @link_up: pointer to bool (true/false = linkup/linkdown) 2529 * 2530 * Variable link_up true if link is up, false if link is down. 
2531 * The variable link_up is invalid if the returned status is non-zero 2532 * 2533 * Side effect: LinkStatusEvent reporting becomes enabled 2534 **/ 2535 i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up) 2536 { 2537 i40e_status status = 0; 2538 2539 if (hw->phy.get_link_info) { 2540 status = i40e_update_link_info(hw); 2541 2542 if (status) 2543 i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n", 2544 status); 2545 } 2546 2547 *link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; 2548 2549 return status; 2550 } 2551 2552 /** 2553 * i40e_update_link_info - update status of the HW network link 2554 * @hw: pointer to the hw struct 2555 **/ 2556 noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw) 2557 { 2558 struct i40e_aq_get_phy_abilities_resp abilities; 2559 i40e_status status = 0; 2560 2561 status = i40e_aq_get_link_info(hw, true, NULL, NULL); 2562 if (status) 2563 return status; 2564 2565 /* extra checking needed to ensure link info to user is timely */ 2566 if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) && 2567 ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) || 2568 !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) { 2569 status = i40e_aq_get_phy_capabilities(hw, false, false, 2570 &abilities, NULL); 2571 if (status) 2572 return status; 2573 2574 hw->phy.link_info.req_fec_info = 2575 abilities.fec_cfg_curr_mod_ext_info & 2576 (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS); 2577 2578 memcpy(hw->phy.link_info.module_type, &abilities.module_type, 2579 sizeof(hw->phy.link_info.module_type)); 2580 } 2581 2582 return status; 2583 } 2584 2585 /** 2586 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC 2587 * @hw: pointer to the hw struct 2588 * @uplink_seid: the MAC or other gizmo SEID 2589 * @downlink_seid: the VSI SEID 2590 * @enabled_tc: bitmap of TCs to be enabled 2591 * @default_port: true for default port VSI, false for control port 2592 * @veb_seid: pointer to where to put the resulting VEB SEID 2593 * @enable_stats: true to turn on VEB stats 2594 * @cmd_details: pointer to command details structure or NULL 2595 * 2596 * This asks the FW to add a VEB between the uplink and downlink 2597 * elements. If the uplink SEID is 0, this will be a floating VEB.
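 *
 * Illustrative sketch; mac_seid and vsi_seid stand for SEIDs obtained
 * earlier (for example from i40e_aq_get_switch_config() and
 * i40e_aq_add_vsi()) and are not real values:
 *
 *	u16 veb_seid = 0;
 *	i40e_status ret;
 *
 *	ret = i40e_aq_add_veb(hw, mac_seid, vsi_seid, 0x1,
 *			      true, &veb_seid, false, NULL);
 *
 * On success *veb_seid holds the SEID assigned to the new VEB; 0x1 for
 * @enabled_tc enables only TC0.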
2598 **/ 2599 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, 2600 u16 downlink_seid, u8 enabled_tc, 2601 bool default_port, u16 *veb_seid, 2602 bool enable_stats, 2603 struct i40e_asq_cmd_details *cmd_details) 2604 { 2605 struct i40e_aq_desc desc; 2606 struct i40e_aqc_add_veb *cmd = 2607 (struct i40e_aqc_add_veb *)&desc.params.raw; 2608 struct i40e_aqc_add_veb_completion *resp = 2609 (struct i40e_aqc_add_veb_completion *)&desc.params.raw; 2610 i40e_status status; 2611 u16 veb_flags = 0; 2612 2613 /* SEIDs need to either both be set or both be 0 for floating VEB */ 2614 if (!!uplink_seid != !!downlink_seid) 2615 return I40E_ERR_PARAM; 2616 2617 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); 2618 2619 cmd->uplink_seid = cpu_to_le16(uplink_seid); 2620 cmd->downlink_seid = cpu_to_le16(downlink_seid); 2621 cmd->enable_tcs = enabled_tc; 2622 if (!uplink_seid) 2623 veb_flags |= I40E_AQC_ADD_VEB_FLOATING; 2624 if (default_port) 2625 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; 2626 else 2627 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; 2628 2629 /* reverse logic here: set the bitflag to disable the stats */ 2630 if (!enable_stats) 2631 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; 2632 2633 cmd->veb_flags = cpu_to_le16(veb_flags); 2634 2635 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2636 2637 if (!status && veb_seid) 2638 *veb_seid = le16_to_cpu(resp->veb_seid); 2639 2640 return status; 2641 } 2642 2643 /** 2644 * i40e_aq_get_veb_parameters - Retrieve VEB parameters 2645 * @hw: pointer to the hw struct 2646 * @veb_seid: the SEID of the VEB to query 2647 * @switch_id: the uplink switch id 2648 * @floating: set to true if the VEB is floating 2649 * @statistic_index: index of the stats counter block for this VEB 2650 * @vebs_used: number of VEB's used by function 2651 * @vebs_free: total VEB's not reserved by any function 2652 * @cmd_details: pointer to command details structure or NULL 2653 * 2654 * This retrieves the parameters for a particular VEB, specified by 2655 * uplink_seid, and returns them to the caller. 
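 *
 * Minimal usage sketch (illustrative only); veb_seid would typically be
 * the value returned by an earlier i40e_aq_add_veb() call, and any output
 * pointer the caller does not need may be passed as NULL:
 *
 *	bool floating;
 *	u16 stat_idx;
 *	i40e_status ret;
 *
 *	ret = i40e_aq_get_veb_parameters(hw, veb_seid, NULL, &floating,
 *					 &stat_idx, NULL, NULL, NULL);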
2656 **/ 2657 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, 2658 u16 veb_seid, u16 *switch_id, 2659 bool *floating, u16 *statistic_index, 2660 u16 *vebs_used, u16 *vebs_free, 2661 struct i40e_asq_cmd_details *cmd_details) 2662 { 2663 struct i40e_aq_desc desc; 2664 struct i40e_aqc_get_veb_parameters_completion *cmd_resp = 2665 (struct i40e_aqc_get_veb_parameters_completion *) 2666 &desc.params.raw; 2667 i40e_status status; 2668 2669 if (veb_seid == 0) 2670 return I40E_ERR_PARAM; 2671 2672 i40e_fill_default_direct_cmd_desc(&desc, 2673 i40e_aqc_opc_get_veb_parameters); 2674 cmd_resp->seid = cpu_to_le16(veb_seid); 2675 2676 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2677 if (status) 2678 goto get_veb_exit; 2679 2680 if (switch_id) 2681 *switch_id = le16_to_cpu(cmd_resp->switch_id); 2682 if (statistic_index) 2683 *statistic_index = le16_to_cpu(cmd_resp->statistic_index); 2684 if (vebs_used) 2685 *vebs_used = le16_to_cpu(cmd_resp->vebs_used); 2686 if (vebs_free) 2687 *vebs_free = le16_to_cpu(cmd_resp->vebs_free); 2688 if (floating) { 2689 u16 flags = le16_to_cpu(cmd_resp->veb_flags); 2690 2691 if (flags & I40E_AQC_ADD_VEB_FLOATING) 2692 *floating = true; 2693 else 2694 *floating = false; 2695 } 2696 2697 get_veb_exit: 2698 return status; 2699 } 2700 2701 /** 2702 * i40e_aq_add_macvlan 2703 * @hw: pointer to the hw struct 2704 * @seid: VSI for the mac address 2705 * @mv_list: list of macvlans to be added 2706 * @count: length of the list 2707 * @cmd_details: pointer to command details structure or NULL 2708 * 2709 * Add MAC/VLAN addresses to the HW filtering 2710 **/ 2711 i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, 2712 struct i40e_aqc_add_macvlan_element_data *mv_list, 2713 u16 count, struct i40e_asq_cmd_details *cmd_details) 2714 { 2715 struct i40e_aq_desc desc; 2716 struct i40e_aqc_macvlan *cmd = 2717 (struct i40e_aqc_macvlan *)&desc.params.raw; 2718 i40e_status status; 2719 u16 buf_size; 2720 int i; 2721 2722 if (count == 0 || !mv_list || !hw) 2723 return I40E_ERR_PARAM; 2724 2725 buf_size = count * sizeof(*mv_list); 2726 2727 /* prep the rest of the request */ 2728 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); 2729 cmd->num_addresses = cpu_to_le16(count); 2730 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2731 cmd->seid[1] = 0; 2732 cmd->seid[2] = 0; 2733 2734 for (i = 0; i < count; i++) 2735 if (is_multicast_ether_addr(mv_list[i].mac_addr)) 2736 mv_list[i].flags |= 2737 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); 2738 2739 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2740 if (buf_size > I40E_AQ_LARGE_BUF) 2741 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2742 2743 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, 2744 cmd_details); 2745 2746 return status; 2747 } 2748 2749 /** 2750 * i40e_aq_remove_macvlan 2751 * @hw: pointer to the hw struct 2752 * @seid: VSI for the mac address 2753 * @mv_list: list of macvlans to be removed 2754 * @count: length of the list 2755 * @cmd_details: pointer to command details structure or NULL 2756 * 2757 * Remove MAC/VLAN addresses from the HW filtering 2758 **/ 2759 i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, 2760 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2761 u16 count, struct i40e_asq_cmd_details *cmd_details) 2762 { 2763 struct i40e_aq_desc desc; 2764 struct i40e_aqc_macvlan *cmd = 2765 (struct i40e_aqc_macvlan *)&desc.params.raw; 2766 i40e_status status; 2767 u16 buf_size; 2768 
2769 if (count == 0 || !mv_list || !hw) 2770 return I40E_ERR_PARAM; 2771 2772 buf_size = count * sizeof(*mv_list); 2773 2774 /* prep the rest of the request */ 2775 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2776 cmd->num_addresses = cpu_to_le16(count); 2777 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2778 cmd->seid[1] = 0; 2779 cmd->seid[2] = 0; 2780 2781 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2782 if (buf_size > I40E_AQ_LARGE_BUF) 2783 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2784 2785 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, 2786 cmd_details); 2787 2788 return status; 2789 } 2790 2791 /** 2792 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule 2793 * @hw: pointer to the hw struct 2794 * @opcode: AQ opcode for add or delete mirror rule 2795 * @sw_seid: Switch SEID (to which rule refers) 2796 * @rule_type: Rule Type (ingress/egress/VLAN) 2797 * @id: Destination VSI SEID or Rule ID 2798 * @count: length of the list 2799 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2800 * @cmd_details: pointer to command details structure or NULL 2801 * @rule_id: Rule ID returned from FW 2802 * @rules_used: Number of rules used in internal switch 2803 * @rules_free: Number of rules free in internal switch 2804 * 2805 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for 2806 * VEBs/VEPA elements only 2807 **/ 2808 static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, 2809 u16 opcode, u16 sw_seid, u16 rule_type, u16 id, 2810 u16 count, __le16 *mr_list, 2811 struct i40e_asq_cmd_details *cmd_details, 2812 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2813 { 2814 struct i40e_aq_desc desc; 2815 struct i40e_aqc_add_delete_mirror_rule *cmd = 2816 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; 2817 struct i40e_aqc_add_delete_mirror_rule_completion *resp = 2818 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; 2819 i40e_status status; 2820 u16 buf_size; 2821 2822 buf_size = count * sizeof(*mr_list); 2823 2824 /* prep the rest of the request */ 2825 i40e_fill_default_direct_cmd_desc(&desc, opcode); 2826 cmd->seid = cpu_to_le16(sw_seid); 2827 cmd->rule_type = cpu_to_le16(rule_type & 2828 I40E_AQC_MIRROR_RULE_TYPE_MASK); 2829 cmd->num_entries = cpu_to_le16(count); 2830 /* Dest VSI for add, rule_id for delete */ 2831 cmd->destination = cpu_to_le16(id); 2832 if (mr_list) { 2833 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2834 I40E_AQ_FLAG_RD)); 2835 if (buf_size > I40E_AQ_LARGE_BUF) 2836 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2837 } 2838 2839 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, 2840 cmd_details); 2841 if (!status || 2842 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { 2843 if (rule_id) 2844 *rule_id = le16_to_cpu(resp->rule_id); 2845 if (rules_used) 2846 *rules_used = le16_to_cpu(resp->mirror_rules_used); 2847 if (rules_free) 2848 *rules_free = le16_to_cpu(resp->mirror_rules_free); 2849 } 2850 return status; 2851 } 2852 2853 /** 2854 * i40e_aq_add_mirrorrule - add a mirror rule 2855 * @hw: pointer to the hw struct 2856 * @sw_seid: Switch SEID (to which rule refers) 2857 * @rule_type: Rule Type (ingress/egress/VLAN) 2858 * @dest_vsi: SEID of VSI to which packets will be mirrored 2859 * @count: length of the list 2860 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2861 * @cmd_details: pointer to command details structure or NULL 2862 * @rule_id: Rule ID returned from FW 2863 * 
@rules_used: Number of rules used in internal switch 2864 * @rules_free: Number of rules free in internal switch 2865 * 2866 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only 2867 **/ 2868 i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2869 u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, 2870 struct i40e_asq_cmd_details *cmd_details, 2871 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2872 { 2873 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || 2874 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { 2875 if (count == 0 || !mr_list) 2876 return I40E_ERR_PARAM; 2877 } 2878 2879 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, 2880 rule_type, dest_vsi, count, mr_list, 2881 cmd_details, rule_id, rules_used, rules_free); 2882 } 2883 2884 /** 2885 * i40e_aq_delete_mirrorrule - delete a mirror rule 2886 * @hw: pointer to the hw struct 2887 * @sw_seid: Switch SEID (to which rule refers) 2888 * @rule_type: Rule Type (ingress/egress/VLAN) 2889 * @count: length of the list 2890 * @rule_id: Rule ID that is returned in the receive desc as part of 2891 * add_mirrorrule. 2892 * @mr_list: list of mirrored VLAN IDs to be removed 2893 * @cmd_details: pointer to command details structure or NULL 2894 * @rules_used: Number of rules used in internal switch 2895 * @rules_free: Number of rules free in internal switch 2896 * 2897 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only 2898 **/ 2899 i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2900 u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, 2901 struct i40e_asq_cmd_details *cmd_details, 2902 u16 *rules_used, u16 *rules_free) 2903 { 2904 /* Rule ID has to be valid except for rule_type INGRESS VLAN mirroring */ 2905 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { 2906 /* count and mr_list shall be valid for rule_type INGRESS VLAN 2907 * mirroring. For other rule_type values, count and mr_list should 2908 * not matter.
2909 */ 2910 if (count == 0 || !mr_list) 2911 return I40E_ERR_PARAM; 2912 } 2913 2914 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, 2915 rule_type, rule_id, count, mr_list, 2916 cmd_details, NULL, rules_used, rules_free); 2917 } 2918 2919 /** 2920 * i40e_aq_send_msg_to_vf 2921 * @hw: pointer to the hardware structure 2922 * @vfid: VF id to send msg 2923 * @v_opcode: opcodes for VF-PF communication 2924 * @v_retval: return error code 2925 * @msg: pointer to the msg buffer 2926 * @msglen: msg length 2927 * @cmd_details: pointer to command details 2928 * 2929 * send msg to vf 2930 **/ 2931 i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, 2932 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, 2933 struct i40e_asq_cmd_details *cmd_details) 2934 { 2935 struct i40e_aq_desc desc; 2936 struct i40e_aqc_pf_vf_message *cmd = 2937 (struct i40e_aqc_pf_vf_message *)&desc.params.raw; 2938 i40e_status status; 2939 2940 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); 2941 cmd->id = cpu_to_le32(vfid); 2942 desc.cookie_high = cpu_to_le32(v_opcode); 2943 desc.cookie_low = cpu_to_le32(v_retval); 2944 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); 2945 if (msglen) { 2946 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2947 I40E_AQ_FLAG_RD)); 2948 if (msglen > I40E_AQ_LARGE_BUF) 2949 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2950 desc.datalen = cpu_to_le16(msglen); 2951 } 2952 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); 2953 2954 return status; 2955 } 2956 2957 /** 2958 * i40e_aq_debug_read_register 2959 * @hw: pointer to the hw struct 2960 * @reg_addr: register address 2961 * @reg_val: register value 2962 * @cmd_details: pointer to command details structure or NULL 2963 * 2964 * Read the register using the admin queue commands 2965 **/ 2966 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, 2967 u32 reg_addr, u64 *reg_val, 2968 struct i40e_asq_cmd_details *cmd_details) 2969 { 2970 struct i40e_aq_desc desc; 2971 struct i40e_aqc_debug_reg_read_write *cmd_resp = 2972 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 2973 i40e_status status; 2974 2975 if (reg_val == NULL) 2976 return I40E_ERR_PARAM; 2977 2978 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); 2979 2980 cmd_resp->address = cpu_to_le32(reg_addr); 2981 2982 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2983 2984 if (!status) { 2985 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | 2986 (u64)le32_to_cpu(cmd_resp->value_low); 2987 } 2988 2989 return status; 2990 } 2991 2992 /** 2993 * i40e_aq_debug_write_register 2994 * @hw: pointer to the hw struct 2995 * @reg_addr: register address 2996 * @reg_val: register value 2997 * @cmd_details: pointer to command details structure or NULL 2998 * 2999 * Write to a register using the admin queue commands 3000 **/ 3001 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, 3002 u32 reg_addr, u64 reg_val, 3003 struct i40e_asq_cmd_details *cmd_details) 3004 { 3005 struct i40e_aq_desc desc; 3006 struct i40e_aqc_debug_reg_read_write *cmd = 3007 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 3008 i40e_status status; 3009 3010 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); 3011 3012 cmd->address = cpu_to_le32(reg_addr); 3013 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); 3014 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); 3015 3016 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 
3017 3018 return status; 3019 } 3020 3021 /** 3022 * i40e_aq_request_resource 3023 * @hw: pointer to the hw struct 3024 * @resource: resource id 3025 * @access: access type 3026 * @sdp_number: resource number 3027 * @timeout: the maximum time in ms that the driver may hold the resource 3028 * @cmd_details: pointer to command details structure or NULL 3029 * 3030 * requests common resource using the admin queue commands 3031 **/ 3032 i40e_status i40e_aq_request_resource(struct i40e_hw *hw, 3033 enum i40e_aq_resources_ids resource, 3034 enum i40e_aq_resource_access_type access, 3035 u8 sdp_number, u64 *timeout, 3036 struct i40e_asq_cmd_details *cmd_details) 3037 { 3038 struct i40e_aq_desc desc; 3039 struct i40e_aqc_request_resource *cmd_resp = 3040 (struct i40e_aqc_request_resource *)&desc.params.raw; 3041 i40e_status status; 3042 3043 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); 3044 3045 cmd_resp->resource_id = cpu_to_le16(resource); 3046 cmd_resp->access_type = cpu_to_le16(access); 3047 cmd_resp->resource_number = cpu_to_le32(sdp_number); 3048 3049 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3050 /* The completion specifies the maximum time in ms that the driver 3051 * may hold the resource in the Timeout field. 3052 * If the resource is held by someone else, the command completes with 3053 * busy return value and the timeout field indicates the maximum time 3054 * the current owner of the resource has to free it. 3055 */ 3056 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) 3057 *timeout = le32_to_cpu(cmd_resp->timeout); 3058 3059 return status; 3060 } 3061 3062 /** 3063 * i40e_aq_release_resource 3064 * @hw: pointer to the hw struct 3065 * @resource: resource id 3066 * @sdp_number: resource number 3067 * @cmd_details: pointer to command details structure or NULL 3068 * 3069 * release common resource using the admin queue commands 3070 **/ 3071 i40e_status i40e_aq_release_resource(struct i40e_hw *hw, 3072 enum i40e_aq_resources_ids resource, 3073 u8 sdp_number, 3074 struct i40e_asq_cmd_details *cmd_details) 3075 { 3076 struct i40e_aq_desc desc; 3077 struct i40e_aqc_request_resource *cmd = 3078 (struct i40e_aqc_request_resource *)&desc.params.raw; 3079 i40e_status status; 3080 3081 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); 3082 3083 cmd->resource_id = cpu_to_le16(resource); 3084 cmd->resource_number = cpu_to_le32(sdp_number); 3085 3086 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3087 3088 return status; 3089 } 3090 3091 /** 3092 * i40e_aq_read_nvm 3093 * @hw: pointer to the hw struct 3094 * @module_pointer: module pointer location in words from the NVM beginning 3095 * @offset: byte offset from the module beginning 3096 * @length: length of the section to be read (in bytes from the offset) 3097 * @data: command buffer (size [bytes] = length) 3098 * @last_command: tells if this is the last command in a series 3099 * @cmd_details: pointer to command details structure or NULL 3100 * 3101 * Read the NVM using the admin queue commands 3102 **/ 3103 i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, 3104 u32 offset, u16 length, void *data, 3105 bool last_command, 3106 struct i40e_asq_cmd_details *cmd_details) 3107 { 3108 struct i40e_aq_desc desc; 3109 struct i40e_aqc_nvm_update *cmd = 3110 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3111 i40e_status status; 3112 3113 /* In offset the highest byte must be zeroed. 
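 * Offsets are therefore limited to the low 16 MB (0x0 - 0x00FFFFFF); any
 * value with bits 31:24 set is rejected with I40E_ERR_PARAM below.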
*/ 3114 if (offset & 0xFF000000) { 3115 status = I40E_ERR_PARAM; 3116 goto i40e_aq_read_nvm_exit; 3117 } 3118 3119 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); 3120 3121 /* If this is the last command in a series, set the proper flag. */ 3122 if (last_command) 3123 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3124 cmd->module_pointer = module_pointer; 3125 cmd->offset = cpu_to_le32(offset); 3126 cmd->length = cpu_to_le16(length); 3127 3128 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3129 if (length > I40E_AQ_LARGE_BUF) 3130 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3131 3132 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3133 3134 i40e_aq_read_nvm_exit: 3135 return status; 3136 } 3137 3138 /** 3139 * i40e_aq_erase_nvm 3140 * @hw: pointer to the hw struct 3141 * @module_pointer: module pointer location in words from the NVM beginning 3142 * @offset: offset in the module (expressed in 4 KB from module's beginning) 3143 * @length: length of the section to be erased (expressed in 4 KB) 3144 * @last_command: tells if this is the last command in a series 3145 * @cmd_details: pointer to command details structure or NULL 3146 * 3147 * Erase the NVM sector using the admin queue commands 3148 **/ 3149 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, 3150 u32 offset, u16 length, bool last_command, 3151 struct i40e_asq_cmd_details *cmd_details) 3152 { 3153 struct i40e_aq_desc desc; 3154 struct i40e_aqc_nvm_update *cmd = 3155 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3156 i40e_status status; 3157 3158 /* In offset the highest byte must be zeroed. */ 3159 if (offset & 0xFF000000) { 3160 status = I40E_ERR_PARAM; 3161 goto i40e_aq_erase_nvm_exit; 3162 } 3163 3164 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); 3165 3166 /* If this is the last command in a series, set the proper flag. */ 3167 if (last_command) 3168 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3169 cmd->module_pointer = module_pointer; 3170 cmd->offset = cpu_to_le32(offset); 3171 cmd->length = cpu_to_le16(length); 3172 3173 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3174 3175 i40e_aq_erase_nvm_exit: 3176 return status; 3177 } 3178 3179 /** 3180 * i40e_parse_discover_capabilities 3181 * @hw: pointer to the hw struct 3182 * @buff: pointer to a buffer containing device/function capability records 3183 * @cap_count: number of capability records in the list 3184 * @list_type_opc: type of capabilities list to parse 3185 * 3186 * Parse the device/function capabilities list. 
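 *
 * Each record is interpreted from its (id, number, logical_id, phys_id)
 * tuple. As a purely illustrative example, a record with
 * id == I40E_AQ_CAP_ID_VF, number == 32 and logical_id == 64 is parsed as
 * "32 VFs with VF base id 64" (see the I40E_AQ_CAP_ID_VF case below).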
3187 **/ 3188 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, 3189 u32 cap_count, 3190 enum i40e_admin_queue_opc list_type_opc) 3191 { 3192 struct i40e_aqc_list_capabilities_element_resp *cap; 3193 u32 valid_functions, num_functions; 3194 u32 number, logical_id, phys_id; 3195 struct i40e_hw_capabilities *p; 3196 u16 id, ocp_cfg_word0; 3197 i40e_status status; 3198 u8 major_rev; 3199 u32 i = 0; 3200 3201 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; 3202 3203 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) 3204 p = &hw->dev_caps; 3205 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) 3206 p = &hw->func_caps; 3207 else 3208 return; 3209 3210 for (i = 0; i < cap_count; i++, cap++) { 3211 id = le16_to_cpu(cap->id); 3212 number = le32_to_cpu(cap->number); 3213 logical_id = le32_to_cpu(cap->logical_id); 3214 phys_id = le32_to_cpu(cap->phys_id); 3215 major_rev = cap->major_rev; 3216 3217 switch (id) { 3218 case I40E_AQ_CAP_ID_SWITCH_MODE: 3219 p->switch_mode = number; 3220 break; 3221 case I40E_AQ_CAP_ID_MNG_MODE: 3222 p->management_mode = number; 3223 if (major_rev > 1) { 3224 p->mng_protocols_over_mctp = logical_id; 3225 i40e_debug(hw, I40E_DEBUG_INIT, 3226 "HW Capability: Protocols over MCTP = %d\n", 3227 p->mng_protocols_over_mctp); 3228 } else { 3229 p->mng_protocols_over_mctp = 0; 3230 } 3231 break; 3232 case I40E_AQ_CAP_ID_NPAR_ACTIVE: 3233 p->npar_enable = number; 3234 break; 3235 case I40E_AQ_CAP_ID_OS2BMC_CAP: 3236 p->os2bmc = number; 3237 break; 3238 case I40E_AQ_CAP_ID_FUNCTIONS_VALID: 3239 p->valid_functions = number; 3240 break; 3241 case I40E_AQ_CAP_ID_SRIOV: 3242 if (number == 1) 3243 p->sr_iov_1_1 = true; 3244 break; 3245 case I40E_AQ_CAP_ID_VF: 3246 p->num_vfs = number; 3247 p->vf_base_id = logical_id; 3248 break; 3249 case I40E_AQ_CAP_ID_VMDQ: 3250 if (number == 1) 3251 p->vmdq = true; 3252 break; 3253 case I40E_AQ_CAP_ID_8021QBG: 3254 if (number == 1) 3255 p->evb_802_1_qbg = true; 3256 break; 3257 case I40E_AQ_CAP_ID_8021QBR: 3258 if (number == 1) 3259 p->evb_802_1_qbh = true; 3260 break; 3261 case I40E_AQ_CAP_ID_VSI: 3262 p->num_vsis = number; 3263 break; 3264 case I40E_AQ_CAP_ID_DCB: 3265 if (number == 1) { 3266 p->dcb = true; 3267 p->enabled_tcmap = logical_id; 3268 p->maxtc = phys_id; 3269 } 3270 break; 3271 case I40E_AQ_CAP_ID_FCOE: 3272 if (number == 1) 3273 p->fcoe = true; 3274 break; 3275 case I40E_AQ_CAP_ID_ISCSI: 3276 if (number == 1) 3277 p->iscsi = true; 3278 break; 3279 case I40E_AQ_CAP_ID_RSS: 3280 p->rss = true; 3281 p->rss_table_size = number; 3282 p->rss_table_entry_width = logical_id; 3283 break; 3284 case I40E_AQ_CAP_ID_RXQ: 3285 p->num_rx_qp = number; 3286 p->base_queue = phys_id; 3287 break; 3288 case I40E_AQ_CAP_ID_TXQ: 3289 p->num_tx_qp = number; 3290 p->base_queue = phys_id; 3291 break; 3292 case I40E_AQ_CAP_ID_MSIX: 3293 p->num_msix_vectors = number; 3294 i40e_debug(hw, I40E_DEBUG_INIT, 3295 "HW Capability: MSIX vector count = %d\n", 3296 p->num_msix_vectors); 3297 break; 3298 case I40E_AQ_CAP_ID_VF_MSIX: 3299 p->num_msix_vectors_vf = number; 3300 break; 3301 case I40E_AQ_CAP_ID_FLEX10: 3302 if (major_rev == 1) { 3303 if (number == 1) { 3304 p->flex10_enable = true; 3305 p->flex10_capable = true; 3306 } 3307 } else { 3308 /* Capability revision >= 2 */ 3309 if (number & 1) 3310 p->flex10_enable = true; 3311 if (number & 2) 3312 p->flex10_capable = true; 3313 } 3314 p->flex10_mode = logical_id; 3315 p->flex10_status = phys_id; 3316 break; 3317 case I40E_AQ_CAP_ID_CEM: 3318 if (number == 
1) 3319 p->mgmt_cem = true; 3320 break; 3321 case I40E_AQ_CAP_ID_IWARP: 3322 if (number == 1) 3323 p->iwarp = true; 3324 break; 3325 case I40E_AQ_CAP_ID_LED: 3326 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3327 p->led[phys_id] = true; 3328 break; 3329 case I40E_AQ_CAP_ID_SDP: 3330 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3331 p->sdp[phys_id] = true; 3332 break; 3333 case I40E_AQ_CAP_ID_MDIO: 3334 if (number == 1) { 3335 p->mdio_port_num = phys_id; 3336 p->mdio_port_mode = logical_id; 3337 } 3338 break; 3339 case I40E_AQ_CAP_ID_1588: 3340 if (number == 1) 3341 p->ieee_1588 = true; 3342 break; 3343 case I40E_AQ_CAP_ID_FLOW_DIRECTOR: 3344 p->fd = true; 3345 p->fd_filters_guaranteed = number; 3346 p->fd_filters_best_effort = logical_id; 3347 break; 3348 case I40E_AQ_CAP_ID_WSR_PROT: 3349 p->wr_csr_prot = (u64)number; 3350 p->wr_csr_prot |= (u64)logical_id << 32; 3351 break; 3352 case I40E_AQ_CAP_ID_NVM_MGMT: 3353 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) 3354 p->sec_rev_disabled = true; 3355 if (number & I40E_NVM_MGMT_UPDATE_DISABLED) 3356 p->update_disabled = true; 3357 break; 3358 default: 3359 break; 3360 } 3361 } 3362 3363 if (p->fcoe) 3364 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); 3365 3366 /* Software override ensuring FCoE is disabled if npar or mfp 3367 * mode because it is not supported in these modes. 3368 */ 3369 if (p->npar_enable || p->flex10_enable) 3370 p->fcoe = false; 3371 3372 /* count the enabled ports (aka the "not disabled" ports) */ 3373 hw->num_ports = 0; 3374 for (i = 0; i < 4; i++) { 3375 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); 3376 u64 port_cfg = 0; 3377 3378 /* use AQ read to get the physical register offset instead 3379 * of the port relative offset 3380 */ 3381 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); 3382 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) 3383 hw->num_ports++; 3384 } 3385 3386 /* OCP cards case: if a mezz is removed the Ethernet port is at 3387 * disabled state in PRTGEN_CNF register. Additional NVM read is 3388 * needed in order to check if we are dealing with OCP card. 3389 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting 3390 * physical ports results in wrong partition id calculation and thus 3391 * not supporting WoL. 
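 * When the OCP config word read below has I40E_SR_OCP_ENABLED set, the
 * port count is therefore forced to 4 regardless of what PRTGEN_CNF says.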
3392 */ 3393 if (hw->mac.type == I40E_MAC_X722) { 3394 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) { 3395 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 3396 2 * I40E_SR_OCP_CFG_WORD0, 3397 sizeof(ocp_cfg_word0), 3398 &ocp_cfg_word0, true, NULL); 3399 if (!status && 3400 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) 3401 hw->num_ports = 4; 3402 i40e_release_nvm(hw); 3403 } 3404 } 3405 3406 valid_functions = p->valid_functions; 3407 num_functions = 0; 3408 while (valid_functions) { 3409 if (valid_functions & 1) 3410 num_functions++; 3411 valid_functions >>= 1; 3412 } 3413 3414 /* partition id is 1-based, and functions are evenly spread 3415 * across the ports as partitions 3416 */ 3417 if (hw->num_ports != 0) { 3418 hw->partition_id = (hw->pf_id / hw->num_ports) + 1; 3419 hw->num_partitions = num_functions / hw->num_ports; 3420 } 3421 3422 /* additional HW specific goodies that might 3423 * someday be HW version specific 3424 */ 3425 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; 3426 } 3427 3428 /** 3429 * i40e_aq_discover_capabilities 3430 * @hw: pointer to the hw struct 3431 * @buff: a virtual buffer to hold the capabilities 3432 * @buff_size: Size of the virtual buffer 3433 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM 3434 * @list_type_opc: capabilities type to discover - pass in the command opcode 3435 * @cmd_details: pointer to command details structure or NULL 3436 * 3437 * Get the device capabilities descriptions from the firmware 3438 **/ 3439 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, 3440 void *buff, u16 buff_size, u16 *data_size, 3441 enum i40e_admin_queue_opc list_type_opc, 3442 struct i40e_asq_cmd_details *cmd_details) 3443 { 3444 struct i40e_aqc_list_capabilites *cmd; 3445 struct i40e_aq_desc desc; 3446 i40e_status status = 0; 3447 3448 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; 3449 3450 if (list_type_opc != i40e_aqc_opc_list_func_capabilities && 3451 list_type_opc != i40e_aqc_opc_list_dev_capabilities) { 3452 status = I40E_ERR_PARAM; 3453 goto exit; 3454 } 3455 3456 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); 3457 3458 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3459 if (buff_size > I40E_AQ_LARGE_BUF) 3460 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3461 3462 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3463 *data_size = le16_to_cpu(desc.datalen); 3464 3465 if (status) 3466 goto exit; 3467 3468 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), 3469 list_type_opc); 3470 3471 exit: 3472 return status; 3473 } 3474 3475 /** 3476 * i40e_aq_update_nvm 3477 * @hw: pointer to the hw struct 3478 * @module_pointer: module pointer location in words from the NVM beginning 3479 * @offset: byte offset from the module beginning 3480 * @length: length of the section to be written (in bytes from the offset) 3481 * @data: command buffer (size [bytes] = length) 3482 * @last_command: tells if this is the last command in a series 3483 * @preservation_flags: Preservation mode flags 3484 * @cmd_details: pointer to command details structure or NULL 3485 * 3486 * Update the NVM using the admin queue commands 3487 **/ 3488 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 3489 u32 offset, u16 length, void *data, 3490 bool last_command, u8 preservation_flags, 3491 struct i40e_asq_cmd_details *cmd_details) 3492 { 3493 struct i40e_aq_desc desc; 3494 struct i40e_aqc_nvm_update *cmd = 3495 (struct i40e_aqc_nvm_update *)&desc.params.raw; 
3496 i40e_status status; 3497 3498 /* In offset the highest byte must be zeroed. */ 3499 if (offset & 0xFF000000) { 3500 status = I40E_ERR_PARAM; 3501 goto i40e_aq_update_nvm_exit; 3502 } 3503 3504 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3505 3506 /* If this is the last command in a series, set the proper flag. */ 3507 if (last_command) 3508 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3509 if (hw->mac.type == I40E_MAC_X722) { 3510 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) 3511 cmd->command_flags |= 3512 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << 3513 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3514 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) 3515 cmd->command_flags |= 3516 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << 3517 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3518 } 3519 cmd->module_pointer = module_pointer; 3520 cmd->offset = cpu_to_le32(offset); 3521 cmd->length = cpu_to_le16(length); 3522 3523 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3524 if (length > I40E_AQ_LARGE_BUF) 3525 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3526 3527 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3528 3529 i40e_aq_update_nvm_exit: 3530 return status; 3531 } 3532 3533 /** 3534 * i40e_aq_rearrange_nvm 3535 * @hw: pointer to the hw struct 3536 * @rearrange_nvm: defines direction of rearrangement 3537 * @cmd_details: pointer to command details structure or NULL 3538 * 3539 * Rearrange NVM structure, available only for transition FW 3540 **/ 3541 i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, 3542 u8 rearrange_nvm, 3543 struct i40e_asq_cmd_details *cmd_details) 3544 { 3545 struct i40e_aqc_nvm_update *cmd; 3546 i40e_status status; 3547 struct i40e_aq_desc desc; 3548 3549 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; 3550 3551 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3552 3553 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | 3554 I40E_AQ_NVM_REARRANGE_TO_STRUCT); 3555 3556 if (!rearrange_nvm) { 3557 status = I40E_ERR_PARAM; 3558 goto i40e_aq_rearrange_nvm_exit; 3559 } 3560 3561 cmd->command_flags |= rearrange_nvm; 3562 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3563 3564 i40e_aq_rearrange_nvm_exit: 3565 return status; 3566 } 3567 3568 /** 3569 * i40e_aq_get_lldp_mib 3570 * @hw: pointer to the hw struct 3571 * @bridge_type: type of bridge requested 3572 * @mib_type: Local, Remote or both Local and Remote MIBs 3573 * @buff: pointer to a user supplied buffer to store the MIB block 3574 * @buff_size: size of the buffer (in bytes) 3575 * @local_len : length of the returned Local LLDP MIB 3576 * @remote_len: length of the returned Remote LLDP MIB 3577 * @cmd_details: pointer to command details structure or NULL 3578 * 3579 * Requests the complete LLDP MIB (entire packet). 
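 *
 * Illustrative call sequence (not taken from a caller in this file; the
 * buffer size and the local-MIB type constant from i40e_adminq_cmd.h are
 * example choices only, and bridge_type 0 is the nearest-bridge type):
 *
 *	u8 mib[1024];
 *	u16 local_len = 0, remote_len = 0;
 *	i40e_status ret;
 *
 *	ret = i40e_aq_get_lldp_mib(hw, 0, I40E_AQ_LLDP_MIB_LOCAL,
 *				   mib, sizeof(mib),
 *				   &local_len, &remote_len, NULL);
 *
 * On success local_len/remote_len report how much of the buffer holds each
 * MIB; either pointer may be NULL if the caller does not need it.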
3580 **/ 3581 i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, 3582 u8 mib_type, void *buff, u16 buff_size, 3583 u16 *local_len, u16 *remote_len, 3584 struct i40e_asq_cmd_details *cmd_details) 3585 { 3586 struct i40e_aq_desc desc; 3587 struct i40e_aqc_lldp_get_mib *cmd = 3588 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3589 struct i40e_aqc_lldp_get_mib *resp = 3590 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3591 i40e_status status; 3592 3593 if (buff_size == 0 || !buff) 3594 return I40E_ERR_PARAM; 3595 3596 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); 3597 /* Indirect Command */ 3598 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3599 3600 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; 3601 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & 3602 I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 3603 3604 desc.datalen = cpu_to_le16(buff_size); 3605 3606 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3607 if (buff_size > I40E_AQ_LARGE_BUF) 3608 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3609 3610 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3611 if (!status) { 3612 if (local_len != NULL) 3613 *local_len = le16_to_cpu(resp->local_len); 3614 if (remote_len != NULL) 3615 *remote_len = le16_to_cpu(resp->remote_len); 3616 } 3617 3618 return status; 3619 } 3620 3621 /** 3622 * i40e_aq_cfg_lldp_mib_change_event 3623 * @hw: pointer to the hw struct 3624 * @enable_update: Enable or Disable event posting 3625 * @cmd_details: pointer to command details structure or NULL 3626 * 3627 * Enable or Disable posting of an event on ARQ when LLDP MIB 3628 * associated with the interface changes 3629 **/ 3630 i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, 3631 bool enable_update, 3632 struct i40e_asq_cmd_details *cmd_details) 3633 { 3634 struct i40e_aq_desc desc; 3635 struct i40e_aqc_lldp_update_mib *cmd = 3636 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; 3637 i40e_status status; 3638 3639 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); 3640 3641 if (!enable_update) 3642 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; 3643 3644 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3645 3646 return status; 3647 } 3648 3649 /** 3650 * i40e_aq_restore_lldp 3651 * @hw: pointer to the hw struct 3652 * @setting: pointer to factory setting variable or NULL 3653 * @restore: True if factory settings should be restored 3654 * @cmd_details: pointer to command details structure or NULL 3655 * 3656 * Restore LLDP Agent factory settings if @restore set to True. In other case 3657 * only returns factory setting in AQ response. 
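 *
 * Query-only sketch (illustrative, not taken from a caller in this file):
 *
 *	u8 factory_setting = 0;
 *
 *	status = i40e_aq_restore_lldp(hw, &factory_setting, false, NULL);
 *
 * With restore == false the command does not touch the agent and only
 * reports the factory setting through @setting.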
 **/
enum i40e_status_code
i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
		     struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_lldp_restore *cmd =
		(struct i40e_aqc_lldp_restore *)&desc.params.raw;
	i40e_status status;

	if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
		i40e_debug(hw, I40E_DEBUG_ALL,
			   "Restore LLDP not supported by current FW version.\n");
		return I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);

	if (restore)
		cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	if (setting)
		*setting = cmd->command & 1;

	return status;
}

/**
 * i40e_aq_stop_lldp
 * @hw: pointer to the hw struct
 * @shutdown_agent: True if LLDP Agent needs to be Shutdown
 * @persist: True if stop of LLDP should be persistent across power cycles
 * @cmd_details: pointer to command details structure or NULL
 *
 * Stop or Shutdown the embedded LLDP Agent
 **/
i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
			      bool persist,
			      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_lldp_stop *cmd =
		(struct i40e_aqc_lldp_stop *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);

	if (shutdown_agent)
		cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;

	if (persist) {
		if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
			cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
		else
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "Persistent Stop LLDP not supported by current FW version.\n");
	}

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_start_lldp
 * @hw: pointer to the hw struct
 * @persist: True if start of LLDP should be persistent across power cycles
 * @cmd_details: pointer to command details structure or NULL
 *
 * Start the embedded LLDP Agent on all ports.
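 *
 * Note: persist only takes effect when hw->flags has
 * I40E_HW_FLAG_FW_LLDP_PERSISTENT set; otherwise the request falls back
 * to a non-persistent start and a debug message is logged (see below).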
3732 **/ 3733 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, 3734 struct i40e_asq_cmd_details *cmd_details) 3735 { 3736 struct i40e_aq_desc desc; 3737 struct i40e_aqc_lldp_start *cmd = 3738 (struct i40e_aqc_lldp_start *)&desc.params.raw; 3739 i40e_status status; 3740 3741 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); 3742 3743 cmd->command = I40E_AQ_LLDP_AGENT_START; 3744 3745 if (persist) { 3746 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3747 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; 3748 else 3749 i40e_debug(hw, I40E_DEBUG_ALL, 3750 "Persistent Start LLDP not supported by current FW version.\n"); 3751 } 3752 3753 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3754 3755 return status; 3756 } 3757 3758 /** 3759 * i40e_aq_set_dcb_parameters 3760 * @hw: pointer to the hw struct 3761 * @cmd_details: pointer to command details structure or NULL 3762 * @dcb_enable: True if DCB configuration needs to be applied 3763 * 3764 **/ 3765 enum i40e_status_code 3766 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, 3767 struct i40e_asq_cmd_details *cmd_details) 3768 { 3769 struct i40e_aq_desc desc; 3770 struct i40e_aqc_set_dcb_parameters *cmd = 3771 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; 3772 i40e_status status; 3773 3774 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) 3775 return I40E_ERR_DEVICE_NOT_SUPPORTED; 3776 3777 i40e_fill_default_direct_cmd_desc(&desc, 3778 i40e_aqc_opc_set_dcb_parameters); 3779 3780 if (dcb_enable) { 3781 cmd->valid_flags = I40E_DCB_VALID; 3782 cmd->command = I40E_AQ_DCB_SET_AGENT; 3783 } 3784 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3785 3786 return status; 3787 } 3788 3789 /** 3790 * i40e_aq_get_cee_dcb_config 3791 * @hw: pointer to the hw struct 3792 * @buff: response buffer that stores CEE operational configuration 3793 * @buff_size: size of the buffer passed 3794 * @cmd_details: pointer to command details structure or NULL 3795 * 3796 * Get CEE DCBX mode operational configuration from firmware 3797 **/ 3798 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, 3799 void *buff, u16 buff_size, 3800 struct i40e_asq_cmd_details *cmd_details) 3801 { 3802 struct i40e_aq_desc desc; 3803 i40e_status status; 3804 3805 if (buff_size == 0 || !buff) 3806 return I40E_ERR_PARAM; 3807 3808 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); 3809 3810 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3811 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, 3812 cmd_details); 3813 3814 return status; 3815 } 3816 3817 /** 3818 * i40e_aq_add_udp_tunnel 3819 * @hw: pointer to the hw struct 3820 * @udp_port: the UDP port to add in Host byte order 3821 * @protocol_index: protocol index type 3822 * @filter_index: pointer to filter index 3823 * @cmd_details: pointer to command details structure or NULL 3824 * 3825 * Note: Firmware expects the udp_port value to be in Little Endian format, 3826 * and this function will call cpu_to_le16 to convert from Host byte order to 3827 * Little Endian order. 
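 *
 * Illustrative usage (hypothetical values; the VXLAN tunnel type constant
 * is assumed to be available from i40e_adminq_cmd.h):
 *
 *	u8 filter_index;
 *	i40e_status ret;
 *
 *	ret = i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
 *				     &filter_index, NULL);
 *
 * The port (4789, the IANA VXLAN port) is passed in host byte order; the
 * cpu_to_le16() conversion happens inside this function.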
3828 **/ 3829 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 3830 u16 udp_port, u8 protocol_index, 3831 u8 *filter_index, 3832 struct i40e_asq_cmd_details *cmd_details) 3833 { 3834 struct i40e_aq_desc desc; 3835 struct i40e_aqc_add_udp_tunnel *cmd = 3836 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; 3837 struct i40e_aqc_del_udp_tunnel_completion *resp = 3838 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; 3839 i40e_status status; 3840 3841 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); 3842 3843 cmd->udp_port = cpu_to_le16(udp_port); 3844 cmd->protocol_type = protocol_index; 3845 3846 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3847 3848 if (!status && filter_index) 3849 *filter_index = resp->index; 3850 3851 return status; 3852 } 3853 3854 /** 3855 * i40e_aq_del_udp_tunnel 3856 * @hw: pointer to the hw struct 3857 * @index: filter index 3858 * @cmd_details: pointer to command details structure or NULL 3859 **/ 3860 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 3861 struct i40e_asq_cmd_details *cmd_details) 3862 { 3863 struct i40e_aq_desc desc; 3864 struct i40e_aqc_remove_udp_tunnel *cmd = 3865 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; 3866 i40e_status status; 3867 3868 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); 3869 3870 cmd->index = index; 3871 3872 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3873 3874 return status; 3875 } 3876 3877 /** 3878 * i40e_aq_delete_element - Delete switch element 3879 * @hw: pointer to the hw struct 3880 * @seid: the SEID to delete from the switch 3881 * @cmd_details: pointer to command details structure or NULL 3882 * 3883 * This deletes a switch element from the switch. 3884 **/ 3885 i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, 3886 struct i40e_asq_cmd_details *cmd_details) 3887 { 3888 struct i40e_aq_desc desc; 3889 struct i40e_aqc_switch_seid *cmd = 3890 (struct i40e_aqc_switch_seid *)&desc.params.raw; 3891 i40e_status status; 3892 3893 if (seid == 0) 3894 return I40E_ERR_PARAM; 3895 3896 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); 3897 3898 cmd->seid = cpu_to_le16(seid); 3899 3900 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3901 3902 return status; 3903 } 3904 3905 /** 3906 * i40e_aq_dcb_updated - DCB Updated Command 3907 * @hw: pointer to the hw struct 3908 * @cmd_details: pointer to command details structure or NULL 3909 * 3910 * EMP will return when the shared RPB settings have been 3911 * recomputed and modified. The retval field in the descriptor 3912 * will be set to 0 when RPB is modified. 
3913 **/ 3914 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, 3915 struct i40e_asq_cmd_details *cmd_details) 3916 { 3917 struct i40e_aq_desc desc; 3918 i40e_status status; 3919 3920 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); 3921 3922 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3923 3924 return status; 3925 } 3926 3927 /** 3928 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler 3929 * @hw: pointer to the hw struct 3930 * @seid: seid for the physical port/switching component/vsi 3931 * @buff: Indirect buffer to hold data parameters and response 3932 * @buff_size: Indirect buffer size 3933 * @opcode: Tx scheduler AQ command opcode 3934 * @cmd_details: pointer to command details structure or NULL 3935 * 3936 * Generic command handler for Tx scheduler AQ commands 3937 **/ 3938 static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, 3939 void *buff, u16 buff_size, 3940 enum i40e_admin_queue_opc opcode, 3941 struct i40e_asq_cmd_details *cmd_details) 3942 { 3943 struct i40e_aq_desc desc; 3944 struct i40e_aqc_tx_sched_ind *cmd = 3945 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 3946 i40e_status status; 3947 bool cmd_param_flag = false; 3948 3949 switch (opcode) { 3950 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: 3951 case i40e_aqc_opc_configure_vsi_tc_bw: 3952 case i40e_aqc_opc_enable_switching_comp_ets: 3953 case i40e_aqc_opc_modify_switching_comp_ets: 3954 case i40e_aqc_opc_disable_switching_comp_ets: 3955 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: 3956 case i40e_aqc_opc_configure_switching_comp_bw_config: 3957 cmd_param_flag = true; 3958 break; 3959 case i40e_aqc_opc_query_vsi_bw_config: 3960 case i40e_aqc_opc_query_vsi_ets_sla_config: 3961 case i40e_aqc_opc_query_switching_comp_ets_config: 3962 case i40e_aqc_opc_query_port_ets_config: 3963 case i40e_aqc_opc_query_switching_comp_bw_config: 3964 cmd_param_flag = false; 3965 break; 3966 default: 3967 return I40E_ERR_PARAM; 3968 } 3969 3970 i40e_fill_default_direct_cmd_desc(&desc, opcode); 3971 3972 /* Indirect command */ 3973 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3974 if (cmd_param_flag) 3975 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 3976 if (buff_size > I40E_AQ_LARGE_BUF) 3977 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3978 3979 desc.datalen = cpu_to_le16(buff_size); 3980 3981 cmd->vsi_seid = cpu_to_le16(seid); 3982 3983 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3984 3985 return status; 3986 } 3987 3988 /** 3989 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit 3990 * @hw: pointer to the hw struct 3991 * @seid: VSI seid 3992 * @credit: BW limit credits (0 = disabled) 3993 * @max_credit: Max BW limit credits 3994 * @cmd_details: pointer to command details structure or NULL 3995 **/ 3996 i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, 3997 u16 seid, u16 credit, u8 max_credit, 3998 struct i40e_asq_cmd_details *cmd_details) 3999 { 4000 struct i40e_aq_desc desc; 4001 struct i40e_aqc_configure_vsi_bw_limit *cmd = 4002 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; 4003 i40e_status status; 4004 4005 i40e_fill_default_direct_cmd_desc(&desc, 4006 i40e_aqc_opc_configure_vsi_bw_limit); 4007 4008 cmd->vsi_seid = cpu_to_le16(seid); 4009 cmd->credit = cpu_to_le16(credit); 4010 cmd->max_credit = max_credit; 4011 4012 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4013 4014 return status; 4015 } 4016 4017 /** 4018 * i40e_aq_config_vsi_tc_bw - Config VSI BW 
Allocation per TC 4019 * @hw: pointer to the hw struct 4020 * @seid: VSI seid 4021 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits 4022 * @cmd_details: pointer to command details structure or NULL 4023 **/ 4024 i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, 4025 u16 seid, 4026 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, 4027 struct i40e_asq_cmd_details *cmd_details) 4028 { 4029 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4030 i40e_aqc_opc_configure_vsi_tc_bw, 4031 cmd_details); 4032 } 4033 4034 /** 4035 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port 4036 * @hw: pointer to the hw struct 4037 * @seid: seid of the switching component connected to Physical Port 4038 * @ets_data: Buffer holding ETS parameters 4039 * @opcode: Tx scheduler AQ command opcode 4040 * @cmd_details: pointer to command details structure or NULL 4041 **/ 4042 i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, 4043 u16 seid, 4044 struct i40e_aqc_configure_switching_comp_ets_data *ets_data, 4045 enum i40e_admin_queue_opc opcode, 4046 struct i40e_asq_cmd_details *cmd_details) 4047 { 4048 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, 4049 sizeof(*ets_data), opcode, cmd_details); 4050 } 4051 4052 /** 4053 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC 4054 * @hw: pointer to the hw struct 4055 * @seid: seid of the switching component 4056 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits 4057 * @cmd_details: pointer to command details structure or NULL 4058 **/ 4059 i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, 4060 u16 seid, 4061 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, 4062 struct i40e_asq_cmd_details *cmd_details) 4063 { 4064 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4065 i40e_aqc_opc_configure_switching_comp_bw_config, 4066 cmd_details); 4067 } 4068 4069 /** 4070 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration 4071 * @hw: pointer to the hw struct 4072 * @seid: seid of the VSI 4073 * @bw_data: Buffer to hold VSI BW configuration 4074 * @cmd_details: pointer to command details structure or NULL 4075 **/ 4076 i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, 4077 u16 seid, 4078 struct i40e_aqc_query_vsi_bw_config_resp *bw_data, 4079 struct i40e_asq_cmd_details *cmd_details) 4080 { 4081 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4082 i40e_aqc_opc_query_vsi_bw_config, 4083 cmd_details); 4084 } 4085 4086 /** 4087 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC 4088 * @hw: pointer to the hw struct 4089 * @seid: seid of the VSI 4090 * @bw_data: Buffer to hold VSI BW configuration per TC 4091 * @cmd_details: pointer to command details structure or NULL 4092 **/ 4093 i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, 4094 u16 seid, 4095 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, 4096 struct i40e_asq_cmd_details *cmd_details) 4097 { 4098 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4099 i40e_aqc_opc_query_vsi_ets_sla_config, 4100 cmd_details); 4101 } 4102 4103 /** 4104 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC 4105 * @hw: pointer to the hw struct 4106 * @seid: seid of the switching component 4107 * @bw_data: Buffer to hold switching component's per TC BW config 4108 * @cmd_details: pointer to command details structure or NULL 
4109 **/ 4110 i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, 4111 u16 seid, 4112 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, 4113 struct i40e_asq_cmd_details *cmd_details) 4114 { 4115 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4116 i40e_aqc_opc_query_switching_comp_ets_config, 4117 cmd_details); 4118 } 4119 4120 /** 4121 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration 4122 * @hw: pointer to the hw struct 4123 * @seid: seid of the VSI or switching component connected to Physical Port 4124 * @bw_data: Buffer to hold current ETS configuration for the Physical Port 4125 * @cmd_details: pointer to command details structure or NULL 4126 **/ 4127 i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, 4128 u16 seid, 4129 struct i40e_aqc_query_port_ets_config_resp *bw_data, 4130 struct i40e_asq_cmd_details *cmd_details) 4131 { 4132 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4133 i40e_aqc_opc_query_port_ets_config, 4134 cmd_details); 4135 } 4136 4137 /** 4138 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration 4139 * @hw: pointer to the hw struct 4140 * @seid: seid of the switching component 4141 * @bw_data: Buffer to hold switching component's BW configuration 4142 * @cmd_details: pointer to command details structure or NULL 4143 **/ 4144 i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, 4145 u16 seid, 4146 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, 4147 struct i40e_asq_cmd_details *cmd_details) 4148 { 4149 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4150 i40e_aqc_opc_query_switching_comp_bw_config, 4151 cmd_details); 4152 } 4153 4154 /** 4155 * i40e_validate_filter_settings 4156 * @hw: pointer to the hardware structure 4157 * @settings: Filter control settings 4158 * 4159 * Check and validate the filter control settings passed. 4160 * The function checks for the valid filter/context sizes being 4161 * passed for FCoE and PE. 4162 * 4163 * Returns 0 if the values passed are valid and within 4164 * range else returns an error. 
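 *
 * For reference, each size below is derived by shifting a base size by
 * the corresponding enum value, e.g.:
 *
 *	fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE
 *			 << I40E_HASH_FILTER_SIZE_4K;
 *
 * and the final check enforces that the FCoE hash-filter and DMA-context
 * sizes together do not exceed the PMFCOEFMAX value read from hardware.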
4165 **/ 4166 static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, 4167 struct i40e_filter_control_settings *settings) 4168 { 4169 u32 fcoe_cntx_size, fcoe_filt_size; 4170 u32 pe_cntx_size, pe_filt_size; 4171 u32 fcoe_fmax; 4172 u32 val; 4173 4174 /* Validate FCoE settings passed */ 4175 switch (settings->fcoe_filt_num) { 4176 case I40E_HASH_FILTER_SIZE_1K: 4177 case I40E_HASH_FILTER_SIZE_2K: 4178 case I40E_HASH_FILTER_SIZE_4K: 4179 case I40E_HASH_FILTER_SIZE_8K: 4180 case I40E_HASH_FILTER_SIZE_16K: 4181 case I40E_HASH_FILTER_SIZE_32K: 4182 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4183 fcoe_filt_size <<= (u32)settings->fcoe_filt_num; 4184 break; 4185 default: 4186 return I40E_ERR_PARAM; 4187 } 4188 4189 switch (settings->fcoe_cntx_num) { 4190 case I40E_DMA_CNTX_SIZE_512: 4191 case I40E_DMA_CNTX_SIZE_1K: 4192 case I40E_DMA_CNTX_SIZE_2K: 4193 case I40E_DMA_CNTX_SIZE_4K: 4194 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4195 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; 4196 break; 4197 default: 4198 return I40E_ERR_PARAM; 4199 } 4200 4201 /* Validate PE settings passed */ 4202 switch (settings->pe_filt_num) { 4203 case I40E_HASH_FILTER_SIZE_1K: 4204 case I40E_HASH_FILTER_SIZE_2K: 4205 case I40E_HASH_FILTER_SIZE_4K: 4206 case I40E_HASH_FILTER_SIZE_8K: 4207 case I40E_HASH_FILTER_SIZE_16K: 4208 case I40E_HASH_FILTER_SIZE_32K: 4209 case I40E_HASH_FILTER_SIZE_64K: 4210 case I40E_HASH_FILTER_SIZE_128K: 4211 case I40E_HASH_FILTER_SIZE_256K: 4212 case I40E_HASH_FILTER_SIZE_512K: 4213 case I40E_HASH_FILTER_SIZE_1M: 4214 pe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4215 pe_filt_size <<= (u32)settings->pe_filt_num; 4216 break; 4217 default: 4218 return I40E_ERR_PARAM; 4219 } 4220 4221 switch (settings->pe_cntx_num) { 4222 case I40E_DMA_CNTX_SIZE_512: 4223 case I40E_DMA_CNTX_SIZE_1K: 4224 case I40E_DMA_CNTX_SIZE_2K: 4225 case I40E_DMA_CNTX_SIZE_4K: 4226 case I40E_DMA_CNTX_SIZE_8K: 4227 case I40E_DMA_CNTX_SIZE_16K: 4228 case I40E_DMA_CNTX_SIZE_32K: 4229 case I40E_DMA_CNTX_SIZE_64K: 4230 case I40E_DMA_CNTX_SIZE_128K: 4231 case I40E_DMA_CNTX_SIZE_256K: 4232 pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4233 pe_cntx_size <<= (u32)settings->pe_cntx_num; 4234 break; 4235 default: 4236 return I40E_ERR_PARAM; 4237 } 4238 4239 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ 4240 val = rd32(hw, I40E_GLHMC_FCOEFMAX); 4241 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) 4242 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; 4243 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) 4244 return I40E_ERR_INVALID_SIZE; 4245 4246 return 0; 4247 } 4248 4249 /** 4250 * i40e_set_filter_control 4251 * @hw: pointer to the hardware structure 4252 * @settings: Filter control settings 4253 * 4254 * Set the Queue Filters for PE/FCoE and enable filters required 4255 * for a single PF. It is expected that these settings are programmed 4256 * at the driver initialization time. 
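 *
 * A minimal sketch of how a caller might fill the settings at init time
 * (field values are illustrative, not recommendations):
 *
 *	struct i40e_filter_control_settings filter_control = {0};
 *	i40e_status ret;
 *
 *	filter_control.pe_filt_num = I40E_HASH_FILTER_SIZE_1K;
 *	filter_control.pe_cntx_num = I40E_DMA_CNTX_SIZE_512;
 *	filter_control.fcoe_filt_num = I40E_HASH_FILTER_SIZE_1K;
 *	filter_control.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_512;
 *	filter_control.hash_lut_size = I40E_HASH_LUT_SIZE_128;
 *	filter_control.enable_ethtype = true;
 *	filter_control.enable_macvlan = true;
 *	ret = i40e_set_filter_control(hw, &filter_control);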
4257 **/ 4258 i40e_status i40e_set_filter_control(struct i40e_hw *hw, 4259 struct i40e_filter_control_settings *settings) 4260 { 4261 i40e_status ret = 0; 4262 u32 hash_lut_size = 0; 4263 u32 val; 4264 4265 if (!settings) 4266 return I40E_ERR_PARAM; 4267 4268 /* Validate the input settings */ 4269 ret = i40e_validate_filter_settings(hw, settings); 4270 if (ret) 4271 return ret; 4272 4273 /* Read the PF Queue Filter control register */ 4274 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); 4275 4276 /* Program required PE hash buckets for the PF */ 4277 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; 4278 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & 4279 I40E_PFQF_CTL_0_PEHSIZE_MASK; 4280 /* Program required PE contexts for the PF */ 4281 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; 4282 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & 4283 I40E_PFQF_CTL_0_PEDSIZE_MASK; 4284 4285 /* Program required FCoE hash buckets for the PF */ 4286 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4287 val |= ((u32)settings->fcoe_filt_num << 4288 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & 4289 I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4290 /* Program required FCoE DDP contexts for the PF */ 4291 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4292 val |= ((u32)settings->fcoe_cntx_num << 4293 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & 4294 I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4295 4296 /* Program Hash LUT size for the PF */ 4297 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4298 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) 4299 hash_lut_size = 1; 4300 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & 4301 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4302 4303 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ 4304 if (settings->enable_fdir) 4305 val |= I40E_PFQF_CTL_0_FD_ENA_MASK; 4306 if (settings->enable_ethtype) 4307 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; 4308 if (settings->enable_macvlan) 4309 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; 4310 4311 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); 4312 4313 return 0; 4314 } 4315 4316 /** 4317 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter 4318 * @hw: pointer to the hw struct 4319 * @mac_addr: MAC address to use in the filter 4320 * @ethtype: Ethertype to use in the filter 4321 * @flags: Flags that needs to be applied to the filter 4322 * @vsi_seid: seid of the control VSI 4323 * @queue: VSI queue number to send the packet to 4324 * @is_add: Add control packet filter if True else remove 4325 * @stats: Structure to hold information on control filter counts 4326 * @cmd_details: pointer to command details structure or NULL 4327 * 4328 * This command will Add or Remove control packet filter for a control VSI. 4329 * In return it will update the total number of perfect filter count in 4330 * the stats member. 
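 *
 * i40e_add_filter_to_drop_tx_flow_control_frames() below is the in-tree
 * example of the add path; a matching remove of such a filter could look
 * like this (sketch, with hypothetical values mirroring that helper):
 *
 *	status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype,
 *						       flag, seid, 0, false,
 *						       NULL, NULL);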
4331 **/ 4332 i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, 4333 u8 *mac_addr, u16 ethtype, u16 flags, 4334 u16 vsi_seid, u16 queue, bool is_add, 4335 struct i40e_control_filter_stats *stats, 4336 struct i40e_asq_cmd_details *cmd_details) 4337 { 4338 struct i40e_aq_desc desc; 4339 struct i40e_aqc_add_remove_control_packet_filter *cmd = 4340 (struct i40e_aqc_add_remove_control_packet_filter *) 4341 &desc.params.raw; 4342 struct i40e_aqc_add_remove_control_packet_filter_completion *resp = 4343 (struct i40e_aqc_add_remove_control_packet_filter_completion *) 4344 &desc.params.raw; 4345 i40e_status status; 4346 4347 if (vsi_seid == 0) 4348 return I40E_ERR_PARAM; 4349 4350 if (is_add) { 4351 i40e_fill_default_direct_cmd_desc(&desc, 4352 i40e_aqc_opc_add_control_packet_filter); 4353 cmd->queue = cpu_to_le16(queue); 4354 } else { 4355 i40e_fill_default_direct_cmd_desc(&desc, 4356 i40e_aqc_opc_remove_control_packet_filter); 4357 } 4358 4359 if (mac_addr) 4360 ether_addr_copy(cmd->mac, mac_addr); 4361 4362 cmd->etype = cpu_to_le16(ethtype); 4363 cmd->flags = cpu_to_le16(flags); 4364 cmd->seid = cpu_to_le16(vsi_seid); 4365 4366 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4367 4368 if (!status && stats) { 4369 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); 4370 stats->etype_used = le16_to_cpu(resp->etype_used); 4371 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); 4372 stats->etype_free = le16_to_cpu(resp->etype_free); 4373 } 4374 4375 return status; 4376 } 4377 4378 /** 4379 * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control 4380 * @hw: pointer to the hw struct 4381 * @seid: VSI seid to add ethertype filter from 4382 **/ 4383 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, 4384 u16 seid) 4385 { 4386 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 4387 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | 4388 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | 4389 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; 4390 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; 4391 i40e_status status; 4392 4393 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, 4394 seid, 0, true, NULL, 4395 NULL); 4396 if (status) 4397 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); 4398 } 4399 4400 /** 4401 * i40e_aq_alternate_read 4402 * @hw: pointer to the hardware structure 4403 * @reg_addr0: address of first dword to be read 4404 * @reg_val0: pointer for data read from 'reg_addr0' 4405 * @reg_addr1: address of second dword to be read 4406 * @reg_val1: pointer for data read from 'reg_addr1' 4407 * 4408 * Read one or two dwords from alternate structure. Fields are indicated 4409 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer 4410 * is not passed then only register at 'reg_addr0' is read. 
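 *
 * i40e_read_bw_from_alt_ram() below is a caller of this helper in this
 * file; it reads the per-PF min/max bandwidth words as one paired read.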
4411 * 4412 **/ 4413 static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, 4414 u32 reg_addr0, u32 *reg_val0, 4415 u32 reg_addr1, u32 *reg_val1) 4416 { 4417 struct i40e_aq_desc desc; 4418 struct i40e_aqc_alternate_write *cmd_resp = 4419 (struct i40e_aqc_alternate_write *)&desc.params.raw; 4420 i40e_status status; 4421 4422 if (!reg_val0) 4423 return I40E_ERR_PARAM; 4424 4425 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); 4426 cmd_resp->address0 = cpu_to_le32(reg_addr0); 4427 cmd_resp->address1 = cpu_to_le32(reg_addr1); 4428 4429 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 4430 4431 if (!status) { 4432 *reg_val0 = le32_to_cpu(cmd_resp->data0); 4433 4434 if (reg_val1) 4435 *reg_val1 = le32_to_cpu(cmd_resp->data1); 4436 } 4437 4438 return status; 4439 } 4440 4441 /** 4442 * i40e_aq_resume_port_tx 4443 * @hw: pointer to the hardware structure 4444 * @cmd_details: pointer to command details structure or NULL 4445 * 4446 * Resume port's Tx traffic 4447 **/ 4448 i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, 4449 struct i40e_asq_cmd_details *cmd_details) 4450 { 4451 struct i40e_aq_desc desc; 4452 i40e_status status; 4453 4454 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); 4455 4456 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4457 4458 return status; 4459 } 4460 4461 /** 4462 * i40e_set_pci_config_data - store PCI bus info 4463 * @hw: pointer to hardware structure 4464 * @link_status: the link status word from PCI config space 4465 * 4466 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure 4467 **/ 4468 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) 4469 { 4470 hw->bus.type = i40e_bus_type_pci_express; 4471 4472 switch (link_status & PCI_EXP_LNKSTA_NLW) { 4473 case PCI_EXP_LNKSTA_NLW_X1: 4474 hw->bus.width = i40e_bus_width_pcie_x1; 4475 break; 4476 case PCI_EXP_LNKSTA_NLW_X2: 4477 hw->bus.width = i40e_bus_width_pcie_x2; 4478 break; 4479 case PCI_EXP_LNKSTA_NLW_X4: 4480 hw->bus.width = i40e_bus_width_pcie_x4; 4481 break; 4482 case PCI_EXP_LNKSTA_NLW_X8: 4483 hw->bus.width = i40e_bus_width_pcie_x8; 4484 break; 4485 default: 4486 hw->bus.width = i40e_bus_width_unknown; 4487 break; 4488 } 4489 4490 switch (link_status & PCI_EXP_LNKSTA_CLS) { 4491 case PCI_EXP_LNKSTA_CLS_2_5GB: 4492 hw->bus.speed = i40e_bus_speed_2500; 4493 break; 4494 case PCI_EXP_LNKSTA_CLS_5_0GB: 4495 hw->bus.speed = i40e_bus_speed_5000; 4496 break; 4497 case PCI_EXP_LNKSTA_CLS_8_0GB: 4498 hw->bus.speed = i40e_bus_speed_8000; 4499 break; 4500 default: 4501 hw->bus.speed = i40e_bus_speed_unknown; 4502 break; 4503 } 4504 } 4505 4506 /** 4507 * i40e_aq_debug_dump 4508 * @hw: pointer to the hardware structure 4509 * @cluster_id: specific cluster to dump 4510 * @table_id: table id within cluster 4511 * @start_index: index of line in the block to read 4512 * @buff_size: dump buffer size 4513 * @buff: dump buffer 4514 * @ret_buff_size: actual buffer size returned 4515 * @ret_next_table: next block to read 4516 * @ret_next_index: next index to read 4517 * @cmd_details: pointer to command details structure or NULL 4518 * 4519 * Dump internal FW/HW data for debug purposes. 
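 *
 * The next-table/next-index outputs allow a cluster to be walked in
 * chunks, e.g. (sketch only; cluster and table IDs are firmware specific
 * and shown here as placeholders):
 *
 *	u8 buf[4096];
 *	u16 rlen;
 *	u8 next_table;
 *	u32 next_index = 0;
 *
 *	status = i40e_aq_debug_dump(hw, cluster_id, table_id, next_index,
 *				    sizeof(buf), buf, &rlen, &next_table,
 *				    &next_index, NULL);
 *
 * A subsequent call would pass the returned next_table/next_index as the
 * new table id and start index.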
4520 * 4521 **/ 4522 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, 4523 u8 table_id, u32 start_index, u16 buff_size, 4524 void *buff, u16 *ret_buff_size, 4525 u8 *ret_next_table, u32 *ret_next_index, 4526 struct i40e_asq_cmd_details *cmd_details) 4527 { 4528 struct i40e_aq_desc desc; 4529 struct i40e_aqc_debug_dump_internals *cmd = 4530 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4531 struct i40e_aqc_debug_dump_internals *resp = 4532 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4533 i40e_status status; 4534 4535 if (buff_size == 0 || !buff) 4536 return I40E_ERR_PARAM; 4537 4538 i40e_fill_default_direct_cmd_desc(&desc, 4539 i40e_aqc_opc_debug_dump_internals); 4540 /* Indirect Command */ 4541 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4542 if (buff_size > I40E_AQ_LARGE_BUF) 4543 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4544 4545 cmd->cluster_id = cluster_id; 4546 cmd->table_id = table_id; 4547 cmd->idx = cpu_to_le32(start_index); 4548 4549 desc.datalen = cpu_to_le16(buff_size); 4550 4551 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4552 if (!status) { 4553 if (ret_buff_size) 4554 *ret_buff_size = le16_to_cpu(desc.datalen); 4555 if (ret_next_table) 4556 *ret_next_table = resp->table_id; 4557 if (ret_next_index) 4558 *ret_next_index = le32_to_cpu(resp->idx); 4559 } 4560 4561 return status; 4562 } 4563 4564 /** 4565 * i40e_read_bw_from_alt_ram 4566 * @hw: pointer to the hardware structure 4567 * @max_bw: pointer for max_bw read 4568 * @min_bw: pointer for min_bw read 4569 * @min_valid: pointer for bool that is true if min_bw is a valid value 4570 * @max_valid: pointer for bool that is true if max_bw is a valid value 4571 * 4572 * Read bw from the alternate ram for the given pf 4573 **/ 4574 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, 4575 u32 *max_bw, u32 *min_bw, 4576 bool *min_valid, bool *max_valid) 4577 { 4578 i40e_status status; 4579 u32 max_bw_addr, min_bw_addr; 4580 4581 /* Calculate the address of the min/max bw registers */ 4582 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4583 I40E_ALT_STRUCT_MAX_BW_OFFSET + 4584 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4585 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4586 I40E_ALT_STRUCT_MIN_BW_OFFSET + 4587 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4588 4589 /* Read the bandwidths from alt ram */ 4590 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, 4591 min_bw_addr, min_bw); 4592 4593 if (*min_bw & I40E_ALT_BW_VALID_MASK) 4594 *min_valid = true; 4595 else 4596 *min_valid = false; 4597 4598 if (*max_bw & I40E_ALT_BW_VALID_MASK) 4599 *max_valid = true; 4600 else 4601 *max_valid = false; 4602 4603 return status; 4604 } 4605 4606 /** 4607 * i40e_aq_configure_partition_bw 4608 * @hw: pointer to the hardware structure 4609 * @bw_data: Buffer holding valid pfs and bw limits 4610 * @cmd_details: pointer to command details 4611 * 4612 * Configure partitions guaranteed/max bw 4613 **/ 4614 i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, 4615 struct i40e_aqc_configure_partition_bw_data *bw_data, 4616 struct i40e_asq_cmd_details *cmd_details) 4617 { 4618 i40e_status status; 4619 struct i40e_aq_desc desc; 4620 u16 bwd_size = sizeof(*bw_data); 4621 4622 i40e_fill_default_direct_cmd_desc(&desc, 4623 i40e_aqc_opc_configure_partition_bw); 4624 4625 /* Indirect command */ 4626 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4627 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4628 4629 if (bwd_size > I40E_AQ_LARGE_BUF) 4630 
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4631 4632 desc.datalen = cpu_to_le16(bwd_size); 4633 4634 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, 4635 cmd_details); 4636 4637 return status; 4638 } 4639 4640 /** 4641 * i40e_read_phy_register_clause22 4642 * @hw: pointer to the HW structure 4643 * @reg: register address in the page 4644 * @phy_addr: PHY address on MDIO interface 4645 * @value: PHY register value 4646 * 4647 * Reads specified PHY register value 4648 **/ 4649 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, 4650 u16 reg, u8 phy_addr, u16 *value) 4651 { 4652 i40e_status status = I40E_ERR_TIMEOUT; 4653 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4654 u32 command = 0; 4655 u16 retry = 1000; 4656 4657 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4658 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4659 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | 4660 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4661 (I40E_GLGEN_MSCA_MDICMD_MASK); 4662 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4663 do { 4664 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4665 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4666 status = 0; 4667 break; 4668 } 4669 udelay(10); 4670 retry--; 4671 } while (retry); 4672 4673 if (status) { 4674 i40e_debug(hw, I40E_DEBUG_PHY, 4675 "PHY: Can't write command to external PHY.\n"); 4676 } else { 4677 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4678 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4679 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4680 } 4681 4682 return status; 4683 } 4684 4685 /** 4686 * i40e_write_phy_register_clause22 4687 * @hw: pointer to the HW structure 4688 * @reg: register address in the page 4689 * @phy_addr: PHY address on MDIO interface 4690 * @value: PHY register value 4691 * 4692 * Writes specified PHY register value 4693 **/ 4694 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, 4695 u16 reg, u8 phy_addr, u16 value) 4696 { 4697 i40e_status status = I40E_ERR_TIMEOUT; 4698 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4699 u32 command = 0; 4700 u16 retry = 1000; 4701 4702 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4703 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4704 4705 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4706 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4707 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | 4708 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4709 (I40E_GLGEN_MSCA_MDICMD_MASK); 4710 4711 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4712 do { 4713 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4714 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4715 status = 0; 4716 break; 4717 } 4718 udelay(10); 4719 retry--; 4720 } while (retry); 4721 4722 return status; 4723 } 4724 4725 /** 4726 * i40e_read_phy_register_clause45 4727 * @hw: pointer to the HW structure 4728 * @page: registers page number 4729 * @reg: register address in the page 4730 * @phy_addr: PHY address on MDIO interface 4731 * @value: PHY register value 4732 * 4733 * Reads specified PHY register value 4734 **/ 4735 i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, 4736 u8 page, u16 reg, u8 phy_addr, u16 *value) 4737 { 4738 i40e_status status = I40E_ERR_TIMEOUT; 4739 u32 command = 0; 4740 u16 retry = 1000; 4741 u8 port_num = hw->func_caps.mdio_port_num; 4742 4743 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4744 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4745 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4746 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4747 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4748 
(I40E_GLGEN_MSCA_MDICMD_MASK) | 4749 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4750 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4751 do { 4752 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4753 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4754 status = 0; 4755 break; 4756 } 4757 usleep_range(10, 20); 4758 retry--; 4759 } while (retry); 4760 4761 if (status) { 4762 i40e_debug(hw, I40E_DEBUG_PHY, 4763 "PHY: Can't write command to external PHY.\n"); 4764 goto phy_read_end; 4765 } 4766 4767 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4768 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4769 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | 4770 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4771 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4772 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4773 status = I40E_ERR_TIMEOUT; 4774 retry = 1000; 4775 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4776 do { 4777 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4778 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4779 status = 0; 4780 break; 4781 } 4782 usleep_range(10, 20); 4783 retry--; 4784 } while (retry); 4785 4786 if (!status) { 4787 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4788 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4789 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4790 } else { 4791 i40e_debug(hw, I40E_DEBUG_PHY, 4792 "PHY: Can't read register value from external PHY.\n"); 4793 } 4794 4795 phy_read_end: 4796 return status; 4797 } 4798 4799 /** 4800 * i40e_write_phy_register_clause45 4801 * @hw: pointer to the HW structure 4802 * @page: registers page number 4803 * @reg: register address in the page 4804 * @phy_addr: PHY address on MDIO interface 4805 * @value: PHY register value 4806 * 4807 * Writes value to specified PHY register 4808 **/ 4809 i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, 4810 u8 page, u16 reg, u8 phy_addr, u16 value) 4811 { 4812 i40e_status status = I40E_ERR_TIMEOUT; 4813 u32 command = 0; 4814 u16 retry = 1000; 4815 u8 port_num = hw->func_caps.mdio_port_num; 4816 4817 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4818 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4819 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4820 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4821 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4822 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4823 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4824 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4825 do { 4826 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4827 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4828 status = 0; 4829 break; 4830 } 4831 usleep_range(10, 20); 4832 retry--; 4833 } while (retry); 4834 if (status) { 4835 i40e_debug(hw, I40E_DEBUG_PHY, 4836 "PHY: Can't write command to external PHY.\n"); 4837 goto phy_write_end; 4838 } 4839 4840 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4841 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4842 4843 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4844 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4845 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | 4846 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4847 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4848 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4849 status = I40E_ERR_TIMEOUT; 4850 retry = 1000; 4851 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4852 do { 4853 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4854 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4855 status = 0; 4856 break; 4857 } 4858 usleep_range(10, 20); 4859 retry--; 4860 } while (retry); 4861 4862 phy_write_end: 4863 return status; 4864 } 4865 4866 /** 4867 * i40e_write_phy_register 4868 * @hw: pointer to the 
HW structure
 * @page: registers page number
 * @reg: register address in the page
 * @phy_addr: PHY address on MDIO interface
 * @value: PHY register value
 *
 * Writes value to specified PHY register
 **/
i40e_status i40e_write_phy_register(struct i40e_hw *hw,
				    u8 page, u16 reg, u8 phy_addr, u16 value)
{
	i40e_status status;

	switch (hw->device_id) {
	case I40E_DEV_ID_1G_BASE_T_X722:
		status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
							  value);
		break;
	case I40E_DEV_ID_10G_BASE_T:
	case I40E_DEV_ID_10G_BASE_T4:
	case I40E_DEV_ID_10G_BASE_T_X722:
	case I40E_DEV_ID_25G_B:
	case I40E_DEV_ID_25G_SFP28:
		status = i40e_write_phy_register_clause45(hw, page, reg,
							  phy_addr, value);
		break;
	default:
		status = I40E_ERR_UNKNOWN_PHY;
		break;
	}

	return status;
}

/**
 * i40e_read_phy_register
 * @hw: pointer to the HW structure
 * @page: registers page number
 * @reg: register address in the page
 * @phy_addr: PHY address on MDIO interface
 * @value: PHY register value
 *
 * Reads specified PHY register value
 **/
i40e_status i40e_read_phy_register(struct i40e_hw *hw,
				   u8 page, u16 reg, u8 phy_addr, u16 *value)
{
	i40e_status status;

	switch (hw->device_id) {
	case I40E_DEV_ID_1G_BASE_T_X722:
		status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
							 value);
		break;
	case I40E_DEV_ID_10G_BASE_T:
	case I40E_DEV_ID_10G_BASE_T4:
	case I40E_DEV_ID_10G_BASE_T_BC:
	case I40E_DEV_ID_10G_BASE_T_X722:
	case I40E_DEV_ID_25G_B:
	case I40E_DEV_ID_25G_SFP28:
		status = i40e_read_phy_register_clause45(hw, page, reg,
							 phy_addr, value);
		break;
	default:
		status = I40E_ERR_UNKNOWN_PHY;
		break;
	}

	return status;
}

/**
 * i40e_get_phy_address
 * @hw: pointer to the HW structure
 * @dev_num: PHY port number whose address is wanted
 *
 * Gets PHY address for current port
 **/
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
{
	u8 port_num = hw->func_caps.mdio_port_num;
	u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));

	return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
}

/**
 * i40e_blink_phy_link_led
 * @hw: pointer to the HW structure
 * @time: how long the LED should blink, in seconds
 * @interval: gap between LED on and off states, in msecs
 *
 * Blinks PHY link LED
 **/
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
				    u32 time, u32 interval)
{
	i40e_status status = 0;
	u32 i;
	u16 led_ctl;
	u16 gpio_led_port;
	u16 led_reg;
	u16 led_addr = I40E_PHY_LED_PROV_REG_1;
	u8 phy_addr = 0;
	u8 port_num;

	i = rd32(hw, I40E_PFGEN_PORTNUM);
	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
	phy_addr = i40e_get_phy_address(hw, port_num);

	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
	     led_addr++) {
		status = i40e_read_phy_register_clause45(hw,
							 I40E_PHY_COM_REG_PAGE,
							 led_addr, phy_addr,
							 &led_reg);
		if (status)
			goto phy_blinking_end;
		led_ctl = led_reg;
		if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
			led_reg = 0;
			status = i40e_write_phy_register_clause45(hw,
								  I40E_PHY_COM_REG_PAGE,
								  led_addr, phy_addr,
								  led_reg);
			if (status)
				goto phy_blinking_end;
			break;
		}
	}

	if (time > 0 && interval > 0) {
		for (i = 0; i < time * 1000; i += interval) {
			status = i40e_read_phy_register_clause45(hw,
								 I40E_PHY_COM_REG_PAGE,
								 led_addr, phy_addr, &led_reg);
			if (status)
				goto restore_config;
			if (led_reg & I40E_PHY_LED_MANUAL_ON)
				led_reg = 0;
			else
				led_reg = I40E_PHY_LED_MANUAL_ON;
			status = i40e_write_phy_register_clause45(hw,
								  I40E_PHY_COM_REG_PAGE,
								  led_addr, phy_addr, led_reg);
			if (status)
				goto restore_config;
			msleep(interval);
		}
	}

restore_config:
	status = i40e_write_phy_register_clause45(hw,
						  I40E_PHY_COM_REG_PAGE,
						  led_addr, phy_addr, led_ctl);

phy_blinking_end:
	return status;
}

/**
 * i40e_led_get_reg - read LED register
 * @hw: pointer to the HW structure
 * @led_addr: LED register address
 * @reg_val: read register value
 **/
static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
					      u32 *reg_val)
{
	enum i40e_status_code status;
	u8 phy_addr = 0;
	u8 port_num;
	u32 i;

	*reg_val = 0;
	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
		status =
		      i40e_aq_get_phy_register(hw,
					       I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
					       I40E_PHY_COM_REG_PAGE,
					       I40E_PHY_LED_PROV_REG_1,
					       reg_val, NULL);
	} else {
		i = rd32(hw, I40E_PFGEN_PORTNUM);
		port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
		phy_addr = i40e_get_phy_address(hw, port_num);
		status = i40e_read_phy_register_clause45(hw,
							 I40E_PHY_COM_REG_PAGE,
							 led_addr, phy_addr,
							 (u16 *)reg_val);
	}
	return status;
}

/**
 * i40e_led_set_reg - write LED register
 * @hw: pointer to the HW structure
 * @led_addr: LED register address
 * @reg_val: register value to write
 **/
static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
					      u32 reg_val)
{
	enum i40e_status_code status;
	u8 phy_addr = 0;
	u8 port_num;
	u32 i;

	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
		status =
		      i40e_aq_set_phy_register(hw,
					       I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
					       I40E_PHY_COM_REG_PAGE,
					       I40E_PHY_LED_PROV_REG_1,
					       reg_val, NULL);
	} else {
		i = rd32(hw, I40E_PFGEN_PORTNUM);
		port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
		phy_addr = i40e_get_phy_address(hw, port_num);
		status = i40e_write_phy_register_clause45(hw,
							  I40E_PHY_COM_REG_PAGE,
							  led_addr, phy_addr,
							  (u16)reg_val);
	}

	return status;
}

/**
 * i40e_led_get_phy - return current on/off mode
 * @hw: pointer to the hw struct
 * @led_addr: address of led register to use
 * @val: original value of register to use
 *
 **/
i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
			     u16 *val)
{
	i40e_status status = 0;
	u16 gpio_led_port;
	u8 phy_addr = 0;
	u16 reg_val;
	u16 temp_addr;
	u8 port_num;
	u32 i;
	u32 reg_val_aq;

	if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
		status =
		      i40e_aq_get_phy_register(hw,
					       I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
					       I40E_PHY_COM_REG_PAGE,
					       I40E_PHY_LED_PROV_REG_1,
					       &reg_val_aq, NULL);
		if (status == I40E_SUCCESS)
			*val = (u16)reg_val_aq;
		return status;
	}
	temp_addr = I40E_PHY_LED_PROV_REG_1;
	i = rd32(hw, I40E_PFGEN_PORTNUM);
	port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
	phy_addr = i40e_get_phy_address(hw, port_num);

	for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
	     temp_addr++) {
		status = i40e_read_phy_register_clause45(hw,
							 I40E_PHY_COM_REG_PAGE,
							 temp_addr, phy_addr,
							 &reg_val);
		if (status)
			return status;
		*val = reg_val;
		if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
			*led_addr = temp_addr;
			break;
		}
	}
	return status;
}

/**
 * i40e_led_set_phy
 * @hw: pointer to the HW structure
 * @on: true to turn the LED on, false to turn it off
 * @led_addr: address of led register to use
 * @mode: original val plus bit for set or ignore
 *
 * Set the LED on or off when controlled by the PHY
 *
 **/
i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
			     u16 led_addr, u32 mode)
{
	i40e_status status = 0;
	u32 led_ctl = 0;
	u32 led_reg = 0;

	status = i40e_led_get_reg(hw, led_addr, &led_reg);
	if (status)
		return status;
	led_ctl = led_reg;
	if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
		led_reg = 0;
		status = i40e_led_set_reg(hw, led_addr, led_reg);
		if (status)
			return status;
	}
	status = i40e_led_get_reg(hw, led_addr, &led_reg);
	if (status)
		goto restore_config;
	if (on)
		led_reg = I40E_PHY_LED_MANUAL_ON;
	else
		led_reg = 0;

	status = i40e_led_set_reg(hw, led_addr, led_reg);
	if (status)
		goto restore_config;
	if (mode & I40E_PHY_LED_MODE_ORIG) {
		led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
		status = i40e_led_set_reg(hw, led_addr, led_ctl);
	}
	return status;

restore_config:
	status = i40e_led_set_reg(hw, led_addr, led_ctl);
	return status;
}

/**
 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 * @reg_val: ptr to register value
 * @cmd_details: pointer to command details structure or NULL
 *
 * Use the firmware to read the Rx control register,
 * especially useful if the Rx unit is under heavy pressure
 **/
i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
					 u32 reg_addr, u32 *reg_val,
					 struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
		(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
	i40e_status status;

	if (!reg_val)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);

	cmd_resp->address = cpu_to_le32(reg_addr);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	if (status == 0)
		*reg_val = le32_to_cpu(cmd_resp->value);

	return status;
}

/**
 * i40e_read_rx_ctl - read from an Rx control register
 * @hw: pointer to the hw struct
 * @reg_addr: register address
 **/
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
{
	i40e_status status = 0;
	bool use_register;
	int retry = 5;
	u32 val = 0;

	use_register = (((hw->aq.api_maj_ver == 1) &&
			 (hw->aq.api_min_ver < 5)) ||
			(hw->mac.type == I40E_MAC_X722));
	if (!use_register) {
do_retry:
		status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
		if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
			usleep_range(1000, 2000);
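			/* EAGAIN from firmware is treated as transient here:
			 * back off briefly and retry the AQ read a bounded
			 * number of times before falling back to the direct
			 * register read below.
			 */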
retry--; 5252 goto do_retry; 5253 } 5254 } 5255 5256 /* if the AQ access failed, try the old-fashioned way */ 5257 if (status || use_register) 5258 val = rd32(hw, reg_addr); 5259 5260 return val; 5261 } 5262 5263 /** 5264 * i40e_aq_rx_ctl_write_register 5265 * @hw: pointer to the hw struct 5266 * @reg_addr: register address 5267 * @reg_val: register value 5268 * @cmd_details: pointer to command details structure or NULL 5269 * 5270 * Use the firmware to write to an Rx control register, 5271 * especially useful if the Rx unit is under heavy pressure 5272 **/ 5273 i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, 5274 u32 reg_addr, u32 reg_val, 5275 struct i40e_asq_cmd_details *cmd_details) 5276 { 5277 struct i40e_aq_desc desc; 5278 struct i40e_aqc_rx_ctl_reg_read_write *cmd = 5279 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5280 i40e_status status; 5281 5282 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); 5283 5284 cmd->address = cpu_to_le32(reg_addr); 5285 cmd->value = cpu_to_le32(reg_val); 5286 5287 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5288 5289 return status; 5290 } 5291 5292 /** 5293 * i40e_write_rx_ctl - write to an Rx control register 5294 * @hw: pointer to the hw struct 5295 * @reg_addr: register address 5296 * @reg_val: register value 5297 **/ 5298 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) 5299 { 5300 i40e_status status = 0; 5301 bool use_register; 5302 int retry = 5; 5303 5304 use_register = (((hw->aq.api_maj_ver == 1) && 5305 (hw->aq.api_min_ver < 5)) || 5306 (hw->mac.type == I40E_MAC_X722)); 5307 if (!use_register) { 5308 do_retry: 5309 status = i40e_aq_rx_ctl_write_register(hw, reg_addr, 5310 reg_val, NULL); 5311 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5312 usleep_range(1000, 2000); 5313 retry--; 5314 goto do_retry; 5315 } 5316 } 5317 5318 /* if the AQ access failed, try the old-fashioned way */ 5319 if (status || use_register) 5320 wr32(hw, reg_addr, reg_val); 5321 } 5322 5323 /** 5324 * i40e_aq_set_phy_register 5325 * @hw: pointer to the hw struct 5326 * @phy_select: select which phy should be accessed 5327 * @dev_addr: PHY device address 5328 * @reg_addr: PHY register address 5329 * @reg_val: new register value 5330 * @cmd_details: pointer to command details structure or NULL 5331 * 5332 * Write the external PHY register. 5333 **/ 5334 i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, 5335 u8 phy_select, u8 dev_addr, 5336 u32 reg_addr, u32 reg_val, 5337 struct i40e_asq_cmd_details *cmd_details) 5338 { 5339 struct i40e_aq_desc desc; 5340 struct i40e_aqc_phy_register_access *cmd = 5341 (struct i40e_aqc_phy_register_access *)&desc.params.raw; 5342 i40e_status status; 5343 5344 i40e_fill_default_direct_cmd_desc(&desc, 5345 i40e_aqc_opc_set_phy_register); 5346 5347 cmd->phy_interface = phy_select; 5348 cmd->dev_address = dev_addr; 5349 cmd->reg_address = cpu_to_le32(reg_addr); 5350 cmd->reg_value = cpu_to_le32(reg_val); 5351 5352 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5353 5354 return status; 5355 } 5356 5357 /** 5358 * i40e_aq_get_phy_register 5359 * @hw: pointer to the hw struct 5360 * @phy_select: select which phy should be accessed 5361 * @dev_addr: PHY device address 5362 * @reg_addr: PHY register address 5363 * @reg_val: read register value 5364 * @cmd_details: pointer to command details structure or NULL 5365 * 5366 * Read the external PHY register. 
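 *
 * Sketch of a read from an external (MDIO-attached) PHY register; the
 * page and register arguments are placeholders (i40e_led_get_reg() above
 * shows the real in-file usage):
 *
 *	u32 val = 0;
 *
 *	status = i40e_aq_get_phy_register(hw,
 *					  I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *					  I40E_PHY_COM_REG_PAGE, reg_addr,
 *					  &val, NULL);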
5367 **/ 5368 i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, 5369 u8 phy_select, u8 dev_addr, 5370 u32 reg_addr, u32 *reg_val, 5371 struct i40e_asq_cmd_details *cmd_details) 5372 { 5373 struct i40e_aq_desc desc; 5374 struct i40e_aqc_phy_register_access *cmd = 5375 (struct i40e_aqc_phy_register_access *)&desc.params.raw; 5376 i40e_status status; 5377 5378 i40e_fill_default_direct_cmd_desc(&desc, 5379 i40e_aqc_opc_get_phy_register); 5380 5381 cmd->phy_interface = phy_select; 5382 cmd->dev_address = dev_addr; 5383 cmd->reg_address = cpu_to_le32(reg_addr); 5384 5385 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5386 if (!status) 5387 *reg_val = le32_to_cpu(cmd->reg_value); 5388 5389 return status; 5390 } 5391 5392 /** 5393 * i40e_aq_write_ddp - Write dynamic device personalization (ddp) 5394 * @hw: pointer to the hw struct 5395 * @buff: command buffer (size in bytes = buff_size) 5396 * @buff_size: buffer size in bytes 5397 * @track_id: package tracking id 5398 * @error_offset: returns error offset 5399 * @error_info: returns error information 5400 * @cmd_details: pointer to command details structure or NULL 5401 **/ 5402 enum 5403 i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, 5404 u16 buff_size, u32 track_id, 5405 u32 *error_offset, u32 *error_info, 5406 struct i40e_asq_cmd_details *cmd_details) 5407 { 5408 struct i40e_aq_desc desc; 5409 struct i40e_aqc_write_personalization_profile *cmd = 5410 (struct i40e_aqc_write_personalization_profile *) 5411 &desc.params.raw; 5412 struct i40e_aqc_write_ddp_resp *resp; 5413 i40e_status status; 5414 5415 i40e_fill_default_direct_cmd_desc(&desc, 5416 i40e_aqc_opc_write_personalization_profile); 5417 5418 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 5419 if (buff_size > I40E_AQ_LARGE_BUF) 5420 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5421 5422 desc.datalen = cpu_to_le16(buff_size); 5423 5424 cmd->profile_track_id = cpu_to_le32(track_id); 5425 5426 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 5427 if (!status) { 5428 resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; 5429 if (error_offset) 5430 *error_offset = le32_to_cpu(resp->error_offset); 5431 if (error_info) 5432 *error_info = le32_to_cpu(resp->error_info); 5433 } 5434 5435 return status; 5436 } 5437 5438 /** 5439 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp) 5440 * @hw: pointer to the hw struct 5441 * @buff: command buffer (size in bytes = buff_size) 5442 * @buff_size: buffer size in bytes 5443 * @flags: AdminQ command flags 5444 * @cmd_details: pointer to command details structure or NULL 5445 **/ 5446 enum 5447 i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, 5448 u16 buff_size, u8 flags, 5449 struct i40e_asq_cmd_details *cmd_details) 5450 { 5451 struct i40e_aq_desc desc; 5452 struct i40e_aqc_get_applied_profiles *cmd = 5453 (struct i40e_aqc_get_applied_profiles *)&desc.params.raw; 5454 i40e_status status; 5455 5456 i40e_fill_default_direct_cmd_desc(&desc, 5457 i40e_aqc_opc_get_personalization_profile_list); 5458 5459 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 5460 if (buff_size > I40E_AQ_LARGE_BUF) 5461 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5462 desc.datalen = cpu_to_le16(buff_size); 5463 5464 cmd->flags = flags; 5465 5466 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 5467 5468 return status; 5469 } 5470 5471 /** 5472 * i40e_find_segment_in_package 5473 * @segment_type: the segment type to search for 
(i.e., SEGMENT_TYPE_I40E) 5474 * @pkg_hdr: pointer to the package header to be searched 5475 * 5476 * This function searches a package file for a particular segment type. On 5477 * success it returns a pointer to the segment header, otherwise it will 5478 * return NULL. 5479 **/ 5480 struct i40e_generic_seg_header * 5481 i40e_find_segment_in_package(u32 segment_type, 5482 struct i40e_package_header *pkg_hdr) 5483 { 5484 struct i40e_generic_seg_header *segment; 5485 u32 i; 5486 5487 /* Search all package segments for the requested segment type */ 5488 for (i = 0; i < pkg_hdr->segment_count; i++) { 5489 segment = 5490 (struct i40e_generic_seg_header *)((u8 *)pkg_hdr + 5491 pkg_hdr->segment_offset[i]); 5492 5493 if (segment->type == segment_type) 5494 return segment; 5495 } 5496 5497 return NULL; 5498 } 5499 5500 /* Get section table in profile */ 5501 #define I40E_SECTION_TABLE(profile, sec_tbl) \ 5502 do { \ 5503 struct i40e_profile_segment *p = (profile); \ 5504 u32 count; \ 5505 u32 *nvm; \ 5506 count = p->device_table_count; \ 5507 nvm = (u32 *)&p->device_table[count]; \ 5508 sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \ 5509 } while (0) 5510 5511 /* Get section header in profile */ 5512 #define I40E_SECTION_HEADER(profile, offset) \ 5513 (struct i40e_profile_section_header *)((u8 *)(profile) + (offset)) 5514 5515 /** 5516 * i40e_find_section_in_profile 5517 * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE) 5518 * @profile: pointer to the i40e segment header to be searched 5519 * 5520 * This function searches i40e segment for a particular section type. On 5521 * success it returns a pointer to the section header, otherwise it will 5522 * return NULL. 5523 **/ 5524 struct i40e_profile_section_header * 5525 i40e_find_section_in_profile(u32 section_type, 5526 struct i40e_profile_segment *profile) 5527 { 5528 struct i40e_profile_section_header *sec; 5529 struct i40e_section_table *sec_tbl; 5530 u32 sec_off; 5531 u32 i; 5532 5533 if (profile->header.type != SEGMENT_TYPE_I40E) 5534 return NULL; 5535 5536 I40E_SECTION_TABLE(profile, sec_tbl); 5537 5538 for (i = 0; i < sec_tbl->section_count; i++) { 5539 sec_off = sec_tbl->section_offset[i]; 5540 sec = I40E_SECTION_HEADER(profile, sec_off); 5541 if (sec->section.type == section_type) 5542 return sec; 5543 } 5544 5545 return NULL; 5546 } 5547 5548 /** 5549 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP 5550 * @hw: pointer to the hw struct 5551 * @aq: command buffer containing all data to execute AQ 5552 **/ 5553 static enum 5554 i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw, 5555 struct i40e_profile_aq_section *aq) 5556 { 5557 i40e_status status; 5558 struct i40e_aq_desc desc; 5559 u8 *msg = NULL; 5560 u16 msglen; 5561 5562 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode); 5563 desc.flags |= cpu_to_le16(aq->flags); 5564 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw)); 5565 5566 msglen = aq->datalen; 5567 if (msglen) { 5568 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 5569 I40E_AQ_FLAG_RD)); 5570 if (msglen > I40E_AQ_LARGE_BUF) 5571 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5572 desc.datalen = cpu_to_le16(msglen); 5573 msg = &aq->data[0]; 5574 } 5575 5576 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL); 5577 5578 if (status) { 5579 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5580 "unable to exec DDP AQ opcode %u, error %d\n", 5581 aq->opcode, status); 5582 return status; 5583 } 5584 5585 /* copy returned desc to aq_buf */ 5586 memcpy(aq->param, 
desc.params.raw, sizeof(desc.params.raw)); 5587 5588 return 0; 5589 } 5590 5591 /** 5592 * i40e_validate_profile 5593 * @hw: pointer to the hardware structure 5594 * @profile: pointer to the profile segment of the package to be validated 5595 * @track_id: package tracking id 5596 * @rollback: flag if the profile is for rollback. 5597 * 5598 * Validates supported devices and profile's sections. 5599 */ 5600 static enum i40e_status_code 5601 i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5602 u32 track_id, bool rollback) 5603 { 5604 struct i40e_profile_section_header *sec = NULL; 5605 i40e_status status = 0; 5606 struct i40e_section_table *sec_tbl; 5607 u32 vendor_dev_id; 5608 u32 dev_cnt; 5609 u32 sec_off; 5610 u32 i; 5611 5612 if (track_id == I40E_DDP_TRACKID_INVALID) { 5613 i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n"); 5614 return I40E_NOT_SUPPORTED; 5615 } 5616 5617 dev_cnt = profile->device_table_count; 5618 for (i = 0; i < dev_cnt; i++) { 5619 vendor_dev_id = profile->device_table[i].vendor_dev_id; 5620 if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL && 5621 hw->device_id == (vendor_dev_id & 0xFFFF)) 5622 break; 5623 } 5624 if (dev_cnt && i == dev_cnt) { 5625 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5626 "Device doesn't support DDP\n"); 5627 return I40E_ERR_DEVICE_NOT_SUPPORTED; 5628 } 5629 5630 I40E_SECTION_TABLE(profile, sec_tbl); 5631 5632 /* Validate sections types */ 5633 for (i = 0; i < sec_tbl->section_count; i++) { 5634 sec_off = sec_tbl->section_offset[i]; 5635 sec = I40E_SECTION_HEADER(profile, sec_off); 5636 if (rollback) { 5637 if (sec->section.type == SECTION_TYPE_MMIO || 5638 sec->section.type == SECTION_TYPE_AQ || 5639 sec->section.type == SECTION_TYPE_RB_AQ) { 5640 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5641 "Not a roll-back package\n"); 5642 return I40E_NOT_SUPPORTED; 5643 } 5644 } else { 5645 if (sec->section.type == SECTION_TYPE_RB_AQ || 5646 sec->section.type == SECTION_TYPE_RB_MMIO) { 5647 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5648 "Not an original package\n"); 5649 return I40E_NOT_SUPPORTED; 5650 } 5651 } 5652 } 5653 5654 return status; 5655 } 5656 5657 /** 5658 * i40e_write_profile 5659 * @hw: pointer to the hardware structure 5660 * @profile: pointer to the profile segment of the package to be downloaded 5661 * @track_id: package tracking id 5662 * 5663 * Handles the download of a complete package. 
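 *
 * A typical download sequence might look like the following sketch (not
 * code from the driver; "pkg_hdr", "pinfo_sec" and "track_id" are
 * caller-provided placeholders):
 *
 *	struct i40e_profile_segment *profile;
 *
 *	profile = (struct i40e_profile_segment *)
 *		  i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
 *	if (profile && !i40e_write_profile(hw, profile, track_id))
 *		i40e_add_pinfo_to_list(hw, profile, pinfo_sec, track_id);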
5664 */ 5665 enum i40e_status_code 5666 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5667 u32 track_id) 5668 { 5669 i40e_status status = 0; 5670 struct i40e_section_table *sec_tbl; 5671 struct i40e_profile_section_header *sec = NULL; 5672 struct i40e_profile_aq_section *ddp_aq; 5673 u32 section_size = 0; 5674 u32 offset = 0, info = 0; 5675 u32 sec_off; 5676 u32 i; 5677 5678 status = i40e_validate_profile(hw, profile, track_id, false); 5679 if (status) 5680 return status; 5681 5682 I40E_SECTION_TABLE(profile, sec_tbl); 5683 5684 for (i = 0; i < sec_tbl->section_count; i++) { 5685 sec_off = sec_tbl->section_offset[i]; 5686 sec = I40E_SECTION_HEADER(profile, sec_off); 5687 /* Process generic admin command */ 5688 if (sec->section.type == SECTION_TYPE_AQ) { 5689 ddp_aq = (struct i40e_profile_aq_section *)&sec[1]; 5690 status = i40e_ddp_exec_aq_section(hw, ddp_aq); 5691 if (status) { 5692 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5693 "Failed to execute aq: section %d, opcode %u\n", 5694 i, ddp_aq->opcode); 5695 break; 5696 } 5697 sec->section.type = SECTION_TYPE_RB_AQ; 5698 } 5699 5700 /* Skip any non-mmio sections */ 5701 if (sec->section.type != SECTION_TYPE_MMIO) 5702 continue; 5703 5704 section_size = sec->section.size + 5705 sizeof(struct i40e_profile_section_header); 5706 5707 /* Write MMIO section */ 5708 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, 5709 track_id, &offset, &info, NULL); 5710 if (status) { 5711 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5712 "Failed to write profile: section %d, offset %d, info %d\n", 5713 i, offset, info); 5714 break; 5715 } 5716 } 5717 return status; 5718 } 5719 5720 /** 5721 * i40e_rollback_profile 5722 * @hw: pointer to the hardware structure 5723 * @profile: pointer to the profile segment of the package to be removed 5724 * @track_id: package tracking id 5725 * 5726 * Rolls back previously loaded package. 5727 */ 5728 enum i40e_status_code 5729 i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5730 u32 track_id) 5731 { 5732 struct i40e_profile_section_header *sec = NULL; 5733 i40e_status status = 0; 5734 struct i40e_section_table *sec_tbl; 5735 u32 offset = 0, info = 0; 5736 u32 section_size = 0; 5737 u32 sec_off; 5738 int i; 5739 5740 status = i40e_validate_profile(hw, profile, track_id, true); 5741 if (status) 5742 return status; 5743 5744 I40E_SECTION_TABLE(profile, sec_tbl); 5745 5746 /* For rollback write sections in reverse */ 5747 for (i = sec_tbl->section_count - 1; i >= 0; i--) { 5748 sec_off = sec_tbl->section_offset[i]; 5749 sec = I40E_SECTION_HEADER(profile, sec_off); 5750 5751 /* Skip any non-rollback sections */ 5752 if (sec->section.type != SECTION_TYPE_RB_MMIO) 5753 continue; 5754 5755 section_size = sec->section.size + 5756 sizeof(struct i40e_profile_section_header); 5757 5758 /* Write roll-back MMIO section */ 5759 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, 5760 track_id, &offset, &info, NULL); 5761 if (status) { 5762 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5763 "Failed to write profile: section %d, offset %d, info %d\n", 5764 i, offset, info); 5765 break; 5766 } 5767 } 5768 return status; 5769 } 5770 5771 /** 5772 * i40e_add_pinfo_to_list 5773 * @hw: pointer to the hardware structure 5774 * @profile: pointer to the profile segment of the package 5775 * @profile_info_sec: buffer for information section 5776 * @track_id: package tracking id 5777 * 5778 * Register a profile to the list of loaded profiles. 
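 *
 * The @profile_info_sec buffer is caller-allocated and must hold at least
 * sizeof(struct i40e_profile_section_header) +
 * sizeof(struct i40e_profile_info) bytes; the function builds a
 * SECTION_TYPE_INFO section in it and sends it to the firmware with
 * i40e_aq_write_ddp().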
5779 */ 5780 enum i40e_status_code 5781 i40e_add_pinfo_to_list(struct i40e_hw *hw, 5782 struct i40e_profile_segment *profile, 5783 u8 *profile_info_sec, u32 track_id) 5784 { 5785 i40e_status status = 0; 5786 struct i40e_profile_section_header *sec = NULL; 5787 struct i40e_profile_info *pinfo; 5788 u32 offset = 0, info = 0; 5789 5790 sec = (struct i40e_profile_section_header *)profile_info_sec; 5791 sec->tbl_size = 1; 5792 sec->data_end = sizeof(struct i40e_profile_section_header) + 5793 sizeof(struct i40e_profile_info); 5794 sec->section.type = SECTION_TYPE_INFO; 5795 sec->section.offset = sizeof(struct i40e_profile_section_header); 5796 sec->section.size = sizeof(struct i40e_profile_info); 5797 pinfo = (struct i40e_profile_info *)(profile_info_sec + 5798 sec->section.offset); 5799 pinfo->track_id = track_id; 5800 pinfo->version = profile->version; 5801 pinfo->op = I40E_DDP_ADD_TRACKID; 5802 memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); 5803 5804 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, 5805 track_id, &offset, &info, NULL); 5806 5807 return status; 5808 } 5809 5810 /** 5811 * i40e_aq_add_cloud_filters 5812 * @hw: pointer to the hardware structure 5813 * @seid: VSI seid to add cloud filters from 5814 * @filters: Buffer which contains the filters to be added 5815 * @filter_count: number of filters contained in the buffer 5816 * 5817 * Set the cloud filters for a given VSI. The contents of the 5818 * i40e_aqc_cloud_filters_element_data are filled in by the caller 5819 * of the function. 5820 * 5821 **/ 5822 enum i40e_status_code 5823 i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid, 5824 struct i40e_aqc_cloud_filters_element_data *filters, 5825 u8 filter_count) 5826 { 5827 struct i40e_aq_desc desc; 5828 struct i40e_aqc_add_remove_cloud_filters *cmd = 5829 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5830 enum i40e_status_code status; 5831 u16 buff_len; 5832 5833 i40e_fill_default_direct_cmd_desc(&desc, 5834 i40e_aqc_opc_add_cloud_filters); 5835 5836 buff_len = filter_count * sizeof(*filters); 5837 desc.datalen = cpu_to_le16(buff_len); 5838 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5839 cmd->num_filters = filter_count; 5840 cmd->seid = cpu_to_le16(seid); 5841 5842 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5843 5844 return status; 5845 } 5846 5847 /** 5848 * i40e_aq_add_cloud_filters_bb 5849 * @hw: pointer to the hardware structure 5850 * @seid: VSI seid to add cloud filters from 5851 * @filters: Buffer which contains the filters in big buffer to be added 5852 * @filter_count: number of filters contained in the buffer 5853 * 5854 * Set the big buffer cloud filters for a given VSI. The contents of the 5855 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the 5856 * function. 
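 *
 * Note that for Geneve tunnels the tenant ID (VNI) of each element is
 * shifted left by one byte before the command is sent, to match the
 * placement the hardware expects (see the loop below).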
5857 * 5858 **/ 5859 enum i40e_status_code 5860 i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, 5861 struct i40e_aqc_cloud_filters_element_bb *filters, 5862 u8 filter_count) 5863 { 5864 struct i40e_aq_desc desc; 5865 struct i40e_aqc_add_remove_cloud_filters *cmd = 5866 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5867 i40e_status status; 5868 u16 buff_len; 5869 int i; 5870 5871 i40e_fill_default_direct_cmd_desc(&desc, 5872 i40e_aqc_opc_add_cloud_filters); 5873 5874 buff_len = filter_count * sizeof(*filters); 5875 desc.datalen = cpu_to_le16(buff_len); 5876 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5877 cmd->num_filters = filter_count; 5878 cmd->seid = cpu_to_le16(seid); 5879 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; 5880 5881 for (i = 0; i < filter_count; i++) { 5882 u16 tnl_type; 5883 u32 ti; 5884 5885 tnl_type = (le16_to_cpu(filters[i].element.flags) & 5886 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> 5887 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; 5888 5889 /* Due to hardware eccentricities, the VNI for Geneve is shifted 5890 * one more byte further than normally used for Tenant ID in 5891 * other tunnel types. 5892 */ 5893 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { 5894 ti = le32_to_cpu(filters[i].element.tenant_id); 5895 filters[i].element.tenant_id = cpu_to_le32(ti << 8); 5896 } 5897 } 5898 5899 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5900 5901 return status; 5902 } 5903 5904 /** 5905 * i40e_aq_rem_cloud_filters 5906 * @hw: pointer to the hardware structure 5907 * @seid: VSI seid to remove cloud filters from 5908 * @filters: Buffer which contains the filters to be removed 5909 * @filter_count: number of filters contained in the buffer 5910 * 5911 * Remove the cloud filters for a given VSI. The contents of the 5912 * i40e_aqc_cloud_filters_element_data are filled in by the caller 5913 * of the function. 5914 * 5915 **/ 5916 enum i40e_status_code 5917 i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid, 5918 struct i40e_aqc_cloud_filters_element_data *filters, 5919 u8 filter_count) 5920 { 5921 struct i40e_aq_desc desc; 5922 struct i40e_aqc_add_remove_cloud_filters *cmd = 5923 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5924 enum i40e_status_code status; 5925 u16 buff_len; 5926 5927 i40e_fill_default_direct_cmd_desc(&desc, 5928 i40e_aqc_opc_remove_cloud_filters); 5929 5930 buff_len = filter_count * sizeof(*filters); 5931 desc.datalen = cpu_to_le16(buff_len); 5932 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5933 cmd->num_filters = filter_count; 5934 cmd->seid = cpu_to_le16(seid); 5935 5936 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5937 5938 return status; 5939 } 5940 5941 /** 5942 * i40e_aq_rem_cloud_filters_bb 5943 * @hw: pointer to the hardware structure 5944 * @seid: VSI seid to remove cloud filters from 5945 * @filters: Buffer which contains the filters in big buffer to be removed 5946 * @filter_count: number of filters contained in the buffer 5947 * 5948 * Remove the big buffer cloud filters for a given VSI. The contents of the 5949 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the 5950 * function. 
5951 * 5952 **/ 5953 enum i40e_status_code 5954 i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, 5955 struct i40e_aqc_cloud_filters_element_bb *filters, 5956 u8 filter_count) 5957 { 5958 struct i40e_aq_desc desc; 5959 struct i40e_aqc_add_remove_cloud_filters *cmd = 5960 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5961 i40e_status status; 5962 u16 buff_len; 5963 int i; 5964 5965 i40e_fill_default_direct_cmd_desc(&desc, 5966 i40e_aqc_opc_remove_cloud_filters); 5967 5968 buff_len = filter_count * sizeof(*filters); 5969 desc.datalen = cpu_to_le16(buff_len); 5970 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5971 cmd->num_filters = filter_count; 5972 cmd->seid = cpu_to_le16(seid); 5973 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; 5974 5975 for (i = 0; i < filter_count; i++) { 5976 u16 tnl_type; 5977 u32 ti; 5978 5979 tnl_type = (le16_to_cpu(filters[i].element.flags) & 5980 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> 5981 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; 5982 5983 /* Due to hardware eccentricities, the VNI for Geneve is shifted 5984 * one more byte further than normally used for Tenant ID in 5985 * other tunnel types. 5986 */ 5987 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { 5988 ti = le32_to_cpu(filters[i].element.tenant_id); 5989 filters[i].element.tenant_id = cpu_to_le32(ti << 8); 5990 } 5991 } 5992 5993 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5994 5995 return status; 5996 } 5997
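
/*
 * Illustrative call pattern for the cloud filter helpers above (a sketch,
 * not code from the driver; the VSI "seid" and the element contents are
 * assumptions supplied by the caller):
 *
 *	struct i40e_aqc_cloud_filters_element_data filter = {};
 *
 *	(fill in the element's match fields and flags here)
 *
 *	if (!i40e_aq_add_cloud_filters(hw, seid, &filter, 1)) {
 *		(the filter is now active; it can later be removed by
 *		 passing the same element back)
 *		i40e_aq_rem_cloud_filters(hw, seid, &filter, 1);
 *	}
 */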