// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e_type.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"
#include <linux/avf/virtchnl.h>

/**
 * i40e_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the mac type of the adapter based on the
 * vendor ID and device ID stored in the hw structure.
 **/
static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
{
	i40e_status status = 0;

	if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
		switch (hw->device_id) {
		case I40E_DEV_ID_SFP_XL710:
		case I40E_DEV_ID_QEMU:
		case I40E_DEV_ID_KX_B:
		case I40E_DEV_ID_KX_C:
		case I40E_DEV_ID_QSFP_A:
		case I40E_DEV_ID_QSFP_B:
		case I40E_DEV_ID_QSFP_C:
		case I40E_DEV_ID_10G_BASE_T:
		case I40E_DEV_ID_10G_BASE_T4:
		case I40E_DEV_ID_10G_B:
		case I40E_DEV_ID_10G_SFP:
		case I40E_DEV_ID_20G_KR2:
		case I40E_DEV_ID_20G_KR2_A:
		case I40E_DEV_ID_25G_B:
		case I40E_DEV_ID_25G_SFP28:
		case I40E_DEV_ID_X710_N3000:
		case I40E_DEV_ID_XXV710_N3000:
			hw->mac.type = I40E_MAC_XL710;
			break;
		case I40E_DEV_ID_KX_X722:
		case I40E_DEV_ID_QSFP_X722:
		case I40E_DEV_ID_SFP_X722:
		case I40E_DEV_ID_1G_BASE_T_X722:
		case I40E_DEV_ID_10G_BASE_T_X722:
		case I40E_DEV_ID_SFP_I_X722:
			hw->mac.type = I40E_MAC_X722;
			break;
		default:
			hw->mac.type = I40E_MAC_GENERIC;
			break;
		}
	} else {
		status = I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw_dbg(hw, "i40e_set_mac_type found mac: %d, returns: %d\n",
	       hw->mac.type, status);
	return status;
}

/**
 * i40e_aq_str - convert AQ err code to a string
 * @hw: pointer to the HW structure
 * @aq_err: the AQ error code to convert
 **/
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
{
	switch (aq_err) {
	case I40E_AQ_RC_OK:
		return "OK";
	case I40E_AQ_RC_EPERM:
		return "I40E_AQ_RC_EPERM";
	case I40E_AQ_RC_ENOENT:
		return "I40E_AQ_RC_ENOENT";
	case I40E_AQ_RC_ESRCH:
		return "I40E_AQ_RC_ESRCH";
	case I40E_AQ_RC_EINTR:
		return "I40E_AQ_RC_EINTR";
	case I40E_AQ_RC_EIO:
		return "I40E_AQ_RC_EIO";
	case I40E_AQ_RC_ENXIO:
		return "I40E_AQ_RC_ENXIO";
	case I40E_AQ_RC_E2BIG:
		return "I40E_AQ_RC_E2BIG";
	case I40E_AQ_RC_EAGAIN:
		return "I40E_AQ_RC_EAGAIN";
	case I40E_AQ_RC_ENOMEM:
		return "I40E_AQ_RC_ENOMEM";
	case I40E_AQ_RC_EACCES:
		return "I40E_AQ_RC_EACCES";
	case I40E_AQ_RC_EFAULT:
		return "I40E_AQ_RC_EFAULT";
	case I40E_AQ_RC_EBUSY:
		return "I40E_AQ_RC_EBUSY";
	case I40E_AQ_RC_EEXIST:
		return "I40E_AQ_RC_EEXIST";
	case I40E_AQ_RC_EINVAL:
		return "I40E_AQ_RC_EINVAL";
	case I40E_AQ_RC_ENOTTY:
		return "I40E_AQ_RC_ENOTTY";
	case I40E_AQ_RC_ENOSPC:
		return "I40E_AQ_RC_ENOSPC";
	case I40E_AQ_RC_ENOSYS:
		return "I40E_AQ_RC_ENOSYS";
	case I40E_AQ_RC_ERANGE:
		return "I40E_AQ_RC_ERANGE";
	case I40E_AQ_RC_EFLUSHED:
		return "I40E_AQ_RC_EFLUSHED";
	case I40E_AQ_RC_BAD_ADDR:
		return "I40E_AQ_RC_BAD_ADDR";
	case I40E_AQ_RC_EMODE:
		return "I40E_AQ_RC_EMODE";
	case I40E_AQ_RC_EFBIG:
		return "I40E_AQ_RC_EFBIG";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
	return hw->err_str;
}

/**
 * i40e_stat_str - convert status err code to a string
 * @hw: pointer to the HW structure
 * @stat_err: the status error code to convert
 **/
const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
{
	switch (stat_err) {
	case 0:
		return "OK";
	case I40E_ERR_NVM:
		return "I40E_ERR_NVM";
	case I40E_ERR_NVM_CHECKSUM:
		return "I40E_ERR_NVM_CHECKSUM";
	case I40E_ERR_PHY:
		return "I40E_ERR_PHY";
	case I40E_ERR_CONFIG:
		return "I40E_ERR_CONFIG";
	case I40E_ERR_PARAM:
		return "I40E_ERR_PARAM";
	case I40E_ERR_MAC_TYPE:
		return "I40E_ERR_MAC_TYPE";
	case I40E_ERR_UNKNOWN_PHY:
		return "I40E_ERR_UNKNOWN_PHY";
	case I40E_ERR_LINK_SETUP:
		return "I40E_ERR_LINK_SETUP";
	case I40E_ERR_ADAPTER_STOPPED:
		return "I40E_ERR_ADAPTER_STOPPED";
	case I40E_ERR_INVALID_MAC_ADDR:
		return "I40E_ERR_INVALID_MAC_ADDR";
	case I40E_ERR_DEVICE_NOT_SUPPORTED:
		return "I40E_ERR_DEVICE_NOT_SUPPORTED";
	case I40E_ERR_MASTER_REQUESTS_PENDING:
		return "I40E_ERR_MASTER_REQUESTS_PENDING";
	case I40E_ERR_INVALID_LINK_SETTINGS:
		return "I40E_ERR_INVALID_LINK_SETTINGS";
	case I40E_ERR_AUTONEG_NOT_COMPLETE:
		return "I40E_ERR_AUTONEG_NOT_COMPLETE";
	case I40E_ERR_RESET_FAILED:
		return "I40E_ERR_RESET_FAILED";
	case I40E_ERR_SWFW_SYNC:
		return "I40E_ERR_SWFW_SYNC";
	case I40E_ERR_NO_AVAILABLE_VSI:
		return "I40E_ERR_NO_AVAILABLE_VSI";
	case I40E_ERR_NO_MEMORY:
		return "I40E_ERR_NO_MEMORY";
	case I40E_ERR_BAD_PTR:
		return "I40E_ERR_BAD_PTR";
	case I40E_ERR_RING_FULL:
		return "I40E_ERR_RING_FULL";
	case I40E_ERR_INVALID_PD_ID:
		return "I40E_ERR_INVALID_PD_ID";
	case I40E_ERR_INVALID_QP_ID:
		return "I40E_ERR_INVALID_QP_ID";
	case I40E_ERR_INVALID_CQ_ID:
		return "I40E_ERR_INVALID_CQ_ID";
	case I40E_ERR_INVALID_CEQ_ID:
		return "I40E_ERR_INVALID_CEQ_ID";
	case I40E_ERR_INVALID_AEQ_ID:
		return "I40E_ERR_INVALID_AEQ_ID";
	case I40E_ERR_INVALID_SIZE:
		return "I40E_ERR_INVALID_SIZE";
	case I40E_ERR_INVALID_ARP_INDEX:
		return "I40E_ERR_INVALID_ARP_INDEX";
	case I40E_ERR_INVALID_FPM_FUNC_ID:
		return "I40E_ERR_INVALID_FPM_FUNC_ID";
	case I40E_ERR_QP_INVALID_MSG_SIZE:
		return "I40E_ERR_QP_INVALID_MSG_SIZE";
	case I40E_ERR_QP_TOOMANY_WRS_POSTED:
		return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
	case I40E_ERR_INVALID_FRAG_COUNT:
		return "I40E_ERR_INVALID_FRAG_COUNT";
	case I40E_ERR_QUEUE_EMPTY:
		return "I40E_ERR_QUEUE_EMPTY";
	case I40E_ERR_INVALID_ALIGNMENT:
		return "I40E_ERR_INVALID_ALIGNMENT";
	case I40E_ERR_FLUSHED_QUEUE:
		return "I40E_ERR_FLUSHED_QUEUE";
	case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
		return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
	case I40E_ERR_INVALID_IMM_DATA_SIZE:
		return "I40E_ERR_INVALID_IMM_DATA_SIZE";
	case I40E_ERR_TIMEOUT:
		return "I40E_ERR_TIMEOUT";
	case I40E_ERR_OPCODE_MISMATCH:
		return "I40E_ERR_OPCODE_MISMATCH";
	case I40E_ERR_CQP_COMPL_ERROR:
		return "I40E_ERR_CQP_COMPL_ERROR";
	case I40E_ERR_INVALID_VF_ID:
		return "I40E_ERR_INVALID_VF_ID";
	case I40E_ERR_INVALID_HMCFN_ID:
		return "I40E_ERR_INVALID_HMCFN_ID";
	case I40E_ERR_BACKING_PAGE_ERROR:
		return "I40E_ERR_BACKING_PAGE_ERROR";
	case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
		return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
	case I40E_ERR_INVALID_PBLE_INDEX:
		return "I40E_ERR_INVALID_PBLE_INDEX";
	case I40E_ERR_INVALID_SD_INDEX:
		return "I40E_ERR_INVALID_SD_INDEX";
	case I40E_ERR_INVALID_PAGE_DESC_INDEX:
		return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
	case I40E_ERR_INVALID_SD_TYPE:
		return "I40E_ERR_INVALID_SD_TYPE";
	case I40E_ERR_MEMCPY_FAILED:
		return "I40E_ERR_MEMCPY_FAILED";
	case I40E_ERR_INVALID_HMC_OBJ_INDEX:
		return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
	case I40E_ERR_INVALID_HMC_OBJ_COUNT:
		return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
	case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
		return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
	case I40E_ERR_SRQ_ENABLED:
		return "I40E_ERR_SRQ_ENABLED";
	case I40E_ERR_ADMIN_QUEUE_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_ERROR";
	case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
		return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
	case I40E_ERR_BUF_TOO_SHORT:
		return "I40E_ERR_BUF_TOO_SHORT";
	case I40E_ERR_ADMIN_QUEUE_FULL:
		return "I40E_ERR_ADMIN_QUEUE_FULL";
	case I40E_ERR_ADMIN_QUEUE_NO_WORK:
		return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
	case I40E_ERR_BAD_IWARP_CQE:
		return "I40E_ERR_BAD_IWARP_CQE";
	case I40E_ERR_NVM_BLANK_MODE:
		return "I40E_ERR_NVM_BLANK_MODE";
	case I40E_ERR_NOT_IMPLEMENTED:
		return "I40E_ERR_NOT_IMPLEMENTED";
	case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
		return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
	case I40E_ERR_DIAG_TEST_FAILED:
		return "I40E_ERR_DIAG_TEST_FAILED";
	case I40E_ERR_NOT_READY:
		return "I40E_ERR_NOT_READY";
	case I40E_NOT_SUPPORTED:
		return "I40E_NOT_SUPPORTED";
	case I40E_ERR_FIRMWARE_API_VERSION:
		return "I40E_ERR_FIRMWARE_API_VERSION";
	case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
	return hw->err_str;
}
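
/*
 * Illustrative usage (a sketch, not code from this file): callers normally
 * pair the two string helpers above when reporting an AdminQ failure, e.g.:
 *
 *	dev_info(&pf->pdev->dev, "command failed, err %s aq_err %s\n",
 *		 i40e_stat_str(hw, status),
 *		 i40e_aq_str(hw, hw->aq.asq_last_status));
 *
 * "pf" and "status" are assumed to be provided by the caller.
 */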

/**
 * i40e_debug_aq
 * @hw: pointer to the hw struct
 * @mask: debug mask
 * @desc: pointer to admin queue descriptor
 * @buffer: pointer to command buffer
 * @buf_len: max length of buffer
 *
 * Dumps debug log about adminq command with descriptor contents.
 **/
void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
		   void *buffer, u16 buf_len)
{
	struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
	u32 effective_mask = hw->debug_mask & mask;
	char prefix[27];
	u16 len;
	u8 *buf = (u8 *)buffer;

	if (!effective_mask || !desc)
		return;

	len = le16_to_cpu(aq_desc->datalen);

	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		   le16_to_cpu(aq_desc->opcode),
		   le16_to_cpu(aq_desc->flags),
		   le16_to_cpu(aq_desc->datalen),
		   le16_to_cpu(aq_desc->retval));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tcookie (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->cookie_high),
		   le32_to_cpu(aq_desc->cookie_low));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\tparam (0,1) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.internal.param0),
		   le32_to_cpu(aq_desc->params.internal.param1));
	i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
		   "\taddr (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.external.addr_high),
		   le32_to_cpu(aq_desc->params.external.addr_low));

	if (buffer && buf_len != 0 && len != 0 &&
	    (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
		i40e_debug(hw, mask, "AQ CMD Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		snprintf(prefix, sizeof(prefix),
			 "i40e %02x:%02x.%x: \t0x",
			 hw->bus.bus_id,
			 hw->bus.device,
			 hw->bus.func);

		print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
			       16, 1, buf, len, false);
	}
}

/**
 * i40e_check_asq_alive
 * @hw: pointer to the hw struct
 *
 * Returns true if Queue is enabled else false.
 **/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
	if (hw->aq.asq.len)
		return !!(rd32(hw, hw->aq.asq.len) &
			  I40E_PF_ATQLEN_ATQENABLE_MASK);
	else
		return false;
}

/**
 * i40e_aq_queue_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well.
 **/
i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
				   bool unloading)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_queue_shutdown *cmd =
		(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_queue_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(I40E_AQ_DRIVER_UNLOADING);
	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * i40e_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get or set RSS look up table
 **/
static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
					   u16 vsi_id, bool pf_lut,
					   u8 *lut, u16 lut_size,
					   bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_lut *cmd_resp =
		(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_lut);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_lut);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
			cpu_to_le16((u16)((vsi_id <<
					  I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
					  I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);

	if (pf_lut)
		cmd_resp->flags |= cpu_to_le16((u16)
					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
	else
		cmd_resp->flags |= cpu_to_le16((u16)
					((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
					I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));

	status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * get the RSS lookup table, PF or VSI type
 **/
i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
				       false);
}

/**
 * i40e_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * set the RSS lookup table, PF or VSI type
 **/
i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
				bool pf_lut, u8 *lut, u16 lut_size)
{
	return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}
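
/*
 * Illustrative usage (a sketch, not code from this file): a caller could
 * spread flows evenly over its queues by programming an identity-modulo
 * lookup table, where "vsi_id", "pf_lut", "lut", "lut_size" and
 * "num_queues" all come from the caller:
 *
 *	int i;
 *
 *	for (i = 0; i < lut_size; i++)
 *		lut[i] = i % num_queues;
 *	status = i40e_aq_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size);
 */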

/**
 * i40e_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * Internal function to get or set the RSS key per VSI
 **/
static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
				      u16 vsi_id,
				      struct i40e_aqc_get_set_rss_key_data *key,
				      bool set)
{
	i40e_status status;
	struct i40e_aq_desc desc;
	struct i40e_aqc_get_set_rss_key *cmd_resp =
			(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
	u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);

	if (set)
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_set_rss_key);
	else
		i40e_fill_default_direct_cmd_desc(&desc,
						  i40e_aqc_opc_get_rss_key);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD);

	cmd_resp->vsi_id =
			cpu_to_le16((u16)((vsi_id <<
					  I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
					  I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
	cmd_resp->vsi_id |= cpu_to_le16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);

	status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);

	return status;
}

/**
 * i40e_aq_get_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 **/
i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}

/**
 * i40e_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * set the RSS key per VSI
 **/
i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
				u16 vsi_id,
				struct i40e_aqc_get_set_rss_key_data *key)
{
	return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}

/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
 * hardware to a bit-field that can be used by SW to more easily determine the
 * packet type.
 *
 * Macros are used to shorten the table lines and make this table human
 * readable.
 *
 * We store the PTYPE in the top byte of the bit field - this is just so that
 * we can check that the table doesn't have a row missing, as the index into
 * the table should be the PTYPE.
 *
 * Typical work flow:
 *
 * IF NOT i40e_ptype_lookup[ptype].known
 * THEN
 *      Packet is unknown
 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
 *      Use the rest of the fields to look at the tunnels, inner protocols, etc
 * ELSE
 *      Use the enum i40e_rx_l2_ptype to decode the packet type
 * ENDIF
 */
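
/*
 * Illustrative decode following the work flow above (a sketch, not code
 * from this file), assuming "ptype" is the 8-bit value reported by the
 * hardware in the Rx descriptor:
 *
 *	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];
 *
 *	if (!decoded.known)
 *		return;				// unrecognized packet type
 *
 *	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) {
 *		// IP packet: outer_ip_ver, tunnel_type, inner_prot, etc.
 *		// describe the headers (e.g. for checksum/RSS handling)
 *	} else {
 *		// non-IP packet: decode via the L2 ptype
 *	}
 */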

/* macro to make the table lines short */
#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
	{	PTYPE, \
		1, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP, \
		I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
		I40E_RX_PTYPE_##OUTER_FRAG, \
		I40E_RX_PTYPE_TUNNEL_##T, \
		I40E_RX_PTYPE_TUNNEL_END_##TE, \
		I40E_RX_PTYPE_##TEF, \
		I40E_RX_PTYPE_INNER_PROT_##I, \
		I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }

#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
	{ PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }

/* shorter macros makes the table fit but are terse */
#define I40E_RX_PTYPE_NOF		I40E_RX_PTYPE_NOT_FRAG
#define I40E_RX_PTYPE_FRG		I40E_RX_PTYPE_FRAG
#define I40E_RX_PTYPE_INNER_PROT_TS	I40E_RX_PTYPE_INNER_PROT_TIMESYNC

/* Lookup table mapping the HW PTYPE to the bit field for decoding */
struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
	/* L2 Packet types */
	I40E_PTT_UNUSED_ENTRY(0),
	I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
	I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(4),
	I40E_PTT_UNUSED_ENTRY(5),
	I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT_UNUSED_ENTRY(8),
	I40E_PTT_UNUSED_ENTRY(9),
	I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
	I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
	I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),

	/* Non Tunneled IPv4 */
	I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(25),
	I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv4 --> IPv4 */
	I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(32),
	I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> IPv6 */
	I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(39),
	I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT */
	I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> IPv4 */
	I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(47),
	I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> IPv6 */
	I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(54),
	I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC */
	I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv4 --> GRE/NAT --> MAC --> IPv4 */
	I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(62),
	I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT -> MAC --> IPv6 */
	I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(69),
	I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv4 --> GRE/NAT --> MAC/VLAN */
	I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(77),
	I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(84),
	I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* Non Tunneled IPv6 */
	I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
	I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(91),
	I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
	I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
	I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),

	/* IPv6 --> IPv4 */
	I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
	I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
	I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(98),
	I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
	I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> IPv6 */
	I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
	I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
	I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(105),
	I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
	I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT */
	I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> IPv4 */
	I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
	I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
	I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(113),
	I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
	I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> IPv6 */
	I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
	I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
	I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(120),
	I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
	I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC */
	I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC -> IPv4 */
	I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
	I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
	I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(128),
	I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
	I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC -> IPv6 */
	I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
	I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
	I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(135),
	I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
	I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN */
	I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
	I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
	I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
	I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(143),
	I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
	I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
	I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),

	/* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
	I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
	I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
	I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
	I40E_PTT_UNUSED_ENTRY(150),
	I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
	I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
	I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),

	/* unused entries */
	I40E_PTT_UNUSED_ENTRY(154),
	I40E_PTT_UNUSED_ENTRY(155),
	I40E_PTT_UNUSED_ENTRY(156),
	I40E_PTT_UNUSED_ENTRY(157),
	I40E_PTT_UNUSED_ENTRY(158),
	I40E_PTT_UNUSED_ENTRY(159),

	I40E_PTT_UNUSED_ENTRY(160),
	I40E_PTT_UNUSED_ENTRY(161),
	I40E_PTT_UNUSED_ENTRY(162),
	I40E_PTT_UNUSED_ENTRY(163),
	I40E_PTT_UNUSED_ENTRY(164),
	I40E_PTT_UNUSED_ENTRY(165),
	I40E_PTT_UNUSED_ENTRY(166),
	I40E_PTT_UNUSED_ENTRY(167),
	I40E_PTT_UNUSED_ENTRY(168),
	I40E_PTT_UNUSED_ENTRY(169),

	I40E_PTT_UNUSED_ENTRY(170),
	I40E_PTT_UNUSED_ENTRY(171),
	I40E_PTT_UNUSED_ENTRY(172),
	I40E_PTT_UNUSED_ENTRY(173),
	I40E_PTT_UNUSED_ENTRY(174),
	I40E_PTT_UNUSED_ENTRY(175),
	I40E_PTT_UNUSED_ENTRY(176),
	I40E_PTT_UNUSED_ENTRY(177),
	I40E_PTT_UNUSED_ENTRY(178),
	I40E_PTT_UNUSED_ENTRY(179),

	I40E_PTT_UNUSED_ENTRY(180),
	I40E_PTT_UNUSED_ENTRY(181),
	I40E_PTT_UNUSED_ENTRY(182),
	I40E_PTT_UNUSED_ENTRY(183),
	I40E_PTT_UNUSED_ENTRY(184),
	I40E_PTT_UNUSED_ENTRY(185),
	I40E_PTT_UNUSED_ENTRY(186),
	I40E_PTT_UNUSED_ENTRY(187),
	I40E_PTT_UNUSED_ENTRY(188),
	I40E_PTT_UNUSED_ENTRY(189),

	I40E_PTT_UNUSED_ENTRY(190),
	I40E_PTT_UNUSED_ENTRY(191),
	I40E_PTT_UNUSED_ENTRY(192),
	I40E_PTT_UNUSED_ENTRY(193),
	I40E_PTT_UNUSED_ENTRY(194),
	I40E_PTT_UNUSED_ENTRY(195),
	I40E_PTT_UNUSED_ENTRY(196),
	I40E_PTT_UNUSED_ENTRY(197),
	I40E_PTT_UNUSED_ENTRY(198),
	I40E_PTT_UNUSED_ENTRY(199),

	I40E_PTT_UNUSED_ENTRY(200),
	I40E_PTT_UNUSED_ENTRY(201),
	I40E_PTT_UNUSED_ENTRY(202),
	I40E_PTT_UNUSED_ENTRY(203),
	I40E_PTT_UNUSED_ENTRY(204),
	I40E_PTT_UNUSED_ENTRY(205),
	I40E_PTT_UNUSED_ENTRY(206),
	I40E_PTT_UNUSED_ENTRY(207),
	I40E_PTT_UNUSED_ENTRY(208),
	I40E_PTT_UNUSED_ENTRY(209),

	I40E_PTT_UNUSED_ENTRY(210),
	I40E_PTT_UNUSED_ENTRY(211),
	I40E_PTT_UNUSED_ENTRY(212),
	I40E_PTT_UNUSED_ENTRY(213),
	I40E_PTT_UNUSED_ENTRY(214),
	I40E_PTT_UNUSED_ENTRY(215),
	I40E_PTT_UNUSED_ENTRY(216),
	I40E_PTT_UNUSED_ENTRY(217),
	I40E_PTT_UNUSED_ENTRY(218),
	I40E_PTT_UNUSED_ENTRY(219),

	I40E_PTT_UNUSED_ENTRY(220),
	I40E_PTT_UNUSED_ENTRY(221),
	I40E_PTT_UNUSED_ENTRY(222),
	I40E_PTT_UNUSED_ENTRY(223),
	I40E_PTT_UNUSED_ENTRY(224),
	I40E_PTT_UNUSED_ENTRY(225),
	I40E_PTT_UNUSED_ENTRY(226),
	I40E_PTT_UNUSED_ENTRY(227),
	I40E_PTT_UNUSED_ENTRY(228),
	I40E_PTT_UNUSED_ENTRY(229),

	I40E_PTT_UNUSED_ENTRY(230),
	I40E_PTT_UNUSED_ENTRY(231),
	I40E_PTT_UNUSED_ENTRY(232),
	I40E_PTT_UNUSED_ENTRY(233),
	I40E_PTT_UNUSED_ENTRY(234),
	I40E_PTT_UNUSED_ENTRY(235),
	I40E_PTT_UNUSED_ENTRY(236),
	I40E_PTT_UNUSED_ENTRY(237),
	I40E_PTT_UNUSED_ENTRY(238),
	I40E_PTT_UNUSED_ENTRY(239),

	I40E_PTT_UNUSED_ENTRY(240),
	I40E_PTT_UNUSED_ENTRY(241),
	I40E_PTT_UNUSED_ENTRY(242),
	I40E_PTT_UNUSED_ENTRY(243),
	I40E_PTT_UNUSED_ENTRY(244),
	I40E_PTT_UNUSED_ENTRY(245),
	I40E_PTT_UNUSED_ENTRY(246),
	I40E_PTT_UNUSED_ENTRY(247),
	I40E_PTT_UNUSED_ENTRY(248),
	I40E_PTT_UNUSED_ENTRY(249),

	I40E_PTT_UNUSED_ENTRY(250),
	I40E_PTT_UNUSED_ENTRY(251),
	I40E_PTT_UNUSED_ENTRY(252),
	I40E_PTT_UNUSED_ENTRY(253),
	I40E_PTT_UNUSED_ENTRY(254),
	I40E_PTT_UNUSED_ENTRY(255)
};

/**
 * i40e_init_shared_code - Initialize the shared code
 * @hw: pointer to hardware structure
 *
 * This assigns the MAC type and PHY code and inits the NVM.
 * Does not touch the hardware. This function must be called prior to any
 * other function in the shared code. The i40e_hw structure should be
 * memset to 0 prior to calling this function. The following fields in
 * hw structure should be filled in prior to calling this function:
 * hw_addr, back, device_id, vendor_id, subsystem_device_id,
 * subsystem_vendor_id, and revision_id
 **/
i40e_status i40e_init_shared_code(struct i40e_hw *hw)
{
	i40e_status status = 0;
	u32 port, ari, func_rid;

	i40e_set_mac_type(hw);

	switch (hw->mac.type) {
	case I40E_MAC_XL710:
	case I40E_MAC_X722:
		break;
	default:
		return I40E_ERR_DEVICE_NOT_SUPPORTED;
	}

	hw->phy.get_link_info = true;

	/* Determine port number and PF number */
	port = (rd32(hw, I40E_PFGEN_PORTNUM) & I40E_PFGEN_PORTNUM_PORT_NUM_MASK)
					   >> I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
	hw->port = (u8)port;
	ari = (rd32(hw, I40E_GLPCI_CAPSUP) & I40E_GLPCI_CAPSUP_ARI_EN_MASK) >>
						 I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
	func_rid = rd32(hw, I40E_PF_FUNC_RID);
	if (ari)
		hw->pf_id = (u8)(func_rid & 0xff);
	else
		hw->pf_id = (u8)(func_rid & 0x7);

	if (hw->mac.type == I40E_MAC_X722)
		hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
			     I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

	status = i40e_init_nvm(hw);
	return status;
}
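
/*
 * Illustrative call sequence (a sketch, not code from this file): a PCI
 * probe routine is expected to zero the i40e_hw structure and fill in the
 * identifying fields listed above before calling i40e_init_shared_code(),
 * roughly:
 *
 *	hw->vendor_id = pdev->vendor;
 *	hw->device_id = pdev->device;
 *	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 *	hw->subsystem_device_id = pdev->subsystem_device;
 *	hw->revision_id = pdev->revision;
 *	err = i40e_init_shared_code(hw);
 *
 * "pdev", "err" and the mapping of hw->hw_addr are assumed to be handled
 * by the caller.
 */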

/**
 * i40e_aq_mac_address_read - Retrieve the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: a return indicator of what addresses were added to the addr store
 * @addrs: the requestor's mac addr store
 * @cmd_details: pointer to command details structure or NULL
 **/
static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
				   u16 *flags,
				   struct i40e_aqc_mac_address_read_data *addrs,
				   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_read *cmd_data =
		(struct i40e_aqc_mac_address_read *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
	desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);

	status = i40e_asq_send_command(hw, &desc, addrs,
				       sizeof(*addrs), cmd_details);
	*flags = le16_to_cpu(cmd_data->command_flags);

	return status;
}

/**
 * i40e_aq_mac_address_write - Change the MAC addresses
 * @hw: pointer to the hw struct
 * @flags: indicates which MAC to be written
 * @mac_addr: address to write
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
				      u16 flags, u8 *mac_addr,
				      struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_mac_address_write *cmd_data =
		(struct i40e_aqc_mac_address_write *)&desc.params.raw;
	i40e_status status;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_mac_address_write);
	cmd_data->command_flags = cpu_to_le16(flags);
	cmd_data->mac_sah = cpu_to_le16((u16)mac_addr[0] << 8 | mac_addr[1]);
	cmd_data->mac_sal = cpu_to_le32(((u32)mac_addr[2] << 24) |
					((u32)mac_addr[3] << 16) |
					((u32)mac_addr[4] << 8) |
					mac_addr[5]);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_get_mac_addr - get MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to MAC address
 *
 * Reads the adapter's MAC address from register
 **/
i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);

	if (flags & I40E_AQC_LAN_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.pf_lan_mac);

	return status;
}

/**
 * i40e_get_port_mac_addr - get Port MAC address
 * @hw: pointer to the HW structure
 * @mac_addr: pointer to Port MAC address
 *
 * Reads the adapter's Port MAC address
 **/
i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
	struct i40e_aqc_mac_address_read_data addrs;
	i40e_status status;
	u16 flags = 0;

	status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
	if (status)
		return status;

	if (flags & I40E_AQC_PORT_ADDR_VALID)
		ether_addr_copy(mac_addr, addrs.port_mac);
	else
		status = I40E_ERR_INVALID_MAC_ADDR;

	return status;
}

/**
 * i40e_pre_tx_queue_cfg - pre tx queue configure
 * @hw: pointer to the HW structure
 * @queue: target PF queue index
 * @enable: state change request
 *
 * Handles hw requirement to indicate intention to enable
 * or disable target queue.
 **/
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
{
	u32 abs_queue_idx = hw->func_caps.base_queue + queue;
	u32 reg_block = 0;
	u32 reg_val;

	if (abs_queue_idx >= 128) {
		reg_block = abs_queue_idx / 128;
		abs_queue_idx %= 128;
	}

	reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
	reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
	reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);

	if (enable)
		reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
	else
		reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

	wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
}

/**
 * i40e_read_pba_string - Reads part number string from EEPROM
 * @hw: pointer to hardware structure
 * @pba_num: stores the part number string from the EEPROM
 * @pba_num_size: part number string buffer length
 *
 * Reads the part number string from the EEPROM.
 **/
i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
				 u32 pba_num_size)
{
	i40e_status status = 0;
	u16 pba_word = 0;
	u16 pba_size = 0;
	u16 pba_ptr = 0;
	u16 i = 0;

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
	if (status || (pba_word != 0xFAFA)) {
		hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block pointer.\n");
		return status;
	}

	status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
	if (status) {
		hw_dbg(hw, "Failed to read PBA Block size.\n");
		return status;
	}

	/* Subtract one to get PBA word count (PBA Size word is included in
	 * total size)
	 */
	pba_size--;
	if (pba_num_size < (((u32)pba_size * 2) + 1)) {
		hw_dbg(hw, "Buffer too small for PBA data.\n");
		return I40E_ERR_PARAM;
	}

	for (i = 0; i < pba_size; i++) {
		status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
		if (status) {
			hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
			return status;
		}

		pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
		pba_num[(i * 2) + 1] = pba_word & 0xFF;
	}
	pba_num[(pba_size * 2)] = '\0';

	return status;
}

/**
 * i40e_get_media_type - Gets media type
 * @hw: pointer to the hardware structure
 **/
static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
{
	enum i40e_media_type media;

	switch (hw->phy.link_info.phy_type) {
	case I40E_PHY_TYPE_10GBASE_SR:
	case I40E_PHY_TYPE_10GBASE_LR:
	case I40E_PHY_TYPE_1000BASE_SX:
	case I40E_PHY_TYPE_1000BASE_LX:
	case I40E_PHY_TYPE_40GBASE_SR4:
	case I40E_PHY_TYPE_40GBASE_LR4:
	case I40E_PHY_TYPE_25GBASE_LR:
	case I40E_PHY_TYPE_25GBASE_SR:
		media = I40E_MEDIA_TYPE_FIBER;
		break;
	case I40E_PHY_TYPE_100BASE_TX:
	case I40E_PHY_TYPE_1000BASE_T:
	case I40E_PHY_TYPE_2_5GBASE_T:
	case I40E_PHY_TYPE_5GBASE_T:
	case I40E_PHY_TYPE_10GBASE_T:
		media = I40E_MEDIA_TYPE_BASET;
		break;
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
	case I40E_PHY_TYPE_40GBASE_AOC:
	case I40E_PHY_TYPE_10GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_CR:
	case I40E_PHY_TYPE_25GBASE_AOC:
	case I40E_PHY_TYPE_25GBASE_ACC:
		media = I40E_MEDIA_TYPE_DA;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
	case I40E_PHY_TYPE_10GBASE_KX4:
	case I40E_PHY_TYPE_10GBASE_KR:
	case I40E_PHY_TYPE_40GBASE_KR4:
	case I40E_PHY_TYPE_20GBASE_KR2:
	case I40E_PHY_TYPE_25GBASE_KR:
		media = I40E_MEDIA_TYPE_BACKPLANE;
		break;
	case I40E_PHY_TYPE_SGMII:
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
	case I40E_PHY_TYPE_XLAUI:
	case I40E_PHY_TYPE_XLPPI:
	default:
		media = I40E_MEDIA_TYPE_UNKNOWN;
		break;
	}

	return media;
}

/**
 * i40e_poll_globr - Poll for Global Reset completion
 * @hw: pointer to the hardware structure
 * @retry_limit: how many times to retry before failure
 **/
static i40e_status i40e_poll_globr(struct i40e_hw *hw,
				   u32 retry_limit)
{
	u32 cnt, reg = 0;

	for (cnt = 0; cnt < retry_limit; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			return 0;
		msleep(100);
	}

	hw_dbg(hw, "Global reset failed.\n");
	hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg);

	return I40E_ERR_RESET_FAILED;
}

#define I40E_PF_RESET_WAIT_COUNT_A0	200
#define I40E_PF_RESET_WAIT_COUNT	200
/**
 * i40e_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * Assuming someone else has triggered a global reset,
 * assure the global reset is complete and then reset the PF
 **/
i40e_status i40e_pf_reset(struct i40e_hw *hw)
{
	u32 cnt = 0;
	u32 cnt1 = 0;
	u32 reg = 0;
	u32 grst_del;

	/* Poll for Global Reset steady state in case of recent GRST.
	 * The grst delay value is in 100ms units, and we'll wait a
	 * couple counts longer to be sure we don't just miss the end.
	 */
	grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
		    I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
		    I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;

	/* It can take up to 15 secs for GRST steady state.
	 * Bump it to 16 secs max to be safe.
	 */
	grst_del = grst_del * 20;

	for (cnt = 0; cnt < grst_del; cnt++) {
		reg = rd32(hw, I40E_GLGEN_RSTAT);
		if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
			break;
		msleep(100);
	}
	if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
		hw_dbg(hw, "Global reset polling failed to complete.\n");
		return I40E_ERR_RESET_FAILED;
	}

	/* Now Wait for the FW to be ready */
	for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
		reg = rd32(hw, I40E_GLNVM_ULD);
		reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
		if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
			    I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK)) {
			hw_dbg(hw, "Core and Global modules ready %d\n", cnt1);
			break;
		}
		usleep_range(10000, 20000);
	}
	if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
		     I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
		hw_dbg(hw, "wait for FW Reset complete timed out\n");
		hw_dbg(hw, "I40E_GLNVM_ULD = 0x%x\n", reg);
		return I40E_ERR_RESET_FAILED;
	}

	/* If there was a Global Reset in progress when we got here,
	 * we don't need to do the PF Reset
	 */
	if (!cnt) {
		u32 reg2 = 0;

		if (hw->revision_id == 0)
			cnt = I40E_PF_RESET_WAIT_COUNT_A0;
		else
			cnt = I40E_PF_RESET_WAIT_COUNT;
		reg = rd32(hw, I40E_PFGEN_CTRL);
		wr32(hw, I40E_PFGEN_CTRL,
		     (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
		for (; cnt; cnt--) {
			reg = rd32(hw, I40E_PFGEN_CTRL);
			if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
				break;
			reg2 = rd32(hw, I40E_GLGEN_RSTAT);
			if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
				break;
			usleep_range(1000, 2000);
		}
		if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
			if (i40e_poll_globr(hw, grst_del))
				return I40E_ERR_RESET_FAILED;
		} else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
			hw_dbg(hw, "PF reset polling failed to complete.\n");
			return I40E_ERR_RESET_FAILED;
		}
	}

	i40e_clear_pxe_mode(hw);

	return 0;
}

/**
 * i40e_clear_hw - clear out any left over hw state
 * @hw: pointer to the hw struct
 *
 * Clear queues and interrupts, typically called at init time,
 * but after the capabilities have been found so we know how many
 * queues and msix vectors have been allocated.
 **/
void i40e_clear_hw(struct i40e_hw *hw)
{
	u32 num_queues, base_queue;
	u32 num_pf_int;
	u32 num_vf_int;
	u32 num_vfs;
	u32 i, j;
	u32 val;
	u32 eol = 0x7ff;

	/* get number of interrupts, queues, and VFs */
	val = rd32(hw, I40E_GLPCI_CNF2);
	num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
	num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
		     I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;

	val = rd32(hw, I40E_PFLAN_QALLOC);
	base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
		     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
	j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
	    I40E_PFLAN_QALLOC_LASTQ_SHIFT;
	if (val & I40E_PFLAN_QALLOC_VALID_MASK)
		num_queues = (j - base_queue) + 1;
	else
		num_queues = 0;

	val = rd32(hw, I40E_PF_VT_PFALLOC);
	i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
	j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
	    I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
	if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
		num_vfs = (j - i) + 1;
	else
		num_vfs = 0;

	/* stop all the interrupts */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);
	val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_DYN_CTLN(i), val);

	/* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
	val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	wr32(hw, I40E_PFINT_LNKLST0, val);
	for (i = 0; i < num_pf_int - 2; i++)
		wr32(hw, I40E_PFINT_LNKLSTN(i), val);
	val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
	for (i = 0; i < num_vfs; i++)
		wr32(hw, I40E_VPINT_LNKLST0(i), val);
	for (i = 0; i < num_vf_int - 2; i++)
		wr32(hw, I40E_VPINT_LNKLSTN(i), val);

	/* warn the HW of the coming Tx disables */
	for (i = 0; i < num_queues; i++) {
		u32 abs_queue_idx = base_queue + i;
		u32 reg_block = 0;

		if (abs_queue_idx >= 128) {
			reg_block = abs_queue_idx / 128;
			abs_queue_idx %= 128;
		}

		val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
		val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
		val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
		val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;

		wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
	}
	udelay(400);

	/* stop all the queues */
	for (i = 0; i < num_queues; i++) {
		wr32(hw, I40E_QINT_TQCTL(i), 0);
		wr32(hw, I40E_QTX_ENA(i), 0);
		wr32(hw, I40E_QINT_RQCTL(i), 0);
		wr32(hw, I40E_QRX_ENA(i), 0);
	}

	/* short wait for all queue disables to settle */
	udelay(50);
}

/**
 * i40e_clear_pxe_mode - clear pxe operations mode
 * @hw: pointer to the hw struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 **/
void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
	u32 reg;

	if (i40e_check_asq_alive(hw))
		i40e_aq_clear_pxe_mode(hw, NULL);

	/* Clear single descriptor fetch/write-back mode */
	reg = rd32(hw, I40E_GLLAN_RCTL_0);

	if (hw->revision_id == 0) {
		/* As a work around clear PXE_MODE instead of setting it */
		wr32(hw, I40E_GLLAN_RCTL_0, (reg & (~I40E_GLLAN_RCTL_0_PXE_MODE_MASK)));
	} else {
		wr32(hw, I40E_GLLAN_RCTL_0, (reg | I40E_GLLAN_RCTL_0_PXE_MODE_MASK));
	}
}

/**
 * i40e_led_is_mine - helper to find matching led
 * @hw: pointer to the hw struct
 * @idx: index into GPIO registers
 *
 * returns: 0 if no match, otherwise the value of the GPIO_CTL register
 */
static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
{
	u32 gpio_val = 0;
	u32 port;

	if (!hw->func_caps.led[idx])
		return 0;

	gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
	port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
		I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;

	/* if PRT_NUM_NA is 1 then this LED is not port specific, OR
	 * if it is not our port then ignore
	 */
	if ((gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK) ||
	    (port != hw->port))
		return 0;

	return gpio_val;
}

#define I40E_COMBINED_ACTIVITY 0xA
#define I40E_FILTER_ACTIVITY 0xE
#define I40E_LINK_ACTIVITY 0xC
#define I40E_MAC_ACTIVITY 0xD
#define I40E_LED0 22

/**
 * i40e_led_get - return current on/off mode
 * @hw: pointer to the hw struct
 *
 * The value returned is the 'mode' field as defined in the
 * GPIO register definitions: 0x0 = off, 0xf = on, and other
 * values are variations of possible behaviors relating to
 * blink, link, and wire.
 **/
u32 i40e_led_get(struct i40e_hw *hw)
{
	u32 mode = 0;
	int i;

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;

		mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
		       I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
		break;
	}

	return mode;
}

/**
 * i40e_led_set - set new on/off mode
 * @hw: pointer to the hw struct
 * @mode: 0=off, 0xf=on (else see manual for mode details)
 * @blink: true if the LED should blink when on, false if steady
 *
 * If this function is used to turn on the blink, it should also
 * be used to disable the blink when restoring the original state.
 **/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
	int i;

	if (mode & 0xfffffff0)
		hw_dbg(hw, "invalid mode passed in %X\n", mode);

	/* as per the documentation GPIO 22-29 are the LED
	 * GPIO pins named LED0..LED7
	 */
	for (i = I40E_LED0; i <= I40E_GLGEN_GPIO_CTL_MAX_INDEX; i++) {
		u32 gpio_val = i40e_led_is_mine(hw, i);

		if (!gpio_val)
			continue;
		gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
		/* this & is a bit of paranoia, but serves as a range check */
		gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
			     I40E_GLGEN_GPIO_CTL_LED_MODE_MASK);

		if (blink)
			gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);
		else
			gpio_val &= ~BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT);

		wr32(hw, I40E_GLGEN_GPIO_CTL(i), gpio_val);
		break;
	}
}
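
/*
 * Illustrative usage (a sketch, not code from this file): an "identify"
 * style caller, e.g. an ethtool set_phys_id handler, might save the
 * current mode, blink the LED, and later restore the saved state:
 *
 *	orig_mode = i40e_led_get(hw);		// remember original state
 *	i40e_led_set(hw, 0xf, true);		// LED on, blinking
 *	...
 *	i40e_led_set(hw, orig_mode, false);	// restore, blink disabled
 *
 * "orig_mode" is assumed to be a u32 owned by the caller.
 */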

/* Admin command wrappers */

/**
 * i40e_aq_get_phy_capabilities
 * @hw: pointer to the hw struct
 * @abilities: structure for PHY capabilities to be filled
 * @qualified_modules: report Qualified Modules
 * @report_init: report init capabilities (active are default)
 * @cmd_details: pointer to command details structure or NULL
 *
 * Returns the various PHY abilities supported on the Port.
 **/
i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
			bool qualified_modules, bool report_init,
			struct i40e_aq_get_phy_abilities_resp *abilities,
			struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	i40e_status status;
	u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
	u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;

	if (!abilities)
		return I40E_ERR_PARAM;

	do {
		i40e_fill_default_direct_cmd_desc(&desc,
					       i40e_aqc_opc_get_phy_abilities);

		desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
		if (abilities_size > I40E_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);

		if (qualified_modules)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);

		if (report_init)
			desc.params.external.param0 |=
			cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);

		status = i40e_asq_send_command(hw, &desc, abilities,
					       abilities_size, cmd_details);

		if (status)
			break;

		if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
			status = I40E_ERR_UNKNOWN_PHY;
			break;
		} else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
			usleep_range(1000, 2000);
			total_delay++;
			status = I40E_ERR_TIMEOUT;
		}
	} while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
		 (total_delay < max_delay));

	if (status)
		return status;

	if (report_init) {
		if (hw->mac.type == I40E_MAC_XL710 &&
		    hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
		    hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
			status = i40e_aq_get_link_info(hw, true, NULL, NULL);
		} else {
			hw->phy.phy_types = le32_to_cpu(abilities->phy_type);
			hw->phy.phy_types |=
					((u64)abilities->phy_type_ext << 32);
		}
	}

	return status;
}

/**
 * i40e_aq_set_phy_config
 * @hw: pointer to the hw struct
 * @config: structure with PHY configuration to be set
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters
 * supported on the Port. One or more of the Set PHY config parameters may be
 * ignored in an MFP mode as the PF may not have the privilege to set some
 * of the PHY Config parameters. This status will be indicated by the
 * command response.
 **/
enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
				struct i40e_aq_set_phy_config *config,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aq_set_phy_config *cmd =
			(struct i40e_aq_set_phy_config *)&desc.params.raw;
	enum i40e_status_code status;

	if (!config)
		return I40E_ERR_PARAM;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_phy_config);

	*cmd = *config;

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_set_fc
 * @hw: pointer to the hw struct
 * @aq_failures: buffer to return AdminQ failure information
 * @atomic_restart: whether to enable atomic link restart
 *
 * Set the requested flow control mode using set_phy_config.
 **/
enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
				  bool atomic_restart)
{
	enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config;
	enum i40e_status_code status;
	u8 pause_mask = 0x0;

	*aq_failures = 0x0;

	switch (fc_mode) {
	case I40E_FC_FULL:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_RX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
		break;
	case I40E_FC_TX_PAUSE:
		pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
		break;
	default:
		break;
	}

	/* Get the current phy config */
	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
					      NULL);
	if (status) {
		*aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
		return status;
	}

	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
	/* clear the old pause settings */
	config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
			   ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
	/* set the new abilities */
	config.abilities |= pause_mask;
	/* If the abilities have changed, then set the new config */
	if (config.abilities != abilities.abilities) {
		/* Auto restart link so settings take effect */
		if (atomic_restart)
			config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
		/* Copy over all the old settings */
		config.phy_type = abilities.phy_type;
		config.phy_type_ext = abilities.phy_type_ext;
		config.link_speed = abilities.link_speed;
		config.eee_capability = abilities.eee_capability;
		config.eeer = abilities.eeer_val;
		config.low_power_ctrl = abilities.d3_lpan;
		config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
				    I40E_AQ_PHY_FEC_CONFIG_MASK;
		status = i40e_aq_set_phy_config(hw, &config, NULL);

		if (status)
			*aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
	}
	/* Update the link info */
	status = i40e_update_link_info(hw);
	if (status) {
		/* Wait a little bit (on 40G cards it sometimes takes a really
		 * long time for link to come back from the atomic reset)
		 * and try once more
		 */
		msleep(1000);
		status = i40e_update_link_info(hw);
	}
	if (status)
I40E_SET_FC_AQ_FAIL_UPDATE; 1725 1726 return status; 1727 } 1728 1729 /** 1730 * i40e_aq_clear_pxe_mode 1731 * @hw: pointer to the hw struct 1732 * @cmd_details: pointer to command details structure or NULL 1733 * 1734 * Tell the firmware that the driver is taking over from PXE 1735 **/ 1736 i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw, 1737 struct i40e_asq_cmd_details *cmd_details) 1738 { 1739 i40e_status status; 1740 struct i40e_aq_desc desc; 1741 struct i40e_aqc_clear_pxe *cmd = 1742 (struct i40e_aqc_clear_pxe *)&desc.params.raw; 1743 1744 i40e_fill_default_direct_cmd_desc(&desc, 1745 i40e_aqc_opc_clear_pxe_mode); 1746 1747 cmd->rx_cnt = 0x2; 1748 1749 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1750 1751 wr32(hw, I40E_GLLAN_RCTL_0, 0x1); 1752 1753 return status; 1754 } 1755 1756 /** 1757 * i40e_aq_set_link_restart_an 1758 * @hw: pointer to the hw struct 1759 * @enable_link: if true: enable link, if false: disable link 1760 * @cmd_details: pointer to command details structure or NULL 1761 * 1762 * Sets up the link and restarts the Auto-Negotiation over the link. 1763 **/ 1764 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw, 1765 bool enable_link, 1766 struct i40e_asq_cmd_details *cmd_details) 1767 { 1768 struct i40e_aq_desc desc; 1769 struct i40e_aqc_set_link_restart_an *cmd = 1770 (struct i40e_aqc_set_link_restart_an *)&desc.params.raw; 1771 i40e_status status; 1772 1773 i40e_fill_default_direct_cmd_desc(&desc, 1774 i40e_aqc_opc_set_link_restart_an); 1775 1776 cmd->command = I40E_AQ_PHY_RESTART_AN; 1777 if (enable_link) 1778 cmd->command |= I40E_AQ_PHY_LINK_ENABLE; 1779 else 1780 cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE; 1781 1782 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1783 1784 return status; 1785 } 1786 1787 /** 1788 * i40e_aq_get_link_info 1789 * @hw: pointer to the hw struct 1790 * @enable_lse: enable/disable LinkStatusEvent reporting 1791 * @link: pointer to link status structure - optional 1792 * @cmd_details: pointer to command details structure or NULL 1793 * 1794 * Returns the link status of the adapter. 
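 *
 * A minimal usage sketch (hedged; the local variables are illustrative and
 * not part of this file):
 *
 *	struct i40e_link_status link;
 *	i40e_status err;
 *	bool link_up;
 *
 *	err = i40e_aq_get_link_info(hw, true, &link, NULL);
 *	if (!err)
 *		link_up = link.link_info & I40E_AQ_LINK_UP;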
1795 **/ 1796 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, 1797 bool enable_lse, struct i40e_link_status *link, 1798 struct i40e_asq_cmd_details *cmd_details) 1799 { 1800 struct i40e_aq_desc desc; 1801 struct i40e_aqc_get_link_status *resp = 1802 (struct i40e_aqc_get_link_status *)&desc.params.raw; 1803 struct i40e_link_status *hw_link_info = &hw->phy.link_info; 1804 i40e_status status; 1805 bool tx_pause, rx_pause; 1806 u16 command_flags; 1807 1808 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status); 1809 1810 if (enable_lse) 1811 command_flags = I40E_AQ_LSE_ENABLE; 1812 else 1813 command_flags = I40E_AQ_LSE_DISABLE; 1814 resp->command_flags = cpu_to_le16(command_flags); 1815 1816 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1817 1818 if (status) 1819 goto aq_get_link_info_exit; 1820 1821 /* save off old link status information */ 1822 hw->phy.link_info_old = *hw_link_info; 1823 1824 /* update link status */ 1825 hw_link_info->phy_type = (enum i40e_aq_phy_type)resp->phy_type; 1826 hw->phy.media_type = i40e_get_media_type(hw); 1827 hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed; 1828 hw_link_info->link_info = resp->link_info; 1829 hw_link_info->an_info = resp->an_info; 1830 hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | 1831 I40E_AQ_CONFIG_FEC_RS_ENA); 1832 hw_link_info->ext_info = resp->ext_info; 1833 hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; 1834 hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); 1835 hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; 1836 1837 /* update fc info */ 1838 tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX); 1839 rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX); 1840 if (tx_pause & rx_pause) 1841 hw->fc.current_mode = I40E_FC_FULL; 1842 else if (tx_pause) 1843 hw->fc.current_mode = I40E_FC_TX_PAUSE; 1844 else if (rx_pause) 1845 hw->fc.current_mode = I40E_FC_RX_PAUSE; 1846 else 1847 hw->fc.current_mode = I40E_FC_NONE; 1848 1849 if (resp->config & I40E_AQ_CONFIG_CRC_ENA) 1850 hw_link_info->crc_enable = true; 1851 else 1852 hw_link_info->crc_enable = false; 1853 1854 if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_IS_ENABLED)) 1855 hw_link_info->lse_enable = true; 1856 else 1857 hw_link_info->lse_enable = false; 1858 1859 if ((hw->mac.type == I40E_MAC_XL710) && 1860 (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 && 1861 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) 1862 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; 1863 1864 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && 1865 hw->aq.api_min_ver >= 7) { 1866 __le32 tmp; 1867 1868 memcpy(&tmp, resp->link_type, sizeof(tmp)); 1869 hw->phy.phy_types = le32_to_cpu(tmp); 1870 hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); 1871 } 1872 1873 /* save link status information */ 1874 if (link) 1875 *link = *hw_link_info; 1876 1877 /* flag cleared so helper functions don't call AQ again */ 1878 hw->phy.get_link_info = false; 1879 1880 aq_get_link_info_exit: 1881 return status; 1882 } 1883 1884 /** 1885 * i40e_aq_set_phy_int_mask 1886 * @hw: pointer to the hw struct 1887 * @mask: interrupt mask to be set 1888 * @cmd_details: pointer to command details structure or NULL 1889 * 1890 * Set link interrupt mask. 
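 *
 * Illustrative call (hedged; phy_event_mask is a placeholder value, and the
 * meaning of the individual event bits comes from the admin queue
 * definitions rather than from this file):
 *
 *	err = i40e_aq_set_phy_int_mask(hw, phy_event_mask, NULL);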
1891 **/ 1892 i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, 1893 u16 mask, 1894 struct i40e_asq_cmd_details *cmd_details) 1895 { 1896 struct i40e_aq_desc desc; 1897 struct i40e_aqc_set_phy_int_mask *cmd = 1898 (struct i40e_aqc_set_phy_int_mask *)&desc.params.raw; 1899 i40e_status status; 1900 1901 i40e_fill_default_direct_cmd_desc(&desc, 1902 i40e_aqc_opc_set_phy_int_mask); 1903 1904 cmd->event_mask = cpu_to_le16(mask); 1905 1906 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1907 1908 return status; 1909 } 1910 1911 /** 1912 * i40e_aq_set_phy_debug 1913 * @hw: pointer to the hw struct 1914 * @cmd_flags: debug command flags 1915 * @cmd_details: pointer to command details structure or NULL 1916 * 1917 * Reset the external PHY. 1918 **/ 1919 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, 1920 struct i40e_asq_cmd_details *cmd_details) 1921 { 1922 struct i40e_aq_desc desc; 1923 struct i40e_aqc_set_phy_debug *cmd = 1924 (struct i40e_aqc_set_phy_debug *)&desc.params.raw; 1925 i40e_status status; 1926 1927 i40e_fill_default_direct_cmd_desc(&desc, 1928 i40e_aqc_opc_set_phy_debug); 1929 1930 cmd->command_flags = cmd_flags; 1931 1932 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 1933 1934 return status; 1935 } 1936 1937 /** 1938 * i40e_aq_add_vsi 1939 * @hw: pointer to the hw struct 1940 * @vsi_ctx: pointer to a vsi context struct 1941 * @cmd_details: pointer to command details structure or NULL 1942 * 1943 * Add a VSI context to the hardware. 1944 **/ 1945 i40e_status i40e_aq_add_vsi(struct i40e_hw *hw, 1946 struct i40e_vsi_context *vsi_ctx, 1947 struct i40e_asq_cmd_details *cmd_details) 1948 { 1949 struct i40e_aq_desc desc; 1950 struct i40e_aqc_add_get_update_vsi *cmd = 1951 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 1952 struct i40e_aqc_add_get_update_vsi_completion *resp = 1953 (struct i40e_aqc_add_get_update_vsi_completion *) 1954 &desc.params.raw; 1955 i40e_status status; 1956 1957 i40e_fill_default_direct_cmd_desc(&desc, 1958 i40e_aqc_opc_add_vsi); 1959 1960 cmd->uplink_seid = cpu_to_le16(vsi_ctx->uplink_seid); 1961 cmd->connection_type = vsi_ctx->connection_type; 1962 cmd->vf_id = vsi_ctx->vf_num; 1963 cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags); 1964 1965 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 1966 1967 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 1968 sizeof(vsi_ctx->info), cmd_details); 1969 1970 if (status) 1971 goto aq_add_vsi_exit; 1972 1973 vsi_ctx->seid = le16_to_cpu(resp->seid); 1974 vsi_ctx->vsi_number = le16_to_cpu(resp->vsi_number); 1975 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 1976 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 1977 1978 aq_add_vsi_exit: 1979 return status; 1980 } 1981 1982 /** 1983 * i40e_aq_set_default_vsi 1984 * @hw: pointer to the hw struct 1985 * @seid: vsi number 1986 * @cmd_details: pointer to command details structure or NULL 1987 **/ 1988 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, 1989 u16 seid, 1990 struct i40e_asq_cmd_details *cmd_details) 1991 { 1992 struct i40e_aq_desc desc; 1993 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 1994 (struct i40e_aqc_set_vsi_promiscuous_modes *) 1995 &desc.params.raw; 1996 i40e_status status; 1997 1998 i40e_fill_default_direct_cmd_desc(&desc, 1999 i40e_aqc_opc_set_vsi_promiscuous_modes); 2000 2001 cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2002 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2003 cmd->seid = 
cpu_to_le16(seid); 2004 2005 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2006 2007 return status; 2008 } 2009 2010 /** 2011 * i40e_aq_clear_default_vsi 2012 * @hw: pointer to the hw struct 2013 * @seid: vsi number 2014 * @cmd_details: pointer to command details structure or NULL 2015 **/ 2016 i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, 2017 u16 seid, 2018 struct i40e_asq_cmd_details *cmd_details) 2019 { 2020 struct i40e_aq_desc desc; 2021 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2022 (struct i40e_aqc_set_vsi_promiscuous_modes *) 2023 &desc.params.raw; 2024 i40e_status status; 2025 2026 i40e_fill_default_direct_cmd_desc(&desc, 2027 i40e_aqc_opc_set_vsi_promiscuous_modes); 2028 2029 cmd->promiscuous_flags = cpu_to_le16(0); 2030 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT); 2031 cmd->seid = cpu_to_le16(seid); 2032 2033 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2034 2035 return status; 2036 } 2037 2038 /** 2039 * i40e_aq_set_vsi_unicast_promiscuous 2040 * @hw: pointer to the hw struct 2041 * @seid: vsi number 2042 * @set: set unicast promiscuous enable/disable 2043 * @cmd_details: pointer to command details structure or NULL 2044 * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc 2045 **/ 2046 i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, 2047 u16 seid, bool set, 2048 struct i40e_asq_cmd_details *cmd_details, 2049 bool rx_only_promisc) 2050 { 2051 struct i40e_aq_desc desc; 2052 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2053 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2054 i40e_status status; 2055 u16 flags = 0; 2056 2057 i40e_fill_default_direct_cmd_desc(&desc, 2058 i40e_aqc_opc_set_vsi_promiscuous_modes); 2059 2060 if (set) { 2061 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; 2062 if (rx_only_promisc && 2063 (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || 2064 (hw->aq.api_maj_ver > 1))) 2065 flags |= I40E_AQC_SET_VSI_PROMISC_TX; 2066 } 2067 2068 cmd->promiscuous_flags = cpu_to_le16(flags); 2069 2070 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); 2071 if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || 2072 (hw->aq.api_maj_ver > 1)) 2073 cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); 2074 2075 cmd->seid = cpu_to_le16(seid); 2076 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2077 2078 return status; 2079 } 2080 2081 /** 2082 * i40e_aq_set_vsi_multicast_promiscuous 2083 * @hw: pointer to the hw struct 2084 * @seid: vsi number 2085 * @set: set multicast promiscuous enable/disable 2086 * @cmd_details: pointer to command details structure or NULL 2087 **/ 2088 i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, 2089 u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details) 2090 { 2091 struct i40e_aq_desc desc; 2092 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2093 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2094 i40e_status status; 2095 u16 flags = 0; 2096 2097 i40e_fill_default_direct_cmd_desc(&desc, 2098 i40e_aqc_opc_set_vsi_promiscuous_modes); 2099 2100 if (set) 2101 flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; 2102 2103 cmd->promiscuous_flags = cpu_to_le16(flags); 2104 2105 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); 2106 2107 cmd->seid = cpu_to_le16(seid); 2108 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2109 2110 return status; 2111 } 2112 2113 /** 2114 * 
 * i40e_aq_set_vsi_mc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
						u16 seid, bool enable,
						u16 vid,
						struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	enum i40e_status_code status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_uc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
						u16 seid, bool enable,
						u16 vid,
						struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	enum i40e_status_code status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);

	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

	return status;
}

/**
 * i40e_aq_set_vsi_bc_promisc_on_vlan
 * @hw: pointer to the hw struct
 * @seid: vsi number
 * @enable: set broadcast promiscuous enable/disable for a given VLAN
 * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
 * @cmd_details: pointer to command details structure or NULL
 **/
i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
				u16 seid, bool enable, u16 vid,
				struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
		(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
	i40e_status status;
	u16 flags = 0;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_vsi_promiscuous_modes);

	if (enable)
		flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;

	cmd->promiscuous_flags = cpu_to_le16(flags);
	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
	cmd->seid = cpu_to_le16(seid);
	cmd->vlan_tag =
cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); 2211 2212 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2213 2214 return status; 2215 } 2216 2217 /** 2218 * i40e_aq_set_vsi_broadcast 2219 * @hw: pointer to the hw struct 2220 * @seid: vsi number 2221 * @set_filter: true to set filter, false to clear filter 2222 * @cmd_details: pointer to command details structure or NULL 2223 * 2224 * Set or clear the broadcast promiscuous flag (filter) for a given VSI. 2225 **/ 2226 i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, 2227 u16 seid, bool set_filter, 2228 struct i40e_asq_cmd_details *cmd_details) 2229 { 2230 struct i40e_aq_desc desc; 2231 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2232 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2233 i40e_status status; 2234 2235 i40e_fill_default_direct_cmd_desc(&desc, 2236 i40e_aqc_opc_set_vsi_promiscuous_modes); 2237 2238 if (set_filter) 2239 cmd->promiscuous_flags 2240 |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2241 else 2242 cmd->promiscuous_flags 2243 &= cpu_to_le16(~I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2244 2245 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_BROADCAST); 2246 cmd->seid = cpu_to_le16(seid); 2247 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2248 2249 return status; 2250 } 2251 2252 /** 2253 * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting 2254 * @hw: pointer to the hw struct 2255 * @seid: vsi number 2256 * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN 2257 * @cmd_details: pointer to command details structure or NULL 2258 **/ 2259 i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, 2260 u16 seid, bool enable, 2261 struct i40e_asq_cmd_details *cmd_details) 2262 { 2263 struct i40e_aq_desc desc; 2264 struct i40e_aqc_set_vsi_promiscuous_modes *cmd = 2265 (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; 2266 i40e_status status; 2267 u16 flags = 0; 2268 2269 i40e_fill_default_direct_cmd_desc(&desc, 2270 i40e_aqc_opc_set_vsi_promiscuous_modes); 2271 if (enable) 2272 flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; 2273 2274 cmd->promiscuous_flags = cpu_to_le16(flags); 2275 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN); 2276 cmd->seid = cpu_to_le16(seid); 2277 2278 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2279 2280 return status; 2281 } 2282 2283 /** 2284 * i40e_get_vsi_params - get VSI configuration info 2285 * @hw: pointer to the hw struct 2286 * @vsi_ctx: pointer to a vsi context struct 2287 * @cmd_details: pointer to command details structure or NULL 2288 **/ 2289 i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, 2290 struct i40e_vsi_context *vsi_ctx, 2291 struct i40e_asq_cmd_details *cmd_details) 2292 { 2293 struct i40e_aq_desc desc; 2294 struct i40e_aqc_add_get_update_vsi *cmd = 2295 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2296 struct i40e_aqc_add_get_update_vsi_completion *resp = 2297 (struct i40e_aqc_add_get_update_vsi_completion *) 2298 &desc.params.raw; 2299 i40e_status status; 2300 2301 i40e_fill_default_direct_cmd_desc(&desc, 2302 i40e_aqc_opc_get_vsi_parameters); 2303 2304 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 2305 2306 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 2307 2308 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 2309 sizeof(vsi_ctx->info), NULL); 2310 2311 if (status) 2312 goto aq_get_vsi_params_exit; 2313 2314 vsi_ctx->seid = le16_to_cpu(resp->seid); 2315 vsi_ctx->vsi_number = 
le16_to_cpu(resp->vsi_number); 2316 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2317 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2318 2319 aq_get_vsi_params_exit: 2320 return status; 2321 } 2322 2323 /** 2324 * i40e_aq_update_vsi_params 2325 * @hw: pointer to the hw struct 2326 * @vsi_ctx: pointer to a vsi context struct 2327 * @cmd_details: pointer to command details structure or NULL 2328 * 2329 * Update a VSI context. 2330 **/ 2331 i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, 2332 struct i40e_vsi_context *vsi_ctx, 2333 struct i40e_asq_cmd_details *cmd_details) 2334 { 2335 struct i40e_aq_desc desc; 2336 struct i40e_aqc_add_get_update_vsi *cmd = 2337 (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; 2338 struct i40e_aqc_add_get_update_vsi_completion *resp = 2339 (struct i40e_aqc_add_get_update_vsi_completion *) 2340 &desc.params.raw; 2341 i40e_status status; 2342 2343 i40e_fill_default_direct_cmd_desc(&desc, 2344 i40e_aqc_opc_update_vsi_parameters); 2345 cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid); 2346 2347 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2348 2349 status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, 2350 sizeof(vsi_ctx->info), cmd_details); 2351 2352 vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); 2353 vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); 2354 2355 return status; 2356 } 2357 2358 /** 2359 * i40e_aq_get_switch_config 2360 * @hw: pointer to the hardware structure 2361 * @buf: pointer to the result buffer 2362 * @buf_size: length of input buffer 2363 * @start_seid: seid to start for the report, 0 == beginning 2364 * @cmd_details: pointer to command details structure or NULL 2365 * 2366 * Fill the buf with switch configuration returned from AdminQ command 2367 **/ 2368 i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, 2369 struct i40e_aqc_get_switch_config_resp *buf, 2370 u16 buf_size, u16 *start_seid, 2371 struct i40e_asq_cmd_details *cmd_details) 2372 { 2373 struct i40e_aq_desc desc; 2374 struct i40e_aqc_switch_seid *scfg = 2375 (struct i40e_aqc_switch_seid *)&desc.params.raw; 2376 i40e_status status; 2377 2378 i40e_fill_default_direct_cmd_desc(&desc, 2379 i40e_aqc_opc_get_switch_config); 2380 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 2381 if (buf_size > I40E_AQ_LARGE_BUF) 2382 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2383 scfg->seid = cpu_to_le16(*start_seid); 2384 2385 status = i40e_asq_send_command(hw, &desc, buf, buf_size, cmd_details); 2386 *start_seid = le16_to_cpu(scfg->seid); 2387 2388 return status; 2389 } 2390 2391 /** 2392 * i40e_aq_set_switch_config 2393 * @hw: pointer to the hardware structure 2394 * @flags: bit flag values to set 2395 * @mode: cloud filter mode 2396 * @valid_flags: which bit flags to set 2397 * @mode: cloud filter mode 2398 * @cmd_details: pointer to command details structure or NULL 2399 * 2400 * Set switch configuration bits 2401 **/ 2402 enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, 2403 u16 flags, 2404 u16 valid_flags, u8 mode, 2405 struct i40e_asq_cmd_details *cmd_details) 2406 { 2407 struct i40e_aq_desc desc; 2408 struct i40e_aqc_set_switch_config *scfg = 2409 (struct i40e_aqc_set_switch_config *)&desc.params.raw; 2410 enum i40e_status_code status; 2411 2412 i40e_fill_default_direct_cmd_desc(&desc, 2413 i40e_aqc_opc_set_switch_config); 2414 scfg->flags = cpu_to_le16(flags); 2415 scfg->valid_flags = cpu_to_le16(valid_flags); 2416 scfg->mode = mode; 2417 if (hw->flags & 
I40E_HW_FLAG_802_1AD_CAPABLE) { 2418 scfg->switch_tag = cpu_to_le16(hw->switch_tag); 2419 scfg->first_tag = cpu_to_le16(hw->first_tag); 2420 scfg->second_tag = cpu_to_le16(hw->second_tag); 2421 } 2422 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2423 2424 return status; 2425 } 2426 2427 /** 2428 * i40e_aq_get_firmware_version 2429 * @hw: pointer to the hw struct 2430 * @fw_major_version: firmware major version 2431 * @fw_minor_version: firmware minor version 2432 * @fw_build: firmware build number 2433 * @api_major_version: major queue version 2434 * @api_minor_version: minor queue version 2435 * @cmd_details: pointer to command details structure or NULL 2436 * 2437 * Get the firmware version from the admin queue commands 2438 **/ 2439 i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw, 2440 u16 *fw_major_version, u16 *fw_minor_version, 2441 u32 *fw_build, 2442 u16 *api_major_version, u16 *api_minor_version, 2443 struct i40e_asq_cmd_details *cmd_details) 2444 { 2445 struct i40e_aq_desc desc; 2446 struct i40e_aqc_get_version *resp = 2447 (struct i40e_aqc_get_version *)&desc.params.raw; 2448 i40e_status status; 2449 2450 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version); 2451 2452 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2453 2454 if (!status) { 2455 if (fw_major_version) 2456 *fw_major_version = le16_to_cpu(resp->fw_major); 2457 if (fw_minor_version) 2458 *fw_minor_version = le16_to_cpu(resp->fw_minor); 2459 if (fw_build) 2460 *fw_build = le32_to_cpu(resp->fw_build); 2461 if (api_major_version) 2462 *api_major_version = le16_to_cpu(resp->api_major); 2463 if (api_minor_version) 2464 *api_minor_version = le16_to_cpu(resp->api_minor); 2465 } 2466 2467 return status; 2468 } 2469 2470 /** 2471 * i40e_aq_send_driver_version 2472 * @hw: pointer to the hw struct 2473 * @dv: driver's major, minor version 2474 * @cmd_details: pointer to command details structure or NULL 2475 * 2476 * Send the driver version to the firmware 2477 **/ 2478 i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw, 2479 struct i40e_driver_version *dv, 2480 struct i40e_asq_cmd_details *cmd_details) 2481 { 2482 struct i40e_aq_desc desc; 2483 struct i40e_aqc_driver_version *cmd = 2484 (struct i40e_aqc_driver_version *)&desc.params.raw; 2485 i40e_status status; 2486 u16 len; 2487 2488 if (dv == NULL) 2489 return I40E_ERR_PARAM; 2490 2491 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_driver_version); 2492 2493 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 2494 cmd->driver_major_ver = dv->major_version; 2495 cmd->driver_minor_ver = dv->minor_version; 2496 cmd->driver_build_ver = dv->build_version; 2497 cmd->driver_subbuild_ver = dv->subbuild_version; 2498 2499 len = 0; 2500 while (len < sizeof(dv->driver_string) && 2501 (dv->driver_string[len] < 0x80) && 2502 dv->driver_string[len]) 2503 len++; 2504 status = i40e_asq_send_command(hw, &desc, dv->driver_string, 2505 len, cmd_details); 2506 2507 return status; 2508 } 2509 2510 /** 2511 * i40e_get_link_status - get status of the HW network link 2512 * @hw: pointer to the hw struct 2513 * @link_up: pointer to bool (true/false = linkup/linkdown) 2514 * 2515 * Variable link_up true if link is up, false if link is down. 
 * The variable link_up is invalid if returned value of status != 0
 *
 * Side effect: LinkStatusEvent reporting becomes enabled
 **/
i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
{
	i40e_status status = 0;

	if (hw->phy.get_link_info) {
		status = i40e_update_link_info(hw);

		if (status)
			i40e_debug(hw, I40E_DEBUG_LINK, "get link failed: status %d\n",
				   status);
	}

	*link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;

	return status;
}

/**
 * i40e_update_link_info - update status of the HW network link
 * @hw: pointer to the hw struct
 **/
i40e_status i40e_update_link_info(struct i40e_hw *hw)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	i40e_status status = 0;

	status = i40e_aq_get_link_info(hw, true, NULL, NULL);
	if (status)
		return status;

	/* extra checking needed to ensure link info to user is timely */
	if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
	     !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
		status = i40e_aq_get_phy_capabilities(hw, false, false,
						      &abilities, NULL);
		if (status)
			return status;

		hw->phy.link_info.req_fec_info =
			abilities.fec_cfg_curr_mod_ext_info &
			(I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);

		memcpy(hw->phy.link_info.module_type, &abilities.module_type,
		       sizeof(hw->phy.link_info.module_type));
	}

	return status;
}

/**
 * i40e_aq_add_veb - Insert a VEB between the VSI and the MAC
 * @hw: pointer to the hw struct
 * @uplink_seid: the MAC or other gizmo SEID
 * @downlink_seid: the VSI SEID
 * @enabled_tc: bitmap of TCs to be enabled
 * @default_port: true for default port VSI, false for control port
 * @veb_seid: pointer to where to put the resulting VEB SEID
 * @enable_stats: true to turn on VEB stats
 * @cmd_details: pointer to command details structure or NULL
 *
 * This asks the FW to add a VEB between the uplink and downlink
 * elements. If the uplink SEID is 0, this will be a floating VEB.
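 *
 * Hedged example (uplink_seid and vsi_seid are assumed to have been learned
 * from a prior switch configuration query; TC bitmap 0x1 selects TC0 only):
 *
 *	u16 veb_seid = 0;
 *
 *	err = i40e_aq_add_veb(hw, uplink_seid, vsi_seid, 0x1,
 *			      true, &veb_seid, false, NULL);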
2583 **/ 2584 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, 2585 u16 downlink_seid, u8 enabled_tc, 2586 bool default_port, u16 *veb_seid, 2587 bool enable_stats, 2588 struct i40e_asq_cmd_details *cmd_details) 2589 { 2590 struct i40e_aq_desc desc; 2591 struct i40e_aqc_add_veb *cmd = 2592 (struct i40e_aqc_add_veb *)&desc.params.raw; 2593 struct i40e_aqc_add_veb_completion *resp = 2594 (struct i40e_aqc_add_veb_completion *)&desc.params.raw; 2595 i40e_status status; 2596 u16 veb_flags = 0; 2597 2598 /* SEIDs need to either both be set or both be 0 for floating VEB */ 2599 if (!!uplink_seid != !!downlink_seid) 2600 return I40E_ERR_PARAM; 2601 2602 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_veb); 2603 2604 cmd->uplink_seid = cpu_to_le16(uplink_seid); 2605 cmd->downlink_seid = cpu_to_le16(downlink_seid); 2606 cmd->enable_tcs = enabled_tc; 2607 if (!uplink_seid) 2608 veb_flags |= I40E_AQC_ADD_VEB_FLOATING; 2609 if (default_port) 2610 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT; 2611 else 2612 veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; 2613 2614 /* reverse logic here: set the bitflag to disable the stats */ 2615 if (!enable_stats) 2616 veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; 2617 2618 cmd->veb_flags = cpu_to_le16(veb_flags); 2619 2620 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2621 2622 if (!status && veb_seid) 2623 *veb_seid = le16_to_cpu(resp->veb_seid); 2624 2625 return status; 2626 } 2627 2628 /** 2629 * i40e_aq_get_veb_parameters - Retrieve VEB parameters 2630 * @hw: pointer to the hw struct 2631 * @veb_seid: the SEID of the VEB to query 2632 * @switch_id: the uplink switch id 2633 * @floating: set to true if the VEB is floating 2634 * @statistic_index: index of the stats counter block for this VEB 2635 * @vebs_used: number of VEB's used by function 2636 * @vebs_free: total VEB's not reserved by any function 2637 * @cmd_details: pointer to command details structure or NULL 2638 * 2639 * This retrieves the parameters for a particular VEB, specified by 2640 * uplink_seid, and returns them to the caller. 
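 *
 * A hedged sketch; any output pointer that is not needed may be passed as
 * NULL, since each one is checked before being written:
 *
 *	u16 stat_idx, vebs_used, vebs_free;
 *	bool floating;
 *
 *	err = i40e_aq_get_veb_parameters(hw, veb_seid, NULL, &floating,
 *					 &stat_idx, &vebs_used, &vebs_free,
 *					 NULL);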
2641 **/ 2642 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, 2643 u16 veb_seid, u16 *switch_id, 2644 bool *floating, u16 *statistic_index, 2645 u16 *vebs_used, u16 *vebs_free, 2646 struct i40e_asq_cmd_details *cmd_details) 2647 { 2648 struct i40e_aq_desc desc; 2649 struct i40e_aqc_get_veb_parameters_completion *cmd_resp = 2650 (struct i40e_aqc_get_veb_parameters_completion *) 2651 &desc.params.raw; 2652 i40e_status status; 2653 2654 if (veb_seid == 0) 2655 return I40E_ERR_PARAM; 2656 2657 i40e_fill_default_direct_cmd_desc(&desc, 2658 i40e_aqc_opc_get_veb_parameters); 2659 cmd_resp->seid = cpu_to_le16(veb_seid); 2660 2661 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2662 if (status) 2663 goto get_veb_exit; 2664 2665 if (switch_id) 2666 *switch_id = le16_to_cpu(cmd_resp->switch_id); 2667 if (statistic_index) 2668 *statistic_index = le16_to_cpu(cmd_resp->statistic_index); 2669 if (vebs_used) 2670 *vebs_used = le16_to_cpu(cmd_resp->vebs_used); 2671 if (vebs_free) 2672 *vebs_free = le16_to_cpu(cmd_resp->vebs_free); 2673 if (floating) { 2674 u16 flags = le16_to_cpu(cmd_resp->veb_flags); 2675 2676 if (flags & I40E_AQC_ADD_VEB_FLOATING) 2677 *floating = true; 2678 else 2679 *floating = false; 2680 } 2681 2682 get_veb_exit: 2683 return status; 2684 } 2685 2686 /** 2687 * i40e_aq_add_macvlan 2688 * @hw: pointer to the hw struct 2689 * @seid: VSI for the mac address 2690 * @mv_list: list of macvlans to be added 2691 * @count: length of the list 2692 * @cmd_details: pointer to command details structure or NULL 2693 * 2694 * Add MAC/VLAN addresses to the HW filtering 2695 **/ 2696 i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, 2697 struct i40e_aqc_add_macvlan_element_data *mv_list, 2698 u16 count, struct i40e_asq_cmd_details *cmd_details) 2699 { 2700 struct i40e_aq_desc desc; 2701 struct i40e_aqc_macvlan *cmd = 2702 (struct i40e_aqc_macvlan *)&desc.params.raw; 2703 i40e_status status; 2704 u16 buf_size; 2705 int i; 2706 2707 if (count == 0 || !mv_list || !hw) 2708 return I40E_ERR_PARAM; 2709 2710 buf_size = count * sizeof(*mv_list); 2711 2712 /* prep the rest of the request */ 2713 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_macvlan); 2714 cmd->num_addresses = cpu_to_le16(count); 2715 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2716 cmd->seid[1] = 0; 2717 cmd->seid[2] = 0; 2718 2719 for (i = 0; i < count; i++) 2720 if (is_multicast_ether_addr(mv_list[i].mac_addr)) 2721 mv_list[i].flags |= 2722 cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); 2723 2724 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2725 if (buf_size > I40E_AQ_LARGE_BUF) 2726 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2727 2728 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, 2729 cmd_details); 2730 2731 return status; 2732 } 2733 2734 /** 2735 * i40e_aq_remove_macvlan 2736 * @hw: pointer to the hw struct 2737 * @seid: VSI for the mac address 2738 * @mv_list: list of macvlans to be removed 2739 * @count: length of the list 2740 * @cmd_details: pointer to command details structure or NULL 2741 * 2742 * Remove MAC/VLAN addresses from the HW filtering 2743 **/ 2744 i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, 2745 struct i40e_aqc_remove_macvlan_element_data *mv_list, 2746 u16 count, struct i40e_asq_cmd_details *cmd_details) 2747 { 2748 struct i40e_aq_desc desc; 2749 struct i40e_aqc_macvlan *cmd = 2750 (struct i40e_aqc_macvlan *)&desc.params.raw; 2751 i40e_status status; 2752 u16 buf_size; 2753 
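	/* an empty or missing element list cannot be turned into an
	 * indirect buffer for the firmware, so such calls fail early
	 */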
2754 if (count == 0 || !mv_list || !hw) 2755 return I40E_ERR_PARAM; 2756 2757 buf_size = count * sizeof(*mv_list); 2758 2759 /* prep the rest of the request */ 2760 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_remove_macvlan); 2761 cmd->num_addresses = cpu_to_le16(count); 2762 cmd->seid[0] = cpu_to_le16(I40E_AQC_MACVLAN_CMD_SEID_VALID | seid); 2763 cmd->seid[1] = 0; 2764 cmd->seid[2] = 0; 2765 2766 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 2767 if (buf_size > I40E_AQ_LARGE_BUF) 2768 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2769 2770 status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, 2771 cmd_details); 2772 2773 return status; 2774 } 2775 2776 /** 2777 * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule 2778 * @hw: pointer to the hw struct 2779 * @opcode: AQ opcode for add or delete mirror rule 2780 * @sw_seid: Switch SEID (to which rule refers) 2781 * @rule_type: Rule Type (ingress/egress/VLAN) 2782 * @id: Destination VSI SEID or Rule ID 2783 * @count: length of the list 2784 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2785 * @cmd_details: pointer to command details structure or NULL 2786 * @rule_id: Rule ID returned from FW 2787 * @rules_used: Number of rules used in internal switch 2788 * @rules_free: Number of rules free in internal switch 2789 * 2790 * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for 2791 * VEBs/VEPA elements only 2792 **/ 2793 static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, 2794 u16 opcode, u16 sw_seid, u16 rule_type, u16 id, 2795 u16 count, __le16 *mr_list, 2796 struct i40e_asq_cmd_details *cmd_details, 2797 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2798 { 2799 struct i40e_aq_desc desc; 2800 struct i40e_aqc_add_delete_mirror_rule *cmd = 2801 (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; 2802 struct i40e_aqc_add_delete_mirror_rule_completion *resp = 2803 (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; 2804 i40e_status status; 2805 u16 buf_size; 2806 2807 buf_size = count * sizeof(*mr_list); 2808 2809 /* prep the rest of the request */ 2810 i40e_fill_default_direct_cmd_desc(&desc, opcode); 2811 cmd->seid = cpu_to_le16(sw_seid); 2812 cmd->rule_type = cpu_to_le16(rule_type & 2813 I40E_AQC_MIRROR_RULE_TYPE_MASK); 2814 cmd->num_entries = cpu_to_le16(count); 2815 /* Dest VSI for add, rule_id for delete */ 2816 cmd->destination = cpu_to_le16(id); 2817 if (mr_list) { 2818 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2819 I40E_AQ_FLAG_RD)); 2820 if (buf_size > I40E_AQ_LARGE_BUF) 2821 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2822 } 2823 2824 status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, 2825 cmd_details); 2826 if (!status || 2827 hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { 2828 if (rule_id) 2829 *rule_id = le16_to_cpu(resp->rule_id); 2830 if (rules_used) 2831 *rules_used = le16_to_cpu(resp->mirror_rules_used); 2832 if (rules_free) 2833 *rules_free = le16_to_cpu(resp->mirror_rules_free); 2834 } 2835 return status; 2836 } 2837 2838 /** 2839 * i40e_aq_add_mirrorrule - add a mirror rule 2840 * @hw: pointer to the hw struct 2841 * @sw_seid: Switch SEID (to which rule refers) 2842 * @rule_type: Rule Type (ingress/egress/VLAN) 2843 * @dest_vsi: SEID of VSI to which packets will be mirrored 2844 * @count: length of the list 2845 * @mr_list: list of mirrored VSI SEIDs or VLAN IDs 2846 * @cmd_details: pointer to command details structure or NULL 2847 * @rule_id: Rule ID returned from FW 2848 * 
@rules_used: Number of rules used in internal switch 2849 * @rules_free: Number of rules free in internal switch 2850 * 2851 * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only 2852 **/ 2853 i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2854 u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, 2855 struct i40e_asq_cmd_details *cmd_details, 2856 u16 *rule_id, u16 *rules_used, u16 *rules_free) 2857 { 2858 if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || 2859 rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { 2860 if (count == 0 || !mr_list) 2861 return I40E_ERR_PARAM; 2862 } 2863 2864 return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, 2865 rule_type, dest_vsi, count, mr_list, 2866 cmd_details, rule_id, rules_used, rules_free); 2867 } 2868 2869 /** 2870 * i40e_aq_delete_mirrorrule - delete a mirror rule 2871 * @hw: pointer to the hw struct 2872 * @sw_seid: Switch SEID (to which rule refers) 2873 * @rule_type: Rule Type (ingress/egress/VLAN) 2874 * @count: length of the list 2875 * @rule_id: Rule ID that is returned in the receive desc as part of 2876 * add_mirrorrule. 2877 * @mr_list: list of mirrored VLAN IDs to be removed 2878 * @cmd_details: pointer to command details structure or NULL 2879 * @rules_used: Number of rules used in internal switch 2880 * @rules_free: Number of rules free in internal switch 2881 * 2882 * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only 2883 **/ 2884 i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, 2885 u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, 2886 struct i40e_asq_cmd_details *cmd_details, 2887 u16 *rules_used, u16 *rules_free) 2888 { 2889 /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ 2890 if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { 2891 /* count and mr_list shall be valid for rule_type INGRESS VLAN 2892 * mirroring. For other rule_type, count and rule_type should 2893 * not matter. 
2894 */ 2895 if (count == 0 || !mr_list) 2896 return I40E_ERR_PARAM; 2897 } 2898 2899 return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, 2900 rule_type, rule_id, count, mr_list, 2901 cmd_details, NULL, rules_used, rules_free); 2902 } 2903 2904 /** 2905 * i40e_aq_send_msg_to_vf 2906 * @hw: pointer to the hardware structure 2907 * @vfid: VF id to send msg 2908 * @v_opcode: opcodes for VF-PF communication 2909 * @v_retval: return error code 2910 * @msg: pointer to the msg buffer 2911 * @msglen: msg length 2912 * @cmd_details: pointer to command details 2913 * 2914 * send msg to vf 2915 **/ 2916 i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, 2917 u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, 2918 struct i40e_asq_cmd_details *cmd_details) 2919 { 2920 struct i40e_aq_desc desc; 2921 struct i40e_aqc_pf_vf_message *cmd = 2922 (struct i40e_aqc_pf_vf_message *)&desc.params.raw; 2923 i40e_status status; 2924 2925 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf); 2926 cmd->id = cpu_to_le32(vfid); 2927 desc.cookie_high = cpu_to_le32(v_opcode); 2928 desc.cookie_low = cpu_to_le32(v_retval); 2929 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_SI); 2930 if (msglen) { 2931 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 2932 I40E_AQ_FLAG_RD)); 2933 if (msglen > I40E_AQ_LARGE_BUF) 2934 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 2935 desc.datalen = cpu_to_le16(msglen); 2936 } 2937 status = i40e_asq_send_command(hw, &desc, msg, msglen, cmd_details); 2938 2939 return status; 2940 } 2941 2942 /** 2943 * i40e_aq_debug_read_register 2944 * @hw: pointer to the hw struct 2945 * @reg_addr: register address 2946 * @reg_val: register value 2947 * @cmd_details: pointer to command details structure or NULL 2948 * 2949 * Read the register using the admin queue commands 2950 **/ 2951 i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw, 2952 u32 reg_addr, u64 *reg_val, 2953 struct i40e_asq_cmd_details *cmd_details) 2954 { 2955 struct i40e_aq_desc desc; 2956 struct i40e_aqc_debug_reg_read_write *cmd_resp = 2957 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 2958 i40e_status status; 2959 2960 if (reg_val == NULL) 2961 return I40E_ERR_PARAM; 2962 2963 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_read_reg); 2964 2965 cmd_resp->address = cpu_to_le32(reg_addr); 2966 2967 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2968 2969 if (!status) { 2970 *reg_val = ((u64)le32_to_cpu(cmd_resp->value_high) << 32) | 2971 (u64)le32_to_cpu(cmd_resp->value_low); 2972 } 2973 2974 return status; 2975 } 2976 2977 /** 2978 * i40e_aq_debug_write_register 2979 * @hw: pointer to the hw struct 2980 * @reg_addr: register address 2981 * @reg_val: register value 2982 * @cmd_details: pointer to command details structure or NULL 2983 * 2984 * Write to a register using the admin queue commands 2985 **/ 2986 i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, 2987 u32 reg_addr, u64 reg_val, 2988 struct i40e_asq_cmd_details *cmd_details) 2989 { 2990 struct i40e_aq_desc desc; 2991 struct i40e_aqc_debug_reg_read_write *cmd = 2992 (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw; 2993 i40e_status status; 2994 2995 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg); 2996 2997 cmd->address = cpu_to_le32(reg_addr); 2998 cmd->value_high = cpu_to_le32((u32)(reg_val >> 32)); 2999 cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF)); 3000 3001 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 
3002 3003 return status; 3004 } 3005 3006 /** 3007 * i40e_aq_request_resource 3008 * @hw: pointer to the hw struct 3009 * @resource: resource id 3010 * @access: access type 3011 * @sdp_number: resource number 3012 * @timeout: the maximum time in ms that the driver may hold the resource 3013 * @cmd_details: pointer to command details structure or NULL 3014 * 3015 * requests common resource using the admin queue commands 3016 **/ 3017 i40e_status i40e_aq_request_resource(struct i40e_hw *hw, 3018 enum i40e_aq_resources_ids resource, 3019 enum i40e_aq_resource_access_type access, 3020 u8 sdp_number, u64 *timeout, 3021 struct i40e_asq_cmd_details *cmd_details) 3022 { 3023 struct i40e_aq_desc desc; 3024 struct i40e_aqc_request_resource *cmd_resp = 3025 (struct i40e_aqc_request_resource *)&desc.params.raw; 3026 i40e_status status; 3027 3028 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource); 3029 3030 cmd_resp->resource_id = cpu_to_le16(resource); 3031 cmd_resp->access_type = cpu_to_le16(access); 3032 cmd_resp->resource_number = cpu_to_le32(sdp_number); 3033 3034 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3035 /* The completion specifies the maximum time in ms that the driver 3036 * may hold the resource in the Timeout field. 3037 * If the resource is held by someone else, the command completes with 3038 * busy return value and the timeout field indicates the maximum time 3039 * the current owner of the resource has to free it. 3040 */ 3041 if (!status || hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) 3042 *timeout = le32_to_cpu(cmd_resp->timeout); 3043 3044 return status; 3045 } 3046 3047 /** 3048 * i40e_aq_release_resource 3049 * @hw: pointer to the hw struct 3050 * @resource: resource id 3051 * @sdp_number: resource number 3052 * @cmd_details: pointer to command details structure or NULL 3053 * 3054 * release common resource using the admin queue commands 3055 **/ 3056 i40e_status i40e_aq_release_resource(struct i40e_hw *hw, 3057 enum i40e_aq_resources_ids resource, 3058 u8 sdp_number, 3059 struct i40e_asq_cmd_details *cmd_details) 3060 { 3061 struct i40e_aq_desc desc; 3062 struct i40e_aqc_request_resource *cmd = 3063 (struct i40e_aqc_request_resource *)&desc.params.raw; 3064 i40e_status status; 3065 3066 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource); 3067 3068 cmd->resource_id = cpu_to_le16(resource); 3069 cmd->resource_number = cpu_to_le32(sdp_number); 3070 3071 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3072 3073 return status; 3074 } 3075 3076 /** 3077 * i40e_aq_read_nvm 3078 * @hw: pointer to the hw struct 3079 * @module_pointer: module pointer location in words from the NVM beginning 3080 * @offset: byte offset from the module beginning 3081 * @length: length of the section to be read (in bytes from the offset) 3082 * @data: command buffer (size [bytes] = length) 3083 * @last_command: tells if this is the last command in a series 3084 * @cmd_details: pointer to command details structure or NULL 3085 * 3086 * Read the NVM using the admin queue commands 3087 **/ 3088 i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer, 3089 u32 offset, u16 length, void *data, 3090 bool last_command, 3091 struct i40e_asq_cmd_details *cmd_details) 3092 { 3093 struct i40e_aq_desc desc; 3094 struct i40e_aqc_nvm_update *cmd = 3095 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3096 i40e_status status; 3097 3098 /* In offset the highest byte must be zeroed. 
*/ 3099 if (offset & 0xFF000000) { 3100 status = I40E_ERR_PARAM; 3101 goto i40e_aq_read_nvm_exit; 3102 } 3103 3104 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_read); 3105 3106 /* If this is the last command in a series, set the proper flag. */ 3107 if (last_command) 3108 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3109 cmd->module_pointer = module_pointer; 3110 cmd->offset = cpu_to_le32(offset); 3111 cmd->length = cpu_to_le16(length); 3112 3113 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3114 if (length > I40E_AQ_LARGE_BUF) 3115 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3116 3117 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3118 3119 i40e_aq_read_nvm_exit: 3120 return status; 3121 } 3122 3123 /** 3124 * i40e_aq_erase_nvm 3125 * @hw: pointer to the hw struct 3126 * @module_pointer: module pointer location in words from the NVM beginning 3127 * @offset: offset in the module (expressed in 4 KB from module's beginning) 3128 * @length: length of the section to be erased (expressed in 4 KB) 3129 * @last_command: tells if this is the last command in a series 3130 * @cmd_details: pointer to command details structure or NULL 3131 * 3132 * Erase the NVM sector using the admin queue commands 3133 **/ 3134 i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer, 3135 u32 offset, u16 length, bool last_command, 3136 struct i40e_asq_cmd_details *cmd_details) 3137 { 3138 struct i40e_aq_desc desc; 3139 struct i40e_aqc_nvm_update *cmd = 3140 (struct i40e_aqc_nvm_update *)&desc.params.raw; 3141 i40e_status status; 3142 3143 /* In offset the highest byte must be zeroed. */ 3144 if (offset & 0xFF000000) { 3145 status = I40E_ERR_PARAM; 3146 goto i40e_aq_erase_nvm_exit; 3147 } 3148 3149 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase); 3150 3151 /* If this is the last command in a series, set the proper flag. */ 3152 if (last_command) 3153 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3154 cmd->module_pointer = module_pointer; 3155 cmd->offset = cpu_to_le32(offset); 3156 cmd->length = cpu_to_le16(length); 3157 3158 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3159 3160 i40e_aq_erase_nvm_exit: 3161 return status; 3162 } 3163 3164 /** 3165 * i40e_parse_discover_capabilities 3166 * @hw: pointer to the hw struct 3167 * @buff: pointer to a buffer containing device/function capability records 3168 * @cap_count: number of capability records in the list 3169 * @list_type_opc: type of capabilities list to parse 3170 * 3171 * Parse the device/function capabilities list. 
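 *
 * This helper is static and only reached through
 * i40e_aq_discover_capabilities(); a hedged sketch of a typical call
 * (buffer size and allocation are illustrative assumptions):
 *
 *	u16 data_size = 0;
 *	void *buff = kzalloc(4096, GFP_KERNEL);
 *
 *	if (buff)
 *		err = i40e_aq_discover_capabilities(hw, buff, 4096, &data_size,
 *				i40e_aqc_opc_list_func_capabilities, NULL);
 *	(on I40E_AQ_RC_ENOMEM, data_size reports the buffer size needed)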
3172 **/ 3173 static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, 3174 u32 cap_count, 3175 enum i40e_admin_queue_opc list_type_opc) 3176 { 3177 struct i40e_aqc_list_capabilities_element_resp *cap; 3178 u32 valid_functions, num_functions; 3179 u32 number, logical_id, phys_id; 3180 struct i40e_hw_capabilities *p; 3181 u16 id, ocp_cfg_word0; 3182 i40e_status status; 3183 u8 major_rev; 3184 u32 i = 0; 3185 3186 cap = (struct i40e_aqc_list_capabilities_element_resp *) buff; 3187 3188 if (list_type_opc == i40e_aqc_opc_list_dev_capabilities) 3189 p = &hw->dev_caps; 3190 else if (list_type_opc == i40e_aqc_opc_list_func_capabilities) 3191 p = &hw->func_caps; 3192 else 3193 return; 3194 3195 for (i = 0; i < cap_count; i++, cap++) { 3196 id = le16_to_cpu(cap->id); 3197 number = le32_to_cpu(cap->number); 3198 logical_id = le32_to_cpu(cap->logical_id); 3199 phys_id = le32_to_cpu(cap->phys_id); 3200 major_rev = cap->major_rev; 3201 3202 switch (id) { 3203 case I40E_AQ_CAP_ID_SWITCH_MODE: 3204 p->switch_mode = number; 3205 break; 3206 case I40E_AQ_CAP_ID_MNG_MODE: 3207 p->management_mode = number; 3208 if (major_rev > 1) { 3209 p->mng_protocols_over_mctp = logical_id; 3210 i40e_debug(hw, I40E_DEBUG_INIT, 3211 "HW Capability: Protocols over MCTP = %d\n", 3212 p->mng_protocols_over_mctp); 3213 } else { 3214 p->mng_protocols_over_mctp = 0; 3215 } 3216 break; 3217 case I40E_AQ_CAP_ID_NPAR_ACTIVE: 3218 p->npar_enable = number; 3219 break; 3220 case I40E_AQ_CAP_ID_OS2BMC_CAP: 3221 p->os2bmc = number; 3222 break; 3223 case I40E_AQ_CAP_ID_FUNCTIONS_VALID: 3224 p->valid_functions = number; 3225 break; 3226 case I40E_AQ_CAP_ID_SRIOV: 3227 if (number == 1) 3228 p->sr_iov_1_1 = true; 3229 break; 3230 case I40E_AQ_CAP_ID_VF: 3231 p->num_vfs = number; 3232 p->vf_base_id = logical_id; 3233 break; 3234 case I40E_AQ_CAP_ID_VMDQ: 3235 if (number == 1) 3236 p->vmdq = true; 3237 break; 3238 case I40E_AQ_CAP_ID_8021QBG: 3239 if (number == 1) 3240 p->evb_802_1_qbg = true; 3241 break; 3242 case I40E_AQ_CAP_ID_8021QBR: 3243 if (number == 1) 3244 p->evb_802_1_qbh = true; 3245 break; 3246 case I40E_AQ_CAP_ID_VSI: 3247 p->num_vsis = number; 3248 break; 3249 case I40E_AQ_CAP_ID_DCB: 3250 if (number == 1) { 3251 p->dcb = true; 3252 p->enabled_tcmap = logical_id; 3253 p->maxtc = phys_id; 3254 } 3255 break; 3256 case I40E_AQ_CAP_ID_FCOE: 3257 if (number == 1) 3258 p->fcoe = true; 3259 break; 3260 case I40E_AQ_CAP_ID_ISCSI: 3261 if (number == 1) 3262 p->iscsi = true; 3263 break; 3264 case I40E_AQ_CAP_ID_RSS: 3265 p->rss = true; 3266 p->rss_table_size = number; 3267 p->rss_table_entry_width = logical_id; 3268 break; 3269 case I40E_AQ_CAP_ID_RXQ: 3270 p->num_rx_qp = number; 3271 p->base_queue = phys_id; 3272 break; 3273 case I40E_AQ_CAP_ID_TXQ: 3274 p->num_tx_qp = number; 3275 p->base_queue = phys_id; 3276 break; 3277 case I40E_AQ_CAP_ID_MSIX: 3278 p->num_msix_vectors = number; 3279 i40e_debug(hw, I40E_DEBUG_INIT, 3280 "HW Capability: MSIX vector count = %d\n", 3281 p->num_msix_vectors); 3282 break; 3283 case I40E_AQ_CAP_ID_VF_MSIX: 3284 p->num_msix_vectors_vf = number; 3285 break; 3286 case I40E_AQ_CAP_ID_FLEX10: 3287 if (major_rev == 1) { 3288 if (number == 1) { 3289 p->flex10_enable = true; 3290 p->flex10_capable = true; 3291 } 3292 } else { 3293 /* Capability revision >= 2 */ 3294 if (number & 1) 3295 p->flex10_enable = true; 3296 if (number & 2) 3297 p->flex10_capable = true; 3298 } 3299 p->flex10_mode = logical_id; 3300 p->flex10_status = phys_id; 3301 break; 3302 case I40E_AQ_CAP_ID_CEM: 3303 if (number == 
1) 3304 p->mgmt_cem = true; 3305 break; 3306 case I40E_AQ_CAP_ID_IWARP: 3307 if (number == 1) 3308 p->iwarp = true; 3309 break; 3310 case I40E_AQ_CAP_ID_LED: 3311 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3312 p->led[phys_id] = true; 3313 break; 3314 case I40E_AQ_CAP_ID_SDP: 3315 if (phys_id < I40E_HW_CAP_MAX_GPIO) 3316 p->sdp[phys_id] = true; 3317 break; 3318 case I40E_AQ_CAP_ID_MDIO: 3319 if (number == 1) { 3320 p->mdio_port_num = phys_id; 3321 p->mdio_port_mode = logical_id; 3322 } 3323 break; 3324 case I40E_AQ_CAP_ID_1588: 3325 if (number == 1) 3326 p->ieee_1588 = true; 3327 break; 3328 case I40E_AQ_CAP_ID_FLOW_DIRECTOR: 3329 p->fd = true; 3330 p->fd_filters_guaranteed = number; 3331 p->fd_filters_best_effort = logical_id; 3332 break; 3333 case I40E_AQ_CAP_ID_WSR_PROT: 3334 p->wr_csr_prot = (u64)number; 3335 p->wr_csr_prot |= (u64)logical_id << 32; 3336 break; 3337 case I40E_AQ_CAP_ID_NVM_MGMT: 3338 if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) 3339 p->sec_rev_disabled = true; 3340 if (number & I40E_NVM_MGMT_UPDATE_DISABLED) 3341 p->update_disabled = true; 3342 break; 3343 default: 3344 break; 3345 } 3346 } 3347 3348 if (p->fcoe) 3349 i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); 3350 3351 /* Software override ensuring FCoE is disabled if npar or mfp 3352 * mode because it is not supported in these modes. 3353 */ 3354 if (p->npar_enable || p->flex10_enable) 3355 p->fcoe = false; 3356 3357 /* count the enabled ports (aka the "not disabled" ports) */ 3358 hw->num_ports = 0; 3359 for (i = 0; i < 4; i++) { 3360 u32 port_cfg_reg = I40E_PRTGEN_CNF + (4 * i); 3361 u64 port_cfg = 0; 3362 3363 /* use AQ read to get the physical register offset instead 3364 * of the port relative offset 3365 */ 3366 i40e_aq_debug_read_register(hw, port_cfg_reg, &port_cfg, NULL); 3367 if (!(port_cfg & I40E_PRTGEN_CNF_PORT_DIS_MASK)) 3368 hw->num_ports++; 3369 } 3370 3371 /* OCP cards case: if a mezz is removed the Ethernet port is at 3372 * disabled state in PRTGEN_CNF register. Additional NVM read is 3373 * needed in order to check if we are dealing with OCP card. 3374 * Those cards have 4 PFs at minimum, so using PRTGEN_CNF for counting 3375 * physical ports results in wrong partition id calculation and thus 3376 * not supporting WoL. 
3377 */ 3378 if (hw->mac.type == I40E_MAC_X722) { 3379 if (!i40e_acquire_nvm(hw, I40E_RESOURCE_READ)) { 3380 status = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, 3381 2 * I40E_SR_OCP_CFG_WORD0, 3382 sizeof(ocp_cfg_word0), 3383 &ocp_cfg_word0, true, NULL); 3384 if (!status && 3385 (ocp_cfg_word0 & I40E_SR_OCP_ENABLED)) 3386 hw->num_ports = 4; 3387 i40e_release_nvm(hw); 3388 } 3389 } 3390 3391 valid_functions = p->valid_functions; 3392 num_functions = 0; 3393 while (valid_functions) { 3394 if (valid_functions & 1) 3395 num_functions++; 3396 valid_functions >>= 1; 3397 } 3398 3399 /* partition id is 1-based, and functions are evenly spread 3400 * across the ports as partitions 3401 */ 3402 if (hw->num_ports != 0) { 3403 hw->partition_id = (hw->pf_id / hw->num_ports) + 1; 3404 hw->num_partitions = num_functions / hw->num_ports; 3405 } 3406 3407 /* additional HW specific goodies that might 3408 * someday be HW version specific 3409 */ 3410 p->rx_buf_chain_len = I40E_MAX_CHAINED_RX_BUFFERS; 3411 } 3412 3413 /** 3414 * i40e_aq_discover_capabilities 3415 * @hw: pointer to the hw struct 3416 * @buff: a virtual buffer to hold the capabilities 3417 * @buff_size: Size of the virtual buffer 3418 * @data_size: Size of the returned data, or buff size needed if AQ err==ENOMEM 3419 * @list_type_opc: capabilities type to discover - pass in the command opcode 3420 * @cmd_details: pointer to command details structure or NULL 3421 * 3422 * Get the device capabilities descriptions from the firmware 3423 **/ 3424 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw, 3425 void *buff, u16 buff_size, u16 *data_size, 3426 enum i40e_admin_queue_opc list_type_opc, 3427 struct i40e_asq_cmd_details *cmd_details) 3428 { 3429 struct i40e_aqc_list_capabilites *cmd; 3430 struct i40e_aq_desc desc; 3431 i40e_status status = 0; 3432 3433 cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw; 3434 3435 if (list_type_opc != i40e_aqc_opc_list_func_capabilities && 3436 list_type_opc != i40e_aqc_opc_list_dev_capabilities) { 3437 status = I40E_ERR_PARAM; 3438 goto exit; 3439 } 3440 3441 i40e_fill_default_direct_cmd_desc(&desc, list_type_opc); 3442 3443 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3444 if (buff_size > I40E_AQ_LARGE_BUF) 3445 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3446 3447 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3448 *data_size = le16_to_cpu(desc.datalen); 3449 3450 if (status) 3451 goto exit; 3452 3453 i40e_parse_discover_capabilities(hw, buff, le32_to_cpu(cmd->count), 3454 list_type_opc); 3455 3456 exit: 3457 return status; 3458 } 3459 3460 /** 3461 * i40e_aq_update_nvm 3462 * @hw: pointer to the hw struct 3463 * @module_pointer: module pointer location in words from the NVM beginning 3464 * @offset: byte offset from the module beginning 3465 * @length: length of the section to be written (in bytes from the offset) 3466 * @data: command buffer (size [bytes] = length) 3467 * @last_command: tells if this is the last command in a series 3468 * @preservation_flags: Preservation mode flags 3469 * @cmd_details: pointer to command details structure or NULL 3470 * 3471 * Update the NVM using the admin queue commands 3472 **/ 3473 i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, 3474 u32 offset, u16 length, void *data, 3475 bool last_command, u8 preservation_flags, 3476 struct i40e_asq_cmd_details *cmd_details) 3477 { 3478 struct i40e_aq_desc desc; 3479 struct i40e_aqc_nvm_update *cmd = 3480 (struct i40e_aqc_nvm_update *)&desc.params.raw; 
3481 i40e_status status; 3482 3483 /* In offset the highest byte must be zeroed. */ 3484 if (offset & 0xFF000000) { 3485 status = I40E_ERR_PARAM; 3486 goto i40e_aq_update_nvm_exit; 3487 } 3488 3489 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3490 3491 /* If this is the last command in a series, set the proper flag. */ 3492 if (last_command) 3493 cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; 3494 if (hw->mac.type == I40E_MAC_X722) { 3495 if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) 3496 cmd->command_flags |= 3497 (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << 3498 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3499 else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) 3500 cmd->command_flags |= 3501 (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << 3502 I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); 3503 } 3504 cmd->module_pointer = module_pointer; 3505 cmd->offset = cpu_to_le32(offset); 3506 cmd->length = cpu_to_le16(length); 3507 3508 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 3509 if (length > I40E_AQ_LARGE_BUF) 3510 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3511 3512 status = i40e_asq_send_command(hw, &desc, data, length, cmd_details); 3513 3514 i40e_aq_update_nvm_exit: 3515 return status; 3516 } 3517 3518 /** 3519 * i40e_aq_rearrange_nvm 3520 * @hw: pointer to the hw struct 3521 * @rearrange_nvm: defines direction of rearrangement 3522 * @cmd_details: pointer to command details structure or NULL 3523 * 3524 * Rearrange NVM structure, available only for transition FW 3525 **/ 3526 i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw, 3527 u8 rearrange_nvm, 3528 struct i40e_asq_cmd_details *cmd_details) 3529 { 3530 struct i40e_aqc_nvm_update *cmd; 3531 i40e_status status; 3532 struct i40e_aq_desc desc; 3533 3534 cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw; 3535 3536 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update); 3537 3538 rearrange_nvm &= (I40E_AQ_NVM_REARRANGE_TO_FLAT | 3539 I40E_AQ_NVM_REARRANGE_TO_STRUCT); 3540 3541 if (!rearrange_nvm) { 3542 status = I40E_ERR_PARAM; 3543 goto i40e_aq_rearrange_nvm_exit; 3544 } 3545 3546 cmd->command_flags |= rearrange_nvm; 3547 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3548 3549 i40e_aq_rearrange_nvm_exit: 3550 return status; 3551 } 3552 3553 /** 3554 * i40e_aq_get_lldp_mib 3555 * @hw: pointer to the hw struct 3556 * @bridge_type: type of bridge requested 3557 * @mib_type: Local, Remote or both Local and Remote MIBs 3558 * @buff: pointer to a user supplied buffer to store the MIB block 3559 * @buff_size: size of the buffer (in bytes) 3560 * @local_len : length of the returned Local LLDP MIB 3561 * @remote_len: length of the returned Remote LLDP MIB 3562 * @cmd_details: pointer to command details structure or NULL 3563 * 3564 * Requests the complete LLDP MIB (entire packet). 
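 *
 * A minimal caller sketch (hypothetical, not a caller in this file; the
 * 1500-byte buffer and the MIB/bridge-type defines from i40e_adminq_cmd.h
 * are assumptions for illustration):
 *
 *	u8 mib[1500];
 *	u16 local_len = 0, remote_len = 0;
 *	i40e_status ret;
 *
 *	ret = i40e_aq_get_lldp_mib(hw, I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
 *				   I40E_AQ_LLDP_MIB_LOCAL, mib, sizeof(mib),
 *				   &local_len, &remote_len, NULL);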
3565 **/ 3566 i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, 3567 u8 mib_type, void *buff, u16 buff_size, 3568 u16 *local_len, u16 *remote_len, 3569 struct i40e_asq_cmd_details *cmd_details) 3570 { 3571 struct i40e_aq_desc desc; 3572 struct i40e_aqc_lldp_get_mib *cmd = 3573 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3574 struct i40e_aqc_lldp_get_mib *resp = 3575 (struct i40e_aqc_lldp_get_mib *)&desc.params.raw; 3576 i40e_status status; 3577 3578 if (buff_size == 0 || !buff) 3579 return I40E_ERR_PARAM; 3580 3581 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_get_mib); 3582 /* Indirect Command */ 3583 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3584 3585 cmd->type = mib_type & I40E_AQ_LLDP_MIB_TYPE_MASK; 3586 cmd->type |= ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) & 3587 I40E_AQ_LLDP_BRIDGE_TYPE_MASK); 3588 3589 desc.datalen = cpu_to_le16(buff_size); 3590 3591 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3592 if (buff_size > I40E_AQ_LARGE_BUF) 3593 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3594 3595 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3596 if (!status) { 3597 if (local_len != NULL) 3598 *local_len = le16_to_cpu(resp->local_len); 3599 if (remote_len != NULL) 3600 *remote_len = le16_to_cpu(resp->remote_len); 3601 } 3602 3603 return status; 3604 } 3605 3606 /** 3607 * i40e_aq_cfg_lldp_mib_change_event 3608 * @hw: pointer to the hw struct 3609 * @enable_update: Enable or Disable event posting 3610 * @cmd_details: pointer to command details structure or NULL 3611 * 3612 * Enable or Disable posting of an event on ARQ when LLDP MIB 3613 * associated with the interface changes 3614 **/ 3615 i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw, 3616 bool enable_update, 3617 struct i40e_asq_cmd_details *cmd_details) 3618 { 3619 struct i40e_aq_desc desc; 3620 struct i40e_aqc_lldp_update_mib *cmd = 3621 (struct i40e_aqc_lldp_update_mib *)&desc.params.raw; 3622 i40e_status status; 3623 3624 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib); 3625 3626 if (!enable_update) 3627 cmd->command |= I40E_AQ_LLDP_MIB_UPDATE_DISABLE; 3628 3629 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3630 3631 return status; 3632 } 3633 3634 /** 3635 * i40e_aq_restore_lldp 3636 * @hw: pointer to the hw struct 3637 * @setting: pointer to factory setting variable or NULL 3638 * @restore: True if factory settings should be restored 3639 * @cmd_details: pointer to command details structure or NULL 3640 * 3641 * Restore LLDP Agent factory settings if @restore set to True. In other case 3642 * only returns factory setting in AQ response. 
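 *
 * Hypothetical usage sketch (query the factory setting without restoring
 * anything):
 *
 *	u8 factory_setting = 0;
 *	i40e_status ret;
 *
 *	ret = i40e_aq_restore_lldp(hw, &factory_setting, false, NULL);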
3643 **/ 3644 enum i40e_status_code 3645 i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore, 3646 struct i40e_asq_cmd_details *cmd_details) 3647 { 3648 struct i40e_aq_desc desc; 3649 struct i40e_aqc_lldp_restore *cmd = 3650 (struct i40e_aqc_lldp_restore *)&desc.params.raw; 3651 i40e_status status; 3652 3653 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) { 3654 i40e_debug(hw, I40E_DEBUG_ALL, 3655 "Restore LLDP not supported by current FW version.\n"); 3656 return I40E_ERR_DEVICE_NOT_SUPPORTED; 3657 } 3658 3659 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore); 3660 3661 if (restore) 3662 cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE; 3663 3664 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3665 3666 if (setting) 3667 *setting = cmd->command & 1; 3668 3669 return status; 3670 } 3671 3672 /** 3673 * i40e_aq_stop_lldp 3674 * @hw: pointer to the hw struct 3675 * @shutdown_agent: True if LLDP Agent needs to be Shutdown 3676 * @persist: True if stop of LLDP should be persistent across power cycles 3677 * @cmd_details: pointer to command details structure or NULL 3678 * 3679 * Stop or Shutdown the embedded LLDP Agent 3680 **/ 3681 i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent, 3682 bool persist, 3683 struct i40e_asq_cmd_details *cmd_details) 3684 { 3685 struct i40e_aq_desc desc; 3686 struct i40e_aqc_lldp_stop *cmd = 3687 (struct i40e_aqc_lldp_stop *)&desc.params.raw; 3688 i40e_status status; 3689 3690 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop); 3691 3692 if (shutdown_agent) 3693 cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN; 3694 3695 if (persist) { 3696 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3697 cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST; 3698 else 3699 i40e_debug(hw, I40E_DEBUG_ALL, 3700 "Persistent Stop LLDP not supported by current FW version.\n"); 3701 } 3702 3703 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3704 3705 return status; 3706 } 3707 3708 /** 3709 * i40e_aq_start_lldp 3710 * @hw: pointer to the hw struct 3712 * @persist: True if start of LLDP should be persistent across power cycles 3714 * @cmd_details: pointer to command details structure or NULL 3715 * 3716 * Start the embedded LLDP Agent on all ports.
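 *
 * A minimal sketch of how stop/start are typically paired by a caller
 * (hypothetical; the reconfiguration step in between is omitted):
 *
 *	i40e_aq_stop_lldp(hw, true, false, NULL);
 *	... apply a new configuration ...
 *	i40e_aq_start_lldp(hw, false, NULL);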
3717 **/ 3718 i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist, 3719 struct i40e_asq_cmd_details *cmd_details) 3720 { 3721 struct i40e_aq_desc desc; 3722 struct i40e_aqc_lldp_start *cmd = 3723 (struct i40e_aqc_lldp_start *)&desc.params.raw; 3724 i40e_status status; 3725 3726 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); 3727 3728 cmd->command = I40E_AQ_LLDP_AGENT_START; 3729 3730 if (persist) { 3731 if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) 3732 cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST; 3733 else 3734 i40e_debug(hw, I40E_DEBUG_ALL, 3735 "Persistent Start LLDP not supported by current FW version.\n"); 3736 } 3737 3738 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3739 3740 return status; 3741 } 3742 3743 /** 3744 * i40e_aq_set_dcb_parameters 3745 * @hw: pointer to the hw struct 3746 * @cmd_details: pointer to command details structure or NULL 3747 * @dcb_enable: True if DCB configuration needs to be applied 3748 * 3749 **/ 3750 enum i40e_status_code 3751 i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, 3752 struct i40e_asq_cmd_details *cmd_details) 3753 { 3754 struct i40e_aq_desc desc; 3755 struct i40e_aqc_set_dcb_parameters *cmd = 3756 (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; 3757 i40e_status status; 3758 3759 if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) 3760 return I40E_ERR_DEVICE_NOT_SUPPORTED; 3761 3762 i40e_fill_default_direct_cmd_desc(&desc, 3763 i40e_aqc_opc_set_dcb_parameters); 3764 3765 if (dcb_enable) { 3766 cmd->valid_flags = I40E_DCB_VALID; 3767 cmd->command = I40E_AQ_DCB_SET_AGENT; 3768 } 3769 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3770 3771 return status; 3772 } 3773 3774 /** 3775 * i40e_aq_get_cee_dcb_config 3776 * @hw: pointer to the hw struct 3777 * @buff: response buffer that stores CEE operational configuration 3778 * @buff_size: size of the buffer passed 3779 * @cmd_details: pointer to command details structure or NULL 3780 * 3781 * Get CEE DCBX mode operational configuration from firmware 3782 **/ 3783 i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, 3784 void *buff, u16 buff_size, 3785 struct i40e_asq_cmd_details *cmd_details) 3786 { 3787 struct i40e_aq_desc desc; 3788 i40e_status status; 3789 3790 if (buff_size == 0 || !buff) 3791 return I40E_ERR_PARAM; 3792 3793 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_cee_dcb_cfg); 3794 3795 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3796 status = i40e_asq_send_command(hw, &desc, (void *)buff, buff_size, 3797 cmd_details); 3798 3799 return status; 3800 } 3801 3802 /** 3803 * i40e_aq_add_udp_tunnel 3804 * @hw: pointer to the hw struct 3805 * @udp_port: the UDP port to add in Host byte order 3806 * @protocol_index: protocol index type 3807 * @filter_index: pointer to filter index 3808 * @cmd_details: pointer to command details structure or NULL 3809 * 3810 * Note: Firmware expects the udp_port value to be in Little Endian format, 3811 * and this function will call cpu_to_le16 to convert from Host byte order to 3812 * Little Endian order. 
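 *
 * Hypothetical usage sketch (the VXLAN well-known port 4789 and the
 * I40E_AQC_TUNNEL_TYPE_VXLAN protocol index are shown as assumed example
 * values):
 *
 *	u8 filter_index;
 *
 *	if (!i40e_aq_add_udp_tunnel(hw, 4789, I40E_AQC_TUNNEL_TYPE_VXLAN,
 *				    &filter_index, NULL)) {
 *		... later, when the port is removed ...
 *		i40e_aq_del_udp_tunnel(hw, filter_index, NULL);
 *	}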
3813 **/ 3814 i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, 3815 u16 udp_port, u8 protocol_index, 3816 u8 *filter_index, 3817 struct i40e_asq_cmd_details *cmd_details) 3818 { 3819 struct i40e_aq_desc desc; 3820 struct i40e_aqc_add_udp_tunnel *cmd = 3821 (struct i40e_aqc_add_udp_tunnel *)&desc.params.raw; 3822 struct i40e_aqc_del_udp_tunnel_completion *resp = 3823 (struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw; 3824 i40e_status status; 3825 3826 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel); 3827 3828 cmd->udp_port = cpu_to_le16(udp_port); 3829 cmd->protocol_type = protocol_index; 3830 3831 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3832 3833 if (!status && filter_index) 3834 *filter_index = resp->index; 3835 3836 return status; 3837 } 3838 3839 /** 3840 * i40e_aq_del_udp_tunnel 3841 * @hw: pointer to the hw struct 3842 * @index: filter index 3843 * @cmd_details: pointer to command details structure or NULL 3844 **/ 3845 i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index, 3846 struct i40e_asq_cmd_details *cmd_details) 3847 { 3848 struct i40e_aq_desc desc; 3849 struct i40e_aqc_remove_udp_tunnel *cmd = 3850 (struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw; 3851 i40e_status status; 3852 3853 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel); 3854 3855 cmd->index = index; 3856 3857 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3858 3859 return status; 3860 } 3861 3862 /** 3863 * i40e_aq_delete_element - Delete switch element 3864 * @hw: pointer to the hw struct 3865 * @seid: the SEID to delete from the switch 3866 * @cmd_details: pointer to command details structure or NULL 3867 * 3868 * This deletes a switch element from the switch. 3869 **/ 3870 i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, 3871 struct i40e_asq_cmd_details *cmd_details) 3872 { 3873 struct i40e_aq_desc desc; 3874 struct i40e_aqc_switch_seid *cmd = 3875 (struct i40e_aqc_switch_seid *)&desc.params.raw; 3876 i40e_status status; 3877 3878 if (seid == 0) 3879 return I40E_ERR_PARAM; 3880 3881 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element); 3882 3883 cmd->seid = cpu_to_le16(seid); 3884 3885 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3886 3887 return status; 3888 } 3889 3890 /** 3891 * i40e_aq_dcb_updated - DCB Updated Command 3892 * @hw: pointer to the hw struct 3893 * @cmd_details: pointer to command details structure or NULL 3894 * 3895 * EMP will return when the shared RPB settings have been 3896 * recomputed and modified. The retval field in the descriptor 3897 * will be set to 0 when RPB is modified. 
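 *
 * Hedged usage sketch: a caller would typically issue this right after
 * pushing a new DCB configuration and only check the return code:
 *
 *	if (i40e_aq_dcb_updated(hw, NULL))
 *		... log and fall back ...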
3898 **/ 3899 i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, 3900 struct i40e_asq_cmd_details *cmd_details) 3901 { 3902 struct i40e_aq_desc desc; 3903 i40e_status status; 3904 3905 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated); 3906 3907 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3908 3909 return status; 3910 } 3911 3912 /** 3913 * i40e_aq_tx_sched_cmd - generic Tx scheduler AQ command handler 3914 * @hw: pointer to the hw struct 3915 * @seid: seid for the physical port/switching component/vsi 3916 * @buff: Indirect buffer to hold data parameters and response 3917 * @buff_size: Indirect buffer size 3918 * @opcode: Tx scheduler AQ command opcode 3919 * @cmd_details: pointer to command details structure or NULL 3920 * 3921 * Generic command handler for Tx scheduler AQ commands 3922 **/ 3923 static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid, 3924 void *buff, u16 buff_size, 3925 enum i40e_admin_queue_opc opcode, 3926 struct i40e_asq_cmd_details *cmd_details) 3927 { 3928 struct i40e_aq_desc desc; 3929 struct i40e_aqc_tx_sched_ind *cmd = 3930 (struct i40e_aqc_tx_sched_ind *)&desc.params.raw; 3931 i40e_status status; 3932 bool cmd_param_flag = false; 3933 3934 switch (opcode) { 3935 case i40e_aqc_opc_configure_vsi_ets_sla_bw_limit: 3936 case i40e_aqc_opc_configure_vsi_tc_bw: 3937 case i40e_aqc_opc_enable_switching_comp_ets: 3938 case i40e_aqc_opc_modify_switching_comp_ets: 3939 case i40e_aqc_opc_disable_switching_comp_ets: 3940 case i40e_aqc_opc_configure_switching_comp_ets_bw_limit: 3941 case i40e_aqc_opc_configure_switching_comp_bw_config: 3942 cmd_param_flag = true; 3943 break; 3944 case i40e_aqc_opc_query_vsi_bw_config: 3945 case i40e_aqc_opc_query_vsi_ets_sla_config: 3946 case i40e_aqc_opc_query_switching_comp_ets_config: 3947 case i40e_aqc_opc_query_port_ets_config: 3948 case i40e_aqc_opc_query_switching_comp_bw_config: 3949 cmd_param_flag = false; 3950 break; 3951 default: 3952 return I40E_ERR_PARAM; 3953 } 3954 3955 i40e_fill_default_direct_cmd_desc(&desc, opcode); 3956 3957 /* Indirect command */ 3958 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 3959 if (cmd_param_flag) 3960 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 3961 if (buff_size > I40E_AQ_LARGE_BUF) 3962 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 3963 3964 desc.datalen = cpu_to_le16(buff_size); 3965 3966 cmd->vsi_seid = cpu_to_le16(seid); 3967 3968 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 3969 3970 return status; 3971 } 3972 3973 /** 3974 * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit 3975 * @hw: pointer to the hw struct 3976 * @seid: VSI seid 3977 * @credit: BW limit credits (0 = disabled) 3978 * @max_credit: Max BW limit credits 3979 * @cmd_details: pointer to command details structure or NULL 3980 **/ 3981 i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, 3982 u16 seid, u16 credit, u8 max_credit, 3983 struct i40e_asq_cmd_details *cmd_details) 3984 { 3985 struct i40e_aq_desc desc; 3986 struct i40e_aqc_configure_vsi_bw_limit *cmd = 3987 (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw; 3988 i40e_status status; 3989 3990 i40e_fill_default_direct_cmd_desc(&desc, 3991 i40e_aqc_opc_configure_vsi_bw_limit); 3992 3993 cmd->vsi_seid = cpu_to_le16(seid); 3994 cmd->credit = cpu_to_le16(credit); 3995 cmd->max_credit = max_credit; 3996 3997 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 3998 3999 return status; 4000 } 4001 4002 /** 4003 * i40e_aq_config_vsi_tc_bw - Config VSI BW 
Allocation per TC 4004 * @hw: pointer to the hw struct 4005 * @seid: VSI seid 4006 * @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits 4007 * @cmd_details: pointer to command details structure or NULL 4008 **/ 4009 i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, 4010 u16 seid, 4011 struct i40e_aqc_configure_vsi_tc_bw_data *bw_data, 4012 struct i40e_asq_cmd_details *cmd_details) 4013 { 4014 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4015 i40e_aqc_opc_configure_vsi_tc_bw, 4016 cmd_details); 4017 } 4018 4019 /** 4020 * i40e_aq_config_switch_comp_ets - Enable/Disable/Modify ETS on the port 4021 * @hw: pointer to the hw struct 4022 * @seid: seid of the switching component connected to Physical Port 4023 * @ets_data: Buffer holding ETS parameters 4024 * @opcode: Tx scheduler AQ command opcode 4025 * @cmd_details: pointer to command details structure or NULL 4026 **/ 4027 i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, 4028 u16 seid, 4029 struct i40e_aqc_configure_switching_comp_ets_data *ets_data, 4030 enum i40e_admin_queue_opc opcode, 4031 struct i40e_asq_cmd_details *cmd_details) 4032 { 4033 return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data, 4034 sizeof(*ets_data), opcode, cmd_details); 4035 } 4036 4037 /** 4038 * i40e_aq_config_switch_comp_bw_config - Config Switch comp BW Alloc per TC 4039 * @hw: pointer to the hw struct 4040 * @seid: seid of the switching component 4041 * @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits 4042 * @cmd_details: pointer to command details structure or NULL 4043 **/ 4044 i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw, 4045 u16 seid, 4046 struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data, 4047 struct i40e_asq_cmd_details *cmd_details) 4048 { 4049 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4050 i40e_aqc_opc_configure_switching_comp_bw_config, 4051 cmd_details); 4052 } 4053 4054 /** 4055 * i40e_aq_query_vsi_bw_config - Query VSI BW configuration 4056 * @hw: pointer to the hw struct 4057 * @seid: seid of the VSI 4058 * @bw_data: Buffer to hold VSI BW configuration 4059 * @cmd_details: pointer to command details structure or NULL 4060 **/ 4061 i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw, 4062 u16 seid, 4063 struct i40e_aqc_query_vsi_bw_config_resp *bw_data, 4064 struct i40e_asq_cmd_details *cmd_details) 4065 { 4066 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4067 i40e_aqc_opc_query_vsi_bw_config, 4068 cmd_details); 4069 } 4070 4071 /** 4072 * i40e_aq_query_vsi_ets_sla_config - Query VSI BW configuration per TC 4073 * @hw: pointer to the hw struct 4074 * @seid: seid of the VSI 4075 * @bw_data: Buffer to hold VSI BW configuration per TC 4076 * @cmd_details: pointer to command details structure or NULL 4077 **/ 4078 i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw, 4079 u16 seid, 4080 struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data, 4081 struct i40e_asq_cmd_details *cmd_details) 4082 { 4083 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4084 i40e_aqc_opc_query_vsi_ets_sla_config, 4085 cmd_details); 4086 } 4087 4088 /** 4089 * i40e_aq_query_switch_comp_ets_config - Query Switch comp BW config per TC 4090 * @hw: pointer to the hw struct 4091 * @seid: seid of the switching component 4092 * @bw_data: Buffer to hold switching component's per TC BW config 4093 * @cmd_details: pointer to command details structure or NULL 
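 *
 * Hypothetical usage sketch (the response structure is filled by firmware;
 * seid comes from the caller):
 *
 *	struct i40e_aqc_query_switching_comp_ets_config_resp resp;
 *	i40e_status ret;
 *
 *	ret = i40e_aq_query_switch_comp_ets_config(hw, seid, &resp, NULL);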
4094 **/ 4095 i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw, 4096 u16 seid, 4097 struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data, 4098 struct i40e_asq_cmd_details *cmd_details) 4099 { 4100 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4101 i40e_aqc_opc_query_switching_comp_ets_config, 4102 cmd_details); 4103 } 4104 4105 /** 4106 * i40e_aq_query_port_ets_config - Query Physical Port ETS configuration 4107 * @hw: pointer to the hw struct 4108 * @seid: seid of the VSI or switching component connected to Physical Port 4109 * @bw_data: Buffer to hold current ETS configuration for the Physical Port 4110 * @cmd_details: pointer to command details structure or NULL 4111 **/ 4112 i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw, 4113 u16 seid, 4114 struct i40e_aqc_query_port_ets_config_resp *bw_data, 4115 struct i40e_asq_cmd_details *cmd_details) 4116 { 4117 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4118 i40e_aqc_opc_query_port_ets_config, 4119 cmd_details); 4120 } 4121 4122 /** 4123 * i40e_aq_query_switch_comp_bw_config - Query Switch comp BW configuration 4124 * @hw: pointer to the hw struct 4125 * @seid: seid of the switching component 4126 * @bw_data: Buffer to hold switching component's BW configuration 4127 * @cmd_details: pointer to command details structure or NULL 4128 **/ 4129 i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, 4130 u16 seid, 4131 struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data, 4132 struct i40e_asq_cmd_details *cmd_details) 4133 { 4134 return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data), 4135 i40e_aqc_opc_query_switching_comp_bw_config, 4136 cmd_details); 4137 } 4138 4139 /** 4140 * i40e_validate_filter_settings 4141 * @hw: pointer to the hardware structure 4142 * @settings: Filter control settings 4143 * 4144 * Check and validate the filter control settings passed. 4145 * The function checks for the valid filter/context sizes being 4146 * passed for FCoE and PE. 4147 * 4148 * Returns 0 if the values passed are valid and within 4149 * range else returns an error. 
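 *
 * The *_filt_num and *_cntx_num members are log2-style selectors: the
 * effective size is the corresponding base size shifted left by the
 * selector value (e.g. I40E_HASH_FILTER_BASE_SIZE << fcoe_filt_num),
 * which is exactly what the range checks below compute.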
4150 **/ 4151 static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw, 4152 struct i40e_filter_control_settings *settings) 4153 { 4154 u32 fcoe_cntx_size, fcoe_filt_size; 4155 u32 pe_cntx_size, pe_filt_size; 4156 u32 fcoe_fmax; 4157 u32 val; 4158 4159 /* Validate FCoE settings passed */ 4160 switch (settings->fcoe_filt_num) { 4161 case I40E_HASH_FILTER_SIZE_1K: 4162 case I40E_HASH_FILTER_SIZE_2K: 4163 case I40E_HASH_FILTER_SIZE_4K: 4164 case I40E_HASH_FILTER_SIZE_8K: 4165 case I40E_HASH_FILTER_SIZE_16K: 4166 case I40E_HASH_FILTER_SIZE_32K: 4167 fcoe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4168 fcoe_filt_size <<= (u32)settings->fcoe_filt_num; 4169 break; 4170 default: 4171 return I40E_ERR_PARAM; 4172 } 4173 4174 switch (settings->fcoe_cntx_num) { 4175 case I40E_DMA_CNTX_SIZE_512: 4176 case I40E_DMA_CNTX_SIZE_1K: 4177 case I40E_DMA_CNTX_SIZE_2K: 4178 case I40E_DMA_CNTX_SIZE_4K: 4179 fcoe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4180 fcoe_cntx_size <<= (u32)settings->fcoe_cntx_num; 4181 break; 4182 default: 4183 return I40E_ERR_PARAM; 4184 } 4185 4186 /* Validate PE settings passed */ 4187 switch (settings->pe_filt_num) { 4188 case I40E_HASH_FILTER_SIZE_1K: 4189 case I40E_HASH_FILTER_SIZE_2K: 4190 case I40E_HASH_FILTER_SIZE_4K: 4191 case I40E_HASH_FILTER_SIZE_8K: 4192 case I40E_HASH_FILTER_SIZE_16K: 4193 case I40E_HASH_FILTER_SIZE_32K: 4194 case I40E_HASH_FILTER_SIZE_64K: 4195 case I40E_HASH_FILTER_SIZE_128K: 4196 case I40E_HASH_FILTER_SIZE_256K: 4197 case I40E_HASH_FILTER_SIZE_512K: 4198 case I40E_HASH_FILTER_SIZE_1M: 4199 pe_filt_size = I40E_HASH_FILTER_BASE_SIZE; 4200 pe_filt_size <<= (u32)settings->pe_filt_num; 4201 break; 4202 default: 4203 return I40E_ERR_PARAM; 4204 } 4205 4206 switch (settings->pe_cntx_num) { 4207 case I40E_DMA_CNTX_SIZE_512: 4208 case I40E_DMA_CNTX_SIZE_1K: 4209 case I40E_DMA_CNTX_SIZE_2K: 4210 case I40E_DMA_CNTX_SIZE_4K: 4211 case I40E_DMA_CNTX_SIZE_8K: 4212 case I40E_DMA_CNTX_SIZE_16K: 4213 case I40E_DMA_CNTX_SIZE_32K: 4214 case I40E_DMA_CNTX_SIZE_64K: 4215 case I40E_DMA_CNTX_SIZE_128K: 4216 case I40E_DMA_CNTX_SIZE_256K: 4217 pe_cntx_size = I40E_DMA_CNTX_BASE_SIZE; 4218 pe_cntx_size <<= (u32)settings->pe_cntx_num; 4219 break; 4220 default: 4221 return I40E_ERR_PARAM; 4222 } 4223 4224 /* FCHSIZE + FCDSIZE should not be greater than PMFCOEFMAX */ 4225 val = rd32(hw, I40E_GLHMC_FCOEFMAX); 4226 fcoe_fmax = (val & I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK) 4227 >> I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT; 4228 if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax) 4229 return I40E_ERR_INVALID_SIZE; 4230 4231 return 0; 4232 } 4233 4234 /** 4235 * i40e_set_filter_control 4236 * @hw: pointer to the hardware structure 4237 * @settings: Filter control settings 4238 * 4239 * Set the Queue Filters for PE/FCoE and enable filters required 4240 * for a single PF. It is expected that these settings are programmed 4241 * at the driver initialization time. 
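 *
 * A minimal probe-time sketch (hypothetical; the field values are
 * illustrative only and must still pass i40e_validate_filter_settings()):
 *
 *	struct i40e_filter_control_settings settings;
 *	i40e_status ret;
 *
 *	memset(&settings, 0, sizeof(settings));
 *	settings.hash_lut_size = I40E_HASH_LUT_SIZE_128;
 *	settings.enable_fdir = true;
 *	settings.enable_ethtype = true;
 *	settings.enable_macvlan = true;
 *	ret = i40e_set_filter_control(hw, &settings);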
4242 **/ 4243 i40e_status i40e_set_filter_control(struct i40e_hw *hw, 4244 struct i40e_filter_control_settings *settings) 4245 { 4246 i40e_status ret = 0; 4247 u32 hash_lut_size = 0; 4248 u32 val; 4249 4250 if (!settings) 4251 return I40E_ERR_PARAM; 4252 4253 /* Validate the input settings */ 4254 ret = i40e_validate_filter_settings(hw, settings); 4255 if (ret) 4256 return ret; 4257 4258 /* Read the PF Queue Filter control register */ 4259 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); 4260 4261 /* Program required PE hash buckets for the PF */ 4262 val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; 4263 val |= ((u32)settings->pe_filt_num << I40E_PFQF_CTL_0_PEHSIZE_SHIFT) & 4264 I40E_PFQF_CTL_0_PEHSIZE_MASK; 4265 /* Program required PE contexts for the PF */ 4266 val &= ~I40E_PFQF_CTL_0_PEDSIZE_MASK; 4267 val |= ((u32)settings->pe_cntx_num << I40E_PFQF_CTL_0_PEDSIZE_SHIFT) & 4268 I40E_PFQF_CTL_0_PEDSIZE_MASK; 4269 4270 /* Program required FCoE hash buckets for the PF */ 4271 val &= ~I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4272 val |= ((u32)settings->fcoe_filt_num << 4273 I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT) & 4274 I40E_PFQF_CTL_0_PFFCHSIZE_MASK; 4275 /* Program required FCoE DDP contexts for the PF */ 4276 val &= ~I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4277 val |= ((u32)settings->fcoe_cntx_num << 4278 I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT) & 4279 I40E_PFQF_CTL_0_PFFCDSIZE_MASK; 4280 4281 /* Program Hash LUT size for the PF */ 4282 val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4283 if (settings->hash_lut_size == I40E_HASH_LUT_SIZE_512) 4284 hash_lut_size = 1; 4285 val |= (hash_lut_size << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT) & 4286 I40E_PFQF_CTL_0_HASHLUTSIZE_MASK; 4287 4288 /* Enable FDIR, Ethertype and MACVLAN filters for PF and VFs */ 4289 if (settings->enable_fdir) 4290 val |= I40E_PFQF_CTL_0_FD_ENA_MASK; 4291 if (settings->enable_ethtype) 4292 val |= I40E_PFQF_CTL_0_ETYPE_ENA_MASK; 4293 if (settings->enable_macvlan) 4294 val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; 4295 4296 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); 4297 4298 return 0; 4299 } 4300 4301 /** 4302 * i40e_aq_add_rem_control_packet_filter - Add or Remove Control Packet Filter 4303 * @hw: pointer to the hw struct 4304 * @mac_addr: MAC address to use in the filter 4305 * @ethtype: Ethertype to use in the filter 4306 * @flags: Flags that needs to be applied to the filter 4307 * @vsi_seid: seid of the control VSI 4308 * @queue: VSI queue number to send the packet to 4309 * @is_add: Add control packet filter if True else remove 4310 * @stats: Structure to hold information on control filter counts 4311 * @cmd_details: pointer to command details structure or NULL 4312 * 4313 * This command will Add or Remove control packet filter for a control VSI. 4314 * In return it will update the total number of perfect filter count in 4315 * the stats member. 
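 *
 * See i40e_add_filter_to_drop_tx_flow_control_frames() below for an in-file
 * example that adds a drop filter for Tx PAUSE (flow control) frames.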
4316 **/ 4317 i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, 4318 u8 *mac_addr, u16 ethtype, u16 flags, 4319 u16 vsi_seid, u16 queue, bool is_add, 4320 struct i40e_control_filter_stats *stats, 4321 struct i40e_asq_cmd_details *cmd_details) 4322 { 4323 struct i40e_aq_desc desc; 4324 struct i40e_aqc_add_remove_control_packet_filter *cmd = 4325 (struct i40e_aqc_add_remove_control_packet_filter *) 4326 &desc.params.raw; 4327 struct i40e_aqc_add_remove_control_packet_filter_completion *resp = 4328 (struct i40e_aqc_add_remove_control_packet_filter_completion *) 4329 &desc.params.raw; 4330 i40e_status status; 4331 4332 if (vsi_seid == 0) 4333 return I40E_ERR_PARAM; 4334 4335 if (is_add) { 4336 i40e_fill_default_direct_cmd_desc(&desc, 4337 i40e_aqc_opc_add_control_packet_filter); 4338 cmd->queue = cpu_to_le16(queue); 4339 } else { 4340 i40e_fill_default_direct_cmd_desc(&desc, 4341 i40e_aqc_opc_remove_control_packet_filter); 4342 } 4343 4344 if (mac_addr) 4345 ether_addr_copy(cmd->mac, mac_addr); 4346 4347 cmd->etype = cpu_to_le16(ethtype); 4348 cmd->flags = cpu_to_le16(flags); 4349 cmd->seid = cpu_to_le16(vsi_seid); 4350 4351 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4352 4353 if (!status && stats) { 4354 stats->mac_etype_used = le16_to_cpu(resp->mac_etype_used); 4355 stats->etype_used = le16_to_cpu(resp->etype_used); 4356 stats->mac_etype_free = le16_to_cpu(resp->mac_etype_free); 4357 stats->etype_free = le16_to_cpu(resp->etype_free); 4358 } 4359 4360 return status; 4361 } 4362 4363 /** 4364 * i40e_add_filter_to_drop_tx_flow_control_frames- filter to drop flow control 4365 * @hw: pointer to the hw struct 4366 * @seid: VSI seid to add ethertype filter from 4367 **/ 4368 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, 4369 u16 seid) 4370 { 4371 #define I40E_FLOW_CONTROL_ETHTYPE 0x8808 4372 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | 4373 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | 4374 I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; 4375 u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE; 4376 i40e_status status; 4377 4378 status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag, 4379 seid, 0, true, NULL, 4380 NULL); 4381 if (status) 4382 hw_dbg(hw, "Ethtype Filter Add failed: Error pruning Tx flow control frames\n"); 4383 } 4384 4385 /** 4386 * i40e_aq_alternate_read 4387 * @hw: pointer to the hardware structure 4388 * @reg_addr0: address of first dword to be read 4389 * @reg_val0: pointer for data read from 'reg_addr0' 4390 * @reg_addr1: address of second dword to be read 4391 * @reg_val1: pointer for data read from 'reg_addr1' 4392 * 4393 * Read one or two dwords from alternate structure. Fields are indicated 4394 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer 4395 * is not passed then only register at 'reg_addr0' is read. 
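 *
 * i40e_read_bw_from_alt_ram() below uses this helper to fetch the per-PF
 * min/max bandwidth words in a single call.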
4396 * 4397 **/ 4398 static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw, 4399 u32 reg_addr0, u32 *reg_val0, 4400 u32 reg_addr1, u32 *reg_val1) 4401 { 4402 struct i40e_aq_desc desc; 4403 struct i40e_aqc_alternate_write *cmd_resp = 4404 (struct i40e_aqc_alternate_write *)&desc.params.raw; 4405 i40e_status status; 4406 4407 if (!reg_val0) 4408 return I40E_ERR_PARAM; 4409 4410 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_alternate_read); 4411 cmd_resp->address0 = cpu_to_le32(reg_addr0); 4412 cmd_resp->address1 = cpu_to_le32(reg_addr1); 4413 4414 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL); 4415 4416 if (!status) { 4417 *reg_val0 = le32_to_cpu(cmd_resp->data0); 4418 4419 if (reg_val1) 4420 *reg_val1 = le32_to_cpu(cmd_resp->data1); 4421 } 4422 4423 return status; 4424 } 4425 4426 /** 4427 * i40e_aq_resume_port_tx 4428 * @hw: pointer to the hardware structure 4429 * @cmd_details: pointer to command details structure or NULL 4430 * 4431 * Resume port's Tx traffic 4432 **/ 4433 i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, 4434 struct i40e_asq_cmd_details *cmd_details) 4435 { 4436 struct i40e_aq_desc desc; 4437 i40e_status status; 4438 4439 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx); 4440 4441 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 4442 4443 return status; 4444 } 4445 4446 /** 4447 * i40e_set_pci_config_data - store PCI bus info 4448 * @hw: pointer to hardware structure 4449 * @link_status: the link status word from PCI config space 4450 * 4451 * Stores the PCI bus info (speed, width, type) within the i40e_hw structure 4452 **/ 4453 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) 4454 { 4455 hw->bus.type = i40e_bus_type_pci_express; 4456 4457 switch (link_status & PCI_EXP_LNKSTA_NLW) { 4458 case PCI_EXP_LNKSTA_NLW_X1: 4459 hw->bus.width = i40e_bus_width_pcie_x1; 4460 break; 4461 case PCI_EXP_LNKSTA_NLW_X2: 4462 hw->bus.width = i40e_bus_width_pcie_x2; 4463 break; 4464 case PCI_EXP_LNKSTA_NLW_X4: 4465 hw->bus.width = i40e_bus_width_pcie_x4; 4466 break; 4467 case PCI_EXP_LNKSTA_NLW_X8: 4468 hw->bus.width = i40e_bus_width_pcie_x8; 4469 break; 4470 default: 4471 hw->bus.width = i40e_bus_width_unknown; 4472 break; 4473 } 4474 4475 switch (link_status & PCI_EXP_LNKSTA_CLS) { 4476 case PCI_EXP_LNKSTA_CLS_2_5GB: 4477 hw->bus.speed = i40e_bus_speed_2500; 4478 break; 4479 case PCI_EXP_LNKSTA_CLS_5_0GB: 4480 hw->bus.speed = i40e_bus_speed_5000; 4481 break; 4482 case PCI_EXP_LNKSTA_CLS_8_0GB: 4483 hw->bus.speed = i40e_bus_speed_8000; 4484 break; 4485 default: 4486 hw->bus.speed = i40e_bus_speed_unknown; 4487 break; 4488 } 4489 } 4490 4491 /** 4492 * i40e_aq_debug_dump 4493 * @hw: pointer to the hardware structure 4494 * @cluster_id: specific cluster to dump 4495 * @table_id: table id within cluster 4496 * @start_index: index of line in the block to read 4497 * @buff_size: dump buffer size 4498 * @buff: dump buffer 4499 * @ret_buff_size: actual buffer size returned 4500 * @ret_next_table: next block to read 4501 * @ret_next_index: next index to read 4502 * @cmd_details: pointer to command details structure or NULL 4503 * 4504 * Dump internal FW/HW data for debug purposes. 
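 *
 * Hypothetical usage sketch (buffer and starting coordinates are
 * illustrative; the returned "next" values tell the caller where to
 * continue dumping):
 *
 *	u16 rlen = 0;
 *	u8 next_table = 0;
 *	u32 next_index = 0;
 *
 *	status = i40e_aq_debug_dump(hw, cluster_id, table_id, 0, sizeof(buf),
 *				    buf, &rlen, &next_table, &next_index,
 *				    NULL);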
4505 * 4506 **/ 4507 i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, 4508 u8 table_id, u32 start_index, u16 buff_size, 4509 void *buff, u16 *ret_buff_size, 4510 u8 *ret_next_table, u32 *ret_next_index, 4511 struct i40e_asq_cmd_details *cmd_details) 4512 { 4513 struct i40e_aq_desc desc; 4514 struct i40e_aqc_debug_dump_internals *cmd = 4515 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4516 struct i40e_aqc_debug_dump_internals *resp = 4517 (struct i40e_aqc_debug_dump_internals *)&desc.params.raw; 4518 i40e_status status; 4519 4520 if (buff_size == 0 || !buff) 4521 return I40E_ERR_PARAM; 4522 4523 i40e_fill_default_direct_cmd_desc(&desc, 4524 i40e_aqc_opc_debug_dump_internals); 4525 /* Indirect Command */ 4526 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4527 if (buff_size > I40E_AQ_LARGE_BUF) 4528 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4529 4530 cmd->cluster_id = cluster_id; 4531 cmd->table_id = table_id; 4532 cmd->idx = cpu_to_le32(start_index); 4533 4534 desc.datalen = cpu_to_le16(buff_size); 4535 4536 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 4537 if (!status) { 4538 if (ret_buff_size) 4539 *ret_buff_size = le16_to_cpu(desc.datalen); 4540 if (ret_next_table) 4541 *ret_next_table = resp->table_id; 4542 if (ret_next_index) 4543 *ret_next_index = le32_to_cpu(resp->idx); 4544 } 4545 4546 return status; 4547 } 4548 4549 /** 4550 * i40e_read_bw_from_alt_ram 4551 * @hw: pointer to the hardware structure 4552 * @max_bw: pointer for max_bw read 4553 * @min_bw: pointer for min_bw read 4554 * @min_valid: pointer for bool that is true if min_bw is a valid value 4555 * @max_valid: pointer for bool that is true if max_bw is a valid value 4556 * 4557 * Read bw from the alternate ram for the given pf 4558 **/ 4559 i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw, 4560 u32 *max_bw, u32 *min_bw, 4561 bool *min_valid, bool *max_valid) 4562 { 4563 i40e_status status; 4564 u32 max_bw_addr, min_bw_addr; 4565 4566 /* Calculate the address of the min/max bw registers */ 4567 max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4568 I40E_ALT_STRUCT_MAX_BW_OFFSET + 4569 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4570 min_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET + 4571 I40E_ALT_STRUCT_MIN_BW_OFFSET + 4572 (I40E_ALT_STRUCT_DWORDS_PER_PF * hw->pf_id); 4573 4574 /* Read the bandwidths from alt ram */ 4575 status = i40e_aq_alternate_read(hw, max_bw_addr, max_bw, 4576 min_bw_addr, min_bw); 4577 4578 if (*min_bw & I40E_ALT_BW_VALID_MASK) 4579 *min_valid = true; 4580 else 4581 *min_valid = false; 4582 4583 if (*max_bw & I40E_ALT_BW_VALID_MASK) 4584 *max_valid = true; 4585 else 4586 *max_valid = false; 4587 4588 return status; 4589 } 4590 4591 /** 4592 * i40e_aq_configure_partition_bw 4593 * @hw: pointer to the hardware structure 4594 * @bw_data: Buffer holding valid pfs and bw limits 4595 * @cmd_details: pointer to command details 4596 * 4597 * Configure partitions guaranteed/max bw 4598 **/ 4599 i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, 4600 struct i40e_aqc_configure_partition_bw_data *bw_data, 4601 struct i40e_asq_cmd_details *cmd_details) 4602 { 4603 i40e_status status; 4604 struct i40e_aq_desc desc; 4605 u16 bwd_size = sizeof(*bw_data); 4606 4607 i40e_fill_default_direct_cmd_desc(&desc, 4608 i40e_aqc_opc_configure_partition_bw); 4609 4610 /* Indirect command */ 4611 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 4612 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_RD); 4613 4614 if (bwd_size > I40E_AQ_LARGE_BUF) 4615 
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 4616 4617 desc.datalen = cpu_to_le16(bwd_size); 4618 4619 status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, 4620 cmd_details); 4621 4622 return status; 4623 } 4624 4625 /** 4626 * i40e_read_phy_register_clause22 4627 * @hw: pointer to the HW structure 4628 * @reg: register address in the page 4629 * @phy_addr: PHY address on MDIO interface 4630 * @value: PHY register value 4631 * 4632 * Reads specified PHY register value 4633 **/ 4634 i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, 4635 u16 reg, u8 phy_addr, u16 *value) 4636 { 4637 i40e_status status = I40E_ERR_TIMEOUT; 4638 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4639 u32 command = 0; 4640 u16 retry = 1000; 4641 4642 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4643 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4644 (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) | 4645 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4646 (I40E_GLGEN_MSCA_MDICMD_MASK); 4647 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4648 do { 4649 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4650 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4651 status = 0; 4652 break; 4653 } 4654 udelay(10); 4655 retry--; 4656 } while (retry); 4657 4658 if (status) { 4659 i40e_debug(hw, I40E_DEBUG_PHY, 4660 "PHY: Can't write command to external PHY.\n"); 4661 } else { 4662 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4663 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4664 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4665 } 4666 4667 return status; 4668 } 4669 4670 /** 4671 * i40e_write_phy_register_clause22 4672 * @hw: pointer to the HW structure 4673 * @reg: register address in the page 4674 * @phy_addr: PHY address on MDIO interface 4675 * @value: PHY register value 4676 * 4677 * Writes specified PHY register value 4678 **/ 4679 i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, 4680 u16 reg, u8 phy_addr, u16 value) 4681 { 4682 i40e_status status = I40E_ERR_TIMEOUT; 4683 u8 port_num = (u8)hw->func_caps.mdio_port_num; 4684 u32 command = 0; 4685 u16 retry = 1000; 4686 4687 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4688 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4689 4690 command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4691 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4692 (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) | 4693 (I40E_MDIO_CLAUSE22_STCODE_MASK) | 4694 (I40E_GLGEN_MSCA_MDICMD_MASK); 4695 4696 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4697 do { 4698 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4699 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4700 status = 0; 4701 break; 4702 } 4703 udelay(10); 4704 retry--; 4705 } while (retry); 4706 4707 return status; 4708 } 4709 4710 /** 4711 * i40e_read_phy_register_clause45 4712 * @hw: pointer to the HW structure 4713 * @page: registers page number 4714 * @reg: register address in the page 4715 * @phy_addr: PHY address on MDIO interface 4716 * @value: PHY register value 4717 * 4718 * Reads specified PHY register value 4719 **/ 4720 i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw, 4721 u8 page, u16 reg, u8 phy_addr, u16 *value) 4722 { 4723 i40e_status status = I40E_ERR_TIMEOUT; 4724 u32 command = 0; 4725 u16 retry = 1000; 4726 u8 port_num = hw->func_caps.mdio_port_num; 4727 4728 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4729 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4730 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4731 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4732 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4733 
(I40E_GLGEN_MSCA_MDICMD_MASK) | 4734 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4735 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4736 do { 4737 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4738 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4739 status = 0; 4740 break; 4741 } 4742 usleep_range(10, 20); 4743 retry--; 4744 } while (retry); 4745 4746 if (status) { 4747 i40e_debug(hw, I40E_DEBUG_PHY, 4748 "PHY: Can't write command to external PHY.\n"); 4749 goto phy_read_end; 4750 } 4751 4752 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4753 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4754 (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) | 4755 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4756 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4757 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4758 status = I40E_ERR_TIMEOUT; 4759 retry = 1000; 4760 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4761 do { 4762 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4763 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4764 status = 0; 4765 break; 4766 } 4767 usleep_range(10, 20); 4768 retry--; 4769 } while (retry); 4770 4771 if (!status) { 4772 command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); 4773 *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> 4774 I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; 4775 } else { 4776 i40e_debug(hw, I40E_DEBUG_PHY, 4777 "PHY: Can't read register value from external PHY.\n"); 4778 } 4779 4780 phy_read_end: 4781 return status; 4782 } 4783 4784 /** 4785 * i40e_write_phy_register_clause45 4786 * @hw: pointer to the HW structure 4787 * @page: registers page number 4788 * @reg: register address in the page 4789 * @phy_addr: PHY address on MDIO interface 4790 * @value: PHY register value 4791 * 4792 * Writes value to specified PHY register 4793 **/ 4794 i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw, 4795 u8 page, u16 reg, u8 phy_addr, u16 value) 4796 { 4797 i40e_status status = I40E_ERR_TIMEOUT; 4798 u32 command = 0; 4799 u16 retry = 1000; 4800 u8 port_num = hw->func_caps.mdio_port_num; 4801 4802 command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | 4803 (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4804 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4805 (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) | 4806 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4807 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4808 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4809 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4810 do { 4811 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4812 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4813 status = 0; 4814 break; 4815 } 4816 usleep_range(10, 20); 4817 retry--; 4818 } while (retry); 4819 if (status) { 4820 i40e_debug(hw, I40E_DEBUG_PHY, 4821 "PHY: Can't write command to external PHY.\n"); 4822 goto phy_write_end; 4823 } 4824 4825 command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; 4826 wr32(hw, I40E_GLGEN_MSRWD(port_num), command); 4827 4828 command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | 4829 (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | 4830 (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) | 4831 (I40E_MDIO_CLAUSE45_STCODE_MASK) | 4832 (I40E_GLGEN_MSCA_MDICMD_MASK) | 4833 (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); 4834 status = I40E_ERR_TIMEOUT; 4835 retry = 1000; 4836 wr32(hw, I40E_GLGEN_MSCA(port_num), command); 4837 do { 4838 command = rd32(hw, I40E_GLGEN_MSCA(port_num)); 4839 if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { 4840 status = 0; 4841 break; 4842 } 4843 usleep_range(10, 20); 4844 retry--; 4845 } while (retry); 4846 4847 phy_write_end: 4848 return status; 4849 } 4850 4851 /** 4852 * i40e_write_phy_register 4853 * @hw: pointer to the 
HW structure 4854 * @page: registers page number 4855 * @reg: register address in the page 4856 * @phy_addr: PHY address on MDIO interface 4857 * @value: PHY register value 4858 * 4859 * Writes value to specified PHY register 4860 **/ 4861 i40e_status i40e_write_phy_register(struct i40e_hw *hw, 4862 u8 page, u16 reg, u8 phy_addr, u16 value) 4863 { 4864 i40e_status status; 4865 4866 switch (hw->device_id) { 4867 case I40E_DEV_ID_1G_BASE_T_X722: 4868 status = i40e_write_phy_register_clause22(hw, reg, phy_addr, 4869 value); 4870 break; 4871 case I40E_DEV_ID_10G_BASE_T: 4872 case I40E_DEV_ID_10G_BASE_T4: 4873 case I40E_DEV_ID_10G_BASE_T_X722: 4874 case I40E_DEV_ID_25G_B: 4875 case I40E_DEV_ID_25G_SFP28: 4876 status = i40e_write_phy_register_clause45(hw, page, reg, 4877 phy_addr, value); 4878 break; 4879 default: 4880 status = I40E_ERR_UNKNOWN_PHY; 4881 break; 4882 } 4883 4884 return status; 4885 } 4886 4887 /** 4888 * i40e_read_phy_register 4889 * @hw: pointer to the HW structure 4890 * @page: registers page number 4891 * @reg: register address in the page 4892 * @phy_addr: PHY address on MDIO interface 4893 * @value: PHY register value 4894 * 4895 * Reads specified PHY register value 4896 **/ 4897 i40e_status i40e_read_phy_register(struct i40e_hw *hw, 4898 u8 page, u16 reg, u8 phy_addr, u16 *value) 4899 { 4900 i40e_status status; 4901 4902 switch (hw->device_id) { 4903 case I40E_DEV_ID_1G_BASE_T_X722: 4904 status = i40e_read_phy_register_clause22(hw, reg, phy_addr, 4905 value); 4906 break; 4907 case I40E_DEV_ID_10G_BASE_T: 4908 case I40E_DEV_ID_10G_BASE_T4: 4909 case I40E_DEV_ID_10G_BASE_T_BC: 4910 case I40E_DEV_ID_10G_BASE_T_X722: 4911 case I40E_DEV_ID_25G_B: 4912 case I40E_DEV_ID_25G_SFP28: 4913 status = i40e_read_phy_register_clause45(hw, page, reg, 4914 phy_addr, value); 4915 break; 4916 default: 4917 status = I40E_ERR_UNKNOWN_PHY; 4918 break; 4919 } 4920 4921 return status; 4922 } 4923 4924 /** 4925 * i40e_get_phy_address 4926 * @hw: pointer to the HW structure 4927 * @dev_num: PHY port number whose address we want 4928 * 4929 * Gets PHY address for current port 4930 **/ 4931 u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) 4932 { 4933 u8 port_num = hw->func_caps.mdio_port_num; 4934 u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); 4935 4936 return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; 4937 } 4938 4939 /** 4940 * i40e_blink_phy_link_led 4941 * @hw: pointer to the HW structure 4942 * @time: how long the LED should blink, in seconds 4943 * @interval: gap between LED on and off, in milliseconds 4944 * 4945 * Blinks PHY link LED 4946 **/ 4947 i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, 4948 u32 time, u32 interval) 4949 { 4950 i40e_status status = 0; 4951 u32 i; 4952 u16 led_ctl; 4953 u16 gpio_led_port; 4954 u16 led_reg; 4955 u16 led_addr = I40E_PHY_LED_PROV_REG_1; 4956 u8 phy_addr = 0; 4957 u8 port_num; 4958 4959 i = rd32(hw, I40E_PFGEN_PORTNUM); 4960 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 4961 phy_addr = i40e_get_phy_address(hw, port_num); 4962 4963 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 4964 led_addr++) { 4965 status = i40e_read_phy_register_clause45(hw, 4966 I40E_PHY_COM_REG_PAGE, 4967 led_addr, phy_addr, 4968 &led_reg); 4969 if (status) 4970 goto phy_blinking_end; 4971 led_ctl = led_reg; 4972 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 4973 led_reg = 0; 4974 status = i40e_write_phy_register_clause45(hw, 4975 I40E_PHY_COM_REG_PAGE, 4976 led_addr, phy_addr, 4977 led_reg); 4978 if (status) 4979 goto phy_blinking_end; 4980 break; 4981 }
4982 } 4983 4984 if (time > 0 && interval > 0) { 4985 for (i = 0; i < time * 1000; i += interval) { 4986 status = i40e_read_phy_register_clause45(hw, 4987 I40E_PHY_COM_REG_PAGE, 4988 led_addr, phy_addr, &led_reg); 4989 if (status) 4990 goto restore_config; 4991 if (led_reg & I40E_PHY_LED_MANUAL_ON) 4992 led_reg = 0; 4993 else 4994 led_reg = I40E_PHY_LED_MANUAL_ON; 4995 status = i40e_write_phy_register_clause45(hw, 4996 I40E_PHY_COM_REG_PAGE, 4997 led_addr, phy_addr, led_reg); 4998 if (status) 4999 goto restore_config; 5000 msleep(interval); 5001 } 5002 } 5003 5004 restore_config: 5005 status = i40e_write_phy_register_clause45(hw, 5006 I40E_PHY_COM_REG_PAGE, 5007 led_addr, phy_addr, led_ctl); 5008 5009 phy_blinking_end: 5010 return status; 5011 } 5012 5013 /** 5014 * i40e_led_get_reg - read LED register 5015 * @hw: pointer to the HW structure 5016 * @led_addr: LED register address 5017 * @reg_val: read register value 5018 **/ 5019 static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, 5020 u32 *reg_val) 5021 { 5022 enum i40e_status_code status; 5023 u8 phy_addr = 0; 5024 u8 port_num; 5025 u32 i; 5026 5027 *reg_val = 0; 5028 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5029 status = 5030 i40e_aq_get_phy_register(hw, 5031 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5032 I40E_PHY_COM_REG_PAGE, 5033 I40E_PHY_LED_PROV_REG_1, 5034 reg_val, NULL); 5035 } else { 5036 i = rd32(hw, I40E_PFGEN_PORTNUM); 5037 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5038 phy_addr = i40e_get_phy_address(hw, port_num); 5039 status = i40e_read_phy_register_clause45(hw, 5040 I40E_PHY_COM_REG_PAGE, 5041 led_addr, phy_addr, 5042 (u16 *)reg_val); 5043 } 5044 return status; 5045 } 5046 5047 /** 5048 * i40e_led_set_reg - write LED register 5049 * @hw: pointer to the HW structure 5050 * @led_addr: LED register address 5051 * @reg_val: register value to write 5052 **/ 5053 static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, 5054 u32 reg_val) 5055 { 5056 enum i40e_status_code status; 5057 u8 phy_addr = 0; 5058 u8 port_num; 5059 u32 i; 5060 5061 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5062 status = 5063 i40e_aq_set_phy_register(hw, 5064 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5065 I40E_PHY_COM_REG_PAGE, 5066 I40E_PHY_LED_PROV_REG_1, 5067 reg_val, NULL); 5068 } else { 5069 i = rd32(hw, I40E_PFGEN_PORTNUM); 5070 port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5071 phy_addr = i40e_get_phy_address(hw, port_num); 5072 status = i40e_write_phy_register_clause45(hw, 5073 I40E_PHY_COM_REG_PAGE, 5074 led_addr, phy_addr, 5075 (u16)reg_val); 5076 } 5077 5078 return status; 5079 } 5080 5081 /** 5082 * i40e_led_get_phy - return current on/off mode 5083 * @hw: pointer to the hw struct 5084 * @led_addr: address of led register to use 5085 * @val: original value of register to use 5086 * 5087 **/ 5088 i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, 5089 u16 *val) 5090 { 5091 i40e_status status = 0; 5092 u16 gpio_led_port; 5093 u8 phy_addr = 0; 5094 u16 reg_val; 5095 u16 temp_addr; 5096 u8 port_num; 5097 u32 i; 5098 u32 reg_val_aq; 5099 5100 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 5101 status = 5102 i40e_aq_get_phy_register(hw, 5103 I40E_AQ_PHY_REG_ACCESS_EXTERNAL, 5104 I40E_PHY_COM_REG_PAGE, 5105 I40E_PHY_LED_PROV_REG_1, 5106 &reg_val_aq, NULL); 5107 if (status == I40E_SUCCESS) 5108 *val = (u16)reg_val_aq; 5109 return status; 5110 } 5111 temp_addr = I40E_PHY_LED_PROV_REG_1; 5112 i = rd32(hw, I40E_PFGEN_PORTNUM); 5113 port_num = (u8)(i &
I40E_PFGEN_PORTNUM_PORT_NUM_MASK); 5114 phy_addr = i40e_get_phy_address(hw, port_num); 5115 5116 for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, 5117 temp_addr++) { 5118 status = i40e_read_phy_register_clause45(hw, 5119 I40E_PHY_COM_REG_PAGE, 5120 temp_addr, phy_addr, 5121 &reg_val); 5122 if (status) 5123 return status; 5124 *val = reg_val; 5125 if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { 5126 *led_addr = temp_addr; 5127 break; 5128 } 5129 } 5130 return status; 5131 } 5132 5133 /** 5134 * i40e_led_set_phy 5135 * @hw: pointer to the HW structure 5136 * @on: true or false 5137 * @led_addr: address of led register to use 5138 * @mode: original val plus bit for set or ignore 5139 * 5140 * Set led's on or off when controlled by the PHY 5141 * 5142 **/ 5143 i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, 5144 u16 led_addr, u32 mode) 5145 { 5146 i40e_status status = 0; 5147 u32 led_ctl = 0; 5148 u32 led_reg = 0; 5149 5150 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5151 if (status) 5152 return status; 5153 led_ctl = led_reg; 5154 if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { 5155 led_reg = 0; 5156 status = i40e_led_set_reg(hw, led_addr, led_reg); 5157 if (status) 5158 return status; 5159 } 5160 status = i40e_led_get_reg(hw, led_addr, &led_reg); 5161 if (status) 5162 goto restore_config; 5163 if (on) 5164 led_reg = I40E_PHY_LED_MANUAL_ON; 5165 else 5166 led_reg = 0; 5167 5168 status = i40e_led_set_reg(hw, led_addr, led_reg); 5169 if (status) 5170 goto restore_config; 5171 if (mode & I40E_PHY_LED_MODE_ORIG) { 5172 led_ctl = (mode & I40E_PHY_LED_MODE_MASK); 5173 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5174 } 5175 return status; 5176 5177 restore_config: 5178 status = i40e_led_set_reg(hw, led_addr, led_ctl); 5179 return status; 5180 } 5181 5182 /** 5183 * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register 5184 * @hw: pointer to the hw struct 5185 * @reg_addr: register address 5186 * @reg_val: ptr to register value 5187 * @cmd_details: pointer to command details structure or NULL 5188 * 5189 * Use the firmware to read the Rx control register, 5190 * especially useful if the Rx unit is under heavy pressure 5191 **/ 5192 i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, 5193 u32 reg_addr, u32 *reg_val, 5194 struct i40e_asq_cmd_details *cmd_details) 5195 { 5196 struct i40e_aq_desc desc; 5197 struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = 5198 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5199 i40e_status status; 5200 5201 if (!reg_val) 5202 return I40E_ERR_PARAM; 5203 5204 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); 5205 5206 cmd_resp->address = cpu_to_le32(reg_addr); 5207 5208 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5209 5210 if (status == 0) 5211 *reg_val = le32_to_cpu(cmd_resp->value); 5212 5213 return status; 5214 } 5215 5216 /** 5217 * i40e_read_rx_ctl - read from an Rx control register 5218 * @hw: pointer to the hw struct 5219 * @reg_addr: register address 5220 **/ 5221 u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) 5222 { 5223 i40e_status status = 0; 5224 bool use_register; 5225 int retry = 5; 5226 u32 val = 0; 5227 5228 use_register = (((hw->aq.api_maj_ver == 1) && 5229 (hw->aq.api_min_ver < 5)) || 5230 (hw->mac.type == I40E_MAC_X722)); 5231 if (!use_register) { 5232 do_retry: 5233 status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); 5234 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5235 usleep_range(1000, 2000); 5236
retry--; 5237 goto do_retry; 5238 } 5239 } 5240 5241 /* if the AQ access failed, try the old-fashioned way */ 5242 if (status || use_register) 5243 val = rd32(hw, reg_addr); 5244 5245 return val; 5246 } 5247 5248 /** 5249 * i40e_aq_rx_ctl_write_register 5250 * @hw: pointer to the hw struct 5251 * @reg_addr: register address 5252 * @reg_val: register value 5253 * @cmd_details: pointer to command details structure or NULL 5254 * 5255 * Use the firmware to write to an Rx control register, 5256 * especially useful if the Rx unit is under heavy pressure 5257 **/ 5258 i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, 5259 u32 reg_addr, u32 reg_val, 5260 struct i40e_asq_cmd_details *cmd_details) 5261 { 5262 struct i40e_aq_desc desc; 5263 struct i40e_aqc_rx_ctl_reg_read_write *cmd = 5264 (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; 5265 i40e_status status; 5266 5267 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); 5268 5269 cmd->address = cpu_to_le32(reg_addr); 5270 cmd->value = cpu_to_le32(reg_val); 5271 5272 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5273 5274 return status; 5275 } 5276 5277 /** 5278 * i40e_write_rx_ctl - write to an Rx control register 5279 * @hw: pointer to the hw struct 5280 * @reg_addr: register address 5281 * @reg_val: register value 5282 **/ 5283 void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) 5284 { 5285 i40e_status status = 0; 5286 bool use_register; 5287 int retry = 5; 5288 5289 use_register = (((hw->aq.api_maj_ver == 1) && 5290 (hw->aq.api_min_ver < 5)) || 5291 (hw->mac.type == I40E_MAC_X722)); 5292 if (!use_register) { 5293 do_retry: 5294 status = i40e_aq_rx_ctl_write_register(hw, reg_addr, 5295 reg_val, NULL); 5296 if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { 5297 usleep_range(1000, 2000); 5298 retry--; 5299 goto do_retry; 5300 } 5301 } 5302 5303 /* if the AQ access failed, try the old-fashioned way */ 5304 if (status || use_register) 5305 wr32(hw, reg_addr, reg_val); 5306 } 5307 5308 /** 5309 * i40e_aq_set_phy_register 5310 * @hw: pointer to the hw struct 5311 * @phy_select: select which phy should be accessed 5312 * @dev_addr: PHY device address 5313 * @reg_addr: PHY register address 5314 * @reg_val: new register value 5315 * @cmd_details: pointer to command details structure or NULL 5316 * 5317 * Write the external PHY register. 5318 **/ 5319 i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, 5320 u8 phy_select, u8 dev_addr, 5321 u32 reg_addr, u32 reg_val, 5322 struct i40e_asq_cmd_details *cmd_details) 5323 { 5324 struct i40e_aq_desc desc; 5325 struct i40e_aqc_phy_register_access *cmd = 5326 (struct i40e_aqc_phy_register_access *)&desc.params.raw; 5327 i40e_status status; 5328 5329 i40e_fill_default_direct_cmd_desc(&desc, 5330 i40e_aqc_opc_set_phy_register); 5331 5332 cmd->phy_interface = phy_select; 5333 cmd->dev_address = dev_addr; 5334 cmd->reg_address = cpu_to_le32(reg_addr); 5335 cmd->reg_value = cpu_to_le32(reg_val); 5336 5337 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5338 5339 return status; 5340 } 5341 5342 /** 5343 * i40e_aq_get_phy_register 5344 * @hw: pointer to the hw struct 5345 * @phy_select: select which phy should be accessed 5346 * @dev_addr: PHY device address 5347 * @reg_addr: PHY register address 5348 * @reg_val: read register value 5349 * @cmd_details: pointer to command details structure or NULL 5350 * 5351 * Read the external PHY register. 
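 *
 * Hypothetical usage sketch (mirrors the AQ path used by i40e_led_get_reg()
 * above):
 *
 *	u32 val = 0;
 *
 *	status = i40e_aq_get_phy_register(hw, I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
 *					  I40E_PHY_COM_REG_PAGE,
 *					  I40E_PHY_LED_PROV_REG_1, &val, NULL);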
5352 **/ 5353 i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, 5354 u8 phy_select, u8 dev_addr, 5355 u32 reg_addr, u32 *reg_val, 5356 struct i40e_asq_cmd_details *cmd_details) 5357 { 5358 struct i40e_aq_desc desc; 5359 struct i40e_aqc_phy_register_access *cmd = 5360 (struct i40e_aqc_phy_register_access *)&desc.params.raw; 5361 i40e_status status; 5362 5363 i40e_fill_default_direct_cmd_desc(&desc, 5364 i40e_aqc_opc_get_phy_register); 5365 5366 cmd->phy_interface = phy_select; 5367 cmd->dev_address = dev_addr; 5368 cmd->reg_address = cpu_to_le32(reg_addr); 5369 5370 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 5371 if (!status) 5372 *reg_val = le32_to_cpu(cmd->reg_value); 5373 5374 return status; 5375 } 5376 5377 /** 5378 * i40e_aq_write_ddp - Write dynamic device personalization (ddp) 5379 * @hw: pointer to the hw struct 5380 * @buff: command buffer (size in bytes = buff_size) 5381 * @buff_size: buffer size in bytes 5382 * @track_id: package tracking id 5383 * @error_offset: returns error offset 5384 * @error_info: returns error information 5385 * @cmd_details: pointer to command details structure or NULL 5386 **/ 5387 enum 5388 i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, 5389 u16 buff_size, u32 track_id, 5390 u32 *error_offset, u32 *error_info, 5391 struct i40e_asq_cmd_details *cmd_details) 5392 { 5393 struct i40e_aq_desc desc; 5394 struct i40e_aqc_write_personalization_profile *cmd = 5395 (struct i40e_aqc_write_personalization_profile *) 5396 &desc.params.raw; 5397 struct i40e_aqc_write_ddp_resp *resp; 5398 i40e_status status; 5399 5400 i40e_fill_default_direct_cmd_desc(&desc, 5401 i40e_aqc_opc_write_personalization_profile); 5402 5403 desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); 5404 if (buff_size > I40E_AQ_LARGE_BUF) 5405 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5406 5407 desc.datalen = cpu_to_le16(buff_size); 5408 5409 cmd->profile_track_id = cpu_to_le32(track_id); 5410 5411 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 5412 if (!status) { 5413 resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; 5414 if (error_offset) 5415 *error_offset = le32_to_cpu(resp->error_offset); 5416 if (error_info) 5417 *error_info = le32_to_cpu(resp->error_info); 5418 } 5419 5420 return status; 5421 } 5422 5423 /** 5424 * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp) 5425 * @hw: pointer to the hw struct 5426 * @buff: command buffer (size in bytes = buff_size) 5427 * @buff_size: buffer size in bytes 5428 * @flags: AdminQ command flags 5429 * @cmd_details: pointer to command details structure or NULL 5430 **/ 5431 enum 5432 i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff, 5433 u16 buff_size, u8 flags, 5434 struct i40e_asq_cmd_details *cmd_details) 5435 { 5436 struct i40e_aq_desc desc; 5437 struct i40e_aqc_get_applied_profiles *cmd = 5438 (struct i40e_aqc_get_applied_profiles *)&desc.params.raw; 5439 i40e_status status; 5440 5441 i40e_fill_default_direct_cmd_desc(&desc, 5442 i40e_aqc_opc_get_personalization_profile_list); 5443 5444 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); 5445 if (buff_size > I40E_AQ_LARGE_BUF) 5446 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5447 desc.datalen = cpu_to_le16(buff_size); 5448 5449 cmd->flags = flags; 5450 5451 status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details); 5452 5453 return status; 5454 } 5455 5456 /** 5457 * i40e_find_segment_in_package 5458 * @segment_type: the segment type to search for 
(i.e., SEGMENT_TYPE_I40E) 5459 * @pkg_hdr: pointer to the package header to be searched 5460 * 5461 * This function searches a package file for a particular segment type. On 5462 * success it returns a pointer to the segment header, otherwise it will 5463 * return NULL. 5464 **/ 5465 struct i40e_generic_seg_header * 5466 i40e_find_segment_in_package(u32 segment_type, 5467 struct i40e_package_header *pkg_hdr) 5468 { 5469 struct i40e_generic_seg_header *segment; 5470 u32 i; 5471 5472 /* Search all package segments for the requested segment type */ 5473 for (i = 0; i < pkg_hdr->segment_count; i++) { 5474 segment = 5475 (struct i40e_generic_seg_header *)((u8 *)pkg_hdr + 5476 pkg_hdr->segment_offset[i]); 5477 5478 if (segment->type == segment_type) 5479 return segment; 5480 } 5481 5482 return NULL; 5483 } 5484 5485 /* Get section table in profile */ 5486 #define I40E_SECTION_TABLE(profile, sec_tbl) \ 5487 do { \ 5488 struct i40e_profile_segment *p = (profile); \ 5489 u32 count; \ 5490 u32 *nvm; \ 5491 count = p->device_table_count; \ 5492 nvm = (u32 *)&p->device_table[count]; \ 5493 sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \ 5494 } while (0) 5495 5496 /* Get section header in profile */ 5497 #define I40E_SECTION_HEADER(profile, offset) \ 5498 (struct i40e_profile_section_header *)((u8 *)(profile) + (offset)) 5499 5500 /** 5501 * i40e_find_section_in_profile 5502 * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE) 5503 * @profile: pointer to the i40e segment header to be searched 5504 * 5505 * This function searches an i40e segment for a particular section type. On 5506 * success it returns a pointer to the section header, otherwise it will 5507 * return NULL. 5508 **/ 5509 struct i40e_profile_section_header * 5510 i40e_find_section_in_profile(u32 section_type, 5511 struct i40e_profile_segment *profile) 5512 { 5513 struct i40e_profile_section_header *sec; 5514 struct i40e_section_table *sec_tbl; 5515 u32 sec_off; 5516 u32 i; 5517 5518 if (profile->header.type != SEGMENT_TYPE_I40E) 5519 return NULL; 5520 5521 I40E_SECTION_TABLE(profile, sec_tbl); 5522 5523 for (i = 0; i < sec_tbl->section_count; i++) { 5524 sec_off = sec_tbl->section_offset[i]; 5525 sec = I40E_SECTION_HEADER(profile, sec_off); 5526 if (sec->section.type == section_type) 5527 return sec; 5528 } 5529 5530 return NULL; 5531 } 5532 5533 /** 5534 * i40e_ddp_exec_aq_section - Execute generic AQ for DDP 5535 * @hw: pointer to the hw struct 5536 * @aq: command buffer containing all data to execute AQ 5537 **/ 5538 static enum 5539 i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw, 5540 struct i40e_profile_aq_section *aq) 5541 { 5542 i40e_status status; 5543 struct i40e_aq_desc desc; 5544 u8 *msg = NULL; 5545 u16 msglen; 5546 5547 i40e_fill_default_direct_cmd_desc(&desc, aq->opcode); 5548 desc.flags |= cpu_to_le16(aq->flags); 5549 memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw)); 5550 5551 msglen = aq->datalen; 5552 if (msglen) { 5553 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | 5554 I40E_AQ_FLAG_RD)); 5555 if (msglen > I40E_AQ_LARGE_BUF) 5556 desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); 5557 desc.datalen = cpu_to_le16(msglen); 5558 msg = &aq->data[0]; 5559 } 5560 5561 status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL); 5562 5563 if (status) { 5564 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5565 "unable to exec DDP AQ opcode %u, error %d\n", 5566 aq->opcode, status); 5567 return status; 5568 } 5569 5570 /* copy returned desc to aq_buf */ 5571 memcpy(aq->param,
desc.params.raw, sizeof(desc.params.raw)); 5572 5573 return 0; 5574 } 5575 5576 /** 5577 * i40e_validate_profile 5578 * @hw: pointer to the hardware structure 5579 * @profile: pointer to the profile segment of the package to be validated 5580 * @track_id: package tracking id 5581 * @rollback: true if the profile is being validated for rollback. 5582 * 5583 * Validates supported devices and profile's sections. 5584 */ 5585 static enum i40e_status_code 5586 i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5587 u32 track_id, bool rollback) 5588 { 5589 struct i40e_profile_section_header *sec = NULL; 5590 i40e_status status = 0; 5591 struct i40e_section_table *sec_tbl; 5592 u32 vendor_dev_id; 5593 u32 dev_cnt; 5594 u32 sec_off; 5595 u32 i; 5596 5597 if (track_id == I40E_DDP_TRACKID_INVALID) { 5598 i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n"); 5599 return I40E_NOT_SUPPORTED; 5600 } 5601 5602 dev_cnt = profile->device_table_count; 5603 for (i = 0; i < dev_cnt; i++) { 5604 vendor_dev_id = profile->device_table[i].vendor_dev_id; 5605 if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL && 5606 hw->device_id == (vendor_dev_id & 0xFFFF)) 5607 break; 5608 } 5609 if (dev_cnt && i == dev_cnt) { 5610 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5611 "Device doesn't support DDP\n"); 5612 return I40E_ERR_DEVICE_NOT_SUPPORTED; 5613 } 5614 5615 I40E_SECTION_TABLE(profile, sec_tbl); 5616 5617 /* Validate section types */ 5618 for (i = 0; i < sec_tbl->section_count; i++) { 5619 sec_off = sec_tbl->section_offset[i]; 5620 sec = I40E_SECTION_HEADER(profile, sec_off); 5621 if (rollback) { 5622 if (sec->section.type == SECTION_TYPE_MMIO || 5623 sec->section.type == SECTION_TYPE_AQ || 5624 sec->section.type == SECTION_TYPE_RB_AQ) { 5625 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5626 "Not a roll-back package\n"); 5627 return I40E_NOT_SUPPORTED; 5628 } 5629 } else { 5630 if (sec->section.type == SECTION_TYPE_RB_AQ || 5631 sec->section.type == SECTION_TYPE_RB_MMIO) { 5632 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5633 "Not an original package\n"); 5634 return I40E_NOT_SUPPORTED; 5635 } 5636 } 5637 } 5638 5639 return status; 5640 } 5641 5642 /** 5643 * i40e_write_profile 5644 * @hw: pointer to the hardware structure 5645 * @profile: pointer to the profile segment of the package to be downloaded 5646 * @track_id: package tracking id 5647 * 5648 * Handles the download of a complete package.
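 * AQ sections are executed with i40e_ddp_exec_aq_section() and MMIO sections are written with i40e_aq_write_ddp(); the download stops at the first failure.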
5649 */ 5650 enum i40e_status_code 5651 i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5652 u32 track_id) 5653 { 5654 i40e_status status = 0; 5655 struct i40e_section_table *sec_tbl; 5656 struct i40e_profile_section_header *sec = NULL; 5657 struct i40e_profile_aq_section *ddp_aq; 5658 u32 section_size = 0; 5659 u32 offset = 0, info = 0; 5660 u32 sec_off; 5661 u32 i; 5662 5663 status = i40e_validate_profile(hw, profile, track_id, false); 5664 if (status) 5665 return status; 5666 5667 I40E_SECTION_TABLE(profile, sec_tbl); 5668 5669 for (i = 0; i < sec_tbl->section_count; i++) { 5670 sec_off = sec_tbl->section_offset[i]; 5671 sec = I40E_SECTION_HEADER(profile, sec_off); 5672 /* Process generic admin command */ 5673 if (sec->section.type == SECTION_TYPE_AQ) { 5674 ddp_aq = (struct i40e_profile_aq_section *)&sec[1]; 5675 status = i40e_ddp_exec_aq_section(hw, ddp_aq); 5676 if (status) { 5677 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5678 "Failed to execute aq: section %d, opcode %u\n", 5679 i, ddp_aq->opcode); 5680 break; 5681 } 5682 sec->section.type = SECTION_TYPE_RB_AQ; 5683 } 5684 5685 /* Skip any non-MMIO sections */ 5686 if (sec->section.type != SECTION_TYPE_MMIO) 5687 continue; 5688 5689 section_size = sec->section.size + 5690 sizeof(struct i40e_profile_section_header); 5691 5692 /* Write MMIO section */ 5693 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, 5694 track_id, &offset, &info, NULL); 5695 if (status) { 5696 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5697 "Failed to write profile: section %d, offset %d, info %d\n", 5698 i, offset, info); 5699 break; 5700 } 5701 } 5702 return status; 5703 } 5704 5705 /** 5706 * i40e_rollback_profile 5707 * @hw: pointer to the hardware structure 5708 * @profile: pointer to the profile segment of the package to be removed 5709 * @track_id: package tracking id 5710 * 5711 * Rolls back a previously loaded package. 5712 */ 5713 enum i40e_status_code 5714 i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, 5715 u32 track_id) 5716 { 5717 struct i40e_profile_section_header *sec = NULL; 5718 i40e_status status = 0; 5719 struct i40e_section_table *sec_tbl; 5720 u32 offset = 0, info = 0; 5721 u32 section_size = 0; 5722 u32 sec_off; 5723 int i; 5724 5725 status = i40e_validate_profile(hw, profile, track_id, true); 5726 if (status) 5727 return status; 5728 5729 I40E_SECTION_TABLE(profile, sec_tbl); 5730 5731 /* For rollback, write sections in reverse order */ 5732 for (i = sec_tbl->section_count - 1; i >= 0; i--) { 5733 sec_off = sec_tbl->section_offset[i]; 5734 sec = I40E_SECTION_HEADER(profile, sec_off); 5735 5736 /* Skip any non-rollback sections */ 5737 if (sec->section.type != SECTION_TYPE_RB_MMIO) 5738 continue; 5739 5740 section_size = sec->section.size + 5741 sizeof(struct i40e_profile_section_header); 5742 5743 /* Write roll-back MMIO section */ 5744 status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size, 5745 track_id, &offset, &info, NULL); 5746 if (status) { 5747 i40e_debug(hw, I40E_DEBUG_PACKAGE, 5748 "Failed to write profile: section %d, offset %d, info %d\n", 5749 i, offset, info); 5750 break; 5751 } 5752 } 5753 return status; 5754 } 5755 5756 /** 5757 * i40e_add_pinfo_to_list 5758 * @hw: pointer to the hardware structure 5759 * @profile: pointer to the profile segment of the package 5760 * @profile_info_sec: buffer for information section 5761 * @track_id: package tracking id 5762 * 5763 * Register a profile to the list of loaded profiles.
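 * Builds a SECTION_TYPE_INFO section in @profile_info_sec and writes it to the device with i40e_aq_write_ddp().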
5764 */ 5765 enum i40e_status_code 5766 i40e_add_pinfo_to_list(struct i40e_hw *hw, 5767 struct i40e_profile_segment *profile, 5768 u8 *profile_info_sec, u32 track_id) 5769 { 5770 i40e_status status = 0; 5771 struct i40e_profile_section_header *sec = NULL; 5772 struct i40e_profile_info *pinfo; 5773 u32 offset = 0, info = 0; 5774 5775 sec = (struct i40e_profile_section_header *)profile_info_sec; 5776 sec->tbl_size = 1; 5777 sec->data_end = sizeof(struct i40e_profile_section_header) + 5778 sizeof(struct i40e_profile_info); 5779 sec->section.type = SECTION_TYPE_INFO; 5780 sec->section.offset = sizeof(struct i40e_profile_section_header); 5781 sec->section.size = sizeof(struct i40e_profile_info); 5782 pinfo = (struct i40e_profile_info *)(profile_info_sec + 5783 sec->section.offset); 5784 pinfo->track_id = track_id; 5785 pinfo->version = profile->version; 5786 pinfo->op = I40E_DDP_ADD_TRACKID; 5787 memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); 5788 5789 status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end, 5790 track_id, &offset, &info, NULL); 5791 5792 return status; 5793 } 5794 5795 /** 5796 * i40e_aq_add_cloud_filters 5797 * @hw: pointer to the hardware structure 5798 * @seid: VSI seid to add cloud filters from 5799 * @filters: Buffer which contains the filters to be added 5800 * @filter_count: number of filters contained in the buffer 5801 * 5802 * Set the cloud filters for a given VSI. The contents of the 5803 * i40e_aqc_cloud_filters_element_data are filled in by the caller 5804 * of the function. 5805 * 5806 **/ 5807 enum i40e_status_code 5808 i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid, 5809 struct i40e_aqc_cloud_filters_element_data *filters, 5810 u8 filter_count) 5811 { 5812 struct i40e_aq_desc desc; 5813 struct i40e_aqc_add_remove_cloud_filters *cmd = 5814 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5815 enum i40e_status_code status; 5816 u16 buff_len; 5817 5818 i40e_fill_default_direct_cmd_desc(&desc, 5819 i40e_aqc_opc_add_cloud_filters); 5820 5821 buff_len = filter_count * sizeof(*filters); 5822 desc.datalen = cpu_to_le16(buff_len); 5823 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5824 cmd->num_filters = filter_count; 5825 cmd->seid = cpu_to_le16(seid); 5826 5827 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5828 5829 return status; 5830 } 5831 5832 /** 5833 * i40e_aq_add_cloud_filters_bb 5834 * @hw: pointer to the hardware structure 5835 * @seid: VSI seid to add cloud filters from 5836 * @filters: Buffer which contains the filters in big buffer to be added 5837 * @filter_count: number of filters contained in the buffer 5838 * 5839 * Set the big buffer cloud filters for a given VSI. The contents of the 5840 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the 5841 * function. 
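 * For Geneve filters, the tenant ID (VNI) is shifted left by one byte before the command is sent, to match the hardware layout.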
5842 * 5843 **/ 5844 enum i40e_status_code 5845 i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, 5846 struct i40e_aqc_cloud_filters_element_bb *filters, 5847 u8 filter_count) 5848 { 5849 struct i40e_aq_desc desc; 5850 struct i40e_aqc_add_remove_cloud_filters *cmd = 5851 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5852 i40e_status status; 5853 u16 buff_len; 5854 int i; 5855 5856 i40e_fill_default_direct_cmd_desc(&desc, 5857 i40e_aqc_opc_add_cloud_filters); 5858 5859 buff_len = filter_count * sizeof(*filters); 5860 desc.datalen = cpu_to_le16(buff_len); 5861 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5862 cmd->num_filters = filter_count; 5863 cmd->seid = cpu_to_le16(seid); 5864 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; 5865 5866 for (i = 0; i < filter_count; i++) { 5867 u16 tnl_type; 5868 u32 ti; 5869 5870 tnl_type = (le16_to_cpu(filters[i].element.flags) & 5871 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> 5872 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; 5873 5874 /* Due to hardware eccentricities, the VNI for Geneve is shifted 5875 * one more byte further than normally used for Tenant ID in 5876 * other tunnel types. 5877 */ 5878 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { 5879 ti = le32_to_cpu(filters[i].element.tenant_id); 5880 filters[i].element.tenant_id = cpu_to_le32(ti << 8); 5881 } 5882 } 5883 5884 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5885 5886 return status; 5887 } 5888 5889 /** 5890 * i40e_aq_rem_cloud_filters 5891 * @hw: pointer to the hardware structure 5892 * @seid: VSI seid to remove cloud filters from 5893 * @filters: Buffer which contains the filters to be removed 5894 * @filter_count: number of filters contained in the buffer 5895 * 5896 * Remove the cloud filters for a given VSI. The contents of the 5897 * i40e_aqc_cloud_filters_element_data are filled in by the caller 5898 * of the function. 5899 * 5900 **/ 5901 enum i40e_status_code 5902 i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid, 5903 struct i40e_aqc_cloud_filters_element_data *filters, 5904 u8 filter_count) 5905 { 5906 struct i40e_aq_desc desc; 5907 struct i40e_aqc_add_remove_cloud_filters *cmd = 5908 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5909 enum i40e_status_code status; 5910 u16 buff_len; 5911 5912 i40e_fill_default_direct_cmd_desc(&desc, 5913 i40e_aqc_opc_remove_cloud_filters); 5914 5915 buff_len = filter_count * sizeof(*filters); 5916 desc.datalen = cpu_to_le16(buff_len); 5917 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5918 cmd->num_filters = filter_count; 5919 cmd->seid = cpu_to_le16(seid); 5920 5921 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5922 5923 return status; 5924 } 5925 5926 /** 5927 * i40e_aq_rem_cloud_filters_bb 5928 * @hw: pointer to the hardware structure 5929 * @seid: VSI seid to remove cloud filters from 5930 * @filters: Buffer which contains the filters in big buffer to be removed 5931 * @filter_count: number of filters contained in the buffer 5932 * 5933 * Remove the big buffer cloud filters for a given VSI. The contents of the 5934 * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the 5935 * function. 
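 * As in the add path, Geneve tenant IDs are shifted left by one byte before the command is sent.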
5936 * 5937 **/ 5938 enum i40e_status_code 5939 i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, 5940 struct i40e_aqc_cloud_filters_element_bb *filters, 5941 u8 filter_count) 5942 { 5943 struct i40e_aq_desc desc; 5944 struct i40e_aqc_add_remove_cloud_filters *cmd = 5945 (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; 5946 i40e_status status; 5947 u16 buff_len; 5948 int i; 5949 5950 i40e_fill_default_direct_cmd_desc(&desc, 5951 i40e_aqc_opc_remove_cloud_filters); 5952 5953 buff_len = filter_count * sizeof(*filters); 5954 desc.datalen = cpu_to_le16(buff_len); 5955 desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); 5956 cmd->num_filters = filter_count; 5957 cmd->seid = cpu_to_le16(seid); 5958 cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; 5959 5960 for (i = 0; i < filter_count; i++) { 5961 u16 tnl_type; 5962 u32 ti; 5963 5964 tnl_type = (le16_to_cpu(filters[i].element.flags) & 5965 I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> 5966 I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; 5967 5968 /* Due to hardware eccentricities, the VNI for Geneve is shifted 5969 * one more byte further than normally used for Tenant ID in 5970 * other tunnel types. 5971 */ 5972 if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { 5973 ti = le32_to_cpu(filters[i].element.tenant_id); 5974 filters[i].element.tenant_id = cpu_to_le32(ti << 8); 5975 } 5976 } 5977 5978 status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); 5979 5980 return status; 5981 } 5982
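/*
 * Illustrative sketch (not part of the driver): one way a caller might apply
 * a DDP package using the helpers above, loosely based on how the DDP load
 * path uses them. "pkg_buf" and "track_id" are hypothetical placeholders,
 * "hw" is the usual struct i40e_hw pointer, and error handling is abbreviated.
 *
 *	struct i40e_package_header *pkg_hdr = (struct i40e_package_header *)pkg_buf;
 *	struct i40e_profile_segment *profile;
 *	u8 pinfo_sec[sizeof(struct i40e_profile_section_header) +
 *		     sizeof(struct i40e_profile_info)];
 *	enum i40e_status_code status;
 *
 *	profile = (struct i40e_profile_segment *)
 *		  i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
 *	if (!profile)
 *		return I40E_ERR_PARAM;
 *
 *	status = i40e_write_profile(hw, profile, track_id);
 *	if (!status)
 *		status = i40e_add_pinfo_to_list(hw, profile, pinfo_sec, track_id);
 */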