// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/avf/virtchnl.h>
#include <linux/bitfield.h>
#include "iavf_type.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 * iavf_aq_str - convert AQ err code to a string
 * @hw: pointer to the HW structure
 * @aq_err: the AQ error code to convert
 **/
const char *iavf_aq_str(struct iavf_hw *hw, enum iavf_admin_queue_err aq_err)
{
	switch (aq_err) {
	case IAVF_AQ_RC_OK:
		return "OK";
	case IAVF_AQ_RC_EPERM:
		return "IAVF_AQ_RC_EPERM";
	case IAVF_AQ_RC_ENOENT:
		return "IAVF_AQ_RC_ENOENT";
	case IAVF_AQ_RC_ESRCH:
		return "IAVF_AQ_RC_ESRCH";
	case IAVF_AQ_RC_EINTR:
		return "IAVF_AQ_RC_EINTR";
	case IAVF_AQ_RC_EIO:
		return "IAVF_AQ_RC_EIO";
	case IAVF_AQ_RC_ENXIO:
		return "IAVF_AQ_RC_ENXIO";
	case IAVF_AQ_RC_E2BIG:
		return "IAVF_AQ_RC_E2BIG";
	case IAVF_AQ_RC_EAGAIN:
		return "IAVF_AQ_RC_EAGAIN";
	case IAVF_AQ_RC_ENOMEM:
		return "IAVF_AQ_RC_ENOMEM";
	case IAVF_AQ_RC_EACCES:
		return "IAVF_AQ_RC_EACCES";
	case IAVF_AQ_RC_EFAULT:
		return "IAVF_AQ_RC_EFAULT";
	case IAVF_AQ_RC_EBUSY:
		return "IAVF_AQ_RC_EBUSY";
	case IAVF_AQ_RC_EEXIST:
		return "IAVF_AQ_RC_EEXIST";
	case IAVF_AQ_RC_EINVAL:
		return "IAVF_AQ_RC_EINVAL";
	case IAVF_AQ_RC_ENOTTY:
		return "IAVF_AQ_RC_ENOTTY";
	case IAVF_AQ_RC_ENOSPC:
		return "IAVF_AQ_RC_ENOSPC";
	case IAVF_AQ_RC_ENOSYS:
		return "IAVF_AQ_RC_ENOSYS";
	case IAVF_AQ_RC_ERANGE:
		return "IAVF_AQ_RC_ERANGE";
	case IAVF_AQ_RC_EFLUSHED:
		return "IAVF_AQ_RC_EFLUSHED";
	case IAVF_AQ_RC_BAD_ADDR:
		return "IAVF_AQ_RC_BAD_ADDR";
	case IAVF_AQ_RC_EMODE:
		return "IAVF_AQ_RC_EMODE";
	case IAVF_AQ_RC_EFBIG:
		return "IAVF_AQ_RC_EFBIG";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", aq_err);
	return hw->err_str;
}

/**
 * iavf_stat_str - convert status err code to a string
 * @hw: pointer to the HW structure
 * @stat_err: the status error code to convert
 **/
const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
{
	switch (stat_err) {
	case 0:
		return "OK";
	case IAVF_ERR_NVM:
		return "IAVF_ERR_NVM";
	case IAVF_ERR_NVM_CHECKSUM:
		return "IAVF_ERR_NVM_CHECKSUM";
	case IAVF_ERR_PHY:
		return "IAVF_ERR_PHY";
	case IAVF_ERR_CONFIG:
		return "IAVF_ERR_CONFIG";
	case IAVF_ERR_PARAM:
		return "IAVF_ERR_PARAM";
	case IAVF_ERR_MAC_TYPE:
		return "IAVF_ERR_MAC_TYPE";
	case IAVF_ERR_UNKNOWN_PHY:
		return "IAVF_ERR_UNKNOWN_PHY";
	case IAVF_ERR_LINK_SETUP:
		return "IAVF_ERR_LINK_SETUP";
	case IAVF_ERR_ADAPTER_STOPPED:
		return "IAVF_ERR_ADAPTER_STOPPED";
	case IAVF_ERR_INVALID_MAC_ADDR:
		return "IAVF_ERR_INVALID_MAC_ADDR";
	case IAVF_ERR_DEVICE_NOT_SUPPORTED:
		return "IAVF_ERR_DEVICE_NOT_SUPPORTED";
	case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
		return "IAVF_ERR_PRIMARY_REQUESTS_PENDING";
	case IAVF_ERR_INVALID_LINK_SETTINGS:
		return "IAVF_ERR_INVALID_LINK_SETTINGS";
	case IAVF_ERR_AUTONEG_NOT_COMPLETE:
		return "IAVF_ERR_AUTONEG_NOT_COMPLETE";
	case IAVF_ERR_RESET_FAILED:
		return "IAVF_ERR_RESET_FAILED";
	case IAVF_ERR_SWFW_SYNC:
		return "IAVF_ERR_SWFW_SYNC";
	case IAVF_ERR_NO_AVAILABLE_VSI:
		return "IAVF_ERR_NO_AVAILABLE_VSI";
	case IAVF_ERR_NO_MEMORY:
		return "IAVF_ERR_NO_MEMORY";
	case IAVF_ERR_BAD_PTR:
		return "IAVF_ERR_BAD_PTR";
	case IAVF_ERR_RING_FULL:
		return "IAVF_ERR_RING_FULL";
	case IAVF_ERR_INVALID_PD_ID:
return "IAVF_ERR_INVALID_PD_ID"; 122 case IAVF_ERR_INVALID_QP_ID: 123 return "IAVF_ERR_INVALID_QP_ID"; 124 case IAVF_ERR_INVALID_CQ_ID: 125 return "IAVF_ERR_INVALID_CQ_ID"; 126 case IAVF_ERR_INVALID_CEQ_ID: 127 return "IAVF_ERR_INVALID_CEQ_ID"; 128 case IAVF_ERR_INVALID_AEQ_ID: 129 return "IAVF_ERR_INVALID_AEQ_ID"; 130 case IAVF_ERR_INVALID_SIZE: 131 return "IAVF_ERR_INVALID_SIZE"; 132 case IAVF_ERR_INVALID_ARP_INDEX: 133 return "IAVF_ERR_INVALID_ARP_INDEX"; 134 case IAVF_ERR_INVALID_FPM_FUNC_ID: 135 return "IAVF_ERR_INVALID_FPM_FUNC_ID"; 136 case IAVF_ERR_QP_INVALID_MSG_SIZE: 137 return "IAVF_ERR_QP_INVALID_MSG_SIZE"; 138 case IAVF_ERR_QP_TOOMANY_WRS_POSTED: 139 return "IAVF_ERR_QP_TOOMANY_WRS_POSTED"; 140 case IAVF_ERR_INVALID_FRAG_COUNT: 141 return "IAVF_ERR_INVALID_FRAG_COUNT"; 142 case IAVF_ERR_QUEUE_EMPTY: 143 return "IAVF_ERR_QUEUE_EMPTY"; 144 case IAVF_ERR_INVALID_ALIGNMENT: 145 return "IAVF_ERR_INVALID_ALIGNMENT"; 146 case IAVF_ERR_FLUSHED_QUEUE: 147 return "IAVF_ERR_FLUSHED_QUEUE"; 148 case IAVF_ERR_INVALID_PUSH_PAGE_INDEX: 149 return "IAVF_ERR_INVALID_PUSH_PAGE_INDEX"; 150 case IAVF_ERR_INVALID_IMM_DATA_SIZE: 151 return "IAVF_ERR_INVALID_IMM_DATA_SIZE"; 152 case IAVF_ERR_TIMEOUT: 153 return "IAVF_ERR_TIMEOUT"; 154 case IAVF_ERR_OPCODE_MISMATCH: 155 return "IAVF_ERR_OPCODE_MISMATCH"; 156 case IAVF_ERR_CQP_COMPL_ERROR: 157 return "IAVF_ERR_CQP_COMPL_ERROR"; 158 case IAVF_ERR_INVALID_VF_ID: 159 return "IAVF_ERR_INVALID_VF_ID"; 160 case IAVF_ERR_INVALID_HMCFN_ID: 161 return "IAVF_ERR_INVALID_HMCFN_ID"; 162 case IAVF_ERR_BACKING_PAGE_ERROR: 163 return "IAVF_ERR_BACKING_PAGE_ERROR"; 164 case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE: 165 return "IAVF_ERR_NO_PBLCHUNKS_AVAILABLE"; 166 case IAVF_ERR_INVALID_PBLE_INDEX: 167 return "IAVF_ERR_INVALID_PBLE_INDEX"; 168 case IAVF_ERR_INVALID_SD_INDEX: 169 return "IAVF_ERR_INVALID_SD_INDEX"; 170 case IAVF_ERR_INVALID_PAGE_DESC_INDEX: 171 return "IAVF_ERR_INVALID_PAGE_DESC_INDEX"; 172 case IAVF_ERR_INVALID_SD_TYPE: 173 return "IAVF_ERR_INVALID_SD_TYPE"; 174 case IAVF_ERR_MEMCPY_FAILED: 175 return "IAVF_ERR_MEMCPY_FAILED"; 176 case IAVF_ERR_INVALID_HMC_OBJ_INDEX: 177 return "IAVF_ERR_INVALID_HMC_OBJ_INDEX"; 178 case IAVF_ERR_INVALID_HMC_OBJ_COUNT: 179 return "IAVF_ERR_INVALID_HMC_OBJ_COUNT"; 180 case IAVF_ERR_INVALID_SRQ_ARM_LIMIT: 181 return "IAVF_ERR_INVALID_SRQ_ARM_LIMIT"; 182 case IAVF_ERR_SRQ_ENABLED: 183 return "IAVF_ERR_SRQ_ENABLED"; 184 case IAVF_ERR_ADMIN_QUEUE_ERROR: 185 return "IAVF_ERR_ADMIN_QUEUE_ERROR"; 186 case IAVF_ERR_ADMIN_QUEUE_TIMEOUT: 187 return "IAVF_ERR_ADMIN_QUEUE_TIMEOUT"; 188 case IAVF_ERR_BUF_TOO_SHORT: 189 return "IAVF_ERR_BUF_TOO_SHORT"; 190 case IAVF_ERR_ADMIN_QUEUE_FULL: 191 return "IAVF_ERR_ADMIN_QUEUE_FULL"; 192 case IAVF_ERR_ADMIN_QUEUE_NO_WORK: 193 return "IAVF_ERR_ADMIN_QUEUE_NO_WORK"; 194 case IAVF_ERR_BAD_RDMA_CQE: 195 return "IAVF_ERR_BAD_RDMA_CQE"; 196 case IAVF_ERR_NVM_BLANK_MODE: 197 return "IAVF_ERR_NVM_BLANK_MODE"; 198 case IAVF_ERR_NOT_IMPLEMENTED: 199 return "IAVF_ERR_NOT_IMPLEMENTED"; 200 case IAVF_ERR_PE_DOORBELL_NOT_ENABLED: 201 return "IAVF_ERR_PE_DOORBELL_NOT_ENABLED"; 202 case IAVF_ERR_DIAG_TEST_FAILED: 203 return "IAVF_ERR_DIAG_TEST_FAILED"; 204 case IAVF_ERR_NOT_READY: 205 return "IAVF_ERR_NOT_READY"; 206 case IAVF_NOT_SUPPORTED: 207 return "IAVF_NOT_SUPPORTED"; 208 case IAVF_ERR_FIRMWARE_API_VERSION: 209 return "IAVF_ERR_FIRMWARE_API_VERSION"; 210 case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR: 211 return "IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; 212 } 213 214 snprintf(hw->err_str, sizeof(hw->err_str), "%d", 
	return hw->err_str;
}

/**
 * iavf_debug_aq
 * @hw: pointer to the hw struct
 * @mask: debug mask
 * @desc: pointer to admin queue descriptor
 * @buffer: pointer to command buffer
 * @buf_len: max length of buffer
 *
 * Dumps debug log about adminq command with descriptor contents.
 **/
void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
		   void *buffer, u16 buf_len)
{
	struct iavf_aq_desc *aq_desc = (struct iavf_aq_desc *)desc;
	u8 *buf = (u8 *)buffer;

	if ((!(mask & hw->debug_mask)) || !desc)
		return;

	iavf_debug(hw, mask,
		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		   le16_to_cpu(aq_desc->opcode),
		   le16_to_cpu(aq_desc->flags),
		   le16_to_cpu(aq_desc->datalen),
		   le16_to_cpu(aq_desc->retval));
	iavf_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->cookie_high),
		   le32_to_cpu(aq_desc->cookie_low));
	iavf_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.internal.param0),
		   le32_to_cpu(aq_desc->params.internal.param1));
	iavf_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.external.addr_high),
		   le32_to_cpu(aq_desc->params.external.addr_low));

	if (buffer && aq_desc->datalen) {
		u16 len = le16_to_cpu(aq_desc->datalen);

		iavf_debug(hw, mask, "AQ CMD Buffer:\n");
		if (buf_len < len)
			len = buf_len;
		/* write the full 16-byte chunks */
		if (hw->debug_mask & mask) {
			char prefix[27];

			snprintf(prefix, sizeof(prefix),
				 "iavf %02x:%02x.%x: \t0x",
				 hw->bus.bus_id,
				 hw->bus.device,
				 hw->bus.func);

			print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
				       16, 1, buf, len, false);
		}
	}
}

/**
 * iavf_check_asq_alive
 * @hw: pointer to the hw struct
 *
 * Returns true if the send queue is enabled, else false.
 **/
bool iavf_check_asq_alive(struct iavf_hw *hw)
{
	/* Check if the queue is initialized */
	if (!hw->aq.asq.count)
		return false;

	return !!(rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQENABLE_MASK);
}

/**
 * iavf_aq_queue_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well.
 **/
enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading)
{
	struct iavf_aq_desc desc;
	struct iavf_aqc_queue_shutdown *cmd =
		(struct iavf_aqc_queue_shutdown *)&desc.params.raw;
	enum iavf_status status;

	iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(IAVF_AQ_DRIVER_UNLOADING);
	status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * iavf_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get or set the RSS lookup table
 **/
static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
						u16 vsi_id, bool pf_lut,
						u8 *lut, u16 lut_size,
						bool set)
{
	enum iavf_status status;
	struct iavf_aq_desc desc;
	struct iavf_aqc_get_set_rss_lut *cmd_resp =
		(struct iavf_aqc_get_set_rss_lut *)&desc.params.raw;
	u16 flags;

	if (set)
		iavf_fill_default_direct_cmd_desc(&desc,
						  iavf_aqc_opc_set_rss_lut);
	else
		iavf_fill_default_direct_cmd_desc(&desc,
						  iavf_aqc_opc_get_rss_lut);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);

	vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
		 FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_VALID, 1);
	cmd_resp->vsi_id = cpu_to_le16(vsi_id);

	if (pf_lut)
		flags = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
				   IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF);
	else
		flags = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
				   IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI);

	cmd_resp->flags = cpu_to_le16(flags);

	status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL);

	return status;
}

/**
 * iavf_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * set the RSS lookup table, PF or VSI type
 **/
enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,
				     bool pf_lut, u8 *lut, u16 lut_size)
{
	return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}

/**
 * iavf_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * Internal function to get or set the RSS key per VSI
 **/
static enum iavf_status
iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
			struct iavf_aqc_get_set_rss_key_data *key,
			bool set)
{
	enum iavf_status status;
	struct iavf_aq_desc desc;
	struct iavf_aqc_get_set_rss_key *cmd_resp =
		(struct iavf_aqc_get_set_rss_key *)&desc.params.raw;
	u16 key_size = sizeof(struct iavf_aqc_get_set_rss_key_data);

	if (set)
		iavf_fill_default_direct_cmd_desc(&desc,
						  iavf_aqc_opc_set_rss_key);
	else
		iavf_fill_default_direct_cmd_desc(&desc,
						  iavf_aqc_opc_get_rss_key);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_RD);

	vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
		 FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_VALID, 1);
	cmd_resp->vsi_id = cpu_to_le16(vsi_id);

	status = iavf_asq_send_command(hw, &desc, key, key_size, NULL);

	return status;
}

/**
 * iavf_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * set the RSS key per VSI
 **/
enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
				     struct iavf_aqc_get_set_rss_key_data *key)
{
	return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
}

/**
 * iavf_aq_send_msg_to_pf
 * @hw: pointer to the hardware structure
 * @v_opcode: opcode for VF-PF communication
 * @v_retval: return error code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @cmd_details: pointer to command details
 *
 * Send message to PF driver using admin queue. By default, this message
 * is sent asynchronously, i.e. iavf_asq_send_command() does not wait for
 * completion before returning.
 **/
enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
					enum virtchnl_ops v_opcode,
					enum iavf_status v_retval,
					u8 *msg, u16 msglen,
					struct iavf_asq_cmd_details *cmd_details)
{
	struct iavf_asq_cmd_details details;
	struct iavf_aq_desc desc;
	enum iavf_status status;

	iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_send_msg_to_pf);
	desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_SI);
	desc.cookie_high = cpu_to_le32(v_opcode);
	desc.cookie_low = cpu_to_le32(v_retval);
	if (msglen) {
		desc.flags |= cpu_to_le16((u16)(IAVF_AQ_FLAG_BUF
						| IAVF_AQ_FLAG_RD));
		if (msglen > IAVF_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)IAVF_AQ_FLAG_LB);
		desc.datalen = cpu_to_le16(msglen);
	}
	if (!cmd_details) {
		memset(&details, 0, sizeof(details));
		details.async = true;
		cmd_details = &details;
	}
	status = iavf_asq_send_command(hw, &desc, msg, msglen, cmd_details);
	return status;
}

/**
 * iavf_vf_parse_hw_config
 * @hw: pointer to the hardware structure
 * @msg: pointer to the virtual channel VF resource structure
 *
 * Given a VF resource message from the PF, populate the hw struct
 * with appropriate information.
 **/
void iavf_vf_parse_hw_config(struct iavf_hw *hw,
			     struct virtchnl_vf_resource *msg)
{
	struct virtchnl_vsi_resource *vsi_res;
	int i;

	vsi_res = &msg->vsi_res[0];

	hw->dev_caps.num_vsis = msg->num_vsis;
	hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
	hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
	hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
	hw->dev_caps.dcb = msg->vf_cap_flags &
			   VIRTCHNL_VF_OFFLOAD_L2;
	hw->dev_caps.fcoe = 0;
	for (i = 0; i < msg->num_vsis; i++) {
		if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
			ether_addr_copy(hw->mac.perm_addr,
					vsi_res->default_mac_addr);
			ether_addr_copy(hw->mac.addr,
					vsi_res->default_mac_addr);
		}
		vsi_res++;
	}
}
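
/*
 * Editor's usage sketch (not part of the driver, kept under #if 0 so it is
 * illustration only): how a caller might combine iavf_aq_send_msg_to_pf()
 * with iavf_stat_str()/iavf_aq_str() to report a failed virtchnl request.
 * The helper name iavf_example_send_version and the pr_err() logging are
 * assumptions; the pattern mirrors the VERSION handshake the driver performs
 * in its virtchnl code, where the PF's reply arrives later on the ARQ.
 */
#if 0	/* example only */
static int iavf_example_send_version(struct iavf_hw *hw)
{
	/* Advertise the virtchnl API version this VF speaks. */
	struct virtchnl_version_info vvi = {
		.major = VIRTCHNL_VERSION_MAJOR,
		.minor = VIRTCHNL_VERSION_MINOR,
	};
	enum iavf_status status;

	/* Fire-and-forget send; completion is not awaited here. */
	status = iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_VERSION, 0,
					(u8 *)&vvi, sizeof(vvi), NULL);
	if (status)
		pr_err("send VERSION failed: %s, aq_err %s\n",
		       iavf_stat_str(hw, status),
		       iavf_aq_str(hw, hw->aq.asq_last_status));

	return status ? -EIO : 0;
}
#endif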