// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/avf/virtchnl.h>
#include <linux/bitfield.h>
#include "iavf_type.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 * iavf_stat_str - convert status err code to a string
 * @hw: pointer to the HW structure
 * @stat_err: the status error code to convert
 **/
const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
{
	switch (stat_err) {
	case 0:
		return "OK";
	case IAVF_ERR_NVM:
		return "IAVF_ERR_NVM";
	case IAVF_ERR_NVM_CHECKSUM:
		return "IAVF_ERR_NVM_CHECKSUM";
	case IAVF_ERR_PHY:
		return "IAVF_ERR_PHY";
	case IAVF_ERR_CONFIG:
		return "IAVF_ERR_CONFIG";
	case IAVF_ERR_PARAM:
		return "IAVF_ERR_PARAM";
	case IAVF_ERR_MAC_TYPE:
		return "IAVF_ERR_MAC_TYPE";
	case IAVF_ERR_UNKNOWN_PHY:
		return "IAVF_ERR_UNKNOWN_PHY";
	case IAVF_ERR_LINK_SETUP:
		return "IAVF_ERR_LINK_SETUP";
	case IAVF_ERR_ADAPTER_STOPPED:
		return "IAVF_ERR_ADAPTER_STOPPED";
	case IAVF_ERR_INVALID_MAC_ADDR:
		return "IAVF_ERR_INVALID_MAC_ADDR";
	case IAVF_ERR_DEVICE_NOT_SUPPORTED:
		return "IAVF_ERR_DEVICE_NOT_SUPPORTED";
	case IAVF_ERR_PRIMARY_REQUESTS_PENDING:
		return "IAVF_ERR_PRIMARY_REQUESTS_PENDING";
	case IAVF_ERR_INVALID_LINK_SETTINGS:
		return "IAVF_ERR_INVALID_LINK_SETTINGS";
	case IAVF_ERR_AUTONEG_NOT_COMPLETE:
		return "IAVF_ERR_AUTONEG_NOT_COMPLETE";
	case IAVF_ERR_RESET_FAILED:
		return "IAVF_ERR_RESET_FAILED";
	case IAVF_ERR_SWFW_SYNC:
		return "IAVF_ERR_SWFW_SYNC";
	case IAVF_ERR_NO_AVAILABLE_VSI:
		return "IAVF_ERR_NO_AVAILABLE_VSI";
	case IAVF_ERR_NO_MEMORY:
		return "IAVF_ERR_NO_MEMORY";
	case IAVF_ERR_BAD_PTR:
		return "IAVF_ERR_BAD_PTR";
	case IAVF_ERR_RING_FULL:
		return "IAVF_ERR_RING_FULL";
	case IAVF_ERR_INVALID_PD_ID:
		return "IAVF_ERR_INVALID_PD_ID";
	case IAVF_ERR_INVALID_QP_ID:
		return "IAVF_ERR_INVALID_QP_ID";
	case IAVF_ERR_INVALID_CQ_ID:
		return "IAVF_ERR_INVALID_CQ_ID";
	case IAVF_ERR_INVALID_CEQ_ID:
		return "IAVF_ERR_INVALID_CEQ_ID";
	case IAVF_ERR_INVALID_AEQ_ID:
		return "IAVF_ERR_INVALID_AEQ_ID";
	case IAVF_ERR_INVALID_SIZE:
		return "IAVF_ERR_INVALID_SIZE";
	case IAVF_ERR_INVALID_ARP_INDEX:
		return "IAVF_ERR_INVALID_ARP_INDEX";
	case IAVF_ERR_INVALID_FPM_FUNC_ID:
		return "IAVF_ERR_INVALID_FPM_FUNC_ID";
	case IAVF_ERR_QP_INVALID_MSG_SIZE:
		return "IAVF_ERR_QP_INVALID_MSG_SIZE";
	case IAVF_ERR_QP_TOOMANY_WRS_POSTED:
		return "IAVF_ERR_QP_TOOMANY_WRS_POSTED";
	case IAVF_ERR_INVALID_FRAG_COUNT:
		return "IAVF_ERR_INVALID_FRAG_COUNT";
	case IAVF_ERR_QUEUE_EMPTY:
		return "IAVF_ERR_QUEUE_EMPTY";
	case IAVF_ERR_INVALID_ALIGNMENT:
		return "IAVF_ERR_INVALID_ALIGNMENT";
	case IAVF_ERR_FLUSHED_QUEUE:
		return "IAVF_ERR_FLUSHED_QUEUE";
	case IAVF_ERR_INVALID_PUSH_PAGE_INDEX:
		return "IAVF_ERR_INVALID_PUSH_PAGE_INDEX";
	case IAVF_ERR_INVALID_IMM_DATA_SIZE:
		return "IAVF_ERR_INVALID_IMM_DATA_SIZE";
	case IAVF_ERR_TIMEOUT:
		return "IAVF_ERR_TIMEOUT";
	case IAVF_ERR_OPCODE_MISMATCH:
		return "IAVF_ERR_OPCODE_MISMATCH";
	case IAVF_ERR_CQP_COMPL_ERROR:
		return "IAVF_ERR_CQP_COMPL_ERROR";
	case IAVF_ERR_INVALID_VF_ID:
		return "IAVF_ERR_INVALID_VF_ID";
	case IAVF_ERR_INVALID_HMCFN_ID:
		return "IAVF_ERR_INVALID_HMCFN_ID";
	case IAVF_ERR_BACKING_PAGE_ERROR:
		return "IAVF_ERR_BACKING_PAGE_ERROR";
	case IAVF_ERR_NO_PBLCHUNKS_AVAILABLE:
		return "IAVF_ERR_NO_PBLCHUNKS_AVAILABLE";
	case IAVF_ERR_INVALID_PBLE_INDEX:
		return "IAVF_ERR_INVALID_PBLE_INDEX";
	case IAVF_ERR_INVALID_SD_INDEX:
		return "IAVF_ERR_INVALID_SD_INDEX";
	case IAVF_ERR_INVALID_PAGE_DESC_INDEX:
		return "IAVF_ERR_INVALID_PAGE_DESC_INDEX";
	case IAVF_ERR_INVALID_SD_TYPE:
		return "IAVF_ERR_INVALID_SD_TYPE";
	case IAVF_ERR_MEMCPY_FAILED:
		return "IAVF_ERR_MEMCPY_FAILED";
	case IAVF_ERR_INVALID_HMC_OBJ_INDEX:
		return "IAVF_ERR_INVALID_HMC_OBJ_INDEX";
	case IAVF_ERR_INVALID_HMC_OBJ_COUNT:
		return "IAVF_ERR_INVALID_HMC_OBJ_COUNT";
	case IAVF_ERR_INVALID_SRQ_ARM_LIMIT:
		return "IAVF_ERR_INVALID_SRQ_ARM_LIMIT";
	case IAVF_ERR_SRQ_ENABLED:
		return "IAVF_ERR_SRQ_ENABLED";
	case IAVF_ERR_ADMIN_QUEUE_ERROR:
		return "IAVF_ERR_ADMIN_QUEUE_ERROR";
	case IAVF_ERR_ADMIN_QUEUE_TIMEOUT:
		return "IAVF_ERR_ADMIN_QUEUE_TIMEOUT";
	case IAVF_ERR_BUF_TOO_SHORT:
		return "IAVF_ERR_BUF_TOO_SHORT";
	case IAVF_ERR_ADMIN_QUEUE_FULL:
		return "IAVF_ERR_ADMIN_QUEUE_FULL";
	case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
		return "IAVF_ERR_ADMIN_QUEUE_NO_WORK";
	case IAVF_ERR_BAD_RDMA_CQE:
		return "IAVF_ERR_BAD_RDMA_CQE";
	case IAVF_ERR_NVM_BLANK_MODE:
		return "IAVF_ERR_NVM_BLANK_MODE";
	case IAVF_ERR_NOT_IMPLEMENTED:
		return "IAVF_ERR_NOT_IMPLEMENTED";
	case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
		return "IAVF_ERR_PE_DOORBELL_NOT_ENABLED";
	case IAVF_ERR_DIAG_TEST_FAILED:
		return "IAVF_ERR_DIAG_TEST_FAILED";
	case IAVF_ERR_NOT_READY:
		return "IAVF_ERR_NOT_READY";
	case IAVF_NOT_SUPPORTED:
		return "IAVF_NOT_SUPPORTED";
	case IAVF_ERR_FIRMWARE_API_VERSION:
		return "IAVF_ERR_FIRMWARE_API_VERSION";
	case IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
		return "IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
	}

	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
	return hw->err_str;
}

/**
 * iavf_debug_aq
 * @hw: pointer to the hw struct
 * @mask: debug mask
 * @desc: pointer to admin queue descriptor
 * @buffer: pointer to command buffer
 * @buf_len: max length of buffer
 *
 * Dumps debug log about adminq command with descriptor contents.
 **/
void iavf_debug_aq(struct iavf_hw *hw, enum iavf_debug_mask mask, void *desc,
		   void *buffer, u16 buf_len)
{
	struct libie_aq_desc *aq_desc = (struct libie_aq_desc *)desc;
	u8 *buf = (u8 *)buffer;

	if ((!(mask & hw->debug_mask)) || !desc)
		return;

	iavf_debug(hw, mask,
		   "AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		   le16_to_cpu(aq_desc->opcode),
		   le16_to_cpu(aq_desc->flags),
		   le16_to_cpu(aq_desc->datalen),
		   le16_to_cpu(aq_desc->retval));
	iavf_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->cookie_high),
		   le32_to_cpu(aq_desc->cookie_low));
	iavf_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.generic.param0),
		   le32_to_cpu(aq_desc->params.generic.param1));
	iavf_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
		   le32_to_cpu(aq_desc->params.generic.addr_high),
		   le32_to_cpu(aq_desc->params.generic.addr_low));

	if (buffer && aq_desc->datalen) {
		u16 len = le16_to_cpu(aq_desc->datalen);

		iavf_debug(hw, mask, "AQ CMD Buffer:\n");
		if (buf_len < len)
			len = buf_len;
		/* write the full 16-byte chunks */
		if (hw->debug_mask & mask) {
			char prefix[27];

			snprintf(prefix, sizeof(prefix),
				 "iavf %02x:%02x.%x: \t0x",
				 hw->bus.bus_id,
				 hw->bus.device,
				 hw->bus.func);

			print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET,
				       16, 1, buf, len, false);
		}
	}
}

/**
 * iavf_check_asq_alive
 * @hw: pointer to the hw struct
 *
 * Returns true if the queue is enabled, else false.
 **/
bool iavf_check_asq_alive(struct iavf_hw *hw)
{
	/* Check if the queue is initialized */
	if (!hw->aq.asq.count)
		return false;

	return !!(rd32(hw, IAVF_VF_ATQLEN1) & IAVF_VF_ATQLEN1_ATQENABLE_MASK);
}

/**
 * iavf_aq_queue_shutdown
 * @hw: pointer to the hw struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well.
 **/
enum iavf_status iavf_aq_queue_shutdown(struct iavf_hw *hw, bool unloading)
{
	struct iavf_aqc_queue_shutdown *cmd;
	struct libie_aq_desc desc;
	enum iavf_status status;

	cmd = libie_aq_raw(&desc);
	iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown);

	if (unloading)
		cmd->driver_unloading = cpu_to_le32(IAVF_AQ_DRIVER_UNLOADING);
	status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * iavf_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get or set the RSS lookup table
 **/
static enum iavf_status iavf_aq_get_set_rss_lut(struct iavf_hw *hw,
						u16 vsi_id, bool pf_lut,
						u8 *lut, u16 lut_size,
						bool set)
{
	struct iavf_aqc_get_set_rss_lut *cmd_resp;
	struct libie_aq_desc desc;
	enum iavf_status status;
	u16 flags;

	cmd_resp = libie_aq_raw(&desc);

	if (set)
		iavf_fill_default_direct_cmd_desc(&desc,
						  iavf_aqc_opc_set_rss_lut);
	else
		iavf_fill_default_direct_cmd_desc(&desc,
						  iavf_aqc_opc_get_rss_lut);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);

	vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_ID_MASK, vsi_id) |
		 FIELD_PREP(IAVF_AQC_SET_RSS_LUT_VSI_VALID, 1);
	cmd_resp->vsi_id = cpu_to_le16(vsi_id);

	if (pf_lut)
		flags = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
				   IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_PF);
	else
		flags = FIELD_PREP(IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_MASK,
				   IAVF_AQC_SET_RSS_LUT_TABLE_TYPE_VSI);

	cmd_resp->flags = cpu_to_le16(flags);

	status = iavf_asq_send_command(hw, &desc, lut, lut_size, NULL);

	return status;
}

/**
 * iavf_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: vsi fw index
 * @pf_lut: for PF table set true, for VSI table set false
 * @lut: pointer to the lut buffer provided by the caller
 * @lut_size: size of the lut buffer
 *
 * set the RSS lookup table, PF or VSI type
 **/
enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 vsi_id,
				     bool pf_lut, u8 *lut, u16 lut_size)
{
	return iavf_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}

/**
 * iavf_aq_get_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get or set the RSS key per VSI
 **/
static enum
iavf_status iavf_aq_get_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
				    struct iavf_aqc_get_set_rss_key_data *key,
				    bool set)
{
	u16 key_size = sizeof(struct iavf_aqc_get_set_rss_key_data);
	struct iavf_aqc_get_set_rss_key *cmd_resp;
	struct libie_aq_desc desc;
	enum iavf_status status;

	cmd_resp = libie_aq_raw(&desc);

	if (set)
		iavf_fill_default_direct_cmd_desc(&desc,
						  iavf_aqc_opc_set_rss_key);
	else
		iavf_fill_default_direct_cmd_desc(&desc,
						  iavf_aqc_opc_get_rss_key);

	/* Indirect command */
	desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_BUF);
	desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);

	vsi_id = FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_ID_MASK, vsi_id) |
		 FIELD_PREP(IAVF_AQC_SET_RSS_KEY_VSI_VALID, 1);
	cmd_resp->vsi_id = cpu_to_le16(vsi_id);

	status = iavf_asq_send_command(hw, &desc, key, key_size, NULL);

	return status;
}

/**
 * iavf_aq_set_rss_key
 * @hw: pointer to the hw struct
 * @vsi_id: vsi fw index
 * @key: pointer to key info struct
 *
 * set the RSS key per VSI
 **/
enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
				     struct iavf_aqc_get_set_rss_key_data *key)
{
	return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
}
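
/*
 * Illustrative usage sketch (comment only, not compiled here): how a caller
 * might program the RSS key and lookup table for a VSI with the helpers
 * above and decode a failure via iavf_stat_str(). The caller-side variables
 * (hw, vsi_id, rss_key, lut, lut_size, dev) are assumptions for the sketch,
 * not fields defined in this file.
 *
 *	struct iavf_aqc_get_set_rss_key_data *key =
 *		(struct iavf_aqc_get_set_rss_key_data *)rss_key;
 *	enum iavf_status status;
 *
 *	status = iavf_aq_set_rss_key(hw, vsi_id, key);
 *	if (!status)
 *		status = iavf_aq_set_rss_lut(hw, vsi_id, false, lut, lut_size);
 *	if (status)
 *		dev_warn(dev, "RSS configuration failed: %s\n",
 *			 iavf_stat_str(hw, status));
 */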
/**
 * iavf_aq_send_msg_to_pf
 * @hw: pointer to the hardware structure
 * @v_opcode: opcodes for VF-PF communication
 * @v_retval: return error code
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 * @cmd_details: pointer to command details
 *
 * Send message to PF driver using admin queue. By default, this message
 * is sent asynchronously, i.e. iavf_asq_send_command() does not wait for
 * completion before returning.
 **/
enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
					enum virtchnl_ops v_opcode,
					enum iavf_status v_retval,
					u8 *msg, u16 msglen,
					struct iavf_asq_cmd_details *cmd_details)
{
	struct iavf_asq_cmd_details details;
	struct libie_aq_desc desc;
	enum iavf_status status;

	iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_send_msg_to_pf);
	desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_SI);
	desc.cookie_high = cpu_to_le32(v_opcode);
	desc.cookie_low = cpu_to_le32(v_retval);
	if (msglen) {
		desc.flags |= cpu_to_le16((u16)(LIBIE_AQ_FLAG_BUF
						| LIBIE_AQ_FLAG_RD));
		if (msglen > IAVF_AQ_LARGE_BUF)
			desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_LB);
		desc.datalen = cpu_to_le16(msglen);
	}
	if (!cmd_details) {
		memset(&details, 0, sizeof(details));
		details.async = true;
		cmd_details = &details;
	}
	status = iavf_asq_send_command(hw, &desc, msg, msglen, cmd_details);
	return status;
}

/**
 * iavf_vf_parse_hw_config
 * @hw: pointer to the hardware structure
 * @msg: pointer to the virtual channel VF resource structure
 *
 * Given a VF resource message from the PF, populate the hw struct
 * with appropriate information.
 **/
void iavf_vf_parse_hw_config(struct iavf_hw *hw,
			     struct virtchnl_vf_resource *msg)
{
	struct virtchnl_vsi_resource *vsi_res;
	int i;

	vsi_res = &msg->vsi_res[0];

	hw->dev_caps.num_vsis = msg->num_vsis;
	hw->dev_caps.num_rx_qp = msg->num_queue_pairs;
	hw->dev_caps.num_tx_qp = msg->num_queue_pairs;
	hw->dev_caps.num_msix_vectors_vf = msg->max_vectors;
	hw->dev_caps.dcb = msg->vf_cap_flags &
			   VIRTCHNL_VF_OFFLOAD_L2;
	hw->dev_caps.fcoe = 0;
	for (i = 0; i < msg->num_vsis; i++) {
		if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) {
			ether_addr_copy(hw->mac.perm_addr,
					vsi_res->default_mac_addr);
			ether_addr_copy(hw->mac.addr,
					vsi_res->default_mac_addr);
		}
		vsi_res++;
	}
}
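
/*
 * Illustrative usage sketch (comment only, not compiled here): sending a
 * virtchnl request to the PF with iavf_aq_send_msg_to_pf(). The message
 * mirrors a version negotiation; the surrounding variables (hw, dev) are
 * assumptions for the sketch.
 *
 *	struct virtchnl_version_info vvi = {
 *		.major = VIRTCHNL_VERSION_MAJOR,
 *		.minor = VIRTCHNL_VERSION_MINOR,
 *	};
 *	enum iavf_status status;
 *
 *	status = iavf_aq_send_msg_to_pf(hw, VIRTCHNL_OP_VERSION, 0,
 *					(u8 *)&vvi, sizeof(vvi), NULL);
 *	if (status)
 *		dev_warn(dev, "send_msg_to_pf failed: %s\n",
 *			 iavf_stat_str(hw, status));
 */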