// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hnae3.h"
#include "hclge_comm_rss.h"

#define CREATE_TRACE_POINTS
#include "hclge_trace.h"

static u16 hclge_errno_to_resp(int errno)
{
	int resp = abs(errno);

	/* The status for the pf-to-vf msg cmd is u16, constrained by HW.
	 * We need to keep the same type as it.
	 * The input errno is a standard error code, so it is safe to
	 * use a u16 to store the abs(errno).
	 */
	return (u16)resp;
}

/* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF
 * receives a mailbox message from VF.
 * @vport: pointer to struct hclge_vport
 * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
 *		  message
 * @resp_msg: pointer to the response to send to VF, including the status of
 *	      handling its request (0 on success)
 */
static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
				struct hclge_respond_to_vf_msg *resp_msg)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_comm_cmd_status status;
	struct hclge_desc desc;
	u16 resp;

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	if (resp_msg->len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
		dev_err(&hdev->pdev->dev,
			"PF fail to gen resp to VF len %u exceeds max len %u\n",
			resp_msg->len,
			HCLGE_MBX_MAX_RESP_DATA_SIZE);
		/* If resp_msg->len is too long, set it to the max length
		 * and return the msg to VF.
		 */
		resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
	resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
	resp_pf_to_vf->match_id = vf_to_pf_req->match_id;

	resp_pf_to_vf->msg.code = cpu_to_le16(HCLGE_MBX_PF_VF_RESP);
	resp_pf_to_vf->msg.vf_mbx_msg_code =
				cpu_to_le16(vf_to_pf_req->msg.code);
	resp_pf_to_vf->msg.vf_mbx_msg_subcode =
				cpu_to_le16(vf_to_pf_req->msg.subcode);
	resp = hclge_errno_to_resp(resp_msg->status);
	if (resp < SHRT_MAX) {
		resp_pf_to_vf->msg.resp_status = cpu_to_le16(resp);
	} else {
		dev_warn(&hdev->pdev->dev,
			 "failed to send response to VF, response status %u is out-of-bound\n",
			 resp);
		resp_pf_to_vf->msg.resp_status = cpu_to_le16(EIO);
	}

	if (resp_msg->len > 0)
		memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data,
		       resp_msg->len);

	trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"failed to send response to VF, status: %d, vfid: %u, code: %u, subcode: %u.\n",
			status, vf_to_pf_req->mbx_src_vfid,
			vf_to_pf_req->msg.code, vf_to_pf_req->msg.subcode);

	return status;
}

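/* hclge_send_mbx_msg: send a PF-initiated mailbox message to a VF (no
 * response from the VF is waited for, unlike the reply path above).
 * @vport: pointer to struct hclge_vport
 * @msg: message data copied into the mailbox descriptor
 * @msg_len: length of @msg in bytes, at most HCLGE_MBX_MAX_MSG_SIZE
 * @mbx_opcode: mailbox opcode placed in msg.code
 * @dest_vfid: id of the destination VF
 */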
static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
			      u16 mbx_opcode, u8 dest_vfid)
{
	struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
	struct hclge_dev *hdev = vport->back;
	enum hclge_comm_cmd_status status;
	struct hclge_desc desc;

	if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) {
		dev_err(&hdev->pdev->dev,
			"msg data length(=%u) exceeds maximum(=%u)\n",
			msg_len, HCLGE_MBX_MAX_MSG_SIZE);
		return -EMSGSIZE;
	}

	resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);

	resp_pf_to_vf->dest_vfid = dest_vfid;
	resp_pf_to_vf->msg_len = msg_len;
	resp_pf_to_vf->msg.code = cpu_to_le16(mbx_opcode);

	memcpy(resp_pf_to_vf->msg.msg_data, msg, msg_len);

	trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);

	status = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"failed to send mailbox to VF, status: %d, vfid: %u, opcode: %u\n",
			status, dest_vfid, mbx_opcode);

	return status;
}

int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type)
{
	__le16 msg_data;
	u8 dest_vfid;

	dest_vfid = (u8)vport->vport_id;
	msg_data = cpu_to_le16(reset_type);

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data),
				  HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}

int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u16 reset_type;

	BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);

	if (hdev->reset_type == HNAE3_FUNC_RESET)
		reset_type = HNAE3_VF_PF_FUNC_RESET;
	else if (hdev->reset_type == HNAE3_FLR_RESET)
		reset_type = HNAE3_VF_FULL_RESET;
	else
		reset_type = HNAE3_VF_FUNC_RESET;

	return hclge_inform_vf_reset(vport, reset_type);
}

static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
{
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		kfree_sensitive(chain);
		chain = chain_tmp;
	}
}

/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
 * from mailbox message
 * msg[0]: opcode
 * msg[1]: <not relevant to this function>
 * msg[2]: ring_num
 * msg[3]: first ring type (TX|RX)
 * msg[4]: first tqp id
 * msg[5]: first int_gl idx
 * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
 */
static int hclge_get_ring_chain_from_mbx(
			struct hclge_mbx_vf_to_pf_cmd *req,
			struct hnae3_ring_chain_node *ring_chain,
			struct hclge_vport *vport)
{
	struct hnae3_ring_chain_node *cur_chain, *new_chain;
	struct hclge_dev *hdev = vport->back;
	int ring_num;
	int i;

	ring_num = req->msg.ring_num;

	if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
		return -EINVAL;

	for (i = 0; i < ring_num; i++) {
		if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
			dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
				req->msg.param[i].tqp_index,
				vport->nic.kinfo.rss_size - 1U);
			return -EINVAL;
		}
	}

	hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
		      req->msg.param[0].ring_type);
	ring_chain->tqp_index =
			hclge_get_queue_id(vport->nic.kinfo.tqp
					   [req->msg.param[0].tqp_index]);
	hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);

	cur_chain = ring_chain;

	for (i = 1; i < ring_num; i++) {
		new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
		if (!new_chain)
			goto err;

		hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
			      req->msg.param[i].ring_type);

		new_chain->tqp_index =
			hclge_get_queue_id(vport->nic.kinfo.tqp
					   [req->msg.param[i].tqp_index]);

		hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
				HNAE3_RING_GL_IDX_S,
				req->msg.param[i].int_gl_index);

		cur_chain->next = new_chain;
		cur_chain = new_chain;
	}

	return 0;
err:
	hclge_free_vector_ring_chain(ring_chain);
	return -ENOMEM;
}

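/* hclge_map_unmap_ring_to_vf_vector: parse the ring chain carried in a VF
 * mailbox request and bind it to (en == true) or unbind it from
 * (en == false) the vector requested by the VF.
 */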
static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
					     struct hclge_mbx_vf_to_pf_cmd *req)
{
	struct hnae3_ring_chain_node ring_chain;
	int vector_id = req->msg.vector_id;
	int ret;

	memset(&ring_chain, 0, sizeof(ring_chain));
	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
	if (ret)
		return ret;

	ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);

	hclge_free_vector_ring_chain(&ring_chain);

	return ret;
}

static int hclge_query_ring_vector_map(struct hclge_vport *vport,
				       struct hnae3_ring_chain_node *ring_chain,
				       struct hclge_desc *desc)
{
	struct hclge_ctrl_vector_chain_cmd *req =
		(struct hclge_ctrl_vector_chain_cmd *)desc->data;
	struct hclge_dev *hdev = vport->back;
	u16 tqp_type_and_id;
	int status;

	hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_ADD_RING_TO_VECTOR, true);

	tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[0]);
	hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
			hnae3_get_bit(ring_chain->flag, HNAE3_RING_TYPE_B));
	hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
			ring_chain->tqp_index);
	req->tqp_type_and_id[0] = cpu_to_le16(tqp_type_and_id);
	req->vfid = vport->vport_id;

	status = hclge_cmd_send(&hdev->hw, desc, 1);
	if (status)
		dev_err(&hdev->pdev->dev,
			"Get VF ring vector map info fail, status is %d.\n",
			status);

	return status;
}

static int hclge_get_vf_ring_vector_map(struct hclge_vport *vport,
					struct hclge_mbx_vf_to_pf_cmd *req,
					struct hclge_respond_to_vf_msg *resp)
{
#define HCLGE_LIMIT_RING_NUM			1
#define HCLGE_RING_TYPE_OFFSET			0
#define HCLGE_TQP_INDEX_OFFSET			1
#define HCLGE_INT_GL_INDEX_OFFSET		2
#define HCLGE_VECTOR_ID_OFFSET			3
#define HCLGE_RING_VECTOR_MAP_INFO_LEN		4
	struct hnae3_ring_chain_node ring_chain;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *data =
		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
	u16 tqp_type_and_id;
	u8 int_gl_index;
	int ret;

	req->msg.ring_num = HCLGE_LIMIT_RING_NUM;

	memset(&ring_chain, 0, sizeof(ring_chain));
	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
	if (ret)
		return ret;

	ret = hclge_query_ring_vector_map(vport, &ring_chain, &desc);
	if (ret) {
		hclge_free_vector_ring_chain(&ring_chain);
		return ret;
	}

	tqp_type_and_id = le16_to_cpu(data->tqp_type_and_id[0]);
	int_gl_index = hnae3_get_field(tqp_type_and_id,
				       HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S);

	resp->data[HCLGE_RING_TYPE_OFFSET] = req->msg.param[0].ring_type;
	resp->data[HCLGE_TQP_INDEX_OFFSET] = req->msg.param[0].tqp_index;
	resp->data[HCLGE_INT_GL_INDEX_OFFSET] = int_gl_index;
	resp->data[HCLGE_VECTOR_ID_OFFSET] = data->int_vector_id_l;
	resp->len = HCLGE_RING_VECTOR_MAP_INFO_LEN;

	hclge_free_vector_ring_chain(&ring_chain);

	return ret;
}

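/* hclge_set_vf_promisc_mode: record the unicast/multicast/broadcast
 * promiscuous settings requested by the VF, mirror the limit-promisc flag
 * into the VF's nic handle, and schedule the periodic task to apply them.
 */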
static void hclge_set_vf_promisc_mode(struct hclge_vport *vport,
				      struct hclge_mbx_vf_to_pf_cmd *req)
{
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;

	vport->vf_info.request_uc_en = req->msg.en_uc;
	vport->vf_info.request_mc_en = req->msg.en_mc;
	vport->vf_info.request_bc_en = req->msg.en_bc;

	if (req->msg.en_limit_promisc)
		set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);
	else
		clear_bit(HNAE3_PFLAG_LIMIT_PROMISC,
			  &handle->priv_flags);

	set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
	hclge_task_schedule(hdev, 0);
}

static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET	6

	const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
	struct hclge_dev *hdev = vport->back;
	int status;

	if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
		const u8 *old_addr = (const u8 *)
		(&mbx_req->msg.data[HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET]);

		/* If VF MAC has been configured by the host then it
		 * cannot be overridden by the MAC specified by the VM.
		 */
		if (!is_zero_ether_addr(vport->vf_info.mac) &&
		    !ether_addr_equal(mac_addr, vport->vf_info.mac))
			return -EPERM;

		if (!is_valid_ether_addr(mac_addr))
			return -EINVAL;

		spin_lock_bh(&vport->mac_list_lock);
		status = hclge_update_mac_node_for_dev_addr(vport, old_addr,
							    mac_addr);
		spin_unlock_bh(&vport->mac_list_lock);
		hclge_task_schedule(hdev, 0);
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
		status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
					       HCLGE_MAC_ADDR_UC, mac_addr);
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
		status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
					       HCLGE_MAC_ADDR_UC, mac_addr);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set unicast mac addr, unknown subcode %u\n",
			mbx_req->msg.subcode);
		return -EIO;
	}

	return status;
}

static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
	struct hclge_dev *hdev = vport->back;

	if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) {
		hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
				      HCLGE_MAC_ADDR_MC, mac_addr);
	} else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
		hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
				      HCLGE_MAC_ADDR_MC, mac_addr);
	} else {
		dev_err(&hdev->pdev->dev,
			"failed to set mcast mac addr, unknown subcode %u\n",
			mbx_req->msg.subcode);
		return -EIO;
	}

	return 0;
}

int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
				      u16 state,
				      struct hclge_vlan_info *vlan_info)
{
	struct hclge_mbx_port_base_vlan base_vlan;

	base_vlan.state = cpu_to_le16(state);
	base_vlan.vlan_proto = cpu_to_le16(vlan_info->vlan_proto);
	base_vlan.qos = cpu_to_le16(vlan_info->qos);
	base_vlan.vlan_tag = cpu_to_le16(vlan_info->vlan_tag);

	return hclge_send_mbx_msg(vport, (u8 *)&base_vlan, sizeof(base_vlan),
				  HCLGE_MBX_PUSH_VLAN_INFO, vfid);
}

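/* hclge_set_vf_vlan_cfg: handle the HCLGE_MBX_SET_VLAN request from a VF.
 * Depending on the subcode this adds or kills a VLAN filter, toggles RX VLAN
 * tag stripping, reports the port-based VLAN state, or enables/disables the
 * vport VLAN filter.
 */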
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
				 struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				 struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_MBX_VLAN_STATE_OFFSET	0
#define HCLGE_MBX_VLAN_INFO_OFFSET	2

	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	struct hclge_vf_vlan_cfg *msg_cmd;
	__be16 proto;
	u16 vlan_id;

	msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
	switch (msg_cmd->subcode) {
	case HCLGE_MBX_VLAN_FILTER:
		proto = cpu_to_be16(le16_to_cpu(msg_cmd->proto));
		vlan_id = le16_to_cpu(msg_cmd->vlan);
		return hclge_set_vlan_filter(handle, proto, vlan_id,
					     msg_cmd->is_kill);
	case HCLGE_MBX_VLAN_RX_OFF_CFG:
		return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable);
	case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE:
		/* The VF does not need to know the port-based VLAN state on
		 * devices of version HNAE3_DEVICE_VERSION_V3 or later, so
		 * always report it as disabled when such a VF queries it.
		 */
		resp_msg->data[0] =
			hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ?
			HNAE3_PORT_BASE_VLAN_DISABLE :
			vport->port_base_vlan_cfg.state;
		resp_msg->len = sizeof(u8);
		return 0;
	case HCLGE_MBX_ENABLE_VLAN_FILTER:
		return hclge_enable_vport_vlan_filter(vport, msg_cmd->enable);
	default:
		return 0;
	}
}

static int hclge_set_vf_alive(struct hclge_vport *vport,
			      struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	bool alive = !!mbx_req->msg.data[0];
	int ret = 0;

	if (alive)
		ret = hclge_vport_start(vport);
	else
		hclge_vport_stop(vport);

	return ret;
}

static void hclge_get_basic_info(struct hclge_vport *vport,
				 struct hclge_respond_to_vf_msg *resp_msg)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_ae_dev *ae_dev = vport->back->ae_dev;
	struct hclge_basic_info *basic_info;
	unsigned int i;
	u32 pf_caps;

	basic_info = (struct hclge_basic_info *)resp_msg->data;
	for (i = 0; i < kinfo->tc_info.num_tc; i++)
		basic_info->hw_tc_map |= BIT(i);

	pf_caps = le32_to_cpu(basic_info->pf_caps);
	if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
		hnae3_set_bit(pf_caps, HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1);

	basic_info->pf_caps = cpu_to_le32(pf_caps);
	resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
}

static void hclge_get_vf_queue_info(struct hclge_vport *vport,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_RSS_INFO_LEN		6

	struct hclge_mbx_vf_queue_info *queue_info;
	struct hclge_dev *hdev = vport->back;

	/* get the queue related info */
	queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg->data;
	queue_info->num_tqps = cpu_to_le16(vport->alloc_tqps);
	queue_info->rss_size = cpu_to_le16(vport->nic.kinfo.rss_size);
	queue_info->rx_buf_len = cpu_to_le16(hdev->rx_buf_len);
	resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN;
}

static void hclge_get_vf_mac_addr(struct hclge_vport *vport,
				  struct hclge_respond_to_vf_msg *resp_msg)
{
	ether_addr_copy(resp_msg->data, vport->vf_info.mac);
	resp_msg->len = ETH_ALEN;
}

static void hclge_get_vf_queue_depth(struct hclge_vport *vport,
				     struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_TQPS_DEPTH_INFO_LEN	4

	struct hclge_mbx_vf_queue_depth *queue_depth;
	struct hclge_dev *hdev = vport->back;

	/* get the queue depth info */
	queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg->data;
	queue_depth->num_tx_desc = cpu_to_le16(hdev->num_tx_desc);
	queue_depth->num_rx_desc = cpu_to_le16(hdev->num_rx_desc);

	resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN;
}

static void hclge_get_vf_media_type(struct hclge_vport *vport,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_VF_MEDIA_TYPE_OFFSET	0
#define HCLGE_VF_MODULE_TYPE_OFFSET	1
#define HCLGE_VF_MEDIA_TYPE_LENGTH	2

	struct hclge_dev *hdev = vport->back;

	resp_msg->data[HCLGE_VF_MEDIA_TYPE_OFFSET] =
		hdev->hw.mac.media_type;
	resp_msg->data[HCLGE_VF_MODULE_TYPE_OFFSET] =
		hdev->hw.mac.module_type;
	resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH;
}

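/* hclge_push_vf_link_status: push the current MAC link status, speed and
 * duplex to the VF, honouring the per-VF link state (IFLA_VF_LINK_STATE_*)
 * stored in vf_info.link_state.
 */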
int hclge_push_vf_link_status(struct hclge_vport *vport)
{
#define HCLGE_VF_LINK_STATE_UP		1U
#define HCLGE_VF_LINK_STATE_DOWN	0U

	struct hclge_mbx_link_status link_info;
	struct hclge_dev *hdev = vport->back;
	u16 link_status;

	/* mac.link can only be 0 or 1 */
	switch (vport->vf_info.link_state) {
	case IFLA_VF_LINK_STATE_ENABLE:
		link_status = HCLGE_VF_LINK_STATE_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		link_status = HCLGE_VF_LINK_STATE_DOWN;
		break;
	case IFLA_VF_LINK_STATE_AUTO:
	default:
		link_status = (u16)hdev->hw.mac.link;
		break;
	}

	link_info.link_status = cpu_to_le16(link_status);
	link_info.speed = cpu_to_le32(hdev->hw.mac.speed);
	link_info.duplex = cpu_to_le16(hdev->hw.mac.duplex);
	link_info.flag = HCLGE_MBX_PUSH_LINK_STATUS_EN;

	/* send this requested info to VF */
	return hclge_send_mbx_msg(vport, (u8 *)&link_info, sizeof(link_info),
				  HCLGE_MBX_LINK_STAT_CHANGE, vport->vport_id);
}

static void hclge_get_link_mode(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
#define HCLGE_SUPPORTED	1
	struct hclge_mbx_link_mode link_mode;
	struct hclge_dev *hdev = vport->back;
	unsigned long advertising;
	unsigned long supported;
	unsigned long send_data;
	u8 dest_vfid;

	advertising = hdev->hw.mac.advertising[0];
	supported = hdev->hw.mac.supported[0];
	dest_vfid = mbx_req->mbx_src_vfid;
	send_data = mbx_req->msg.data[0] == HCLGE_SUPPORTED ? supported :
							      advertising;
	link_mode.idx = cpu_to_le16((u16)mbx_req->msg.data[0]);
	link_mode.link_mode = cpu_to_le64(send_data);

	hclge_send_mbx_msg(vport, (u8 *)&link_mode, sizeof(link_mode),
			   HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
}

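/* hclge_mbx_reset_vf_queue: handle HCLGE_MBX_QUEUE_RESET. All of the VF's
 * queues are reset in one go on the first request (queue_id == 0); requests
 * for the remaining queues simply report completion.
 */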
static int hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RESET_ALL_QUEUE_DONE	1U
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	u16 queue_id;
	int ret;

	queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data);
	resp_msg->data[0] = HCLGE_RESET_ALL_QUEUE_DONE;
	resp_msg->len = sizeof(u8);

	/* PF resets all of the VF's queues at once, so it is unnecessary
	 * to reset queues again if queue_id > 0; just return success.
	 */
	if (queue_id > 0)
		return 0;

	ret = hclge_reset_tqp(handle);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed to reset vf %u queue, ret = %d\n",
			vport->vport_id - HCLGE_VF_VPORT_START_NUM, ret);

	return ret;
}

static int hclge_reset_vf(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
		 vport->vport_id - HCLGE_VF_VPORT_START_NUM);

	return hclge_func_reset_cmd(hdev, vport->vport_id);
}

static void hclge_notify_vf_config(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_port_base_vlan_config *vlan_cfg;
	int ret;

	hclge_push_vf_link_status(vport);
	if (test_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, &vport->need_notify)) {
		ret = hclge_inform_vf_reset(vport, HNAE3_VF_PF_FUNC_RESET);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to inform VF %u reset!",
				vport->vport_id - HCLGE_VF_VPORT_START_NUM);
			return;
		}
		vport->need_notify = 0;
		return;
	}

	if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
	    test_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify)) {
		vlan_cfg = &vport->port_base_vlan_cfg;
		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
							vport->vport_id,
							vlan_cfg->state,
							&vlan_cfg->vlan_info);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"failed to inform VF %u port base vlan!",
				vport->vport_id - HCLGE_VF_VPORT_START_NUM);
			return;
		}
		clear_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify);
	}
}

static void hclge_vf_keep_alive(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;

	vport->last_active_jiffies = jiffies;

	if (test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) &&
	    !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
		set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
		dev_info(&hdev->pdev->dev, "VF %u is alive!",
			 vport->vport_id - HCLGE_VF_VPORT_START_NUM);
		hclge_notify_vf_config(vport);
	}
}

static int hclge_set_vf_mtu(struct hclge_vport *vport,
			    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_mbx_mtu_info *mtu_info;
	u32 mtu;

	mtu_info = (struct hclge_mbx_mtu_info *)mbx_req->msg.data;
	mtu = le32_to_cpu(mtu_info->mtu);

	return hclge_set_vport_mtu(vport, mtu);
}

static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
				    struct hclge_respond_to_vf_msg *resp_msg)
{
	struct hnae3_handle *handle = &vport->nic;
	struct hclge_dev *hdev = vport->back;
	u16 queue_id, qid_in_pf;

	queue_id = le16_to_cpu(*(__le16 *)mbx_req->msg.data);
	if (queue_id >= handle->kinfo.num_tqps) {
		dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
			queue_id, mbx_req->mbx_src_vfid);
		return -EINVAL;
	}

	qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
	*(__le16 *)resp_msg->data = cpu_to_le16(qid_in_pf);
	resp_msg->len = sizeof(qid_in_pf);
	return 0;
}

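/* hclge_get_rss_key: return one HCLGE_RSS_MBX_RESP_LEN sized chunk of the PF
 * RSS hash key. The VF passes the chunk index in msg.data[0], so the full
 * key can be reassembled over several queries.
 */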
static int hclge_get_rss_key(struct hclge_vport *vport,
			     struct hclge_mbx_vf_to_pf_cmd *mbx_req,
			     struct hclge_respond_to_vf_msg *resp_msg)
{
#define HCLGE_RSS_MBX_RESP_LEN	8
	struct hclge_dev *hdev = vport->back;
	struct hclge_comm_rss_cfg *rss_cfg;
	u8 index;

	index = mbx_req->msg.data[0];
	rss_cfg = &hdev->rss_cfg;

	/* Check the query index of rss_hash_key from VF, make sure it does
	 * not exceed the size of rss_hash_key.
	 */
	if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
	      sizeof(rss_cfg->rss_hash_key)) {
		dev_warn(&hdev->pdev->dev,
			 "failed to get the rss hash key, the index(%u) invalid !\n",
			 index);
		return -EINVAL;
	}

	memcpy(resp_msg->data,
	       &rss_cfg->rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
	       HCLGE_RSS_MBX_RESP_LEN);
	resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
	return 0;
}

static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
{
	switch (link_fail_code) {
	case HCLGE_LF_REF_CLOCK_LOST:
		dev_warn(&hdev->pdev->dev, "Reference clock lost!\n");
		break;
	case HCLGE_LF_XSFP_TX_DISABLE:
		dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n");
		break;
	case HCLGE_LF_XSFP_ABSENT:
		dev_warn(&hdev->pdev->dev, "SFP is absent!\n");
		break;
	default:
		break;
	}
}

static void hclge_handle_link_change_event(struct hclge_dev *hdev,
					   struct hclge_mbx_vf_to_pf_cmd *req)
{
	hclge_task_schedule(hdev, 0);

	if (!req->msg.subcode)
		hclge_link_fail_parse(hdev, req->msg.data[0]);
}

static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
{
	u32 tail = hclge_read_dev(hw, HCLGE_COMM_NIC_CRQ_TAIL_REG);

	return tail == hw->hw.cmq.crq.next_to_use;
}

static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;

	ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
	dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
	ae_dev->ops->reset_event(hdev->pdev, NULL);
}

static void hclge_handle_vf_tbl(struct hclge_vport *vport,
				struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_vf_vlan_cfg *msg_cmd;

	msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
	if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) {
		hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC);
		hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC);
		hclge_rm_vport_all_vlan_table(vport, true);
	} else {
		dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n",
			 msg_cmd->subcode);
	}
}

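/* Thin per-opcode wrappers: each one adapts a request handler above to the
 * common hclge_mbx_ops_fn signature used by the dispatch table below, and
 * logs a PF-side error when the underlying handler fails.
 */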
static int
hclge_mbx_map_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
{
	return hclge_map_unmap_ring_to_vf_vector(param->vport, true,
						 param->req);
}

static int
hclge_mbx_unmap_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
{
	return hclge_map_unmap_ring_to_vf_vector(param->vport, false,
						 param->req);
}

static int
hclge_mbx_get_ring_vector_map_handler(struct hclge_mbx_ops_param *param)
{
	int ret;

	ret = hclge_get_vf_ring_vector_map(param->vport, param->req,
					   param->resp_msg);
	if (ret)
		dev_err(&param->vport->back->pdev->dev,
			"PF fail(%d) to get VF ring vector map\n",
			ret);
	return ret;
}

static int hclge_mbx_set_promisc_mode_handler(struct hclge_mbx_ops_param *param)
{
	hclge_set_vf_promisc_mode(param->vport, param->req);
	return 0;
}

static int hclge_mbx_set_unicast_handler(struct hclge_mbx_ops_param *param)
{
	int ret;

	ret = hclge_set_vf_uc_mac_addr(param->vport, param->req);
	if (ret)
		dev_err(&param->vport->back->pdev->dev,
			"PF fail(%d) to set VF UC MAC Addr\n",
			ret);
	return ret;
}

static int hclge_mbx_set_multicast_handler(struct hclge_mbx_ops_param *param)
{
	int ret;

	ret = hclge_set_vf_mc_mac_addr(param->vport, param->req);
	if (ret)
		dev_err(&param->vport->back->pdev->dev,
			"PF fail(%d) to set VF MC MAC Addr\n",
			ret);
	return ret;
}

static int hclge_mbx_set_vlan_handler(struct hclge_mbx_ops_param *param)
{
	int ret;

	ret = hclge_set_vf_vlan_cfg(param->vport, param->req, param->resp_msg);
	if (ret)
		dev_err(&param->vport->back->pdev->dev,
			"PF failed(%d) to config VF's VLAN\n",
			ret);
	return ret;
}

static int hclge_mbx_set_alive_handler(struct hclge_mbx_ops_param *param)
{
	int ret;

	ret = hclge_set_vf_alive(param->vport, param->req);
	if (ret)
		dev_err(&param->vport->back->pdev->dev,
			"PF failed(%d) to set VF's ALIVE\n",
			ret);
	return ret;
}

static int hclge_mbx_get_qinfo_handler(struct hclge_mbx_ops_param *param)
{
	hclge_get_vf_queue_info(param->vport, param->resp_msg);
	return 0;
}

static int hclge_mbx_get_qdepth_handler(struct hclge_mbx_ops_param *param)
{
	hclge_get_vf_queue_depth(param->vport, param->resp_msg);
	return 0;
}

static int hclge_mbx_get_basic_info_handler(struct hclge_mbx_ops_param *param)
{
	hclge_get_basic_info(param->vport, param->resp_msg);
	return 0;
}

static int hclge_mbx_get_link_status_handler(struct hclge_mbx_ops_param *param)
{
	int ret;

	ret = hclge_push_vf_link_status(param->vport);
	if (ret)
		dev_err(&param->vport->back->pdev->dev,
			"failed to inform link stat to VF, ret = %d\n",
			ret);
	return ret;
}

static int hclge_mbx_queue_reset_handler(struct hclge_mbx_ops_param *param)
{
	return hclge_mbx_reset_vf_queue(param->vport, param->req,
					param->resp_msg);
}

static int hclge_mbx_reset_handler(struct hclge_mbx_ops_param *param)
{
	return hclge_reset_vf(param->vport);
}

static int hclge_mbx_keep_alive_handler(struct hclge_mbx_ops_param *param)
{
	hclge_vf_keep_alive(param->vport);
	return 0;
}

static int hclge_mbx_set_mtu_handler(struct hclge_mbx_ops_param *param)
{
	int ret;

	ret = hclge_set_vf_mtu(param->vport, param->req);
	if (ret)
		dev_err(&param->vport->back->pdev->dev,
			"VF fail(%d) to set mtu\n", ret);
	return ret;
}

static int hclge_mbx_get_qid_in_pf_handler(struct hclge_mbx_ops_param *param)
{
	return hclge_get_queue_id_in_pf(param->vport, param->req,
					param->resp_msg);
}

static int hclge_mbx_get_rss_key_handler(struct hclge_mbx_ops_param *param)
{
	return hclge_get_rss_key(param->vport, param->req, param->resp_msg);
}

static int hclge_mbx_get_link_mode_handler(struct hclge_mbx_ops_param *param)
{
	hclge_get_link_mode(param->vport, param->req);
	return 0;
}

static int
hclge_mbx_get_vf_flr_status_handler(struct hclge_mbx_ops_param *param)
{
	hclge_rm_vport_all_mac_table(param->vport, false,
				     HCLGE_MAC_ADDR_UC);
	hclge_rm_vport_all_mac_table(param->vport, false,
				     HCLGE_MAC_ADDR_MC);
	hclge_rm_vport_all_vlan_table(param->vport, false);
	return 0;
}

static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param)
{
	hclge_rm_vport_all_mac_table(param->vport, true,
				     HCLGE_MAC_ADDR_UC);
	hclge_rm_vport_all_mac_table(param->vport, true,
				     HCLGE_MAC_ADDR_MC);
	hclge_rm_vport_all_vlan_table(param->vport, true);
	param->vport->mps = 0;
	return 0;
}

static int hclge_mbx_get_media_type_handler(struct hclge_mbx_ops_param *param)
{
	hclge_get_vf_media_type(param->vport, param->resp_msg);
	return 0;
}

static int hclge_mbx_push_link_status_handler(struct hclge_mbx_ops_param *param)
{
	hclge_handle_link_change_event(param->vport->back, param->req);
	return 0;
}

static int hclge_mbx_get_mac_addr_handler(struct hclge_mbx_ops_param *param)
{
	hclge_get_vf_mac_addr(param->vport, param->resp_msg);
	return 0;
}

static int hclge_mbx_ncsi_error_handler(struct hclge_mbx_ops_param *param)
{
	hclge_handle_ncsi_error(param->vport->back);
	return 0;
}

static int hclge_mbx_handle_vf_tbl_handler(struct hclge_mbx_ops_param *param)
{
	hclge_handle_vf_tbl(param->vport, param->req);
	return 0;
}

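/* Dispatch table indexed by the VF-to-PF mailbox opcode (msg.code). Opcodes
 * without an entry are left NULL and rejected as unsupported by
 * hclge_mbx_request_handling().
 */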
static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = {
	[HCLGE_MBX_RESET] = hclge_mbx_reset_handler,
	[HCLGE_MBX_SET_UNICAST] = hclge_mbx_set_unicast_handler,
	[HCLGE_MBX_SET_MULTICAST] = hclge_mbx_set_multicast_handler,
	[HCLGE_MBX_SET_VLAN] = hclge_mbx_set_vlan_handler,
	[HCLGE_MBX_MAP_RING_TO_VECTOR] = hclge_mbx_map_ring_to_vector_handler,
	[HCLGE_MBX_UNMAP_RING_TO_VECTOR] = hclge_mbx_unmap_ring_to_vector_handler,
	[HCLGE_MBX_SET_PROMISC_MODE] = hclge_mbx_set_promisc_mode_handler,
	[HCLGE_MBX_GET_QINFO] = hclge_mbx_get_qinfo_handler,
	[HCLGE_MBX_GET_QDEPTH] = hclge_mbx_get_qdepth_handler,
	[HCLGE_MBX_GET_BASIC_INFO] = hclge_mbx_get_basic_info_handler,
	[HCLGE_MBX_GET_RSS_KEY] = hclge_mbx_get_rss_key_handler,
	[HCLGE_MBX_GET_MAC_ADDR] = hclge_mbx_get_mac_addr_handler,
	[HCLGE_MBX_GET_LINK_STATUS] = hclge_mbx_get_link_status_handler,
	[HCLGE_MBX_QUEUE_RESET] = hclge_mbx_queue_reset_handler,
	[HCLGE_MBX_KEEP_ALIVE] = hclge_mbx_keep_alive_handler,
	[HCLGE_MBX_SET_ALIVE] = hclge_mbx_set_alive_handler,
	[HCLGE_MBX_SET_MTU] = hclge_mbx_set_mtu_handler,
	[HCLGE_MBX_GET_QID_IN_PF] = hclge_mbx_get_qid_in_pf_handler,
	[HCLGE_MBX_GET_LINK_MODE] = hclge_mbx_get_link_mode_handler,
	[HCLGE_MBX_GET_MEDIA_TYPE] = hclge_mbx_get_media_type_handler,
	[HCLGE_MBX_VF_UNINIT] = hclge_mbx_vf_uninit_handler,
	[HCLGE_MBX_HANDLE_VF_TBL] = hclge_mbx_handle_vf_tbl_handler,
	[HCLGE_MBX_GET_RING_VECTOR_MAP] = hclge_mbx_get_ring_vector_map_handler,
	[HCLGE_MBX_GET_VF_FLR_STATUS] = hclge_mbx_get_vf_flr_status_handler,
	[HCLGE_MBX_PUSH_LINK_STATUS] = hclge_mbx_push_link_status_handler,
	[HCLGE_MBX_NCSI_ERROR] = hclge_mbx_ncsi_error_handler,
};

static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param)
{
	hclge_mbx_ops_fn cmd_func = NULL;
	struct hclge_dev *hdev;
	int ret = 0;

	hdev = param->vport->back;
	cmd_func = hclge_mbx_ops_list[param->req->msg.code];
	if (!cmd_func) {
		dev_err(&hdev->pdev->dev,
			"un-supported mailbox message, code = %u\n",
			param->req->msg.code);
		return;
	}
	ret = cmd_func(param);

	/* PF driver should not reply to IMP */
	if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
	    param->req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
		param->resp_msg->status = ret;
		if (time_is_before_jiffies(hdev->last_mbx_scheduled +
					   HCLGE_MBX_SCHED_TIMEOUT))
			dev_warn(&hdev->pdev->dev,
				 "resp vport%u mbx(%u,%u) late\n",
				 param->req->mbx_src_vfid,
				 param->req->msg.code,
				 param->req->msg.subcode);

		hclge_gen_resp_to_vf(param->vport, param->req, param->resp_msg);
	}
}

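/* hclge_mbx_handler: drain the CRQ and handle every pending VF-to-PF mailbox
 * request. Invalid descriptors (stale flag bit or out-of-range source vfid)
 * are dropped; for the rest the per-opcode handler runs and, when required,
 * a synchronous response is generated for the VF.
 */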
"resp vport%u mbx(%u,%u) late\n", 1096 param->req->mbx_src_vfid, 1097 param->req->msg.code, 1098 param->req->msg.subcode); 1099 1100 hclge_gen_resp_to_vf(param->vport, param->req, param->resp_msg); 1101 } 1102 } 1103 1104 void hclge_mbx_handler(struct hclge_dev *hdev) 1105 { 1106 struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq; 1107 struct hclge_respond_to_vf_msg resp_msg; 1108 struct hclge_mbx_vf_to_pf_cmd *req; 1109 struct hclge_mbx_ops_param param; 1110 struct hclge_desc *desc; 1111 unsigned int flag; 1112 1113 param.resp_msg = &resp_msg; 1114 /* handle all the mailbox requests in the queue */ 1115 while (!hclge_cmd_crq_empty(&hdev->hw)) { 1116 if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, 1117 &hdev->hw.hw.comm_state)) { 1118 dev_warn(&hdev->pdev->dev, 1119 "command queue needs re-initializing\n"); 1120 return; 1121 } 1122 1123 desc = &crq->desc[crq->next_to_use]; 1124 req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data; 1125 1126 flag = le16_to_cpu(crq->desc[crq->next_to_use].flag); 1127 if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B) || 1128 req->mbx_src_vfid > hdev->num_req_vfs)) { 1129 dev_warn(&hdev->pdev->dev, 1130 "dropped invalid mailbox message, code = %u, vfid = %u\n", 1131 req->msg.code, req->mbx_src_vfid); 1132 1133 /* dropping/not processing this invalid message */ 1134 crq->desc[crq->next_to_use].flag = 0; 1135 hclge_mbx_ring_ptr_move_crq(crq); 1136 continue; 1137 } 1138 1139 trace_hclge_pf_mbx_get(hdev, req); 1140 1141 /* clear the resp_msg before processing every mailbox message */ 1142 memset(&resp_msg, 0, sizeof(resp_msg)); 1143 param.vport = &hdev->vport[req->mbx_src_vfid]; 1144 param.req = req; 1145 hclge_mbx_request_handling(¶m); 1146 1147 crq->desc[crq->next_to_use].flag = 0; 1148 hclge_mbx_ring_ptr_move_crq(crq); 1149 } 1150 1151 /* Write back CMDQ_RQ header pointer, M7 need this pointer */ 1152 hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CRQ_HEAD_REG, 1153 crq->next_to_use); 1154 } 1155