1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 #include <linux/iopoll.h> 6 #include <net/rtnetlink.h> 7 #include "hclgevf_cmd.h" 8 #include "hclgevf_main.h" 9 #include "hclgevf_regs.h" 10 #include "hclge_mbx.h" 11 #include "hnae3.h" 12 #include "hclgevf_devlink.h" 13 #include "hclge_comm_rss.h" 14 #include "hclgevf_trace.h" 15 16 #define HCLGEVF_NAME "hclgevf" 17 18 #define HCLGEVF_RESET_MAX_FAIL_CNT 5 19 20 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); 21 static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 22 unsigned long delay); 23 24 static struct hnae3_ae_algo ae_algovf; 25 26 static struct workqueue_struct *hclgevf_wq; 27 28 static const struct pci_device_id ae_algovf_pci_tbl[] = { 29 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0}, 30 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), 31 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, 32 /* required last entry */ 33 {0, } 34 }; 35 36 MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); 37 38 /* hclgevf_cmd_send - send command to command queue 39 * @hw: pointer to the hw struct 40 * @desc: prefilled descriptor for describing the command 41 * @num : the number of descriptors to be sent 42 * 43 * This is the main send command for command queue, it 44 * sends the queue, cleans the queue, etc 45 */ 46 int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num) 47 { 48 return hclge_comm_cmd_send(&hw->hw, desc, num); 49 } 50 51 static void hclgevf_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, 52 int num, bool is_special) 53 { 54 int i; 55 56 trace_hclge_vf_cmd_send(hw, desc, 0, num); 57 58 if (is_special) 59 return; 60 61 for (i = 1; i < num; i++) 62 trace_hclge_vf_cmd_send(hw, &desc[i], i, num); 63 } 64 65 static void hclgevf_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc, 66 int num, bool is_special) 67 { 68 int i; 69 70 if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag))) 71 return; 72 73 trace_hclge_vf_cmd_get(hw, desc, 0, num); 74 75 if (is_special) 76 return; 77 78 for (i = 1; i < num; i++) 79 trace_hclge_vf_cmd_get(hw, &desc[i], i, num); 80 } 81 82 static const struct hclge_comm_cmq_ops hclgevf_cmq_ops = { 83 .trace_cmd_send = hclgevf_trace_cmd_send, 84 .trace_cmd_get = hclgevf_trace_cmd_get, 85 }; 86 87 void hclgevf_arq_init(struct hclgevf_dev *hdev) 88 { 89 struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq; 90 91 spin_lock(&cmdq->crq.lock); 92 /* initialize the pointers of async rx queue of mailbox */ 93 hdev->arq.hdev = hdev; 94 hdev->arq.head = 0; 95 hdev->arq.tail = 0; 96 atomic_set(&hdev->arq.count, 0); 97 spin_unlock(&cmdq->crq.lock); 98 } 99 100 struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle) 101 { 102 if (!handle->client) 103 return container_of(handle, struct hclgevf_dev, nic); 104 else if (handle->client->type == HNAE3_CLIENT_ROCE) 105 return container_of(handle, struct hclgevf_dev, roce); 106 else 107 return container_of(handle, struct hclgevf_dev, nic); 108 } 109 110 static void hclgevf_update_stats(struct hnae3_handle *handle) 111 { 112 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 113 int status; 114 115 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); 116 if (status) 117 dev_err(&hdev->pdev->dev, 118 "VF update of TQPS stats fail, status = %d.\n", 119 status); 120 } 121 122 static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset) 123 { 124 if (strset == ETH_SS_TEST) 125 return -EOPNOTSUPP; 126 else if (strset == ETH_SS_STATS) 127 return 
hclge_comm_tqps_get_sset_count(handle); 128 129 return 0; 130 } 131 132 static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset, 133 u8 *data) 134 { 135 u8 *p = (char *)data; 136 137 if (strset == ETH_SS_STATS) 138 p = hclge_comm_tqps_get_strings(handle, p); 139 } 140 141 static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data) 142 { 143 hclge_comm_tqps_get_stats(handle, data); 144 } 145 146 static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code, 147 u8 subcode) 148 { 149 if (msg) { 150 memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg)); 151 msg->code = code; 152 msg->subcode = subcode; 153 } 154 } 155 156 static int hclgevf_get_basic_info(struct hclgevf_dev *hdev) 157 { 158 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 159 u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE]; 160 struct hclge_basic_info *basic_info; 161 struct hclge_vf_to_pf_msg send_msg; 162 unsigned long caps; 163 int status; 164 165 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0); 166 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 167 sizeof(resp_msg)); 168 if (status) { 169 dev_err(&hdev->pdev->dev, 170 "failed to get basic info from pf, ret = %d", status); 171 return status; 172 } 173 174 basic_info = (struct hclge_basic_info *)resp_msg; 175 176 hdev->hw_tc_map = basic_info->hw_tc_map; 177 hdev->mbx_api_version = le16_to_cpu(basic_info->mbx_api_version); 178 caps = le32_to_cpu(basic_info->pf_caps); 179 if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps)) 180 set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); 181 182 return 0; 183 } 184 185 static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) 186 { 187 struct hnae3_handle *nic = &hdev->nic; 188 struct hclge_vf_to_pf_msg send_msg; 189 u8 resp_msg; 190 int ret; 191 192 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 193 HCLGE_MBX_GET_PORT_BASE_VLAN_STATE); 194 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg, 195 sizeof(u8)); 196 if (ret) { 197 dev_err(&hdev->pdev->dev, 198 "VF request to get port based vlan state failed %d", 199 ret); 200 return ret; 201 } 202 203 nic->port_base_vlan_state = resp_msg; 204 205 return 0; 206 } 207 208 static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) 209 { 210 #define HCLGEVF_TQPS_RSS_INFO_LEN 6 211 212 struct hclge_mbx_vf_queue_info *queue_info; 213 u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN]; 214 struct hclge_vf_to_pf_msg send_msg; 215 int status; 216 217 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0); 218 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 219 HCLGEVF_TQPS_RSS_INFO_LEN); 220 if (status) { 221 dev_err(&hdev->pdev->dev, 222 "VF request to get tqp info from PF failed %d", 223 status); 224 return status; 225 } 226 227 queue_info = (struct hclge_mbx_vf_queue_info *)resp_msg; 228 hdev->num_tqps = le16_to_cpu(queue_info->num_tqps); 229 hdev->rss_size_max = le16_to_cpu(queue_info->rss_size); 230 hdev->rx_buf_len = le16_to_cpu(queue_info->rx_buf_len); 231 232 return 0; 233 } 234 235 static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev) 236 { 237 #define HCLGEVF_TQPS_DEPTH_INFO_LEN 4 238 239 struct hclge_mbx_vf_queue_depth *queue_depth; 240 u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN]; 241 struct hclge_vf_to_pf_msg send_msg; 242 int ret; 243 244 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0); 245 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 246 HCLGEVF_TQPS_DEPTH_INFO_LEN); 247 if (ret) { 248 dev_err(&hdev->pdev->dev, 249 "VF request to get tqp depth info 
from PF failed %d", 250 ret); 251 return ret; 252 } 253 254 queue_depth = (struct hclge_mbx_vf_queue_depth *)resp_msg; 255 hdev->num_tx_desc = le16_to_cpu(queue_depth->num_tx_desc); 256 hdev->num_rx_desc = le16_to_cpu(queue_depth->num_rx_desc); 257 258 return 0; 259 } 260 261 static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) 262 { 263 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 264 struct hclge_vf_to_pf_msg send_msg; 265 u16 qid_in_pf = 0; 266 u8 resp_data[2]; 267 int ret; 268 269 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0); 270 *(__le16 *)send_msg.data = cpu_to_le16(queue_id); 271 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data, 272 sizeof(resp_data)); 273 if (!ret) 274 qid_in_pf = le16_to_cpu(*(__le16 *)resp_data); 275 276 return qid_in_pf; 277 } 278 279 static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) 280 { 281 struct hclge_vf_to_pf_msg send_msg; 282 u8 resp_msg[2]; 283 int ret; 284 285 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0); 286 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 287 sizeof(resp_msg)); 288 if (ret) { 289 dev_err(&hdev->pdev->dev, 290 "VF request to get the pf port media type failed %d", 291 ret); 292 return ret; 293 } 294 295 hdev->hw.mac.media_type = resp_msg[0]; 296 hdev->hw.mac.module_type = resp_msg[1]; 297 298 return 0; 299 } 300 301 static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) 302 { 303 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 304 struct hclge_comm_tqp *tqp; 305 int i; 306 307 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, 308 sizeof(struct hclge_comm_tqp), GFP_KERNEL); 309 if (!hdev->htqp) 310 return -ENOMEM; 311 312 tqp = hdev->htqp; 313 314 for (i = 0; i < hdev->num_tqps; i++) { 315 tqp->dev = &hdev->pdev->dev; 316 tqp->index = i; 317 318 tqp->q.ae_algo = &ae_algovf; 319 tqp->q.buf_size = hdev->rx_buf_len; 320 tqp->q.tx_desc_num = hdev->num_tx_desc; 321 tqp->q.rx_desc_num = hdev->num_rx_desc; 322 323 /* need an extended offset to configure queues >= 324 * HCLGEVF_TQP_MAX_SIZE_DEV_V2. 325 */ 326 if (i < HCLGEVF_TQP_MAX_SIZE_DEV_V2) 327 tqp->q.io_base = hdev->hw.hw.io_base + 328 HCLGEVF_TQP_REG_OFFSET + 329 i * HCLGEVF_TQP_REG_SIZE; 330 else 331 tqp->q.io_base = hdev->hw.hw.io_base + 332 HCLGEVF_TQP_REG_OFFSET + 333 HCLGEVF_TQP_EXT_REG_OFFSET + 334 (i - HCLGEVF_TQP_MAX_SIZE_DEV_V2) * 335 HCLGEVF_TQP_REG_SIZE; 336 337 /* when device supports tx push and has device memory, 338 * the queue can execute push mode or doorbell mode on 339 * device memory. 340 */ 341 if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) 342 tqp->q.mem_base = hdev->hw.hw.mem_base + 343 HCLGEVF_TQP_MEM_OFFSET(hdev, i); 344 345 tqp++; 346 } 347 348 return 0; 349 } 350 351 static int hclgevf_knic_setup(struct hclgevf_dev *hdev) 352 { 353 struct hnae3_handle *nic = &hdev->nic; 354 struct hnae3_knic_private_info *kinfo; 355 u16 new_tqps = hdev->num_tqps; 356 unsigned int i; 357 u8 num_tc = 0; 358 359 kinfo = &nic->kinfo; 360 kinfo->num_tx_desc = hdev->num_tx_desc; 361 kinfo->num_rx_desc = hdev->num_rx_desc; 362 kinfo->rx_buf_len = hdev->rx_buf_len; 363 for (i = 0; i < HCLGE_COMM_MAX_TC_NUM; i++) 364 if (hdev->hw_tc_map & BIT(i)) 365 num_tc++; 366 367 num_tc = num_tc ? 
num_tc : 1; 368 kinfo->tc_info.num_tc = num_tc; 369 kinfo->rss_size = min_t(u16, hdev->rss_size_max, new_tqps / num_tc); 370 new_tqps = kinfo->rss_size * num_tc; 371 kinfo->num_tqps = min(new_tqps, hdev->num_tqps); 372 373 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, 374 sizeof(struct hnae3_queue *), GFP_KERNEL); 375 if (!kinfo->tqp) 376 return -ENOMEM; 377 378 for (i = 0; i < kinfo->num_tqps; i++) { 379 hdev->htqp[i].q.handle = &hdev->nic; 380 hdev->htqp[i].q.tqp_index = i; 381 kinfo->tqp[i] = &hdev->htqp[i].q; 382 } 383 384 /* after init the max rss_size and tqps, adjust the default tqp numbers 385 * and rss size with the actual vector numbers 386 */ 387 kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps); 388 kinfo->rss_size = min_t(u16, kinfo->num_tqps / num_tc, 389 kinfo->rss_size); 390 391 return 0; 392 } 393 394 static void hclgevf_request_link_info(struct hclgevf_dev *hdev) 395 { 396 struct hclge_vf_to_pf_msg send_msg; 397 int status; 398 399 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0); 400 status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 401 if (status) 402 dev_err(&hdev->pdev->dev, 403 "VF failed to fetch link status(%d) from PF", status); 404 } 405 406 void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) 407 { 408 struct hnae3_handle *rhandle = &hdev->roce; 409 struct hnae3_handle *handle = &hdev->nic; 410 struct hnae3_client *rclient; 411 struct hnae3_client *client; 412 413 if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state)) 414 return; 415 416 client = handle->client; 417 rclient = hdev->roce_client; 418 419 link_state = 420 test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state; 421 if (link_state != hdev->hw.mac.link) { 422 hdev->hw.mac.link = link_state; 423 client->ops->link_status_change(handle, !!link_state); 424 if (rclient && rclient->ops->link_status_change) 425 rclient->ops->link_status_change(rhandle, !!link_state); 426 } 427 428 clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); 429 } 430 431 static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) 432 { 433 #define HCLGEVF_ADVERTISING 0 434 #define HCLGEVF_SUPPORTED 1 435 436 struct hclge_vf_to_pf_msg send_msg; 437 438 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0); 439 send_msg.data[0] = HCLGEVF_ADVERTISING; 440 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 441 send_msg.data[0] = HCLGEVF_SUPPORTED; 442 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 443 } 444 445 static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) 446 { 447 struct hnae3_handle *nic = &hdev->nic; 448 int ret; 449 450 nic->ae_algo = &ae_algovf; 451 nic->pdev = hdev->pdev; 452 bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits, 453 MAX_NUMNODES); 454 nic->flags |= HNAE3_SUPPORT_VF; 455 nic->kinfo.io_base = hdev->hw.hw.io_base; 456 457 ret = hclgevf_knic_setup(hdev); 458 if (ret) 459 dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n", 460 ret); 461 return ret; 462 } 463 464 static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id) 465 { 466 if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) { 467 dev_warn(&hdev->pdev->dev, 468 "vector(vector_id %d) has been freed.\n", vector_id); 469 return; 470 } 471 472 hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT; 473 hdev->num_msi_left += 1; 474 hdev->num_msi_used -= 1; 475 } 476 477 static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num, 478 struct hnae3_vector_info *vector_info) 479 { 
480 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 481 struct hnae3_vector_info *vector = vector_info; 482 int alloc = 0; 483 int i, j; 484 485 vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num); 486 vector_num = min(hdev->num_msi_left, vector_num); 487 488 for (j = 0; j < vector_num; j++) { 489 for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) { 490 if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) { 491 vector->vector = pci_irq_vector(hdev->pdev, i); 492 vector->io_addr = hdev->hw.hw.io_base + 493 HCLGEVF_VECTOR_REG_BASE + 494 (i - 1) * HCLGEVF_VECTOR_REG_OFFSET; 495 hdev->vector_status[i] = 0; 496 hdev->vector_irq[i] = vector->vector; 497 498 vector++; 499 alloc++; 500 501 break; 502 } 503 } 504 } 505 hdev->num_msi_left -= alloc; 506 hdev->num_msi_used += alloc; 507 508 return alloc; 509 } 510 511 static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector) 512 { 513 int i; 514 515 for (i = 0; i < hdev->num_msi; i++) 516 if (vector == hdev->vector_irq[i]) 517 return i; 518 519 return -EINVAL; 520 } 521 522 /* for revision 0x20, vf shared the same rss config with pf */ 523 static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev) 524 { 525 #define HCLGEVF_RSS_MBX_RESP_LEN 8 526 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 527 u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN]; 528 struct hclge_vf_to_pf_msg send_msg; 529 u16 msg_num, hash_key_index; 530 u8 index; 531 int ret; 532 533 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0); 534 msg_num = (HCLGE_COMM_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) / 535 HCLGEVF_RSS_MBX_RESP_LEN; 536 for (index = 0; index < msg_num; index++) { 537 send_msg.data[0] = index; 538 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg, 539 HCLGEVF_RSS_MBX_RESP_LEN); 540 if (ret) { 541 dev_err(&hdev->pdev->dev, 542 "VF get rss hash key from PF failed, ret=%d", 543 ret); 544 return ret; 545 } 546 547 hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index; 548 if (index == msg_num - 1) 549 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 550 &resp_msg[0], 551 HCLGE_COMM_RSS_KEY_SIZE - hash_key_index); 552 else 553 memcpy(&rss_cfg->rss_hash_key[hash_key_index], 554 &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN); 555 } 556 557 return 0; 558 } 559 560 static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key, 561 u8 *hfunc) 562 { 563 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 564 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 565 int ret; 566 567 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 568 hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc); 569 } else { 570 if (hfunc) 571 *hfunc = ETH_RSS_HASH_TOP; 572 if (key) { 573 ret = hclgevf_get_rss_hash_key(hdev); 574 if (ret) 575 return ret; 576 memcpy(key, rss_cfg->rss_hash_key, 577 HCLGE_COMM_RSS_KEY_SIZE); 578 } 579 } 580 581 hclge_comm_get_rss_indir_tbl(rss_cfg, indir, 582 hdev->ae_dev->dev_specs.rss_ind_tbl_size); 583 584 return 0; 585 } 586 587 static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir, 588 const u8 *key, const u8 hfunc) 589 { 590 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 591 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 592 int ret, i; 593 594 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 595 ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, 596 hfunc); 597 if (ret) 598 return ret; 599 } 600 601 /* update the shadow RSS table with user specified qids */ 602 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 603 
rss_cfg->rss_indirection_tbl[i] = indir[i]; 604 605 /* update the hardware */ 606 return hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, 607 rss_cfg->rss_indirection_tbl); 608 } 609 610 static int hclgevf_set_rss_tuple(struct hnae3_handle *handle, 611 struct ethtool_rxnfc *nfc) 612 { 613 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 614 int ret; 615 616 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 617 return -EOPNOTSUPP; 618 619 ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, 620 &hdev->rss_cfg, nfc); 621 if (ret) 622 dev_err(&hdev->pdev->dev, 623 "failed to set rss tuple, ret = %d.\n", ret); 624 625 return ret; 626 } 627 628 static int hclgevf_get_rss_tuple(struct hnae3_handle *handle, 629 struct ethtool_rxnfc *nfc) 630 { 631 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 632 u8 tuple_sets; 633 int ret; 634 635 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) 636 return -EOPNOTSUPP; 637 638 nfc->data = 0; 639 640 ret = hclge_comm_get_rss_tuple(&hdev->rss_cfg, nfc->flow_type, 641 &tuple_sets); 642 if (ret || !tuple_sets) 643 return ret; 644 645 nfc->data = hclge_comm_convert_rss_tuple(tuple_sets); 646 647 return 0; 648 } 649 650 static int hclgevf_get_tc_size(struct hnae3_handle *handle) 651 { 652 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 653 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 654 655 return rss_cfg->rss_size; 656 } 657 658 static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en, 659 int vector_id, 660 struct hnae3_ring_chain_node *ring_chain) 661 { 662 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 663 struct hclge_vf_to_pf_msg send_msg; 664 struct hnae3_ring_chain_node *node; 665 int status; 666 int i = 0; 667 668 memset(&send_msg, 0, sizeof(send_msg)); 669 send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR : 670 HCLGE_MBX_UNMAP_RING_TO_VECTOR; 671 send_msg.vector_id = vector_id; 672 673 for (node = ring_chain; node; node = node->next) { 674 send_msg.param[i].ring_type = 675 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B); 676 677 send_msg.param[i].tqp_index = node->tqp_index; 678 send_msg.param[i].int_gl_index = 679 hnae3_get_field(node->int_gl_idx, 680 HNAE3_RING_GL_IDX_M, 681 HNAE3_RING_GL_IDX_S); 682 683 i++; 684 if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) { 685 send_msg.ring_num = i; 686 687 status = hclgevf_send_mbx_msg(hdev, &send_msg, false, 688 NULL, 0); 689 if (status) { 690 dev_err(&hdev->pdev->dev, 691 "Map TQP fail, status is %d.\n", 692 status); 693 return status; 694 } 695 i = 0; 696 } 697 } 698 699 return 0; 700 } 701 702 static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector, 703 struct hnae3_ring_chain_node *ring_chain) 704 { 705 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 706 int vector_id; 707 708 vector_id = hclgevf_get_vector_index(hdev, vector); 709 if (vector_id < 0) { 710 dev_err(&handle->pdev->dev, 711 "Get vector index fail. 
ret =%d\n", vector_id); 712 return vector_id; 713 } 714 715 return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain); 716 } 717 718 static int hclgevf_unmap_ring_from_vector( 719 struct hnae3_handle *handle, 720 int vector, 721 struct hnae3_ring_chain_node *ring_chain) 722 { 723 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 724 int ret, vector_id; 725 726 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 727 return 0; 728 729 vector_id = hclgevf_get_vector_index(hdev, vector); 730 if (vector_id < 0) { 731 dev_err(&handle->pdev->dev, 732 "Get vector index fail. ret =%d\n", vector_id); 733 return vector_id; 734 } 735 736 ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain); 737 if (ret) 738 dev_err(&handle->pdev->dev, 739 "Unmap ring from vector fail. vector=%d, ret =%d\n", 740 vector_id, 741 ret); 742 743 return ret; 744 } 745 746 static int hclgevf_put_vector(struct hnae3_handle *handle, int vector) 747 { 748 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 749 int vector_id; 750 751 vector_id = hclgevf_get_vector_index(hdev, vector); 752 if (vector_id < 0) { 753 dev_err(&handle->pdev->dev, 754 "hclgevf_put_vector get vector index fail. ret =%d\n", 755 vector_id); 756 return vector_id; 757 } 758 759 hclgevf_free_vector(hdev, vector_id); 760 761 return 0; 762 } 763 764 static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev, 765 bool en_uc_pmc, bool en_mc_pmc, 766 bool en_bc_pmc) 767 { 768 struct hnae3_handle *handle = &hdev->nic; 769 struct hclge_vf_to_pf_msg send_msg; 770 int ret; 771 772 memset(&send_msg, 0, sizeof(send_msg)); 773 send_msg.code = HCLGE_MBX_SET_PROMISC_MODE; 774 send_msg.en_bc = en_bc_pmc ? 1 : 0; 775 send_msg.en_uc = en_uc_pmc ? 1 : 0; 776 send_msg.en_mc = en_mc_pmc ? 1 : 0; 777 send_msg.en_limit_promisc = test_bit(HNAE3_PFLAG_LIMIT_PROMISC, 778 &handle->priv_flags) ? 
1 : 0; 779 780 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 781 if (ret) 782 dev_err(&hdev->pdev->dev, 783 "Set promisc mode fail, status is %d.\n", ret); 784 785 return ret; 786 } 787 788 static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc, 789 bool en_mc_pmc) 790 { 791 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 792 bool en_bc_pmc; 793 794 en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2; 795 796 return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc, 797 en_bc_pmc); 798 } 799 800 static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle) 801 { 802 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 803 804 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 805 hclgevf_task_schedule(hdev, 0); 806 } 807 808 static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev) 809 { 810 struct hnae3_handle *handle = &hdev->nic; 811 bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE; 812 bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE; 813 int ret; 814 815 if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) { 816 ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc); 817 if (!ret) 818 clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 819 } 820 } 821 822 static int hclgevf_tqp_enable_cmd_send(struct hclgevf_dev *hdev, u16 tqp_id, 823 u16 stream_id, bool enable) 824 { 825 struct hclgevf_cfg_com_tqp_queue_cmd *req; 826 struct hclge_desc desc; 827 828 req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data; 829 830 hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); 831 req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK); 832 req->stream_id = cpu_to_le16(stream_id); 833 if (enable) 834 req->enable |= 1U << HCLGEVF_TQP_ENABLE_B; 835 836 return hclgevf_cmd_send(&hdev->hw, &desc, 1); 837 } 838 839 static int hclgevf_tqp_enable(struct hnae3_handle *handle, bool enable) 840 { 841 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 842 int ret; 843 u16 i; 844 845 for (i = 0; i < handle->kinfo.num_tqps; i++) { 846 ret = hclgevf_tqp_enable_cmd_send(hdev, i, 0, enable); 847 if (ret) 848 return ret; 849 } 850 851 return 0; 852 } 853 854 static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p) 855 { 856 struct hclge_vf_to_pf_msg send_msg; 857 u8 host_mac[ETH_ALEN]; 858 int status; 859 860 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0); 861 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac, 862 ETH_ALEN); 863 if (status) { 864 dev_err(&hdev->pdev->dev, 865 "fail to get VF MAC from host %d", status); 866 return status; 867 } 868 869 ether_addr_copy(p, host_mac); 870 871 return 0; 872 } 873 874 static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p) 875 { 876 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 877 u8 host_mac_addr[ETH_ALEN]; 878 879 if (hclgevf_get_host_mac_addr(hdev, host_mac_addr)) 880 return; 881 882 hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr); 883 if (hdev->has_pf_mac) 884 ether_addr_copy(p, host_mac_addr); 885 else 886 ether_addr_copy(p, hdev->hw.mac.mac_addr); 887 } 888 889 static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p, 890 bool is_first) 891 { 892 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 893 u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr; 894 struct hclge_vf_to_pf_msg send_msg; 895 u8 *new_mac_addr = (u8 *)p; 896 int status; 897 898 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0); 899 send_msg.subcode = 
HCLGE_MBX_MAC_VLAN_UC_MODIFY; 900 ether_addr_copy(send_msg.data, new_mac_addr); 901 if (is_first && !hdev->has_pf_mac) 902 eth_zero_addr(&send_msg.data[ETH_ALEN]); 903 else 904 ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr); 905 status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 906 if (!status) 907 ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr); 908 909 return status; 910 } 911 912 static struct hclgevf_mac_addr_node * 913 hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr) 914 { 915 struct hclgevf_mac_addr_node *mac_node, *tmp; 916 917 list_for_each_entry_safe(mac_node, tmp, list, node) 918 if (ether_addr_equal(mac_addr, mac_node->mac_addr)) 919 return mac_node; 920 921 return NULL; 922 } 923 924 static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node, 925 enum HCLGEVF_MAC_NODE_STATE state) 926 { 927 switch (state) { 928 /* from set_rx_mode or tmp_add_list */ 929 case HCLGEVF_MAC_TO_ADD: 930 if (mac_node->state == HCLGEVF_MAC_TO_DEL) 931 mac_node->state = HCLGEVF_MAC_ACTIVE; 932 break; 933 /* only from set_rx_mode */ 934 case HCLGEVF_MAC_TO_DEL: 935 if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 936 list_del(&mac_node->node); 937 kfree(mac_node); 938 } else { 939 mac_node->state = HCLGEVF_MAC_TO_DEL; 940 } 941 break; 942 /* only from tmp_add_list, the mac_node->state won't be 943 * HCLGEVF_MAC_ACTIVE 944 */ 945 case HCLGEVF_MAC_ACTIVE: 946 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 947 mac_node->state = HCLGEVF_MAC_ACTIVE; 948 break; 949 } 950 } 951 952 static int hclgevf_update_mac_list(struct hnae3_handle *handle, 953 enum HCLGEVF_MAC_NODE_STATE state, 954 enum HCLGEVF_MAC_ADDR_TYPE mac_type, 955 const unsigned char *addr) 956 { 957 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 958 struct hclgevf_mac_addr_node *mac_node; 959 struct list_head *list; 960 961 list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 962 &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 963 964 spin_lock_bh(&hdev->mac_table.mac_list_lock); 965 966 /* if the mac addr is already in the mac list, no need to add a new 967 * one into it, just check the mac addr state, convert it to a new 968 * state, or just remove it, or do nothing. 
969 */ 970 mac_node = hclgevf_find_mac_node(list, addr); 971 if (mac_node) { 972 hclgevf_update_mac_node(mac_node, state); 973 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 974 return 0; 975 } 976 /* if this address is never added, unnecessary to delete */ 977 if (state == HCLGEVF_MAC_TO_DEL) { 978 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 979 return -ENOENT; 980 } 981 982 mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC); 983 if (!mac_node) { 984 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 985 return -ENOMEM; 986 } 987 988 mac_node->state = state; 989 ether_addr_copy(mac_node->mac_addr, addr); 990 list_add_tail(&mac_node->node, list); 991 992 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 993 return 0; 994 } 995 996 static int hclgevf_add_uc_addr(struct hnae3_handle *handle, 997 const unsigned char *addr) 998 { 999 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1000 HCLGEVF_MAC_ADDR_UC, addr); 1001 } 1002 1003 static int hclgevf_rm_uc_addr(struct hnae3_handle *handle, 1004 const unsigned char *addr) 1005 { 1006 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1007 HCLGEVF_MAC_ADDR_UC, addr); 1008 } 1009 1010 static int hclgevf_add_mc_addr(struct hnae3_handle *handle, 1011 const unsigned char *addr) 1012 { 1013 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD, 1014 HCLGEVF_MAC_ADDR_MC, addr); 1015 } 1016 1017 static int hclgevf_rm_mc_addr(struct hnae3_handle *handle, 1018 const unsigned char *addr) 1019 { 1020 return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL, 1021 HCLGEVF_MAC_ADDR_MC, addr); 1022 } 1023 1024 static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev, 1025 struct hclgevf_mac_addr_node *mac_node, 1026 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1027 { 1028 struct hclge_vf_to_pf_msg send_msg; 1029 u8 code, subcode; 1030 1031 if (mac_type == HCLGEVF_MAC_ADDR_UC) { 1032 code = HCLGE_MBX_SET_UNICAST; 1033 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1034 subcode = HCLGE_MBX_MAC_VLAN_UC_ADD; 1035 else 1036 subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE; 1037 } else { 1038 code = HCLGE_MBX_SET_MULTICAST; 1039 if (mac_node->state == HCLGEVF_MAC_TO_ADD) 1040 subcode = HCLGE_MBX_MAC_VLAN_MC_ADD; 1041 else 1042 subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE; 1043 } 1044 1045 hclgevf_build_send_msg(&send_msg, code, subcode); 1046 ether_addr_copy(send_msg.data, mac_node->mac_addr); 1047 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1048 } 1049 1050 static void hclgevf_config_mac_list(struct hclgevf_dev *hdev, 1051 struct list_head *list, 1052 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1053 { 1054 char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN]; 1055 struct hclgevf_mac_addr_node *mac_node, *tmp; 1056 int ret; 1057 1058 list_for_each_entry_safe(mac_node, tmp, list, node) { 1059 ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type); 1060 if (ret) { 1061 hnae3_format_mac_addr(format_mac_addr, 1062 mac_node->mac_addr); 1063 dev_err(&hdev->pdev->dev, 1064 "failed to configure mac %s, state = %d, ret = %d\n", 1065 format_mac_addr, mac_node->state, ret); 1066 return; 1067 } 1068 if (mac_node->state == HCLGEVF_MAC_TO_ADD) { 1069 mac_node->state = HCLGEVF_MAC_ACTIVE; 1070 } else { 1071 list_del(&mac_node->node); 1072 kfree(mac_node); 1073 } 1074 } 1075 } 1076 1077 static void hclgevf_sync_from_add_list(struct list_head *add_list, 1078 struct list_head *mac_list) 1079 { 1080 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1081 1082 list_for_each_entry_safe(mac_node, tmp, add_list, node) { 1083 /* if the mac address from tmp_add_list is not in the 
1084 * uc/mc_mac_list, it means have received a TO_DEL request 1085 * during the time window of sending mac config request to PF 1086 * If mac_node state is ACTIVE, then change its state to TO_DEL, 1087 * then it will be removed at next time. If is TO_ADD, it means 1088 * send TO_ADD request failed, so just remove the mac node. 1089 */ 1090 new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1091 if (new_node) { 1092 hclgevf_update_mac_node(new_node, mac_node->state); 1093 list_del(&mac_node->node); 1094 kfree(mac_node); 1095 } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) { 1096 mac_node->state = HCLGEVF_MAC_TO_DEL; 1097 list_move_tail(&mac_node->node, mac_list); 1098 } else { 1099 list_del(&mac_node->node); 1100 kfree(mac_node); 1101 } 1102 } 1103 } 1104 1105 static void hclgevf_sync_from_del_list(struct list_head *del_list, 1106 struct list_head *mac_list) 1107 { 1108 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1109 1110 list_for_each_entry_safe(mac_node, tmp, del_list, node) { 1111 new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr); 1112 if (new_node) { 1113 /* If the mac addr is exist in the mac list, it means 1114 * received a new request TO_ADD during the time window 1115 * of sending mac addr configurrequest to PF, so just 1116 * change the mac state to ACTIVE. 1117 */ 1118 new_node->state = HCLGEVF_MAC_ACTIVE; 1119 list_del(&mac_node->node); 1120 kfree(mac_node); 1121 } else { 1122 list_move_tail(&mac_node->node, mac_list); 1123 } 1124 } 1125 } 1126 1127 static void hclgevf_clear_list(struct list_head *list) 1128 { 1129 struct hclgevf_mac_addr_node *mac_node, *tmp; 1130 1131 list_for_each_entry_safe(mac_node, tmp, list, node) { 1132 list_del(&mac_node->node); 1133 kfree(mac_node); 1134 } 1135 } 1136 1137 static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev, 1138 enum HCLGEVF_MAC_ADDR_TYPE mac_type) 1139 { 1140 struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node; 1141 struct list_head tmp_add_list, tmp_del_list; 1142 struct list_head *list; 1143 1144 INIT_LIST_HEAD(&tmp_add_list); 1145 INIT_LIST_HEAD(&tmp_del_list); 1146 1147 /* move the mac addr to the tmp_add_list and tmp_del_list, then 1148 * we can add/delete these mac addr outside the spin lock 1149 */ 1150 list = (mac_type == HCLGEVF_MAC_ADDR_UC) ? 1151 &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list; 1152 1153 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1154 1155 list_for_each_entry_safe(mac_node, tmp, list, node) { 1156 switch (mac_node->state) { 1157 case HCLGEVF_MAC_TO_DEL: 1158 list_move_tail(&mac_node->node, &tmp_del_list); 1159 break; 1160 case HCLGEVF_MAC_TO_ADD: 1161 new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 1162 if (!new_node) 1163 goto stop_traverse; 1164 1165 ether_addr_copy(new_node->mac_addr, mac_node->mac_addr); 1166 new_node->state = mac_node->state; 1167 list_add_tail(&new_node->node, &tmp_add_list); 1168 break; 1169 default: 1170 break; 1171 } 1172 } 1173 1174 stop_traverse: 1175 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1176 1177 /* delete first, in order to get max mac table space for adding */ 1178 hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type); 1179 hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type); 1180 1181 /* if some mac addresses were added/deleted fail, move back to the 1182 * mac_list, and retry at next time. 
1183 */ 1184 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1185 1186 hclgevf_sync_from_del_list(&tmp_del_list, list); 1187 hclgevf_sync_from_add_list(&tmp_add_list, list); 1188 1189 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1190 } 1191 1192 static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev) 1193 { 1194 hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC); 1195 hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC); 1196 } 1197 1198 static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev) 1199 { 1200 spin_lock_bh(&hdev->mac_table.mac_list_lock); 1201 1202 hclgevf_clear_list(&hdev->mac_table.uc_mac_list); 1203 hclgevf_clear_list(&hdev->mac_table.mc_mac_list); 1204 1205 spin_unlock_bh(&hdev->mac_table.mac_list_lock); 1206 } 1207 1208 static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable) 1209 { 1210 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1211 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; 1212 struct hclge_vf_to_pf_msg send_msg; 1213 1214 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) 1215 return -EOPNOTSUPP; 1216 1217 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1218 HCLGE_MBX_ENABLE_VLAN_FILTER); 1219 send_msg.data[0] = enable ? 1 : 0; 1220 1221 return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1222 } 1223 1224 static int hclgevf_set_vlan_filter(struct hnae3_handle *handle, 1225 __be16 proto, u16 vlan_id, 1226 bool is_kill) 1227 { 1228 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1229 struct hclge_mbx_vlan_filter *vlan_filter; 1230 struct hclge_vf_to_pf_msg send_msg; 1231 int ret; 1232 1233 if (vlan_id > HCLGEVF_MAX_VLAN_ID) 1234 return -EINVAL; 1235 1236 if (proto != htons(ETH_P_8021Q)) 1237 return -EPROTONOSUPPORT; 1238 1239 /* When device is resetting or reset failed, firmware is unable to 1240 * handle mailbox. Just record the vlan id, and remove it after 1241 * reset finished. 1242 */ 1243 if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 1244 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) { 1245 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1246 return -EBUSY; 1247 } else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) { 1248 clear_bit(vlan_id, hdev->vlan_del_fail_bmap); 1249 } 1250 1251 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1252 HCLGE_MBX_VLAN_FILTER); 1253 vlan_filter = (struct hclge_mbx_vlan_filter *)send_msg.data; 1254 vlan_filter->is_kill = is_kill; 1255 vlan_filter->vlan_id = cpu_to_le16(vlan_id); 1256 vlan_filter->proto = cpu_to_le16(be16_to_cpu(proto)); 1257 1258 /* when remove hw vlan filter failed, record the vlan id, 1259 * and try to remove it from hw later, to be consistence 1260 * with stack. 
1261 */ 1262 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1263 if (is_kill && ret) 1264 set_bit(vlan_id, hdev->vlan_del_fail_bmap); 1265 1266 return ret; 1267 } 1268 1269 static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) 1270 { 1271 #define HCLGEVF_MAX_SYNC_COUNT 60 1272 struct hnae3_handle *handle = &hdev->nic; 1273 int ret, sync_cnt = 0; 1274 u16 vlan_id; 1275 1276 if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID)) 1277 return; 1278 1279 rtnl_lock(); 1280 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1281 while (vlan_id != VLAN_N_VID) { 1282 ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q), 1283 vlan_id, true); 1284 if (ret) 1285 break; 1286 1287 clear_bit(vlan_id, hdev->vlan_del_fail_bmap); 1288 sync_cnt++; 1289 if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT) 1290 break; 1291 1292 vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID); 1293 } 1294 rtnl_unlock(); 1295 } 1296 1297 static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) 1298 { 1299 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1300 struct hclge_vf_to_pf_msg send_msg; 1301 1302 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 1303 HCLGE_MBX_VLAN_RX_OFF_CFG); 1304 send_msg.data[0] = enable ? 1 : 0; 1305 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1306 } 1307 1308 static int hclgevf_reset_tqp(struct hnae3_handle *handle) 1309 { 1310 #define HCLGEVF_RESET_ALL_QUEUE_DONE 1U 1311 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1312 struct hclge_vf_to_pf_msg send_msg; 1313 u8 return_status = 0; 1314 int ret; 1315 u16 i; 1316 1317 /* disable vf queue before send queue reset msg to PF */ 1318 ret = hclgevf_tqp_enable(handle, false); 1319 if (ret) { 1320 dev_err(&hdev->pdev->dev, "failed to disable tqp, ret = %d\n", 1321 ret); 1322 return ret; 1323 } 1324 1325 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 1326 1327 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &return_status, 1328 sizeof(return_status)); 1329 if (ret || return_status == HCLGEVF_RESET_ALL_QUEUE_DONE) 1330 return ret; 1331 1332 for (i = 1; i < handle->kinfo.num_tqps; i++) { 1333 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0); 1334 *(__le16 *)send_msg.data = cpu_to_le16(i); 1335 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1336 if (ret) 1337 return ret; 1338 } 1339 1340 return 0; 1341 } 1342 1343 static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) 1344 { 1345 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1346 struct hclge_mbx_mtu_info *mtu_info; 1347 struct hclge_vf_to_pf_msg send_msg; 1348 1349 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0); 1350 mtu_info = (struct hclge_mbx_mtu_info *)send_msg.data; 1351 mtu_info->mtu = cpu_to_le32(new_mtu); 1352 1353 return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1354 } 1355 1356 static int hclgevf_notify_client(struct hclgevf_dev *hdev, 1357 enum hnae3_reset_notify_type type) 1358 { 1359 struct hnae3_client *client = hdev->nic_client; 1360 struct hnae3_handle *handle = &hdev->nic; 1361 int ret; 1362 1363 if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) || 1364 !client) 1365 return 0; 1366 1367 if (!client->ops->reset_notify) 1368 return -EOPNOTSUPP; 1369 1370 ret = client->ops->reset_notify(handle, type); 1371 if (ret) 1372 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", 1373 type, ret); 1374 1375 return ret; 1376 } 1377 1378 static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev, 
1379 enum hnae3_reset_notify_type type) 1380 { 1381 struct hnae3_client *client = hdev->roce_client; 1382 struct hnae3_handle *handle = &hdev->roce; 1383 int ret; 1384 1385 if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client) 1386 return 0; 1387 1388 if (!client->ops->reset_notify) 1389 return -EOPNOTSUPP; 1390 1391 ret = client->ops->reset_notify(handle, type); 1392 if (ret) 1393 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", 1394 type, ret); 1395 return ret; 1396 } 1397 1398 static void hclgevf_set_reset_pending(struct hclgevf_dev *hdev, 1399 enum hnae3_reset_type reset_type) 1400 { 1401 /* When an incorrect reset type is executed, the get_reset_level 1402 * function generates the HNAE3_NONE_RESET flag. As a result, this 1403 * type do not need to pending. 1404 */ 1405 if (reset_type != HNAE3_NONE_RESET) 1406 set_bit(reset_type, &hdev->reset_pending); 1407 } 1408 1409 static int hclgevf_reset_wait(struct hclgevf_dev *hdev) 1410 { 1411 #define HCLGEVF_RESET_WAIT_US 20000 1412 #define HCLGEVF_RESET_WAIT_CNT 2000 1413 #define HCLGEVF_RESET_WAIT_TIMEOUT_US \ 1414 (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) 1415 1416 u32 val; 1417 int ret; 1418 1419 if (hdev->reset_type == HNAE3_VF_RESET) 1420 ret = readl_poll_timeout(hdev->hw.hw.io_base + 1421 HCLGEVF_VF_RST_ING, val, 1422 !(val & HCLGEVF_VF_RST_ING_BIT), 1423 HCLGEVF_RESET_WAIT_US, 1424 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1425 else 1426 ret = readl_poll_timeout(hdev->hw.hw.io_base + 1427 HCLGEVF_RST_ING, val, 1428 !(val & HCLGEVF_RST_ING_BITS), 1429 HCLGEVF_RESET_WAIT_US, 1430 HCLGEVF_RESET_WAIT_TIMEOUT_US); 1431 1432 /* hardware completion status should be available by this time */ 1433 if (ret) { 1434 dev_err(&hdev->pdev->dev, 1435 "couldn't get reset done status from h/w, timeout!\n"); 1436 return ret; 1437 } 1438 1439 /* we will wait a bit more to let reset of the stack to complete. This 1440 * might happen in case reset assertion was made by PF. Yes, this also 1441 * means we might end up waiting bit more even for VF reset. 
1442 */ 1443 if (hdev->reset_type == HNAE3_VF_FULL_RESET) 1444 msleep(5000); 1445 else 1446 msleep(500); 1447 1448 return 0; 1449 } 1450 1451 static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable) 1452 { 1453 u32 reg_val; 1454 1455 reg_val = hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); 1456 if (enable) 1457 reg_val |= HCLGEVF_NIC_SW_RST_RDY; 1458 else 1459 reg_val &= ~HCLGEVF_NIC_SW_RST_RDY; 1460 1461 hclgevf_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, 1462 reg_val); 1463 } 1464 1465 static int hclgevf_reset_stack(struct hclgevf_dev *hdev) 1466 { 1467 int ret; 1468 1469 /* uninitialize the nic client */ 1470 ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); 1471 if (ret) 1472 return ret; 1473 1474 /* re-initialize the hclge device */ 1475 ret = hclgevf_reset_hdev(hdev); 1476 if (ret) { 1477 dev_err(&hdev->pdev->dev, 1478 "hclge device re-init failed, VF is disabled!\n"); 1479 return ret; 1480 } 1481 1482 /* bring up the nic client again */ 1483 ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); 1484 if (ret) 1485 return ret; 1486 1487 /* clear handshake status with IMP */ 1488 hclgevf_reset_handshake(hdev, false); 1489 1490 /* bring up the nic to enable TX/RX again */ 1491 return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 1492 } 1493 1494 static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) 1495 { 1496 #define HCLGEVF_RESET_SYNC_TIME 100 1497 1498 if (hdev->reset_type == HNAE3_VF_FUNC_RESET) { 1499 struct hclge_vf_to_pf_msg send_msg; 1500 int ret; 1501 1502 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0); 1503 ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0); 1504 if (ret) { 1505 dev_err(&hdev->pdev->dev, 1506 "failed to assert VF reset, ret = %d\n", ret); 1507 return ret; 1508 } 1509 hdev->rst_stats.vf_func_rst_cnt++; 1510 } 1511 1512 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 1513 /* inform hardware that preparatory work is done */ 1514 msleep(HCLGEVF_RESET_SYNC_TIME); 1515 hclgevf_reset_handshake(hdev, true); 1516 dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n", 1517 hdev->reset_type); 1518 1519 return 0; 1520 } 1521 1522 static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) 1523 { 1524 dev_info(&hdev->pdev->dev, "VF function reset count: %u\n", 1525 hdev->rst_stats.vf_func_rst_cnt); 1526 dev_info(&hdev->pdev->dev, "FLR reset count: %u\n", 1527 hdev->rst_stats.flr_rst_cnt); 1528 dev_info(&hdev->pdev->dev, "VF reset count: %u\n", 1529 hdev->rst_stats.vf_rst_cnt); 1530 dev_info(&hdev->pdev->dev, "reset done count: %u\n", 1531 hdev->rst_stats.rst_done_cnt); 1532 dev_info(&hdev->pdev->dev, "HW reset done count: %u\n", 1533 hdev->rst_stats.hw_rst_done_cnt); 1534 dev_info(&hdev->pdev->dev, "reset count: %u\n", 1535 hdev->rst_stats.rst_cnt); 1536 dev_info(&hdev->pdev->dev, "reset fail count: %u\n", 1537 hdev->rst_stats.rst_fail_cnt); 1538 dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n", 1539 hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE)); 1540 dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", 1541 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_STATE_REG)); 1542 dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", 1543 hclgevf_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG)); 1544 dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", 1545 hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); 1546 dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); 1547 } 1548 1549 static void hclgevf_reset_err_handle(struct 
hclgevf_dev *hdev) 1550 { 1551 /* recover handshake status with IMP when reset fail */ 1552 hclgevf_reset_handshake(hdev, true); 1553 hdev->rst_stats.rst_fail_cnt++; 1554 dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n", 1555 hdev->rst_stats.rst_fail_cnt); 1556 1557 if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT) 1558 hclgevf_set_reset_pending(hdev, hdev->reset_type); 1559 1560 if (hclgevf_is_reset_pending(hdev)) { 1561 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1562 hclgevf_reset_task_schedule(hdev); 1563 } else { 1564 set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1565 hclgevf_dump_rst_info(hdev); 1566 } 1567 } 1568 1569 static int hclgevf_reset_prepare(struct hclgevf_dev *hdev) 1570 { 1571 int ret; 1572 1573 hdev->rst_stats.rst_cnt++; 1574 1575 /* perform reset of the stack & ae device for a client */ 1576 ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); 1577 if (ret) 1578 return ret; 1579 1580 rtnl_lock(); 1581 /* bring down the nic to stop any ongoing TX/RX */ 1582 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 1583 rtnl_unlock(); 1584 if (ret) 1585 return ret; 1586 1587 return hclgevf_reset_prepare_wait(hdev); 1588 } 1589 1590 static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev) 1591 { 1592 int ret; 1593 1594 hdev->rst_stats.hw_rst_done_cnt++; 1595 ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); 1596 if (ret) 1597 return ret; 1598 1599 rtnl_lock(); 1600 /* now, re-initialize the nic client and ae device */ 1601 ret = hclgevf_reset_stack(hdev); 1602 rtnl_unlock(); 1603 if (ret) { 1604 dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); 1605 return ret; 1606 } 1607 1608 ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT); 1609 /* ignore RoCE notify error if it fails HCLGEVF_RESET_MAX_FAIL_CNT - 1 1610 * times 1611 */ 1612 if (ret && 1613 hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1) 1614 return ret; 1615 1616 ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT); 1617 if (ret) 1618 return ret; 1619 1620 hdev->last_reset_time = jiffies; 1621 hdev->rst_stats.rst_done_cnt++; 1622 hdev->rst_stats.rst_fail_cnt = 0; 1623 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 1624 1625 return 0; 1626 } 1627 1628 static void hclgevf_reset(struct hclgevf_dev *hdev) 1629 { 1630 if (hclgevf_reset_prepare(hdev)) 1631 goto err_reset; 1632 1633 /* check if VF could successfully fetch the hardware reset completion 1634 * status from the hardware 1635 */ 1636 if (hclgevf_reset_wait(hdev)) { 1637 /* can't do much in this situation, will disable VF */ 1638 dev_err(&hdev->pdev->dev, 1639 "failed to fetch H/W reset completion status\n"); 1640 goto err_reset; 1641 } 1642 1643 if (hclgevf_reset_rebuild(hdev)) 1644 goto err_reset; 1645 1646 return; 1647 1648 err_reset: 1649 hclgevf_reset_err_handle(hdev); 1650 } 1651 1652 static enum hnae3_reset_type hclgevf_get_reset_level(unsigned long *addr) 1653 { 1654 enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; 1655 1656 /* return the highest priority reset level amongst all */ 1657 if (test_bit(HNAE3_VF_RESET, addr)) { 1658 rst_level = HNAE3_VF_RESET; 1659 clear_bit(HNAE3_VF_RESET, addr); 1660 clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1661 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1662 } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { 1663 rst_level = HNAE3_VF_FULL_RESET; 1664 clear_bit(HNAE3_VF_FULL_RESET, addr); 1665 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1666 } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { 1667 rst_level = HNAE3_VF_PF_FUNC_RESET; 1668 
clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); 1669 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1670 } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { 1671 rst_level = HNAE3_VF_FUNC_RESET; 1672 clear_bit(HNAE3_VF_FUNC_RESET, addr); 1673 } else if (test_bit(HNAE3_FLR_RESET, addr)) { 1674 rst_level = HNAE3_FLR_RESET; 1675 clear_bit(HNAE3_FLR_RESET, addr); 1676 } 1677 1678 clear_bit(HNAE3_NONE_RESET, addr); 1679 1680 return rst_level; 1681 } 1682 1683 static void hclgevf_reset_event(struct pci_dev *pdev, 1684 struct hnae3_handle *handle) 1685 { 1686 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); 1687 struct hclgevf_dev *hdev = ae_dev->priv; 1688 1689 if (hdev->default_reset_request) 1690 hdev->reset_level = 1691 hclgevf_get_reset_level(&hdev->default_reset_request); 1692 else 1693 hdev->reset_level = HNAE3_VF_FUNC_RESET; 1694 1695 dev_info(&hdev->pdev->dev, "received reset request from VF enet, reset level is %d\n", 1696 hdev->reset_level); 1697 1698 /* reset of this VF requested */ 1699 set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); 1700 hclgevf_reset_task_schedule(hdev); 1701 1702 hdev->last_reset_time = jiffies; 1703 } 1704 1705 static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, 1706 enum hnae3_reset_type rst_type) 1707 { 1708 #define HCLGEVF_SUPPORT_RESET_TYPE \ 1709 (BIT(HNAE3_VF_RESET) | BIT(HNAE3_VF_FUNC_RESET) | \ 1710 BIT(HNAE3_VF_PF_FUNC_RESET) | BIT(HNAE3_VF_FULL_RESET) | \ 1711 BIT(HNAE3_FLR_RESET) | BIT(HNAE3_VF_EXP_RESET)) 1712 1713 struct hclgevf_dev *hdev = ae_dev->priv; 1714 1715 if (!(BIT(rst_type) & HCLGEVF_SUPPORT_RESET_TYPE)) { 1716 /* To prevent reset triggered by hclge_reset_event */ 1717 set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request); 1718 dev_info(&hdev->pdev->dev, "unsupported reset type %d\n", 1719 rst_type); 1720 return; 1721 } 1722 set_bit(rst_type, &hdev->default_reset_request); 1723 } 1724 1725 static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) 1726 { 1727 writel(en ? 
1 : 0, vector->addr); 1728 } 1729 1730 static void hclgevf_reset_prepare_general(struct hnae3_ae_dev *ae_dev, 1731 enum hnae3_reset_type rst_type) 1732 { 1733 #define HCLGEVF_RESET_RETRY_WAIT_MS 500 1734 #define HCLGEVF_RESET_RETRY_CNT 5 1735 1736 struct hclgevf_dev *hdev = ae_dev->priv; 1737 int retry_cnt = 0; 1738 int ret; 1739 1740 while (retry_cnt++ < HCLGEVF_RESET_RETRY_CNT) { 1741 down(&hdev->reset_sem); 1742 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1743 hdev->reset_type = rst_type; 1744 ret = hclgevf_reset_prepare(hdev); 1745 if (!ret && !hdev->reset_pending) 1746 break; 1747 1748 dev_err(&hdev->pdev->dev, 1749 "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n", 1750 ret, hdev->reset_pending, retry_cnt); 1751 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1752 up(&hdev->reset_sem); 1753 msleep(HCLGEVF_RESET_RETRY_WAIT_MS); 1754 } 1755 1756 /* disable misc vector before reset done */ 1757 hclgevf_enable_vector(&hdev->misc_vector, false); 1758 1759 if (hdev->reset_type == HNAE3_FLR_RESET) 1760 hdev->rst_stats.flr_rst_cnt++; 1761 } 1762 1763 static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev) 1764 { 1765 struct hclgevf_dev *hdev = ae_dev->priv; 1766 int ret; 1767 1768 hclgevf_enable_vector(&hdev->misc_vector, true); 1769 1770 ret = hclgevf_reset_rebuild(hdev); 1771 if (ret) 1772 dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", 1773 ret); 1774 1775 hdev->reset_type = HNAE3_NONE_RESET; 1776 if (test_and_clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 1777 up(&hdev->reset_sem); 1778 } 1779 1780 static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) 1781 { 1782 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 1783 1784 return hdev->fw_version; 1785 } 1786 1787 static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev) 1788 { 1789 struct hclgevf_misc_vector *vector = &hdev->misc_vector; 1790 1791 vector->vector_irq = pci_irq_vector(hdev->pdev, 1792 HCLGEVF_MISC_VECTOR_NUM); 1793 vector->addr = hdev->hw.hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE; 1794 /* vector status always valid for Vector 0 */ 1795 hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0; 1796 hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq; 1797 1798 hdev->num_msi_left -= 1; 1799 hdev->num_msi_used += 1; 1800 } 1801 1802 void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev) 1803 { 1804 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 1805 test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) && 1806 !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, 1807 &hdev->state)) 1808 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 1809 } 1810 1811 void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev) 1812 { 1813 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 1814 !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, 1815 &hdev->state)) 1816 mod_delayed_work(hclgevf_wq, &hdev->service_task, 0); 1817 } 1818 1819 static void hclgevf_task_schedule(struct hclgevf_dev *hdev, 1820 unsigned long delay) 1821 { 1822 if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) && 1823 !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) 1824 mod_delayed_work(hclgevf_wq, &hdev->service_task, delay); 1825 } 1826 1827 static void hclgevf_reset_service_task(struct hclgevf_dev *hdev) 1828 { 1829 #define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3 1830 1831 if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) 1832 return; 1833 1834 down(&hdev->reset_sem); 1835 set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1836 1837 if 
(test_and_clear_bit(HCLGEVF_RESET_PENDING, 1838 &hdev->reset_state)) { 1839 /* PF has intimated that it is about to reset the hardware. 1840 * We now have to poll & check if hardware has actually 1841 * completed the reset sequence. On hardware reset completion, 1842 * VF needs to reset the client and ae device. 1843 */ 1844 hdev->reset_attempts = 0; 1845 1846 hdev->last_reset_time = jiffies; 1847 hdev->reset_type = 1848 hclgevf_get_reset_level(&hdev->reset_pending); 1849 if (hdev->reset_type != HNAE3_NONE_RESET) 1850 hclgevf_reset(hdev); 1851 } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED, 1852 &hdev->reset_state)) { 1853 /* we could be here when either of below happens: 1854 * 1. reset was initiated due to watchdog timeout caused by 1855 * a. IMP was earlier reset and our TX got choked down and 1856 * which resulted in watchdog reacting and inducing VF 1857 * reset. This also means our cmdq would be unreliable. 1858 * b. problem in TX due to other lower layer(example link 1859 * layer not functioning properly etc.) 1860 * 2. VF reset might have been initiated due to some config 1861 * change. 1862 * 1863 * NOTE: Theres no clear way to detect above cases than to react 1864 * to the response of PF for this reset request. PF will ack the 1865 * 1b and 2. cases but we will not get any intimation about 1a 1866 * from PF as cmdq would be in unreliable state i.e. mailbox 1867 * communication between PF and VF would be broken. 1868 * 1869 * if we are never geting into pending state it means either: 1870 * 1. PF is not receiving our request which could be due to IMP 1871 * reset 1872 * 2. PF is screwed 1873 * We cannot do much for 2. but to check first we can try reset 1874 * our PCIe + stack and see if it alleviates the problem. 1875 */ 1876 if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) { 1877 /* prepare for full reset of stack + pcie interface */ 1878 hclgevf_set_reset_pending(hdev, HNAE3_VF_FULL_RESET); 1879 1880 /* "defer" schedule the reset task again */ 1881 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1882 } else { 1883 hdev->reset_attempts++; 1884 1885 hclgevf_set_reset_pending(hdev, hdev->reset_level); 1886 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 1887 } 1888 hclgevf_reset_task_schedule(hdev); 1889 } 1890 1891 hdev->reset_type = HNAE3_NONE_RESET; 1892 clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 1893 up(&hdev->reset_sem); 1894 } 1895 1896 static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev) 1897 { 1898 if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state)) 1899 return; 1900 1901 if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state)) 1902 return; 1903 1904 hclgevf_mbx_async_handler(hdev); 1905 1906 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 1907 } 1908 1909 static void hclgevf_keep_alive(struct hclgevf_dev *hdev) 1910 { 1911 struct hclge_vf_to_pf_msg send_msg; 1912 int ret; 1913 1914 if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state)) 1915 return; 1916 1917 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0); 1918 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 1919 if (ret) 1920 dev_err(&hdev->pdev->dev, 1921 "VF sends keep alive cmd failed(=%d)\n", ret); 1922 } 1923 1924 static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev) 1925 { 1926 unsigned long delta = round_jiffies_relative(HZ); 1927 struct hnae3_handle *handle = &hdev->nic; 1928 1929 if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state) || 1930 test_bit(HCLGE_COMM_STATE_CMD_DISABLE, 
&hdev->hw.hw.comm_state)) 1931 return; 1932 1933 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { 1934 delta = jiffies - hdev->last_serv_processed; 1935 1936 if (delta < round_jiffies_relative(HZ)) { 1937 delta = round_jiffies_relative(HZ) - delta; 1938 goto out; 1939 } 1940 } 1941 1942 hdev->serv_processed_cnt++; 1943 if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL)) 1944 hclgevf_keep_alive(hdev); 1945 1946 if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) { 1947 hdev->last_serv_processed = jiffies; 1948 goto out; 1949 } 1950 1951 if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL)) 1952 hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); 1953 1954 /* The VF does not need to request the link status when this bit is set, 1955 * because the PF will push its link status to the VFs whenever it changes. 1956 */ 1957 if (!test_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state)) 1958 hclgevf_request_link_info(hdev); 1959 1960 hclgevf_update_link_mode(hdev); 1961 1962 hclgevf_sync_vlan_filter(hdev); 1963 1964 hclgevf_sync_mac_table(hdev); 1965 1966 hclgevf_sync_promisc_mode(hdev); 1967 1968 hdev->last_serv_processed = jiffies; 1969 1970 out: 1971 hclgevf_task_schedule(hdev, delta); 1972 } 1973 1974 static void hclgevf_service_task(struct work_struct *work) 1975 { 1976 struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev, 1977 service_task.work); 1978 1979 hclgevf_reset_service_task(hdev); 1980 hclgevf_mailbox_service_task(hdev); 1981 hclgevf_periodic_service_task(hdev); 1982 1983 /* Handle reset and mbx again in case the periodic task delays their 1984 * handling by calling hclgevf_task_schedule() in 1985 * hclgevf_periodic_service_task() 1986 */ 1987 hclgevf_reset_service_task(hdev); 1988 hclgevf_mailbox_service_task(hdev); 1989 } 1990 1991 static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) 1992 { 1993 hclgevf_write_dev(&hdev->hw, HCLGE_COMM_VECTOR0_CMDQ_SRC_REG, regclr); 1994 } 1995 1996 static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, 1997 u32 *clearval) 1998 { 1999 u32 val, cmdq_stat_reg, rst_ing_reg; 2000 2001 /* fetch the events from their corresponding regs */ 2002 cmdq_stat_reg = hclgevf_read_dev(&hdev->hw, 2003 HCLGE_COMM_VECTOR0_CMDQ_STATE_REG); 2004 if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) { 2005 rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 2006 dev_info(&hdev->pdev->dev, 2007 "receive reset interrupt 0x%x!\n", rst_ing_reg); 2008 hclgevf_set_reset_pending(hdev, HNAE3_VF_RESET); 2009 set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); 2010 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 2011 *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B); 2012 hdev->rst_stats.vf_rst_cnt++; 2013 /* set up the VF hardware reset status; the PF will clear 2014 * this status once it has finished initializing. 2015 */ 2016 val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING); 2017 hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING, 2018 val | HCLGEVF_VF_RST_ING_BIT); 2019 return HCLGEVF_VECTOR0_EVENT_RST; 2020 } 2021 2022 /* check for vector0 mailbox(=CMDQ RX) event source */ 2023 if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) { 2024 /* For revision 0x21, the interrupt is cleared by writing 0 to 2025 * the corresponding bit in the clear register; writing 1 keeps the 2026 * old value. 2027 * For revision 0x20, the clear register is a read & write 2028 * register, so we should write 0 only to the bit we are 2029 * handling, and keep the other bits as in cmdq_stat_reg.
2030 */ 2031 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) 2032 *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2033 else 2034 *clearval = cmdq_stat_reg & 2035 ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); 2036 2037 return HCLGEVF_VECTOR0_EVENT_MBX; 2038 } 2039 2040 /* print other vector0 event source */ 2041 dev_info(&hdev->pdev->dev, 2042 "vector 0 interrupt from unknown source, cmdq_src = %#x\n", 2043 cmdq_stat_reg); 2044 2045 return HCLGEVF_VECTOR0_EVENT_OTHER; 2046 } 2047 2048 static void hclgevf_reset_timer(struct timer_list *t) 2049 { 2050 struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer); 2051 2052 hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST); 2053 hclgevf_reset_task_schedule(hdev); 2054 } 2055 2056 static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) 2057 { 2058 #define HCLGEVF_RESET_DELAY 5 2059 2060 enum hclgevf_evt_cause event_cause; 2061 struct hclgevf_dev *hdev = data; 2062 u32 clearval; 2063 2064 hclgevf_enable_vector(&hdev->misc_vector, false); 2065 event_cause = hclgevf_check_evt_cause(hdev, &clearval); 2066 if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) 2067 hclgevf_clear_event_cause(hdev, clearval); 2068 2069 switch (event_cause) { 2070 case HCLGEVF_VECTOR0_EVENT_RST: 2071 mod_timer(&hdev->reset_timer, 2072 jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY)); 2073 break; 2074 case HCLGEVF_VECTOR0_EVENT_MBX: 2075 hclgevf_mbx_handler(hdev); 2076 break; 2077 default: 2078 break; 2079 } 2080 2081 hclgevf_enable_vector(&hdev->misc_vector, true); 2082 2083 return IRQ_HANDLED; 2084 } 2085 2086 static int hclgevf_configure(struct hclgevf_dev *hdev) 2087 { 2088 int ret; 2089 2090 hdev->gro_en = true; 2091 2092 ret = hclgevf_get_basic_info(hdev); 2093 if (ret) 2094 return ret; 2095 2096 /* get current port based vlan state from PF */ 2097 ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2098 if (ret) 2099 return ret; 2100 2101 /* get queue configuration from PF */ 2102 ret = hclgevf_get_queue_info(hdev); 2103 if (ret) 2104 return ret; 2105 2106 /* get queue depth info from PF */ 2107 ret = hclgevf_get_queue_depth(hdev); 2108 if (ret) 2109 return ret; 2110 2111 return hclgevf_get_pf_media_type(hdev); 2112 } 2113 2114 static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) 2115 { 2116 struct pci_dev *pdev = ae_dev->pdev; 2117 struct hclgevf_dev *hdev; 2118 2119 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); 2120 if (!hdev) 2121 return -ENOMEM; 2122 2123 hdev->pdev = pdev; 2124 hdev->ae_dev = ae_dev; 2125 ae_dev->priv = hdev; 2126 2127 return 0; 2128 } 2129 2130 static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) 2131 { 2132 struct hnae3_handle *roce = &hdev->roce; 2133 struct hnae3_handle *nic = &hdev->nic; 2134 2135 roce->rinfo.num_vectors = hdev->num_roce_msix; 2136 2137 if (hdev->num_msi_left < roce->rinfo.num_vectors || 2138 hdev->num_msi_left == 0) 2139 return -EINVAL; 2140 2141 roce->rinfo.base_vector = hdev->roce_base_msix_offset; 2142 2143 roce->rinfo.netdev = nic->kinfo.netdev; 2144 roce->rinfo.roce_io_base = hdev->hw.hw.io_base; 2145 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; 2146 2147 roce->pdev = nic->pdev; 2148 roce->ae_algo = nic->ae_algo; 2149 bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits, 2150 MAX_NUMNODES); 2151 return 0; 2152 } 2153 2154 static int hclgevf_config_gro(struct hclgevf_dev *hdev) 2155 { 2156 struct hclgevf_cfg_gro_status_cmd *req; 2157 struct hclge_desc desc; 2158 int ret; 2159 2160 if (!hnae3_ae_dev_gro_supported(hdev->ae_dev)) 2161 return 0; 2162 2163 
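/* build one GRO config descriptor (write direction) carrying the current gro_en state and send it to the firmware */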
hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, 2164 false); 2165 req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; 2166 2167 req->gro_en = hdev->gro_en ? 1 : 0; 2168 2169 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2170 if (ret) 2171 dev_err(&hdev->pdev->dev, 2172 "VF GRO hardware config cmd failed, ret = %d.\n", ret); 2173 2174 return ret; 2175 } 2176 2177 static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) 2178 { 2179 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; 2180 u16 tc_offset[HCLGE_COMM_MAX_TC_NUM]; 2181 u16 tc_valid[HCLGE_COMM_MAX_TC_NUM]; 2182 u16 tc_size[HCLGE_COMM_MAX_TC_NUM]; 2183 int ret; 2184 2185 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { 2186 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, 2187 rss_cfg->rss_algo, 2188 rss_cfg->rss_hash_key); 2189 if (ret) 2190 return ret; 2191 2192 ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, rss_cfg); 2193 if (ret) 2194 return ret; 2195 } 2196 2197 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, 2198 rss_cfg->rss_indirection_tbl); 2199 if (ret) 2200 return ret; 2201 2202 hclge_comm_get_rss_tc_info(rss_cfg->rss_size, hdev->hw_tc_map, 2203 tc_offset, tc_valid, tc_size); 2204 2205 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, 2206 tc_valid, tc_size); 2207 } 2208 2209 static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) 2210 { 2211 struct hnae3_handle *nic = &hdev->nic; 2212 int ret; 2213 2214 ret = hclgevf_en_hw_strip_rxvtag(nic, true); 2215 if (ret) { 2216 dev_err(&hdev->pdev->dev, 2217 "failed to enable rx vlan offload, ret = %d\n", ret); 2218 return ret; 2219 } 2220 2221 return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, 2222 false); 2223 } 2224 2225 static void hclgevf_flush_link_update(struct hclgevf_dev *hdev) 2226 { 2227 #define HCLGEVF_FLUSH_LINK_TIMEOUT 100000 2228 2229 unsigned long last = hdev->serv_processed_cnt; 2230 int i = 0; 2231 2232 while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) && 2233 i++ < HCLGEVF_FLUSH_LINK_TIMEOUT && 2234 last == hdev->serv_processed_cnt) 2235 usleep_range(1, 1); 2236 } 2237 2238 static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) 2239 { 2240 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2241 2242 if (enable) { 2243 hclgevf_task_schedule(hdev, 0); 2244 } else { 2245 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2246 2247 smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */ 2248 hclgevf_flush_link_update(hdev); 2249 } 2250 } 2251 2252 static int hclgevf_ae_start(struct hnae3_handle *handle) 2253 { 2254 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2255 2256 clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2257 clear_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, &hdev->state); 2258 2259 hclge_comm_reset_tqp_stats(handle); 2260 2261 hclgevf_request_link_info(hdev); 2262 2263 hclgevf_update_link_mode(hdev); 2264 2265 return 0; 2266 } 2267 2268 static void hclgevf_ae_stop(struct hnae3_handle *handle) 2269 { 2270 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2271 2272 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2273 2274 if (hdev->reset_type != HNAE3_VF_RESET) 2275 hclgevf_reset_tqp(handle); 2276 2277 hclge_comm_reset_tqp_stats(handle); 2278 hclgevf_update_link_status(hdev, 0); 2279 } 2280 2281 static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) 2282 { 2283 #define HCLGEVF_STATE_ALIVE 1 2284 #define HCLGEVF_STATE_NOT_ALIVE 0 2285 2286 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2287 
struct hclge_vf_to_pf_msg send_msg; 2288 2289 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0); 2290 send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE : 2291 HCLGEVF_STATE_NOT_ALIVE; 2292 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2293 } 2294 2295 static int hclgevf_client_start(struct hnae3_handle *handle) 2296 { 2297 return hclgevf_set_alive(handle, true); 2298 } 2299 2300 static void hclgevf_client_stop(struct hnae3_handle *handle) 2301 { 2302 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 2303 int ret; 2304 2305 ret = hclgevf_set_alive(handle, false); 2306 if (ret) 2307 dev_warn(&hdev->pdev->dev, 2308 "%s failed %d\n", __func__, ret); 2309 } 2310 2311 static void hclgevf_state_init(struct hclgevf_dev *hdev) 2312 { 2313 clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); 2314 clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); 2315 clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state); 2316 2317 INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task); 2318 timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0); 2319 2320 mutex_init(&hdev->mbx_resp.mbx_mutex); 2321 sema_init(&hdev->reset_sem, 1); 2322 2323 spin_lock_init(&hdev->mac_table.mac_list_lock); 2324 INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list); 2325 INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list); 2326 2327 /* bring the device down */ 2328 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2329 } 2330 2331 static void hclgevf_state_uninit(struct hclgevf_dev *hdev) 2332 { 2333 set_bit(HCLGEVF_STATE_DOWN, &hdev->state); 2334 set_bit(HCLGEVF_STATE_REMOVING, &hdev->state); 2335 2336 if (hdev->service_task.work.func) 2337 cancel_delayed_work_sync(&hdev->service_task); 2338 2339 mutex_destroy(&hdev->mbx_resp.mbx_mutex); 2340 } 2341 2342 static int hclgevf_init_msi(struct hclgevf_dev *hdev) 2343 { 2344 struct pci_dev *pdev = hdev->pdev; 2345 int vectors; 2346 int i; 2347 2348 if (hnae3_dev_roce_supported(hdev)) 2349 vectors = pci_alloc_irq_vectors(pdev, 2350 hdev->roce_base_msix_offset + 1, 2351 hdev->num_msi, 2352 PCI_IRQ_MSIX); 2353 else 2354 vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM, 2355 hdev->num_msi, 2356 PCI_IRQ_MSI | PCI_IRQ_MSIX); 2357 2358 if (vectors < 0) { 2359 dev_err(&pdev->dev, 2360 "failed(%d) to allocate MSI/MSI-X vectors\n", 2361 vectors); 2362 return vectors; 2363 } 2364 if (vectors < hdev->num_msi) 2365 dev_warn(&hdev->pdev->dev, 2366 "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n", 2367 hdev->num_msi, vectors); 2368 2369 hdev->num_msi = vectors; 2370 hdev->num_msi_left = vectors; 2371 2372 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, 2373 sizeof(u16), GFP_KERNEL); 2374 if (!hdev->vector_status) { 2375 pci_free_irq_vectors(pdev); 2376 return -ENOMEM; 2377 } 2378 2379 for (i = 0; i < hdev->num_msi; i++) 2380 hdev->vector_status[i] = HCLGEVF_INVALID_VPORT; 2381 2382 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, 2383 sizeof(int), GFP_KERNEL); 2384 if (!hdev->vector_irq) { 2385 devm_kfree(&pdev->dev, hdev->vector_status); 2386 pci_free_irq_vectors(pdev); 2387 return -ENOMEM; 2388 } 2389 2390 return 0; 2391 } 2392 2393 static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) 2394 { 2395 struct pci_dev *pdev = hdev->pdev; 2396 2397 devm_kfree(&pdev->dev, hdev->vector_status); 2398 devm_kfree(&pdev->dev, hdev->vector_irq); 2399 pci_free_irq_vectors(pdev); 2400 } 2401 2402 static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) 2403 { 2404 int ret; 2405 2406 hclgevf_get_misc_vector(hdev); 2407 2408 snprintf(hdev->misc_vector.name, 
HNAE3_INT_NAME_LEN, "%s-misc-%s", 2409 HCLGEVF_NAME, pci_name(hdev->pdev)); 2410 ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, 2411 0, hdev->misc_vector.name, hdev); 2412 if (ret) { 2413 dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n", 2414 hdev->misc_vector.vector_irq); 2415 return ret; 2416 } 2417 2418 hclgevf_clear_event_cause(hdev, 0); 2419 2420 /* enable misc. vector(vector 0) */ 2421 hclgevf_enable_vector(&hdev->misc_vector, true); 2422 2423 return ret; 2424 } 2425 2426 static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) 2427 { 2428 /* disable misc vector(vector 0) */ 2429 hclgevf_enable_vector(&hdev->misc_vector, false); 2430 synchronize_irq(hdev->misc_vector.vector_irq); 2431 free_irq(hdev->misc_vector.vector_irq, hdev); 2432 hclgevf_free_vector(hdev, 0); 2433 } 2434 2435 static void hclgevf_info_show(struct hclgevf_dev *hdev) 2436 { 2437 struct device *dev = &hdev->pdev->dev; 2438 2439 dev_info(dev, "VF info begin:\n"); 2440 2441 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); 2442 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); 2443 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); 2444 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); 2445 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); 2446 dev_info(dev, "PF media type of this VF: %u\n", 2447 hdev->hw.mac.media_type); 2448 2449 dev_info(dev, "VF info end.\n"); 2450 } 2451 2452 static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev, 2453 struct hnae3_client *client) 2454 { 2455 struct hclgevf_dev *hdev = ae_dev->priv; 2456 int rst_cnt = hdev->rst_stats.rst_cnt; 2457 int ret; 2458 2459 ret = client->ops->init_instance(&hdev->nic); 2460 if (ret) 2461 return ret; 2462 2463 set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2464 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 2465 rst_cnt != hdev->rst_stats.rst_cnt) { 2466 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2467 2468 client->ops->uninit_instance(&hdev->nic, 0); 2469 return -EBUSY; 2470 } 2471 2472 hnae3_set_client_init_flag(client, ae_dev, 1); 2473 2474 if (netif_msg_drv(&hdev->nic)) 2475 hclgevf_info_show(hdev); 2476 2477 return 0; 2478 } 2479 2480 static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev, 2481 struct hnae3_client *client) 2482 { 2483 struct hclgevf_dev *hdev = ae_dev->priv; 2484 int ret; 2485 2486 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || 2487 !hdev->nic_client) 2488 return 0; 2489 2490 ret = hclgevf_init_roce_base_info(hdev); 2491 if (ret) 2492 return ret; 2493 2494 ret = client->ops->init_instance(&hdev->roce); 2495 if (ret) 2496 return ret; 2497 2498 set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2499 hnae3_set_client_init_flag(client, ae_dev, 1); 2500 2501 return 0; 2502 } 2503 2504 static int hclgevf_init_client_instance(struct hnae3_client *client, 2505 struct hnae3_ae_dev *ae_dev) 2506 { 2507 struct hclgevf_dev *hdev = ae_dev->priv; 2508 int ret; 2509 2510 switch (client->type) { 2511 case HNAE3_CLIENT_KNIC: 2512 hdev->nic_client = client; 2513 hdev->nic.client = client; 2514 2515 ret = hclgevf_init_nic_client_instance(ae_dev, client); 2516 if (ret) 2517 goto clear_nic; 2518 2519 ret = hclgevf_init_roce_client_instance(ae_dev, 2520 hdev->roce_client); 2521 if (ret) 2522 goto clear_roce; 2523 2524 break; 2525 case HNAE3_CLIENT_ROCE: 2526 if (hnae3_dev_roce_supported(hdev)) { 2527 hdev->roce_client = client; 2528 hdev->roce.client = client; 2529 } 
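/* this is a no-op until both the nic client and the roce client have registered; hclgevf_init_roce_client_instance() checks for both */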
2530 2531 ret = hclgevf_init_roce_client_instance(ae_dev, client); 2532 if (ret) 2533 goto clear_roce; 2534 2535 break; 2536 default: 2537 return -EINVAL; 2538 } 2539 2540 return 0; 2541 2542 clear_nic: 2543 hdev->nic_client = NULL; 2544 hdev->nic.client = NULL; 2545 return ret; 2546 clear_roce: 2547 hdev->roce_client = NULL; 2548 hdev->roce.client = NULL; 2549 return ret; 2550 } 2551 2552 static void hclgevf_uninit_client_instance(struct hnae3_client *client, 2553 struct hnae3_ae_dev *ae_dev) 2554 { 2555 struct hclgevf_dev *hdev = ae_dev->priv; 2556 2557 /* un-init roce, if it exists */ 2558 if (hdev->roce_client) { 2559 while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 2560 msleep(HCLGEVF_WAIT_RESET_DONE); 2561 clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state); 2562 2563 hdev->roce_client->ops->uninit_instance(&hdev->roce, 0); 2564 hdev->roce_client = NULL; 2565 hdev->roce.client = NULL; 2566 } 2567 2568 /* un-init nic/unic, if this was not called by roce client */ 2569 if (client->ops->uninit_instance && hdev->nic_client && 2570 client->type != HNAE3_CLIENT_ROCE) { 2571 while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) 2572 msleep(HCLGEVF_WAIT_RESET_DONE); 2573 clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state); 2574 2575 client->ops->uninit_instance(&hdev->nic, 0); 2576 hdev->nic_client = NULL; 2577 hdev->nic.client = NULL; 2578 } 2579 } 2580 2581 static int hclgevf_dev_mem_map(struct hclgevf_dev *hdev) 2582 { 2583 struct pci_dev *pdev = hdev->pdev; 2584 struct hclgevf_hw *hw = &hdev->hw; 2585 2586 /* for device does not have device memory, return directly */ 2587 if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGEVF_MEM_BAR))) 2588 return 0; 2589 2590 hw->hw.mem_base = 2591 devm_ioremap_wc(&pdev->dev, 2592 pci_resource_start(pdev, HCLGEVF_MEM_BAR), 2593 pci_resource_len(pdev, HCLGEVF_MEM_BAR)); 2594 if (!hw->hw.mem_base) { 2595 dev_err(&pdev->dev, "failed to map device memory\n"); 2596 return -EFAULT; 2597 } 2598 2599 return 0; 2600 } 2601 2602 static int hclgevf_pci_init(struct hclgevf_dev *hdev) 2603 { 2604 struct pci_dev *pdev = hdev->pdev; 2605 struct hclgevf_hw *hw; 2606 int ret; 2607 2608 ret = pci_enable_device(pdev); 2609 if (ret) { 2610 dev_err(&pdev->dev, "failed to enable PCI device\n"); 2611 return ret; 2612 } 2613 2614 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 2615 if (ret) { 2616 dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting"); 2617 goto err_disable_device; 2618 } 2619 2620 ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME); 2621 if (ret) { 2622 dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); 2623 goto err_disable_device; 2624 } 2625 2626 pci_set_master(pdev); 2627 hw = &hdev->hw; 2628 hw->hw.io_base = pci_iomap(pdev, 2, 0); 2629 if (!hw->hw.io_base) { 2630 dev_err(&pdev->dev, "can't map configuration register space\n"); 2631 ret = -ENOMEM; 2632 goto err_release_regions; 2633 } 2634 2635 ret = hclgevf_dev_mem_map(hdev); 2636 if (ret) 2637 goto err_unmap_io_base; 2638 2639 return 0; 2640 2641 err_unmap_io_base: 2642 pci_iounmap(pdev, hdev->hw.hw.io_base); 2643 err_release_regions: 2644 pci_release_regions(pdev); 2645 err_disable_device: 2646 pci_disable_device(pdev); 2647 2648 return ret; 2649 } 2650 2651 static void hclgevf_pci_uninit(struct hclgevf_dev *hdev) 2652 { 2653 struct pci_dev *pdev = hdev->pdev; 2654 2655 if (hdev->hw.hw.mem_base) 2656 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); 2657 2658 pci_iounmap(pdev, hdev->hw.hw.io_base); 2659 pci_release_regions(pdev); 2660 
pci_disable_device(pdev); 2661 } 2662 2663 static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) 2664 { 2665 struct hclgevf_query_res_cmd *req; 2666 struct hclge_desc desc; 2667 int ret; 2668 2669 hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RSRC, true); 2670 ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); 2671 if (ret) { 2672 dev_err(&hdev->pdev->dev, 2673 "query vf resource failed, ret = %d.\n", ret); 2674 return ret; 2675 } 2676 2677 req = (struct hclgevf_query_res_cmd *)desc.data; 2678 2679 if (hnae3_dev_roce_supported(hdev)) { 2680 hdev->roce_base_msix_offset = 2681 hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee), 2682 HCLGEVF_MSIX_OFT_ROCEE_M, 2683 HCLGEVF_MSIX_OFT_ROCEE_S); 2684 hdev->num_roce_msix = 2685 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 2686 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2687 2688 /* The NIC's MSI-X vector count is always equal to RoCE's. */ 2689 hdev->num_nic_msix = hdev->num_roce_msix; 2690 2691 /* The VF should have both NIC vectors and RoCE vectors; NIC vectors 2692 * are placed before RoCE vectors. The offset is fixed to 64. 2693 */ 2694 hdev->num_msi = hdev->num_roce_msix + 2695 hdev->roce_base_msix_offset; 2696 } else { 2697 hdev->num_msi = 2698 hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number), 2699 HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S); 2700 2701 hdev->num_nic_msix = hdev->num_msi; 2702 } 2703 2704 if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) { 2705 dev_err(&hdev->pdev->dev, 2706 "Only %u MSI resources available, not enough for the VF (min: 2).\n", 2707 hdev->num_nic_msix); 2708 return -EINVAL; 2709 } 2710 2711 return 0; 2712 } 2713 2714 static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev) 2715 { 2716 #define HCLGEVF_MAX_NON_TSO_BD_NUM 8U 2717 2718 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 2719 2720 ae_dev->dev_specs.max_non_tso_bd_num = 2721 HCLGEVF_MAX_NON_TSO_BD_NUM; 2722 ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 2723 ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; 2724 ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 2725 ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME; 2726 } 2727 2728 static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev, 2729 struct hclge_desc *desc) 2730 { 2731 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); 2732 struct hclgevf_dev_specs_0_cmd *req0; 2733 struct hclgevf_dev_specs_1_cmd *req1; 2734 2735 req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data; 2736 req1 = (struct hclgevf_dev_specs_1_cmd *)desc[1].data; 2737 2738 ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num; 2739 ae_dev->dev_specs.rss_ind_tbl_size = 2740 le16_to_cpu(req0->rss_ind_tbl_size); 2741 ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max); 2742 ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size); 2743 ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl); 2744 ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size); 2745 } 2746 2747 static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev) 2748 { 2749 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; 2750 2751 if (!dev_specs->max_non_tso_bd_num) 2752 dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM; 2753 if (!dev_specs->rss_ind_tbl_size) 2754 dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE; 2755 if (!dev_specs->rss_key_size) 2756 dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE; 2757 if (!dev_specs->max_int_gl) 2758 dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL; 2759 if (!dev_specs->max_frm_size) 2760
dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME; 2761 } 2762 2763 static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev) 2764 { 2765 struct hclge_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM]; 2766 int ret; 2767 int i; 2768 2769 /* set default specifications as devices lower than version V3 do not 2770 * support querying specifications from firmware. 2771 */ 2772 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { 2773 hclgevf_set_default_dev_specs(hdev); 2774 return 0; 2775 } 2776 2777 for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) { 2778 hclgevf_cmd_setup_basic_desc(&desc[i], 2779 HCLGE_OPC_QUERY_DEV_SPECS, true); 2780 desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT); 2781 } 2782 hclgevf_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true); 2783 2784 ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM); 2785 if (ret) 2786 return ret; 2787 2788 hclgevf_parse_dev_specs(hdev, desc); 2789 hclgevf_check_dev_specs(hdev); 2790 2791 return 0; 2792 } 2793 2794 static int hclgevf_pci_reset(struct hclgevf_dev *hdev) 2795 { 2796 struct pci_dev *pdev = hdev->pdev; 2797 int ret = 0; 2798 2799 if ((hdev->reset_type == HNAE3_VF_FULL_RESET || 2800 hdev->reset_type == HNAE3_FLR_RESET) && 2801 test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2802 hclgevf_misc_irq_uninit(hdev); 2803 hclgevf_uninit_msi(hdev); 2804 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2805 } 2806 2807 if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 2808 pci_set_master(pdev); 2809 ret = hclgevf_init_msi(hdev); 2810 if (ret) { 2811 dev_err(&pdev->dev, 2812 "failed(%d) to init MSI/MSI-X\n", ret); 2813 return ret; 2814 } 2815 2816 ret = hclgevf_misc_irq_init(hdev); 2817 if (ret) { 2818 hclgevf_uninit_msi(hdev); 2819 dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", 2820 ret); 2821 return ret; 2822 } 2823 2824 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2825 } 2826 2827 return ret; 2828 } 2829 2830 static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev) 2831 { 2832 struct hclge_vf_to_pf_msg send_msg; 2833 2834 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL, 2835 HCLGE_MBX_VPORT_LIST_CLEAR); 2836 return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 2837 } 2838 2839 static void hclgevf_init_rxd_adv_layout(struct hclgevf_dev *hdev) 2840 { 2841 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 2842 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 1); 2843 } 2844 2845 static void hclgevf_uninit_rxd_adv_layout(struct hclgevf_dev *hdev) 2846 { 2847 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) 2848 hclgevf_write_dev(&hdev->hw, HCLGEVF_RXD_ADV_LAYOUT_EN_REG, 0); 2849 } 2850 2851 static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) 2852 { 2853 struct pci_dev *pdev = hdev->pdev; 2854 int ret; 2855 2856 ret = hclgevf_pci_reset(hdev); 2857 if (ret) { 2858 dev_err(&pdev->dev, "pci reset failed %d\n", ret); 2859 return ret; 2860 } 2861 2862 hclgevf_arq_init(hdev); 2863 2864 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, 2865 &hdev->fw_version, false, 2866 hdev->reset_pending); 2867 if (ret) { 2868 dev_err(&pdev->dev, "cmd failed %d\n", ret); 2869 return ret; 2870 } 2871 2872 ret = hclgevf_rss_init_hw(hdev); 2873 if (ret) { 2874 dev_err(&hdev->pdev->dev, 2875 "failed(%d) to initialize RSS\n", ret); 2876 return ret; 2877 } 2878 2879 ret = hclgevf_config_gro(hdev); 2880 if (ret) 2881 return ret; 2882 2883 ret = hclgevf_init_vlan_config(hdev); 2884 if (ret) { 2885 dev_err(&hdev->pdev->dev, 2886 "failed(%d) 
to initialize VLAN config\n", ret); 2887 return ret; 2888 } 2889 2890 /* get current port based vlan state from PF */ 2891 ret = hclgevf_get_port_base_vlan_filter_state(hdev); 2892 if (ret) 2893 return ret; 2894 2895 set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state); 2896 2897 hclgevf_init_rxd_adv_layout(hdev); 2898 2899 dev_info(&hdev->pdev->dev, "Reset done\n"); 2900 2901 return 0; 2902 } 2903 2904 static int hclgevf_init_hdev(struct hclgevf_dev *hdev) 2905 { 2906 struct pci_dev *pdev = hdev->pdev; 2907 int ret; 2908 2909 ret = hclgevf_pci_init(hdev); 2910 if (ret) 2911 return ret; 2912 2913 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); 2914 if (ret) 2915 goto err_cmd_queue_init; 2916 2917 hclgevf_arq_init(hdev); 2918 2919 hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclgevf_cmq_ops); 2920 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, 2921 &hdev->fw_version, false, 2922 hdev->reset_pending); 2923 if (ret) 2924 goto err_cmd_init; 2925 2926 /* Get vf resource */ 2927 ret = hclgevf_query_vf_resource(hdev); 2928 if (ret) 2929 goto err_cmd_init; 2930 2931 ret = hclgevf_query_dev_specs(hdev); 2932 if (ret) { 2933 dev_err(&pdev->dev, 2934 "failed to query dev specifications, ret = %d\n", ret); 2935 goto err_cmd_init; 2936 } 2937 2938 ret = hclgevf_init_msi(hdev); 2939 if (ret) { 2940 dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); 2941 goto err_cmd_init; 2942 } 2943 2944 hclgevf_state_init(hdev); 2945 hdev->reset_level = HNAE3_VF_FUNC_RESET; 2946 hdev->reset_type = HNAE3_NONE_RESET; 2947 2948 ret = hclgevf_misc_irq_init(hdev); 2949 if (ret) 2950 goto err_misc_irq_init; 2951 2952 set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 2953 2954 ret = hclgevf_configure(hdev); 2955 if (ret) { 2956 dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); 2957 goto err_config; 2958 } 2959 2960 ret = hclgevf_alloc_tqps(hdev); 2961 if (ret) { 2962 dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret); 2963 goto err_config; 2964 } 2965 2966 ret = hclgevf_set_handle_info(hdev); 2967 if (ret) 2968 goto err_config; 2969 2970 ret = hclgevf_config_gro(hdev); 2971 if (ret) 2972 goto err_config; 2973 2974 /* Initialize RSS for this VF */ 2975 ret = hclge_comm_rss_init_cfg(&hdev->nic, hdev->ae_dev, 2976 &hdev->rss_cfg); 2977 if (ret) { 2978 dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret); 2979 goto err_config; 2980 } 2981 2982 ret = hclgevf_rss_init_hw(hdev); 2983 if (ret) { 2984 dev_err(&hdev->pdev->dev, 2985 "failed(%d) to initialize RSS\n", ret); 2986 goto err_config; 2987 } 2988 2989 /* ensure vf tbl list as empty before init */ 2990 ret = hclgevf_clear_vport_list(hdev); 2991 if (ret) { 2992 dev_err(&pdev->dev, 2993 "failed to clear tbl list configuration, ret = %d.\n", 2994 ret); 2995 goto err_config; 2996 } 2997 2998 ret = hclgevf_init_vlan_config(hdev); 2999 if (ret) { 3000 dev_err(&hdev->pdev->dev, 3001 "failed(%d) to initialize VLAN config\n", ret); 3002 goto err_config; 3003 } 3004 3005 hclgevf_init_rxd_adv_layout(hdev); 3006 3007 ret = hclgevf_devlink_init(hdev); 3008 if (ret) 3009 goto err_config; 3010 3011 set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state); 3012 3013 hdev->last_reset_time = jiffies; 3014 dev_info(&hdev->pdev->dev, "finished initializing %s driver\n", 3015 HCLGEVF_DRIVER_NAME); 3016 3017 hclgevf_task_schedule(hdev, round_jiffies_relative(HZ)); 3018 3019 return 0; 3020 3021 err_config: 3022 hclgevf_misc_irq_uninit(hdev); 3023 err_misc_irq_init: 3024 hclgevf_state_uninit(hdev); 3025 hclgevf_uninit_msi(hdev); 3026 err_cmd_init: 3027 
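/* error unwind: undo the init stages in reverse order */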
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 3028 err_cmd_queue_init: 3029 hclgevf_pci_uninit(hdev); 3030 clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); 3031 return ret; 3032 } 3033 3034 static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) 3035 { 3036 struct hclge_vf_to_pf_msg send_msg; 3037 3038 hclgevf_state_uninit(hdev); 3039 hclgevf_uninit_rxd_adv_layout(hdev); 3040 3041 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0); 3042 hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3043 3044 if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { 3045 hclgevf_misc_irq_uninit(hdev); 3046 hclgevf_uninit_msi(hdev); 3047 } 3048 3049 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); 3050 hclgevf_devlink_uninit(hdev); 3051 hclgevf_pci_uninit(hdev); 3052 hclgevf_uninit_mac_list(hdev); 3053 } 3054 3055 static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) 3056 { 3057 struct pci_dev *pdev = ae_dev->pdev; 3058 int ret; 3059 3060 ret = hclgevf_alloc_hdev(ae_dev); 3061 if (ret) { 3062 dev_err(&pdev->dev, "hclge device allocation failed\n"); 3063 return ret; 3064 } 3065 3066 ret = hclgevf_init_hdev(ae_dev->priv); 3067 if (ret) { 3068 dev_err(&pdev->dev, "hclge device initialization failed\n"); 3069 return ret; 3070 } 3071 3072 return 0; 3073 } 3074 3075 static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) 3076 { 3077 struct hclgevf_dev *hdev = ae_dev->priv; 3078 3079 hclgevf_uninit_hdev(hdev); 3080 ae_dev->priv = NULL; 3081 } 3082 3083 static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) 3084 { 3085 struct hnae3_handle *nic = &hdev->nic; 3086 struct hnae3_knic_private_info *kinfo = &nic->kinfo; 3087 3088 return min_t(u32, hdev->rss_size_max, 3089 hdev->num_tqps / kinfo->tc_info.num_tc); 3090 } 3091 3092 /** 3093 * hclgevf_get_channels - Get the current channels enabled and max supported. 3094 * @handle: hardware information for network interface 3095 * @ch: ethtool channels structure 3096 * 3097 * We don't support separate tx and rx queues as channels. The other count 3098 * represents how many queues are being used for control. max_combined counts 3099 * how many queue pairs we can support. They may not be mapped 1 to 1 with 3100 * q_vectors since we support a lot more queue pairs than q_vectors. 3101 **/ 3102 static void hclgevf_get_channels(struct hnae3_handle *handle, 3103 struct ethtool_channels *ch) 3104 { 3105 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3106 3107 ch->max_combined = hclgevf_get_max_channels(hdev); 3108 ch->other_count = 0; 3109 ch->max_other = 0; 3110 ch->combined_count = handle->kinfo.rss_size; 3111 } 3112 3113 static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle, 3114 u16 *alloc_tqps, u16 *max_rss_size) 3115 { 3116 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3117 3118 *alloc_tqps = hdev->num_tqps; 3119 *max_rss_size = hdev->rss_size_max; 3120 } 3121 3122 static void hclgevf_update_rss_size(struct hnae3_handle *handle, 3123 u32 new_tqps_num) 3124 { 3125 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3126 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3127 u16 max_rss_size; 3128 3129 kinfo->req_rss_size = new_tqps_num; 3130 3131 max_rss_size = min_t(u16, hdev->rss_size_max, 3132 hdev->num_tqps / kinfo->tc_info.num_tc); 3133 3134 /* Use the user's configuration when it is not larger than 3135 * max_rss_size, otherwise, use the maximum specification value. 
3136 */ 3137 if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size && 3138 kinfo->req_rss_size <= max_rss_size) 3139 kinfo->rss_size = kinfo->req_rss_size; 3140 else if (kinfo->rss_size > max_rss_size || 3141 (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) 3142 kinfo->rss_size = max_rss_size; 3143 3144 kinfo->num_tqps = kinfo->tc_info.num_tc * kinfo->rss_size; 3145 } 3146 3147 static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num, 3148 bool rxfh_configured) 3149 { 3150 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3151 struct hnae3_knic_private_info *kinfo = &handle->kinfo; 3152 u16 tc_offset[HCLGE_COMM_MAX_TC_NUM]; 3153 u16 tc_valid[HCLGE_COMM_MAX_TC_NUM]; 3154 u16 tc_size[HCLGE_COMM_MAX_TC_NUM]; 3155 u16 cur_rss_size = kinfo->rss_size; 3156 u16 cur_tqps = kinfo->num_tqps; 3157 u32 *rss_indir; 3158 unsigned int i; 3159 int ret; 3160 3161 hclgevf_update_rss_size(handle, new_tqps_num); 3162 3163 hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map, 3164 tc_offset, tc_valid, tc_size); 3165 ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, 3166 tc_valid, tc_size); 3167 if (ret) 3168 return ret; 3169 3170 /* RSS indirection table has been configured by user */ 3171 if (rxfh_configured) 3172 goto out; 3173 3174 /* Reinitializes the rss indirect table according to the new RSS size */ 3175 rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size, 3176 sizeof(u32), GFP_KERNEL); 3177 if (!rss_indir) 3178 return -ENOMEM; 3179 3180 for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++) 3181 rss_indir[i] = i % kinfo->rss_size; 3182 3183 hdev->rss_cfg.rss_size = kinfo->rss_size; 3184 3185 ret = hclgevf_set_rss(handle, rss_indir, NULL, 0); 3186 if (ret) 3187 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", 3188 ret); 3189 3190 kfree(rss_indir); 3191 3192 out: 3193 if (!ret) 3194 dev_info(&hdev->pdev->dev, 3195 "Channels changed, rss_size from %u to %u, tqps from %u to %u", 3196 cur_rss_size, kinfo->rss_size, 3197 cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc); 3198 3199 return ret; 3200 } 3201 3202 static int hclgevf_get_status(struct hnae3_handle *handle) 3203 { 3204 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3205 3206 return hdev->hw.mac.link; 3207 } 3208 3209 static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle, 3210 u8 *auto_neg, u32 *speed, 3211 u8 *duplex, u32 *lane_num) 3212 { 3213 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3214 3215 if (speed) 3216 *speed = hdev->hw.mac.speed; 3217 if (duplex) 3218 *duplex = hdev->hw.mac.duplex; 3219 if (auto_neg) 3220 *auto_neg = AUTONEG_DISABLE; 3221 } 3222 3223 void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, 3224 u8 duplex) 3225 { 3226 hdev->hw.mac.speed = speed; 3227 hdev->hw.mac.duplex = duplex; 3228 } 3229 3230 static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) 3231 { 3232 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3233 bool gro_en_old = hdev->gro_en; 3234 int ret; 3235 3236 hdev->gro_en = enable; 3237 ret = hclgevf_config_gro(hdev); 3238 if (ret) 3239 hdev->gro_en = gro_en_old; 3240 3241 return ret; 3242 } 3243 3244 static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, 3245 u8 *module_type) 3246 { 3247 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3248 3249 if (media_type) 3250 *media_type = hdev->hw.mac.media_type; 3251 3252 if (module_type) 3253 *module_type = hdev->hw.mac.module_type; 3254 } 3255 3256 static bool 
hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) 3257 { 3258 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3259 3260 return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); 3261 } 3262 3263 static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle) 3264 { 3265 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3266 3267 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); 3268 } 3269 3270 static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) 3271 { 3272 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3273 3274 return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); 3275 } 3276 3277 static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) 3278 { 3279 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3280 3281 return hdev->rst_stats.hw_rst_done_cnt; 3282 } 3283 3284 static void hclgevf_get_link_mode(struct hnae3_handle *handle, 3285 unsigned long *supported, 3286 unsigned long *advertising) 3287 { 3288 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); 3289 3290 *supported = hdev->hw.mac.supported; 3291 *advertising = hdev->hw.mac.advertising; 3292 } 3293 3294 void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, 3295 struct hclge_mbx_port_base_vlan *port_base_vlan) 3296 { 3297 struct hnae3_handle *nic = &hdev->nic; 3298 struct hclge_vf_to_pf_msg send_msg; 3299 int ret; 3300 3301 rtnl_lock(); 3302 3303 if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) || 3304 test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) { 3305 dev_warn(&hdev->pdev->dev, 3306 "is resetting when updating port based vlan info\n"); 3307 rtnl_unlock(); 3308 return; 3309 } 3310 3311 ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); 3312 if (ret) { 3313 rtnl_unlock(); 3314 return; 3315 } 3316 3317 /* send msg to PF and wait update port based vlan info */ 3318 hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, 3319 HCLGE_MBX_PORT_BASE_VLAN_CFG); 3320 memcpy(send_msg.data, port_base_vlan, sizeof(*port_base_vlan)); 3321 ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); 3322 if (!ret) { 3323 if (state == HNAE3_PORT_BASE_VLAN_DISABLE) 3324 nic->port_base_vlan_state = state; 3325 else 3326 nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; 3327 } 3328 3329 hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); 3330 rtnl_unlock(); 3331 } 3332 3333 static const struct hnae3_ae_ops hclgevf_ops = { 3334 .init_ae_dev = hclgevf_init_ae_dev, 3335 .uninit_ae_dev = hclgevf_uninit_ae_dev, 3336 .reset_prepare = hclgevf_reset_prepare_general, 3337 .reset_done = hclgevf_reset_done, 3338 .init_client_instance = hclgevf_init_client_instance, 3339 .uninit_client_instance = hclgevf_uninit_client_instance, 3340 .start = hclgevf_ae_start, 3341 .stop = hclgevf_ae_stop, 3342 .client_start = hclgevf_client_start, 3343 .client_stop = hclgevf_client_stop, 3344 .map_ring_to_vector = hclgevf_map_ring_to_vector, 3345 .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, 3346 .get_vector = hclgevf_get_vector, 3347 .put_vector = hclgevf_put_vector, 3348 .reset_queue = hclgevf_reset_tqp, 3349 .get_mac_addr = hclgevf_get_mac_addr, 3350 .set_mac_addr = hclgevf_set_mac_addr, 3351 .add_uc_addr = hclgevf_add_uc_addr, 3352 .rm_uc_addr = hclgevf_rm_uc_addr, 3353 .add_mc_addr = hclgevf_add_mc_addr, 3354 .rm_mc_addr = hclgevf_rm_mc_addr, 3355 .get_stats = hclgevf_get_stats, 3356 .update_stats = hclgevf_update_stats, 3357 .get_strings = hclgevf_get_strings, 3358 .get_sset_count = hclgevf_get_sset_count, 3359 .get_rss_key_size = 
hclge_comm_get_rss_key_size, 3360 .get_rss = hclgevf_get_rss, 3361 .set_rss = hclgevf_set_rss, 3362 .get_rss_tuple = hclgevf_get_rss_tuple, 3363 .set_rss_tuple = hclgevf_set_rss_tuple, 3364 .get_tc_size = hclgevf_get_tc_size, 3365 .get_fw_version = hclgevf_get_fw_version, 3366 .set_vlan_filter = hclgevf_set_vlan_filter, 3367 .enable_vlan_filter = hclgevf_enable_vlan_filter, 3368 .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, 3369 .reset_event = hclgevf_reset_event, 3370 .set_default_reset_request = hclgevf_set_def_reset_request, 3371 .set_channels = hclgevf_set_channels, 3372 .get_channels = hclgevf_get_channels, 3373 .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, 3374 .get_regs_len = hclgevf_get_regs_len, 3375 .get_regs = hclgevf_get_regs, 3376 .get_status = hclgevf_get_status, 3377 .get_ksettings_an_result = hclgevf_get_ksettings_an_result, 3378 .get_media_type = hclgevf_get_media_type, 3379 .get_hw_reset_stat = hclgevf_get_hw_reset_stat, 3380 .ae_dev_resetting = hclgevf_ae_dev_resetting, 3381 .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, 3382 .set_gro_en = hclgevf_gro_en, 3383 .set_mtu = hclgevf_set_mtu, 3384 .get_global_queue_id = hclgevf_get_qid_global, 3385 .set_timer_task = hclgevf_set_timer_task, 3386 .get_link_mode = hclgevf_get_link_mode, 3387 .set_promisc_mode = hclgevf_set_promisc_mode, 3388 .request_update_promisc_mode = hclgevf_request_update_promisc_mode, 3389 .get_cmdq_stat = hclgevf_get_cmdq_stat, 3390 }; 3391 3392 static struct hnae3_ae_algo ae_algovf = { 3393 .ops = &hclgevf_ops, 3394 .pdev_id_table = ae_algovf_pci_tbl, 3395 }; 3396 3397 static int __init hclgevf_init(void) 3398 { 3399 pr_info("%s is initializing\n", HCLGEVF_NAME); 3400 3401 hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME); 3402 if (!hclgevf_wq) { 3403 pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); 3404 return -ENOMEM; 3405 } 3406 3407 hnae3_register_ae_algo(&ae_algovf); 3408 3409 return 0; 3410 } 3411 3412 static void __exit hclgevf_exit(void) 3413 { 3414 hnae3_unregister_ae_algo(&ae_algovf); 3415 destroy_workqueue(hclgevf_wq); 3416 } 3417 module_init(hclgevf_init); 3418 module_exit(hclgevf_exit); 3419 3420 MODULE_LICENSE("GPL"); 3421 MODULE_AUTHOR("Huawei Tech. Co., Ltd."); 3422 MODULE_DESCRIPTION("HCLGEVF Driver"); 3423 MODULE_VERSION(HCLGEVF_MOD_VERSION); 3424