Lines matching refs:hdev in drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c (HNS3 PF driver)
50 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
51 static int hclge_init_vlan_config(struct hclge_dev *hdev);
52 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
55 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
56 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
59 static int hclge_set_default_loopback(struct hclge_dev *hdev);
61 static void hclge_sync_mac_table(struct hclge_dev *hdev);
62 static void hclge_restore_hw_table(struct hclge_dev *hdev);
63 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
64 static void hclge_sync_fd_table(struct hclge_dev *hdev);
65 static void hclge_update_fec_stats(struct hclge_dev *hdev);
66 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
68 static int hclge_update_port_info(struct hclge_dev *hdev);
440 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev) in hclge_mac_update_stats_defective() argument
444 u64 *data = (u64 *)(&hdev->mac_stats); in hclge_mac_update_stats_defective()
452 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); in hclge_mac_update_stats_defective()
454 dev_err(&hdev->pdev->dev, in hclge_mac_update_stats_defective()
476 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev) in hclge_mac_update_stats_complete() argument
480 u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num; in hclge_mac_update_stats_complete()
481 u64 *data = (u64 *)(&hdev->mac_stats); in hclge_mac_update_stats_complete()
500 ret = hclge_cmd_send(&hdev->hw, desc, desc_num); in hclge_mac_update_stats_complete()
506 data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num); in hclge_mac_update_stats_complete()
523 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num) in hclge_mac_query_reg_num() argument
533 if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) { in hclge_mac_query_reg_num()
539 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_mac_query_reg_num()
541 dev_err(&hdev->pdev->dev, in hclge_mac_query_reg_num()
549 dev_err(&hdev->pdev->dev, in hclge_mac_query_reg_num()
557 int hclge_mac_update_stats(struct hclge_dev *hdev) in hclge_mac_update_stats() argument
560 if (hdev->ae_dev->dev_specs.mac_stats_num) in hclge_mac_update_stats()
561 return hclge_mac_update_stats_complete(hdev); in hclge_mac_update_stats()
563 return hclge_mac_update_stats_defective(hdev); in hclge_mac_update_stats()
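
The blocks above fit together: hclge_mac_query_reg_num() asks firmware how many MAC stats registers exist, hclge_query_mac_stats_num() (line 1426 below) caches that count in dev_specs.mac_stats_num, and hclge_mac_update_stats() dispatches on it, with the complete path clamping its copy length via min_t() so neither the local struct nor the firmware buffer is overrun. A minimal userspace sketch of that clamp-and-dispatch, under simplified types (every identifier here is hypothetical, not the driver's API):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct mac_stats { uint64_t regs[8]; };   /* local mirror of hw counters */

/* Copy at most min(local capacity, hw-reported count) registers,
 * mirroring the min_t() clamp in hclge_mac_update_stats_complete(). */
static void copy_stats(struct mac_stats *dst, const uint64_t *hw,
                       uint32_t hw_reg_num)
{
    uint32_t n = MIN(sizeof(dst->regs) / sizeof(uint64_t), hw_reg_num);

    memcpy(dst->regs, hw, n * sizeof(uint64_t));
}

/* Dispatch on the queried capability, as hclge_mac_update_stats() does. */
static int update_stats(struct mac_stats *s, const uint64_t *hw,
                        uint32_t mac_stats_num)
{
    if (mac_stats_num) {          /* firmware told us how many regs exist */
        copy_stats(s, hw, mac_stats_num);
        return 0;
    }
    return -1;                    /* fall back to the fixed-size path */
}

int main(void)
{
    uint64_t hw[16] = { 1, 2, 3 };
    struct mac_stats s = { 0 };

    update_stats(&s, hw, 16);     /* clamped to the 8 local slots */
    printf("%llu\n", (unsigned long long)s.regs[2]);
    return 0;
}
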
566 static int hclge_comm_get_count(struct hclge_dev *hdev, in hclge_comm_get_count() argument
574 if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num) in hclge_comm_get_count()
580 static u64 *hclge_comm_get_stats(struct hclge_dev *hdev, in hclge_comm_get_stats() argument
588 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) in hclge_comm_get_stats()
591 *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset); in hclge_comm_get_stats()
598 static void hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset, in hclge_comm_get_strings() argument
608 if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num) in hclge_comm_get_strings()
615 static void hclge_update_stats_for_all(struct hclge_dev *hdev) in hclge_update_stats_for_all() argument
620 handle = &hdev->vport[0].nic; in hclge_update_stats_for_all()
622 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); in hclge_update_stats_for_all()
624 dev_err(&hdev->pdev->dev, in hclge_update_stats_for_all()
630 hclge_update_fec_stats(hdev); in hclge_update_stats_for_all()
632 status = hclge_mac_update_stats(hdev); in hclge_update_stats_for_all()
634 dev_err(&hdev->pdev->dev, in hclge_update_stats_for_all()
641 struct hclge_dev *hdev = vport->back; in hclge_update_stats() local
644 if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state)) in hclge_update_stats()
647 status = hclge_mac_update_stats(hdev); in hclge_update_stats()
649 dev_err(&hdev->pdev->dev, in hclge_update_stats()
653 status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw); in hclge_update_stats()
655 dev_err(&hdev->pdev->dev, in hclge_update_stats()
659 clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state); in hclge_update_stats()
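
hclge_update_stats() brackets the whole refresh with test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING)/clear_bit(), so a concurrent caller returns immediately instead of racing the first; hclge_update_fec_stats() (line 2805 below) uses the same guard. A minimal sketch of that non-blocking guard with C11 atomics (the kernel uses its own bitops, not <stdatomic.h>):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag updating = ATOMIC_FLAG_INIT;

/* Returns false if another thread is already mid-update, mirroring
 * the early return on test_and_set_bit() in hclge_update_stats(). */
static bool update_stats_once(void)
{
    if (atomic_flag_test_and_set(&updating))
        return false;              /* someone else holds the guard bit */

    /* ... refresh MAC and TQP statistics here ... */

    atomic_flag_clear(&updating);  /* clear_bit() equivalent */
    return true;
}

int main(void)
{
    printf("first caller ran: %d\n", update_stats_once());
    return 0;
}
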
671 struct hclge_dev *hdev = vport->back; in hclge_get_sset_count() local
682 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 || in hclge_get_sset_count()
683 hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || in hclge_get_sset_count()
684 hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || in hclge_get_sset_count()
685 hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { in hclge_get_sset_count()
690 if (hdev->ae_dev->dev_specs.hilink_version != in hclge_get_sset_count()
701 if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && in hclge_get_sset_count()
702 hdev->hw.mac.phydev->drv->set_loopback) || in hclge_get_sset_count()
703 hnae3_dev_phy_imp_supported(hdev)) { in hclge_get_sset_count()
708 count = hclge_comm_get_count(hdev, g_mac_stats_string, in hclge_get_sset_count()
720 struct hclge_dev *hdev = vport->back; in hclge_get_strings() local
726 hclge_comm_get_strings(hdev, stringset, g_mac_stats_string, in hclge_get_strings()
756 struct hclge_dev *hdev = vport->back; in hclge_get_stats() local
759 p = hclge_comm_get_stats(hdev, g_mac_stats_string, in hclge_get_stats()
768 struct hclge_dev *hdev = vport->back; in hclge_get_mac_stat() local
772 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num; in hclge_get_mac_stat()
773 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num; in hclge_get_mac_stat()
776 static int hclge_parse_func_status(struct hclge_dev *hdev, in hclge_parse_func_status() argument
786 hdev->flag |= HCLGE_FLAG_MAIN; in hclge_parse_func_status()
788 hdev->flag &= ~HCLGE_FLAG_MAIN; in hclge_parse_func_status()
790 hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK; in hclge_parse_func_status()
794 static int hclge_query_function_status(struct hclge_dev *hdev) in hclge_query_function_status() argument
807 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_function_status()
809 dev_err(&hdev->pdev->dev, in hclge_query_function_status()
820 return hclge_parse_func_status(hdev, req); in hclge_query_function_status()
823 static int hclge_query_pf_resource(struct hclge_dev *hdev) in hclge_query_pf_resource() argument
830 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_pf_resource()
832 dev_err(&hdev->pdev->dev, in hclge_query_pf_resource()
838 hdev->num_tqps = le16_to_cpu(req->tqp_num) + in hclge_query_pf_resource()
840 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; in hclge_query_pf_resource()
843 hdev->tx_buf_size = in hclge_query_pf_resource()
846 hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; in hclge_query_pf_resource()
848 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); in hclge_query_pf_resource()
851 hdev->dv_buf_size = in hclge_query_pf_resource()
854 hdev->dv_buf_size = HCLGE_DEFAULT_DV; in hclge_query_pf_resource()
856 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); in hclge_query_pf_resource()
858 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic); in hclge_query_pf_resource()
859 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) { in hclge_query_pf_resource()
860 dev_err(&hdev->pdev->dev, in hclge_query_pf_resource()
862 hdev->num_nic_msi); in hclge_query_pf_resource()
866 if (hnae3_dev_roce_supported(hdev)) { in hclge_query_pf_resource()
867 hdev->num_roce_msi = in hclge_query_pf_resource()
873 hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi; in hclge_query_pf_resource()
875 hdev->num_msi = hdev->num_nic_msi; in hclge_query_pf_resource()
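
hclge_query_pf_resource() rounds the tx and dv buffer sizes up to HCLGE_BUF_SIZE_UNIT, rejects setups whose NIC MSI count is below HNAE3_MIN_VECTOR_NUM, and budgets num_msi as nic-only or nic + roce depending on RoCE support. The same arithmetic as a hedged sketch (the constants are stand-ins, not the driver's values):

#include <stdint.h>
#include <stdio.h>

#define BUF_SIZE_UNIT   256u   /* stand-in for HCLGE_BUF_SIZE_UNIT */
#define MIN_VECTOR_NUM  2u     /* stand-in for HNAE3_MIN_VECTOR_NUM */

/* Integer round-up, matching what the kernel's roundup() produces here. */
static uint32_t round_up_to(uint32_t v, uint32_t unit)
{
    return ((v + unit - 1) / unit) * unit;
}

static int budget_msi(uint32_t num_nic, uint32_t num_roce,
                      int roce_supported, uint32_t *num_msi)
{
    if (num_nic < MIN_VECTOR_NUM)
        return -1;                 /* dev_err + error return in the driver */
    *num_msi = roce_supported ? num_nic + num_roce : num_nic;
    return 0;
}

int main(void)
{
    uint32_t msi;

    printf("tx_buf=%u\n", round_up_to(1000, BUF_SIZE_UNIT));  /* -> 1024 */
    if (!budget_msi(16, 8, 1, &msi))
        printf("num_msi=%u\n", msi);                          /* -> 24 */
    return 0;
}
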
947 struct hclge_dev *hdev = vport->back; in hclge_check_port_speed() local
948 u32 speed_ability = hdev->hw.mac.speed_ability; in hclge_check_port_speed()
1119 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev, in hclge_parse_fiber_link_mode() argument
1122 struct hclge_mac *mac = &hdev->hw.mac; in hclge_parse_fiber_link_mode()
1131 if (hnae3_dev_fec_supported(hdev)) in hclge_parse_fiber_link_mode()
1134 if (hnae3_dev_pause_supported(hdev)) in hclge_parse_fiber_link_mode()
1141 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev, in hclge_parse_backplane_link_mode() argument
1144 struct hclge_mac *mac = &hdev->hw.mac; in hclge_parse_backplane_link_mode()
1147 if (hnae3_dev_fec_supported(hdev)) in hclge_parse_backplane_link_mode()
1150 if (hnae3_dev_pause_supported(hdev)) in hclge_parse_backplane_link_mode()
1157 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev, in hclge_parse_copper_link_mode() argument
1160 unsigned long *supported = hdev->hw.mac.supported; in hclge_parse_copper_link_mode()
1182 if (hnae3_dev_pause_supported(hdev)) { in hclge_parse_copper_link_mode()
1191 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability) in hclge_parse_link_mode() argument
1193 u8 media_type = hdev->hw.mac.media_type; in hclge_parse_link_mode()
1196 hclge_parse_fiber_link_mode(hdev, speed_ability); in hclge_parse_link_mode()
1198 hclge_parse_copper_link_mode(hdev, speed_ability); in hclge_parse_link_mode()
1200 hclge_parse_backplane_link_mode(hdev, speed_ability); in hclge_parse_link_mode()
1330 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) in hclge_get_cfg() argument
1351 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); in hclge_get_cfg()
1353 dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret); in hclge_get_cfg()
1362 static void hclge_set_default_dev_specs(struct hclge_dev *hdev) in hclge_set_default_dev_specs() argument
1366 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_set_default_dev_specs()
1379 static void hclge_parse_dev_specs(struct hclge_dev *hdev, in hclge_parse_dev_specs() argument
1382 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_parse_dev_specs()
1404 static void hclge_check_dev_specs(struct hclge_dev *hdev) in hclge_check_dev_specs() argument
1406 struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs; in hclge_check_dev_specs()
1426 static int hclge_query_mac_stats_num(struct hclge_dev *hdev) in hclge_query_mac_stats_num() argument
1431 ret = hclge_mac_query_reg_num(hdev, &reg_num); in hclge_query_mac_stats_num()
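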
1435 hdev->ae_dev->dev_specs.mac_stats_num = reg_num; in hclge_query_mac_stats_num()
1439 static int hclge_query_dev_specs(struct hclge_dev *hdev) in hclge_query_dev_specs() argument
1445 ret = hclge_query_mac_stats_num(hdev); in hclge_query_dev_specs()
1452 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) { in hclge_query_dev_specs()
1453 hclge_set_default_dev_specs(hdev); in hclge_query_dev_specs()
1464 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM); in hclge_query_dev_specs()
1468 hclge_parse_dev_specs(hdev, desc); in hclge_query_dev_specs()
1469 hclge_check_dev_specs(hdev); in hclge_query_dev_specs()
1474 static int hclge_get_cap(struct hclge_dev *hdev) in hclge_get_cap() argument
1478 ret = hclge_query_function_status(hdev); in hclge_get_cap()
1480 dev_err(&hdev->pdev->dev, in hclge_get_cap()
1486 return hclge_query_pf_resource(hdev); in hclge_get_cap()
1489 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev) in hclge_init_kdump_kernel_config() argument
1497 dev_info(&hdev->pdev->dev, in hclge_init_kdump_kernel_config()
1501 hdev->num_tqps = hdev->num_req_vfs + 1; in hclge_init_kdump_kernel_config()
1502 hdev->num_tx_desc = HCLGE_MIN_TX_DESC; in hclge_init_kdump_kernel_config()
1503 hdev->num_rx_desc = HCLGE_MIN_RX_DESC; in hclge_init_kdump_kernel_config()
1506 static void hclge_init_tc_config(struct hclge_dev *hdev) in hclge_init_tc_config() argument
1510 if (hdev->tc_max > HNAE3_MAX_TC || in hclge_init_tc_config()
1511 hdev->tc_max < 1) { in hclge_init_tc_config()
1512 dev_warn(&hdev->pdev->dev, "TC num = %u.\n", in hclge_init_tc_config()
1513 hdev->tc_max); in hclge_init_tc_config()
1514 hdev->tc_max = 1; in hclge_init_tc_config()
1518 if (!hnae3_dev_dcb_supported(hdev)) { in hclge_init_tc_config()
1519 hdev->tc_max = 1; in hclge_init_tc_config()
1520 hdev->pfc_max = 0; in hclge_init_tc_config()
1522 hdev->pfc_max = hdev->tc_max; in hclge_init_tc_config()
1525 hdev->tm_info.num_tc = 1; in hclge_init_tc_config()
1528 for (i = 0; i < hdev->tm_info.num_tc; i++) in hclge_init_tc_config()
1529 hnae3_set_bit(hdev->hw_tc_map, i, 1); in hclge_init_tc_config()
1531 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; in hclge_init_tc_config()
1534 static int hclge_configure(struct hclge_dev *hdev) in hclge_configure() argument
1536 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_configure()
1540 ret = hclge_get_cfg(hdev, &cfg); in hclge_configure()
1544 hdev->base_tqp_pid = 0; in hclge_configure()
1545 hdev->vf_rss_size_max = cfg.vf_rss_size_max; in hclge_configure()
1546 hdev->pf_rss_size_max = cfg.pf_rss_size_max; in hclge_configure()
1547 hdev->rx_buf_len = cfg.rx_buf_len; in hclge_configure()
1548 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr); in hclge_configure()
1549 hdev->hw.mac.media_type = cfg.media_type; in hclge_configure()
1550 hdev->hw.mac.phy_addr = cfg.phy_addr; in hclge_configure()
1551 hdev->num_tx_desc = cfg.tqp_desc_num; in hclge_configure()
1552 hdev->num_rx_desc = cfg.tqp_desc_num; in hclge_configure()
1553 hdev->tm_info.num_pg = 1; in hclge_configure()
1554 hdev->tc_max = cfg.tc_num; in hclge_configure()
1555 hdev->tm_info.hw_pfc_map = 0; in hclge_configure()
1557 hdev->wanted_umv_size = cfg.umv_space; in hclge_configure()
1559 hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size; in hclge_configure()
1560 hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; in hclge_configure()
1561 hdev->gro_en = true; in hclge_configure()
1565 if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) { in hclge_configure()
1566 hdev->fd_en = true; in hclge_configure()
1567 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_configure()
1570 ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); in hclge_configure()
1572 dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n", in hclge_configure()
1576 hdev->hw.mac.req_speed = hdev->hw.mac.speed; in hclge_configure()
1577 hdev->hw.mac.req_autoneg = AUTONEG_ENABLE; in hclge_configure()
1578 hdev->hw.mac.req_duplex = DUPLEX_FULL; in hclge_configure()
1580 hclge_parse_link_mode(hdev, cfg.speed_ability); in hclge_configure()
1582 hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability); in hclge_configure()
1584 hclge_init_tc_config(hdev); in hclge_configure()
1585 hclge_init_kdump_kernel_config(hdev); in hclge_configure()
1590 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min, in hclge_config_tso() argument
1602 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_tso()
1605 static int hclge_config_gro(struct hclge_dev *hdev) in hclge_config_gro() argument
1611 if (!hnae3_ae_dev_gro_supported(hdev->ae_dev)) in hclge_config_gro()
1617 req->gro_en = hdev->gro_en ? 1 : 0; in hclge_config_gro()
1619 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_gro()
1621 dev_err(&hdev->pdev->dev, in hclge_config_gro()
1627 static int hclge_alloc_tqps(struct hclge_dev *hdev) in hclge_alloc_tqps() argument
1629 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_alloc_tqps()
1633 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, in hclge_alloc_tqps()
1635 if (!hdev->htqp) in hclge_alloc_tqps()
1638 tqp = hdev->htqp; in hclge_alloc_tqps()
1640 for (i = 0; i < hdev->num_tqps; i++) { in hclge_alloc_tqps()
1641 tqp->dev = &hdev->pdev->dev; in hclge_alloc_tqps()
1645 tqp->q.buf_size = hdev->rx_buf_len; in hclge_alloc_tqps()
1646 tqp->q.tx_desc_num = hdev->num_tx_desc; in hclge_alloc_tqps()
1647 tqp->q.rx_desc_num = hdev->num_rx_desc; in hclge_alloc_tqps()
1653 tqp->q.io_base = hdev->hw.hw.io_base + in hclge_alloc_tqps()
1657 tqp->q.io_base = hdev->hw.hw.io_base + in hclge_alloc_tqps()
1668 tqp->q.mem_base = hdev->hw.hw.mem_base + in hclge_alloc_tqps()
1669 HCLGE_TQP_MEM_OFFSET(hdev, i); in hclge_alloc_tqps()
1677 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, in hclge_map_tqps_to_func() argument
1694 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_map_tqps_to_func()
1696 dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret); in hclge_map_tqps_to_func()
1704 struct hclge_dev *hdev = vport->back; in hclge_assign_tqp() local
1707 for (i = 0, alloced = 0; i < hdev->num_tqps && in hclge_assign_tqp()
1709 if (!hdev->htqp[i].alloced) { in hclge_assign_tqp()
1710 hdev->htqp[i].q.handle = &vport->nic; in hclge_assign_tqp()
1711 hdev->htqp[i].q.tqp_index = alloced; in hclge_assign_tqp()
1712 hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc; in hclge_assign_tqp()
1713 hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc; in hclge_assign_tqp()
1714 kinfo->tqp[alloced] = &hdev->htqp[i].q; in hclge_assign_tqp()
1715 hdev->htqp[i].alloced = true; in hclge_assign_tqp()
1720 kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max, in hclge_assign_tqp()
1721 vport->alloc_tqps / hdev->tm_info.num_tc); in hclge_assign_tqp()
1725 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc); in hclge_assign_tqp()
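
hclge_assign_tqp() is a first-fit scan over hdev->htqp: each unallocated queue pair is claimed for the vport until the requested count is taken, and rss_size is then min(pf_rss_size_max, alloc_tqps / num_tc). A compact sketch of the walk, with simplified stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct tqp { bool alloced; int owner; };

/* First-fit: claim up to 'want' free queue pairs for 'vport_id',
 * returning how many were actually allocated. */
static int assign_tqps(struct tqp *pool, int pool_len, int vport_id, int want)
{
    int i, got = 0;

    for (i = 0; i < pool_len && got < want; i++) {
        if (!pool[i].alloced) {
            pool[i].alloced = true;
            pool[i].owner = vport_id;
            got++;
        }
    }
    return got;
}

int main(void)
{
    struct tqp pool[8] = { 0 };
    int alloc_tqps = assign_tqps(pool, 8, 1, 4);
    int pf_rss_size_max = 16, num_tc = 2;
    int rss = alloc_tqps / num_tc;

    if (rss > pf_rss_size_max)
        rss = pf_rss_size_max;     /* the min_t() in hclge_assign_tqp() */
    printf("alloced=%d rss_size=%d\n", alloc_tqps, rss);
    return 0;
}
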
1736 struct hclge_dev *hdev = vport->back; in hclge_knic_setup() local
1742 kinfo->rx_buf_len = hdev->rx_buf_len; in hclge_knic_setup()
1743 kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size; in hclge_knic_setup()
1745 kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps, in hclge_knic_setup()
1752 dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); in hclge_knic_setup()
1757 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, in hclge_map_tqp_to_vport() argument
1772 ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, in hclge_map_tqp_to_vport()
1781 static int hclge_map_tqp(struct hclge_dev *hdev) in hclge_map_tqp() argument
1783 struct hclge_vport *vport = hdev->vport; in hclge_map_tqp()
1786 num_vport = hdev->num_req_vfs + 1; in hclge_map_tqp()
1790 ret = hclge_map_tqp_to_vport(hdev, vport); in hclge_map_tqp()
1803 struct hclge_dev *hdev = vport->back; in hclge_vport_setup() local
1806 nic->pdev = hdev->pdev; in hclge_vport_setup()
1808 bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits, in hclge_vport_setup()
1810 nic->kinfo.io_base = hdev->hw.hw.io_base; in hclge_vport_setup()
1813 hdev->num_tx_desc, hdev->num_rx_desc); in hclge_vport_setup()
1815 dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret); in hclge_vport_setup()
1820 static int hclge_alloc_vport(struct hclge_dev *hdev) in hclge_alloc_vport() argument
1822 struct pci_dev *pdev = hdev->pdev; in hclge_alloc_vport()
1830 num_vport = hdev->num_req_vfs + 1; in hclge_alloc_vport()
1832 if (hdev->num_tqps < num_vport) { in hclge_alloc_vport()
1833 dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)", in hclge_alloc_vport()
1834 hdev->num_tqps, num_vport); in hclge_alloc_vport()
1839 tqp_per_vport = hdev->num_tqps / num_vport; in hclge_alloc_vport()
1840 tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; in hclge_alloc_vport()
1847 hdev->vport = vport; in hclge_alloc_vport()
1848 hdev->num_alloc_vport = num_vport; in hclge_alloc_vport()
1851 hdev->num_alloc_vfs = hdev->num_req_vfs; in hclge_alloc_vport()
1854 vport->back = hdev; in hclge_alloc_vport()
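
hclge_alloc_vport() splits num_tqps evenly over num_req_vfs + 1 vports and hands the division remainder to the main (PF) vport, after rejecting configurations with fewer queues than vports. Worked through in a small sketch:

#include <stdio.h>

int main(void)
{
    unsigned int num_tqps = 22, num_req_vfs = 4;
    unsigned int num_vport = num_req_vfs + 1;        /* VFs plus the PF */

    if (num_tqps < num_vport) {
        fprintf(stderr, "tqps(%u) is less than vports(%u)\n",
                num_tqps, num_vport);
        return 1;
    }

    unsigned int tqp_per_vport = num_tqps / num_vport;            /* 4 */
    unsigned int tqp_main_vport =
        tqp_per_vport + num_tqps % num_vport;                     /* 6 */

    printf("per-VF=%u main=%u\n", tqp_per_vport, tqp_main_vport);
    return 0;
}
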
1884 static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, in hclge_cmd_alloc_tx_buff() argument
1906 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cmd_alloc_tx_buff()
1908 dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", in hclge_cmd_alloc_tx_buff()
1914 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, in hclge_tx_buffer_alloc() argument
1917 int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); in hclge_tx_buffer_alloc()
1920 dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret); in hclge_tx_buffer_alloc()
1925 static u32 hclge_get_tc_num(struct hclge_dev *hdev) in hclge_get_tc_num() argument
1931 if (hdev->hw_tc_map & BIT(i)) in hclge_get_tc_num()
1937 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, in hclge_get_pfc_priv_num() argument
1946 if ((hdev->tm_info.hw_pfc_map & BIT(i)) && in hclge_get_pfc_priv_num()
1955 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, in hclge_get_no_pfc_priv_num() argument
1964 if (hdev->hw_tc_map & BIT(i) && in hclge_get_no_pfc_priv_num()
1965 !(hdev->tm_info.hw_pfc_map & BIT(i)) && in hclge_get_no_pfc_priv_num()
1997 static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, in hclge_is_rx_buf_ok() argument
2002 u32 tc_num = hclge_get_tc_num(hdev); in hclge_is_rx_buf_ok()
2007 aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); in hclge_is_rx_buf_ok()
2009 if (hnae3_dev_dcb_supported(hdev)) in hclge_is_rx_buf_ok()
2011 hdev->dv_buf_size; in hclge_is_rx_buf_ok()
2014 + hdev->dv_buf_size; in hclge_is_rx_buf_ok()
2026 if (hnae3_dev_dcb_supported(hdev)) { in hclge_is_rx_buf_ok()
2027 buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; in hclge_is_rx_buf_ok()
2037 if (hnae3_dev_dcb_supported(hdev)) { in hclge_is_rx_buf_ok()
2038 hi_thrd = shared_buf - hdev->dv_buf_size; in hclge_is_rx_buf_ok()
2063 static int hclge_tx_buffer_calc(struct hclge_dev *hdev, in hclge_tx_buffer_calc() argument
2068 total_size = hdev->pkt_buf_size; in hclge_tx_buffer_calc()
2074 if (hdev->hw_tc_map & BIT(i)) { in hclge_tx_buffer_calc()
2075 if (total_size < hdev->tx_buf_size) in hclge_tx_buffer_calc()
2078 priv->tx_buf_size = hdev->tx_buf_size; in hclge_tx_buffer_calc()
2089 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max, in hclge_rx_buf_calc_all() argument
2092 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_rx_buf_calc_all()
2093 u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT); in hclge_rx_buf_calc_all()
2104 if (!(hdev->hw_tc_map & BIT(i))) in hclge_rx_buf_calc_all()
2109 if (hdev->tm_info.hw_pfc_map & BIT(i)) { in hclge_rx_buf_calc_all()
2119 priv->buf_size = priv->wl.high + hdev->dv_buf_size; in hclge_rx_buf_calc_all()
2122 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); in hclge_rx_buf_calc_all()
2125 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev, in hclge_drop_nopfc_buf_till_fit() argument
2128 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_drop_nopfc_buf_till_fit()
2129 int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); in hclge_drop_nopfc_buf_till_fit()
2137 if (hdev->hw_tc_map & mask && in hclge_drop_nopfc_buf_till_fit()
2138 !(hdev->tm_info.hw_pfc_map & mask)) { in hclge_drop_nopfc_buf_till_fit()
2147 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || in hclge_drop_nopfc_buf_till_fit()
2152 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); in hclge_drop_nopfc_buf_till_fit()
2155 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev, in hclge_drop_pfc_buf_till_fit() argument
2158 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_drop_pfc_buf_till_fit()
2159 int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); in hclge_drop_pfc_buf_till_fit()
2167 if (hdev->hw_tc_map & mask && in hclge_drop_pfc_buf_till_fit()
2168 hdev->tm_info.hw_pfc_map & mask) { in hclge_drop_pfc_buf_till_fit()
2177 if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || in hclge_drop_pfc_buf_till_fit()
2182 return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all); in hclge_drop_pfc_buf_till_fit()
2185 static int hclge_only_alloc_priv_buff(struct hclge_dev *hdev, in hclge_only_alloc_priv_buff() argument
2192 u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc); in hclge_only_alloc_priv_buff()
2193 u32 tc_num = hclge_get_tc_num(hdev); in hclge_only_alloc_priv_buff()
2194 u32 half_mps = hdev->mps >> 1; in hclge_only_alloc_priv_buff()
2204 min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER + in hclge_only_alloc_priv_buff()
2219 if (!(hdev->hw_tc_map & BIT(i))) in hclge_only_alloc_priv_buff()
2224 priv->wl.high = rx_priv - hdev->dv_buf_size; in hclge_only_alloc_priv_buff()
2238 static int hclge_rx_buffer_calc(struct hclge_dev *hdev, in hclge_rx_buffer_calc() argument
2242 if (!hnae3_dev_dcb_supported(hdev)) { in hclge_rx_buffer_calc()
2243 u32 rx_all = hdev->pkt_buf_size; in hclge_rx_buffer_calc()
2246 if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) in hclge_rx_buffer_calc()
2252 if (hclge_only_alloc_priv_buff(hdev, buf_alloc)) in hclge_rx_buffer_calc()
2255 if (hclge_rx_buf_calc_all(hdev, true, buf_alloc)) in hclge_rx_buffer_calc()
2259 if (hclge_rx_buf_calc_all(hdev, false, buf_alloc)) in hclge_rx_buffer_calc()
2262 if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc)) in hclge_rx_buffer_calc()
2265 if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc)) in hclge_rx_buffer_calc()
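
hclge_rx_buffer_calc() tries layouts in decreasing order of generosity: private buffers only, "max" watermarks for every TC, "min" watermarks, then dropping private buffers for non-PFC TCs and finally for PFC TCs, returning on the first plan hclge_is_rx_buf_ok() accepts. The cascade's control flow, sketched with hypothetical strategy stubs:

#include <stdbool.h>
#include <stdio.h>

struct layout { int dummy; };

/* Hypothetical stand-ins for hclge_only_alloc_priv_buff(),
 * hclge_rx_buf_calc_all(max/min), hclge_drop_nopfc_buf_till_fit()
 * and hclge_drop_pfc_buf_till_fit(); each returns true on a fit. */
static bool priv_only(struct layout *l)  { (void)l; return false; }
static bool all_max(struct layout *l)    { (void)l; return false; }
static bool all_min(struct layout *l)    { (void)l; return true;  }
static bool drop_nopfc(struct layout *l) { (void)l; return true;  }
static bool drop_pfc(struct layout *l)   { (void)l; return true;  }

static int rx_buffer_calc(struct layout *l)
{
    /* Most generous plan first; fall through to harsher ones. */
    if (priv_only(l))  return 0;
    if (all_max(l))    return 0;
    if (all_min(l))    return 0;
    if (drop_nopfc(l)) return 0;
    if (drop_pfc(l))   return 0;
    return -1;         /* no layout fits the packet buffer */
}

int main(void)
{
    struct layout l = { 0 };

    printf("rc=%d\n", rx_buffer_calc(&l));
    return 0;
}
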
2271 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, in hclge_rx_priv_buf_alloc() argument
2296 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_rx_priv_buf_alloc()
2298 dev_err(&hdev->pdev->dev, in hclge_rx_priv_buf_alloc()
2304 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, in hclge_rx_priv_wl_config() argument
2340 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_rx_priv_wl_config()
2342 dev_err(&hdev->pdev->dev, in hclge_rx_priv_wl_config()
2348 static int hclge_common_thrd_config(struct hclge_dev *hdev, in hclge_common_thrd_config() argument
2384 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_common_thrd_config()
2386 dev_err(&hdev->pdev->dev, in hclge_common_thrd_config()
2391 static int hclge_common_wl_config(struct hclge_dev *hdev, in hclge_common_wl_config() argument
2408 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_common_wl_config()
2410 dev_err(&hdev->pdev->dev, in hclge_common_wl_config()
2416 int hclge_buffer_alloc(struct hclge_dev *hdev) in hclge_buffer_alloc() argument
2425 ret = hclge_tx_buffer_calc(hdev, pkt_buf); in hclge_buffer_alloc()
2427 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2432 ret = hclge_tx_buffer_alloc(hdev, pkt_buf); in hclge_buffer_alloc()
2434 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2439 ret = hclge_rx_buffer_calc(hdev, pkt_buf); in hclge_buffer_alloc()
2441 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2447 ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); in hclge_buffer_alloc()
2449 dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", in hclge_buffer_alloc()
2454 if (hnae3_dev_dcb_supported(hdev)) { in hclge_buffer_alloc()
2455 ret = hclge_rx_priv_wl_config(hdev, pkt_buf); in hclge_buffer_alloc()
2457 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2463 ret = hclge_common_thrd_config(hdev, pkt_buf); in hclge_buffer_alloc()
2465 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2472 ret = hclge_common_wl_config(hdev, pkt_buf); in hclge_buffer_alloc()
2474 dev_err(&hdev->pdev->dev, in hclge_buffer_alloc()
2486 struct hclge_dev *hdev = vport->back; in hclge_init_roce_base_info() local
2490 if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi) in hclge_init_roce_base_info()
2493 roce->rinfo.base_vector = hdev->num_nic_msi; in hclge_init_roce_base_info()
2496 roce->rinfo.roce_io_base = hdev->hw.hw.io_base; in hclge_init_roce_base_info()
2497 roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base; in hclge_init_roce_base_info()
2507 static int hclge_init_msi(struct hclge_dev *hdev) in hclge_init_msi() argument
2509 struct pci_dev *pdev = hdev->pdev; in hclge_init_msi()
2514 hdev->num_msi, in hclge_init_msi()
2522 if (vectors < hdev->num_msi) in hclge_init_msi()
2523 dev_warn(&hdev->pdev->dev, in hclge_init_msi()
2525 hdev->num_msi, vectors); in hclge_init_msi()
2527 hdev->num_msi = vectors; in hclge_init_msi()
2528 hdev->num_msi_left = vectors; in hclge_init_msi()
2530 hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, in hclge_init_msi()
2532 if (!hdev->vector_status) { in hclge_init_msi()
2537 for (i = 0; i < hdev->num_msi; i++) in hclge_init_msi()
2538 hdev->vector_status[i] = HCLGE_INVALID_VPORT; in hclge_init_msi()
2540 hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, in hclge_init_msi()
2542 if (!hdev->vector_irq) { in hclge_init_msi()
2584 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, in hclge_cfg_mac_speed_dup_hw() argument
2601 dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); in hclge_cfg_mac_speed_dup_hw()
2611 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_mac_speed_dup_hw()
2613 dev_err(&hdev->pdev->dev, in hclge_cfg_mac_speed_dup_hw()
2621 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num) in hclge_cfg_mac_speed_dup() argument
2623 struct hclge_mac *mac = &hdev->hw.mac; in hclge_cfg_mac_speed_dup()
2631 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num); in hclge_cfg_mac_speed_dup()
2635 hdev->hw.mac.speed = speed; in hclge_cfg_mac_speed_dup()
2636 hdev->hw.mac.duplex = duplex; in hclge_cfg_mac_speed_dup()
2638 hdev->hw.mac.lane_num = lane_num; in hclge_cfg_mac_speed_dup()
2647 struct hclge_dev *hdev = vport->back; in hclge_cfg_mac_speed_dup_h() local
2650 ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num); in hclge_cfg_mac_speed_dup_h()
2655 hdev->hw.mac.req_speed = (u32)speed; in hclge_cfg_mac_speed_dup_h()
2656 hdev->hw.mac.req_duplex = duplex; in hclge_cfg_mac_speed_dup_h()
2661 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) in hclge_set_autoneg_en() argument
2675 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_autoneg_en()
2677 dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", in hclge_set_autoneg_en()
2686 struct hclge_dev *hdev = vport->back; in hclge_set_autoneg() local
2688 if (!hdev->hw.mac.support_autoneg) { in hclge_set_autoneg()
2690 dev_err(&hdev->pdev->dev, in hclge_set_autoneg()
2698 return hclge_set_autoneg_en(hdev, enable); in hclge_set_autoneg()
2704 struct hclge_dev *hdev = vport->back; in hclge_get_autoneg() local
2705 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_autoneg()
2710 return hdev->hw.mac.autoneg; in hclge_get_autoneg()
2716 struct hclge_dev *hdev = vport->back; in hclge_restart_autoneg() local
2719 dev_dbg(&hdev->pdev->dev, "restart autoneg\n"); in hclge_restart_autoneg()
2721 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); in hclge_restart_autoneg()
2724 return hclge_notify_client(hdev, HNAE3_UP_CLIENT); in hclge_restart_autoneg()
2730 struct hclge_dev *hdev = vport->back; in hclge_halt_autoneg() local
2732 if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg) in hclge_halt_autoneg()
2733 return hclge_set_autoneg_en(hdev, !halt); in hclge_halt_autoneg()
2738 static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev, in hclge_parse_fec_stats_lanes() argument
2755 hdev->fec_stats.per_lanes[i] += in hclge_parse_fec_stats_lanes()
2761 static void hclge_parse_fec_stats(struct hclge_dev *hdev, in hclge_parse_fec_stats() argument
2768 hdev->fec_stats.base_r_lane_num = req->base_r_lane_num; in hclge_parse_fec_stats()
2769 hdev->fec_stats.rs_corr_blocks += in hclge_parse_fec_stats()
2771 hdev->fec_stats.rs_uncorr_blocks += in hclge_parse_fec_stats()
2773 hdev->fec_stats.rs_error_blocks += in hclge_parse_fec_stats()
2775 hdev->fec_stats.base_r_corr_blocks += in hclge_parse_fec_stats()
2777 hdev->fec_stats.base_r_uncorr_blocks += in hclge_parse_fec_stats()
2780 hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1); in hclge_parse_fec_stats()
2783 static int hclge_update_fec_stats_hw(struct hclge_dev *hdev) in hclge_update_fec_stats_hw() argument
2796 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM); in hclge_update_fec_stats_hw()
2800 hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM); in hclge_update_fec_stats_hw()
2805 static void hclge_update_fec_stats(struct hclge_dev *hdev) in hclge_update_fec_stats() argument
2807 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_update_fec_stats()
2811 test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state)) in hclge_update_fec_stats()
2814 ret = hclge_update_fec_stats_hw(hdev); in hclge_update_fec_stats()
2816 dev_err(&hdev->pdev->dev, in hclge_update_fec_stats()
2819 clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state); in hclge_update_fec_stats()
2822 static void hclge_get_fec_stats_total(struct hclge_dev *hdev, in hclge_get_fec_stats_total() argument
2825 fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks; in hclge_get_fec_stats_total()
2827 hdev->fec_stats.rs_uncorr_blocks; in hclge_get_fec_stats_total()
2830 static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev, in hclge_get_fec_stats_lanes() argument
2835 if (hdev->fec_stats.base_r_lane_num == 0 || in hclge_get_fec_stats_lanes()
2836 hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) { in hclge_get_fec_stats_lanes()
2837 dev_err(&hdev->pdev->dev, in hclge_get_fec_stats_lanes()
2839 hdev->fec_stats.base_r_lane_num); in hclge_get_fec_stats_lanes()
2843 for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) { in hclge_get_fec_stats_lanes()
2845 hdev->fec_stats.base_r_corr_per_lanes[i]; in hclge_get_fec_stats_lanes()
2847 hdev->fec_stats.base_r_uncorr_per_lanes[i]; in hclge_get_fec_stats_lanes()
2851 static void hclge_comm_get_fec_stats(struct hclge_dev *hdev, in hclge_comm_get_fec_stats() argument
2854 u32 fec_mode = hdev->hw.mac.fec_mode; in hclge_comm_get_fec_stats()
2859 hclge_get_fec_stats_total(hdev, fec_stats); in hclge_comm_get_fec_stats()
2862 hclge_get_fec_stats_lanes(hdev, fec_stats); in hclge_comm_get_fec_stats()
2865 dev_err(&hdev->pdev->dev, in hclge_comm_get_fec_stats()
2876 struct hclge_dev *hdev = vport->back; in hclge_get_fec_stats() local
2877 u32 fec_mode = hdev->hw.mac.fec_mode; in hclge_get_fec_stats()
2884 hclge_update_fec_stats(hdev); in hclge_get_fec_stats()
2886 hclge_comm_get_fec_stats(hdev, fec_stats); in hclge_get_fec_stats()
2889 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode) in hclge_set_fec_hw() argument
2910 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_fec_hw()
2912 dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret); in hclge_set_fec_hw()
2920 struct hclge_dev *hdev = vport->back; in hclge_set_fec() local
2921 struct hclge_mac *mac = &hdev->hw.mac; in hclge_set_fec()
2925 dev_err(&hdev->pdev->dev, "unsupported fec mode\n"); in hclge_set_fec()
2929 ret = hclge_set_fec_hw(hdev, fec_mode); in hclge_set_fec()
2941 struct hclge_dev *hdev = vport->back; in hclge_get_fec() local
2942 struct hclge_mac *mac = &hdev->hw.mac; in hclge_get_fec()
2950 static int hclge_mac_init(struct hclge_dev *hdev) in hclge_mac_init() argument
2952 struct hclge_mac *mac = &hdev->hw.mac; in hclge_mac_init()
2955 hdev->support_sfp_query = true; in hclge_mac_init()
2957 if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_mac_init()
2958 hdev->hw.mac.duplex = HCLGE_MAC_FULL; in hclge_mac_init()
2960 if (hdev->hw.mac.support_autoneg) { in hclge_mac_init()
2961 ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg); in hclge_mac_init()
2966 if (!hdev->hw.mac.autoneg) { in hclge_mac_init()
2967 ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.req_speed, in hclge_mac_init()
2968 hdev->hw.mac.req_duplex, in hclge_mac_init()
2969 hdev->hw.mac.lane_num); in hclge_mac_init()
2977 ret = hclge_set_fec_hw(hdev, mac->user_fec_mode); in hclge_mac_init()
2982 ret = hclge_set_mac_mtu(hdev, hdev->mps); in hclge_mac_init()
2984 dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); in hclge_mac_init()
2988 ret = hclge_set_default_loopback(hdev); in hclge_mac_init()
2992 ret = hclge_buffer_alloc(hdev); in hclge_mac_init()
2994 dev_err(&hdev->pdev->dev, in hclge_mac_init()
3000 static void hclge_mbx_task_schedule(struct hclge_dev *hdev) in hclge_mbx_task_schedule() argument
3002 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_mbx_task_schedule()
3003 !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) { in hclge_mbx_task_schedule()
3004 hdev->last_mbx_scheduled = jiffies; in hclge_mbx_task_schedule()
3005 mod_delayed_work(hclge_wq, &hdev->service_task, 0); in hclge_mbx_task_schedule()
3009 static void hclge_reset_task_schedule(struct hclge_dev *hdev) in hclge_reset_task_schedule() argument
3011 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_reset_task_schedule()
3012 test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) && in hclge_reset_task_schedule()
3013 !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) { in hclge_reset_task_schedule()
3014 hdev->last_rst_scheduled = jiffies; in hclge_reset_task_schedule()
3015 mod_delayed_work(hclge_wq, &hdev->service_task, 0); in hclge_reset_task_schedule()
3019 static void hclge_errhand_task_schedule(struct hclge_dev *hdev) in hclge_errhand_task_schedule() argument
3021 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_errhand_task_schedule()
3022 !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) in hclge_errhand_task_schedule()
3023 mod_delayed_work(hclge_wq, &hdev->service_task, 0); in hclge_errhand_task_schedule()
3026 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) in hclge_task_schedule() argument
3028 if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && in hclge_task_schedule()
3029 !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) in hclge_task_schedule()
3030 mod_delayed_work(hclge_wq, &hdev->service_task, delay_time); in hclge_task_schedule()
3033 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) in hclge_get_mac_link_status() argument
3040 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_mac_link_status()
3042 dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", in hclge_get_mac_link_status()
3054 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status) in hclge_get_mac_phy_link() argument
3056 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_mac_phy_link()
3060 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) in hclge_get_mac_phy_link()
3066 return hclge_get_mac_link_status(hdev, link_status); in hclge_get_mac_phy_link()
3069 static void hclge_push_link_status(struct hclge_dev *hdev) in hclge_push_link_status() argument
3075 for (i = 0; i < pci_num_vf(hdev->pdev); i++) { in hclge_push_link_status()
3076 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; in hclge_push_link_status()
3084 dev_err(&hdev->pdev->dev, in hclge_push_link_status()
3091 static void hclge_update_link_status(struct hclge_dev *hdev) in hclge_update_link_status() argument
3093 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_update_link_status()
3094 struct hnae3_client *client = hdev->nic_client; in hclge_update_link_status()
3101 if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state)) in hclge_update_link_status()
3104 ret = hclge_get_mac_phy_link(hdev, &state); in hclge_update_link_status()
3106 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); in hclge_update_link_status()
3110 if (state != hdev->hw.mac.link) { in hclge_update_link_status()
3111 hdev->hw.mac.link = state; in hclge_update_link_status()
3113 hclge_update_port_info(hdev); in hclge_update_link_status()
3116 hclge_config_mac_tnl_int(hdev, state); in hclge_update_link_status()
3118 if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) { in hclge_update_link_status()
3119 struct hnae3_handle *rhandle = &hdev->vport[0].roce; in hclge_update_link_status()
3120 struct hnae3_client *rclient = hdev->roce_client; in hclge_update_link_status()
3127 hclge_push_link_status(hdev); in hclge_update_link_status()
3130 clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); in hclge_update_link_status()
3175 static void hclge_update_pause_advertising(struct hclge_dev *hdev) in hclge_update_pause_advertising() argument
3177 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_pause_advertising()
3180 switch (hdev->fc_mode_last_time) { in hclge_update_pause_advertising()
3202 static void hclge_update_advertising(struct hclge_dev *hdev) in hclge_update_advertising() argument
3204 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_advertising()
3209 hclge_update_pause_advertising(hdev); in hclge_update_advertising()
3212 static void hclge_update_port_capability(struct hclge_dev *hdev, in hclge_update_port_capability() argument
3215 if (hnae3_dev_fec_supported(hdev)) in hclge_update_port_capability()
3233 hclge_update_advertising(hdev); in hclge_update_port_capability()
3237 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) in hclge_get_sfp_speed() argument
3245 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_sfp_speed()
3247 dev_warn(&hdev->pdev->dev, in hclge_get_sfp_speed()
3251 dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); in hclge_get_sfp_speed()
3260 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac) in hclge_get_sfp_info() argument
3271 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_sfp_info()
3273 dev_warn(&hdev->pdev->dev, in hclge_get_sfp_info()
3277 dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret); in hclge_get_sfp_info()
3318 struct hclge_dev *hdev = vport->back; in hclge_get_phy_link_ksettings() local
3327 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); in hclge_get_phy_link_ksettings()
3329 dev_err(&hdev->pdev->dev, in hclge_get_phy_link_ksettings()
3368 struct hclge_dev *hdev = vport->back; in hclge_set_phy_link_ksettings() local
3396 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM); in hclge_set_phy_link_ksettings()
3398 dev_err(&hdev->pdev->dev, in hclge_set_phy_link_ksettings()
3403 hdev->hw.mac.req_autoneg = cmd->base.autoneg; in hclge_set_phy_link_ksettings()
3404 hdev->hw.mac.req_speed = cmd->base.speed; in hclge_set_phy_link_ksettings()
3405 hdev->hw.mac.req_duplex = cmd->base.duplex; in hclge_set_phy_link_ksettings()
3406 linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising); in hclge_set_phy_link_ksettings()
3411 static int hclge_update_tp_port_info(struct hclge_dev *hdev) in hclge_update_tp_port_info() argument
3416 if (!hnae3_dev_phy_imp_supported(hdev)) in hclge_update_tp_port_info()
3419 ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd); in hclge_update_tp_port_info()
3423 hdev->hw.mac.autoneg = cmd.base.autoneg; in hclge_update_tp_port_info()
3424 hdev->hw.mac.speed = cmd.base.speed; in hclge_update_tp_port_info()
3425 hdev->hw.mac.duplex = cmd.base.duplex; in hclge_update_tp_port_info()
3426 linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising); in hclge_update_tp_port_info()
3431 static int hclge_tp_port_init(struct hclge_dev *hdev) in hclge_tp_port_init() argument
3435 if (!hnae3_dev_phy_imp_supported(hdev)) in hclge_tp_port_init()
3438 cmd.base.autoneg = hdev->hw.mac.req_autoneg; in hclge_tp_port_init()
3439 cmd.base.speed = hdev->hw.mac.req_speed; in hclge_tp_port_init()
3440 cmd.base.duplex = hdev->hw.mac.req_duplex; in hclge_tp_port_init()
3441 linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising); in hclge_tp_port_init()
3443 return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd); in hclge_tp_port_init()
3446 static int hclge_update_port_info(struct hclge_dev *hdev) in hclge_update_port_info() argument
3448 struct hclge_mac *mac = &hdev->hw.mac; in hclge_update_port_info()
3454 return hclge_update_tp_port_info(hdev); in hclge_update_port_info()
3457 if (!hdev->support_sfp_query) in hclge_update_port_info()
3460 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_update_port_info()
3462 ret = hclge_get_sfp_info(hdev, mac); in hclge_update_port_info()
3465 ret = hclge_get_sfp_speed(hdev, &speed); in hclge_update_port_info()
3469 hdev->support_sfp_query = false; in hclge_update_port_info()
3475 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_update_port_info()
3477 hclge_update_port_capability(hdev, mac); in hclge_update_port_info()
3479 (void)hclge_tm_port_shaper_cfg(hdev); in hclge_update_port_info()
3482 return hclge_cfg_mac_speed_dup(hdev, mac->speed, in hclge_update_port_info()
3489 return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0); in hclge_update_port_info()
3496 struct hclge_dev *hdev = vport->back; in hclge_get_status() local
3498 hclge_update_link_status(hdev); in hclge_get_status()
3500 return hdev->hw.mac.link; in hclge_get_status()
3503 struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf) in hclge_get_vf_vport() argument
3505 if (!pci_num_vf(hdev->pdev)) { in hclge_get_vf_vport()
3506 dev_err(&hdev->pdev->dev, in hclge_get_vf_vport()
3511 if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) { in hclge_get_vf_vport()
3512 dev_err(&hdev->pdev->dev, in hclge_get_vf_vport()
3514 vf, pci_num_vf(hdev->pdev)); in hclge_get_vf_vport()
3520 return &hdev->vport[vf]; in hclge_get_vf_vport()
3527 struct hclge_dev *hdev = vport->back; in hclge_get_vf_config() local
3529 vport = hclge_get_vf_vport(hdev, vf); in hclge_get_vf_config()
3551 struct hclge_dev *hdev = vport->back; in hclge_set_vf_link_state() local
3555 vport = hclge_get_vf_vport(hdev, vf); in hclge_set_vf_link_state()
3571 dev_err(&hdev->pdev->dev, in hclge_set_vf_link_state()
3578 static void hclge_set_reset_pending(struct hclge_dev *hdev, in hclge_set_reset_pending() argument
3586 set_bit(reset_type, &hdev->reset_pending); in hclge_set_reset_pending()
3589 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) in hclge_check_event_cause() argument
3594 cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); in hclge_check_event_cause()
3595 msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); in hclge_check_event_cause()
3596 hw_err_src_reg = hclge_read_dev(&hdev->hw, in hclge_check_event_cause()
3608 dev_info(&hdev->pdev->dev, "IMP reset interrupt\n"); in hclge_check_event_cause()
3609 hclge_set_reset_pending(hdev, HNAE3_IMP_RESET); in hclge_check_event_cause()
3610 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_check_event_cause()
3612 hdev->rst_stats.imp_rst_cnt++; in hclge_check_event_cause()
3617 dev_info(&hdev->pdev->dev, "global reset interrupt\n"); in hclge_check_event_cause()
3618 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_check_event_cause()
3619 hclge_set_reset_pending(hdev, HNAE3_GLOBAL_RESET); in hclge_check_event_cause()
3621 hdev->rst_stats.global_rst_cnt++; in hclge_check_event_cause()
3644 dev_info(&hdev->pdev->dev, in hclge_check_event_cause()
3651 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type, in hclge_clear_event_cause() argument
3662 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr); in hclge_clear_event_cause()
3665 hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr); in hclge_clear_event_cause()
3672 static void hclge_clear_all_event_cause(struct hclge_dev *hdev) in hclge_clear_all_event_cause() argument
3674 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST, in hclge_clear_all_event_cause()
3678 hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0); in hclge_clear_all_event_cause()
3688 struct hclge_dev *hdev = data; in hclge_misc_irq_handle() local
3693 hclge_enable_vector(&hdev->misc_vector, false); in hclge_misc_irq_handle()
3694 event_cause = hclge_check_event_cause(hdev, &clearval); in hclge_misc_irq_handle()
3699 hclge_errhand_task_schedule(hdev); in hclge_misc_irq_handle()
3702 hclge_reset_task_schedule(hdev); in hclge_misc_irq_handle()
3705 spin_lock_irqsave(&hdev->ptp->lock, flags); in hclge_misc_irq_handle()
3706 hclge_ptp_clean_tx_hwts(hdev); in hclge_misc_irq_handle()
3707 spin_unlock_irqrestore(&hdev->ptp->lock, flags); in hclge_misc_irq_handle()
3719 hclge_mbx_task_schedule(hdev); in hclge_misc_irq_handle()
3722 dev_warn(&hdev->pdev->dev, in hclge_misc_irq_handle()
3727 hclge_clear_event_cause(hdev, event_cause, clearval); in hclge_misc_irq_handle()
3733 hclge_enable_vector(&hdev->misc_vector, true); in hclge_misc_irq_handle()
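
hclge_misc_irq_handle() follows a fixed masked-window sequence: disable its own vector, decode the event cause, defer real work to the service task (error, reset, or mailbox), clear what it consumed, then re-enable the vector. A skeletal sketch with the hardware access stubbed out (all helpers here are hypothetical):

#include <stdio.h>

enum event { EVT_ERR, EVT_RST, EVT_MBX, EVT_NONE };

static void enable_vector(int on)      { printf("vector %s\n", on ? "on" : "off"); }
static enum event check_cause(unsigned *clr) { *clr = 0x1; return EVT_RST; }
static void clear_cause(unsigned clr)  { printf("clear 0x%x\n", clr); }
static void schedule_reset_task(void)  { printf("reset task scheduled\n"); }

/* Mirrors hclge_misc_irq_handle(): mask, decode, defer, ack, unmask. */
static void misc_irq(void)
{
    unsigned clearval;

    enable_vector(0);                   /* mask while we look at causes */
    switch (check_cause(&clearval)) {
    case EVT_RST:
        schedule_reset_task();          /* defer to the service task */
        break;
    default:
        break;
    }
    clear_cause(clearval);              /* ack only what we consumed */
    enable_vector(1);                   /* unmask */
}

int main(void)
{
    misc_irq();
    return 0;
}
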
3738 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) in hclge_free_vector() argument
3740 if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) { in hclge_free_vector()
3741 dev_warn(&hdev->pdev->dev, in hclge_free_vector()
3746 hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; in hclge_free_vector()
3747 hdev->num_msi_left += 1; in hclge_free_vector()
3748 hdev->num_msi_used -= 1; in hclge_free_vector()
3751 static void hclge_get_misc_vector(struct hclge_dev *hdev) in hclge_get_misc_vector() argument
3753 struct hclge_misc_vector *vector = &hdev->misc_vector; in hclge_get_misc_vector()
3755 vector->vector_irq = pci_irq_vector(hdev->pdev, 0); in hclge_get_misc_vector()
3757 vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; in hclge_get_misc_vector()
3758 hdev->vector_status[0] = 0; in hclge_get_misc_vector()
3760 hdev->num_msi_left -= 1; in hclge_get_misc_vector()
3761 hdev->num_msi_used += 1; in hclge_get_misc_vector()
3764 static int hclge_misc_irq_init(struct hclge_dev *hdev) in hclge_misc_irq_init() argument
3768 hclge_get_misc_vector(hdev); in hclge_misc_irq_init()
3771 snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s", in hclge_misc_irq_init()
3772 HCLGE_NAME, pci_name(hdev->pdev)); in hclge_misc_irq_init()
3773 ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle, in hclge_misc_irq_init()
3774 IRQF_NO_AUTOEN, hdev->misc_vector.name, hdev); in hclge_misc_irq_init()
3776 hclge_free_vector(hdev, 0); in hclge_misc_irq_init()
3777 dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", in hclge_misc_irq_init()
3778 hdev->misc_vector.vector_irq); in hclge_misc_irq_init()
3784 static void hclge_misc_irq_uninit(struct hclge_dev *hdev) in hclge_misc_irq_uninit() argument
3786 free_irq(hdev->misc_vector.vector_irq, hdev); in hclge_misc_irq_uninit()
3787 hclge_free_vector(hdev, 0); in hclge_misc_irq_uninit()
3790 int hclge_notify_client(struct hclge_dev *hdev, in hclge_notify_client() argument
3793 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_notify_client()
3794 struct hnae3_client *client = hdev->nic_client; in hclge_notify_client()
3797 if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client) in hclge_notify_client()
3805 dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", in hclge_notify_client()
3811 static int hclge_notify_roce_client(struct hclge_dev *hdev, in hclge_notify_roce_client() argument
3814 struct hnae3_handle *handle = &hdev->vport[0].roce; in hclge_notify_roce_client()
3815 struct hnae3_client *client = hdev->roce_client; in hclge_notify_roce_client()
3818 if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client) in hclge_notify_roce_client()
3826 dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)", in hclge_notify_roce_client()
3832 static int hclge_reset_wait(struct hclge_dev *hdev) in hclge_reset_wait() argument
3840 switch (hdev->reset_type) { in hclge_reset_wait()
3854 dev_err(&hdev->pdev->dev, in hclge_reset_wait()
3856 hdev->reset_type); in hclge_reset_wait()
3860 val = hclge_read_dev(&hdev->hw, reg); in hclge_reset_wait()
3863 val = hclge_read_dev(&hdev->hw, reg); in hclge_reset_wait()
3868 dev_warn(&hdev->pdev->dev, in hclge_reset_wait()
3869 "Wait for reset timeout: %d\n", hdev->reset_type); in hclge_reset_wait()
3876 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) in hclge_set_vf_rst() argument
3888 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vf_rst()
3891 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) in hclge_set_all_vf_rst() argument
3895 for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) { in hclge_set_all_vf_rst()
3896 struct hclge_vport *vport = &hdev->vport[i]; in hclge_set_all_vf_rst()
3900 ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); in hclge_set_all_vf_rst()
3902 dev_err(&hdev->pdev->dev, in hclge_set_all_vf_rst()
3914 hdev->reset_type == HNAE3_FUNC_RESET) { in hclge_set_all_vf_rst()
3926 dev_warn(&hdev->pdev->dev, in hclge_set_all_vf_rst()
3935 static void hclge_mailbox_service_task(struct hclge_dev *hdev) in hclge_mailbox_service_task() argument
3937 if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) || in hclge_mailbox_service_task()
3938 test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) || in hclge_mailbox_service_task()
3939 test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state)) in hclge_mailbox_service_task()
3942 if (time_is_before_jiffies(hdev->last_mbx_scheduled + in hclge_mailbox_service_task()
3944 dev_warn(&hdev->pdev->dev, in hclge_mailbox_service_task()
3946 jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled), in hclge_mailbox_service_task()
3949 hclge_mbx_handler(hdev); in hclge_mailbox_service_task()
3951 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); in hclge_mailbox_service_task()
3954 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev) in hclge_func_reset_sync_vf() argument
3966 hclge_mailbox_service_task(hdev); in hclge_func_reset_sync_vf()
3968 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_func_reset_sync_vf()
3976 dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n", in hclge_func_reset_sync_vf()
3986 dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n"); in hclge_func_reset_sync_vf()
3989 void hclge_report_hw_error(struct hclge_dev *hdev, in hclge_report_hw_error() argument
3992 struct hnae3_client *client = hdev->nic_client; in hclge_report_hw_error()
3995 !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state)) in hclge_report_hw_error()
3998 client->ops->process_hw_error(&hdev->vport[0].nic, type); in hclge_report_hw_error()
4001 static void hclge_handle_imp_error(struct hclge_dev *hdev) in hclge_handle_imp_error() argument
4005 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_handle_imp_error()
4007 hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR); in hclge_handle_imp_error()
4009 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); in hclge_handle_imp_error()
4013 hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR); in hclge_handle_imp_error()
4015 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val); in hclge_handle_imp_error()
4019 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) in hclge_func_reset_cmd() argument
4029 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_func_reset_cmd()
4031 dev_err(&hdev->pdev->dev, in hclge_func_reset_cmd()
4037 static void hclge_do_reset(struct hclge_dev *hdev) in hclge_do_reset() argument
4039 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_do_reset()
4040 struct pci_dev *pdev = hdev->pdev; in hclge_do_reset()
4046 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING), in hclge_do_reset()
4047 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG)); in hclge_do_reset()
4051 switch (hdev->reset_type) { in hclge_do_reset()
4054 val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_do_reset()
4056 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); in hclge_do_reset()
4060 val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); in hclge_do_reset()
4062 hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); in hclge_do_reset()
4067 hclge_set_reset_pending(hdev, HNAE3_FUNC_RESET); in hclge_do_reset()
4068 hclge_reset_task_schedule(hdev); in hclge_do_reset()
4072 "unsupported reset type: %d\n", hdev->reset_type); in hclge_do_reset()
4081 struct hclge_dev *hdev = ae_dev->priv; in hclge_get_reset_level() local
4103 if (hdev->reset_type != HNAE3_NONE_RESET && in hclge_get_reset_level()
4104 rst_level < hdev->reset_type) in hclge_get_reset_level()
4110 static void hclge_clear_reset_cause(struct hclge_dev *hdev) in hclge_clear_reset_cause() argument
4114 switch (hdev->reset_type) { in hclge_clear_reset_cause()
4131 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_clear_reset_cause()
4132 hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, in hclge_clear_reset_cause()
4135 hclge_enable_vector(&hdev->misc_vector, true); in hclge_clear_reset_cause()
4138 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable) in hclge_reset_handshake() argument
4142 reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG); in hclge_reset_handshake()
4148 hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val); in hclge_reset_handshake()
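
hclge_reset_handshake() is a plain read-modify-write on HCLGE_COMM_NIC_CSQ_DEPTH_REG: read, set or clear the handshake bit, write back. The same pattern against an in-memory stand-in register (the bit position here is an assumption):

#include <stdint.h>
#include <stdio.h>

#define HANDSHAKE_BIT (1u << 31)   /* hypothetical bit position */

static uint32_t fake_reg;          /* stands in for the CSQ depth register */

static void reset_handshake(int enable)
{
    uint32_t val = fake_reg;       /* hclge_read_dev() */

    if (enable)
        val |= HANDSHAKE_BIT;
    else
        val &= ~HANDSHAKE_BIT;
    fake_reg = val;                /* hclge_write_dev() */
}

int main(void)
{
    reset_handshake(1);
    printf("reg=0x%08x\n", fake_reg);
    reset_handshake(0);
    printf("reg=0x%08x\n", fake_reg);
    return 0;
}
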
4151 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev) in hclge_func_reset_notify_vf() argument
4155 ret = hclge_set_all_vf_rst(hdev, true); in hclge_func_reset_notify_vf()
4159 hclge_func_reset_sync_vf(hdev); in hclge_func_reset_notify_vf()
4164 static int hclge_reset_prepare_wait(struct hclge_dev *hdev) in hclge_reset_prepare_wait() argument
4169 switch (hdev->reset_type) { in hclge_reset_prepare_wait()
4171 ret = hclge_func_reset_notify_vf(hdev); in hclge_reset_prepare_wait()
4175 ret = hclge_func_reset_cmd(hdev, 0); in hclge_reset_prepare_wait()
4177 dev_err(&hdev->pdev->dev, in hclge_reset_prepare_wait()
4187 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_reset_prepare_wait()
4188 hdev->rst_stats.pf_rst_cnt++; in hclge_reset_prepare_wait()
4191 ret = hclge_func_reset_notify_vf(hdev); in hclge_reset_prepare_wait()
4196 hclge_handle_imp_error(hdev); in hclge_reset_prepare_wait()
4197 reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); in hclge_reset_prepare_wait()
4198 hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, in hclge_reset_prepare_wait()
4207 hclge_reset_handshake(hdev, true); in hclge_reset_prepare_wait()
4208 dev_info(&hdev->pdev->dev, "prepare wait ok\n"); in hclge_reset_prepare_wait()
4213 static void hclge_show_rst_info(struct hclge_dev *hdev) in hclge_show_rst_info() argument
4221 hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN); in hclge_show_rst_info()
4223 dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf); in hclge_show_rst_info()
4228 static bool hclge_reset_err_handle(struct hclge_dev *hdev) in hclge_reset_err_handle() argument
4232 if (hdev->reset_pending) { in hclge_reset_err_handle()
4233 dev_info(&hdev->pdev->dev, "Reset pending %lu\n", in hclge_reset_err_handle()
4234 hdev->reset_pending); in hclge_reset_err_handle()
4236 } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) & in hclge_reset_err_handle()
4238 dev_info(&hdev->pdev->dev, in hclge_reset_err_handle()
4240 hclge_clear_reset_cause(hdev); in hclge_reset_err_handle()
4242 } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) { in hclge_reset_err_handle()
4243 hdev->rst_stats.reset_fail_cnt++; in hclge_reset_err_handle()
4244 hclge_set_reset_pending(hdev, hdev->reset_type); in hclge_reset_err_handle()
4245 dev_info(&hdev->pdev->dev, in hclge_reset_err_handle()
4247 hdev->rst_stats.reset_fail_cnt); in hclge_reset_err_handle()
4251 hclge_clear_reset_cause(hdev); in hclge_reset_err_handle()
4254 hclge_reset_handshake(hdev, true); in hclge_reset_err_handle()
4256 dev_err(&hdev->pdev->dev, "Reset fail!\n"); in hclge_reset_err_handle()
4258 hclge_show_rst_info(hdev); in hclge_reset_err_handle()
4260 set_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_reset_err_handle()
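hclge_reset_err_handle() above implements bounded retry: a pending higher-level reset or a fail count below MAX_RESET_FAIL_CNT re-queues the reset, otherwise the device is flagged with HCLGE_STATE_RST_FAIL and left alone. A compilable model of that decision, with a made-up retry limit:

#include <stdbool.h>
#include <stdio.h>

#define MAX_RESET_FAIL_CNT 5               /* placeholder limit */

struct rst_stats { unsigned int reset_fail_cnt; };

static bool reset_err_handle(struct rst_stats *st, bool reset_pending)
{
    if (reset_pending)
        return true;                       /* higher-level reset queued: retry */

    if (st->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
        st->reset_fail_cnt++;
        printf("reset failed %u times, retrying\n", st->reset_fail_cnt);
        return true;                       /* reschedule the reset task */
    }

    printf("reset failed permanently\n");
    return false;                          /* give up; device stays failed */
}

int main(void)
{
    struct rst_stats st = { 0 };

    while (reset_err_handle(&st, false))
        ;                                  /* retries until the limit */
    return 0;
}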
4265 static void hclge_update_reset_level(struct hclge_dev *hdev) in hclge_update_reset_level() argument
4267 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_update_reset_level()
4274 hclge_get_reset_level(ae_dev, &hdev->reset_request); in hclge_update_reset_level()
4281 &hdev->default_reset_request); in hclge_update_reset_level()
4283 set_bit(reset_level, &hdev->reset_request); in hclge_update_reset_level()
4286 static int hclge_set_rst_done(struct hclge_dev *hdev) in hclge_set_rst_done() argument
4296 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_rst_done()
4302 dev_warn(&hdev->pdev->dev, in hclge_set_rst_done()
4307 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n", in hclge_set_rst_done()
4314 static int hclge_reset_prepare_up(struct hclge_dev *hdev) in hclge_reset_prepare_up() argument
4318 switch (hdev->reset_type) { in hclge_reset_prepare_up()
4321 ret = hclge_set_all_vf_rst(hdev, false); in hclge_reset_prepare_up()
4325 ret = hclge_set_rst_done(hdev); in hclge_reset_prepare_up()
4332 hclge_reset_handshake(hdev, false); in hclge_reset_prepare_up()
4337 static int hclge_reset_stack(struct hclge_dev *hdev) in hclge_reset_stack() argument
4341 ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); in hclge_reset_stack()
4345 ret = hclge_reset_ae_dev(hdev->ae_dev); in hclge_reset_stack()
4349 return hclge_notify_client(hdev, HNAE3_INIT_CLIENT); in hclge_reset_stack()
4352 static int hclge_reset_prepare(struct hclge_dev *hdev) in hclge_reset_prepare() argument
4356 hdev->rst_stats.reset_cnt++; in hclge_reset_prepare()
4358 ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); in hclge_reset_prepare()
4363 ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); in hclge_reset_prepare()
4368 return hclge_reset_prepare_wait(hdev); in hclge_reset_prepare()
4371 static int hclge_reset_rebuild(struct hclge_dev *hdev) in hclge_reset_rebuild() argument
4375 hdev->rst_stats.hw_reset_done_cnt++; in hclge_reset_rebuild()
4377 ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); in hclge_reset_rebuild()
4382 ret = hclge_reset_stack(hdev); in hclge_reset_rebuild()
4387 hclge_clear_reset_cause(hdev); in hclge_reset_rebuild()
4389 ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); in hclge_reset_rebuild()
4394 hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1) in hclge_reset_rebuild()
4397 ret = hclge_reset_prepare_up(hdev); in hclge_reset_rebuild()
4402 ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); in hclge_reset_rebuild()
4407 ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); in hclge_reset_rebuild()
4411 hdev->last_reset_time = jiffies; in hclge_reset_rebuild()
4412 hdev->rst_stats.reset_fail_cnt = 0; in hclge_reset_rebuild()
4413 hdev->rst_stats.reset_done_cnt++; in hclge_reset_rebuild()
4414 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_reset_rebuild()
4416 hclge_update_reset_level(hdev); in hclge_reset_rebuild()
4421 static void hclge_reset(struct hclge_dev *hdev) in hclge_reset() argument
4423 if (hclge_reset_prepare(hdev)) in hclge_reset()
4426 if (hclge_reset_wait(hdev)) in hclge_reset()
4429 if (hclge_reset_rebuild(hdev)) in hclge_reset()
4435 if (hclge_reset_err_handle(hdev)) in hclge_reset()
4436 hclge_reset_task_schedule(hdev); in hclge_reset()
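hclge_reset() itself is three phases chained with early exit: prepare, hardware wait, rebuild; any failure drops into the error handler, which decides whether to reschedule. A stub model of that control flow (the real function uses labeled error paths and touches far more state):

#include <stdbool.h>
#include <stdio.h>

static int reset_prepare(void) { return 0; }   /* notify clients, quiesce */
static int reset_wait(void)    { return 0; }   /* poll for hw completion */
static int reset_rebuild(void) { return -1; }  /* re-init stack; fails here */

static bool reset_err_handle(void) { return true; } /* retry allowed */

static void do_full_reset(void)
{
    if (reset_prepare() || reset_wait() || reset_rebuild()) {
        if (reset_err_handle())
            printf("reset failed, task rescheduled\n");
        else
            printf("reset failed permanently\n");
        return;
    }
    printf("reset completed\n");
}

int main(void)
{
    do_full_reset();
    return 0;
}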
4442 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_event() local
4459 if (time_before(jiffies, (hdev->last_reset_time + in hclge_reset_event()
4461 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); in hclge_reset_event()
4465 if (hdev->default_reset_request) { in hclge_reset_event()
4466 hdev->reset_level = in hclge_reset_event()
4468 &hdev->default_reset_request); in hclge_reset_event()
4469 } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) { in hclge_reset_event()
4470 hdev->reset_level = HNAE3_FUNC_RESET; in hclge_reset_event()
4473 dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n", in hclge_reset_event()
4474 hdev->reset_level); in hclge_reset_event()
4477 set_bit(hdev->reset_level, &hdev->reset_request); in hclge_reset_event()
4478 hclge_reset_task_schedule(hdev); in hclge_reset_event()
4480 if (hdev->reset_level < HNAE3_GLOBAL_RESET) in hclge_reset_event()
4481 hdev->reset_level++; in hclge_reset_event()
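The tail of hclge_reset_event() escalates: after the requested reset is queued, reset_level is bumped (capped at the global level) so a repeat event inside the time window asks for a deeper reset next time. A small model with invented enum values:

#include <stdio.h>

enum level { LVL_NONE, LVL_FUNC, LVL_GLOBAL };

static const char *name[] = { "none", "func", "global" };

int main(void)
{
    enum level reset_level = LVL_FUNC;

    for (int event = 0; event < 3; event++) {
        printf("event %d: request %s reset\n", event, name[reset_level]);
        if (reset_level < LVL_GLOBAL)      /* escalate for next time */
            reset_level++;
    }
    return 0;
}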
4491 struct hclge_dev *hdev = ae_dev->priv; in hclge_set_def_reset_request() local
4495 set_bit(HNAE3_NONE_RESET, &hdev->default_reset_request); in hclge_set_def_reset_request()
4496 dev_warn(&hdev->pdev->dev, "unsupported reset type %d\n", in hclge_set_def_reset_request()
4501 set_bit(rst_type, &hdev->default_reset_request); in hclge_set_def_reset_request()
4506 struct hclge_dev *hdev = timer_container_of(hdev, t, reset_timer); in hclge_reset_timer() local
4511 if (!hdev->default_reset_request) in hclge_reset_timer()
4514 dev_info(&hdev->pdev->dev, in hclge_reset_timer()
4516 hclge_reset_event(hdev->pdev, NULL); in hclge_reset_timer()
4519 static void hclge_reset_subtask(struct hclge_dev *hdev) in hclge_reset_subtask() argument
4521 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_reset_subtask()
4532 hdev->last_reset_time = jiffies; in hclge_reset_subtask()
4533 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending); in hclge_reset_subtask()
4534 if (hdev->reset_type != HNAE3_NONE_RESET) in hclge_reset_subtask()
4535 hclge_reset(hdev); in hclge_reset_subtask()
4538 hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request); in hclge_reset_subtask()
4539 if (hdev->reset_type != HNAE3_NONE_RESET) in hclge_reset_subtask()
4540 hclge_do_reset(hdev); in hclge_reset_subtask()
4542 hdev->reset_type = HNAE3_NONE_RESET; in hclge_reset_subtask()
4545 static void hclge_handle_err_reset_request(struct hclge_dev *hdev) in hclge_handle_err_reset_request() argument
4547 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_handle_err_reset_request()
4556 if (hdev->default_reset_request && ae_dev->ops->reset_event) in hclge_handle_err_reset_request()
4557 ae_dev->ops->reset_event(hdev->pdev, NULL); in hclge_handle_err_reset_request()
4560 hclge_enable_vector(&hdev->misc_vector, true); in hclge_handle_err_reset_request()
4563 static void hclge_handle_err_recovery(struct hclge_dev *hdev) in hclge_handle_err_recovery() argument
4565 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_handle_err_recovery()
4569 if (hclge_find_error_source(hdev)) { in hclge_handle_err_recovery()
4571 hclge_handle_mac_tnl(hdev); in hclge_handle_err_recovery()
4572 hclge_handle_vf_queue_err_ras(hdev); in hclge_handle_err_recovery()
4575 hclge_handle_err_reset_request(hdev); in hclge_handle_err_recovery()
4578 static void hclge_misc_err_recovery(struct hclge_dev *hdev) in hclge_misc_err_recovery() argument
4580 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_misc_err_recovery()
4581 struct device *dev = &hdev->pdev->dev; in hclge_misc_err_recovery()
4584 msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); in hclge_misc_err_recovery()
4587 (hdev, &hdev->default_reset_request)) in hclge_misc_err_recovery()
4594 hclge_handle_err_reset_request(hdev); in hclge_misc_err_recovery()
4597 static void hclge_errhand_service_task(struct hclge_dev *hdev) in hclge_errhand_service_task() argument
4599 if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) in hclge_errhand_service_task()
4602 if (hnae3_dev_ras_imp_supported(hdev)) in hclge_errhand_service_task()
4603 hclge_handle_err_recovery(hdev); in hclge_errhand_service_task()
4605 hclge_misc_err_recovery(hdev); in hclge_errhand_service_task()
4608 static void hclge_reset_service_task(struct hclge_dev *hdev) in hclge_reset_service_task() argument
4610 if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) in hclge_reset_service_task()
4613 if (time_is_before_jiffies(hdev->last_rst_scheduled + in hclge_reset_service_task()
4615 dev_warn(&hdev->pdev->dev, in hclge_reset_service_task()
4617 jiffies_to_msecs(jiffies - hdev->last_rst_scheduled), in hclge_reset_service_task()
4620 down(&hdev->reset_sem); in hclge_reset_service_task()
4621 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_service_task()
4623 hclge_reset_subtask(hdev); in hclge_reset_service_task()
4625 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_service_task()
4626 up(&hdev->reset_sem); in hclge_reset_service_task()
4629 static void hclge_update_vport_alive(struct hclge_dev *hdev) in hclge_update_vport_alive() argument
4637 for (i = 1; i < hdev->num_alloc_vport; i++) { in hclge_update_vport_alive()
4638 struct hclge_vport *vport = &hdev->vport[i]; in hclge_update_vport_alive()
4646 dev_warn(&hdev->pdev->dev, in hclge_update_vport_alive()
4653 static void hclge_periodic_service_task(struct hclge_dev *hdev) in hclge_periodic_service_task() argument
4657 if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) in hclge_periodic_service_task()
4663 hclge_update_link_status(hdev); in hclge_periodic_service_task()
4664 hclge_sync_mac_table(hdev); in hclge_periodic_service_task()
4665 hclge_sync_promisc_mode(hdev); in hclge_periodic_service_task()
4666 hclge_sync_fd_table(hdev); in hclge_periodic_service_task()
4668 if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) { in hclge_periodic_service_task()
4669 delta = jiffies - hdev->last_serv_processed; in hclge_periodic_service_task()
4677 hdev->serv_processed_cnt++; in hclge_periodic_service_task()
4678 hclge_update_vport_alive(hdev); in hclge_periodic_service_task()
4680 if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) { in hclge_periodic_service_task()
4681 hdev->last_serv_processed = jiffies; in hclge_periodic_service_task()
4685 if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL)) in hclge_periodic_service_task()
4686 hclge_update_stats_for_all(hdev); in hclge_periodic_service_task()
4688 hclge_update_port_info(hdev); in hclge_periodic_service_task()
4689 hclge_sync_vlan_filter(hdev); in hclge_periodic_service_task()
4691 if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL)) in hclge_periodic_service_task()
4692 hclge_rfs_filter_expire(hdev); in hclge_periodic_service_task()
4694 hdev->last_serv_processed = jiffies; in hclge_periodic_service_task()
4697 hclge_task_schedule(hdev, delta); in hclge_periodic_service_task()
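hclge_periodic_service_task() fans one periodic tick out to slower subtasks by taking serv_processed_cnt modulo each subtask's interval: stats updates every HCLGE_STATS_TIMER_INTERVAL rounds, aRFS expiry every HCLGE_ARFS_EXPIRE_INTERVAL rounds. A sketch of that scheme with placeholder interval values:

#include <stdio.h>

#define STATS_INTERVAL       300           /* placeholder values */
#define ARFS_EXPIRE_INTERVAL 30

int main(void)
{
    unsigned int serv_processed_cnt = 0;

    for (int tick = 0; tick < 900; tick++) {
        serv_processed_cnt++;              /* one service round done */

        if (!(serv_processed_cnt % STATS_INTERVAL))
            printf("round %u: update stats\n", serv_processed_cnt);

        if (!(serv_processed_cnt % ARFS_EXPIRE_INTERVAL))
            printf("round %u: expire aRFS filters\n", serv_processed_cnt);
    }
    return 0;
}

The counter-modulo form keeps one timer servicing many cadences instead of one timer per subtask.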
4700 static void hclge_ptp_service_task(struct hclge_dev *hdev) in hclge_ptp_service_task() argument
4704 if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) || in hclge_ptp_service_task()
4705 !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) || in hclge_ptp_service_task()
4706 !time_is_before_jiffies(hdev->ptp->tx_start + HZ)) in hclge_ptp_service_task()
4710 spin_lock_irqsave(&hdev->ptp->lock, flags); in hclge_ptp_service_task()
4715 if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) in hclge_ptp_service_task()
4716 hclge_ptp_clean_tx_hwts(hdev); in hclge_ptp_service_task()
4718 spin_unlock_irqrestore(&hdev->ptp->lock, flags); in hclge_ptp_service_task()
4723 struct hclge_dev *hdev = in hclge_service_task() local
4726 hclge_errhand_service_task(hdev); in hclge_service_task()
4727 hclge_reset_service_task(hdev); in hclge_service_task()
4728 hclge_ptp_service_task(hdev); in hclge_service_task()
4729 hclge_mailbox_service_task(hdev); in hclge_service_task()
4730 hclge_periodic_service_task(hdev); in hclge_service_task()
4736 hclge_errhand_service_task(hdev); in hclge_service_task()
4737 hclge_reset_service_task(hdev); in hclge_service_task()
4738 hclge_mailbox_service_task(hdev); in hclge_service_task()
4752 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx, in hclge_get_vector_info() argument
4757 vector_info->vector = pci_irq_vector(hdev->pdev, idx); in hclge_get_vector_info()
4761 vector_info->io_addr = hdev->hw.hw.io_base + in hclge_get_vector_info()
4765 vector_info->io_addr = hdev->hw.hw.io_base + in hclge_get_vector_info()
4772 hdev->vector_status[idx] = hdev->vport[0].vport_id; in hclge_get_vector_info()
4773 hdev->vector_irq[idx] = vector_info->vector; in hclge_get_vector_info()
4781 struct hclge_dev *hdev = vport->back; in hclge_get_vector() local
4786 vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num); in hclge_get_vector()
4787 vector_num = min(hdev->num_msi_left, vector_num); in hclge_get_vector()
4790 while (++i < hdev->num_nic_msi) { in hclge_get_vector()
4791 if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { in hclge_get_vector()
4792 hclge_get_vector_info(hdev, i, vector); in hclge_get_vector()
4800 hdev->num_msi_left -= alloc; in hclge_get_vector()
4801 hdev->num_msi_used += alloc; in hclge_get_vector()
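hclge_get_vector() above allocates MSI vectors by linear scan: slots marked HCLGE_INVALID_VPORT are free, slot 0 is reserved for the misc vector, the request is clamped to what remains, and the free/used tallies are adjusted afterwards. An illustrative userspace model (table size and constants invented):

#include <stdio.h>

#define NUM_MSI 8
#define INVALID_VPORT 0xffff

static unsigned short vector_status[NUM_MSI];

static int get_vectors(unsigned short vport_id, unsigned int want,
                       unsigned int *msi_left, unsigned int *msi_used)
{
    int i = 0, alloc = 0;

    if (want > *msi_left)                  /* clamp to what is free */
        want = *msi_left;

    while (++i < NUM_MSI) {                /* slot 0 reserved (misc vector) */
        if (vector_status[i] == INVALID_VPORT) {
            vector_status[i] = vport_id;
            if ((unsigned int)++alloc >= want)
                break;
        }
    }
    *msi_left -= alloc;
    *msi_used += alloc;
    return alloc;
}

int main(void)
{
    unsigned int left = NUM_MSI - 1, used = 1;

    for (int i = 0; i < NUM_MSI; i++)
        vector_status[i] = INVALID_VPORT;
    vector_status[0] = 0;                  /* PF misc vector */

    printf("allocated %d vectors\n", get_vectors(1, 4, &left, &used));
    printf("left=%u used=%u\n", left, used);
    return 0;
}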
4806 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) in hclge_get_vector_index() argument
4810 for (i = 0; i < hdev->num_msi; i++) in hclge_get_vector_index()
4811 if (vector == hdev->vector_irq[i]) in hclge_get_vector_index()
4820 struct hclge_dev *hdev = vport->back; in hclge_put_vector() local
4823 vector_id = hclge_get_vector_index(hdev, vector); in hclge_put_vector()
4825 dev_err(&hdev->pdev->dev, in hclge_put_vector()
4830 hclge_free_vector(hdev, vector_id); in hclge_put_vector()
4855 struct hclge_dev *hdev = vport->back; in hclge_set_rss() local
4856 struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg; in hclge_set_rss()
4859 ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc); in hclge_set_rss()
4861 dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc); in hclge_set_rss()
4870 return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw, in hclge_set_rss()
4878 struct hclge_dev *hdev = vport->back; in hclge_set_rss_tuple() local
4881 ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw, in hclge_set_rss_tuple()
4882 &hdev->rss_cfg, nfc); in hclge_set_rss_tuple()
4884 dev_err(&hdev->pdev->dev, in hclge_set_rss_tuple()
4914 struct hclge_dev *hdev = vport->back; in hclge_get_tc_size() local
4916 return hdev->pf_rss_size_max; in hclge_get_tc_size()
4919 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev) in hclge_init_rss_tc_mode() argument
4921 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; in hclge_init_rss_tc_mode()
4922 struct hclge_vport *vport = hdev->vport; in hclge_init_rss_tc_mode()
4936 if (!(hdev->hw_tc_map & BIT(i))) in hclge_init_rss_tc_mode()
4945 dev_err(&hdev->pdev->dev, in hclge_init_rss_tc_mode()
4959 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, in hclge_init_rss_tc_mode()
4963 int hclge_rss_init_hw(struct hclge_dev *hdev) in hclge_rss_init_hw() argument
4965 u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl; in hclge_rss_init_hw()
4966 u8 *key = hdev->rss_cfg.rss_hash_key; in hclge_rss_init_hw()
4967 u8 hfunc = hdev->rss_cfg.rss_algo; in hclge_rss_init_hw()
4970 ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw, in hclge_rss_init_hw()
4975 ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key); in hclge_rss_init_hw()
4979 ret = hclge_comm_set_rss_input_tuple(&hdev->hw.hw, &hdev->rss_cfg); in hclge_rss_init_hw()
4983 return hclge_init_rss_tc_mode(hdev); in hclge_rss_init_hw()
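hclge_rss_init_hw() shows a strict ordering: indirection table, then hash algorithm and key, then input-tuple selection, then per-TC mode, each step short-circuiting on error. A stub sketch of that chain:

#include <stdio.h>

static int set_indir_table(void) { return 0; } /* map hash buckets to queues */
static int set_algo_key(void)    { return 0; } /* hash function and key */
static int set_input_tuple(void) { return 0; } /* which header fields hash */
static int set_tc_mode(void)     { return 0; } /* per-TC offset/size */

static int rss_init_hw(void)
{
    int ret;

    ret = set_indir_table();
    if (ret)
        return ret;
    ret = set_algo_key();
    if (ret)
        return ret;
    ret = set_input_tuple();
    if (ret)
        return ret;
    return set_tc_mode();
}

int main(void)
{
    printf("rss init %s\n", rss_init_hw() ? "failed" : "ok");
    return 0;
}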
4990 struct hclge_dev *hdev = vport->back; in hclge_bind_ring_with_vector() local
5027 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_bind_ring_with_vector()
5029 dev_err(&hdev->pdev->dev, in hclge_bind_ring_with_vector()
5053 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_bind_ring_with_vector()
5055 dev_err(&hdev->pdev->dev, in hclge_bind_ring_with_vector()
5068 struct hclge_dev *hdev = vport->back; in hclge_map_ring_to_vector() local
5071 vector_id = hclge_get_vector_index(hdev, vector); in hclge_map_ring_to_vector()
5073 dev_err(&hdev->pdev->dev, in hclge_map_ring_to_vector()
5085 struct hclge_dev *hdev = vport->back; in hclge_unmap_ring_frm_vector() local
5088 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_unmap_ring_frm_vector()
5091 vector_id = hclge_get_vector_index(hdev, vector); in hclge_unmap_ring_frm_vector()
5107 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id, in hclge_cmd_set_promisc_mode() argument
5110 struct hclge_vport *vport = &hdev->vport[vf_id]; in hclge_cmd_set_promisc_mode()
5143 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cmd_set_promisc_mode()
5145 dev_err(&hdev->pdev->dev, in hclge_cmd_set_promisc_mode()
5163 struct hclge_dev *hdev = vport->back; in hclge_set_promisc_mode() local
5170 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_promisc_mode()
5184 static void hclge_sync_fd_state(struct hclge_dev *hdev) in hclge_sync_fd_state() argument
5186 if (hlist_empty(&hdev->fd_rule_list)) in hclge_sync_fd_state()
5187 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_sync_fd_state()
5190 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location) in hclge_fd_inc_rule_cnt() argument
5192 if (!test_bit(location, hdev->fd_bmap)) { in hclge_fd_inc_rule_cnt()
5193 set_bit(location, hdev->fd_bmap); in hclge_fd_inc_rule_cnt()
5194 hdev->hclge_fd_rule_num++; in hclge_fd_inc_rule_cnt()
5198 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location) in hclge_fd_dec_rule_cnt() argument
5200 if (test_bit(location, hdev->fd_bmap)) { in hclge_fd_dec_rule_cnt()
5201 clear_bit(location, hdev->fd_bmap); in hclge_fd_dec_rule_cnt()
5202 hdev->hclge_fd_rule_num--; in hclge_fd_dec_rule_cnt()
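The inc/dec helpers above keep hclge_fd_rule_num consistent with fd_bmap by changing the counter only when a bit actually flips, so adding or deleting the same location twice is harmless. A self-contained model (bit helpers and sizes are ours):

#include <stdbool.h>
#include <stdio.h>

#define MAX_RULES 128
#define BITS_PER_LONG (8 * sizeof(unsigned long))

static unsigned long fd_bmap[(MAX_RULES + BITS_PER_LONG - 1) / BITS_PER_LONG];
static unsigned int rule_num;

static bool test_bit_(unsigned int n)
{
    return fd_bmap[n / BITS_PER_LONG] >> (n % BITS_PER_LONG) & 1;
}
static void set_bit_(unsigned int n)
{
    fd_bmap[n / BITS_PER_LONG] |= 1ul << (n % BITS_PER_LONG);
}
static void clear_bit_(unsigned int n)
{
    fd_bmap[n / BITS_PER_LONG] &= ~(1ul << (n % BITS_PER_LONG));
}

static void inc_rule_cnt(unsigned int loc)
{
    if (!test_bit_(loc)) {                 /* count only on a real flip */
        set_bit_(loc);
        rule_num++;
    }
}

static void dec_rule_cnt(unsigned int loc)
{
    if (test_bit_(loc)) {
        clear_bit_(loc);
        rule_num--;
    }
}

int main(void)
{
    inc_rule_cnt(5);
    inc_rule_cnt(5);                       /* idempotent: counter stays at 1 */
    dec_rule_cnt(5);
    printf("rules active: %u\n", rule_num);
    return 0;
}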
5206 static void hclge_fd_free_node(struct hclge_dev *hdev, in hclge_fd_free_node() argument
5211 hclge_sync_fd_state(hdev); in hclge_fd_free_node()
5214 static void hclge_update_fd_rule_node(struct hclge_dev *hdev, in hclge_update_fd_rule_node() argument
5237 hclge_fd_dec_rule_cnt(hdev, old_rule->location); in hclge_update_fd_rule_node()
5238 hclge_fd_free_node(hdev, old_rule); in hclge_update_fd_rule_node()
5253 hclge_fd_dec_rule_cnt(hdev, old_rule->location); in hclge_update_fd_rule_node()
5254 hclge_fd_free_node(hdev, old_rule); in hclge_update_fd_rule_node()
5296 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev, in hclge_fd_set_user_def_cmd() argument
5325 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_fd_set_user_def_cmd()
5327 dev_err(&hdev->pdev->dev, in hclge_fd_set_user_def_cmd()
5332 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked) in hclge_sync_fd_user_def_cfg() argument
5336 if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state)) in hclge_sync_fd_user_def_cfg()
5340 spin_lock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_user_def_cfg()
5342 ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg); in hclge_sync_fd_user_def_cfg()
5344 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_sync_fd_user_def_cfg()
5347 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_user_def_cfg()
5350 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev, in hclge_fd_check_user_def_refcnt() argument
5353 struct hlist_head *hlist = &hdev->fd_rule_list; in hclge_fd_check_user_def_refcnt()
5363 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; in hclge_fd_check_user_def_refcnt()
5380 dev_err(&hdev->pdev->dev, in hclge_fd_check_user_def_refcnt()
5386 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev, in hclge_fd_inc_user_def_refcnt() argument
5395 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; in hclge_fd_inc_user_def_refcnt()
5398 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_fd_inc_user_def_refcnt()
5403 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev, in hclge_fd_dec_user_def_refcnt() argument
5412 cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1]; in hclge_fd_dec_user_def_refcnt()
5419 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_fd_dec_user_def_refcnt()
5423 static void hclge_update_fd_list(struct hclge_dev *hdev, in hclge_update_fd_list() argument
5427 struct hlist_head *hlist = &hdev->fd_rule_list; in hclge_update_fd_list()
5432 hclge_fd_dec_user_def_refcnt(hdev, fd_rule); in hclge_update_fd_list()
5434 hclge_fd_inc_user_def_refcnt(hdev, new_rule); in hclge_update_fd_list()
5435 hclge_sync_fd_user_def_cfg(hdev, true); in hclge_update_fd_list()
5437 hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state); in hclge_update_fd_list()
5445 dev_warn(&hdev->pdev->dev, in hclge_update_fd_list()
5451 hclge_fd_inc_user_def_refcnt(hdev, new_rule); in hclge_update_fd_list()
5452 hclge_sync_fd_user_def_cfg(hdev, true); in hclge_update_fd_list()
5455 hclge_fd_inc_rule_cnt(hdev, new_rule->location); in hclge_update_fd_list()
5458 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_update_fd_list()
5459 hclge_task_schedule(hdev, 0); in hclge_update_fd_list()
5463 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode) in hclge_get_fd_mode() argument
5473 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_fd_mode()
5475 dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret); in hclge_get_fd_mode()
5484 static int hclge_get_fd_allocation(struct hclge_dev *hdev, in hclge_get_fd_allocation() argument
5498 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_fd_allocation()
5500 dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n", in hclge_get_fd_allocation()
5513 static int hclge_set_fd_key_config(struct hclge_dev *hdev, in hclge_set_fd_key_config() argument
5524 stage = &hdev->fd_cfg.key_cfg[stage_num]; in hclge_set_fd_key_config()
5534 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_fd_key_config()
5536 dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret); in hclge_set_fd_key_config()
5541 static void hclge_fd_disable_user_def(struct hclge_dev *hdev) in hclge_fd_disable_user_def() argument
5543 struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg; in hclge_fd_disable_user_def()
5545 spin_lock_bh(&hdev->fd_rule_lock); in hclge_fd_disable_user_def()
5546 memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg)); in hclge_fd_disable_user_def()
5547 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_fd_disable_user_def()
5549 hclge_fd_set_user_def_cmd(hdev, cfg); in hclge_fd_disable_user_def()
5552 static int hclge_init_fd_config(struct hclge_dev *hdev) in hclge_init_fd_config() argument
5558 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_init_fd_config()
5561 ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode); in hclge_init_fd_config()
5565 switch (hdev->fd_cfg.fd_mode) { in hclge_init_fd_config()
5567 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH; in hclge_init_fd_config()
5570 hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2; in hclge_init_fd_config()
5573 dev_err(&hdev->pdev->dev, in hclge_init_fd_config()
5575 hdev->fd_cfg.fd_mode); in hclge_init_fd_config()
5579 key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1]; in hclge_init_fd_config()
5592 if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) { in hclge_init_fd_config()
5595 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) in hclge_init_fd_config()
5604 ret = hclge_get_fd_allocation(hdev, in hclge_init_fd_config()
5605 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1], in hclge_init_fd_config()
5606 &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2], in hclge_init_fd_config()
5607 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1], in hclge_init_fd_config()
5608 &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]); in hclge_init_fd_config()
5612 return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1); in hclge_init_fd_config()
5615 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x, in hclge_fd_tcam_config() argument
5648 ret = hclge_cmd_send(&hdev->hw, desc, 3); in hclge_fd_tcam_config()
5650 dev_err(&hdev->pdev->dev, in hclge_fd_tcam_config()
5657 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc, in hclge_fd_ad_config() argument
5660 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); in hclge_fd_ad_config()
5696 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_fd_ad_config()
5698 dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret); in hclge_fd_ad_config()
5826 static int hclge_config_key(struct hclge_dev *hdev, u8 stage, in hclge_config_key() argument
5829 struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage]; in hclge_config_key()
5857 meta_data_region = hdev->fd_cfg.max_key_length / 8 - in hclge_config_key()
5865 ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y, in hclge_config_key()
5868 dev_err(&hdev->pdev->dev, in hclge_config_key()
5874 ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x, in hclge_config_key()
5877 dev_err(&hdev->pdev->dev, in hclge_config_key()
5883 static int hclge_config_action(struct hclge_dev *hdev, u8 stage, in hclge_config_action() argument
5886 struct hclge_vport *vport = hdev->vport; in hclge_config_action()
5906 if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) { in hclge_config_action()
5909 hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]; in hclge_config_action()
5921 return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data); in hclge_config_action()
6056 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev, in hclge_fd_check_ext_tuple() argument
6062 dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n"); in hclge_fd_check_ext_tuple()
6071 dev_err(&hdev->pdev->dev, in hclge_fd_check_ext_tuple()
6081 if (hdev->fd_cfg.fd_mode != in hclge_fd_check_ext_tuple()
6083 dev_err(&hdev->pdev->dev, in hclge_fd_check_ext_tuple()
6129 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev, in hclge_fd_parse_user_def_field() argument
6134 u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active; in hclge_fd_parse_user_def_field()
6154 dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n"); in hclge_fd_parse_user_def_field()
6159 dev_err(&hdev->pdev->dev, in hclge_fd_parse_user_def_field()
6166 dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n"); in hclge_fd_parse_user_def_field()
6172 dev_err(&hdev->pdev->dev, in hclge_fd_parse_user_def_field()
6185 static int hclge_fd_check_spec(struct hclge_dev *hdev, in hclge_fd_check_spec() argument
6193 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_fd_check_spec()
6194 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6197 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1); in hclge_fd_check_spec()
6201 ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info); in hclge_fd_check_spec()
6228 if (hdev->fd_cfg.fd_mode != in hclge_fd_check_spec()
6230 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6239 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6246 dev_err(&hdev->pdev->dev, in hclge_fd_check_spec()
6252 return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple); in hclge_fd_check_spec()
6447 static int hclge_fd_config_rule(struct hclge_dev *hdev, in hclge_fd_config_rule() argument
6452 ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); in hclge_fd_config_rule()
6456 return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule); in hclge_fd_config_rule()
6459 static int hclge_add_fd_entry_common(struct hclge_dev *hdev, in hclge_add_fd_entry_common() argument
6464 spin_lock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_common()
6466 if (hdev->fd_active_type != rule->rule_type && in hclge_add_fd_entry_common()
6467 (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || in hclge_add_fd_entry_common()
6468 hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) { in hclge_add_fd_entry_common()
6469 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry_common()
6471 rule->rule_type, hdev->fd_active_type); in hclge_add_fd_entry_common()
6472 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_common()
6476 ret = hclge_fd_check_user_def_refcnt(hdev, rule); in hclge_add_fd_entry_common()
6480 ret = hclge_clear_arfs_rules(hdev); in hclge_add_fd_entry_common()
6484 ret = hclge_fd_config_rule(hdev, rule); in hclge_add_fd_entry_common()
6489 hdev->fd_active_type = rule->rule_type; in hclge_add_fd_entry_common()
6490 hclge_update_fd_list(hdev, rule->state, rule->location, rule); in hclge_add_fd_entry_common()
6493 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_common()
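hclge_add_fd_entry_common() enforces ownership of the flow-director table: ethtool (EP) and tc-flower rules exclude each other, while an aRFS-populated table can be cleared to make room. A rough model of that exclusion; the enum names and the exact eviction behaviour are our reading of the fragments above:

#include <stdbool.h>
#include <stdio.h>

enum fd_type { FD_NONE, FD_ARFS, FD_EP, FD_TC_FLOWER };

static enum fd_type active = FD_ARFS;

static bool add_rule(enum fd_type new_type)
{
    if (active != new_type &&
        (active == FD_EP || active == FD_TC_FLOWER)) {
        printf("rejected: type %d while type %d active\n", new_type, active);
        return false;
    }
    if (active == FD_ARFS && new_type != FD_ARFS)
        printf("clearing aRFS rules first\n");
    active = new_type;                     /* table now owned by this type */
    return true;
}

int main(void)
{
    add_rule(FD_EP);                       /* ok: evicts aRFS rules */
    add_rule(FD_TC_FLOWER);                /* rejected: EP rules active */
    return 0;
}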
6500 struct hclge_dev *hdev = vport->back; in hclge_is_cls_flower_active() local
6502 return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE; in hclge_is_cls_flower_active()
6505 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie, in hclge_fd_parse_ring_cookie() argument
6508 struct hclge_vport *vport = hdev->vport; in hclge_fd_parse_ring_cookie()
6520 if (vf > hdev->num_req_vfs) { in hclge_fd_parse_ring_cookie()
6521 dev_err(&hdev->pdev->dev, in hclge_fd_parse_ring_cookie()
6523 vf - 1U, hdev->num_req_vfs); in hclge_fd_parse_ring_cookie()
6527 *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; in hclge_fd_parse_ring_cookie()
6528 tqps = hdev->vport[vf].nic.kinfo.num_tqps; in hclge_fd_parse_ring_cookie()
6531 dev_err(&hdev->pdev->dev, in hclge_fd_parse_ring_cookie()
6548 struct hclge_dev *hdev = vport->back; in hclge_add_fd_entry() local
6557 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { in hclge_add_fd_entry()
6558 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry()
6563 if (!hdev->fd_en) { in hclge_add_fd_entry()
6564 dev_err(&hdev->pdev->dev, in hclge_add_fd_entry()
6571 ret = hclge_fd_check_spec(hdev, fs, &unused, &info); in hclge_add_fd_entry()
6575 ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id, in hclge_add_fd_entry()
6598 ret = hclge_add_fd_entry_common(hdev, rule); in hclge_add_fd_entry()
6609 struct hclge_dev *hdev = vport->back; in hclge_del_fd_entry() local
6613 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_del_fd_entry()
6618 if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) in hclge_del_fd_entry()
6621 spin_lock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
6622 if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE || in hclge_del_fd_entry()
6623 !test_bit(fs->location, hdev->fd_bmap)) { in hclge_del_fd_entry()
6624 dev_err(&hdev->pdev->dev, in hclge_del_fd_entry()
6626 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
6630 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location, in hclge_del_fd_entry()
6635 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL); in hclge_del_fd_entry()
6638 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_fd_entry()
6642 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev, in hclge_clear_fd_rules_in_list() argument
6649 spin_lock_bh(&hdev->fd_rule_lock); in hclge_clear_fd_rules_in_list()
6651 for_each_set_bit(location, hdev->fd_bmap, in hclge_clear_fd_rules_in_list()
6652 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) in hclge_clear_fd_rules_in_list()
6653 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location, in hclge_clear_fd_rules_in_list()
6657 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, in hclge_clear_fd_rules_in_list()
6662 hdev->fd_active_type = HCLGE_FD_RULE_NONE; in hclge_clear_fd_rules_in_list()
6663 hdev->hclge_fd_rule_num = 0; in hclge_clear_fd_rules_in_list()
6664 bitmap_zero(hdev->fd_bmap, in hclge_clear_fd_rules_in_list()
6665 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); in hclge_clear_fd_rules_in_list()
6668 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_clear_fd_rules_in_list()
6671 static void hclge_del_all_fd_entries(struct hclge_dev *hdev) in hclge_del_all_fd_entries() argument
6673 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_del_all_fd_entries()
6676 hclge_clear_fd_rules_in_list(hdev, true); in hclge_del_all_fd_entries()
6677 hclge_fd_disable_user_def(hdev); in hclge_del_all_fd_entries()
6683 struct hclge_dev *hdev = vport->back; in hclge_restore_fd_entries() local
6691 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_restore_fd_entries()
6695 if (!hdev->fd_en) in hclge_restore_fd_entries()
6698 spin_lock_bh(&hdev->fd_rule_lock); in hclge_restore_fd_entries()
6699 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_restore_fd_entries()
6703 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_restore_fd_entries()
6704 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_restore_fd_entries()
6713 struct hclge_dev *hdev = vport->back; in hclge_get_fd_rule_cnt() local
6715 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle)) in hclge_get_fd_rule_cnt()
6718 cmd->rule_cnt = hdev->hclge_fd_rule_num; in hclge_get_fd_rule_cnt()
6719 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; in hclge_get_fd_rule_cnt()
6892 static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev, in hclge_get_fd_rule() argument
6898 hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) { in hclge_get_fd_rule()
6928 struct hclge_dev *hdev = vport->back; in hclge_get_fd_rule_info() local
6931 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_get_fd_rule_info()
6936 spin_lock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6938 rule = hclge_get_fd_rule(hdev, fs->location); in hclge_get_fd_rule_info()
6940 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6980 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_fd_rule_info()
6989 struct hclge_dev *hdev = vport->back; in hclge_get_all_rules() local
6994 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_get_all_rules()
6997 cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]; in hclge_get_all_rules()
6999 spin_lock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
7001 &hdev->fd_rule_list, rule_node) { in hclge_get_all_rules()
7003 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
7014 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_get_all_rules()
7046 hclge_fd_search_flow_keys(struct hclge_dev *hdev, in hclge_fd_search_flow_keys() argument
7052 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_fd_search_flow_keys()
7090 struct hclge_dev *hdev = vport->back; in hclge_add_fd_entry_by_arfs() local
7094 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_add_fd_entry_by_arfs()
7100 spin_lock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7101 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE && in hclge_add_fd_entry_by_arfs()
7102 hdev->fd_active_type != HCLGE_FD_RULE_NONE) { in hclge_add_fd_entry_by_arfs()
7103 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7114 rule = hclge_fd_search_flow_keys(hdev, &new_tuples); in hclge_add_fd_entry_by_arfs()
7116 bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM); in hclge_add_fd_entry_by_arfs()
7117 if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_add_fd_entry_by_arfs()
7118 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7124 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7132 hclge_update_fd_list(hdev, rule->state, rule->location, rule); in hclge_add_fd_entry_by_arfs()
7133 hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE; in hclge_add_fd_entry_by_arfs()
7137 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_add_fd_entry_by_arfs()
7138 hclge_task_schedule(hdev, 0); in hclge_add_fd_entry_by_arfs()
7140 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_add_fd_entry_by_arfs()
7144 static void hclge_rfs_filter_expire(struct hclge_dev *hdev) in hclge_rfs_filter_expire() argument
7147 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_rfs_filter_expire()
7151 spin_lock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
7152 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) { in hclge_rfs_filter_expire()
7153 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
7156 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_rfs_filter_expire()
7162 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_rfs_filter_expire()
7165 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_rfs_filter_expire()
7170 static int hclge_clear_arfs_rules(struct hclge_dev *hdev) in hclge_clear_arfs_rules() argument
7177 if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) in hclge_clear_arfs_rules()
7180 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_clear_arfs_rules()
7184 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, in hclge_clear_arfs_rules()
7190 hclge_fd_dec_rule_cnt(hdev, rule->location); in hclge_clear_arfs_rules()
7198 hclge_sync_fd_state(hdev); in hclge_clear_arfs_rules()
7326 static int hclge_parse_cls_flower(struct hclge_dev *hdev, in hclge_parse_cls_flower() argument
7343 dev_err(&hdev->pdev->dev, "unsupported key set: %#llx\n", in hclge_parse_cls_flower()
7361 static int hclge_check_cls_flower(struct hclge_dev *hdev, in hclge_check_cls_flower() argument
7366 if (tc < 0 || tc > hdev->tc_max) { in hclge_check_cls_flower()
7367 dev_err(&hdev->pdev->dev, "invalid traffic class\n"); in hclge_check_cls_flower()
7372 prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) { in hclge_check_cls_flower()
7373 dev_err(&hdev->pdev->dev, in hclge_check_cls_flower()
7375 prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]); in hclge_check_cls_flower()
7379 if (test_bit(prio - 1, hdev->fd_bmap)) { in hclge_check_cls_flower()
7380 dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio); in hclge_check_cls_flower()
7391 struct hclge_dev *hdev = vport->back; in hclge_add_cls_flower() local
7395 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) { in hclge_add_cls_flower()
7396 dev_err(&hdev->pdev->dev, in hclge_add_cls_flower()
7401 ret = hclge_check_cls_flower(hdev, cls_flower, tc); in hclge_add_cls_flower()
7403 dev_err(&hdev->pdev->dev, in hclge_add_cls_flower()
7412 ret = hclge_parse_cls_flower(hdev, cls_flower, rule); in hclge_add_cls_flower()
7425 ret = hclge_add_fd_entry_common(hdev, rule); in hclge_add_cls_flower()
7432 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev, in hclge_find_cls_flower() argument
7438 hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { in hclge_find_cls_flower()
7450 struct hclge_dev *hdev = vport->back; in hclge_del_cls_flower() local
7454 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_del_cls_flower()
7457 spin_lock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7459 rule = hclge_find_cls_flower(hdev, cls_flower->cookie); in hclge_del_cls_flower()
7461 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7465 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location, in hclge_del_cls_flower()
7472 hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL); in hclge_del_cls_flower()
7473 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_del_cls_flower()
7474 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7478 hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL); in hclge_del_cls_flower()
7479 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_del_cls_flower()
7484 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist) in hclge_sync_fd_list() argument
7490 if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state)) in hclge_sync_fd_list()
7493 spin_lock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_list()
7498 ret = hclge_fd_config_rule(hdev, rule); in hclge_sync_fd_list()
7504 ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, in hclge_sync_fd_list()
7508 hclge_fd_dec_rule_cnt(hdev, rule->location); in hclge_sync_fd_list()
7509 hclge_fd_free_node(hdev, rule); in hclge_sync_fd_list()
7518 set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state); in hclge_sync_fd_list()
7520 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_sync_fd_list()
7523 static void hclge_sync_fd_table(struct hclge_dev *hdev) in hclge_sync_fd_table() argument
7525 if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) in hclge_sync_fd_table()
7528 if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) { in hclge_sync_fd_table()
7529 bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE; in hclge_sync_fd_table()
7531 hclge_clear_fd_rules_in_list(hdev, clear_list); in hclge_sync_fd_table()
7534 hclge_sync_fd_user_def_cfg(hdev, false); in hclge_sync_fd_table()
7536 hclge_sync_fd_list(hdev, &hdev->fd_rule_list); in hclge_sync_fd_table()
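hclge_sync_fd_list()/hclge_sync_fd_table() implement deferred sync: rules are flagged TO_ADD or TO_DEL under fd_rule_lock and the service task later pushes them to hardware, re-setting HCLGE_STATE_FD_TBL_CHANGED when a step fails. A stub model of that state walk (state names follow the listing; the hw calls are placeholders):

#include <stdio.h>

enum fd_state { FD_TO_ADD, FD_TO_DEL, FD_ACTIVE, FD_DELETED };

struct fd_rule { int location; enum fd_state state; };

static int hw_config_rule(struct fd_rule *r) { (void)r; return 0; }
static int hw_clear_tcam(int loc)            { (void)loc; return 0; }

static void sync_fd_list(struct fd_rule *rules, int n)
{
    for (int i = 0; i < n; i++) {
        switch (rules[i].state) {
        case FD_TO_ADD:
            if (!hw_config_rule(&rules[i]))
                rules[i].state = FD_ACTIVE;
            break;
        case FD_TO_DEL:
            if (!hw_clear_tcam(rules[i].location))
                rules[i].state = FD_DELETED;  /* node freed in the driver */
            break;
        default:
            break;                            /* already in sync */
        }
    }
}

int main(void)
{
    struct fd_rule rules[] = { { 0, FD_TO_ADD }, { 1, FD_TO_DEL } };

    sync_fd_list(rules, 2);
    printf("rule 0 state=%d, rule 1 state=%d\n",
           rules[0].state, rules[1].state);
    return 0;
}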
7542 struct hclge_dev *hdev = vport->back; in hclge_get_hw_reset_stat() local
7544 return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || in hclge_get_hw_reset_stat()
7545 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); in hclge_get_hw_reset_stat()
7551 struct hclge_dev *hdev = vport->back; in hclge_get_cmdq_stat() local
7553 return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_get_cmdq_stat()
7559 struct hclge_dev *hdev = vport->back; in hclge_ae_dev_resetting() local
7561 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_ae_dev_resetting()
7567 struct hclge_dev *hdev = vport->back; in hclge_ae_dev_reset_cnt() local
7569 return hdev->rst_stats.hw_reset_done_cnt; in hclge_ae_dev_reset_cnt()
7575 struct hclge_dev *hdev = vport->back; in hclge_enable_fd() local
7577 hdev->fd_en = enable; in hclge_enable_fd()
7580 set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state); in hclge_enable_fd()
7584 hclge_task_schedule(hdev, 0); in hclge_enable_fd()
7587 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) in hclge_cfg_mac_mode() argument
7614 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_mac_mode()
7616 dev_err(&hdev->pdev->dev, in hclge_cfg_mac_mode()
7622 hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN, in hclge_cfg_mac_mode()
7626 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, in hclge_config_switch_param() argument
7643 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_switch_param()
7645 dev_err(&hdev->pdev->dev, in hclge_config_switch_param()
7655 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_config_switch_param()
7657 dev_err(&hdev->pdev->dev, in hclge_config_switch_param()
7662 static void hclge_phy_link_status_wait(struct hclge_dev *hdev, in hclge_phy_link_status_wait() argument
7667 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_phy_link_status_wait()
7674 dev_err(&hdev->pdev->dev, in hclge_phy_link_status_wait()
7686 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, in hclge_mac_link_status_wait() argument
7694 ret = hclge_get_mac_link_status(hdev, &link_status); in hclge_mac_link_status_wait()
7705 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, in hclge_mac_phy_link_status_wait() argument
7715 hclge_phy_link_status_wait(hdev, link_ret); in hclge_mac_phy_link_status_wait()
7717 return hclge_mac_link_status_wait(hdev, link_ret, in hclge_mac_phy_link_status_wait()
7721 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) in hclge_set_app_loopback() argument
7731 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_app_loopback()
7733 dev_err(&hdev->pdev->dev, in hclge_set_app_loopback()
7748 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_app_loopback()
7750 dev_err(&hdev->pdev->dev, in hclge_set_app_loopback()
7755 static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en, in hclge_cfg_common_loopback_cmd_send() argument
7777 dev_err(&hdev->pdev->dev, in hclge_cfg_common_loopback_cmd_send()
7786 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_common_loopback_cmd_send()
7788 dev_err(&hdev->pdev->dev, in hclge_cfg_common_loopback_cmd_send()
7795 static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev) in hclge_cfg_common_loopback_wait() argument
7811 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_cfg_common_loopback_wait()
7813 dev_err(&hdev->pdev->dev, in hclge_cfg_common_loopback_wait()
7822 dev_err(&hdev->pdev->dev, "wait loopback timeout\n"); in hclge_cfg_common_loopback_wait()
7825 dev_err(&hdev->pdev->dev, "failed to do loopback test\n"); in hclge_cfg_common_loopback_wait()
7832 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en, in hclge_cfg_common_loopback() argument
7837 ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode); in hclge_cfg_common_loopback()
7841 return hclge_cfg_common_loopback_wait(hdev); in hclge_cfg_common_loopback()
7844 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en, in hclge_set_common_loopback() argument
7849 ret = hclge_cfg_common_loopback(hdev, en, loop_mode); in hclge_set_common_loopback()
7853 hclge_cfg_mac_mode(hdev, en); in hclge_set_common_loopback()
7855 ret = hclge_mac_phy_link_status_wait(hdev, en, false); in hclge_set_common_loopback()
7857 dev_err(&hdev->pdev->dev, in hclge_set_common_loopback()
7863 static int hclge_enable_phy_loopback(struct hclge_dev *hdev, in hclge_enable_phy_loopback() argument
7881 static int hclge_disable_phy_loopback(struct hclge_dev *hdev, in hclge_disable_phy_loopback() argument
7893 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en) in hclge_set_phy_loopback() argument
7895 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_phy_loopback()
7899 if (hnae3_dev_phy_imp_supported(hdev)) in hclge_set_phy_loopback()
7900 return hclge_set_common_loopback(hdev, en, in hclge_set_phy_loopback()
7906 ret = hclge_enable_phy_loopback(hdev, phydev); in hclge_set_phy_loopback()
7908 ret = hclge_disable_phy_loopback(hdev, phydev); in hclge_set_phy_loopback()
7910 dev_err(&hdev->pdev->dev, in hclge_set_phy_loopback()
7915 hclge_cfg_mac_mode(hdev, en); in hclge_set_phy_loopback()
7917 ret = hclge_mac_phy_link_status_wait(hdev, en, true); in hclge_set_phy_loopback()
7919 dev_err(&hdev->pdev->dev, in hclge_set_phy_loopback()
7925 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id, in hclge_tqp_enable_cmd_send() argument
7938 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_tqp_enable_cmd_send()
7944 struct hclge_dev *hdev = vport->back; in hclge_tqp_enable() local
7949 ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable); in hclge_tqp_enable()
7960 struct hclge_dev *hdev = vport->back; in hclge_set_loopback() local
7968 if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { in hclge_set_loopback()
7971 ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param, in hclge_set_loopback()
7979 ret = hclge_set_app_loopback(hdev, en); in hclge_set_loopback()
7983 ret = hclge_set_common_loopback(hdev, en, loop_mode); in hclge_set_loopback()
7986 ret = hclge_set_phy_loopback(hdev, en); in hclge_set_loopback()
7992 dev_err(&hdev->pdev->dev, in hclge_set_loopback()
8002 dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n", in hclge_set_loopback()
8008 static int hclge_set_default_loopback(struct hclge_dev *hdev) in hclge_set_default_loopback() argument
8012 ret = hclge_set_app_loopback(hdev, false); in hclge_set_default_loopback()
8016 ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES); in hclge_set_default_loopback()
8020 return hclge_cfg_common_loopback(hdev, false, in hclge_set_default_loopback()
8024 static void hclge_flush_link_update(struct hclge_dev *hdev) in hclge_flush_link_update() argument
8028 unsigned long last = hdev->serv_processed_cnt; in hclge_flush_link_update()
8031 while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) && in hclge_flush_link_update()
8033 last == hdev->serv_processed_cnt) in hclge_flush_link_update()
8040 struct hclge_dev *hdev = vport->back; in hclge_set_timer_task() local
8043 hclge_task_schedule(hdev, 0); in hclge_set_timer_task()
8046 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_set_timer_task()
8049 hclge_flush_link_update(hdev); in hclge_set_timer_task()
8056 struct hclge_dev *hdev = vport->back; in hclge_ae_start() local
8059 hclge_cfg_mac_mode(hdev, true); in hclge_ae_start()
8060 clear_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_ae_start()
8061 hdev->hw.mac.link = 0; in hclge_ae_start()
8066 hclge_mac_start_phy(hdev); in hclge_ae_start()
8074 struct hclge_dev *hdev = vport->back; in hclge_ae_stop() local
8076 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_ae_stop()
8077 spin_lock_bh(&hdev->fd_rule_lock); in hclge_ae_stop()
8078 hclge_clear_arfs_rules(hdev); in hclge_ae_stop()
8079 spin_unlock_bh(&hdev->fd_rule_lock); in hclge_ae_stop()
8084 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { in hclge_ae_stop()
8085 hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE, in hclge_ae_stop()
8087 if (hdev->reset_type != HNAE3_FUNC_RESET && in hclge_ae_stop()
8088 hdev->reset_type != HNAE3_FLR_RESET) { in hclge_ae_stop()
8089 hclge_mac_stop_phy(hdev); in hclge_ae_stop()
8090 hclge_update_link_status(hdev); in hclge_ae_stop()
8097 hclge_config_mac_tnl_int(hdev, false); in hclge_ae_stop()
8100 hclge_cfg_mac_mode(hdev, false); in hclge_ae_stop()
8102 hclge_mac_stop_phy(hdev); in hclge_ae_stop()
8106 hclge_update_link_status(hdev); in hclge_ae_stop()
8111 struct hclge_dev *hdev = vport->back; in hclge_vport_start() local
8119 if (test_bit(vport->vport_id, hdev->vport_config_block)) { in hclge_vport_start()
8124 hclge_restore_hw_table(hdev); in hclge_vport_start()
8128 clear_bit(vport->vport_id, hdev->vport_config_block); in hclge_vport_start()
8158 struct hclge_dev *hdev = vport->back; in hclge_get_mac_vlan_cmd_status() local
8161 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8174 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8182 dev_dbg(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8187 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8195 dev_dbg(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8200 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8206 dev_err(&hdev->pdev->dev, in hclge_get_mac_vlan_cmd_status()
8276 struct hclge_dev *hdev = vport->back; in hclge_remove_mac_vlan_tbl() local
8286 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_remove_mac_vlan_tbl()
8288 dev_err(&hdev->pdev->dev, in hclge_remove_mac_vlan_tbl()
8305 struct hclge_dev *hdev = vport->back; in hclge_lookup_mac_vlan_tbl() local
8323 ret = hclge_cmd_send(&hdev->hw, desc, 3); in hclge_lookup_mac_vlan_tbl()
8328 ret = hclge_cmd_send(&hdev->hw, desc, 1); in hclge_lookup_mac_vlan_tbl()
8331 dev_err(&hdev->pdev->dev, in hclge_lookup_mac_vlan_tbl()
8347 struct hclge_dev *hdev = vport->back; in hclge_add_mac_vlan_tbl() local
8361 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_add_mac_vlan_tbl()
8377 ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); in hclge_add_mac_vlan_tbl()
8387 dev_err(&hdev->pdev->dev, in hclge_add_mac_vlan_tbl()
8396 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, in hclge_set_umv_space() argument
8408 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_umv_space()
8410 dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n", in hclge_set_umv_space()
8420 static int hclge_init_umv_space(struct hclge_dev *hdev) in hclge_init_umv_space() argument
8425 ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size); in hclge_init_umv_space()
8429 if (allocated_size < hdev->wanted_umv_size) in hclge_init_umv_space()
8430 dev_warn(&hdev->pdev->dev, in hclge_init_umv_space()
8432 hdev->wanted_umv_size, allocated_size); in hclge_init_umv_space()
8434 hdev->max_umv_size = allocated_size; in hclge_init_umv_space()
8435 hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1); in hclge_init_umv_space()
8436 hdev->share_umv_size = hdev->priv_umv_size + in hclge_init_umv_space()
8437 hdev->max_umv_size % (hdev->num_alloc_vport + 1); in hclge_init_umv_space()
8439 if (hdev->ae_dev->dev_specs.mc_mac_size) in hclge_init_umv_space()
8440 set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps); in hclge_init_umv_space()
8445 static void hclge_reset_umv_space(struct hclge_dev *hdev) in hclge_reset_umv_space() argument
8450 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_umv_space()
8451 vport = &hdev->vport[i]; in hclge_reset_umv_space()
8455 mutex_lock(&hdev->vport_lock); in hclge_reset_umv_space()
8456 hdev->share_umv_size = hdev->priv_umv_size + in hclge_reset_umv_space()
8457 hdev->max_umv_size % (hdev->num_alloc_vport + 1); in hclge_reset_umv_space()
8458 mutex_unlock(&hdev->vport_lock); in hclge_reset_umv_space()
8460 hdev->used_mc_mac_num = 0; in hclge_reset_umv_space()
8465 struct hclge_dev *hdev = vport->back; in hclge_is_umv_space_full() local
8469 mutex_lock(&hdev->vport_lock); in hclge_is_umv_space_full()
8471 is_full = (vport->used_umv_num >= hdev->priv_umv_size && in hclge_is_umv_space_full()
8472 hdev->share_umv_size == 0); in hclge_is_umv_space_full()
8475 mutex_unlock(&hdev->vport_lock); in hclge_is_umv_space_full()
8482 struct hclge_dev *hdev = vport->back; in hclge_update_umv_space() local
8485 if (vport->used_umv_num > hdev->priv_umv_size) in hclge_update_umv_space()
8486 hdev->share_umv_size++; in hclge_update_umv_space()
8491 if (vport->used_umv_num >= hdev->priv_umv_size && in hclge_update_umv_space()
8492 hdev->share_umv_size > 0) in hclge_update_umv_space()
8493 hdev->share_umv_size--; in hclge_update_umv_space()
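hclge_update_umv_space() accounts unicast MAC (UMV) entries against two pools: each vport spends its private quota first and only then draws from the shared pool, and freeing returns entries in the reverse order. A small model with invented quota sizes:

#include <stdbool.h>
#include <stdio.h>

struct umv {
    unsigned int priv_size;                /* per-vport private quota */
    unsigned int share_size;               /* shared pool left */
    unsigned int used;                     /* this vport's used entries */
};

static bool umv_alloc(struct umv *u)
{
    if (u->used < u->priv_size) {          /* private quota available */
        u->used++;
        return true;
    }
    if (u->share_size) {                   /* spill into the shared pool */
        u->used++;
        u->share_size--;
        return true;
    }
    return false;                          /* table full for this vport */
}

static void umv_free(struct umv *u)
{
    if (!u->used)
        return;
    u->used--;
    if (u->used >= u->priv_size)           /* entry came from the shared pool */
        u->share_size++;
}

int main(void)
{
    struct umv u = { .priv_size = 2, .share_size = 1, .used = 0 };

    while (umv_alloc(&u))
        ;
    printf("used=%u share_left=%u\n", u.used, u.share_size);
    umv_free(&u);
    printf("after free: used=%u share_left=%u\n", u.used, u.share_size);
    return 0;
}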
8545 struct hclge_dev *hdev = vport->back; in hclge_update_mac_list() local
8570 dev_err(&hdev->pdev->dev, in hclge_update_mac_list()
8606 struct hclge_dev *hdev = vport->back; in hclge_add_uc_addr_common() local
8617 dev_err(&hdev->pdev->dev, in hclge_add_uc_addr_common()
8640 mutex_lock(&hdev->vport_lock); in hclge_add_uc_addr_common()
8645 mutex_unlock(&hdev->vport_lock); in hclge_add_uc_addr_common()
8648 mutex_unlock(&hdev->vport_lock); in hclge_add_uc_addr_common()
8651 dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n", in hclge_add_uc_addr_common()
8652 hdev->priv_umv_size); in hclge_add_uc_addr_common()
8677 struct hclge_dev *hdev = vport->back; in hclge_rm_uc_addr_common() local
8686 dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n", in hclge_rm_uc_addr_common()
8696 mutex_lock(&hdev->vport_lock); in hclge_rm_uc_addr_common()
8698 mutex_unlock(&hdev->vport_lock); in hclge_rm_uc_addr_common()
8718 struct hclge_dev *hdev = vport->back; in hclge_add_mc_addr_common() local
8727 dev_err(&hdev->pdev->dev, in hclge_add_mc_addr_common()
8736 if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) && in hclge_add_mc_addr_common()
8737 hdev->used_mc_mac_num >= in hclge_add_mc_addr_common()
8738 hdev->ae_dev->dev_specs.mc_mac_size) in hclge_add_mc_addr_common()
8755 hdev->used_mc_mac_num++; in hclge_add_mc_addr_common()
8763 dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n"); in hclge_add_mc_addr_common()
8782 struct hclge_dev *hdev = vport->back; in hclge_rm_mc_addr_common() local
8790 dev_dbg(&hdev->pdev->dev, in hclge_rm_mc_addr_common()
8809 hdev->used_mc_mac_num--; in hclge_rm_mc_addr_common()
9018 struct hclge_dev *hdev = vport->back; in hclge_need_sync_mac_table() local
9020 if (test_bit(vport->vport_id, hdev->vport_config_block)) in hclge_need_sync_mac_table()
9029 static void hclge_sync_mac_table(struct hclge_dev *hdev) in hclge_sync_mac_table() argument
9033 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_mac_table()
9034 struct hclge_vport *vport = &hdev->vport[i]; in hclge_sync_mac_table()
9099 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_all_mac_table() local
9113 set_bit(vport->vport_id, hdev->vport_config_block); in hclge_rm_vport_all_mac_table()
9135 struct hclge_dev *hdev = vport->back; in hclge_uninit_vport_mac_list() local
9163 dev_warn(&hdev->pdev->dev, in hclge_uninit_vport_mac_list()
9174 static void hclge_uninit_mac_table(struct hclge_dev *hdev) in hclge_uninit_mac_table() argument
9179 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_uninit_mac_table()
9180 vport = &hdev->vport[i]; in hclge_uninit_mac_table()
9186 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev, in hclge_get_mac_ethertype_cmd_status() argument
9197 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9209 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9214 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9219 dev_err(&hdev->pdev->dev, in hclge_get_mac_ethertype_cmd_status()
9233 struct hclge_dev *hdev = vport->back; in hclge_set_vf_mac() local
9235 vport = hclge_get_vf_vport(hdev, vf); in hclge_set_vf_mac()
9241 dev_info(&hdev->pdev->dev, in hclge_set_vf_mac()
9254 dev_info(&hdev->pdev->dev, in hclge_set_vf_mac()
9261 dev_info(&hdev->pdev->dev, in hclge_set_vf_mac()
9267 static int hclge_add_mgr_tbl(struct hclge_dev *hdev, in hclge_add_mgr_tbl() argument
9278 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_add_mgr_tbl()
9280 dev_err(&hdev->pdev->dev, in hclge_add_mgr_tbl()
9289 return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code); in hclge_add_mgr_tbl()
9292 static int init_mgr_tbl(struct hclge_dev *hdev) in init_mgr_tbl() argument
9298 ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]); in init_mgr_tbl()
9300 dev_err(&hdev->pdev->dev, in init_mgr_tbl()
9313 struct hclge_dev *hdev = vport->back; in hclge_get_mac_addr() local
9315 ether_addr_copy(p, hdev->hw.mac.mac_addr); in hclge_get_mac_addr()
9368 struct hclge_dev *hdev = vport->back; in hclge_set_mac_addr() local
9377 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
9383 ret = hclge_pause_addr_cfg(hdev, new_addr); in hclge_set_mac_addr()
9385 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
9392 old_addr = hdev->hw.mac.mac_addr; in hclge_set_mac_addr()
9398 dev_err(&hdev->pdev->dev, in hclge_set_mac_addr()
9404 hclge_pause_addr_cfg(hdev, old_addr); in hclge_set_mac_addr()
9411 ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); in hclge_set_mac_addr()
9414 hclge_task_schedule(hdev, 0); in hclge_set_mac_addr()
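
hclge_set_mac_addr() above appears to order its updates so a failure can be undone: the pause (flow-control) address is programmed with the new MAC first, the table entry is swapped second, and the pause address is restored to the old MAC if the swap fails. A hedged reconstruction with stub firmware calls (cfg_pause_addr and mac_table_replace are stand-ins, not driver symbols):

        #include <string.h>

        #define ETH_ALEN 6

        struct mac_dev { unsigned char mac_addr[ETH_ALEN]; };

        /* Stubs standing in for the real firmware commands. */
        static int cfg_pause_addr(struct mac_dev *d, const unsigned char *a)
        { (void)d; (void)a; return 0; }
        static int mac_table_replace(struct mac_dev *d, const unsigned char *old_a,
                                     const unsigned char *new_a)
        { (void)d; (void)old_a; (void)new_a; return 0; }

        static int set_mac_addr(struct mac_dev *d, const unsigned char *new_addr)
        {
                unsigned char old[ETH_ALEN];
                int ret;

                /* 1. point the pause address at the new MAC */
                ret = cfg_pause_addr(d, new_addr);
                if (ret)
                        return ret;

                /* 2. swap the unicast table entry */
                memcpy(old, d->mac_addr, ETH_ALEN);
                ret = mac_table_replace(d, old, new_addr);
                if (ret) {
                        cfg_pause_addr(d, old);   /* 3. roll back on failure */
                        return ret;
                }

                memcpy(d->mac_addr, new_addr, ETH_ALEN);
                return 0;
        }
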
9419 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd) in hclge_mii_ioctl() argument
9423 if (!hnae3_dev_phy_imp_supported(hdev)) in hclge_mii_ioctl()
9428 data->phy_id = hdev->hw.mac.phy_addr; in hclge_mii_ioctl()
9432 data->val_out = hclge_read_phy_reg(hdev, data->reg_num); in hclge_mii_ioctl()
9436 return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); in hclge_mii_ioctl()
9446 struct hclge_dev *hdev = vport->back; in hclge_do_ioctl() local
9450 return hclge_ptp_get_cfg(hdev, ifr); in hclge_do_ioctl()
9452 return hclge_ptp_set_cfg(hdev, ifr); in hclge_do_ioctl()
9454 if (!hdev->hw.mac.phydev) in hclge_do_ioctl()
9455 return hclge_mii_ioctl(hdev, ifr, cmd); in hclge_do_ioctl()
9458 return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd); in hclge_do_ioctl()
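
hclge_mii_ioctl() above is the usual three-command MII dispatch: report the PHY address, read a register into val_out, or write val_in to a register; hclge_do_ioctl() only takes this path when no phydev is attached, otherwise it defers to phy_mii_ioctl(). A userspace model (the enum stands in for the real SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG commands, and the register helpers are stubs):

        #include <errno.h>
        #include <stdint.h>

        struct mii_data { uint16_t phy_id, reg_num, val_in, val_out; };

        enum { GMIIPHY, GMIIREG, SMIIREG };   /* stand-ins for SIOCxMIIxxx */

        static uint16_t phy_read(int reg) { (void)reg; return 0; }
        static int phy_write(int reg, uint16_t v) { (void)reg; (void)v; return 0; }

        static int mii_ioctl(struct mii_data *data, int cmd, uint16_t phy_addr)
        {
                switch (cmd) {
                case GMIIPHY:
                        data->phy_id = phy_addr;
                        return 0;
                case GMIIREG:
                        data->val_out = phy_read(data->reg_num);
                        return 0;
                case SMIIREG:
                        return phy_write(data->reg_num, data->val_in);
                default:
                        return -EOPNOTSUPP;
                }
        }
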
9461 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id, in hclge_set_port_vlan_filter_bypass() argument
9474 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_port_vlan_filter_bypass()
9476 dev_err(&hdev->pdev->dev, in hclge_set_port_vlan_filter_bypass()
9483 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, in hclge_set_vlan_filter_ctrl() argument
9496 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_filter_ctrl()
9498 dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n", in hclge_set_vlan_filter_ctrl()
9508 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_filter_ctrl()
9510 dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n", in hclge_set_vlan_filter_ctrl()
9518 struct hclge_dev *hdev = vport->back; in hclge_set_vport_vlan_filter() local
9519 struct hnae3_ae_dev *ae_dev = hdev->ae_dev; in hclge_set_vport_vlan_filter()
9522 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vport_vlan_filter()
9523 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_set_vport_vlan_filter()
9527 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_set_vport_vlan_filter()
9534 ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id, in hclge_set_vport_vlan_filter()
9540 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, in hclge_set_vport_vlan_filter()
9552 struct hclge_dev *hdev = vport->back; in hclge_need_enable_vport_vlan_filter() local
9569 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) in hclge_need_enable_vport_vlan_filter()
9600 struct hclge_dev *hdev = vport->back; in hclge_enable_vport_vlan_filter() local
9603 mutex_lock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9606 mutex_unlock(&hdev->vport_lock); in hclge_enable_vport_vlan_filter()
9618 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid, in hclge_set_vf_vlan_filter_cmd() argument
9649 ret = hclge_cmd_send(&hdev->hw, desc, 2); in hclge_set_vf_vlan_filter_cmd()
9651 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_filter_cmd()
9660 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid, in hclge_check_vf_vlan_cmd_status() argument
9673 set_bit(vfid, hdev->vf_vlan_full); in hclge_check_vf_vlan_cmd_status()
9674 dev_warn(&hdev->pdev->dev, in hclge_check_vf_vlan_cmd_status()
9679 dev_err(&hdev->pdev->dev, in hclge_check_vf_vlan_cmd_status()
9695 dev_err(&hdev->pdev->dev, in hclge_check_vf_vlan_cmd_status()
9703 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid, in hclge_set_vf_vlan_common() argument
9706 struct hclge_vport *vport = &hdev->vport[vfid]; in hclge_set_vf_vlan_common()
9715 if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) { in hclge_set_vf_vlan_common()
9717 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_common()
9724 ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc); in hclge_set_vf_vlan_common()
9728 return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc); in hclge_set_vf_vlan_common()
9731 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto, in hclge_set_port_vlan_filter() argument
9753 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_port_vlan_filter()
9755 dev_err(&hdev->pdev->dev, in hclge_set_port_vlan_filter()
9760 static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id, in hclge_need_update_port_vlan() argument
9765 test_bit(vport_id, hdev->vlan_table[vlan_id])) in hclge_need_update_port_vlan()
9768 if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) { in hclge_need_update_port_vlan()
9769 dev_warn(&hdev->pdev->dev, in hclge_need_update_port_vlan()
9776 !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) { in hclge_need_update_port_vlan()
9777 dev_warn(&hdev->pdev->dev, in hclge_need_update_port_vlan()
9786 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, in hclge_set_vlan_filter_hw() argument
9799 ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id); in hclge_set_vlan_filter_hw()
9801 dev_err(&hdev->pdev->dev, in hclge_set_vlan_filter_hw()
9807 if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill)) in hclge_set_vlan_filter_hw()
9810 for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) in hclge_set_vlan_filter_hw()
9814 ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id, in hclge_set_vlan_filter_hw()
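
hclge_set_vlan_filter_hw() above layers a port-level filter over the per-function one: hdev->vlan_table[vlan_id] is a bitmap of member vports, redundant adds and removals of absent entries are warned about and skipped, and the port entry is rewritten from whatever membership survives. A simplified model using a bool array in place of the kernel bitmap:

        #include <stdbool.h>
        #include <stdio.h>

        #define NVPORT 8
        #define NVLAN  4096

        static bool vlan_table[NVLAN][NVPORT];   /* per-VLAN vport membership */

        /* Mirrors hclge_need_update_port_vlan(): no-op transitions are
         * reported and skipped so the port filter is never rewritten
         * for a change that did not happen. */
        static bool need_update_port_vlan(int vport, int vid, bool is_kill)
        {
                if (!is_kill && vlan_table[vid][vport]) {
                        fprintf(stderr, "vport %d already in vlan %d\n", vport, vid);
                        return false;
                }
                if (is_kill && !vlan_table[vid][vport]) {
                        fprintf(stderr, "vport %d not in vlan %d\n", vport, vid);
                        return false;
                }
                vlan_table[vid][vport] = !is_kill;
                return true;
        }

        static int set_vlan_filter_hw(int vport, int vid, bool is_kill)
        {
                int members = 0;

                /* (the per-function VF filter command is sent first) */

                if (!need_update_port_vlan(vport, vid, is_kill))
                        return 0;

                for (int i = 0; i < NVPORT; i++)
                        members += vlan_table[vid][i];

                printf("port filter for vlan %d now covers %d vport(s)\n",
                       vid, members);
                return 0;
        }
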
9824 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_tx_offload_cfg() local
9856 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_tx_offload_cfg()
9858 dev_err(&hdev->pdev->dev, in hclge_set_vlan_tx_offload_cfg()
9869 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_rx_offload_cfg() local
9896 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_rx_offload_cfg()
9898 dev_err(&hdev->pdev->dev, in hclge_set_vlan_rx_offload_cfg()
9960 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev) in hclge_set_vlan_protocol_type() argument
9970 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type); in hclge_set_vlan_protocol_type()
9972 cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type); in hclge_set_vlan_protocol_type()
9974 cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type); in hclge_set_vlan_protocol_type()
9976 cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type); in hclge_set_vlan_protocol_type()
9978 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_protocol_type()
9980 dev_err(&hdev->pdev->dev, in hclge_set_vlan_protocol_type()
9989 tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type); in hclge_set_vlan_protocol_type()
9990 tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type); in hclge_set_vlan_protocol_type()
9992 status = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_vlan_protocol_type()
9994 dev_err(&hdev->pdev->dev, in hclge_set_vlan_protocol_type()
10001 static int hclge_init_vlan_filter(struct hclge_dev *hdev) in hclge_init_vlan_filter() argument
10008 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_init_vlan_filter()
10009 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_init_vlan_filter()
10014 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_init_vlan_filter()
10015 vport = &hdev->vport[i]; in hclge_init_vlan_filter()
10016 ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_init_vlan_filter()
10024 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) && in hclge_init_vlan_filter()
10025 !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps)) in hclge_init_vlan_filter()
10028 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, in hclge_init_vlan_filter()
10032 static int hclge_init_vlan_type(struct hclge_dev *hdev) in hclge_init_vlan_type() argument
10034 hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10035 hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10036 hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10037 hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10038 hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10039 hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q; in hclge_init_vlan_type()
10041 return hclge_set_vlan_protocol_type(hdev); in hclge_init_vlan_type()
10044 static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev) in hclge_init_vport_vlan_offload() argument
10051 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_init_vport_vlan_offload()
10052 vport = &hdev->vport[i]; in hclge_init_vport_vlan_offload()
10064 static int hclge_init_vlan_config(struct hclge_dev *hdev) in hclge_init_vlan_config() argument
10066 struct hnae3_handle *handle = &hdev->vport[0].nic; in hclge_init_vlan_config()
10069 ret = hclge_init_vlan_filter(hdev); in hclge_init_vlan_config()
10073 ret = hclge_init_vlan_type(hdev); in hclge_init_vlan_config()
10077 ret = hclge_init_vport_vlan_offload(hdev); in hclge_init_vlan_config()
10088 struct hclge_dev *hdev = vport->back; in hclge_add_vport_vlan_table() local
10090 mutex_lock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
10094 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
10101 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
10109 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_vlan_table()
10115 struct hclge_dev *hdev = vport->back; in hclge_add_vport_all_vlan_table() local
10118 mutex_lock(&hdev->vport_lock); in hclge_add_vport_all_vlan_table()
10122 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_add_vport_all_vlan_table()
10126 dev_err(&hdev->pdev->dev, in hclge_add_vport_all_vlan_table()
10130 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_all_vlan_table()
10137 mutex_unlock(&hdev->vport_lock); in hclge_add_vport_all_vlan_table()
10146 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_vlan_table() local
10151 hclge_set_vlan_filter_hw(hdev, in hclge_rm_vport_vlan_table()
10167 struct hclge_dev *hdev = vport->back; in hclge_rm_vport_all_vlan_table() local
10169 mutex_lock(&hdev->vport_lock); in hclge_rm_vport_all_vlan_table()
10173 hclge_set_vlan_filter_hw(hdev, in hclge_rm_vport_all_vlan_table()
10185 clear_bit(vport->vport_id, hdev->vf_vlan_full); in hclge_rm_vport_all_vlan_table()
10186 mutex_unlock(&hdev->vport_lock); in hclge_rm_vport_all_vlan_table()
10189 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev) in hclge_uninit_vport_vlan_table() argument
10195 mutex_lock(&hdev->vport_lock); in hclge_uninit_vport_vlan_table()
10197 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_uninit_vport_vlan_table()
10198 vport = &hdev->vport[i]; in hclge_uninit_vport_vlan_table()
10205 mutex_unlock(&hdev->vport_lock); in hclge_uninit_vport_vlan_table()
10208 void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev) in hclge_restore_vport_port_base_vlan_config() argument
10219 for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) { in hclge_restore_vport_port_base_vlan_config()
10220 vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM]; in hclge_restore_vport_port_base_vlan_config()
10230 clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]); in hclge_restore_vport_port_base_vlan_config()
10231 ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto), in hclge_restore_vport_port_base_vlan_config()
10242 struct hclge_dev *hdev = vport->back; in hclge_restore_vport_vlan_table() local
10245 mutex_lock(&hdev->vport_lock); in hclge_restore_vport_vlan_table()
10249 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_restore_vport_vlan_table()
10258 mutex_unlock(&hdev->vport_lock); in hclge_restore_vport_vlan_table()
10292 static void hclge_restore_hw_table(struct hclge_dev *hdev) in hclge_restore_hw_table() argument
10294 struct hclge_vport *vport = &hdev->vport[0]; in hclge_restore_hw_table()
10298 hclge_restore_vport_port_base_vlan_config(hdev); in hclge_restore_hw_table()
10300 set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state); in hclge_restore_hw_table()
10328 struct hclge_dev *hdev = vport->back; in hclge_set_vport_vlan_fltr_change() local
10330 if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps)) in hclge_set_vport_vlan_fltr_change()
10339 struct hclge_dev *hdev = vport->back; in hclge_update_vlan_filter_entries() local
10345 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0); in hclge_update_vlan_filter_entries()
10348 return hclge_set_vlan_filter_hw(hdev, in hclge_update_vlan_filter_entries()
10358 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0); in hclge_update_vlan_filter_entries()
10362 ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto), in hclge_update_vlan_filter_entries()
10387 struct hclge_dev *hdev = vport->back; in hclge_modify_port_base_vlan_tag() local
10391 ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto), in hclge_modify_port_base_vlan_tag()
10400 ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, in hclge_modify_port_base_vlan_tag()
10403 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_modify_port_base_vlan_tag()
10407 dev_err(&hdev->pdev->dev, in hclge_modify_port_base_vlan_tag()
10481 struct hclge_dev *hdev = vport->back; in hclge_set_vf_vlan_filter() local
10486 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vf_vlan_filter()
10489 vport = hclge_get_vf_vport(hdev, vfid); in hclge_set_vf_vlan_filter()
10511 dev_err(&hdev->pdev->dev, in hclge_set_vf_vlan_filter()
10525 (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0], in hclge_set_vf_vlan_filter()
10536 static void hclge_clear_vf_vlan(struct hclge_dev *hdev) in hclge_clear_vf_vlan() argument
10544 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { in hclge_clear_vf_vlan()
10545 vport = &hdev->vport[vf]; in hclge_clear_vf_vlan()
10548 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_clear_vf_vlan()
10552 dev_err(&hdev->pdev->dev, in hclge_clear_vf_vlan()
10562 struct hclge_dev *hdev = vport->back; in hclge_set_vlan_filter() local
10570 mutex_lock(&hdev->vport_lock); in hclge_set_vlan_filter()
10571 if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_set_vlan_filter()
10572 test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) { in hclge_set_vlan_filter()
10574 mutex_unlock(&hdev->vport_lock); in hclge_set_vlan_filter()
10579 mutex_unlock(&hdev->vport_lock); in hclge_set_vlan_filter()
10588 ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, in hclge_set_vlan_filter()
10598 mutex_lock(&hdev->vport_lock); in hclge_set_vlan_filter()
10600 mutex_unlock(&hdev->vport_lock); in hclge_set_vlan_filter()
10607 mutex_lock(&hdev->vport_lock); in hclge_set_vlan_filter()
10609 mutex_unlock(&hdev->vport_lock); in hclge_set_vlan_filter()
10617 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev) in hclge_sync_vlan_fltr_state() argument
10623 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_vlan_fltr_state()
10624 vport = &hdev->vport[i]; in hclge_sync_vlan_fltr_state()
10629 mutex_lock(&hdev->vport_lock); in hclge_sync_vlan_fltr_state()
10633 dev_err(&hdev->pdev->dev, in hclge_sync_vlan_fltr_state()
10638 mutex_unlock(&hdev->vport_lock); in hclge_sync_vlan_fltr_state()
10641 mutex_unlock(&hdev->vport_lock); in hclge_sync_vlan_fltr_state()
10645 static void hclge_sync_vlan_filter(struct hclge_dev *hdev) in hclge_sync_vlan_filter() argument
10652 mutex_lock(&hdev->vport_lock); in hclge_sync_vlan_filter()
10654 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_vlan_filter()
10655 struct hclge_vport *vport = &hdev->vport[i]; in hclge_sync_vlan_filter()
10660 ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q), in hclge_sync_vlan_filter()
10664 mutex_unlock(&hdev->vport_lock); in hclge_sync_vlan_filter()
10674 mutex_unlock(&hdev->vport_lock); in hclge_sync_vlan_filter()
10682 mutex_unlock(&hdev->vport_lock); in hclge_sync_vlan_filter()
10684 hclge_sync_vlan_fltr_state(hdev); in hclge_sync_vlan_filter()
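
hclge_sync_vlan_filter() above reads like a periodic sweep: walk every vport under hdev->vport_lock, push pending VLAN deletions to hardware, and release the lock and bail on the first failure so the next service-task tick can retry. A speculative pthread model of that shape (the pending-deletion flag is my stand-in for the driver's per-vport VLAN state):

        #include <pthread.h>
        #include <stdbool.h>

        #define NVPORT   4
        #define MAX_VLAN 8

        struct sync_vport { bool del_pending[MAX_VLAN]; };

        static pthread_mutex_t vport_lock = PTHREAD_MUTEX_INITIALIZER;
        static struct sync_vport vports[NVPORT];

        static int hw_kill_vlan(int vport, int vid) { (void)vport; (void)vid; return 0; }

        static void sync_vlan_filter(void)
        {
                pthread_mutex_lock(&vport_lock);
                for (int i = 0; i < NVPORT; i++) {
                        for (int vid = 0; vid < MAX_VLAN; vid++) {
                                if (!vports[i].del_pending[vid])
                                        continue;
                                if (hw_kill_vlan(i, vid)) {
                                        /* leave the flag set; retried next tick */
                                        pthread_mutex_unlock(&vport_lock);
                                        return;
                                }
                                vports[i].del_pending[vid] = false;
                        }
                }
                pthread_mutex_unlock(&vport_lock);
        }
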
10687 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) in hclge_set_mac_mtu() argument
10698 return hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_mac_mtu()
10710 struct hclge_dev *hdev = vport->back; in hclge_set_vport_mtu() local
10716 max_frm_size > hdev->ae_dev->dev_specs.max_frm_size) in hclge_set_vport_mtu()
10720 mutex_lock(&hdev->vport_lock); in hclge_set_vport_mtu()
10722 if (vport->vport_id && (u32)max_frm_size > hdev->mps) { in hclge_set_vport_mtu()
10723 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
10727 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
10732 for (i = 1; i < hdev->num_alloc_vport; i++) in hclge_set_vport_mtu()
10733 if ((u32)max_frm_size < hdev->vport[i].mps) { in hclge_set_vport_mtu()
10734 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
10736 i, hdev->vport[i].mps); in hclge_set_vport_mtu()
10737 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
10741 hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); in hclge_set_vport_mtu()
10743 ret = hclge_set_mac_mtu(hdev, max_frm_size); in hclge_set_vport_mtu()
10745 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
10750 hdev->mps = max_frm_size; in hclge_set_vport_mtu()
10753 ret = hclge_buffer_alloc(hdev); in hclge_set_vport_mtu()
10755 dev_err(&hdev->pdev->dev, in hclge_set_vport_mtu()
10759 hclge_notify_client(hdev, HNAE3_UP_CLIENT); in hclge_set_vport_mtu()
10760 mutex_unlock(&hdev->vport_lock); in hclge_set_vport_mtu()
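
hclge_set_vport_mtu() above validates the derived frame size rather than the MTU itself: the init path sets the default MPS to ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN, a VF may not exceed the PF's current MPS, and the PF may not shrink below any VF's. A sketch of just the checks, with vport 0 playing the PF (constants are the usual Ethernet values):

        #include <errno.h>

        #define ETH_HLEN    14
        #define ETH_FCS_LEN 4
        #define VLAN_HLEN   4

        struct mtu_vport { unsigned mps; };

        static int check_vport_mtu(const struct mtu_vport *vports, int nvport,
                                   int id, unsigned mtu, unsigned hw_max)
        {
                unsigned frm = mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

                if (frm > hw_max)
                        return -EINVAL;
                if (id != 0 && frm > vports[0].mps)       /* VF above the PF */
                        return -EINVAL;
                if (id == 0)
                        for (int i = 1; i < nvport; i++)
                                if (frm < vports[i].mps)  /* PF below a VF */
                                        return -EINVAL;
                return 0;
        }

The driver then quiesces the client (HNAE3_DOWN_CLIENT), writes the new MPS, reallocates packet buffers, and brings the client back up, all under vport_lock.
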
10764 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id, in hclge_reset_tqp_cmd_send() argument
10778 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_reset_tqp_cmd_send()
10780 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd_send()
10788 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id, in hclge_get_reset_status() argument
10800 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_reset_status()
10802 dev_err(&hdev->pdev->dev, in hclge_get_reset_status()
10826 struct hclge_dev *hdev = vport->back; in hclge_reset_tqp_cmd() local
10835 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true); in hclge_reset_tqp_cmd()
10837 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd()
10844 ret = hclge_get_reset_status(hdev, queue_gid, in hclge_reset_tqp_cmd()
10857 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd()
10862 ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false); in hclge_reset_tqp_cmd()
10864 dev_err(&hdev->pdev->dev, in hclge_reset_tqp_cmd()
10880 struct hclge_dev *hdev = vport->back; in hclge_reset_rcb() local
10895 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_reset_rcb()
10897 dev_err(&hdev->pdev->dev, in hclge_reset_rcb()
10907 dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n", in hclge_reset_rcb()
10921 struct hclge_dev *hdev = vport->back; in hclge_reset_tqp() local
10928 dev_err(&hdev->pdev->dev, in hclge_reset_tqp()
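
The queue-reset trio above (hclge_reset_tqp_cmd_send(), hclge_get_reset_status(), hclge_reset_tqp_cmd()) follows an assert/poll/deassert handshake: request the reset, poll firmware until it reports completion, then clear the request. A bounded-poll sketch (MAX_POLLS and the stubs are assumptions, not driver values):

        #include <errno.h>
        #include <stdbool.h>

        #define MAX_POLLS 10

        static int  send_reset(int q, bool enable) { (void)q; (void)enable; return 0; }
        static bool reset_done(int q) { (void)q; return true; }

        static int reset_tqp(int queue_id)
        {
                int ret = send_reset(queue_id, true);     /* assert */
                if (ret)
                        return ret;

                for (int i = 0; i < MAX_POLLS; i++) {
                        if (reset_done(queue_id))         /* poll */
                                return send_reset(queue_id, false); /* deassert */
                        /* the driver sleeps between polls; elided here */
                }
                return -ETIMEDOUT;
        }
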
10940 struct hclge_dev *hdev = vport->back; in hclge_get_fw_version() local
10942 return hdev->fw_version; in hclge_get_fw_version()
10945 int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version) in hclge_query_scc_version() argument
10954 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_query_scc_version()
10963 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) in hclge_set_flowctrl_adv() argument
10965 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_flowctrl_adv()
10973 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) in hclge_cfg_pauseparam() argument
10977 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) in hclge_cfg_pauseparam()
10980 ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); in hclge_cfg_pauseparam()
10982 dev_err(&hdev->pdev->dev, in hclge_cfg_pauseparam()
10988 int hclge_cfg_flowctrl(struct hclge_dev *hdev) in hclge_cfg_flowctrl() argument
10990 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_cfg_flowctrl()
11000 return hclge_mac_pause_setup_hw(hdev); in hclge_cfg_flowctrl()
11020 return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause); in hclge_cfg_flowctrl()
11027 struct hclge_dev *hdev = vport->back; in hclge_get_pauseparam() local
11028 u8 media_type = hdev->hw.mac.media_type; in hclge_get_pauseparam()
11033 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { in hclge_get_pauseparam()
11039 if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { in hclge_get_pauseparam()
11042 } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { in hclge_get_pauseparam()
11045 } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { in hclge_get_pauseparam()
11054 static void hclge_record_user_pauseparam(struct hclge_dev *hdev, in hclge_record_user_pauseparam() argument
11058 hdev->fc_mode_last_time = HCLGE_FC_FULL; in hclge_record_user_pauseparam()
11060 hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE; in hclge_record_user_pauseparam()
11062 hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE; in hclge_record_user_pauseparam()
11064 hdev->fc_mode_last_time = HCLGE_FC_NONE; in hclge_record_user_pauseparam()
11066 hdev->tm_info.fc_mode = hdev->fc_mode_last_time; in hclge_record_user_pauseparam()
11073 struct hclge_dev *hdev = vport->back; in hclge_set_pauseparam() local
11074 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_set_pauseparam()
11077 if (phydev || hnae3_dev_phy_imp_supported(hdev)) { in hclge_set_pauseparam()
11080 dev_info(&hdev->pdev->dev, in hclge_set_pauseparam()
11086 if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { in hclge_set_pauseparam()
11087 dev_info(&hdev->pdev->dev, in hclge_set_pauseparam()
11092 hclge_set_flowctrl_adv(hdev, rx_en, tx_en); in hclge_set_pauseparam()
11094 hclge_record_user_pauseparam(hdev, rx_en, tx_en); in hclge_set_pauseparam()
11096 if (!auto_neg || hnae3_dev_phy_imp_supported(hdev)) in hclge_set_pauseparam()
11097 return hclge_cfg_pauseparam(hdev, rx_en, tx_en); in hclge_set_pauseparam()
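
hclge_record_user_pauseparam() above maps the two ethtool knobs onto a single flow-control mode, which is then mirrored into tm_info.fc_mode (and refused outright while PFC is active). The mapping, extracted:

        enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL, FC_PFC };

        static enum fc_mode fc_mode_from_pause(int rx_en, int tx_en)
        {
                if (rx_en && tx_en)
                        return FC_FULL;
                if (rx_en)
                        return FC_RX_PAUSE;
                if (tx_en)
                        return FC_TX_PAUSE;
                return FC_NONE;
        }
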
11109 struct hclge_dev *hdev = vport->back; in hclge_get_ksettings_an_result() local
11112 *speed = hdev->hw.mac.speed; in hclge_get_ksettings_an_result()
11114 *duplex = hdev->hw.mac.duplex; in hclge_get_ksettings_an_result()
11116 *auto_neg = hdev->hw.mac.autoneg; in hclge_get_ksettings_an_result()
11118 *lane_num = hdev->hw.mac.lane_num; in hclge_get_ksettings_an_result()
11125 struct hclge_dev *hdev = vport->back; in hclge_get_media_type() local
11131 hclge_update_port_info(hdev); in hclge_get_media_type()
11134 *media_type = hdev->hw.mac.media_type; in hclge_get_media_type()
11137 *module_type = hdev->hw.mac.module_type; in hclge_get_media_type()
11144 struct hclge_dev *hdev = vport->back; in hclge_get_mdix_mode() local
11145 struct phy_device *phydev = hdev->hw.mac.phydev; in hclge_get_mdix_mode()
11190 static void hclge_info_show(struct hclge_dev *hdev) in hclge_info_show() argument
11192 struct hnae3_handle *handle = &hdev->vport->nic; in hclge_info_show()
11193 struct device *dev = &hdev->pdev->dev; in hclge_info_show()
11197 dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps); in hclge_info_show()
11198 dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc); in hclge_info_show()
11199 dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc); in hclge_info_show()
11200 dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport); in hclge_info_show()
11201 dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs); in hclge_info_show()
11202 dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map); in hclge_info_show()
11203 dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size); in hclge_info_show()
11204 dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size); in hclge_info_show()
11205 dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size); in hclge_info_show()
11207 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main"); in hclge_info_show()
11213 hdev->tx_spare_buf_size); in hclge_info_show()
11222 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_nic_client_instance() local
11223 u32 rst_cnt = hdev->rst_stats.reset_cnt; in hclge_init_nic_client_instance()
11230 set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_init_nic_client_instance()
11231 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_init_nic_client_instance()
11232 rst_cnt != hdev->rst_stats.reset_cnt) { in hclge_init_nic_client_instance()
11238 ret = hclge_config_nic_hw_error(hdev, true); in hclge_init_nic_client_instance()
11247 if (netif_msg_drv(&hdev->vport->nic)) in hclge_init_nic_client_instance()
11248 hclge_info_show(hdev); in hclge_init_nic_client_instance()
11253 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_init_nic_client_instance()
11254 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_init_nic_client_instance()
11265 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_roce_client_instance() local
11270 if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client || in hclge_init_roce_client_instance()
11271 !hdev->nic_client) in hclge_init_roce_client_instance()
11274 client = hdev->roce_client; in hclge_init_roce_client_instance()
11279 rst_cnt = hdev->rst_stats.reset_cnt; in hclge_init_roce_client_instance()
11284 set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_init_roce_client_instance()
11285 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_init_roce_client_instance()
11286 rst_cnt != hdev->rst_stats.reset_cnt) { in hclge_init_roce_client_instance()
11292 ret = hclge_config_rocee_ras_interrupt(hdev, true); in hclge_init_roce_client_instance()
11304 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_init_roce_client_instance()
11305 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_init_roce_client_instance()
11308 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); in hclge_init_roce_client_instance()
11316 struct hclge_dev *hdev = ae_dev->priv; in hclge_init_client_instance() local
11317 struct hclge_vport *vport = &hdev->vport[0]; in hclge_init_client_instance()
11322 hdev->nic_client = client; in hclge_init_client_instance()
11334 if (hnae3_dev_roce_supported(hdev)) { in hclge_init_client_instance()
11335 hdev->roce_client = client; in hclge_init_client_instance()
11351 hdev->nic_client = NULL; in hclge_init_client_instance()
11355 hdev->roce_client = NULL; in hclge_init_client_instance()
11360 static bool hclge_uninit_need_wait(struct hclge_dev *hdev) in hclge_uninit_need_wait() argument
11362 return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || in hclge_uninit_need_wait()
11363 test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state); in hclge_uninit_need_wait()
11369 struct hclge_dev *hdev = ae_dev->priv; in hclge_uninit_client_instance() local
11370 struct hclge_vport *vport = &hdev->vport[0]; in hclge_uninit_client_instance()
11372 if (hdev->roce_client) { in hclge_uninit_client_instance()
11373 clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state); in hclge_uninit_client_instance()
11374 while (hclge_uninit_need_wait(hdev)) in hclge_uninit_client_instance()
11377 hdev->roce_client->ops->uninit_instance(&vport->roce, 0); in hclge_uninit_client_instance()
11378 hdev->roce_client = NULL; in hclge_uninit_client_instance()
11383 if (hdev->nic_client && client->ops->uninit_instance) { in hclge_uninit_client_instance()
11384 clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state); in hclge_uninit_client_instance()
11385 while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_uninit_client_instance()
11389 hdev->nic_client = NULL; in hclge_uninit_client_instance()
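
Both client-instance init paths above guard against a reset racing with registration: they snapshot rst_stats.reset_cnt before init_instance(), and if the counter moved or HCLGE_STATE_RST_HANDLING is set afterwards, they unwind the registration. A compact model of that check (the stubs and the -EBUSY return are mine):

        #include <errno.h>
        #include <stdatomic.h>
        #include <stdbool.h>

        static atomic_uint reset_cnt;
        static atomic_bool rst_handling;

        static int  client_init_instance(void)   { return 0; }   /* stub */
        static void client_uninit_instance(void) { }

        static int register_client(void)
        {
                unsigned snap = atomic_load(&reset_cnt);
                int ret = client_init_instance();
                if (ret)
                        return ret;

                /* a reset slipped in under us: undo and report failure */
                if (atomic_load(&rst_handling) || atomic_load(&reset_cnt) != snap) {
                        client_uninit_instance();
                        return -EBUSY;
                }
                return 0;
        }
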
11394 static int hclge_dev_mem_map(struct hclge_dev *hdev) in hclge_dev_mem_map() argument
11396 struct pci_dev *pdev = hdev->pdev; in hclge_dev_mem_map()
11397 struct hclge_hw *hw = &hdev->hw; in hclge_dev_mem_map()
11415 static int hclge_pci_init(struct hclge_dev *hdev) in hclge_pci_init() argument
11417 struct pci_dev *pdev = hdev->pdev; in hclge_pci_init()
11445 hw = &hdev->hw; in hclge_pci_init()
11453 ret = hclge_dev_mem_map(hdev); in hclge_pci_init()
11457 hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); in hclge_pci_init()
11462 pcim_iounmap(pdev, hdev->hw.hw.io_base); in hclge_pci_init()
11471 static void hclge_pci_uninit(struct hclge_dev *hdev) in hclge_pci_uninit() argument
11473 struct pci_dev *pdev = hdev->pdev; in hclge_pci_uninit()
11475 if (hdev->hw.hw.mem_base) in hclge_pci_uninit()
11476 devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base); in hclge_pci_uninit()
11478 pcim_iounmap(pdev, hdev->hw.hw.io_base); in hclge_pci_uninit()
11484 static void hclge_state_init(struct hclge_dev *hdev) in hclge_state_init() argument
11486 set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); in hclge_state_init()
11487 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_state_init()
11488 clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state); in hclge_state_init()
11489 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_state_init()
11490 clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state); in hclge_state_init()
11491 clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state); in hclge_state_init()
11492 clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); in hclge_state_init()
11495 static void hclge_state_uninit(struct hclge_dev *hdev) in hclge_state_uninit() argument
11497 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_state_uninit()
11498 set_bit(HCLGE_STATE_REMOVING, &hdev->state); in hclge_state_uninit()
11500 if (hdev->reset_timer.function) in hclge_state_uninit()
11501 timer_delete_sync(&hdev->reset_timer); in hclge_state_uninit()
11502 if (hdev->service_task.work.func) in hclge_state_uninit()
11503 cancel_delayed_work_sync(&hdev->service_task); in hclge_state_uninit()
11512 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_prepare_general() local
11517 down(&hdev->reset_sem); in hclge_reset_prepare_general()
11518 set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_prepare_general()
11519 hdev->reset_type = rst_type; in hclge_reset_prepare_general()
11520 ret = hclge_reset_prepare(hdev); in hclge_reset_prepare_general()
11521 if (!ret && !hdev->reset_pending) in hclge_reset_prepare_general()
11524 dev_err(&hdev->pdev->dev, in hclge_reset_prepare_general()
11526 ret, hdev->reset_pending, retry_cnt); in hclge_reset_prepare_general()
11527 clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); in hclge_reset_prepare_general()
11528 up(&hdev->reset_sem); in hclge_reset_prepare_general()
11533 hclge_enable_vector(&hdev->misc_vector, false); in hclge_reset_prepare_general()
11534 set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state); in hclge_reset_prepare_general()
11536 if (hdev->reset_type == HNAE3_FLR_RESET) in hclge_reset_prepare_general()
11537 hdev->rst_stats.flr_rst_cnt++; in hclge_reset_prepare_general()
11542 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_done() local
11545 hclge_enable_vector(&hdev->misc_vector, true); in hclge_reset_done()
11547 ret = hclge_reset_rebuild(hdev); in hclge_reset_done()
11549 dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret); in hclge_reset_done()
11551 hdev->reset_type = HNAE3_NONE_RESET; in hclge_reset_done()
11552 if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) in hclge_reset_done()
11553 up(&hdev->reset_sem); in hclge_reset_done()
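
hclge_reset_prepare_general() above retries the prepare step in a loop, holding hdev->reset_sem across a successful prepare (it is only released later, in hclge_reset_done()) and releasing it between failed attempts. A loose sketch; the retry bound and the stubs are assumptions:

        #include <semaphore.h>
        #include <stdbool.h>

        #define RESET_RETRY_CNT 5   /* assumed bound; the real one is driver-defined */

        static sem_t reset_sem;     /* sem_init(&reset_sem, 0, 1) before use */

        static int  prepare_reset(void)  { return 0; }
        static bool reset_pending(void)  { return false; }

        static int reset_prepare_general(void)
        {
                int retry = 0, ret = -1;

                while (retry++ < RESET_RETRY_CNT) {
                        sem_wait(&reset_sem);
                        ret = prepare_reset();
                        if (!ret && !reset_pending())
                                return 0;        /* sem stays held until reset_done */
                        sem_post(&reset_sem);    /* back off and try again */
                }
                return ret;
        }
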
11556 static void hclge_clear_resetting_state(struct hclge_dev *hdev) in hclge_clear_resetting_state() argument
11560 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_clear_resetting_state()
11561 struct hclge_vport *vport = &hdev->vport[i]; in hclge_clear_resetting_state()
11565 ret = hclge_set_vf_rst(hdev, vport->vport_id, false); in hclge_clear_resetting_state()
11567 dev_warn(&hdev->pdev->dev, in hclge_clear_resetting_state()
11573 static int hclge_clear_hw_resource(struct hclge_dev *hdev) in hclge_clear_hw_resource() argument
11580 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_clear_hw_resource()
11588 dev_err(&hdev->pdev->dev, in hclge_clear_hw_resource()
11595 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev) in hclge_init_rxd_adv_layout() argument
11597 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) in hclge_init_rxd_adv_layout()
11598 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1); in hclge_init_rxd_adv_layout()
11601 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev) in hclge_uninit_rxd_adv_layout() argument
11603 if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) in hclge_uninit_rxd_adv_layout()
11604 hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0); in hclge_uninit_rxd_adv_layout()
11614 static int hclge_get_wol_supported_mode(struct hclge_dev *hdev, in hclge_get_wol_supported_mode() argument
11625 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_wol_supported_mode()
11627 dev_err(&hdev->pdev->dev, in hclge_get_wol_supported_mode()
11637 static int hclge_set_wol_cfg(struct hclge_dev *hdev, in hclge_set_wol_cfg() argument
11650 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_wol_cfg()
11652 dev_err(&hdev->pdev->dev, in hclge_set_wol_cfg()
11658 static int hclge_update_wol(struct hclge_dev *hdev) in hclge_update_wol() argument
11660 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol; in hclge_update_wol()
11662 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev)) in hclge_update_wol()
11665 return hclge_set_wol_cfg(hdev, wol_info); in hclge_update_wol()
11668 static int hclge_init_wol(struct hclge_dev *hdev) in hclge_init_wol() argument
11670 struct hclge_wol_info *wol_info = &hdev->hw.mac.wol; in hclge_init_wol()
11673 if (!hnae3_ae_dev_wol_supported(hdev->ae_dev)) in hclge_init_wol()
11677 ret = hclge_get_wol_supported_mode(hdev, in hclge_init_wol()
11684 return hclge_update_wol(hdev); in hclge_init_wol()
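
The Wake-on-LAN fragments split probe from apply: hclge_init_wol() queries the supported-mode mask once, caches it in the mac's wol_info, then reuses the same apply helper that hclge_update_wol() calls again after each reset. Roughly:

        struct wol_info { unsigned supported; unsigned current_mode; };

        static int get_supported_modes(unsigned *mask) { *mask = 0; return 0; }
        static int apply_wol(const struct wol_info *w) { (void)w; return 0; }

        static int init_wol(struct wol_info *w)
        {
                int ret = get_supported_modes(&w->supported);
                if (ret)
                        return ret;
                return apply_wol(w);   /* same path update_wol takes after reset */
        }
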
11728 struct hclge_dev *hdev; in hclge_init_ae_dev() local
11731 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); in hclge_init_ae_dev()
11732 if (!hdev) in hclge_init_ae_dev()
11735 hdev->pdev = pdev; in hclge_init_ae_dev()
11736 hdev->ae_dev = ae_dev; in hclge_init_ae_dev()
11737 hdev->reset_type = HNAE3_NONE_RESET; in hclge_init_ae_dev()
11738 hdev->reset_level = HNAE3_FUNC_RESET; in hclge_init_ae_dev()
11739 ae_dev->priv = hdev; in hclge_init_ae_dev()
11742 hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; in hclge_init_ae_dev()
11744 mutex_init(&hdev->vport_lock); in hclge_init_ae_dev()
11745 spin_lock_init(&hdev->fd_rule_lock); in hclge_init_ae_dev()
11746 sema_init(&hdev->reset_sem, 1); in hclge_init_ae_dev()
11748 ret = hclge_pci_init(hdev); in hclge_init_ae_dev()
11753 ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw); in hclge_init_ae_dev()
11758 hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclge_cmq_ops); in hclge_init_ae_dev()
11759 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, in hclge_init_ae_dev()
11760 true, hdev->reset_pending); in hclge_init_ae_dev()
11764 ret = hclge_clear_hw_resource(hdev); in hclge_init_ae_dev()
11768 ret = hclge_get_cap(hdev); in hclge_init_ae_dev()
11772 ret = hclge_query_dev_specs(hdev); in hclge_init_ae_dev()
11779 ret = hclge_configure(hdev); in hclge_init_ae_dev()
11785 ret = hclge_init_msi(hdev); in hclge_init_ae_dev()
11791 ret = hclge_misc_irq_init(hdev); in hclge_init_ae_dev()
11795 ret = hclge_alloc_tqps(hdev); in hclge_init_ae_dev()
11801 ret = hclge_alloc_vport(hdev); in hclge_init_ae_dev()
11805 ret = hclge_map_tqp(hdev); in hclge_init_ae_dev()
11809 if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { in hclge_init_ae_dev()
11811 if (hnae3_dev_phy_imp_supported(hdev)) in hclge_init_ae_dev()
11812 ret = hclge_update_tp_port_info(hdev); in hclge_init_ae_dev()
11814 ret = hclge_mac_mdio_config(hdev); in hclge_init_ae_dev()
11820 ret = hclge_init_umv_space(hdev); in hclge_init_ae_dev()
11824 ret = hclge_mac_init(hdev); in hclge_init_ae_dev()
11830 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); in hclge_init_ae_dev()
11836 ret = hclge_config_gro(hdev); in hclge_init_ae_dev()
11840 ret = hclge_init_vlan_config(hdev); in hclge_init_ae_dev()
11846 ret = hclge_tm_schd_init(hdev); in hclge_init_ae_dev()
11852 ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev, in hclge_init_ae_dev()
11853 &hdev->rss_cfg); in hclge_init_ae_dev()
11859 ret = hclge_rss_init_hw(hdev); in hclge_init_ae_dev()
11865 ret = init_mgr_tbl(hdev); in hclge_init_ae_dev()
11871 ret = hclge_init_fd_config(hdev); in hclge_init_ae_dev()
11878 ret = hclge_ptp_init(hdev); in hclge_init_ae_dev()
11882 ret = hclge_update_port_info(hdev); in hclge_init_ae_dev()
11886 INIT_KFIFO(hdev->mac_tnl_log); in hclge_init_ae_dev()
11888 hclge_dcb_ops_set(hdev); in hclge_init_ae_dev()
11890 timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); in hclge_init_ae_dev()
11891 INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task); in hclge_init_ae_dev()
11893 hclge_clear_all_event_cause(hdev); in hclge_init_ae_dev()
11894 hclge_clear_resetting_state(hdev); in hclge_init_ae_dev()
11897 if (hnae3_dev_ras_imp_supported(hdev)) in hclge_init_ae_dev()
11898 hclge_handle_occurred_error(hdev); in hclge_init_ae_dev()
11911 mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL); in hclge_init_ae_dev()
11914 hclge_init_rxd_adv_layout(hdev); in hclge_init_ae_dev()
11916 ret = hclge_init_wol(hdev); in hclge_init_ae_dev()
11921 ret = hclge_devlink_init(hdev); in hclge_init_ae_dev()
11925 hclge_state_init(hdev); in hclge_init_ae_dev()
11926 hdev->last_reset_time = jiffies; in hclge_init_ae_dev()
11929 enable_irq(hdev->misc_vector.vector_irq); in hclge_init_ae_dev()
11930 hclge_enable_vector(&hdev->misc_vector, true); in hclge_init_ae_dev()
11932 dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n", in hclge_init_ae_dev()
11935 hclge_task_schedule(hdev, round_jiffies_relative(HZ)); in hclge_init_ae_dev()
11939 hclge_ptp_uninit(hdev); in hclge_init_ae_dev()
11941 if (hdev->hw.mac.phydev) in hclge_init_ae_dev()
11942 mdiobus_unregister(hdev->hw.mac.mdio_bus); in hclge_init_ae_dev()
11944 hclge_misc_irq_uninit(hdev); in hclge_init_ae_dev()
11948 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); in hclge_init_ae_dev()
11950 pcim_iounmap(pdev, hdev->hw.hw.io_base); in hclge_init_ae_dev()
11954 mutex_destroy(&hdev->vport_lock); in hclge_init_ae_dev()
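
The tail of hclge_init_ae_dev() above (ptp_uninit, mdiobus_unregister, misc_irq_uninit, cmd_uninit, pcim_iounmap, mutex_destroy) is the standard kernel goto-unwind ladder: each init step that fails jumps to a label that tears down everything initialized before it, in reverse order. The idiom, reduced to placeholders:

        static int  step_a(void) { return 0; }
        static int  step_b(void) { return 0; }
        static int  step_c(void) { return 0; }
        static void undo_a(void) { }
        static void undo_b(void) { }

        static int init_dev(void)
        {
                int ret;

                ret = step_a();
                if (ret)
                        return ret;
                ret = step_b();
                if (ret)
                        goto err_undo_a;
                ret = step_c();
                if (ret)
                        goto err_undo_b;
                return 0;

        err_undo_b:
                undo_b();
        err_undo_a:
                undo_a();
                return ret;
        }
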
11958 static void hclge_stats_clear(struct hclge_dev *hdev) in hclge_stats_clear() argument
11960 memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats)); in hclge_stats_clear()
11961 memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats)); in hclge_stats_clear()
11964 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable) in hclge_set_mac_spoofchk() argument
11966 return hclge_config_switch_param(hdev, vf, enable, in hclge_set_mac_spoofchk()
11970 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable) in hclge_set_vlan_spoofchk() argument
11972 return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF, in hclge_set_vlan_spoofchk()
11977 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable) in hclge_set_vf_spoofchk_hw() argument
11981 ret = hclge_set_mac_spoofchk(hdev, vf, enable); in hclge_set_vf_spoofchk_hw()
11983 dev_err(&hdev->pdev->dev, in hclge_set_vf_spoofchk_hw()
11989 ret = hclge_set_vlan_spoofchk(hdev, vf, enable); in hclge_set_vf_spoofchk_hw()
11991 dev_err(&hdev->pdev->dev, in hclge_set_vf_spoofchk_hw()
12002 struct hclge_dev *hdev = vport->back; in hclge_set_vf_spoofchk() local
12006 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_set_vf_spoofchk()
12009 vport = hclge_get_vf_vport(hdev, vf); in hclge_set_vf_spoofchk()
12016 if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full)) in hclge_set_vf_spoofchk()
12017 dev_warn(&hdev->pdev->dev, in hclge_set_vf_spoofchk()
12021 dev_warn(&hdev->pdev->dev, in hclge_set_vf_spoofchk()
12025 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable); in hclge_set_vf_spoofchk()
12033 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev) in hclge_reset_vport_spoofchk() argument
12035 struct hclge_vport *vport = hdev->vport; in hclge_reset_vport_spoofchk()
12039 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2) in hclge_reset_vport_spoofchk()
12043 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_vport_spoofchk()
12044 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, in hclge_reset_vport_spoofchk()
12058 struct hclge_dev *hdev = vport->back; in hclge_set_vf_trust() local
12061 vport = hclge_get_vf_vport(hdev, vf); in hclge_set_vf_trust()
12070 hclge_task_schedule(hdev, 0); in hclge_set_vf_trust()
12075 static void hclge_reset_vf_rate(struct hclge_dev *hdev) in hclge_reset_vf_rate() argument
12081 for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) { in hclge_reset_vf_rate()
12082 struct hclge_vport *vport = &hdev->vport[vf]; in hclge_reset_vf_rate()
12087 dev_err(&hdev->pdev->dev, in hclge_reset_vf_rate()
12093 static int hclge_vf_rate_param_check(struct hclge_dev *hdev, in hclge_vf_rate_param_check() argument
12097 max_tx_rate < 0 || (u32)max_tx_rate > hdev->hw.mac.max_speed) { in hclge_vf_rate_param_check()
12098 dev_err(&hdev->pdev->dev, in hclge_vf_rate_param_check()
12100 min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed); in hclge_vf_rate_param_check()
12111 struct hclge_dev *hdev = vport->back; in hclge_set_vf_rate() local
12114 ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate); in hclge_set_vf_rate()
12118 vport = hclge_get_vf_vport(hdev, vf); in hclge_set_vf_rate()
12134 static int hclge_resume_vf_rate(struct hclge_dev *hdev) in hclge_resume_vf_rate() argument
12136 struct hnae3_handle *handle = &hdev->vport->nic; in hclge_resume_vf_rate()
12142 for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) { in hclge_resume_vf_rate()
12143 vport = hclge_get_vf_vport(hdev, vf); in hclge_resume_vf_rate()
12156 dev_err(&hdev->pdev->dev, in hclge_resume_vf_rate()
12166 static void hclge_reset_vport_state(struct hclge_dev *hdev) in hclge_reset_vport_state() argument
12168 struct hclge_vport *vport = hdev->vport; in hclge_reset_vport_state()
12171 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_reset_vport_state()
12179 struct hclge_dev *hdev = ae_dev->priv; in hclge_reset_ae_dev() local
12183 set_bit(HCLGE_STATE_DOWN, &hdev->state); in hclge_reset_ae_dev()
12185 hclge_stats_clear(hdev); in hclge_reset_ae_dev()
12189 if (hdev->reset_type == HNAE3_IMP_RESET || in hclge_reset_ae_dev()
12190 hdev->reset_type == HNAE3_GLOBAL_RESET) { in hclge_reset_ae_dev()
12191 memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table)); in hclge_reset_ae_dev()
12192 memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full)); in hclge_reset_ae_dev()
12193 bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport); in hclge_reset_ae_dev()
12194 hclge_reset_umv_space(hdev); in hclge_reset_ae_dev()
12197 ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version, in hclge_reset_ae_dev()
12198 true, hdev->reset_pending); in hclge_reset_ae_dev()
12204 ret = hclge_map_tqp(hdev); in hclge_reset_ae_dev()
12210 ret = hclge_mac_init(hdev); in hclge_reset_ae_dev()
12216 ret = hclge_tp_port_init(hdev); in hclge_reset_ae_dev()
12223 ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); in hclge_reset_ae_dev()
12229 ret = hclge_config_gro(hdev); in hclge_reset_ae_dev()
12233 ret = hclge_init_vlan_config(hdev); in hclge_reset_ae_dev()
12239 hclge_reset_tc_config(hdev); in hclge_reset_ae_dev()
12241 ret = hclge_tm_init_hw(hdev, true); in hclge_reset_ae_dev()
12247 ret = hclge_rss_init_hw(hdev); in hclge_reset_ae_dev()
12253 ret = init_mgr_tbl(hdev); in hclge_reset_ae_dev()
12260 ret = hclge_init_fd_config(hdev); in hclge_reset_ae_dev()
12266 ret = hclge_ptp_init(hdev); in hclge_reset_ae_dev()
12271 if (hnae3_dev_ras_imp_supported(hdev)) in hclge_reset_ae_dev()
12272 hclge_handle_occurred_error(hdev); in hclge_reset_ae_dev()
12279 ret = hclge_config_nic_hw_error(hdev, true); in hclge_reset_ae_dev()
12287 if (hdev->roce_client) { in hclge_reset_ae_dev()
12288 ret = hclge_config_rocee_ras_interrupt(hdev, true); in hclge_reset_ae_dev()
12297 hclge_reset_vport_state(hdev); in hclge_reset_ae_dev()
12298 ret = hclge_reset_vport_spoofchk(hdev); in hclge_reset_ae_dev()
12302 ret = hclge_resume_vf_rate(hdev); in hclge_reset_ae_dev()
12306 hclge_init_rxd_adv_layout(hdev); in hclge_reset_ae_dev()
12308 ret = hclge_update_wol(hdev); in hclge_reset_ae_dev()
12321 struct hclge_dev *hdev = ae_dev->priv; in hclge_uninit_ae_dev() local
12322 struct hclge_mac *mac = &hdev->hw.mac; in hclge_uninit_ae_dev()
12324 hclge_reset_vf_rate(hdev); in hclge_uninit_ae_dev()
12325 hclge_clear_vf_vlan(hdev); in hclge_uninit_ae_dev()
12326 hclge_state_uninit(hdev); in hclge_uninit_ae_dev()
12327 hclge_ptp_uninit(hdev); in hclge_uninit_ae_dev()
12328 hclge_uninit_rxd_adv_layout(hdev); in hclge_uninit_ae_dev()
12329 hclge_uninit_mac_table(hdev); in hclge_uninit_ae_dev()
12330 hclge_del_all_fd_entries(hdev); in hclge_uninit_ae_dev()
12336 hclge_enable_vector(&hdev->misc_vector, false); in hclge_uninit_ae_dev()
12337 disable_irq(hdev->misc_vector.vector_irq); in hclge_uninit_ae_dev()
12340 hclge_config_mac_tnl_int(hdev, false); in hclge_uninit_ae_dev()
12341 hclge_config_nic_hw_error(hdev, false); in hclge_uninit_ae_dev()
12342 hclge_config_rocee_ras_interrupt(hdev, false); in hclge_uninit_ae_dev()
12344 hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw); in hclge_uninit_ae_dev()
12345 hclge_misc_irq_uninit(hdev); in hclge_uninit_ae_dev()
12346 hclge_devlink_uninit(hdev); in hclge_uninit_ae_dev()
12347 hclge_pci_uninit(hdev); in hclge_uninit_ae_dev()
12348 hclge_uninit_vport_vlan_table(hdev); in hclge_uninit_ae_dev()
12349 mutex_destroy(&hdev->vport_lock); in hclge_uninit_ae_dev()
12356 struct hclge_dev *hdev = vport->back; in hclge_get_max_channels() local
12358 return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps); in hclge_get_max_channels()
12374 struct hclge_dev *hdev = vport->back; in hclge_get_tqps_and_rss_info() local
12377 *max_rss_size = hdev->pf_rss_size_max; in hclge_get_tqps_and_rss_info()
12384 struct hclge_dev *hdev = vport->back; in hclge_set_rss_tc_mode_cfg() local
12396 if (!(hdev->hw_tc_map & BIT(i))) in hclge_set_rss_tc_mode_cfg()
12404 return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid, in hclge_set_rss_tc_mode_cfg()
12414 struct hclge_dev *hdev = vport->back; in hclge_set_channels() local
12423 ret = hclge_tm_vport_map_update(hdev); in hclge_set_channels()
12425 dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret); in hclge_set_channels()
12448 dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n", in hclge_set_channels()
12455 dev_info(&hdev->pdev->dev, in hclge_set_channels()
12463 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status) in hclge_set_led_status() argument
12475 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_set_led_status()
12477 dev_err(&hdev->pdev->dev, in hclge_set_led_status()
12493 struct hclge_dev *hdev = vport->back; in hclge_set_led_id() local
12497 return hclge_set_led_status(hdev, HCLGE_LED_ON); in hclge_set_led_id()
12499 return hclge_set_led_status(hdev, HCLGE_LED_OFF); in hclge_set_led_id()
12511 struct hclge_dev *hdev = vport->back; in hclge_get_link_mode() local
12515 supported[idx] = hdev->hw.mac.supported[idx]; in hclge_get_link_mode()
12516 advertising[idx] = hdev->hw.mac.advertising[idx]; in hclge_get_link_mode()
12523 struct hclge_dev *hdev = vport->back; in hclge_gro_en() local
12524 bool gro_en_old = hdev->gro_en; in hclge_gro_en()
12527 hdev->gro_en = enable; in hclge_gro_en()
12528 ret = hclge_config_gro(hdev); in hclge_gro_en()
12530 hdev->gro_en = gro_en_old; in hclge_gro_en()
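
hclge_gro_en() above is a small set-then-rollback: stash the old flag, write the new one, push it to hardware, and restore the old value if hardware refuses. In isolation:

        static int config_gro(int en) { (void)en; return 0; }   /* stub */

        static int set_gro(int *gro_en, int enable)
        {
                int old = *gro_en, ret;

                *gro_en = enable;
                ret = config_gro(enable);
                if (ret)
                        *gro_en = old;   /* hardware rejected it: keep old state */
                return ret;
        }
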
12538 struct hclge_dev *hdev = vport->back; in hclge_sync_vport_promisc_mode() local
12577 ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en, in hclge_sync_vport_promisc_mode()
12588 static void hclge_sync_promisc_mode(struct hclge_dev *hdev) in hclge_sync_promisc_mode() argument
12594 for (i = 0; i < hdev->num_alloc_vport; i++) { in hclge_sync_promisc_mode()
12595 vport = &hdev->vport[i]; in hclge_sync_promisc_mode()
12603 static bool hclge_module_existed(struct hclge_dev *hdev) in hclge_module_existed() argument
12610 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_module_existed()
12612 dev_err(&hdev->pdev->dev, in hclge_module_existed()
12625 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset, in hclge_get_sfp_eeprom_info() argument
12651 ret = hclge_cmd_send(&hdev->hw, desc, i); in hclge_get_sfp_eeprom_info()
12653 dev_err(&hdev->pdev->dev, in hclge_get_sfp_eeprom_info()
12680 struct hclge_dev *hdev = vport->back; in hclge_get_module_eeprom() local
12684 if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER) in hclge_get_module_eeprom()
12687 if (!hclge_module_existed(hdev)) in hclge_get_module_eeprom()
12691 data_len = hclge_get_sfp_eeprom_info(hdev, in hclge_get_module_eeprom()
12708 struct hclge_dev *hdev = vport->back; in hclge_get_link_diagnosis_info() local
12712 if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) in hclge_get_link_diagnosis_info()
12716 ret = hclge_cmd_send(&hdev->hw, &desc, 1); in hclge_get_link_diagnosis_info()
12718 dev_err(&hdev->pdev->dev, in hclge_get_link_diagnosis_info()
12732 struct hclge_dev *hdev = vport->back; in hclge_clear_vport_vf_info() local
12744 dev_err(&hdev->pdev->dev, in hclge_clear_vport_vf_info()
12755 dev_err(&hdev->pdev->dev, in hclge_clear_vport_vf_info()
12759 ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false); in hclge_clear_vport_vf_info()
12761 dev_err(&hdev->pdev->dev, in hclge_clear_vport_vf_info()
12770 struct hclge_dev *hdev = ae_dev->priv; in hclge_clean_vport_config() local
12775 vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM]; in hclge_clean_vport_config()