Lines matching refs: qm

369 bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
373 cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP].cap_val;
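
The two fragments above (369, 373) read like a plain capability-bitmap test. A minimal sketch of how they likely fit together; the lines between them are elided from the listing, so the local variable and the return shape are assumptions:

bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
{
	u32 cap_val;

	/* dev_cap_table caches the capability registers read at probe time;
	 * DRV_ALG_BITMAP carries one bit per algorithm the driver exposes. */
	cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP].cap_val;

	return !!(alg & cap_val);
}
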
382 struct hisi_qm *qm = s->private;
384 hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
469 static void hpre_config_pasid(struct hisi_qm *qm)
473 if (qm->ver >= QM_HW_V3)
476 val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
477 val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
478 if (qm->use_sva) {
485 writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
486 writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
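
Lines 469-486 outline a read-modify-write of the RUSER/WUSER data-path configuration, gated on hardware version and SVA. A sketch of the implied body; the PASID-enable bit name is an assumption, since the modify step sits in lines the listing omits:

static void hpre_config_pasid(struct hisi_qm *qm)
{
	u32 val1, val2;

	/* HW v3 and later handle PASID elsewhere, so only older parts
	 * need this register fixup. */
	if (qm->ver >= QM_HW_V3)
		return;

	val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
	val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
	if (qm->use_sva) {
		val1 |= BIT(HPRE_PASID_EN_BIT);		/* assumed bit name */
		val2 |= BIT(HPRE_PASID_EN_BIT);
	} else {
		val1 &= ~BIT(HPRE_PASID_EN_BIT);
		val2 &= ~BIT(HPRE_PASID_EN_BIT);
	}
	writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
	writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
}
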
489 static int hpre_cfg_by_dsm(struct hisi_qm *qm)
491 struct device *dev = &qm->pdev->dev;
513 static int hpre_set_cluster(struct hisi_qm *qm)
515 struct device *dev = &qm->pdev->dev;
523 cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_EN].cap_val;
524 hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
532 qm->io_base + offset + HPRE_CORE_ENB);
533 writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG);
534 ret = readl_relaxed_poll_timeout(qm->io_base + offset +
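
Lines 513-534 show the per-cluster bring-up: write the core-enable mask, kick initialization, then poll until the cores report ready. A sketch of the loop those fragments imply; the status register name, the cluster-count field extraction, the address stride, and the timeout constants are assumptions:

static int hpre_set_cluster(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 cluster_core_mask, hpre_core_info;
	u32 val, i, clusters_num;
	unsigned long offset;
	int ret;

	cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_EN].cap_val;
	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	/* assumed field layout for the cluster count */
	clusters_num = FIELD_GET(HPRE_CLUSTERS_NUM_MASK, hpre_core_info);

	for (i = 0; i < clusters_num; i++) {
		offset = i * HPRE_CLSTR_ADDR_INTRVL;	/* assumed stride */

		writel(cluster_core_mask,
		       qm->io_base + offset + HPRE_CORE_ENB);
		writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG);
		ret = readl_relaxed_poll_timeout(qm->io_base + offset +
						 HPRE_CORE_INI_STATUS, /* assumed */
						 val,
						 ((val & cluster_core_mask) ==
						  cluster_core_mask),
						 HPRE_REG_RD_INTVRL_US,
						 HPRE_REG_RD_TMOUT_US);
		if (ret) {
			dev_err(dev, "cluster %u init timed out\n", i);
			return ret;
		}
	}

	return 0;
}
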
555 static void disable_flr_of_bme(struct hisi_qm *qm)
559 val = readl(qm->io_base + QM_PEH_AXUSER_CFG);
562 writel(val, qm->io_base + QM_PEH_AXUSER_CFG);
563 writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
566 static void hpre_open_sva_prefetch(struct hisi_qm *qm)
571 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
575 val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
577 writel(val, qm->io_base + HPRE_PREFETCH_CFG);
579 ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
584 pci_err(qm->pdev, "failed to open sva prefetch\n");
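
Lines 566-584 follow the driver's usual pattern for SVA prefetch: bail out if the capability bit is absent, flip the control register, then poll until the hardware acknowledges. A sketch with the enable/disable mask names as assumptions:

static void hpre_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	/* clear the prefetch-disable bit (mask names assumed) */
	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
	val &= HPRE_PREFETCH_ENABLE;
	writel(val, qm->io_base + HPRE_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
					 val, !(val & HPRE_PREFETCH_DISABLE),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to open sva prefetch\n");
}
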
587 static void hpre_close_sva_prefetch(struct hisi_qm *qm)
592 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
595 val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
597 writel(val, qm->io_base + HPRE_PREFETCH_CFG);
599 ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
604 pci_err(qm->pdev, "failed to close sva prefetch\n");
607 static void hpre_enable_clock_gate(struct hisi_qm *qm)
614 if (qm->ver < QM_HW_V3)
617 val = readl(qm->io_base + HPRE_CLKGATE_CTL);
619 writel(val, qm->io_base + HPRE_CLKGATE_CTL);
621 val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
623 writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
625 hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
630 val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
632 writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
634 val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
636 writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
640 static void hpre_disable_clock_gate(struct hisi_qm *qm)
647 if (qm->ver < QM_HW_V3)
650 val = readl(qm->io_base + HPRE_CLKGATE_CTL);
652 writel(val, qm->io_base + HPRE_CLKGATE_CTL);
654 val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
656 writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
658 hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
663 val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
665 writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
667 val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
669 writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
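
The enable path (607-638) and disable path (640-670) are mirror images: both walk the same four registers (global clock gate, PEH auto-gate, then per-cluster dynamic gate and shared-block config) and read-modify-write the gating bits. The driver keeps them as two separate functions; the single helper below is only an illustrative condensation of that shared shape, and the *_EN mask names plus the cluster-count extraction are assumptions:

static void hpre_set_clock_gate_bits(struct hisi_qm *qm, bool enable)
{
	u32 val, i, clusters_num, hpre_core_info;
	unsigned long offset;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + HPRE_CLKGATE_CTL);
	val = enable ? val | HPRE_CLKGATE_EN : val & ~HPRE_CLKGATE_EN;
	writel(val, qm->io_base + HPRE_CLKGATE_CTL);

	val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
	val = enable ? val | HPRE_AUTO_GATE_EN : val & ~HPRE_AUTO_GATE_EN;
	writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);

	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	/* assumed field layout for the cluster count */
	clusters_num = FIELD_GET(HPRE_CLUSTERS_NUM_MASK, hpre_core_info);
	for (i = 0; i < clusters_num; i++) {
		offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;

		val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
		val = enable ? val | HPRE_CLUSTER_DYN_EN
			     : val & ~HPRE_CLUSTER_DYN_EN;
		writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);

		val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
		val = enable ? val | HPRE_CORE_GATE_EN
			     : val & ~HPRE_CORE_GATE_EN;
		writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
	}
}
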
673 static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
675 struct device *dev = &qm->pdev->dev;
680 hpre_disable_clock_gate(qm);
682 writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
683 writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
684 writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);
686 if (qm->ver >= QM_HW_V3)
688 qm->io_base + HPRE_TYPES_ENB);
690 writel(HPRE_RSA_ENB, qm->io_base + HPRE_TYPES_ENB);
692 writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE);
693 writel(0x0, qm->io_base + HPRE_BD_ENDIAN);
694 writel(0x0, qm->io_base + HPRE_POISON_BYPASS);
695 writel(0x0, qm->io_base + HPRE_ECC_BYPASS);
697 writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG);
698 writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_AWUSR_CFG);
699 writel(0x1, qm->io_base + HPRE_RDCHN_INI_CFG);
700 ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_RDCHN_INI_ST, val,
709 ret = hpre_set_cluster(qm);
714 if (qm->ver == QM_HW_V2) {
715 ret = hpre_cfg_by_dsm(qm);
719 disable_flr_of_bme(qm);
723 hpre_config_pasid(qm);
725 hpre_enable_clock_gate(qm);
730 static void hpre_cnt_regs_clear(struct hisi_qm *qm)
738 hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
743 writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
747 writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
749 hisi_qm_debug_regs_clear(qm);
752 static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
756 val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
759 val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
760 HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
766 if (qm->ver > QM_HW_V2)
767 writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
769 writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
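
Lines 752-769 show the master out-of-order (OOO) shutdown control: one register always carries the enable bit, and on HW newer than v2 a second register selects which error sources trigger shutdown, taken from the capability table. A sketch, with the enable-bit mask name assumed:

static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 val1, val2;

	val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	if (enable) {
		val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;	/* assumed mask */
		val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
					   HPRE_OOO_SHUTDOWN_MASK_CAP,
					   qm->cap_ver);
	} else {
		val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
		val2 = 0x0;
	}

	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}
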
772 static void hpre_hw_error_disable(struct hisi_qm *qm)
776 ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
777 nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
780 writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
782 hpre_master_ooo_ctrl(qm, false);
785 static void hpre_hw_error_enable(struct hisi_qm *qm)
789 ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
790 nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
793 writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
796 writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
797 writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
798 writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
801 hpre_master_ooo_ctrl(qm, true);
805 writel(~err_en, qm->io_base + HPRE_INT_MASK);
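
Taken together, lines 785-805 sketch the error-enable sequence: clear stale RAS interrupt sources, program the CE/NFE/FE enables, arm OOO shutdown, and finally unmask exactly the enabled error classes. The only step the listing hides is how err_en is built; composing it from ce, nfe, and the FE constant is an assumption consistent with the final unmask write:

static void hpre_hw_error_enable(struct hisi_qm *qm)
{
	u32 ce, nfe, err_en;

	ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
	nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);

	/* clear any stale RAS source bits before enabling */
	writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);

	/* route each class to its RAS enable register */
	writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
	writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
	writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);

	/* arm shutdown on fatal out-of-order errors */
	hpre_master_ooo_ctrl(qm, true);

	/* unmask exactly what was enabled (composition of err_en assumed) */
	err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE;
	writel(~err_en, qm->io_base + HPRE_INT_MASK);
}
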
812 return &hpre->qm;
817 struct hisi_qm *qm = hpre_file_to_qm(file);
819 return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
825 struct hisi_qm *qm = hpre_file_to_qm(file);
831 tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
833 writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);
840 struct hisi_qm *qm = hpre_file_to_qm(file);
845 return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
850 struct hisi_qm *qm = hpre_file_to_qm(file);
855 writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
862 struct hisi_qm *qm = hpre_file_to_qm(file);
867 ret = hisi_qm_get_dfx_access(qm);
884 hisi_qm_put_dfx_access(qm);
890 hisi_qm_put_dfx_access(qm);
898 struct hisi_qm *qm = hpre_file_to_qm(file);
918 ret = hisi_qm_get_dfx_access(qm);
941 hisi_qm_put_dfx_access(qm);
981 static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
984 struct hpre *hpre = container_of(qm, struct hpre, qm);
991 file_dir = qm->debug.debug_root;
1006 static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
1008 struct device *dev = &qm->pdev->dev;
1017 regset->base = qm->io_base;
1020 debugfs_create_file("regs", 0444, qm->debug.debug_root,
1026 static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
1028 struct device *dev = &qm->pdev->dev;
1036 hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
1043 tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);
1051 regset->base = qm->io_base + hpre_cluster_offsets[i];
1056 ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
1065 static int hpre_ctrl_debug_init(struct hisi_qm *qm)
1069 ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
1074 ret = hpre_pf_comm_regs_debugfs_init(qm);
1078 return hpre_cluster_debugfs_init(qm);
1083 struct hisi_qm *qm = s->private;
1086 size = qm->cap_tables.qm_cap_size;
1088 seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
1089 qm->cap_tables.qm_cap_table[i].cap_val);
1091 size = qm->cap_tables.dev_cap_size;
1093 seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
1094 qm->cap_tables.dev_cap_table[i].cap_val);
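
Lines 1083-1094 are two symmetric dump loops over the QM-level and device-level capability tables. A sketch of the whole seq_file handler; the function name is inferred from the hpre_cap_regs_fops reference at 1121:

static int hpre_cap_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;
	u32 i, size;

	size = qm->cap_tables.qm_cap_size;
	for (i = 0; i < size; i++)
		seq_printf(s, "%s= 0x%08x\n",
			   qm->cap_tables.qm_cap_table[i].name,
			   qm->cap_tables.qm_cap_table[i].cap_val);

	size = qm->cap_tables.dev_cap_size;
	for (i = 0; i < size; i++)
		seq_printf(s, "%s= 0x%08x\n",
			   qm->cap_tables.dev_cap_table[i].name,
			   qm->cap_tables.dev_cap_table[i].cap_val);

	return 0;
}
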
1101 static void hpre_dfx_debug_init(struct hisi_qm *qm)
1103 struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs;
1104 struct hpre *hpre = container_of(qm, struct hpre, qm);
1109 parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
1116 if (qm->fun_type == QM_HW_PF && hpre_regs)
1118 qm, &hpre_diff_regs_fops);
1121 qm->debug.debug_root, qm, &hpre_cap_regs_fops);
1124 static int hpre_debugfs_init(struct hisi_qm *qm)
1126 struct device *dev = &qm->pdev->dev;
1129 ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs));
1135 qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
1137 qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
1138 qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;
1140 hisi_qm_debug_init(qm);
1142 if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) {
1143 ret = hpre_ctrl_debug_init(qm);
1148 hpre_dfx_debug_init(qm);
1153 debugfs_remove_recursive(qm->debug.debug_root);
1154 hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
1158 static void hpre_debugfs_exit(struct hisi_qm *qm)
1160 debugfs_remove_recursive(qm->debug.debug_root);
1162 hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
1165 static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
1168 struct device *dev = &qm->pdev->dev;
1181 hpre_cap[i].cap_val = hisi_qm_get_cap_value(qm, hpre_cap_query_info,
1182 i, qm->cap_ver);
1194 qm->cap_tables.dev_cap_table = hpre_cap;
1195 qm->cap_tables.dev_cap_size = size;
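
Lines 1165-1195 cache every capability register into a driver-owned table at probe time, so later paths (such as hpre_check_alg_support at 369) never re-read hardware. A sketch of the implied body; the record type, the allocation call, and the hpre_cap_query_info field names are assumptions:

static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
{
	struct hisi_qm_cap_record *hpre_cap;	/* assumed record type */
	struct device *dev = &qm->pdev->dev;
	u32 i, size;

	size = ARRAY_SIZE(hpre_cap_query_info);
	hpre_cap = devm_kcalloc(dev, size, sizeof(*hpre_cap), GFP_KERNEL);
	if (!hpre_cap)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		hpre_cap[i].name = hpre_cap_query_info[i].name;
		hpre_cap[i].cap_val = hisi_qm_get_cap_value(qm,
					hpre_cap_query_info, i, qm->cap_ver);
	}

	qm->cap_tables.dev_cap_table = hpre_cap;
	qm->cap_tables.dev_cap_size = size;

	return 0;
}
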
1200 static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
1210 qm->mode = uacce_mode;
1211 qm->pdev = pdev;
1212 qm->sqe_size = HPRE_SQE_SIZE;
1213 qm->dev_name = hpre_name;
1215 qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) ?
1217 if (qm->fun_type == QM_HW_PF) {
1218 qm->qp_base = HPRE_PF_DEF_Q_BASE;
1219 qm->qp_num = pf_q_num;
1220 qm->debug.curr_qm_qp_num = pf_q_num;
1221 qm->qm_list = &hpre_devices;
1222 qm->err_ini = &hpre_err_ini;
1224 set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
1227 ret = hisi_qm_init(qm);
1229 pci_err(pdev, "Failed to init hpre qm configures!\n");
1234 ret = hpre_pre_store_cap_reg(qm);
1237 hisi_qm_uninit(qm);
1241 alg_msk = qm->cap_tables.dev_cap_table[HPRE_ALG_BITMAP].cap_val;
1242 ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
1245 hisi_qm_uninit(qm);
1251 static int hpre_show_last_regs_init(struct hisi_qm *qm)
1255 struct qm_debug *debug = &qm->debug;
1261 hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
1270 debug->last_words[i] = readl_relaxed(qm->io_base +
1274 io_base = qm->io_base + hpre_cluster_offsets[i];
1285 static void hpre_show_last_regs_uninit(struct hisi_qm *qm)
1287 struct qm_debug *debug = &qm->debug;
1289 if (qm->fun_type == QM_HW_VF || !debug->last_words)
1296 static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
1300 struct qm_debug *debug = &qm->debug;
1301 struct pci_dev *pdev = qm->pdev;
1308 if (qm->fun_type == QM_HW_VF || !debug->last_words)
1313 val = readl_relaxed(qm->io_base + hpre_com_dfx_regs[i].offset);
1319 hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
1323 io_base = qm->io_base + hpre_cluster_offsets[i];
1335 static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
1338 struct device *dev = &qm->pdev->dev;
1348 static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
1350 return readl(qm->io_base + HPRE_INT_STATUS);
1353 static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
1355 writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
1358 static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type)
1362 nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
1363 writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB);
1366 static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
1370 value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
1372 qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
1374 qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
1377 static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
1381 err_status = hpre_get_hw_err_status(qm);
1383 if (err_status & qm->err_info.ecc_2bits_mask)
1384 qm->err_status.is_dev_ecc_mbit = true;
1385 hpre_log_hw_error(qm, err_status);
1387 if (err_status & qm->err_info.dev_reset_mask) {
1389 hpre_disable_error_report(qm, err_status);
1392 hpre_clear_hw_err_status(qm, err_status);
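
Lines 1377-1392 classify a hardware error: record multi-bit ECC, log the status, then either demand a reset (muting the error until recovery) or clear the status and carry on. A sketch; the enum values are assumptions drawn from the acc_err_result return type at 1377:

static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
{
	u32 err_status;

	err_status = hpre_get_hw_err_status(qm);
	if (err_status) {
		if (err_status & qm->err_info.ecc_2bits_mask)
			qm->err_status.is_dev_ecc_mbit = true;
		hpre_log_hw_error(qm, err_status);

		if (err_status & qm->err_info.dev_reset_mask) {
			/* mute this error until the device is recovered */
			hpre_disable_error_report(qm, err_status);
			return ACC_ERR_NEED_RESET;	/* assumed enum value */
		}
		hpre_clear_hw_err_status(qm, err_status);
	}

	return ACC_ERR_RECOVERED;	/* assumed enum value */
}
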
1398 static bool hpre_dev_is_abnormal(struct hisi_qm *qm)
1402 err_status = hpre_get_hw_err_status(qm);
1403 if (err_status & qm->err_info.dev_shutdown_mask)
1409 static void hpre_err_info_init(struct hisi_qm *qm)
1411 struct hisi_qm_err_info *err_info = &qm->err_info;
1414 err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
1415 err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
1417 err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
1418 HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
1419 err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
1420 HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
1421 err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
1422 HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
1423 err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
1424 HPRE_RESET_MASK_CAP, qm->cap_ver);
1446 struct hisi_qm *qm = &hpre->qm;
1449 ret = hpre_set_user_domain_and_cache(qm);
1453 hpre_open_sva_prefetch(qm);
1455 hisi_qm_dev_err_init(qm);
1456 ret = hpre_show_last_regs_init(qm);
1458 pci_err(qm->pdev, "Failed to init last word regs!\n");
1466 struct hisi_qm *qm = &hpre->qm;
1469 if (qm->fun_type == QM_HW_PF) {
1474 if (qm->ver >= QM_HW_V3) {
1476 qm->type_rate = type_rate;
1483 static void hpre_probe_uninit(struct hisi_qm *qm)
1485 if (qm->fun_type == QM_HW_VF)
1488 hpre_cnt_regs_clear(qm);
1489 qm->debug.curr_qm_qp_num = 0;
1490 hpre_show_last_regs_uninit(qm);
1491 hpre_close_sva_prefetch(qm);
1492 hisi_qm_dev_err_uninit(qm);
1497 struct hisi_qm *qm;
1505 qm = &hpre->qm;
1506 ret = hpre_qm_init(qm, pdev);
1518 ret = hisi_qm_start(qm);
1522 ret = hpre_debugfs_init(qm);
1526 hisi_qm_add_list(qm, &hpre_devices);
1527 ret = hisi_qm_alg_register(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
1533 if (qm->uacce) {
1534 ret = uacce_register(qm->uacce);
1541 if (qm->fun_type == QM_HW_PF && vfs_num) {
1547 hisi_qm_pm_init(qm);
1552 hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
1555 hisi_qm_del_list(qm, &hpre_devices);
1556 hpre_debugfs_exit(qm);
1557 hisi_qm_stop(qm, QM_NORMAL);
1560 hpre_probe_uninit(qm);
1563 hisi_qm_uninit(qm);
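
Lines 1497-1563 give the probe skeleton and, through its error path, the teardown order, which hpre_remove (1570-1583) mirrors. A condensed sketch of the ordering the fragments imply; the allocation call, the hpre_probe_init and hisi_qm_sriov_enable helpers, and the goto label names are assumptions:

static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_qm *qm;
	struct hpre *hpre;
	int ret;

	hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
	if (!hpre)
		return -ENOMEM;

	qm = &hpre->qm;
	ret = hpre_qm_init(qm, pdev);
	if (ret)
		return ret;

	ret = hpre_probe_init(hpre);		/* assumed wrapper */
	if (ret)
		goto err_with_qm_init;

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_with_err_init;

	ret = hpre_debugfs_init(qm);
	if (ret)
		dev_warn(&pdev->dev, "init debugfs fail!\n");

	hisi_qm_add_list(qm, &hpre_devices);
	ret = hisi_qm_alg_register(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
	if (ret < 0)
		goto err_qm_del_list;

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret)
			goto err_with_alg_register;
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_with_alg_register;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_with_alg_register:
	hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);

err_qm_del_list:
	hisi_qm_del_list(qm, &hpre_devices);
	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

err_with_err_init:
	hpre_probe_uninit(qm);

err_with_qm_init:
	hisi_qm_uninit(qm);

	return ret;
}
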
1570 struct hisi_qm *qm = pci_get_drvdata(pdev);
1572 hisi_qm_pm_uninit(qm);
1573 hisi_qm_wait_task_finish(qm, &hpre_devices);
1574 hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
1575 hisi_qm_del_list(qm, &hpre_devices);
1576 if (qm->fun_type == QM_HW_PF && qm->vfs_num)
1579 hpre_debugfs_exit(qm);
1580 hisi_qm_stop(qm, QM_NORMAL);
1582 hpre_probe_uninit(qm);
1583 hisi_qm_uninit(qm);