Lines Matching refs:qm
382 struct hisi_qm *qm;
398 int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
399 void (*qm_db)(struct hisi_qm *qm, u16 qn,
401 int (*debug_init)(struct hisi_qm *qm);
402 void (*hw_error_init)(struct hisi_qm *qm);
403 void (*hw_error_uninit)(struct hisi_qm *qm);
404 enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
405 int (*set_msi)(struct hisi_qm *qm, bool set);
408 int (*set_ifc_begin)(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num);
409 void (*set_ifc_end)(struct hisi_qm *qm);
410 int (*get_ifc)(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num);
469 static void qm_irqs_unregister(struct hisi_qm *qm);
470 static int qm_reset_device(struct hisi_qm *qm);
503 static u32 qm_get_hw_error_status(struct hisi_qm *qm)
505 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
508 static u32 qm_get_dev_err_status(struct hisi_qm *qm)
510 return qm->err_ini->get_dev_hw_err_status(qm);
514 static bool qm_check_dev_error(struct hisi_qm *qm)
516 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
532 static int qm_wait_reset_finish(struct hisi_qm *qm)
537 while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
546 static int qm_reset_prepare_ready(struct hisi_qm *qm)
548 struct pci_dev *pdev = qm->pdev;
555 if (qm->ver < QM_HW_V3)
558 return qm_wait_reset_finish(qm);
561 static void qm_reset_bit_clear(struct hisi_qm *qm)
563 struct pci_dev *pdev = qm->pdev;
566 if (qm->ver < QM_HW_V3)
569 clear_bit(QM_RESETTING, &qm->misc_ctl);
585 int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
589 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
596 static void qm_mb_write(struct hisi_qm *qm, const void *src)
598 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
622 static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
627 if (unlikely(hisi_qm_wait_mb_ready(qm))) {
628 dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
633 qm_mb_write(qm, mailbox);
635 if (unlikely(hisi_qm_wait_mb_ready(qm))) {
636 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
641 val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
643 dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
651 atomic64_inc(&qm->debug.dfx.mb_err_cnt);
655 int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
663 mutex_lock(&qm->mailbox_lock);
664 ret = qm_mb_nolock(qm, &mailbox);
665 mutex_unlock(&qm->mailbox_lock);
672 int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
683 tmp_xqc = qm->xqc_buf.sqc;
684 xqc_dma = qm->xqc_buf.sqc_dma;
688 tmp_xqc = qm->xqc_buf.cqc;
689 xqc_dma = qm->xqc_buf.cqc_dma;
693 tmp_xqc = qm->xqc_buf.eqc;
694 xqc_dma = qm->xqc_buf.eqc_dma;
698 tmp_xqc = qm->xqc_buf.aeqc;
699 xqc_dma = qm->xqc_buf.aeqc_dma;
702 dev_err(&qm->pdev->dev, "unknown mailbox cmd %u\n", cmd);
707 if (qm_check_dev_error(qm)) {
708 dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stop!\n");
712 mutex_lock(&qm->mailbox_lock);
717 ret = qm_mb_nolock(qm, &mailbox);
721 mutex_unlock(&qm->mailbox_lock);
726 static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
734 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
737 static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
739 void __iomem *io_base = qm->io_base;
744 io_base = qm->db_io_base + (u64)qn * qm->db_interval +
757 static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
759 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
762 qm->ops->qm_db(qm, qn, cmd, index, priority);
765 static void qm_disable_clock_gate(struct hisi_qm *qm)
769 /* if qm enables clock gating in Kunpeng930, qos will be inaccurate. */
770 if (qm->ver < QM_HW_V3)
773 val = readl(qm->io_base + QM_PM_CTRL);
775 writel(val, qm->io_base + QM_PM_CTRL);
778 static int qm_dev_mem_reset(struct hisi_qm *qm)
782 writel(0x1, qm->io_base + QM_MEM_START_INIT);
783 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
790 * @qm: The qm from which to get information.
797 u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
803 switch (qm->ver) {
812 val = readl(qm->io_base + info_table[index].offset);
818 u32 hisi_qm_get_cap_value(struct hisi_qm *qm,
824 switch (qm->ver) {
833 val = readl(qm->io_base + info_table[index].offset);
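
hisi_qm_get_hw_info() and hisi_qm_get_cap_value() read capability registers with per-hardware-version fallbacks. A short, hedged sketch assuming the qm_basic_info table and the QM_TOTAL_QP_NUM_CAP / QM_PF_IRQ_NUM_CAP indices used later in this listing; passing true instead of qm->cap_ver forces the register-read path:

    static void example_read_caps(struct hisi_qm *qm)
    {
            u32 total_qp, irq_num;

            /* 'true' forces the register path; qm->cap_ver selects per-version defaults */
            total_qp = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true);
            irq_num  = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);

            dev_dbg(&qm->pdev->dev, "qp total %u, pf irqs %u\n", total_qp, irq_num);
    }
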
839 static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
844 depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
849 int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
852 struct device *dev = &qm->pdev->dev;
856 if (!qm->uacce)
877 qm->uacce->algs = algs;
883 static u32 qm_get_irq_num(struct hisi_qm *qm)
885 if (qm->fun_type == QM_HW_PF)
886 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);
888 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
891 static int qm_pm_get_sync(struct hisi_qm *qm)
893 struct device *dev = &qm->pdev->dev;
896 if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
908 static void qm_pm_put_sync(struct hisi_qm *qm)
910 struct device *dev = &qm->pdev->dev;
912 if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
931 struct hisi_qm *qm = qp->qm;
935 qp->req_cb(qp, qp->sqe + qm->sqe_size *
939 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
947 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
954 struct hisi_qm *qm = poll_data->qm;
960 qp = &qm->qp_array[poll_data->qp_finish_id[i]];
974 static void qm_get_complete_eqe_num(struct hisi_qm *qm)
976 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
978 u16 eq_depth = qm->eq_depth;
981 if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) {
982 atomic64_inc(&qm->debug.dfx.err_irq_cnt);
983 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
988 if (unlikely(cqn >= qm->qp_num))
990 poll_data = &qm->poll_data[cqn];
992 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
997 if (qm->status.eq_head == eq_depth - 1) {
998 qm->status.eqc_phase = !qm->status.eqc_phase;
999 eqe = qm->eqe;
1000 qm->status.eq_head = 0;
1003 qm->status.eq_head++;
1011 queue_work(qm->wq, &poll_data->work);
1012 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
1017 struct hisi_qm *qm = data;
1020 qm_get_complete_eqe_num(qm);
1027 struct hisi_qm *qm = data;
1030 val = readl(qm->io_base + QM_IFC_INT_STATUS);
1035 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) {
1036 dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n");
1040 schedule_work(&qm->cmd_process);
1059 static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
1061 struct hisi_qp *qp = &qm->qp_array[qp_id];
1068 static void qm_reset_function(struct hisi_qm *qm)
1070 struct device *dev = &qm->pdev->dev;
1073 if (qm_check_dev_error(qm))
1076 ret = qm_reset_prepare_ready(qm);
1082 ret = hisi_qm_stop(qm, QM_DOWN);
1084 dev_err(dev, "failed to stop qm when reset function\n");
1088 ret = hisi_qm_start(qm);
1090 dev_err(dev, "failed to start qm when reset function\n");
1093 qm_reset_bit_clear(qm);
1098 struct hisi_qm *qm = data;
1099 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
1100 u16 aeq_depth = qm->aeq_depth;
1103 atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
1105 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
1112 dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
1113 qm_reset_function(qm);
1116 dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
1120 qm_disable_qp(qm, qp_id);
1123 dev_err(&qm->pdev->dev, "unknown error type %u\n",
1128 if (qm->status.aeq_head == aeq_depth - 1) {
1129 qm->status.aeqc_phase = !qm->status.aeqc_phase;
1130 aeqe = qm->aeqe;
1131 qm->status.aeq_head = 0;
1134 qm->status.aeq_head++;
1138 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
1153 static void qm_init_prefetch(struct hisi_qm *qm)
1155 struct device *dev = &qm->pdev->dev;
1158 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
1176 writel(page_type, qm->io_base + QM_PAGE_SIZE);
1246 static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
1254 if (qm->ver == QM_HW_V1) {
1267 if (qm->ver == QM_HW_V1) {
1288 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
1289 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
1292 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
1299 if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
1300 factor = &qm->factor[fun_num];
1302 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
1308 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
1309 writel(type, qm->io_base + QM_VFT_CFG_TYPE);
1313 writel(fun_num, qm->io_base + QM_VFT_CFG);
1315 qm_vft_data_cfg(qm, type, base, number, factor);
1317 writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
1318 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
1320 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
1325 static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
1327 u32 qos = qm->factor[fun_num].func_qos;
1330 ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
1332 dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
1335 writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
1338 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
1347 static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
1353 ret = qm_set_vft_common(qm, i, fun_num, base, number);
1359 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
1360 ret = qm_shaper_init_vft(qm, fun_num);
1368 qm_set_vft_common(qm, i, fun_num, 0, 0);
1373 static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
1378 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
1382 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
1383 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
1391 static void qm_hw_error_init_v1(struct hisi_qm *qm)
1393 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
1396 static void qm_hw_error_cfg(struct hisi_qm *qm)
1398 struct hisi_qm_err_info *err_info = &qm->err_info;
1400 qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
1402 writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
1405 writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
1406 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
1407 writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1408 writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
1411 static void qm_hw_error_init_v2(struct hisi_qm *qm)
1415 qm_hw_error_cfg(qm);
1417 irq_unmask = ~qm->error_mask;
1418 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1419 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
1422 static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
1424 u32 irq_mask = qm->error_mask;
1426 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1427 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
1430 static void qm_hw_error_init_v3(struct hisi_qm *qm)
1434 qm_hw_error_cfg(qm);
1437 writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
1439 irq_unmask = ~qm->error_mask;
1440 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1441 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
1444 static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
1446 u32 irq_mask = qm->error_mask;
1448 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1449 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
1452 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
1455 static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
1458 struct device *dev = &qm->pdev->dev;
1471 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
1476 dev_err(dev, "qm %s doorbell timeout in function %u qp %u\n",
1479 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
1485 dev_err(dev, "qm %s fifo overflow in function %u qp %u\n",
1490 reg_val = readl(qm->io_base + QM_ABNORMAL_INF02);
1492 dev_err(dev, "qm axi poison error happened\n");
1497 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
1501 error_status = qm_get_hw_error_status(qm);
1502 if (error_status & qm->error_mask) {
1504 qm->err_status.is_qm_ecc_mbit = true;
1506 qm_log_hw_error(qm, error_status);
1507 if (error_status & qm->err_info.qm_reset_mask) {
1509 writel(qm->err_info.nfe & (~error_status),
1510 qm->io_base + QM_RAS_NFE_ENABLE);
1515 writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
1516 writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1517 writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE);
1523 static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
1529 mutex_lock(&qm->mailbox_lock);
1530 ret = qm_mb_nolock(qm, &mailbox);
1534 *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
1535 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
1538 mutex_unlock(&qm->mailbox_lock);
1542 static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
1546 if (qm->fun_type == QM_HW_PF)
1547 writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);
1549 val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
1551 writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
1554 static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
1556 struct device *dev = &qm->pdev->dev;
1560 ret = qm->ops->get_ifc(qm, &cmd, NULL, vf_id);
1582 static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
1584 struct device *dev = &qm->pdev->dev;
1585 u32 vfs_num = qm->vfs_num;
1591 if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
1595 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
1611 qm_handle_vf_msg(qm, i);
1617 qm_clear_cmd_interrupt(qm, val);
1622 static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
1626 val = readl(qm->io_base + QM_IFC_INT_CFG);
1629 writel(val, qm->io_base + QM_IFC_INT_CFG);
1631 val = readl(qm->io_base + QM_IFC_INT_SET_P);
1633 writel(val, qm->io_base + QM_IFC_INT_SET_P);
1636 static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
1640 val = readl(qm->io_base + QM_IFC_INT_SET_V);
1642 writel(val, qm->io_base + QM_IFC_INT_SET_V);
1645 static int qm_ping_single_vf(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
1647 struct device *dev = &qm->pdev->dev;
1652 ret = qm->ops->set_ifc_begin(qm, cmd, data, fun_num);
1658 qm_trigger_vf_interrupt(qm, fun_num);
1661 val = readq(qm->io_base + QM_IFC_READY_STATUS);
1674 qm->ops->set_ifc_end(qm);
1678 static int qm_ping_all_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
1680 struct device *dev = &qm->pdev->dev;
1681 u32 vfs_num = qm->vfs_num;
1687 ret = qm->ops->set_ifc_begin(qm, cmd, 0, QM_MB_PING_ALL_VFS);
1690 qm->ops->set_ifc_end(qm);
1694 qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
1697 val = readq(qm->io_base + QM_IFC_READY_STATUS);
1700 qm->ops->set_ifc_end(qm);
1708 qm->ops->set_ifc_end(qm);
1719 static int qm_ping_pf(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
1725 ret = qm->ops->set_ifc_begin(qm, cmd, 0, 0);
1727 dev_err(&qm->pdev->dev, "failed to send command(0x%x) to PF!\n", cmd);
1731 qm_trigger_pf_interrupt(qm);
1735 val = readl(qm->io_base + QM_IFC_INT_SET_V);
1746 qm->ops->set_ifc_end(qm);
1751 static int qm_drain_qm(struct hisi_qm *qm)
1753 return hisi_qm_mb(qm, QM_MB_CMD_FLUSH_QM, 0, 0, 0);
1758 return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
1761 static int qm_set_msi(struct hisi_qm *qm, bool set)
1763 struct pci_dev *pdev = qm->pdev;
1771 if (qm->err_status.is_qm_ecc_mbit ||
1772 qm->err_status.is_dev_ecc_mbit)
1776 if (readl(qm->io_base + QM_PEH_DFX_INFO0))
1783 static void qm_wait_msi_finish(struct hisi_qm *qm)
1785 struct pci_dev *pdev = qm->pdev;
1805 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
1811 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
1818 static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
1820 struct pci_dev *pdev = qm->pdev;
1841 qm_wait_msi_finish(qm);
1848 static int qm_set_ifc_begin_v3(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
1856 mutex_lock(&qm->mailbox_lock);
1857 return qm_mb_nolock(qm, &mailbox);
1860 static void qm_set_ifc_end_v3(struct hisi_qm *qm)
1862 mutex_unlock(&qm->mailbox_lock);
1865 static int qm_get_ifc_v3(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
1870 ret = qm_get_mb_cmd(qm, &msg, fun_num);
1882 static int qm_set_ifc_begin_v4(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
1887 if (qm->fun_type == QM_HW_PF)
1894 mutex_lock(&qm->ifc_lock);
1895 writeq(msg, qm->io_base + offset);
1900 static void qm_set_ifc_end_v4(struct hisi_qm *qm)
1902 mutex_unlock(&qm->ifc_lock);
1905 static u64 qm_get_ifc_pf(struct hisi_qm *qm, u32 fun_num)
1911 return (u64)readl(qm->io_base + offset);
1914 static u64 qm_get_ifc_vf(struct hisi_qm *qm)
1916 return readq(qm->io_base + QM_PF2VF_VF_R);
1919 static int qm_get_ifc_v4(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
1923 if (qm->fun_type == QM_HW_PF)
1924 msg = qm_get_ifc_pf(qm, fun_num);
1926 msg = qm_get_ifc_vf(qm);
1983 return qp->sqe + sq_tail * qp->qm->sqe_size;
1995 static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
1997 struct device *dev = &qm->pdev->dev;
2001 if (atomic_read(&qm->status.flags) == QM_STOP) {
2002 dev_info_ratelimited(dev, "failed to create qp as qm is stop!\n");
2006 if (qm->qp_in_used == qm->qp_num) {
2008 qm->qp_num);
2009 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
2013 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
2016 qm->qp_num);
2017 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
2021 qp = &qm->qp_array[qp_id];
2030 qm->qp_in_used++;
2036 * hisi_qm_create_qp() - Create a queue pair from qm.
2037 * @qm: The qm we create a qp from.
2042 static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
2047 ret = qm_pm_get_sync(qm);
2051 down_write(&qm->qps_lock);
2052 qp = qm_create_qp_nolock(qm, alg_type);
2053 up_write(&qm->qps_lock);
2056 qm_pm_put_sync(qm);
2062 * hisi_qm_release_qp() - Release a qp back to its qm.
2069 struct hisi_qm *qm = qp->qm;
2071 down_write(&qm->qps_lock);
2073 qm->qp_in_used--;
2074 idr_remove(&qm->qp_idr, qp->qp_id);
2076 up_write(&qm->qps_lock);
2078 qm_pm_put_sync(qm);
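
hisi_qm_create_qp() and hisi_qm_release_qp() bracket the in-kernel qp lifecycle. The sketch below assumes the hisi_qm_start_qp() and hisi_qp_send() helpers from the full driver (their definitions are not part of this listing), so treat it as an illustration of ordering rather than the exported API surface:

    static int example_qp_roundtrip(struct hisi_qm *qm, const void *sqe)
    {
            struct hisi_qp *qp;
            int ret;

            qp = hisi_qm_create_qp(qm, 0);          /* alg_type 0 is illustrative */
            if (IS_ERR(qp))
                    return PTR_ERR(qp);

            ret = hisi_qm_start_qp(qp, 0);          /* assumed helper, not shown above */
            if (ret)
                    goto out_release;

            ret = hisi_qp_send(qp, sqe);            /* sqe must span qm->sqe_size bytes */

            hisi_qm_stop_qp(qp);
    out_release:
            hisi_qm_release_qp(qp);
            return ret;
    }
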
2083 struct hisi_qm *qm = qp->qm;
2084 enum qm_hw_ver ver = qm->ver;
2088 sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
2091 sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
2100 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
2104 return qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 0);
2109 struct hisi_qm *qm = qp->qm;
2110 enum qm_hw_ver ver = qm->ver;
2130 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
2133 return qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 0);
2151 struct hisi_qm *qm = qp->qm;
2152 struct device *dev = &qm->pdev->dev;
2157 if (atomic_read(&qm->status.flags) == QM_STOP) {
2158 dev_info_ratelimited(dev, "failed to start qp as qm is stop!\n");
2182 struct hisi_qm *qm = qp->qm;
2185 down_write(&qm->qps_lock);
2187 up_write(&qm->qps_lock);
2205 struct hisi_qm *qm = qp->qm;
2211 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
2216 static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id)
2218 struct device *dev = &qm->pdev->dev;
2224 ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
2231 ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
2264 struct hisi_qm *qm = qp->qm;
2269 if (qm_check_dev_error(qm))
2273 if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
2276 dev_err(&qm->pdev->dev, "Failed to stop qp!\n");
2283 ret = qm_wait_qp_empty(qm, &state, qp->qp_id);
2290 if (qm->debug.dev_dfx.dev_timeout)
2291 qm->debug.dev_dfx.dev_state = state;
2298 struct hisi_qm *qm = qp->qm;
2299 struct device *dev = &qm->pdev->dev;
2316 if (qm->ver < QM_HW_V3 || qm->status.stop_reason == QM_NORMAL) {
2322 flush_workqueue(qm->wq);
2330 * hisi_qm_stop_qp() - Stop a qp in qm.
2337 down_write(&qp->qm->qps_lock);
2339 up_write(&qp->qm->qps_lock);
2349 * if qp related qm is resetting.
2366 atomic_read(&qp->qm->status.flags) == QM_STOP ||
2368 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
2375 memcpy(sqe, msg, qp->qm->sqe_size);
2377 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
2385 static void hisi_qm_cache_wb(struct hisi_qm *qm)
2389 if (qm->ver == QM_HW_V1)
2392 writel(0x1, qm->io_base + QM_CACHE_WB_START);
2393 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
2396 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
2404 /* This function returns the number of free qps in the qm. */
2407 struct hisi_qm *qm = uacce->priv;
2410 down_read(&qm->qps_lock);
2411 ret = qm->qp_num - qm->qp_in_used;
2412 up_read(&qm->qps_lock);
2417 static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset)
2421 for (i = 0; i < qm->qp_num; i++)
2422 qm_set_qp_disable(&qm->qp_array[i], offset);
2429 struct hisi_qm *qm = uacce->priv;
2433 qp = hisi_qm_create_qp(qm, alg_type);
2460 struct hisi_qm *qm = qp->qm;
2461 resource_size_t phys_base = qm->db_phys_base +
2462 qp->qp_id * qm->db_interval;
2464 struct pci_dev *pdev = qm->pdev;
2471 if (qm->ver == QM_HW_V1) {
2474 } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
2479 if (sz > qm->db_interval)
2518 struct hisi_qm *qm = qp->qm;
2519 struct qm_dev_dfx *dev_dfx = &qm->debug.dev_dfx;
2536 dev_err(&qm->pdev->dev, "Stop q %u timeout, state %u\n",
2563 struct hisi_qm *qm = q->uacce->priv;
2566 down_write(&qm->qps_lock);
2568 up_write(&qm->qps_lock);
2599 qp_info.sqe_size = qp->qm->sqe_size;
2616 * @qm: the uacce device
2618 static int qm_hw_err_isolate(struct hisi_qm *qm)
2624 isolate = &qm->isolate_data;
2629 if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold)
2662 static void qm_hw_err_destroy(struct hisi_qm *qm)
2666 mutex_lock(&qm->isolate_data.isolate_lock);
2667 list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) {
2671 mutex_unlock(&qm->isolate_data.isolate_lock);
2676 struct hisi_qm *qm = uacce->priv;
2680 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
2682 pf_qm = qm;
2690 struct hisi_qm *qm = uacce->priv;
2696 if (qm->isolate_data.is_isolate)
2699 qm->isolate_data.err_threshold = num;
2702 qm_hw_err_destroy(qm);
2709 struct hisi_qm *qm = uacce->priv;
2713 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
2717 return qm->isolate_data.err_threshold;
2734 static void qm_remove_uacce(struct hisi_qm *qm)
2736 struct uacce_device *uacce = qm->uacce;
2738 if (qm->use_sva) {
2739 qm_hw_err_destroy(qm);
2741 qm->uacce = NULL;
2745 static int qm_alloc_uacce(struct hisi_qm *qm)
2747 struct pci_dev *pdev = qm->pdev;
2768 qm->use_sva = true;
2771 qm_remove_uacce(qm);
2776 uacce->priv = qm;
2778 if (qm->ver == QM_HW_V1)
2780 else if (qm->ver == QM_HW_V2)
2785 if (qm->ver == QM_HW_V1)
2787 else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
2791 mmio_page_nr = qm->db_interval / PAGE_SIZE;
2793 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
2796 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
2803 qm->uacce = uacce;
2804 INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs);
2805 mutex_init(&qm->isolate_data.isolate_lock);
2813 * @qm: The qm to be frozen.
2817 static int qm_frozen(struct hisi_qm *qm)
2819 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
2822 down_write(&qm->qps_lock);
2824 if (!qm->qp_in_used) {
2825 qm->qp_in_used = qm->qp_num;
2826 up_write(&qm->qps_lock);
2827 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
2831 up_write(&qm->qps_lock);
2839 struct hisi_qm *qm, *vf_qm;
2848 list_for_each_entry(qm, &qm_list->list, list) {
2849 dev = qm->pdev;
2869 * @qm: The qm that needs to wait for its tasks to finish.
2872 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
2874 while (qm_frozen(qm) ||
2875 ((qm->fun_type == QM_HW_PF) &&
2876 qm_try_frozen_vfs(qm->pdev, qm_list))) {
2880 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) ||
2881 test_bit(QM_RESETTING, &qm->misc_ctl))
2884 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
2885 flush_work(&qm->cmd_process);
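
hisi_qm_wait_task_finish() is the remove-path barrier: it spins until the function is frozen, any scheduled reset has completed, and the mailbox work is flushed. A plausible remove-path sketch built from helpers documented later in this listing; the exact ordering in the real accelerator drivers may differ slightly, and the guard value is illustrative:

    static void example_remove(struct pci_dev *pdev, struct hisi_qm_list *list)
    {
            struct hisi_qm *qm = pci_get_drvdata(pdev);

            hisi_qm_wait_task_finish(qm, list);     /* drain in-flight work first */
            hisi_qm_alg_unregister(qm, list, 2);    /* guard value is illustrative */
            hisi_qm_uninit(qm);
    }
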
2891 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
2893 struct device *dev = &qm->pdev->dev;
2898 qdma = &qm->qp_array[i].qdma;
2900 kfree(qm->poll_data[i].qp_finish_id);
2903 kfree(qm->poll_data);
2904 kfree(qm->qp_array);
2907 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id,
2910 struct device *dev = &qm->pdev->dev;
2911 size_t off = qm->sqe_size * sq_depth;
2915 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16),
2917 if (!qm->poll_data[id].qp_finish_id)
2920 qp = &qm->qp_array[id];
2933 qp->qm = qm;
2939 kfree(qm->poll_data[id].qp_finish_id);
2943 static void hisi_qm_pre_init(struct hisi_qm *qm)
2945 struct pci_dev *pdev = qm->pdev;
2947 if (qm->ver == QM_HW_V1)
2948 qm->ops = &qm_hw_ops_v1;
2949 else if (qm->ver == QM_HW_V2)
2950 qm->ops = &qm_hw_ops_v2;
2951 else if (qm->ver == QM_HW_V3)
2952 qm->ops = &qm_hw_ops_v3;
2954 qm->ops = &qm_hw_ops_v4;
2956 pci_set_drvdata(pdev, qm);
2957 mutex_init(&qm->mailbox_lock);
2958 mutex_init(&qm->ifc_lock);
2959 init_rwsem(&qm->qps_lock);
2960 qm->qp_in_used = 0;
2961 if (test_bit(QM_SUPPORT_RPM, &qm->caps)) {
2967 static void qm_cmd_uninit(struct hisi_qm *qm)
2971 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
2974 val = readl(qm->io_base + QM_IFC_INT_MASK);
2976 writel(val, qm->io_base + QM_IFC_INT_MASK);
2979 static void qm_cmd_init(struct hisi_qm *qm)
2983 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
2987 qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR);
2990 val = readl(qm->io_base + QM_IFC_INT_MASK);
2992 writel(val, qm->io_base + QM_IFC_INT_MASK);
2995 static void qm_put_pci_res(struct hisi_qm *qm)
2997 struct pci_dev *pdev = qm->pdev;
2999 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
3000 iounmap(qm->db_io_base);
3002 iounmap(qm->io_base);
3006 static void hisi_qm_pci_uninit(struct hisi_qm *qm)
3008 struct pci_dev *pdev = qm->pdev;
3011 qm_put_pci_res(qm);
3015 static void hisi_qm_set_state(struct hisi_qm *qm, u8 state)
3017 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF)
3018 writel(state, qm->io_base + QM_VF_STATE);
3021 static void hisi_qm_unint_work(struct hisi_qm *qm)
3023 destroy_workqueue(qm->wq);
3026 static void hisi_qm_free_rsv_buf(struct hisi_qm *qm)
3028 struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma;
3029 struct device *dev = &qm->pdev->dev;
3034 static void hisi_qm_memory_uninit(struct hisi_qm *qm)
3036 struct device *dev = &qm->pdev->dev;
3038 hisi_qp_memory_uninit(qm, qm->qp_num);
3039 hisi_qm_free_rsv_buf(qm);
3040 if (qm->qdma.va) {
3041 hisi_qm_cache_wb(qm);
3042 dma_free_coherent(dev, qm->qdma.size,
3043 qm->qdma.va, qm->qdma.dma);
3046 idr_destroy(&qm->qp_idr);
3048 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
3049 kfree(qm->factor);
3053 * hisi_qm_uninit() - Uninitialize qm.
3054 * @qm: The qm to be uninitialized.
3056 * This function uninits qm related device resources.
3058 void hisi_qm_uninit(struct hisi_qm *qm)
3060 qm_cmd_uninit(qm);
3061 hisi_qm_unint_work(qm);
3063 down_write(&qm->qps_lock);
3064 hisi_qm_memory_uninit(qm);
3065 hisi_qm_set_state(qm, QM_NOT_READY);
3066 up_write(&qm->qps_lock);
3068 qm_remove_uacce(qm);
3069 qm_irqs_unregister(qm);
3070 hisi_qm_pci_uninit(qm);
3075 * hisi_qm_get_vft() - Get vft from a qm.
3076 * @qm: The qm whose vft we want to get.
3080 * We can allocate multiple queues to a qm by configuring virtual function
3084 * qm hw v1 does not support this interface.
3086 static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
3091 if (!qm->ops->get_vft) {
3092 dev_err(&qm->pdev->dev, "Don't support vft read!\n");
3096 return qm->ops->get_vft(qm, base, number);
3100 * hisi_qm_set_vft() - Set vft to a qm.
3101 * @qm: The qm whose vft we want to set.
3109 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
3110 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
3113 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
3116 u32 max_q_num = qm->ctrl_qp_num;
3122 return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
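
The kernel-doc above already gives the partitioning rule; the sketch below restates it as code. Queue numbers are illustrative, and hisi_qm_set_vft() is file-local, so this shows how the PF side of this file carves up queues, not a driver-facing API:

    /* Give the PF queues 0..(pf_num - 1), then hand VF 1 the next vf_num queues. */
    static int example_partition_queues(struct hisi_qm *qm, u32 pf_num, u32 vf_num)
    {
            int ret;

            ret = hisi_qm_set_vft(qm, 0, 0, pf_num);        /* fun_num 0 is the PF */
            if (ret)
                    return ret;

            return hisi_qm_set_vft(qm, 1, pf_num, vf_num);  /* fun_num 1 is VF 1 */
    }
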
3125 static void qm_init_eq_aeq_status(struct hisi_qm *qm)
3127 struct hisi_qm_status *status = &qm->status;
3135 static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm)
3138 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
3139 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
3141 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
3142 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
3145 static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm)
3147 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
3148 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
3151 static int qm_eq_ctx_cfg(struct hisi_qm *qm)
3155 eqc.base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
3156 eqc.base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
3157 if (qm->ver == QM_HW_V1)
3159 eqc.dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
3161 return qm_set_and_get_xqc(qm, QM_MB_CMD_EQC, &eqc, 0, 0);
3164 static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
3168 aeqc.base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
3169 aeqc.base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
3170 aeqc.dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
3172 return qm_set_and_get_xqc(qm, QM_MB_CMD_AEQC, &aeqc, 0, 0);
3175 static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
3177 struct device *dev = &qm->pdev->dev;
3180 qm_init_eq_aeq_status(qm);
3182 ret = qm_eq_ctx_cfg(qm);
3188 return qm_aeq_ctx_cfg(qm);
3191 static int __hisi_qm_start(struct hisi_qm *qm)
3195 WARN_ON(!qm->qdma.va);
3197 if (qm->fun_type == QM_HW_PF) {
3198 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
3203 ret = qm_eq_aeq_ctx_cfg(qm);
3207 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
3211 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
3215 qm_init_prefetch(qm);
3216 qm_enable_eq_aeq_interrupts(qm);
3222 * hisi_qm_start() - start qm
3223 * @qm: The qm to be started.
3225 * This function starts a qm, then we can allocate qp from this qm.
3227 int hisi_qm_start(struct hisi_qm *qm)
3229 struct device *dev = &qm->pdev->dev;
3232 down_write(&qm->qps_lock);
3234 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);
3236 if (!qm->qp_num) {
3242 ret = __hisi_qm_start(qm);
3246 atomic_set(&qm->status.flags, QM_WORK);
3247 hisi_qm_set_state(qm, QM_READY);
3250 up_write(&qm->qps_lock);
3255 static int qm_restart(struct hisi_qm *qm)
3257 struct device *dev = &qm->pdev->dev;
3261 ret = hisi_qm_start(qm);
3265 down_write(&qm->qps_lock);
3266 for (i = 0; i < qm->qp_num; i++) {
3267 qp = &qm->qp_array[i];
3274 up_write(&qm->qps_lock);
3280 up_write(&qm->qps_lock);
3286 static void qm_stop_started_qp(struct hisi_qm *qm)
3291 for (i = 0; i < qm->qp_num; i++) {
3292 qp = &qm->qp_array[i];
3301 * qm_clear_queues() - Clear all queues memory in a qm.
3302 * @qm: The qm in which the queues will be cleared.
3304 * This function clears all queues memory in a qm. Reset of accelerator can
3307 static void qm_clear_queues(struct hisi_qm *qm)
3312 for (i = 0; i < qm->qp_num; i++) {
3313 qp = &qm->qp_array[i];
3318 memset(qm->qdma.va, 0, qm->qdma.size);
3322 * hisi_qm_stop() - Stop a qm.
3323 * @qm: The qm which will be stopped.
3324 * @r: The reason to stop qm.
3326 * This function stops the qm and its qps; the qm can then no longer accept requests.
3328 * to let qm start again.
3330 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
3332 struct device *dev = &qm->pdev->dev;
3335 down_write(&qm->qps_lock);
3337 if (atomic_read(&qm->status.flags) == QM_STOP)
3341 atomic_set(&qm->status.flags, QM_STOP);
3342 qm->status.stop_reason = r;
3344 if (qm->status.stop_reason != QM_NORMAL) {
3345 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
3351 if (test_bit(QM_SUPPORT_STOP_FUNC, &qm->caps) &&
3353 ret = qm_drain_qm(qm);
3355 dev_err(dev, "failed to drain qm!\n");
3360 qm_stop_started_qp(qm);
3362 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
3365 qm_disable_eq_aeq_interrupts(qm);
3366 if (qm->fun_type == QM_HW_PF) {
3367 ret = hisi_qm_set_vft(qm, 0, 0, 0);
3375 qm_clear_queues(qm);
3376 qm->status.stop_reason = QM_NORMAL;
3379 up_write(&qm->qps_lock);
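
The stop reason matters: later callers in this listing pass QM_DOWN from the shutdown and FLR paths and QM_NORMAL from suspend. A minimal, hedged sketch of choosing the reason:

    static void example_stop(struct hisi_qm *qm, bool going_down)
    {
            int ret;

            ret = hisi_qm_stop(qm, going_down ? QM_DOWN : QM_NORMAL);
            if (ret)
                    dev_err(&qm->pdev->dev, "failed to stop qm (%d)\n", ret);
    }
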
3384 static void qm_hw_error_init(struct hisi_qm *qm)
3386 if (!qm->ops->hw_error_init) {
3387 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
3391 qm->ops->hw_error_init(qm);
3394 static void qm_hw_error_uninit(struct hisi_qm *qm)
3396 if (!qm->ops->hw_error_uninit) {
3397 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
3401 qm->ops->hw_error_uninit(qm);
3404 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
3406 if (!qm->ops->hw_error_handle) {
3407 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
3411 return qm->ops->hw_error_handle(qm);
3416 * @qm: The qm for which we want to do error initialization.
3420 void hisi_qm_dev_err_init(struct hisi_qm *qm)
3422 if (qm->fun_type == QM_HW_VF)
3425 qm_hw_error_init(qm);
3427 if (!qm->err_ini->hw_err_enable) {
3428 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
3431 qm->err_ini->hw_err_enable(qm);
3437 * @qm: The qm for which we want to do error uninitialization.
3441 void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
3443 if (qm->fun_type == QM_HW_VF)
3446 qm_hw_error_uninit(qm);
3448 if (!qm->err_ini->hw_err_disable) {
3449 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
3452 qm->err_ini->hw_err_disable(qm);
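
hisi_qm_dev_err_init()/hisi_qm_dev_err_uninit() only take effect on the PF and rely on the err_ini callbacks being populated. A hedged ordering sketch, mirroring how the controller-reset path later in this listing re-enables error reporting after qm_dev_hw_init():

    static int example_err_setup(struct hisi_qm *qm)
    {
            int ret;

            ret = qm->err_ini->hw_init(qm);         /* device-specific init, as in qm_dev_hw_init() */
            if (ret)
                    return ret;

            hisi_qm_dev_err_init(qm);               /* undone later with hisi_qm_dev_err_uninit() */
            return 0;
    }
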
3487 struct hisi_qm *qm;
3492 list_for_each_entry(qm, &qm_list->list, list) {
3493 dev = &qm->pdev->dev;
3503 res->qm = qm;
3549 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
3573 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
3576 u32 max_qp_num = qm->max_qp_num;
3577 u32 q_base = qm->qp_num;
3583 vfs_q_num = qm->ctrl_qp_num - qm->qp_num;
3608 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
3611 hisi_qm_set_vft(qm, j, 0, 0);
3620 static int qm_clear_vft_config(struct hisi_qm *qm)
3625 for (i = 1; i <= qm->vfs_num; i++) {
3626 ret = hisi_qm_set_vft(qm, i, 0, 0);
3630 qm->vfs_num = 0;
3635 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
3637 struct device *dev = &qm->pdev->dev;
3641 total_vfs = pci_sriov_get_totalvfs(qm->pdev);
3645 qm->factor[fun_index].func_qos = qos;
3647 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
3655 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
3665 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
3673 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
3679 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
3680 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE);
3681 writel(fun_index, qm->io_base + QM_VFT_CFG);
3683 writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
3684 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
3686 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
3692 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
3693 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32);
3704 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE;
3708 pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate);
3715 static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
3717 struct device *dev = &qm->pdev->dev;
3721 qos = qm_get_shaper_vft_qos(qm, fun_num);
3727 ret = qm_ping_single_vf(qm, QM_PF_SET_QOS, qos, fun_num);
3732 static int qm_vf_read_qos(struct hisi_qm *qm)
3738 qm->mb_qos = 0;
3741 ret = qm_ping_pf(qm, QM_VF_GET_QOS);
3743 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
3749 if (qm->mb_qos)
3753 pci_err(qm->pdev, "PF ping VF timeout!\n");
3764 struct hisi_qm *qm = filp->private_data;
3769 ret = hisi_qm_get_dfx_access(qm);
3774 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
3775 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
3780 if (qm->fun_type == QM_HW_PF) {
3781 ir = qm_get_shaper_vft_qos(qm, 0);
3783 ret = qm_vf_read_qos(qm);
3786 ir = qm->mb_qos;
3795 clear_bit(QM_RESETTING, &qm->misc_ctl);
3797 hisi_qm_put_dfx_access(qm);
3801 static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
3805 const struct bus_type *bus_type = qm->pdev->dev.bus;
3818 pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n");
3824 pci_err(qm->pdev, "input pci bdf number is error!\n");
3838 struct hisi_qm *qm = filp->private_data;
3855 ret = qm_get_qos_value(qm, tbuf, &val, &fun_index);
3860 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
3861 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
3865 ret = qm_pm_get_sync(qm);
3871 ret = qm_func_shaper_enable(qm, fun_index, val);
3873 pci_err(qm->pdev, "failed to enable function shaper!\n");
3878 pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n",
3883 qm_pm_put_sync(qm);
3885 clear_bit(QM_RESETTING, &qm->misc_ctl);
3898 * @qm: The qm for which we want to add debugfs files.
3902 void hisi_qm_set_algqos_init(struct hisi_qm *qm)
3904 if (qm->fun_type == QM_HW_PF)
3905 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
3906 qm, &qm_algqos_fops);
3907 else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
3908 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
3909 qm, &qm_algqos_fops);
3912 static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
3917 qm->factor[i].func_qos = QM_QOS_MAX_VAL;
3931 struct hisi_qm *qm = pci_get_drvdata(pdev);
3934 ret = qm_pm_get_sync(qm);
3954 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
3955 hisi_qm_init_vf_qos(qm, num_vfs);
3957 ret = qm_vf_q_assign(qm, num_vfs);
3966 qm_clear_vft_config(qm);
3969 qm->vfs_num = num_vfs;
3976 qm_pm_put_sync(qm);
3990 struct hisi_qm *qm = pci_get_drvdata(pdev);
3998 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
4005 qm->vfs_num = 0;
4006 qm_pm_put_sync(qm);
4008 return qm_clear_vft_config(qm);
4028 static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
4030 if (!qm->err_ini->get_err_result) {
4031 dev_err(&qm->pdev->dev, "Device doesn't support reset!\n");
4035 return qm->err_ini->get_err_result(qm);
4038 static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
4042 /* log qm error */
4043 qm_ret = qm_hw_error_handle(qm);
4046 dev_ret = qm_dev_err_handle(qm);
4054 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
4059 * qm hardware error status when error occur.
4064 struct hisi_qm *qm = pci_get_drvdata(pdev);
4074 ret = qm_process_dev_error(qm);
4082 static int qm_check_req_recv(struct hisi_qm *qm)
4084 struct pci_dev *pdev = qm->pdev;
4088 if (qm->ver >= QM_HW_V3)
4091 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
4092 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
4100 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
4101 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
4110 static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
4112 struct pci_dev *pdev = qm->pdev;
4134 static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
4136 struct pci_dev *pdev = qm->pdev;
4166 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
4171 if (qm->ver >= QM_HW_V3)
4174 if (!qm->err_status.is_dev_ecc_mbit &&
4175 qm->err_status.is_qm_ecc_mbit &&
4176 qm->err_ini->close_axi_master_ooo) {
4177 qm->err_ini->close_axi_master_ooo(qm);
4178 } else if (qm->err_status.is_dev_ecc_mbit &&
4179 !qm->err_status.is_qm_ecc_mbit &&
4180 !qm->err_ini->close_axi_master_ooo) {
4181 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
4183 qm->io_base + QM_RAS_NFE_ENABLE);
4184 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
4188 static int qm_vf_reset_prepare(struct hisi_qm *qm,
4191 struct hisi_qm_list *qm_list = qm->qm_list;
4192 struct pci_dev *pdev = qm->pdev;
4218 static int qm_try_stop_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd,
4221 struct pci_dev *pdev = qm->pdev;
4224 if (!qm->vfs_num)
4228 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
4229 ret = qm_ping_all_vfs(qm, cmd);
4233 ret = qm_vf_reset_prepare(qm, stop_reason);
4241 static int qm_controller_reset_prepare(struct hisi_qm *qm)
4243 struct pci_dev *pdev = qm->pdev;
4246 if (qm->err_ini->set_priv_status) {
4247 ret = qm->err_ini->set_priv_status(qm);
4252 ret = qm_reset_prepare_ready(qm);
4258 qm_dev_ecc_mbit_handle(qm);
4261 qm_cmd_uninit(qm);
4264 ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET);
4268 ret = hisi_qm_stop(qm, QM_SOFT_RESET);
4271 qm_reset_bit_clear(qm);
4275 if (qm->use_sva) {
4276 ret = qm_hw_err_isolate(qm);
4281 ret = qm_wait_vf_prepare_finish(qm);
4285 clear_bit(QM_RST_SCHED, &qm->misc_ctl);
4290 static int qm_master_ooo_check(struct hisi_qm *qm)
4296 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
4297 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
4301 pci_warn(qm->pdev, "Bus lock! Please reset system.\n");
4306 static int qm_soft_reset_prepare(struct hisi_qm *qm)
4308 struct pci_dev *pdev = qm->pdev;
4312 ret = qm_check_req_recv(qm);
4316 if (qm->vfs_num) {
4317 ret = qm_set_vf_mse(qm, false);
4324 ret = qm->ops->set_msi(qm, false);
4330 ret = qm_master_ooo_check(qm);
4334 if (qm->err_ini->close_sva_prefetch)
4335 qm->err_ini->close_sva_prefetch(qm);
4337 ret = qm_set_pf_mse(qm, false);
4344 static int qm_reset_device(struct hisi_qm *qm)
4346 struct pci_dev *pdev = qm->pdev;
4354 qm->err_info.acpi_rst,
4373 static int qm_soft_reset(struct hisi_qm *qm)
4377 ret = qm_soft_reset_prepare(qm);
4381 return qm_reset_device(qm);
4384 static int qm_vf_reset_done(struct hisi_qm *qm)
4386 struct hisi_qm_list *qm_list = qm->qm_list;
4387 struct pci_dev *pdev = qm->pdev;
4413 static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
4415 struct pci_dev *pdev = qm->pdev;
4418 if (!qm->vfs_num)
4421 ret = qm_vf_q_assign(qm, qm->vfs_num);
4428 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
4429 ret = qm_ping_all_vfs(qm, cmd);
4433 ret = qm_vf_reset_done(qm);
4441 static int qm_dev_hw_init(struct hisi_qm *qm)
4443 return qm->err_ini->hw_init(qm);
4446 static void qm_restart_prepare(struct hisi_qm *qm)
4450 if (qm->err_ini->open_sva_prefetch)
4451 qm->err_ini->open_sva_prefetch(qm);
4453 if (qm->ver >= QM_HW_V3)
4456 if (!qm->err_status.is_qm_ecc_mbit &&
4457 !qm->err_status.is_dev_ecc_mbit)
4461 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4462 writel(value & ~qm->err_info.msi_wr_port,
4463 qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4466 value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
4467 if (value && qm->err_ini->clear_dev_hw_err_status)
4468 qm->err_ini->clear_dev_hw_err_status(qm, value);
4471 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
4474 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
4477 static void qm_restart_done(struct hisi_qm *qm)
4481 if (qm->ver >= QM_HW_V3)
4484 if (!qm->err_status.is_qm_ecc_mbit &&
4485 !qm->err_status.is_dev_ecc_mbit)
4489 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4490 value |= qm->err_info.msi_wr_port;
4491 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
4494 qm->err_status.is_qm_ecc_mbit = false;
4495 qm->err_status.is_dev_ecc_mbit = false;
4498 static int qm_controller_reset_done(struct hisi_qm *qm)
4500 struct pci_dev *pdev = qm->pdev;
4503 ret = qm->ops->set_msi(qm, true);
4509 ret = qm_set_pf_mse(qm, true);
4515 if (qm->vfs_num) {
4516 ret = qm_set_vf_mse(qm, true);
4523 ret = qm_dev_hw_init(qm);
4529 qm_restart_prepare(qm);
4530 hisi_qm_dev_err_init(qm);
4531 if (qm->err_ini->open_axi_master_ooo)
4532 qm->err_ini->open_axi_master_ooo(qm);
4534 ret = qm_dev_mem_reset(qm);
4540 ret = qm_restart(qm);
4546 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
4550 ret = qm_wait_vf_prepare_finish(qm);
4554 qm_cmd_init(qm);
4555 qm_restart_done(qm);
4557 qm_reset_bit_clear(qm);
4562 static int qm_controller_reset(struct hisi_qm *qm)
4564 struct pci_dev *pdev = qm->pdev;
4569 ret = qm_controller_reset_prepare(qm);
4571 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
4572 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
4573 clear_bit(QM_RST_SCHED, &qm->misc_ctl);
4577 hisi_qm_show_last_dfx_regs(qm);
4578 if (qm->err_ini->show_last_dfx_regs)
4579 qm->err_ini->show_last_dfx_regs(qm);
4581 ret = qm_soft_reset(qm);
4585 ret = qm_controller_reset_done(qm);
4595 qm_reset_bit_clear(qm);
4598 if (qm->use_sva)
4599 qm->isolate_data.is_isolate = true;
4612 struct hisi_qm *qm = pci_get_drvdata(pdev);
4619 ret = qm_controller_reset(qm);
4632 struct hisi_qm *qm = pci_get_drvdata(pdev);
4642 while (qm_check_dev_error(qm)) {
4648 ret = qm_reset_prepare_ready(qm);
4655 if (qm->fun_type == QM_HW_PF)
4656 qm_cmd_uninit(qm);
4658 ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN);
4662 ret = hisi_qm_stop(qm, QM_DOWN);
4665 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
4666 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
4670 ret = qm_wait_vf_prepare_finish(qm);
4681 struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
4684 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
4696 struct hisi_qm *qm = pci_get_drvdata(pdev);
4699 if (qm->fun_type == QM_HW_PF) {
4700 ret = qm_dev_hw_init(qm);
4709 ret = qm_restart(qm);
4715 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
4719 ret = qm_wait_vf_prepare_finish(qm);
4724 if (qm->fun_type == QM_HW_PF)
4725 qm_cmd_init(qm);
4730 qm_reset_bit_clear(qm);
4736 struct hisi_qm *qm = data;
4739 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
4740 ret = qm_process_dev_error(qm);
4742 !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
4743 !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
4744 schedule_work(&qm->rst_work);
4753 * This function stops the qm when the OS shuts down or reboots.
4757 struct hisi_qm *qm = pci_get_drvdata(pdev);
4760 ret = hisi_qm_stop(qm, QM_DOWN);
4762 dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
4764 hisi_qm_cache_wb(qm);
4770 struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
4773 ret = qm_pm_get_sync(qm);
4775 clear_bit(QM_RST_SCHED, &qm->misc_ctl);
4780 ret = qm_controller_reset(qm);
4782 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
4784 qm_pm_put_sync(qm);
4787 static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
4791 struct pci_dev *pdev = qm->pdev;
4794 ret = qm_reset_prepare_ready(qm);
4797 atomic_set(&qm->status.flags, QM_STOP);
4802 ret = hisi_qm_stop(qm, stop_reason);
4805 atomic_set(&qm->status.flags, QM_STOP);
4813 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
4814 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
4817 ret = qm_ping_pf(qm, cmd);
4822 static void qm_pf_reset_vf_done(struct hisi_qm *qm)
4825 struct pci_dev *pdev = qm->pdev;
4829 ret = hisi_qm_start(qm);
4835 qm_cmd_init(qm);
4836 ret = qm_ping_pf(qm, cmd);
4840 qm_reset_bit_clear(qm);
4843 static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
4845 struct device *dev = &qm->pdev->dev;
4850 ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val,
4863 ret = qm->ops->get_ifc(qm, &cmd, NULL, 0);
4864 qm_clear_cmd_interrupt(qm, 0);
4878 static void qm_pf_reset_vf_process(struct hisi_qm *qm,
4881 struct device *dev = &qm->pdev->dev;
4887 qm_cmd_uninit(qm);
4888 qm_pf_reset_vf_prepare(qm, stop_reason);
4890 ret = qm_wait_pf_reset_finish(qm);
4894 qm_pf_reset_vf_done(qm);
4901 qm_cmd_init(qm);
4902 qm_reset_bit_clear(qm);
4905 static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
4907 struct device *dev = &qm->pdev->dev;
4916 ret = qm->ops->get_ifc(qm, &cmd, &data, fun_num);
4917 qm_clear_cmd_interrupt(qm, BIT(fun_num));
4925 qm_pf_reset_vf_process(qm, QM_DOWN);
4928 qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
4931 qm_vf_get_qos(qm, fun_num);
4934 qm->mb_qos = data;
4944 struct hisi_qm *qm = container_of(cmd_process,
4946 u32 vfs_num = qm->vfs_num;
4950 if (qm->fun_type == QM_HW_PF) {
4951 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
4957 qm_handle_cmd_msg(qm, i);
4963 qm_handle_cmd_msg(qm, 0);
4968 * @qm: The qm to be registered.
4969 * @qm_list: The qm list.
4974 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard)
4976 struct device *dev = &qm->pdev->dev;
4978 if (qm->ver <= QM_HW_V2 && qm->use_sva) {
4983 if (qm->qp_num < guard) {
4988 return qm_list->register_to_crypto(qm);
4994 * @qm: The qm to be unregistered.
4995 * @qm_list: The qm list.
5000 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard)
5002 if (qm->ver <= QM_HW_V2 && qm->use_sva)
5005 if (qm->qp_num < guard)
5008 qm_list->unregister_from_crypto(qm);
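
hisi_qm_alg_register()/hisi_qm_alg_unregister() gate crypto registration on SVA support and on a minimum usable qp count (the guard). A hedged sketch of the pairing; the guard value of 2 is illustrative:

    static int example_register_algs(struct hisi_qm *qm)
    {
            /* refuses registration when fewer than 2 qps were assigned */
            return hisi_qm_alg_register(qm, qm->qm_list, 2);
    }

    static void example_unregister_algs(struct hisi_qm *qm)
    {
            hisi_qm_alg_unregister(qm, qm->qm_list, 2);
    }
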
5012 static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
5014 struct pci_dev *pdev = qm->pdev;
5017 if (qm->fun_type == QM_HW_VF)
5020 val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
5025 free_irq(pci_irq_vector(pdev, irq_vector), qm);
5028 static int qm_register_abnormal_irq(struct hisi_qm *qm)
5030 struct pci_dev *pdev = qm->pdev;
5034 if (qm->fun_type == QM_HW_VF)
5037 val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
5042 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
5044 dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);
5049 static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
5051 struct pci_dev *pdev = qm->pdev;
5054 val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val;
5059 free_irq(pci_irq_vector(pdev, irq_vector), qm);
5062 static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
5064 struct pci_dev *pdev = qm->pdev;
5068 val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val;
5073 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm);
5080 static void qm_unregister_aeq_irq(struct hisi_qm *qm)
5082 struct pci_dev *pdev = qm->pdev;
5085 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val;
5090 free_irq(pci_irq_vector(pdev, irq_vector), qm);
5093 static int qm_register_aeq_irq(struct hisi_qm *qm)
5095 struct pci_dev *pdev = qm->pdev;
5099 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val;
5105 qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm);
5112 static void qm_unregister_eq_irq(struct hisi_qm *qm)
5114 struct pci_dev *pdev = qm->pdev;
5117 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val;
5122 free_irq(pci_irq_vector(pdev, irq_vector), qm);
5125 static int qm_register_eq_irq(struct hisi_qm *qm)
5127 struct pci_dev *pdev = qm->pdev;
5131 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val;
5136 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm);
5143 static void qm_irqs_unregister(struct hisi_qm *qm)
5145 qm_unregister_mb_cmd_irq(qm);
5146 qm_unregister_abnormal_irq(qm);
5147 qm_unregister_aeq_irq(qm);
5148 qm_unregister_eq_irq(qm);
5151 static int qm_irqs_register(struct hisi_qm *qm)
5155 ret = qm_register_eq_irq(qm);
5159 ret = qm_register_aeq_irq(qm);
5163 ret = qm_register_abnormal_irq(qm);
5167 ret = qm_register_mb_cmd_irq(qm);
5174 qm_unregister_abnormal_irq(qm);
5176 qm_unregister_aeq_irq(qm);
5178 qm_unregister_eq_irq(qm);
5182 static int qm_get_qp_num(struct hisi_qm *qm)
5184 struct device *dev = &qm->pdev->dev;
5188 if (qm->fun_type == QM_HW_VF) {
5189 if (qm->ver != QM_HW_V1)
5191 return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
5196 is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
5197 qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true);
5198 qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
5201 if (qm->qp_num <= qm->max_qp_num)
5204 if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) {
5207 qm->qp_num, qm->max_qp_num);
5212 qm->qp_num, qm->max_qp_num);
5213 qm->qp_num = qm->max_qp_num;
5214 qm->debug.curr_qm_qp_num = qm->qp_num;
5219 static int qm_pre_store_caps(struct hisi_qm *qm)
5222 struct pci_dev *pdev = qm->pdev;
5233 qm_cap[i].cap_val = hisi_qm_get_cap_value(qm, qm_cap_query_info,
5234 i, qm->cap_ver);
5237 qm->cap_tables.qm_cap_table = qm_cap;
5238 qm->cap_tables.qm_cap_size = size;
5243 static int qm_get_hw_caps(struct hisi_qm *qm)
5245 const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
5247 u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) :
5252 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true);
5254 set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
5256 if (qm->ver >= QM_HW_V3) {
5257 val = readl(qm->io_base + QM_FUNC_CAPS_REG);
5258 qm->cap_ver = val & QM_CAPBILITY_VERSION;
5263 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver);
5265 set_bit(qm_cap_info_comm[i].type, &qm->caps);
5270 val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver);
5272 set_bit(cap_info[i].type, &qm->caps);
5275 /* Fetch and save the value of qm capability registers */
5276 return qm_pre_store_caps(qm);
5279 static void qm_get_version(struct hisi_qm *qm)
5281 struct pci_dev *pdev = qm->pdev;
5284 qm->ver = pdev->revision;
5287 sub_version_id = readl(qm->io_base + QM_SUB_VERSION_ID);
5289 qm->ver = sub_version_id;
5293 static int qm_get_pci_res(struct hisi_qm *qm)
5295 struct pci_dev *pdev = qm->pdev;
5299 ret = pci_request_mem_regions(pdev, qm->dev_name);
5305 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
5306 qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2));
5307 if (!qm->io_base) {
5312 qm_get_version(qm);
5314 ret = qm_get_hw_caps(qm);
5318 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
5319 qm->db_interval = QM_QP_DB_INTERVAL;
5320 qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
5321 qm->db_io_base = ioremap(qm->db_phys_base,
5323 if (!qm->db_io_base) {
5328 qm->db_phys_base = qm->phys_base;
5329 qm->db_io_base = qm->io_base;
5330 qm->db_interval = 0;
5333 hisi_qm_pre_init(qm);
5334 ret = qm_get_qp_num(qm);
5341 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
5342 iounmap(qm->db_io_base);
5344 iounmap(qm->io_base);
5350 static int qm_clear_device(struct hisi_qm *qm)
5352 acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev);
5355 if (qm->fun_type == QM_HW_VF)
5359 if (!qm->err_ini->err_info_init)
5361 qm->err_ini->err_info_init(qm);
5367 if (!acpi_has_method(handle, qm->err_info.acpi_rst))
5370 ret = qm_master_ooo_check(qm);
5372 writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
5376 if (qm->err_ini->set_priv_status) {
5377 ret = qm->err_ini->set_priv_status(qm);
5379 writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL);
5384 return qm_reset_device(qm);
5387 static int hisi_qm_pci_init(struct hisi_qm *qm)
5389 struct pci_dev *pdev = qm->pdev;
5400 ret = qm_get_pci_res(qm);
5409 num_vec = qm_get_irq_num(qm);
5416 ret = qm_clear_device(qm);
5425 qm_put_pci_res(qm);
5431 static int hisi_qm_init_work(struct hisi_qm *qm)
5435 for (i = 0; i < qm->qp_num; i++)
5436 INIT_WORK(&qm->poll_data[i].work, qm_work_process);
5438 if (qm->fun_type == QM_HW_PF)
5439 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
5441 if (qm->ver > QM_HW_V2)
5442 INIT_WORK(&qm->cmd_process, qm_cmd_process);
5444 qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
5446 pci_name(qm->pdev));
5447 if (!qm->wq) {
5448 pci_err(qm->pdev, "failed to alloc workqueue!\n");
5455 static int hisi_qp_alloc_memory(struct hisi_qm *qm)
5457 struct device *dev = &qm->pdev->dev;
5462 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
5463 if (!qm->qp_array)
5466 qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL);
5467 if (!qm->poll_data) {
5468 kfree(qm->qp_array);
5472 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);
5475 qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth;
5477 for (i = 0; i < qm->qp_num; i++) {
5478 qm->poll_data[i].qm = qm;
5479 ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth);
5488 hisi_qp_memory_uninit(qm, i);
5493 static int hisi_qm_alloc_rsv_buf(struct hisi_qm *qm)
5495 struct qm_rsv_buf *xqc_buf = &qm->xqc_buf;
5497 struct device *dev = &qm->pdev->dev;
5523 static int hisi_qm_memory_init(struct hisi_qm *qm)
5525 struct device *dev = &qm->pdev->dev;
5529 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
5530 total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
5531 qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
5532 if (!qm->factor)
5536 qm->factor[0].func_qos = QM_QOS_MAX_VAL;
5539 #define QM_INIT_BUF(qm, type, num) do { \
5540 (qm)->type = ((qm)->qdma.va + (off)); \
5541 (qm)->type##_dma = (qm)->qdma.dma + (off); \
5545 idr_init(&qm->qp_idr);
5546 qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP);
5547 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) +
5548 QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) +
5549 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
5550 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
5551 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
5553 dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size);
5554 if (!qm->qdma.va) {
5559 QM_INIT_BUF(qm, eqe, qm->eq_depth);
5560 QM_INIT_BUF(qm, aeqe, qm->aeq_depth);
5561 QM_INIT_BUF(qm, sqc, qm->qp_num);
5562 QM_INIT_BUF(qm, cqc, qm->qp_num);
5564 ret = hisi_qm_alloc_rsv_buf(qm);
5568 ret = hisi_qp_alloc_memory(qm);
5575 hisi_qm_free_rsv_buf(qm);
5577 dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
5579 idr_destroy(&qm->qp_idr);
5580 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
5581 kfree(qm->factor);
5587 * hisi_qm_init() - Initialize configures about qm.
5588 * @qm: The qm needing init.
5590 * This function initializes the qm; hisi_qm_start() can then be called to put the qm to work.
5592 int hisi_qm_init(struct hisi_qm *qm)
5594 struct pci_dev *pdev = qm->pdev;
5598 ret = hisi_qm_pci_init(qm);
5602 ret = qm_irqs_register(qm);
5606 if (qm->fun_type == QM_HW_PF) {
5608 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
5609 qm_disable_clock_gate(qm);
5610 ret = qm_dev_mem_reset(qm);
5617 if (qm->mode == UACCE_MODE_SVA) {
5618 ret = qm_alloc_uacce(qm);
5623 ret = hisi_qm_memory_init(qm);
5627 ret = hisi_qm_init_work(qm);
5631 qm_cmd_init(qm);
5636 hisi_qm_memory_uninit(qm);
5638 qm_remove_uacce(qm);
5640 qm_irqs_unregister(qm);
5642 hisi_qm_pci_uninit(qm);
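
hisi_qm_init() is what a device driver calls from probe, after it has filled in qm->pdev and fields such as sqe_size and mode, and before hisi_qm_start(). A hedged probe-order sketch using only functions documented in this listing:

    static int example_probe(struct pci_dev *pdev, struct hisi_qm *qm)
    {
            int ret;

            qm->pdev = pdev;                /* the driver sets mode/sqe_size/err_ini first */

            ret = hisi_qm_init(qm);
            if (ret)
                    return ret;

            ret = hisi_qm_start(qm);
            if (ret)
                    goto err_uninit;

            return 0;

    err_uninit:
            hisi_qm_uninit(qm);
            return ret;
    }
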
5649 * @qm: pointer to accelerator device.
5656 int hisi_qm_get_dfx_access(struct hisi_qm *qm)
5658 struct device *dev = &qm->pdev->dev;
5665 return qm_pm_get_sync(qm);
5671 * @qm: pointer to accelerator device.
5675 void hisi_qm_put_dfx_access(struct hisi_qm *qm)
5677 qm_pm_put_sync(qm);
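
hisi_qm_get_dfx_access()/hisi_qm_put_dfx_access() wrap qm_pm_get_sync()/qm_pm_put_sync() so debugfs readers do not touch a runtime-suspended device. A hedged sketch of the bracket, as qm_algqos_read() uses it earlier in this listing; the register picked here is illustrative:

    static int example_dfx_read(struct hisi_qm *qm, u32 *out)
    {
            int ret;

            ret = hisi_qm_get_dfx_access(qm);
            if (ret)
                    return ret;

            *out = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);

            hisi_qm_put_dfx_access(qm);
            return 0;
    }
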
5682 * hisi_qm_pm_init() - Initialize qm runtime PM.
5683 * @qm: pointer to accelerator device.
5685 * Function that initializes qm runtime PM.
5687 void hisi_qm_pm_init(struct hisi_qm *qm)
5689 struct device *dev = &qm->pdev->dev;
5691 if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
5701 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
5702 * @qm: pointer to accelerator device.
5704 * Function that uninitializes qm runtime PM.
5706 void hisi_qm_pm_uninit(struct hisi_qm *qm)
5708 struct device *dev = &qm->pdev->dev;
5710 if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
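
hisi_qm_pm_init()/hisi_qm_pm_uninit() are no-ops unless QM_SUPPORT_RPM is set in qm->caps. A hedged sketch of the assumed driver-side pairing: arm runtime PM only once probe has fully succeeded, and disarm it first thing in remove:

    static void example_pm_hooks(struct hisi_qm *qm, bool probing)
    {
            if (probing)
                    hisi_qm_pm_init(qm);    /* end of a successful probe */
            else
                    hisi_qm_pm_uninit(qm);  /* start of remove */
    }
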
5718 static int qm_prepare_for_suspend(struct hisi_qm *qm)
5720 struct pci_dev *pdev = qm->pdev;
5723 ret = qm->ops->set_msi(qm, false);
5729 ret = qm_master_ooo_check(qm);
5733 if (qm->err_ini->set_priv_status) {
5734 ret = qm->err_ini->set_priv_status(qm);
5739 ret = qm_set_pf_mse(qm, false);
5746 static int qm_rebuild_for_resume(struct hisi_qm *qm)
5748 struct pci_dev *pdev = qm->pdev;
5751 ret = qm_set_pf_mse(qm, true);
5757 ret = qm->ops->set_msi(qm, true);
5763 ret = qm_dev_hw_init(qm);
5769 qm_cmd_init(qm);
5770 hisi_qm_dev_err_init(qm);
5772 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
5773 qm_disable_clock_gate(qm);
5774 ret = qm_dev_mem_reset(qm);
5790 struct hisi_qm *qm = pci_get_drvdata(pdev);
5795 ret = hisi_qm_stop(qm, QM_NORMAL);
5797 pci_err(pdev, "failed to stop qm(%d)\n", ret);
5801 ret = qm_prepare_for_suspend(qm);
5818 struct hisi_qm *qm = pci_get_drvdata(pdev);
5823 ret = qm_rebuild_for_resume(qm);
5829 ret = hisi_qm_start(qm);
5831 if (qm_check_dev_error(qm)) {
5832 pci_info(pdev, "failed to start qm due to device error, device will be reset!\n");
5836 pci_err(pdev, "failed to start qm(%d)!\n", ret);