1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2019 HiSilicon Limited. */ 3 #include <asm/page.h> 4 #include <linux/acpi.h> 5 #include <linux/bitmap.h> 6 #include <linux/dma-mapping.h> 7 #include <linux/idr.h> 8 #include <linux/io.h> 9 #include <linux/irqreturn.h> 10 #include <linux/log2.h> 11 #include <linux/pm_runtime.h> 12 #include <linux/seq_file.h> 13 #include <linux/slab.h> 14 #include <linux/uacce.h> 15 #include <linux/uaccess.h> 16 #include <uapi/misc/uacce/hisi_qm.h> 17 #include <linux/hisi_acc_qm.h> 18 #include "qm_common.h" 19 20 /* eq/aeq irq enable */ 21 #define QM_VF_AEQ_INT_SOURCE 0x0 22 #define QM_VF_AEQ_INT_MASK 0x4 23 #define QM_VF_EQ_INT_SOURCE 0x8 24 #define QM_VF_EQ_INT_MASK 0xc 25 26 #define QM_IRQ_VECTOR_MASK GENMASK(15, 0) 27 #define QM_IRQ_TYPE_MASK GENMASK(15, 0) 28 #define QM_IRQ_TYPE_SHIFT 16 29 #define QM_ABN_IRQ_TYPE_MASK GENMASK(7, 0) 30 31 /* mailbox */ 32 #define QM_MB_PING_ALL_VFS 0xffff 33 #define QM_MB_CMD_DATA_SHIFT 32 34 #define QM_MB_CMD_DATA_MASK GENMASK(31, 0) 35 #define QM_MB_STATUS_MASK GENMASK(12, 9) 36 37 /* sqc shift */ 38 #define QM_SQ_HOP_NUM_SHIFT 0 39 #define QM_SQ_PAGE_SIZE_SHIFT 4 40 #define QM_SQ_BUF_SIZE_SHIFT 8 41 #define QM_SQ_SQE_SIZE_SHIFT 12 42 #define QM_SQ_PRIORITY_SHIFT 0 43 #define QM_SQ_ORDERS_SHIFT 4 44 #define QM_SQ_TYPE_SHIFT 8 45 #define QM_QC_PASID_ENABLE 0x1 46 #define QM_QC_PASID_ENABLE_SHIFT 7 47 48 #define QM_SQ_TYPE_MASK GENMASK(3, 0) 49 #define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc).w11) >> 6) & 0x1) 50 51 /* cqc shift */ 52 #define QM_CQ_HOP_NUM_SHIFT 0 53 #define QM_CQ_PAGE_SIZE_SHIFT 4 54 #define QM_CQ_BUF_SIZE_SHIFT 8 55 #define QM_CQ_CQE_SIZE_SHIFT 12 56 #define QM_CQ_PHASE_SHIFT 0 57 #define QM_CQ_FLAG_SHIFT 1 58 59 #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1) 60 #define QM_QC_CQE_SIZE 4 61 #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc).w11) >> 6) & 0x1) 62 63 /* eqc shift */ 64 #define QM_EQE_AEQE_SIZE (2UL << 12) 65 #define QM_EQC_PHASE_SHIFT 16 66 67 #define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1) 68 #define QM_EQE_CQN_MASK GENMASK(15, 0) 69 70 #define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1) 71 #define QM_AEQE_TYPE_SHIFT 17 72 #define QM_AEQE_TYPE_MASK 0xf 73 #define QM_AEQE_CQN_MASK GENMASK(15, 0) 74 #define QM_CQ_OVERFLOW 0 75 #define QM_EQ_OVERFLOW 1 76 #define QM_CQE_ERROR 2 77 78 #define QM_XQ_DEPTH_SHIFT 16 79 #define QM_XQ_DEPTH_MASK GENMASK(15, 0) 80 81 #define QM_DOORBELL_CMD_SQ 0 82 #define QM_DOORBELL_CMD_CQ 1 83 #define QM_DOORBELL_CMD_EQ 2 84 #define QM_DOORBELL_CMD_AEQ 3 85 86 #define QM_DOORBELL_BASE_V1 0x340 87 #define QM_DB_CMD_SHIFT_V1 16 88 #define QM_DB_INDEX_SHIFT_V1 32 89 #define QM_DB_PRIORITY_SHIFT_V1 48 90 #define QM_PAGE_SIZE 0x0034 91 #define QM_QP_DB_INTERVAL 0x10000 92 #define QM_DB_TIMEOUT_CFG 0x100074 93 #define QM_DB_TIMEOUT_SET 0x1fffff 94 95 #define QM_MEM_START_INIT 0x100040 96 #define QM_MEM_INIT_DONE 0x100044 97 #define QM_VFT_CFG_RDY 0x10006c 98 #define QM_VFT_CFG_OP_WR 0x100058 99 #define QM_VFT_CFG_TYPE 0x10005c 100 #define QM_VFT_CFG 0x100060 101 #define QM_VFT_CFG_OP_ENABLE 0x100054 102 #define QM_PM_CTRL 0x100148 103 #define QM_IDLE_DISABLE BIT(9) 104 105 #define QM_VFT_CFG_DATA_L 0x100064 106 #define QM_VFT_CFG_DATA_H 0x100068 107 #define QM_SQC_VFT_BUF_SIZE (7ULL << 8) 108 #define QM_SQC_VFT_SQC_SIZE (5ULL << 12) 109 #define QM_SQC_VFT_INDEX_NUMBER (1ULL << 16) 110 #define QM_SQC_VFT_START_SQN_SHIFT 28 111 #define QM_SQC_VFT_VALID (1ULL << 44) 112 #define QM_SQC_VFT_SQN_SHIFT 45 113 #define 
QM_CQC_VFT_BUF_SIZE (7ULL << 8) 114 #define QM_CQC_VFT_SQC_SIZE (5ULL << 12) 115 #define QM_CQC_VFT_INDEX_NUMBER (1ULL << 16) 116 #define QM_CQC_VFT_VALID (1ULL << 28) 117 118 #define QM_SQC_VFT_BASE_SHIFT_V2 28 119 #define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0) 120 #define QM_SQC_VFT_NUM_SHIFT_V2 45 121 #define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0) 122 123 #define QM_ABNORMAL_INT_SOURCE 0x100000 124 #define QM_ABNORMAL_INT_MASK 0x100004 125 #define QM_ABNORMAL_INT_MASK_VALUE 0x7fff 126 #define QM_ABNORMAL_INT_STATUS 0x100008 127 #define QM_ABNORMAL_INT_SET 0x10000c 128 #define QM_ABNORMAL_INF00 0x100010 129 #define QM_FIFO_OVERFLOW_TYPE 0xc0 130 #define QM_FIFO_OVERFLOW_TYPE_SHIFT 6 131 #define QM_FIFO_OVERFLOW_VF 0x3f 132 #define QM_FIFO_OVERFLOW_QP_SHIFT 16 133 #define QM_ABNORMAL_INF01 0x100014 134 #define QM_DB_TIMEOUT_TYPE 0xc0 135 #define QM_DB_TIMEOUT_TYPE_SHIFT 6 136 #define QM_DB_TIMEOUT_VF 0x3f 137 #define QM_DB_TIMEOUT_QP_SHIFT 16 138 #define QM_ABNORMAL_INF02 0x100018 139 #define QM_AXI_POISON_ERR BIT(22) 140 #define QM_RAS_CE_ENABLE 0x1000ec 141 #define QM_RAS_FE_ENABLE 0x1000f0 142 #define QM_RAS_NFE_ENABLE 0x1000f4 143 #define QM_RAS_CE_THRESHOLD 0x1000f8 144 #define QM_RAS_CE_TIMES_PER_IRQ 1 145 #define QM_OOO_SHUTDOWN_SEL 0x1040f8 146 #define QM_AXI_RRESP_ERR BIT(0) 147 #define QM_ECC_MBIT BIT(2) 148 #define QM_DB_TIMEOUT BIT(10) 149 #define QM_OF_FIFO_OF BIT(11) 150 151 #define QM_RESET_WAIT_TIMEOUT 400 152 #define QM_PEH_VENDOR_ID 0x1000d8 153 #define ACC_VENDOR_ID_VALUE 0x5a5a 154 #define QM_PEH_DFX_INFO0 0x1000fc 155 #define QM_PEH_DFX_INFO1 0x100100 156 #define QM_PEH_DFX_MASK (BIT(0) | BIT(2)) 157 #define QM_PEH_MSI_FINISH_MASK GENMASK(19, 16) 158 #define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3 159 #define ACC_PEH_MSI_DISABLE GENMASK(31, 0) 160 #define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1 161 #define ACC_MASTER_TRANS_RETURN_RW 3 162 #define ACC_MASTER_TRANS_RETURN 0x300150 163 #define ACC_MASTER_GLOBAL_CTRL 0x300000 164 #define ACC_AM_CFG_PORT_WR_EN 0x30001c 165 #define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT 166 #define ACC_AM_ROB_ECC_INT_STS 0x300104 167 #define ACC_ROB_ECC_ERR_MULTPL BIT(1) 168 #define QM_MSI_CAP_ENABLE BIT(16) 169 170 /* interfunction communication */ 171 #define QM_IFC_READY_STATUS 0x100128 172 #define QM_IFC_INT_SET_P 0x100130 173 #define QM_IFC_INT_CFG 0x100134 174 #define QM_IFC_INT_SOURCE_P 0x100138 175 #define QM_IFC_INT_SOURCE_V 0x0020 176 #define QM_IFC_INT_MASK 0x0024 177 #define QM_IFC_INT_STATUS 0x0028 178 #define QM_IFC_INT_SET_V 0x002C 179 #define QM_IFC_SEND_ALL_VFS GENMASK(6, 0) 180 #define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0) 181 #define QM_IFC_INT_SOURCE_MASK BIT(0) 182 #define QM_IFC_INT_DISABLE BIT(0) 183 #define QM_IFC_INT_STATUS_MASK BIT(0) 184 #define QM_IFC_INT_SET_MASK BIT(0) 185 #define QM_WAIT_DST_ACK 10 186 #define QM_MAX_PF_WAIT_COUNT 10 187 #define QM_MAX_VF_WAIT_COUNT 40 188 #define QM_VF_RESET_WAIT_US 20000 189 #define QM_VF_RESET_WAIT_CNT 3000 190 #define QM_VF_RESET_WAIT_TIMEOUT_US \ 191 (QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT) 192 193 #define POLL_PERIOD 10 194 #define POLL_TIMEOUT 1000 195 #define WAIT_PERIOD_US_MAX 200 196 #define WAIT_PERIOD_US_MIN 100 197 #define MAX_WAIT_COUNTS 1000 198 #define QM_CACHE_WB_START 0x204 199 #define QM_CACHE_WB_DONE 0x208 200 #define QM_FUNC_CAPS_REG 0x3100 201 #define QM_CAPBILITY_VERSION GENMASK(7, 0) 202 203 #define PCI_BAR_2 2 204 #define PCI_BAR_4 4 205 #define QMC_ALIGN(sz) ALIGN(sz, 32) 206 207 #define QM_DBG_READ_LEN 256 208 #define QM_PCI_COMMAND_INVALID ~0 209 #define 
QM_RESET_STOP_TX_OFFSET 1 210 #define QM_RESET_STOP_RX_OFFSET 2 211 212 #define WAIT_PERIOD 20 213 #define REMOVE_WAIT_DELAY 10 214 215 #define QM_QOS_PARAM_NUM 2 216 #define QM_QOS_MAX_VAL 1000 217 #define QM_QOS_RATE 100 218 #define QM_QOS_EXPAND_RATE 1000 219 #define QM_SHAPER_CIR_B_MASK GENMASK(7, 0) 220 #define QM_SHAPER_CIR_U_MASK GENMASK(10, 8) 221 #define QM_SHAPER_CIR_S_MASK GENMASK(14, 11) 222 #define QM_SHAPER_FACTOR_CIR_U_SHIFT 8 223 #define QM_SHAPER_FACTOR_CIR_S_SHIFT 11 224 #define QM_SHAPER_FACTOR_CBS_B_SHIFT 15 225 #define QM_SHAPER_FACTOR_CBS_S_SHIFT 19 226 #define QM_SHAPER_CBS_B 1 227 #define QM_SHAPER_VFT_OFFSET 6 228 #define QM_QOS_MIN_ERROR_RATE 5 229 #define QM_SHAPER_MIN_CBS_S 8 230 #define QM_QOS_TICK 0x300U 231 #define QM_QOS_DIVISOR_CLK 0x1f40U 232 #define QM_QOS_MAX_CIR_B 200 233 #define QM_QOS_MIN_CIR_B 100 234 #define QM_QOS_MAX_CIR_U 6 235 #define QM_AUTOSUSPEND_DELAY 3000 236 237 #define QM_DEV_ALG_MAX_LEN 256 238 239 /* abnormal status value for stopping queue */ 240 #define QM_STOP_QUEUE_FAIL 1 241 #define QM_DUMP_SQC_FAIL 3 242 #define QM_DUMP_CQC_FAIL 4 243 #define QM_FINISH_WAIT 5 244 245 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ 246 (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ 247 ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \ 248 ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \ 249 ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) 250 251 #define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \ 252 ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) 253 254 #define QM_MK_SQC_W13(priority, orders, alg_type) \ 255 (((priority) << QM_SQ_PRIORITY_SHIFT) | \ 256 ((orders) << QM_SQ_ORDERS_SHIFT) | \ 257 (((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT)) 258 259 #define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \ 260 (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \ 261 ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \ 262 ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \ 263 ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) 264 265 #define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \ 266 ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) 267 268 enum vft_type { 269 SQC_VFT = 0, 270 CQC_VFT, 271 SHAPER_VFT, 272 }; 273 274 enum qm_alg_type { 275 ALG_TYPE_0, 276 ALG_TYPE_1, 277 }; 278 279 enum qm_mb_cmd { 280 QM_PF_FLR_PREPARE = 0x01, 281 QM_PF_SRST_PREPARE, 282 QM_PF_RESET_DONE, 283 QM_VF_PREPARE_DONE, 284 QM_VF_PREPARE_FAIL, 285 QM_VF_START_DONE, 286 QM_VF_START_FAIL, 287 QM_PF_SET_QOS, 288 QM_VF_GET_QOS, 289 }; 290 291 enum qm_basic_type { 292 QM_TOTAL_QP_NUM_CAP = 0x0, 293 QM_FUNC_MAX_QP_CAP, 294 QM_XEQ_DEPTH_CAP, 295 QM_QP_DEPTH_CAP, 296 QM_EQ_IRQ_TYPE_CAP, 297 QM_AEQ_IRQ_TYPE_CAP, 298 QM_ABN_IRQ_TYPE_CAP, 299 QM_PF2VF_IRQ_TYPE_CAP, 300 QM_PF_IRQ_NUM_CAP, 301 QM_VF_IRQ_NUM_CAP, 302 }; 303 304 enum qm_cap_table_type { 305 QM_CAP_VF = 0x0, 306 QM_AEQE_NUM, 307 QM_SCQE_NUM, 308 QM_EQ_IRQ, 309 QM_AEQ_IRQ, 310 QM_ABNORMAL_IRQ, 311 QM_MB_IRQ, 312 MAX_IRQ_NUM, 313 EXT_BAR_INDEX, 314 }; 315 316 static const struct hisi_qm_cap_query_info qm_cap_query_info[] = { 317 {QM_CAP_VF, "QM_CAP_VF ", 0x3100, 0x0, 0x0, 0x6F01}, 318 {QM_AEQE_NUM, "QM_AEQE_NUM ", 0x3104, 0x800, 0x4000800, 0x4000800}, 319 {QM_SCQE_NUM, "QM_SCQE_NUM ", 320 0x3108, 0x4000400, 0x4000400, 0x4000400}, 321 {QM_EQ_IRQ, "QM_EQ_IRQ ", 0x310c, 0x10000, 0x10000, 0x10000}, 322 {QM_AEQ_IRQ, "QM_AEQ_IRQ ", 0x3110, 0x0, 0x10001, 0x10001}, 323 {QM_ABNORMAL_IRQ, "QM_ABNORMAL_IRQ ", 0x3114, 0x0, 0x10003, 0x10003}, 324 {QM_MB_IRQ, "QM_MB_IRQ ", 0x3118, 0x0, 0x0, 0x10002}, 325 {MAX_IRQ_NUM, "MAX_IRQ_NUM ", 0x311c, 0x10001, 0x40002, 0x40003}, 326 
{EXT_BAR_INDEX, "EXT_BAR_INDEX ", 0x3120, 0x0, 0x0, 0x14}, 327 }; 328 329 static const struct hisi_qm_cap_info qm_cap_info_comm[] = { 330 {QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0}, 331 {QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1}, 332 {QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1}, 333 {QM_SUPPORT_STOP_FUNC, 0x3100, 0, BIT(10), 0x0, 0x0, 0x1}, 334 {QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1}, 335 {QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1}, 336 }; 337 338 static const struct hisi_qm_cap_info qm_cap_info_pf[] = { 339 {QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1}, 340 }; 341 342 static const struct hisi_qm_cap_info qm_cap_info_vf[] = { 343 {QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0}, 344 }; 345 346 static const struct hisi_qm_cap_info qm_basic_info[] = { 347 {QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400}, 348 {QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400}, 349 {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(31, 0), 0x800, 0x4000800, 0x4000800}, 350 {QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400}, 351 {QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000}, 352 {QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001}, 353 {QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003}, 354 {QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002}, 355 {QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4}, 356 {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3}, 357 }; 358 359 struct qm_mailbox { 360 __le16 w0; 361 __le16 queue_num; 362 __le32 base_l; 363 __le32 base_h; 364 __le32 rsvd; 365 }; 366 367 struct qm_doorbell { 368 __le16 queue_num; 369 __le16 cmd; 370 __le16 index; 371 __le16 priority; 372 }; 373 374 struct hisi_qm_resource { 375 struct hisi_qm *qm; 376 int distance; 377 struct list_head list; 378 }; 379 380 /** 381 * struct qm_hw_err - Structure describing the device errors 382 * @list: hardware error list 383 * @timestamp: timestamp when the error occurred 384 */ 385 struct qm_hw_err { 386 struct list_head list; 387 unsigned long long timestamp; 388 }; 389 390 struct hisi_qm_hw_ops { 391 int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number); 392 void (*qm_db)(struct hisi_qm *qm, u16 qn, 393 u8 cmd, u16 index, u8 priority); 394 int (*debug_init)(struct hisi_qm *qm); 395 void (*hw_error_init)(struct hisi_qm *qm); 396 void (*hw_error_uninit)(struct hisi_qm *qm); 397 enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm); 398 int (*set_msi)(struct hisi_qm *qm, bool set); 399 }; 400 401 struct hisi_qm_hw_error { 402 u32 int_msk; 403 const char *msg; 404 }; 405 406 static const struct hisi_qm_hw_error qm_hw_error[] = { 407 { .int_msk = BIT(0), .msg = "qm_axi_rresp" }, 408 { .int_msk = BIT(1), .msg = "qm_axi_bresp" }, 409 { .int_msk = BIT(2), .msg = "qm_ecc_mbit" }, 410 { .int_msk = BIT(3), .msg = "qm_ecc_1bit" }, 411 { .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" }, 412 { .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" }, 413 { .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" }, 414 { .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" }, 415 { .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" }, 416 { .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" }, 417 { .int_msk = BIT(10), .msg = "qm_db_timeout" }, 418 { .int_msk = BIT(11), .msg = "qm_of_fifo_of" }, 419 { .int_msk = BIT(12), .msg = "qm_db_random_invalid" }, 420 { .int_msk = BIT(13), .msg = 
"qm_mailbox_timeout" }, 421 { .int_msk = BIT(14), .msg = "qm_flr_timeout" }, 422 }; 423 424 static const char * const qm_db_timeout[] = { 425 "sq", "cq", "eq", "aeq", 426 }; 427 428 static const char * const qm_fifo_overflow[] = { 429 "cq", "eq", "aeq", 430 }; 431 432 struct qm_typical_qos_table { 433 u32 start; 434 u32 end; 435 u32 val; 436 }; 437 438 /* the qos step is 100 */ 439 static struct qm_typical_qos_table shaper_cir_s[] = { 440 {100, 100, 4}, 441 {200, 200, 3}, 442 {300, 500, 2}, 443 {600, 1000, 1}, 444 {1100, 100000, 0}, 445 }; 446 447 static struct qm_typical_qos_table shaper_cbs_s[] = { 448 {100, 200, 9}, 449 {300, 500, 11}, 450 {600, 1000, 12}, 451 {1100, 10000, 16}, 452 {10100, 25000, 17}, 453 {25100, 50000, 18}, 454 {50100, 100000, 19} 455 }; 456 457 static void qm_irqs_unregister(struct hisi_qm *qm); 458 static int qm_reset_device(struct hisi_qm *qm); 459 int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp, 460 unsigned int device) 461 { 462 struct pci_dev *pdev; 463 u32 n, q_num; 464 int ret; 465 466 if (!val) 467 return -EINVAL; 468 469 pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL); 470 if (!pdev) { 471 q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2); 472 pr_info("No device found currently, suppose queue number is %u\n", 473 q_num); 474 } else { 475 if (pdev->revision == QM_HW_V1) 476 q_num = QM_QNUM_V1; 477 else 478 q_num = QM_QNUM_V2; 479 480 pci_dev_put(pdev); 481 } 482 483 ret = kstrtou32(val, 10, &n); 484 if (ret || n < QM_MIN_QNUM || n > q_num) 485 return -EINVAL; 486 487 return param_set_int(val, kp); 488 } 489 EXPORT_SYMBOL_GPL(hisi_qm_q_num_set); 490 491 static u32 qm_get_hw_error_status(struct hisi_qm *qm) 492 { 493 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS); 494 } 495 496 static u32 qm_get_dev_err_status(struct hisi_qm *qm) 497 { 498 return qm->err_ini->get_dev_hw_err_status(qm); 499 } 500 501 /* Check if the error causes the master ooo block */ 502 static bool qm_check_dev_error(struct hisi_qm *qm) 503 { 504 u32 val, dev_val; 505 506 if (qm->fun_type == QM_HW_VF) 507 return false; 508 509 val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask; 510 dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask; 511 512 return val || dev_val; 513 } 514 515 static int qm_wait_reset_finish(struct hisi_qm *qm) 516 { 517 int delay = 0; 518 519 /* All reset requests need to be queued for processing */ 520 while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { 521 msleep(++delay); 522 if (delay > QM_RESET_WAIT_TIMEOUT) 523 return -EBUSY; 524 } 525 526 return 0; 527 } 528 529 static int qm_reset_prepare_ready(struct hisi_qm *qm) 530 { 531 struct pci_dev *pdev = qm->pdev; 532 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); 533 534 /* 535 * PF and VF on host doesnot support resetting at the 536 * same time on Kunpeng920. 537 */ 538 if (qm->ver < QM_HW_V3) 539 return qm_wait_reset_finish(pf_qm); 540 541 return qm_wait_reset_finish(qm); 542 } 543 544 static void qm_reset_bit_clear(struct hisi_qm *qm) 545 { 546 struct pci_dev *pdev = qm->pdev; 547 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); 548 549 if (qm->ver < QM_HW_V3) 550 clear_bit(QM_RESETTING, &pf_qm->misc_ctl); 551 552 clear_bit(QM_RESETTING, &qm->misc_ctl); 553 } 554 555 static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd, 556 u64 base, u16 queue, bool op) 557 { 558 mailbox->w0 = cpu_to_le16((cmd) | 559 ((op) ? 
0x1 << QM_MB_OP_SHIFT : 0) | 560 (0x1 << QM_MB_BUSY_SHIFT)); 561 mailbox->queue_num = cpu_to_le16(queue); 562 mailbox->base_l = cpu_to_le32(lower_32_bits(base)); 563 mailbox->base_h = cpu_to_le32(upper_32_bits(base)); 564 mailbox->rsvd = 0; 565 } 566 567 /* return 0 mailbox ready, -ETIMEDOUT hardware timeout */ 568 int hisi_qm_wait_mb_ready(struct hisi_qm *qm) 569 { 570 u32 val; 571 572 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE, 573 val, !((val >> QM_MB_BUSY_SHIFT) & 574 0x1), POLL_PERIOD, POLL_TIMEOUT); 575 } 576 EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready); 577 578 /* 128 bit should be written to hardware at one time to trigger a mailbox */ 579 static void qm_mb_write(struct hisi_qm *qm, const void *src) 580 { 581 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; 582 583 #if IS_ENABLED(CONFIG_ARM64) 584 unsigned long tmp0 = 0, tmp1 = 0; 585 #endif 586 587 if (!IS_ENABLED(CONFIG_ARM64)) { 588 memcpy_toio(fun_base, src, 16); 589 dma_wmb(); 590 return; 591 } 592 593 #if IS_ENABLED(CONFIG_ARM64) 594 asm volatile("ldp %0, %1, %3\n" 595 "stp %0, %1, %2\n" 596 "dmb oshst\n" 597 : "=&r" (tmp0), 598 "=&r" (tmp1), 599 "+Q" (*((char __iomem *)fun_base)) 600 : "Q" (*((char *)src)) 601 : "memory"); 602 #endif 603 } 604 605 static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox) 606 { 607 int ret; 608 u32 val; 609 610 if (unlikely(hisi_qm_wait_mb_ready(qm))) { 611 dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); 612 ret = -EBUSY; 613 goto mb_busy; 614 } 615 616 qm_mb_write(qm, mailbox); 617 618 if (unlikely(hisi_qm_wait_mb_ready(qm))) { 619 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); 620 ret = -ETIMEDOUT; 621 goto mb_busy; 622 } 623 624 val = readl(qm->io_base + QM_MB_CMD_SEND_BASE); 625 if (val & QM_MB_STATUS_MASK) { 626 dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n"); 627 ret = -EIO; 628 goto mb_busy; 629 } 630 631 return 0; 632 633 mb_busy: 634 atomic64_inc(&qm->debug.dfx.mb_err_cnt); 635 return ret; 636 } 637 638 int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, 639 bool op) 640 { 641 struct qm_mailbox mailbox; 642 int ret; 643 644 qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op); 645 646 mutex_lock(&qm->mailbox_lock); 647 ret = qm_mb_nolock(qm, &mailbox); 648 mutex_unlock(&qm->mailbox_lock); 649 650 return ret; 651 } 652 EXPORT_SYMBOL_GPL(hisi_qm_mb); 653 654 /* op 0: set xqc information to hardware, 1: get xqc information from hardware. */ 655 int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op) 656 { 657 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); 658 struct qm_mailbox mailbox; 659 dma_addr_t xqc_dma; 660 void *tmp_xqc; 661 size_t size; 662 int ret; 663 664 switch (cmd) { 665 case QM_MB_CMD_SQC: 666 size = sizeof(struct qm_sqc); 667 tmp_xqc = qm->xqc_buf.sqc; 668 xqc_dma = qm->xqc_buf.sqc_dma; 669 break; 670 case QM_MB_CMD_CQC: 671 size = sizeof(struct qm_cqc); 672 tmp_xqc = qm->xqc_buf.cqc; 673 xqc_dma = qm->xqc_buf.cqc_dma; 674 break; 675 case QM_MB_CMD_EQC: 676 size = sizeof(struct qm_eqc); 677 tmp_xqc = qm->xqc_buf.eqc; 678 xqc_dma = qm->xqc_buf.eqc_dma; 679 break; 680 case QM_MB_CMD_AEQC: 681 size = sizeof(struct qm_aeqc); 682 tmp_xqc = qm->xqc_buf.aeqc; 683 xqc_dma = qm->xqc_buf.aeqc_dma; 684 break; 685 default: 686 dev_err(&qm->pdev->dev, "unknown mailbox cmd %u\n", cmd); 687 return -EINVAL; 688 } 689 690 /* Setting xqc will fail if master OOO is blocked. 
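 * In that state the mailbox transaction cannot complete, so bail out with
 * -EIO right away instead of waiting on the hardware.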
*/ 691 if (qm_check_dev_error(pf_qm)) { 692 dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stop!\n"); 693 return -EIO; 694 } 695 696 mutex_lock(&qm->mailbox_lock); 697 if (!op) 698 memcpy(tmp_xqc, xqc, size); 699 700 qm_mb_pre_init(&mailbox, cmd, xqc_dma, qp_id, op); 701 ret = qm_mb_nolock(qm, &mailbox); 702 if (!ret && op) 703 memcpy(xqc, tmp_xqc, size); 704 705 mutex_unlock(&qm->mailbox_lock); 706 707 return ret; 708 } 709 710 static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) 711 { 712 u64 doorbell; 713 714 doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) | 715 ((u64)index << QM_DB_INDEX_SHIFT_V1) | 716 ((u64)priority << QM_DB_PRIORITY_SHIFT_V1); 717 718 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1); 719 } 720 721 static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) 722 { 723 void __iomem *io_base = qm->io_base; 724 u16 randata = 0; 725 u64 doorbell; 726 727 if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ) 728 io_base = qm->db_io_base + (u64)qn * qm->db_interval + 729 QM_DOORBELL_SQ_CQ_BASE_V2; 730 else 731 io_base += QM_DOORBELL_EQ_AEQ_BASE_V2; 732 733 doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) | 734 ((u64)randata << QM_DB_RAND_SHIFT_V2) | 735 ((u64)index << QM_DB_INDEX_SHIFT_V2) | 736 ((u64)priority << QM_DB_PRIORITY_SHIFT_V2); 737 738 writeq(doorbell, io_base); 739 } 740 741 static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) 742 { 743 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n", 744 qn, cmd, index); 745 746 qm->ops->qm_db(qm, qn, cmd, index, priority); 747 } 748 749 static void qm_disable_clock_gate(struct hisi_qm *qm) 750 { 751 u32 val; 752 753 /* if qm enables clock gating in Kunpeng930, qos will be inaccurate. */ 754 if (qm->ver < QM_HW_V3) 755 return; 756 757 val = readl(qm->io_base + QM_PM_CTRL); 758 val |= QM_IDLE_DISABLE; 759 writel(val, qm->io_base + QM_PM_CTRL); 760 } 761 762 static int qm_dev_mem_reset(struct hisi_qm *qm) 763 { 764 u32 val; 765 766 writel(0x1, qm->io_base + QM_MEM_START_INIT); 767 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val, 768 val & BIT(0), POLL_PERIOD, 769 POLL_TIMEOUT); 770 } 771 772 /** 773 * hisi_qm_get_hw_info() - Get device information. 774 * @qm: The qm which want to get information. 775 * @info_table: Array for storing device information. 776 * @index: Index in info_table. 777 * @is_read: Whether read from reg, 0: not support read from reg. 778 * 779 * This function returns device information the caller needs. 
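 * For QM_HW_V1 and QM_HW_V2 the value always comes from the static
 * @info_table; on later hardware it is read from the device register
 * when @is_read is set.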
780 */ 781 u32 hisi_qm_get_hw_info(struct hisi_qm *qm, 782 const struct hisi_qm_cap_info *info_table, 783 u32 index, bool is_read) 784 { 785 u32 val; 786 787 switch (qm->ver) { 788 case QM_HW_V1: 789 return info_table[index].v1_val; 790 case QM_HW_V2: 791 return info_table[index].v2_val; 792 default: 793 if (!is_read) 794 return info_table[index].v3_val; 795 796 val = readl(qm->io_base + info_table[index].offset); 797 return (val >> info_table[index].shift) & info_table[index].mask; 798 } 799 } 800 EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info); 801 802 u32 hisi_qm_get_cap_value(struct hisi_qm *qm, 803 const struct hisi_qm_cap_query_info *info_table, 804 u32 index, bool is_read) 805 { 806 u32 val; 807 808 switch (qm->ver) { 809 case QM_HW_V1: 810 return info_table[index].v1_val; 811 case QM_HW_V2: 812 return info_table[index].v2_val; 813 default: 814 if (!is_read) 815 return info_table[index].v3_val; 816 817 val = readl(qm->io_base + info_table[index].offset); 818 return val; 819 } 820 } 821 EXPORT_SYMBOL_GPL(hisi_qm_get_cap_value); 822 823 static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits, 824 u16 *high_bits, enum qm_basic_type type) 825 { 826 u32 depth; 827 828 depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver); 829 *low_bits = depth & QM_XQ_DEPTH_MASK; 830 *high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK; 831 } 832 833 int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs, 834 u32 dev_algs_size) 835 { 836 struct device *dev = &qm->pdev->dev; 837 char *algs, *ptr; 838 int i; 839 840 if (!qm->uacce) 841 return 0; 842 843 if (dev_algs_size >= QM_DEV_ALG_MAX_LEN) { 844 dev_err(dev, "algs size %u is equal or larger than %d.\n", 845 dev_algs_size, QM_DEV_ALG_MAX_LEN); 846 return -EINVAL; 847 } 848 849 algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); 850 if (!algs) 851 return -ENOMEM; 852 853 for (i = 0; i < dev_algs_size; i++) 854 if (alg_msk & dev_algs[i].alg_msk) 855 strcat(algs, dev_algs[i].alg); 856 857 ptr = strrchr(algs, '\n'); 858 if (ptr) { 859 *ptr = '\0'; 860 qm->uacce->algs = algs; 861 } 862 863 return 0; 864 } 865 EXPORT_SYMBOL_GPL(hisi_qm_set_algs); 866 867 static u32 qm_get_irq_num(struct hisi_qm *qm) 868 { 869 if (qm->fun_type == QM_HW_PF) 870 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver); 871 872 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver); 873 } 874 875 static int qm_pm_get_sync(struct hisi_qm *qm) 876 { 877 struct device *dev = &qm->pdev->dev; 878 int ret; 879 880 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) 881 return 0; 882 883 ret = pm_runtime_resume_and_get(dev); 884 if (ret < 0) { 885 dev_err(dev, "failed to get_sync(%d).\n", ret); 886 return ret; 887 } 888 889 return 0; 890 } 891 892 static void qm_pm_put_sync(struct hisi_qm *qm) 893 { 894 struct device *dev = &qm->pdev->dev; 895 896 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) 897 return; 898 899 pm_runtime_mark_last_busy(dev); 900 pm_runtime_put_autosuspend(dev); 901 } 902 903 static void qm_cq_head_update(struct hisi_qp *qp) 904 { 905 if (qp->qp_status.cq_head == qp->cq_depth - 1) { 906 qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase; 907 qp->qp_status.cq_head = 0; 908 } else { 909 qp->qp_status.cq_head++; 910 } 911 } 912 913 static void qm_poll_req_cb(struct hisi_qp *qp) 914 { 915 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; 916 struct hisi_qm *qm = qp->qm; 917 918 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { 919 dma_rmb(); 920 qp->req_cb(qp, 
qp->sqe + qm->sqe_size * 921 le16_to_cpu(cqe->sq_head)); 922 qm_cq_head_update(qp); 923 cqe = qp->cqe + qp->qp_status.cq_head; 924 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, 925 qp->qp_status.cq_head, 0); 926 atomic_dec(&qp->qp_status.used); 927 928 cond_resched(); 929 } 930 931 /* set c_flag */ 932 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1); 933 } 934 935 static void qm_work_process(struct work_struct *work) 936 { 937 struct hisi_qm_poll_data *poll_data = 938 container_of(work, struct hisi_qm_poll_data, work); 939 struct hisi_qm *qm = poll_data->qm; 940 u16 eqe_num = poll_data->eqe_num; 941 struct hisi_qp *qp; 942 int i; 943 944 for (i = eqe_num - 1; i >= 0; i--) { 945 qp = &qm->qp_array[poll_data->qp_finish_id[i]]; 946 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP)) 947 continue; 948 949 if (qp->event_cb) { 950 qp->event_cb(qp); 951 continue; 952 } 953 954 if (likely(qp->req_cb)) 955 qm_poll_req_cb(qp); 956 } 957 } 958 959 static void qm_get_complete_eqe_num(struct hisi_qm *qm) 960 { 961 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; 962 struct hisi_qm_poll_data *poll_data = NULL; 963 u16 eq_depth = qm->eq_depth; 964 u16 cqn, eqe_num = 0; 965 966 if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) { 967 atomic64_inc(&qm->debug.dfx.err_irq_cnt); 968 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); 969 return; 970 } 971 972 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; 973 if (unlikely(cqn >= qm->qp_num)) 974 return; 975 poll_data = &qm->poll_data[cqn]; 976 977 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { 978 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; 979 poll_data->qp_finish_id[eqe_num] = cqn; 980 eqe_num++; 981 982 if (qm->status.eq_head == eq_depth - 1) { 983 qm->status.eqc_phase = !qm->status.eqc_phase; 984 eqe = qm->eqe; 985 qm->status.eq_head = 0; 986 } else { 987 eqe++; 988 qm->status.eq_head++; 989 } 990 991 if (eqe_num == (eq_depth >> 1) - 1) 992 break; 993 } 994 995 poll_data->eqe_num = eqe_num; 996 queue_work(qm->wq, &poll_data->work); 997 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); 998 } 999 1000 static irqreturn_t qm_eq_irq(int irq, void *data) 1001 { 1002 struct hisi_qm *qm = data; 1003 1004 /* Get qp id of completed tasks and re-enable the interrupt */ 1005 qm_get_complete_eqe_num(qm); 1006 1007 return IRQ_HANDLED; 1008 } 1009 1010 static irqreturn_t qm_mb_cmd_irq(int irq, void *data) 1011 { 1012 struct hisi_qm *qm = data; 1013 u32 val; 1014 1015 val = readl(qm->io_base + QM_IFC_INT_STATUS); 1016 val &= QM_IFC_INT_STATUS_MASK; 1017 if (!val) 1018 return IRQ_NONE; 1019 1020 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) { 1021 dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n"); 1022 return IRQ_HANDLED; 1023 } 1024 1025 schedule_work(&qm->cmd_process); 1026 1027 return IRQ_HANDLED; 1028 } 1029 1030 static void qm_set_qp_disable(struct hisi_qp *qp, int offset) 1031 { 1032 u32 *addr; 1033 1034 if (qp->is_in_kernel) 1035 return; 1036 1037 addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset; 1038 *addr = 1; 1039 1040 /* make sure setup is completed */ 1041 smp_wmb(); 1042 } 1043 1044 static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id) 1045 { 1046 struct hisi_qp *qp = &qm->qp_array[qp_id]; 1047 1048 qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET); 1049 hisi_qm_stop_qp(qp); 1050 qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET); 1051 } 1052 1053 static void qm_reset_function(struct hisi_qm *qm) 1054 { 1055 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); 1056 struct device 
*dev = &qm->pdev->dev; 1057 int ret; 1058 1059 if (qm_check_dev_error(pf_qm)) 1060 return; 1061 1062 ret = qm_reset_prepare_ready(qm); 1063 if (ret) { 1064 dev_err(dev, "reset function not ready\n"); 1065 return; 1066 } 1067 1068 ret = hisi_qm_stop(qm, QM_DOWN); 1069 if (ret) { 1070 dev_err(dev, "failed to stop qm when reset function\n"); 1071 goto clear_bit; 1072 } 1073 1074 ret = hisi_qm_start(qm); 1075 if (ret) 1076 dev_err(dev, "failed to start qm when reset function\n"); 1077 1078 clear_bit: 1079 qm_reset_bit_clear(qm); 1080 } 1081 1082 static irqreturn_t qm_aeq_thread(int irq, void *data) 1083 { 1084 struct hisi_qm *qm = data; 1085 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; 1086 u16 aeq_depth = qm->aeq_depth; 1087 u32 type, qp_id; 1088 1089 atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); 1090 1091 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { 1092 type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) & 1093 QM_AEQE_TYPE_MASK; 1094 qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK; 1095 1096 switch (type) { 1097 case QM_EQ_OVERFLOW: 1098 dev_err(&qm->pdev->dev, "eq overflow, reset function\n"); 1099 qm_reset_function(qm); 1100 return IRQ_HANDLED; 1101 case QM_CQ_OVERFLOW: 1102 dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n", 1103 qp_id); 1104 fallthrough; 1105 case QM_CQE_ERROR: 1106 qm_disable_qp(qm, qp_id); 1107 break; 1108 default: 1109 dev_err(&qm->pdev->dev, "unknown error type %u\n", 1110 type); 1111 break; 1112 } 1113 1114 if (qm->status.aeq_head == aeq_depth - 1) { 1115 qm->status.aeqc_phase = !qm->status.aeqc_phase; 1116 aeqe = qm->aeqe; 1117 qm->status.aeq_head = 0; 1118 } else { 1119 aeqe++; 1120 qm->status.aeq_head++; 1121 } 1122 } 1123 1124 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); 1125 1126 return IRQ_HANDLED; 1127 } 1128 1129 static void qm_init_qp_status(struct hisi_qp *qp) 1130 { 1131 struct hisi_qp_status *qp_status = &qp->qp_status; 1132 1133 qp_status->sq_tail = 0; 1134 qp_status->cq_head = 0; 1135 qp_status->cqc_phase = true; 1136 atomic_set(&qp_status->used, 0); 1137 } 1138 1139 static void qm_init_prefetch(struct hisi_qm *qm) 1140 { 1141 struct device *dev = &qm->pdev->dev; 1142 u32 page_type = 0x0; 1143 1144 if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) 1145 return; 1146 1147 switch (PAGE_SIZE) { 1148 case SZ_4K: 1149 page_type = 0x0; 1150 break; 1151 case SZ_16K: 1152 page_type = 0x1; 1153 break; 1154 case SZ_64K: 1155 page_type = 0x2; 1156 break; 1157 default: 1158 dev_err(dev, "system page size is not support: %lu, default set to 4KB", 1159 PAGE_SIZE); 1160 } 1161 1162 writel(page_type, qm->io_base + QM_PAGE_SIZE); 1163 } 1164 1165 /* 1166 * acc_shaper_para_calc() Get the IR value by the qos formula, the return value 1167 * is the expected qos calculated. 
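 * (for instance, with QM_QOS_TICK = 768 and QM_QOS_DIVISOR_CLK = 8000,
 * cir_b = 192, cir_u = 0, cir_s = 1 evaluates to 192 * 8000 / (768 * 2) = 1000 Mbps)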
1168 * the formula: 1169 * IR = X Mbps if ir = 1 means IR = 100 Mbps, if ir = 10000 means = 10Gbps 1170 * 1171 * IR_b * (2 ^ IR_u) * 8000 1172 * IR(Mbps) = ------------------------- 1173 * Tick * (2 ^ IR_s) 1174 */ 1175 static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s) 1176 { 1177 return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) / 1178 (QM_QOS_TICK * (1 << cir_s)); 1179 } 1180 1181 static u32 acc_shaper_calc_cbs_s(u32 ir) 1182 { 1183 int table_size = ARRAY_SIZE(shaper_cbs_s); 1184 int i; 1185 1186 for (i = 0; i < table_size; i++) { 1187 if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end) 1188 return shaper_cbs_s[i].val; 1189 } 1190 1191 return QM_SHAPER_MIN_CBS_S; 1192 } 1193 1194 static u32 acc_shaper_calc_cir_s(u32 ir) 1195 { 1196 int table_size = ARRAY_SIZE(shaper_cir_s); 1197 int i; 1198 1199 for (i = 0; i < table_size; i++) { 1200 if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end) 1201 return shaper_cir_s[i].val; 1202 } 1203 1204 return 0; 1205 } 1206 1207 static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor) 1208 { 1209 u32 cir_b, cir_u, cir_s, ir_calc; 1210 u32 error_rate; 1211 1212 factor->cbs_s = acc_shaper_calc_cbs_s(ir); 1213 cir_s = acc_shaper_calc_cir_s(ir); 1214 1215 for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) { 1216 for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) { 1217 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s); 1218 1219 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; 1220 if (error_rate <= QM_QOS_MIN_ERROR_RATE) { 1221 factor->cir_b = cir_b; 1222 factor->cir_u = cir_u; 1223 factor->cir_s = cir_s; 1224 return 0; 1225 } 1226 } 1227 } 1228 1229 return -EINVAL; 1230 } 1231 1232 static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, 1233 u32 number, struct qm_shaper_factor *factor) 1234 { 1235 u64 tmp = 0; 1236 1237 if (number > 0) { 1238 switch (type) { 1239 case SQC_VFT: 1240 if (qm->ver == QM_HW_V1) { 1241 tmp = QM_SQC_VFT_BUF_SIZE | 1242 QM_SQC_VFT_SQC_SIZE | 1243 QM_SQC_VFT_INDEX_NUMBER | 1244 QM_SQC_VFT_VALID | 1245 (u64)base << QM_SQC_VFT_START_SQN_SHIFT; 1246 } else { 1247 tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT | 1248 QM_SQC_VFT_VALID | 1249 (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT; 1250 } 1251 break; 1252 case CQC_VFT: 1253 if (qm->ver == QM_HW_V1) { 1254 tmp = QM_CQC_VFT_BUF_SIZE | 1255 QM_CQC_VFT_SQC_SIZE | 1256 QM_CQC_VFT_INDEX_NUMBER | 1257 QM_CQC_VFT_VALID; 1258 } else { 1259 tmp = QM_CQC_VFT_VALID; 1260 } 1261 break; 1262 case SHAPER_VFT: 1263 if (factor) { 1264 tmp = factor->cir_b | 1265 (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) | 1266 (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) | 1267 (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) | 1268 (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT); 1269 } 1270 break; 1271 } 1272 } 1273 1274 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); 1275 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); 1276 } 1277 1278 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type, 1279 u32 fun_num, u32 base, u32 number) 1280 { 1281 struct qm_shaper_factor *factor = NULL; 1282 unsigned int val; 1283 int ret; 1284 1285 if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 1286 factor = &qm->factor[fun_num]; 1287 1288 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 1289 val & BIT(0), POLL_PERIOD, 1290 POLL_TIMEOUT); 1291 if (ret) 1292 return ret; 1293 1294 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); 1295 writel(type, 
qm->io_base + QM_VFT_CFG_TYPE); 1296 if (type == SHAPER_VFT) 1297 fun_num |= base << QM_SHAPER_VFT_OFFSET; 1298 1299 writel(fun_num, qm->io_base + QM_VFT_CFG); 1300 1301 qm_vft_data_cfg(qm, type, base, number, factor); 1302 1303 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); 1304 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); 1305 1306 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 1307 val & BIT(0), POLL_PERIOD, 1308 POLL_TIMEOUT); 1309 } 1310 1311 static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num) 1312 { 1313 u32 qos = qm->factor[fun_num].func_qos; 1314 int ret, i; 1315 1316 ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]); 1317 if (ret) { 1318 dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n"); 1319 return ret; 1320 } 1321 writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG); 1322 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) { 1323 /* The base number of queue reuse for different alg type */ 1324 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1); 1325 if (ret) 1326 return ret; 1327 } 1328 1329 return 0; 1330 } 1331 1332 /* The config should be conducted after qm_dev_mem_reset() */ 1333 static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base, 1334 u32 number) 1335 { 1336 int ret, i; 1337 1338 for (i = SQC_VFT; i <= CQC_VFT; i++) { 1339 ret = qm_set_vft_common(qm, i, fun_num, base, number); 1340 if (ret) 1341 return ret; 1342 } 1343 1344 /* init default shaper qos val */ 1345 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { 1346 ret = qm_shaper_init_vft(qm, fun_num); 1347 if (ret) 1348 goto back_sqc_cqc; 1349 } 1350 1351 return 0; 1352 back_sqc_cqc: 1353 for (i = SQC_VFT; i <= CQC_VFT; i++) 1354 qm_set_vft_common(qm, i, fun_num, 0, 0); 1355 1356 return ret; 1357 } 1358 1359 static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) 1360 { 1361 u64 sqc_vft; 1362 int ret; 1363 1364 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); 1365 if (ret) 1366 return ret; 1367 1368 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | 1369 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); 1370 *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); 1371 *number = (QM_SQC_VFT_NUM_MASK_V2 & 1372 (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; 1373 1374 return 0; 1375 } 1376 1377 static void qm_hw_error_init_v1(struct hisi_qm *qm) 1378 { 1379 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); 1380 } 1381 1382 static void qm_hw_error_cfg(struct hisi_qm *qm) 1383 { 1384 struct hisi_qm_err_info *err_info = &qm->err_info; 1385 1386 qm->error_mask = err_info->nfe | err_info->ce | err_info->fe; 1387 /* clear QM hw residual error source */ 1388 writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE); 1389 1390 /* configure error type */ 1391 writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE); 1392 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); 1393 writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE); 1394 writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE); 1395 } 1396 1397 static void qm_hw_error_init_v2(struct hisi_qm *qm) 1398 { 1399 u32 irq_unmask; 1400 1401 qm_hw_error_cfg(qm); 1402 1403 irq_unmask = ~qm->error_mask; 1404 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); 1405 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); 1406 } 1407 1408 static void qm_hw_error_uninit_v2(struct hisi_qm *qm) 1409 { 1410 u32 irq_mask = qm->error_mask; 1411 1412 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); 1413 writel(irq_mask, 
qm->io_base + QM_ABNORMAL_INT_MASK); 1414 } 1415 1416 static void qm_hw_error_init_v3(struct hisi_qm *qm) 1417 { 1418 u32 irq_unmask; 1419 1420 qm_hw_error_cfg(qm); 1421 1422 /* enable close master ooo when hardware error happened */ 1423 writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL); 1424 1425 irq_unmask = ~qm->error_mask; 1426 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); 1427 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); 1428 } 1429 1430 static void qm_hw_error_uninit_v3(struct hisi_qm *qm) 1431 { 1432 u32 irq_mask = qm->error_mask; 1433 1434 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); 1435 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); 1436 1437 /* disable close master ooo when hardware error happened */ 1438 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL); 1439 } 1440 1441 static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status) 1442 { 1443 const struct hisi_qm_hw_error *err; 1444 struct device *dev = &qm->pdev->dev; 1445 u32 reg_val, type, vf_num, qp_id; 1446 int i; 1447 1448 for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) { 1449 err = &qm_hw_error[i]; 1450 if (!(err->int_msk & error_status)) 1451 continue; 1452 1453 dev_err(dev, "%s [error status=0x%x] found\n", 1454 err->msg, err->int_msk); 1455 1456 if (err->int_msk & QM_DB_TIMEOUT) { 1457 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01); 1458 type = (reg_val & QM_DB_TIMEOUT_TYPE) >> 1459 QM_DB_TIMEOUT_TYPE_SHIFT; 1460 vf_num = reg_val & QM_DB_TIMEOUT_VF; 1461 qp_id = reg_val >> QM_DB_TIMEOUT_QP_SHIFT; 1462 dev_err(dev, "qm %s doorbell timeout in function %u qp %u\n", 1463 qm_db_timeout[type], vf_num, qp_id); 1464 } else if (err->int_msk & QM_OF_FIFO_OF) { 1465 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00); 1466 type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >> 1467 QM_FIFO_OVERFLOW_TYPE_SHIFT; 1468 vf_num = reg_val & QM_FIFO_OVERFLOW_VF; 1469 qp_id = reg_val >> QM_FIFO_OVERFLOW_QP_SHIFT; 1470 if (type < ARRAY_SIZE(qm_fifo_overflow)) 1471 dev_err(dev, "qm %s fifo overflow in function %u qp %u\n", 1472 qm_fifo_overflow[type], vf_num, qp_id); 1473 else 1474 dev_err(dev, "unknown error type\n"); 1475 } else if (err->int_msk & QM_AXI_RRESP_ERR) { 1476 reg_val = readl(qm->io_base + QM_ABNORMAL_INF02); 1477 if (reg_val & QM_AXI_POISON_ERR) 1478 dev_err(dev, "qm axi poison error happened\n"); 1479 } 1480 } 1481 } 1482 1483 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) 1484 { 1485 u32 error_status; 1486 1487 error_status = qm_get_hw_error_status(qm); 1488 if (error_status & qm->error_mask) { 1489 if (error_status & QM_ECC_MBIT) 1490 qm->err_status.is_qm_ecc_mbit = true; 1491 1492 qm_log_hw_error(qm, error_status); 1493 if (error_status & qm->err_info.qm_reset_mask) { 1494 /* Disable the same error reporting until device is recovered. */ 1495 writel(qm->err_info.nfe & (~error_status), 1496 qm->io_base + QM_RAS_NFE_ENABLE); 1497 return ACC_ERR_NEED_RESET; 1498 } 1499 1500 /* Clear error source if not need reset. 
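 * The full NFE/CE enable masks are also rewritten below, since they may
 * have been narrowed while a previous error was being handled.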
*/ 1501 writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); 1502 writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE); 1503 writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE); 1504 } 1505 1506 return ACC_ERR_RECOVERED; 1507 } 1508 1509 static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num) 1510 { 1511 struct qm_mailbox mailbox; 1512 int ret; 1513 1514 qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0); 1515 mutex_lock(&qm->mailbox_lock); 1516 ret = qm_mb_nolock(qm, &mailbox); 1517 if (ret) 1518 goto err_unlock; 1519 1520 *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | 1521 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); 1522 1523 err_unlock: 1524 mutex_unlock(&qm->mailbox_lock); 1525 return ret; 1526 } 1527 1528 static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask) 1529 { 1530 u32 val; 1531 1532 if (qm->fun_type == QM_HW_PF) 1533 writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P); 1534 1535 val = readl(qm->io_base + QM_IFC_INT_SOURCE_V); 1536 val |= QM_IFC_INT_SOURCE_MASK; 1537 writel(val, qm->io_base + QM_IFC_INT_SOURCE_V); 1538 } 1539 1540 static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id) 1541 { 1542 struct device *dev = &qm->pdev->dev; 1543 u32 cmd; 1544 u64 msg; 1545 int ret; 1546 1547 ret = qm_get_mb_cmd(qm, &msg, vf_id); 1548 if (ret) { 1549 dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id); 1550 return; 1551 } 1552 1553 cmd = msg & QM_MB_CMD_DATA_MASK; 1554 switch (cmd) { 1555 case QM_VF_PREPARE_FAIL: 1556 dev_err(dev, "failed to stop VF(%u)!\n", vf_id); 1557 break; 1558 case QM_VF_START_FAIL: 1559 dev_err(dev, "failed to start VF(%u)!\n", vf_id); 1560 break; 1561 case QM_VF_PREPARE_DONE: 1562 case QM_VF_START_DONE: 1563 break; 1564 default: 1565 dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id); 1566 break; 1567 } 1568 } 1569 1570 static int qm_wait_vf_prepare_finish(struct hisi_qm *qm) 1571 { 1572 struct device *dev = &qm->pdev->dev; 1573 u32 vfs_num = qm->vfs_num; 1574 int cnt = 0; 1575 int ret = 0; 1576 u64 val; 1577 u32 i; 1578 1579 if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 1580 return 0; 1581 1582 while (true) { 1583 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); 1584 /* All VFs send command to PF, break */ 1585 if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1)) 1586 break; 1587 1588 if (++cnt > QM_MAX_PF_WAIT_COUNT) { 1589 ret = -EBUSY; 1590 break; 1591 } 1592 1593 msleep(QM_WAIT_DST_ACK); 1594 } 1595 1596 /* PF check VFs msg */ 1597 for (i = 1; i <= vfs_num; i++) { 1598 if (val & BIT(i)) 1599 qm_handle_vf_msg(qm, i); 1600 else 1601 dev_err(dev, "VF(%u) not ping PF!\n", i); 1602 } 1603 1604 /* PF clear interrupt to ack VFs */ 1605 qm_clear_cmd_interrupt(qm, val); 1606 1607 return ret; 1608 } 1609 1610 static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num) 1611 { 1612 u32 val; 1613 1614 val = readl(qm->io_base + QM_IFC_INT_CFG); 1615 val &= ~QM_IFC_SEND_ALL_VFS; 1616 val |= fun_num; 1617 writel(val, qm->io_base + QM_IFC_INT_CFG); 1618 1619 val = readl(qm->io_base + QM_IFC_INT_SET_P); 1620 val |= QM_IFC_INT_SET_MASK; 1621 writel(val, qm->io_base + QM_IFC_INT_SET_P); 1622 } 1623 1624 static void qm_trigger_pf_interrupt(struct hisi_qm *qm) 1625 { 1626 u32 val; 1627 1628 val = readl(qm->io_base + QM_IFC_INT_SET_V); 1629 val |= QM_IFC_INT_SET_MASK; 1630 writel(val, qm->io_base + QM_IFC_INT_SET_V); 1631 } 1632 1633 static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num) 1634 { 1635 struct device *dev = &qm->pdev->dev; 1636 struct 
qm_mailbox mailbox;
	int cnt = 0;
	u64 val;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret) {
		dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
		goto err_unlock;
	}

	qm_trigger_vf_interrupt(qm, fun_num);
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* If the VF has responded, the PF notified it successfully. */
		if (!(val & BIT(fun_num)))
			goto err_unlock;

		if (++cnt > QM_MAX_PF_WAIT_COUNT) {
			dev_err(dev, "failed to get response from VF(%u)!\n", fun_num);
			ret = -ETIMEDOUT;
			break;
		}
	}

err_unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}

static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
{
	struct device *dev = &qm->pdev->dev;
	u32 vfs_num = qm->vfs_num;
	struct qm_mailbox mailbox;
	u64 val = 0;
	int cnt = 0;
	int ret;
	u32 i;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
	mutex_lock(&qm->mailbox_lock);
	/* The PF sends the command to all VFs by mailbox. */
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret) {
		dev_err(dev, "failed to send command to VFs!\n");
		mutex_unlock(&qm->mailbox_lock);
		return ret;
	}

	qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* If all VFs have acked, the PF notified them successfully. */
		if (!(val & GENMASK(vfs_num, 1))) {
			mutex_unlock(&qm->mailbox_lock);
			return 0;
		}

		if (++cnt > QM_MAX_PF_WAIT_COUNT)
			break;
	}

	mutex_unlock(&qm->mailbox_lock);

	/* Check which VFs failed to respond in time.
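	 * Any bit still set in the QM_IFC_READY_STATUS snapshot marks a VF that
	 * never acknowledged the command.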
*/ 1706 for (i = 1; i <= vfs_num; i++) { 1707 if (val & BIT(i)) 1708 dev_err(dev, "failed to get response from VF(%u)!\n", i); 1709 } 1710 1711 return -ETIMEDOUT; 1712 } 1713 1714 static int qm_ping_pf(struct hisi_qm *qm, u64 cmd) 1715 { 1716 struct qm_mailbox mailbox; 1717 int cnt = 0; 1718 u32 val; 1719 int ret; 1720 1721 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0); 1722 mutex_lock(&qm->mailbox_lock); 1723 ret = qm_mb_nolock(qm, &mailbox); 1724 if (ret) { 1725 dev_err(&qm->pdev->dev, "failed to send command to PF!\n"); 1726 goto unlock; 1727 } 1728 1729 qm_trigger_pf_interrupt(qm); 1730 /* Waiting for PF response */ 1731 while (true) { 1732 msleep(QM_WAIT_DST_ACK); 1733 val = readl(qm->io_base + QM_IFC_INT_SET_V); 1734 if (!(val & QM_IFC_INT_STATUS_MASK)) 1735 break; 1736 1737 if (++cnt > QM_MAX_VF_WAIT_COUNT) { 1738 ret = -ETIMEDOUT; 1739 break; 1740 } 1741 } 1742 1743 unlock: 1744 mutex_unlock(&qm->mailbox_lock); 1745 return ret; 1746 } 1747 1748 static int qm_drain_qm(struct hisi_qm *qm) 1749 { 1750 return hisi_qm_mb(qm, QM_MB_CMD_FLUSH_QM, 0, 0, 0); 1751 } 1752 1753 static int qm_stop_qp(struct hisi_qp *qp) 1754 { 1755 return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); 1756 } 1757 1758 static int qm_set_msi(struct hisi_qm *qm, bool set) 1759 { 1760 struct pci_dev *pdev = qm->pdev; 1761 1762 if (set) { 1763 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, 1764 0); 1765 } else { 1766 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, 1767 ACC_PEH_MSI_DISABLE); 1768 if (qm->err_status.is_qm_ecc_mbit || 1769 qm->err_status.is_dev_ecc_mbit) 1770 return 0; 1771 1772 mdelay(1); 1773 if (readl(qm->io_base + QM_PEH_DFX_INFO0)) 1774 return -EFAULT; 1775 } 1776 1777 return 0; 1778 } 1779 1780 static void qm_wait_msi_finish(struct hisi_qm *qm) 1781 { 1782 struct pci_dev *pdev = qm->pdev; 1783 u32 cmd = ~0; 1784 int cnt = 0; 1785 u32 val; 1786 int ret; 1787 1788 while (true) { 1789 pci_read_config_dword(pdev, pdev->msi_cap + 1790 PCI_MSI_PENDING_64, &cmd); 1791 if (!cmd) 1792 break; 1793 1794 if (++cnt > MAX_WAIT_COUNTS) { 1795 pci_warn(pdev, "failed to empty MSI PENDING!\n"); 1796 break; 1797 } 1798 1799 udelay(1); 1800 } 1801 1802 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0, 1803 val, !(val & QM_PEH_DFX_MASK), 1804 POLL_PERIOD, POLL_TIMEOUT); 1805 if (ret) 1806 pci_warn(pdev, "failed to empty PEH MSI!\n"); 1807 1808 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1, 1809 val, !(val & QM_PEH_MSI_FINISH_MASK), 1810 POLL_PERIOD, POLL_TIMEOUT); 1811 if (ret) 1812 pci_warn(pdev, "failed to finish MSI operation!\n"); 1813 } 1814 1815 static int qm_set_msi_v3(struct hisi_qm *qm, bool set) 1816 { 1817 struct pci_dev *pdev = qm->pdev; 1818 int ret = -ETIMEDOUT; 1819 u32 cmd, i; 1820 1821 pci_read_config_dword(pdev, pdev->msi_cap, &cmd); 1822 if (set) 1823 cmd |= QM_MSI_CAP_ENABLE; 1824 else 1825 cmd &= ~QM_MSI_CAP_ENABLE; 1826 1827 pci_write_config_dword(pdev, pdev->msi_cap, cmd); 1828 if (set) { 1829 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 1830 pci_read_config_dword(pdev, pdev->msi_cap, &cmd); 1831 if (cmd & QM_MSI_CAP_ENABLE) 1832 return 0; 1833 1834 udelay(1); 1835 } 1836 } else { 1837 udelay(WAIT_PERIOD_US_MIN); 1838 qm_wait_msi_finish(qm); 1839 ret = 0; 1840 } 1841 1842 return ret; 1843 } 1844 1845 static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { 1846 .qm_db = qm_db_v1, 1847 .hw_error_init = qm_hw_error_init_v1, 1848 .set_msi = qm_set_msi, 1849 }; 1850 1851 static const struct hisi_qm_hw_ops qm_hw_ops_v2 = { 1852 
.get_vft = qm_get_vft_v2, 1853 .qm_db = qm_db_v2, 1854 .hw_error_init = qm_hw_error_init_v2, 1855 .hw_error_uninit = qm_hw_error_uninit_v2, 1856 .hw_error_handle = qm_hw_error_handle_v2, 1857 .set_msi = qm_set_msi, 1858 }; 1859 1860 static const struct hisi_qm_hw_ops qm_hw_ops_v3 = { 1861 .get_vft = qm_get_vft_v2, 1862 .qm_db = qm_db_v2, 1863 .hw_error_init = qm_hw_error_init_v3, 1864 .hw_error_uninit = qm_hw_error_uninit_v3, 1865 .hw_error_handle = qm_hw_error_handle_v2, 1866 .set_msi = qm_set_msi_v3, 1867 }; 1868 1869 static void *qm_get_avail_sqe(struct hisi_qp *qp) 1870 { 1871 struct hisi_qp_status *qp_status = &qp->qp_status; 1872 u16 sq_tail = qp_status->sq_tail; 1873 1874 if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1)) 1875 return NULL; 1876 1877 return qp->sqe + sq_tail * qp->qm->sqe_size; 1878 } 1879 1880 static void hisi_qm_unset_hw_reset(struct hisi_qp *qp) 1881 { 1882 u64 *addr; 1883 1884 /* Use last 64 bits of DUS to reset status. */ 1885 addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET; 1886 *addr = 0; 1887 } 1888 1889 static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) 1890 { 1891 struct device *dev = &qm->pdev->dev; 1892 struct hisi_qp *qp; 1893 int qp_id; 1894 1895 if (atomic_read(&qm->status.flags) == QM_STOP) { 1896 dev_info_ratelimited(dev, "failed to create qp as qm is stop!\n"); 1897 return ERR_PTR(-EPERM); 1898 } 1899 1900 if (qm->qp_in_used == qm->qp_num) { 1901 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", 1902 qm->qp_num); 1903 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); 1904 return ERR_PTR(-EBUSY); 1905 } 1906 1907 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC); 1908 if (qp_id < 0) { 1909 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", 1910 qm->qp_num); 1911 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); 1912 return ERR_PTR(-EBUSY); 1913 } 1914 1915 qp = &qm->qp_array[qp_id]; 1916 hisi_qm_unset_hw_reset(qp); 1917 memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth); 1918 1919 qp->event_cb = NULL; 1920 qp->req_cb = NULL; 1921 qp->qp_id = qp_id; 1922 qp->alg_type = alg_type; 1923 qp->is_in_kernel = true; 1924 qm->qp_in_used++; 1925 1926 return qp; 1927 } 1928 1929 /** 1930 * hisi_qm_create_qp() - Create a queue pair from qm. 1931 * @qm: The qm we create a qp from. 1932 * @alg_type: Accelerator specific algorithm type in sqc. 1933 * 1934 * Return created qp, negative error code if failed. 1935 */ 1936 static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) 1937 { 1938 struct hisi_qp *qp; 1939 int ret; 1940 1941 ret = qm_pm_get_sync(qm); 1942 if (ret) 1943 return ERR_PTR(ret); 1944 1945 down_write(&qm->qps_lock); 1946 qp = qm_create_qp_nolock(qm, alg_type); 1947 up_write(&qm->qps_lock); 1948 1949 if (IS_ERR(qp)) 1950 qm_pm_put_sync(qm); 1951 1952 return qp; 1953 } 1954 1955 /** 1956 * hisi_qm_release_qp() - Release a qp back to its qm. 1957 * @qp: The qp we want to release. 1958 * 1959 * This function releases the resource of a qp. 
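 * It also drops the runtime PM reference taken in hisi_qm_create_qp().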
 */
static void hisi_qm_release_qp(struct hisi_qp *qp)
{
	struct hisi_qm *qm = qp->qm;

	down_write(&qm->qps_lock);

	qm->qp_in_used--;
	idr_remove(&qm->qp_idr, qp->qp_id);

	up_write(&qm->qps_lock);

	qm_pm_put_sync(qm);
}

static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	struct hisi_qm *qm = qp->qm;
	enum qm_hw_ver ver = qm->ver;
	struct qm_sqc sqc = {0};

	if (ver == QM_HW_V1) {
		sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
		sqc.w8 = cpu_to_le16(qp->sq_depth - 1);
	} else {
		sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
		sqc.w8 = 0; /* rand_qc */
	}
	sqc.w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
	sqc.base_l = cpu_to_le32(lower_32_bits(qp->sqe_dma));
	sqc.base_h = cpu_to_le32(upper_32_bits(qp->sqe_dma));
	sqc.cq_num = cpu_to_le16(qp_id);
	sqc.pasid = cpu_to_le16(pasid);

	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
		sqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
				      QM_QC_PASID_ENABLE_SHIFT);

	return qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 0);
}

static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	struct hisi_qm *qm = qp->qm;
	enum qm_hw_ver ver = qm->ver;
	struct qm_cqc cqc = {0};

	if (ver == QM_HW_V1) {
		cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, QM_QC_CQE_SIZE));
		cqc.w8 = cpu_to_le16(qp->cq_depth - 1);
	} else {
		cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
		cqc.w8 = 0; /* rand_qc */
	}
	/*
	 * Enable request-finishing interrupts by default, so interrupts
	 * keep being generated until this is disabled.
	 */
	cqc.dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
	cqc.base_l = cpu_to_le32(lower_32_bits(qp->cqe_dma));
	cqc.base_h = cpu_to_le32(upper_32_bits(qp->cqe_dma));
	cqc.pasid = cpu_to_le16(pasid);

	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
		cqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE);

	return qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 0);
}

static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	int ret;

	qm_init_qp_status(qp);

	ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
	if (ret)
		return ret;

	return qm_cq_ctx_cfg(qp, qp_id, pasid);
}

static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
{
	struct hisi_qm *qm = qp->qm;
	struct device *dev = &qm->pdev->dev;
	int qp_id = qp->qp_id;
	u32 pasid = arg;
	int ret;

	if (atomic_read(&qm->status.flags) == QM_STOP) {
		dev_info_ratelimited(dev, "failed to start qp as qm is stop!\n");
		return -EPERM;
	}

	ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
	if (ret)
		return ret;

	atomic_set(&qp->qp_status.flags, QP_START);
	dev_dbg(dev, "queue %d started\n", qp_id);

	return 0;
}

/**
 * hisi_qm_start_qp() - Start a qp running.
 * @qp: The qp we want to start to run.
 * @arg: Accelerator specific argument.
 *
 * After this function, the qp can receive requests from user. Returns 0 if
 * successful, negative error code if failed.
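 *
 * A minimal calling sketch (an illustrative example only; it assumes the qp
 * was obtained from this driver's qp allocation helpers and that @arg carries
 * the PASID, 0 when none is used):
 *
 *	ret = hisi_qm_start_qp(qp, 0);
 *	if (!ret)
 *		ret = hisi_qp_send(qp, msg);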
2073 */ 2074 int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) 2075 { 2076 struct hisi_qm *qm = qp->qm; 2077 int ret; 2078 2079 down_write(&qm->qps_lock); 2080 ret = qm_start_qp_nolock(qp, arg); 2081 up_write(&qm->qps_lock); 2082 2083 return ret; 2084 } 2085 EXPORT_SYMBOL_GPL(hisi_qm_start_qp); 2086 2087 /** 2088 * qp_stop_fail_cb() - call request cb. 2089 * @qp: stopped failed qp. 2090 * 2091 * Callback function should be called whether task completed or not. 2092 */ 2093 static void qp_stop_fail_cb(struct hisi_qp *qp) 2094 { 2095 int qp_used = atomic_read(&qp->qp_status.used); 2096 u16 cur_tail = qp->qp_status.sq_tail; 2097 u16 sq_depth = qp->sq_depth; 2098 u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth; 2099 struct hisi_qm *qm = qp->qm; 2100 u16 pos; 2101 int i; 2102 2103 for (i = 0; i < qp_used; i++) { 2104 pos = (i + cur_head) % sq_depth; 2105 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); 2106 atomic_dec(&qp->qp_status.used); 2107 } 2108 } 2109 2110 static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id) 2111 { 2112 struct device *dev = &qm->pdev->dev; 2113 struct qm_sqc sqc; 2114 struct qm_cqc cqc; 2115 int ret, i = 0; 2116 2117 while (++i) { 2118 ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1); 2119 if (ret) { 2120 dev_err_ratelimited(dev, "Failed to dump sqc!\n"); 2121 *state = QM_DUMP_SQC_FAIL; 2122 return ret; 2123 } 2124 2125 ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1); 2126 if (ret) { 2127 dev_err_ratelimited(dev, "Failed to dump cqc!\n"); 2128 *state = QM_DUMP_CQC_FAIL; 2129 return ret; 2130 } 2131 2132 if ((sqc.tail == cqc.tail) && 2133 (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc))) 2134 break; 2135 2136 if (i == MAX_WAIT_COUNTS) { 2137 dev_err(dev, "Fail to empty queue %u!\n", qp_id); 2138 *state = QM_STOP_QUEUE_FAIL; 2139 return -ETIMEDOUT; 2140 } 2141 2142 usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); 2143 } 2144 2145 return 0; 2146 } 2147 2148 /** 2149 * qm_drain_qp() - Drain a qp. 2150 * @qp: The qp we want to drain. 2151 * 2152 * If the device does not support stopping queue by sending mailbox, 2153 * determine whether the queue is cleared by judging the tail pointers of 2154 * sq and cq. 2155 */ 2156 static int qm_drain_qp(struct hisi_qp *qp) 2157 { 2158 struct hisi_qm *qm = qp->qm; 2159 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); 2160 u32 state = 0; 2161 int ret; 2162 2163 /* No need to judge if master OOO is blocked. */ 2164 if (qm_check_dev_error(pf_qm)) 2165 return 0; 2166 2167 /* HW V3 supports drain qp by device */ 2168 if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) { 2169 ret = qm_stop_qp(qp); 2170 if (ret) { 2171 dev_err(&qm->pdev->dev, "Failed to stop qp!\n"); 2172 state = QM_STOP_QUEUE_FAIL; 2173 goto set_dev_state; 2174 } 2175 return ret; 2176 } 2177 2178 ret = qm_wait_qp_empty(qm, &state, qp->qp_id); 2179 if (ret) 2180 goto set_dev_state; 2181 2182 return 0; 2183 2184 set_dev_state: 2185 if (qm->debug.dev_dfx.dev_timeout) 2186 qm->debug.dev_dfx.dev_state = state; 2187 2188 return ret; 2189 } 2190 2191 static void qm_stop_qp_nolock(struct hisi_qp *qp) 2192 { 2193 struct hisi_qm *qm = qp->qm; 2194 struct device *dev = &qm->pdev->dev; 2195 int ret; 2196 2197 /* 2198 * It is allowed to stop and release qp when reset, If the qp is 2199 * stopped when reset but still want to be released then, the 2200 * is_resetting flag should be set negative so that this qp will not 2201 * be restarted after reset. 
 */
	if (atomic_read(&qp->qp_status.flags) != QP_START) {
		qp->is_resetting = false;
		return;
	}

	atomic_set(&qp->qp_status.flags, QP_STOP);

	/* V3 supports stopping the queue directly while preparing for FLR */
	if (qm->ver < QM_HW_V3 || qm->status.stop_reason == QM_NORMAL) {
		ret = qm_drain_qp(qp);
		if (ret)
			dev_err(dev, "Failed to drain out data for stopping qp(%u)!\n", qp->qp_id);
	}

	flush_workqueue(qm->wq);
	if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
		qp_stop_fail_cb(qp);

	dev_dbg(dev, "stop queue %u!\n", qp->qp_id);
}

/**
 * hisi_qm_stop_qp() - Stop a qp in qm.
 * @qp: The qp we want to stop.
 *
 * This function is the reverse of hisi_qm_start_qp().
 */
void hisi_qm_stop_qp(struct hisi_qp *qp)
{
	down_write(&qp->qm->qps_lock);
	qm_stop_qp_nolock(qp);
	up_write(&qp->qm->qps_lock);
}
EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);

/**
 * hisi_qp_send() - Queue up a task in the hardware queue.
 * @qp: The qp in which to put the message.
 * @msg: The message.
 *
 * This function will return -EBUSY if the qp is currently full, and -EAGAIN
 * if the qm related to the qp is resetting.
 *
 * Note: This function may run concurrently with qm_irq_thread and with an
 * ACC reset. It has no race with qm_irq_thread. However, an ACC reset may
 * happen during hisi_qp_send; for performance reasons no lock is taken here,
 * so the current qm_db may fail to be sent, or the sent sqe may never be
 * received. The QM sync/async receive functions should handle such error
 * sqes, and the ACC reset-done path should clear the used sqes to 0.
 */
int hisi_qp_send(struct hisi_qp *qp, const void *msg)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;
	u16 sq_tail = qp_status->sq_tail;
	u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth;
	void *sqe = qm_get_avail_sqe(qp);

	if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
		     atomic_read(&qp->qm->status.flags) == QM_STOP ||
		     qp->is_resetting)) {
		dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
		return -EAGAIN;
	}

	if (!sqe)
		return -EBUSY;

	memcpy(sqe, msg, qp->qm->sqe_size);

	qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
	atomic_inc(&qp->qp_status.used);
	qp_status->sq_tail = sq_tail_next;

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_qp_send);

static void hisi_qm_cache_wb(struct hisi_qm *qm)
{
	unsigned int val;

	if (qm->ver == QM_HW_V1)
		return;

	writel(0x1, qm->io_base + QM_CACHE_WB_START);
	if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
				       val, val & BIT(0), POLL_PERIOD,
				       POLL_TIMEOUT))
		dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
}

static void qm_qp_event_notifier(struct hisi_qp *qp)
{
	wake_up_interruptible(&qp->uacce_q->wait);
}

/* This function returns the number of free qps in the qm.
*/ 2300 static int hisi_qm_get_available_instances(struct uacce_device *uacce) 2301 { 2302 struct hisi_qm *qm = uacce->priv; 2303 int ret; 2304 2305 down_read(&qm->qps_lock); 2306 ret = qm->qp_num - qm->qp_in_used; 2307 up_read(&qm->qps_lock); 2308 2309 return ret; 2310 } 2311 2312 static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset) 2313 { 2314 int i; 2315 2316 for (i = 0; i < qm->qp_num; i++) 2317 qm_set_qp_disable(&qm->qp_array[i], offset); 2318 } 2319 2320 static int hisi_qm_uacce_get_queue(struct uacce_device *uacce, 2321 unsigned long arg, 2322 struct uacce_queue *q) 2323 { 2324 struct hisi_qm *qm = uacce->priv; 2325 struct hisi_qp *qp; 2326 u8 alg_type = 0; 2327 2328 qp = hisi_qm_create_qp(qm, alg_type); 2329 if (IS_ERR(qp)) 2330 return PTR_ERR(qp); 2331 2332 q->priv = qp; 2333 q->uacce = uacce; 2334 qp->uacce_q = q; 2335 qp->event_cb = qm_qp_event_notifier; 2336 qp->pasid = arg; 2337 qp->is_in_kernel = false; 2338 2339 return 0; 2340 } 2341 2342 static void hisi_qm_uacce_put_queue(struct uacce_queue *q) 2343 { 2344 struct hisi_qp *qp = q->priv; 2345 2346 hisi_qm_release_qp(qp); 2347 } 2348 2349 /* map sq/cq/doorbell to user space */ 2350 static int hisi_qm_uacce_mmap(struct uacce_queue *q, 2351 struct vm_area_struct *vma, 2352 struct uacce_qfile_region *qfr) 2353 { 2354 struct hisi_qp *qp = q->priv; 2355 struct hisi_qm *qm = qp->qm; 2356 resource_size_t phys_base = qm->db_phys_base + 2357 qp->qp_id * qm->db_interval; 2358 size_t sz = vma->vm_end - vma->vm_start; 2359 struct pci_dev *pdev = qm->pdev; 2360 struct device *dev = &pdev->dev; 2361 unsigned long vm_pgoff; 2362 int ret; 2363 2364 switch (qfr->type) { 2365 case UACCE_QFRT_MMIO: 2366 if (qm->ver == QM_HW_V1) { 2367 if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR) 2368 return -EINVAL; 2369 } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { 2370 if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR + 2371 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE)) 2372 return -EINVAL; 2373 } else { 2374 if (sz > qm->db_interval) 2375 return -EINVAL; 2376 } 2377 2378 vm_flags_set(vma, VM_IO); 2379 2380 return remap_pfn_range(vma, vma->vm_start, 2381 phys_base >> PAGE_SHIFT, 2382 sz, pgprot_noncached(vma->vm_page_prot)); 2383 case UACCE_QFRT_DUS: 2384 if (sz != qp->qdma.size) 2385 return -EINVAL; 2386 2387 /* 2388 * dma_mmap_coherent() requires vm_pgoff as 0 2389 * restore vm_pfoff to initial value for mmap() 2390 */ 2391 vm_pgoff = vma->vm_pgoff; 2392 vma->vm_pgoff = 0; 2393 ret = dma_mmap_coherent(dev, vma, qp->qdma.va, 2394 qp->qdma.dma, sz); 2395 vma->vm_pgoff = vm_pgoff; 2396 return ret; 2397 2398 default: 2399 return -EINVAL; 2400 } 2401 } 2402 2403 static int hisi_qm_uacce_start_queue(struct uacce_queue *q) 2404 { 2405 struct hisi_qp *qp = q->priv; 2406 2407 return hisi_qm_start_qp(qp, qp->pasid); 2408 } 2409 2410 static void hisi_qm_uacce_stop_queue(struct uacce_queue *q) 2411 { 2412 struct hisi_qp *qp = q->priv; 2413 struct hisi_qm *qm = qp->qm; 2414 struct qm_dev_dfx *dev_dfx = &qm->debug.dev_dfx; 2415 u32 i = 0; 2416 2417 hisi_qm_stop_qp(qp); 2418 2419 if (!dev_dfx->dev_timeout || !dev_dfx->dev_state) 2420 return; 2421 2422 /* 2423 * After the queue fails to be stopped, 2424 * wait for a period of time before releasing the queue. 
2425 */ 2426 while (++i) { 2427 msleep(WAIT_PERIOD); 2428 2429 /* Since dev_timeout maybe modified, check i >= dev_timeout */ 2430 if (i >= dev_dfx->dev_timeout) { 2431 dev_err(&qm->pdev->dev, "Stop q %u timeout, state %u\n", 2432 qp->qp_id, dev_dfx->dev_state); 2433 dev_dfx->dev_state = QM_FINISH_WAIT; 2434 break; 2435 } 2436 } 2437 } 2438 2439 static int hisi_qm_is_q_updated(struct uacce_queue *q) 2440 { 2441 struct hisi_qp *qp = q->priv; 2442 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; 2443 int updated = 0; 2444 2445 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { 2446 /* make sure to read data from memory */ 2447 dma_rmb(); 2448 qm_cq_head_update(qp); 2449 cqe = qp->cqe + qp->qp_status.cq_head; 2450 updated = 1; 2451 } 2452 2453 return updated; 2454 } 2455 2456 static void qm_set_sqctype(struct uacce_queue *q, u16 type) 2457 { 2458 struct hisi_qm *qm = q->uacce->priv; 2459 struct hisi_qp *qp = q->priv; 2460 2461 down_write(&qm->qps_lock); 2462 qp->alg_type = type; 2463 up_write(&qm->qps_lock); 2464 } 2465 2466 static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd, 2467 unsigned long arg) 2468 { 2469 struct hisi_qp *qp = q->priv; 2470 struct hisi_qp_info qp_info; 2471 struct hisi_qp_ctx qp_ctx; 2472 2473 if (cmd == UACCE_CMD_QM_SET_QP_CTX) { 2474 if (copy_from_user(&qp_ctx, (void __user *)arg, 2475 sizeof(struct hisi_qp_ctx))) 2476 return -EFAULT; 2477 2478 if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1) 2479 return -EINVAL; 2480 2481 qm_set_sqctype(q, qp_ctx.qc_type); 2482 qp_ctx.id = qp->qp_id; 2483 2484 if (copy_to_user((void __user *)arg, &qp_ctx, 2485 sizeof(struct hisi_qp_ctx))) 2486 return -EFAULT; 2487 2488 return 0; 2489 } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) { 2490 if (copy_from_user(&qp_info, (void __user *)arg, 2491 sizeof(struct hisi_qp_info))) 2492 return -EFAULT; 2493 2494 qp_info.sqe_size = qp->qm->sqe_size; 2495 qp_info.sq_depth = qp->sq_depth; 2496 qp_info.cq_depth = qp->cq_depth; 2497 2498 if (copy_to_user((void __user *)arg, &qp_info, 2499 sizeof(struct hisi_qp_info))) 2500 return -EFAULT; 2501 2502 return 0; 2503 } 2504 2505 return -EINVAL; 2506 } 2507 2508 /** 2509 * qm_hw_err_isolate() - Try to set the isolation status of the uacce device 2510 * according to user's configuration of error threshold. 2511 * @qm: the uacce device 2512 */ 2513 static int qm_hw_err_isolate(struct hisi_qm *qm) 2514 { 2515 struct qm_hw_err *err, *tmp, *hw_err; 2516 struct qm_err_isolate *isolate; 2517 u32 count = 0; 2518 2519 isolate = &qm->isolate_data; 2520 2521 #define SECONDS_PER_HOUR 3600 2522 2523 /* All the hw errs are processed by PF driver */ 2524 if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold) 2525 return 0; 2526 2527 hw_err = kzalloc(sizeof(*hw_err), GFP_KERNEL); 2528 if (!hw_err) 2529 return -ENOMEM; 2530 2531 /* 2532 * Time-stamp every slot AER error. Then check the AER error log when the 2533 * next device AER error occurred. if the device slot AER error count exceeds 2534 * the setting error threshold in one hour, the isolated state will be set 2535 * to true. And the AER error logs that exceed one hour will be cleared. 
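 *
 * Worked example with hypothetical numbers: with err_threshold set to 3 and
 * errors already logged at t, t + 30min and t + 50min, a new error at
 * t + 55min still finds all three entries inside the one-hour window, so
 * count reaches 3 and is_isolate is set. If the new error arrived at
 * t + 70min instead, the entry from t would be aged out first, count would
 * only reach 2 and the device would stay usable.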
2536 */ 2537 mutex_lock(&isolate->isolate_lock); 2538 hw_err->timestamp = jiffies; 2539 list_for_each_entry_safe(err, tmp, &isolate->qm_hw_errs, list) { 2540 if ((hw_err->timestamp - err->timestamp) / HZ > 2541 SECONDS_PER_HOUR) { 2542 list_del(&err->list); 2543 kfree(err); 2544 } else { 2545 count++; 2546 } 2547 } 2548 list_add(&hw_err->list, &isolate->qm_hw_errs); 2549 mutex_unlock(&isolate->isolate_lock); 2550 2551 if (count >= isolate->err_threshold) 2552 isolate->is_isolate = true; 2553 2554 return 0; 2555 } 2556 2557 static void qm_hw_err_destroy(struct hisi_qm *qm) 2558 { 2559 struct qm_hw_err *err, *tmp; 2560 2561 mutex_lock(&qm->isolate_data.isolate_lock); 2562 list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) { 2563 list_del(&err->list); 2564 kfree(err); 2565 } 2566 mutex_unlock(&qm->isolate_data.isolate_lock); 2567 } 2568 2569 static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce) 2570 { 2571 struct hisi_qm *qm = uacce->priv; 2572 struct hisi_qm *pf_qm; 2573 2574 if (uacce->is_vf) 2575 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); 2576 else 2577 pf_qm = qm; 2578 2579 return pf_qm->isolate_data.is_isolate ? 2580 UACCE_DEV_ISOLATE : UACCE_DEV_NORMAL; 2581 } 2582 2583 static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num) 2584 { 2585 struct hisi_qm *qm = uacce->priv; 2586 2587 /* Must be set by PF */ 2588 if (uacce->is_vf) 2589 return -EPERM; 2590 2591 if (qm->isolate_data.is_isolate) 2592 return -EPERM; 2593 2594 qm->isolate_data.err_threshold = num; 2595 2596 /* After the policy is updated, need to reset the hardware err list */ 2597 qm_hw_err_destroy(qm); 2598 2599 return 0; 2600 } 2601 2602 static u32 hisi_qm_isolate_threshold_read(struct uacce_device *uacce) 2603 { 2604 struct hisi_qm *qm = uacce->priv; 2605 struct hisi_qm *pf_qm; 2606 2607 if (uacce->is_vf) { 2608 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); 2609 return pf_qm->isolate_data.err_threshold; 2610 } 2611 2612 return qm->isolate_data.err_threshold; 2613 } 2614 2615 static const struct uacce_ops uacce_qm_ops = { 2616 .get_available_instances = hisi_qm_get_available_instances, 2617 .get_queue = hisi_qm_uacce_get_queue, 2618 .put_queue = hisi_qm_uacce_put_queue, 2619 .start_queue = hisi_qm_uacce_start_queue, 2620 .stop_queue = hisi_qm_uacce_stop_queue, 2621 .mmap = hisi_qm_uacce_mmap, 2622 .ioctl = hisi_qm_uacce_ioctl, 2623 .is_q_updated = hisi_qm_is_q_updated, 2624 .get_isolate_state = hisi_qm_get_isolate_state, 2625 .isolate_err_threshold_write = hisi_qm_isolate_threshold_write, 2626 .isolate_err_threshold_read = hisi_qm_isolate_threshold_read, 2627 }; 2628 2629 static void qm_remove_uacce(struct hisi_qm *qm) 2630 { 2631 struct uacce_device *uacce = qm->uacce; 2632 2633 if (qm->use_sva) { 2634 qm_hw_err_destroy(qm); 2635 uacce_remove(uacce); 2636 qm->uacce = NULL; 2637 } 2638 } 2639 2640 static int qm_alloc_uacce(struct hisi_qm *qm) 2641 { 2642 struct pci_dev *pdev = qm->pdev; 2643 struct uacce_device *uacce; 2644 unsigned long mmio_page_nr; 2645 unsigned long dus_page_nr; 2646 u16 sq_depth, cq_depth; 2647 struct uacce_interface interface = { 2648 .flags = UACCE_DEV_SVA, 2649 .ops = &uacce_qm_ops, 2650 }; 2651 int ret; 2652 2653 ret = strscpy(interface.name, dev_driver_string(&pdev->dev), 2654 sizeof(interface.name)); 2655 if (ret < 0) 2656 return -ENAMETOOLONG; 2657 2658 uacce = uacce_alloc(&pdev->dev, &interface); 2659 if (IS_ERR(uacce)) 2660 return PTR_ERR(uacce); 2661 2662 if (uacce->flags & UACCE_DEV_SVA) { 2663 qm->use_sva = true; 2664 
	} else {
		/* only consider sva case */
		qm_remove_uacce(qm);
		return -EINVAL;
	}

	uacce->is_vf = pdev->is_virtfn;
	uacce->priv = qm;

	if (qm->ver == QM_HW_V1)
		uacce->api_ver = HISI_QM_API_VER_BASE;
	else if (qm->ver == QM_HW_V2)
		uacce->api_ver = HISI_QM_API_VER2_BASE;
	else
		uacce->api_ver = HISI_QM_API_VER3_BASE;

	if (qm->ver == QM_HW_V1)
		mmio_page_nr = QM_DOORBELL_PAGE_NR;
	else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
		mmio_page_nr = QM_DOORBELL_PAGE_NR +
			QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
	else
		mmio_page_nr = qm->db_interval / PAGE_SIZE;

	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);

	/* Add one more page for device or qp status */
	dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
		       sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>
			PAGE_SHIFT;

	uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
	uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;

	qm->uacce = uacce;
	INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs);
	mutex_init(&qm->isolate_data.isolate_lock);

	return 0;
}

/**
 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests.
 * If there is a user on the QM, return failure without doing anything.
 * @qm: The qm to be frozen.
 *
 * This function freezes the QM, then we can do SRIOV disabling.
 */
static int qm_frozen(struct hisi_qm *qm)
{
	if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
		return 0;

	down_write(&qm->qps_lock);

	if (!qm->qp_in_used) {
		qm->qp_in_used = qm->qp_num;
		up_write(&qm->qps_lock);
		set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
		return 0;
	}

	up_write(&qm->qps_lock);

	return -EBUSY;
}

static int qm_try_frozen_vfs(struct pci_dev *pdev,
			     struct hisi_qm_list *qm_list)
{
	struct hisi_qm *qm, *vf_qm;
	struct pci_dev *dev;
	int ret = 0;

	if (!qm_list || !pdev)
		return -EINVAL;

	/* Try to freeze all the VFs before disabling SRIOV */
	mutex_lock(&qm_list->lock);
	list_for_each_entry(qm, &qm_list->list, list) {
		dev = qm->pdev;
		if (dev == pdev)
			continue;
		if (pci_physfn(dev) == pdev) {
			vf_qm = pci_get_drvdata(dev);
			ret = qm_frozen(vf_qm);
			if (ret)
				goto frozen_fail;
		}
	}

frozen_fail:
	mutex_unlock(&qm_list->lock);

	return ret;
}

/**
 * hisi_qm_wait_task_finish() - Wait until the task is finished
 * when removing the driver.
 * @qm: The qm needed to wait for the task to finish.
 * @qm_list: The list of all available devices.
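 *
 * Illustrative sketch (assumed remove-path code, not taken from this file),
 * where my_qm_list is a hypothetical driver-wide hisi_qm_list:
 *
 *	hisi_qm_wait_task_finish(qm, &my_qm_list);
 *	hisi_qm_stop(qm, QM_NORMAL);
 *	hisi_qm_uninit(qm);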
2766 */ 2767 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list) 2768 { 2769 while (qm_frozen(qm) || 2770 ((qm->fun_type == QM_HW_PF) && 2771 qm_try_frozen_vfs(qm->pdev, qm_list))) { 2772 msleep(WAIT_PERIOD); 2773 } 2774 2775 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) || 2776 test_bit(QM_RESETTING, &qm->misc_ctl)) 2777 msleep(WAIT_PERIOD); 2778 2779 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 2780 flush_work(&qm->cmd_process); 2781 2782 udelay(REMOVE_WAIT_DELAY); 2783 } 2784 EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish); 2785 2786 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) 2787 { 2788 struct device *dev = &qm->pdev->dev; 2789 struct qm_dma *qdma; 2790 int i; 2791 2792 for (i = num - 1; i >= 0; i--) { 2793 qdma = &qm->qp_array[i].qdma; 2794 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); 2795 kfree(qm->poll_data[i].qp_finish_id); 2796 } 2797 2798 kfree(qm->poll_data); 2799 kfree(qm->qp_array); 2800 } 2801 2802 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, 2803 u16 sq_depth, u16 cq_depth) 2804 { 2805 struct device *dev = &qm->pdev->dev; 2806 size_t off = qm->sqe_size * sq_depth; 2807 struct hisi_qp *qp; 2808 int ret = -ENOMEM; 2809 2810 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16), 2811 GFP_KERNEL); 2812 if (!qm->poll_data[id].qp_finish_id) 2813 return -ENOMEM; 2814 2815 qp = &qm->qp_array[id]; 2816 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, 2817 GFP_KERNEL); 2818 if (!qp->qdma.va) 2819 goto err_free_qp_finish_id; 2820 2821 qp->sqe = qp->qdma.va; 2822 qp->sqe_dma = qp->qdma.dma; 2823 qp->cqe = qp->qdma.va + off; 2824 qp->cqe_dma = qp->qdma.dma + off; 2825 qp->qdma.size = dma_size; 2826 qp->sq_depth = sq_depth; 2827 qp->cq_depth = cq_depth; 2828 qp->qm = qm; 2829 qp->qp_id = id; 2830 2831 return 0; 2832 2833 err_free_qp_finish_id: 2834 kfree(qm->poll_data[id].qp_finish_id); 2835 return ret; 2836 } 2837 2838 static void hisi_qm_pre_init(struct hisi_qm *qm) 2839 { 2840 struct pci_dev *pdev = qm->pdev; 2841 2842 if (qm->ver == QM_HW_V1) 2843 qm->ops = &qm_hw_ops_v1; 2844 else if (qm->ver == QM_HW_V2) 2845 qm->ops = &qm_hw_ops_v2; 2846 else 2847 qm->ops = &qm_hw_ops_v3; 2848 2849 pci_set_drvdata(pdev, qm); 2850 mutex_init(&qm->mailbox_lock); 2851 init_rwsem(&qm->qps_lock); 2852 qm->qp_in_used = 0; 2853 if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { 2854 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) 2855 dev_info(&pdev->dev, "_PS0 and _PR0 are not defined"); 2856 } 2857 } 2858 2859 static void qm_cmd_uninit(struct hisi_qm *qm) 2860 { 2861 u32 val; 2862 2863 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 2864 return; 2865 2866 val = readl(qm->io_base + QM_IFC_INT_MASK); 2867 val |= QM_IFC_INT_DISABLE; 2868 writel(val, qm->io_base + QM_IFC_INT_MASK); 2869 } 2870 2871 static void qm_cmd_init(struct hisi_qm *qm) 2872 { 2873 u32 val; 2874 2875 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 2876 return; 2877 2878 /* Clear communication interrupt source */ 2879 qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR); 2880 2881 /* Enable pf to vf communication reg. 
*/ 2882 val = readl(qm->io_base + QM_IFC_INT_MASK); 2883 val &= ~QM_IFC_INT_DISABLE; 2884 writel(val, qm->io_base + QM_IFC_INT_MASK); 2885 } 2886 2887 static void qm_put_pci_res(struct hisi_qm *qm) 2888 { 2889 struct pci_dev *pdev = qm->pdev; 2890 2891 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) 2892 iounmap(qm->db_io_base); 2893 2894 iounmap(qm->io_base); 2895 pci_release_mem_regions(pdev); 2896 } 2897 2898 static void hisi_qm_pci_uninit(struct hisi_qm *qm) 2899 { 2900 struct pci_dev *pdev = qm->pdev; 2901 2902 pci_free_irq_vectors(pdev); 2903 qm_put_pci_res(qm); 2904 pci_disable_device(pdev); 2905 } 2906 2907 static void hisi_qm_set_state(struct hisi_qm *qm, u8 state) 2908 { 2909 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF) 2910 writel(state, qm->io_base + QM_VF_STATE); 2911 } 2912 2913 static void hisi_qm_unint_work(struct hisi_qm *qm) 2914 { 2915 destroy_workqueue(qm->wq); 2916 } 2917 2918 static void hisi_qm_free_rsv_buf(struct hisi_qm *qm) 2919 { 2920 struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma; 2921 struct device *dev = &qm->pdev->dev; 2922 2923 dma_free_coherent(dev, xqc_dma->size, xqc_dma->va, xqc_dma->dma); 2924 } 2925 2926 static void hisi_qm_memory_uninit(struct hisi_qm *qm) 2927 { 2928 struct device *dev = &qm->pdev->dev; 2929 2930 hisi_qp_memory_uninit(qm, qm->qp_num); 2931 hisi_qm_free_rsv_buf(qm); 2932 if (qm->qdma.va) { 2933 hisi_qm_cache_wb(qm); 2934 dma_free_coherent(dev, qm->qdma.size, 2935 qm->qdma.va, qm->qdma.dma); 2936 } 2937 2938 idr_destroy(&qm->qp_idr); 2939 2940 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 2941 kfree(qm->factor); 2942 } 2943 2944 /** 2945 * hisi_qm_uninit() - Uninitialize qm. 2946 * @qm: The qm needed uninit. 2947 * 2948 * This function uninits qm related device resources. 2949 */ 2950 void hisi_qm_uninit(struct hisi_qm *qm) 2951 { 2952 qm_cmd_uninit(qm); 2953 hisi_qm_unint_work(qm); 2954 2955 down_write(&qm->qps_lock); 2956 hisi_qm_memory_uninit(qm); 2957 hisi_qm_set_state(qm, QM_NOT_READY); 2958 up_write(&qm->qps_lock); 2959 2960 qm_remove_uacce(qm); 2961 qm_irqs_unregister(qm); 2962 hisi_qm_pci_uninit(qm); 2963 } 2964 EXPORT_SYMBOL_GPL(hisi_qm_uninit); 2965 2966 /** 2967 * hisi_qm_get_vft() - Get vft from a qm. 2968 * @qm: The qm we want to get its vft. 2969 * @base: The base number of queue in vft. 2970 * @number: The number of queues in vft. 2971 * 2972 * We can allocate multiple queues to a qm by configuring virtual function 2973 * table. We get related configures by this function. Normally, we call this 2974 * function in VF driver to get the queue information. 2975 * 2976 * qm hw v1 does not support this interface. 2977 */ 2978 static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) 2979 { 2980 if (!base || !number) 2981 return -EINVAL; 2982 2983 if (!qm->ops->get_vft) { 2984 dev_err(&qm->pdev->dev, "Don't support vft read!\n"); 2985 return -EINVAL; 2986 } 2987 2988 return qm->ops->get_vft(qm, base, number); 2989 } 2990 2991 /** 2992 * hisi_qm_set_vft() - Set vft to a qm. 2993 * @qm: The qm we want to set its vft. 2994 * @fun_num: The function number. 2995 * @base: The base number of queue in vft. 2996 * @number: The number of queues in vft. 2997 * 2998 * This function is alway called in PF driver, it is used to assign queues 2999 * among PF and VFs. 
3000 * 3001 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1) 3002 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1) 3003 * (VF function number 0x2) 3004 */ 3005 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, 3006 u32 number) 3007 { 3008 u32 max_q_num = qm->ctrl_qp_num; 3009 3010 if (base >= max_q_num || number > max_q_num || 3011 (base + number) > max_q_num) 3012 return -EINVAL; 3013 3014 return qm_set_sqc_cqc_vft(qm, fun_num, base, number); 3015 } 3016 3017 static void qm_init_eq_aeq_status(struct hisi_qm *qm) 3018 { 3019 struct hisi_qm_status *status = &qm->status; 3020 3021 status->eq_head = 0; 3022 status->aeq_head = 0; 3023 status->eqc_phase = true; 3024 status->aeqc_phase = true; 3025 } 3026 3027 static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm) 3028 { 3029 /* Clear eq/aeq interrupt source */ 3030 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); 3031 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); 3032 3033 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); 3034 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); 3035 } 3036 3037 static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm) 3038 { 3039 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); 3040 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); 3041 } 3042 3043 static int qm_eq_ctx_cfg(struct hisi_qm *qm) 3044 { 3045 struct qm_eqc eqc = {0}; 3046 3047 eqc.base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); 3048 eqc.base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); 3049 if (qm->ver == QM_HW_V1) 3050 eqc.dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); 3051 eqc.dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); 3052 3053 return qm_set_and_get_xqc(qm, QM_MB_CMD_EQC, &eqc, 0, 0); 3054 } 3055 3056 static int qm_aeq_ctx_cfg(struct hisi_qm *qm) 3057 { 3058 struct qm_aeqc aeqc = {0}; 3059 3060 aeqc.base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); 3061 aeqc.base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); 3062 aeqc.dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); 3063 3064 return qm_set_and_get_xqc(qm, QM_MB_CMD_AEQC, &aeqc, 0, 0); 3065 } 3066 3067 static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm) 3068 { 3069 struct device *dev = &qm->pdev->dev; 3070 int ret; 3071 3072 qm_init_eq_aeq_status(qm); 3073 3074 ret = qm_eq_ctx_cfg(qm); 3075 if (ret) { 3076 dev_err(dev, "Set eqc failed!\n"); 3077 return ret; 3078 } 3079 3080 return qm_aeq_ctx_cfg(qm); 3081 } 3082 3083 static int __hisi_qm_start(struct hisi_qm *qm) 3084 { 3085 int ret; 3086 3087 WARN_ON(!qm->qdma.va); 3088 3089 if (qm->fun_type == QM_HW_PF) { 3090 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); 3091 if (ret) 3092 return ret; 3093 } 3094 3095 ret = qm_eq_aeq_ctx_cfg(qm); 3096 if (ret) 3097 return ret; 3098 3099 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); 3100 if (ret) 3101 return ret; 3102 3103 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); 3104 if (ret) 3105 return ret; 3106 3107 qm_init_prefetch(qm); 3108 qm_enable_eq_aeq_interrupts(qm); 3109 3110 return 0; 3111 } 3112 3113 /** 3114 * hisi_qm_start() - start qm 3115 * @qm: The qm to be started. 3116 * 3117 * This function starts a qm, then we can allocate qp from this qm. 
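 *
 * Illustrative sketch (assumed probe-path code, not taken from this file):
 *
 *	ret = hisi_qm_start(qm);
 *	if (ret)
 *		return ret;
 *
 * Once the qm is started, queue pairs can be created from it, for example
 * via hisi_qm_alloc_qps_node().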
3118 */ 3119 int hisi_qm_start(struct hisi_qm *qm) 3120 { 3121 struct device *dev = &qm->pdev->dev; 3122 int ret = 0; 3123 3124 down_write(&qm->qps_lock); 3125 3126 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num); 3127 3128 if (!qm->qp_num) { 3129 dev_err(dev, "qp_num should not be 0\n"); 3130 ret = -EINVAL; 3131 goto err_unlock; 3132 } 3133 3134 ret = __hisi_qm_start(qm); 3135 if (ret) 3136 goto err_unlock; 3137 3138 atomic_set(&qm->status.flags, QM_WORK); 3139 hisi_qm_set_state(qm, QM_READY); 3140 3141 err_unlock: 3142 up_write(&qm->qps_lock); 3143 return ret; 3144 } 3145 EXPORT_SYMBOL_GPL(hisi_qm_start); 3146 3147 static int qm_restart(struct hisi_qm *qm) 3148 { 3149 struct device *dev = &qm->pdev->dev; 3150 struct hisi_qp *qp; 3151 int ret, i; 3152 3153 ret = hisi_qm_start(qm); 3154 if (ret < 0) 3155 return ret; 3156 3157 down_write(&qm->qps_lock); 3158 for (i = 0; i < qm->qp_num; i++) { 3159 qp = &qm->qp_array[i]; 3160 if (atomic_read(&qp->qp_status.flags) == QP_STOP && 3161 qp->is_resetting == true) { 3162 ret = qm_start_qp_nolock(qp, 0); 3163 if (ret < 0) { 3164 dev_err(dev, "Failed to start qp%d!\n", i); 3165 3166 up_write(&qm->qps_lock); 3167 return ret; 3168 } 3169 qp->is_resetting = false; 3170 } 3171 } 3172 up_write(&qm->qps_lock); 3173 3174 return 0; 3175 } 3176 3177 /* Stop started qps in reset flow */ 3178 static void qm_stop_started_qp(struct hisi_qm *qm) 3179 { 3180 struct hisi_qp *qp; 3181 int i; 3182 3183 for (i = 0; i < qm->qp_num; i++) { 3184 qp = &qm->qp_array[i]; 3185 if (atomic_read(&qp->qp_status.flags) == QP_START) { 3186 qp->is_resetting = true; 3187 qm_stop_qp_nolock(qp); 3188 } 3189 } 3190 } 3191 3192 /** 3193 * qm_clear_queues() - Clear all queues memory in a qm. 3194 * @qm: The qm in which the queues will be cleared. 3195 * 3196 * This function clears all queues memory in a qm. Reset of accelerator can 3197 * use this to clear queues. 3198 */ 3199 static void qm_clear_queues(struct hisi_qm *qm) 3200 { 3201 struct hisi_qp *qp; 3202 int i; 3203 3204 for (i = 0; i < qm->qp_num; i++) { 3205 qp = &qm->qp_array[i]; 3206 if (qp->is_in_kernel && qp->is_resetting) 3207 memset(qp->qdma.va, 0, qp->qdma.size); 3208 } 3209 3210 memset(qm->qdma.va, 0, qm->qdma.size); 3211 } 3212 3213 /** 3214 * hisi_qm_stop() - Stop a qm. 3215 * @qm: The qm which will be stopped. 3216 * @r: The reason to stop qm. 3217 * 3218 * This function stops qm and its qps, then qm can not accept request. 3219 * Related resources are not released at this state, we can use hisi_qm_start 3220 * to let qm start again. 3221 */ 3222 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) 3223 { 3224 struct device *dev = &qm->pdev->dev; 3225 int ret = 0; 3226 3227 down_write(&qm->qps_lock); 3228 3229 if (atomic_read(&qm->status.flags) == QM_STOP) 3230 goto err_unlock; 3231 3232 /* Stop all the request sending at first. */ 3233 atomic_set(&qm->status.flags, QM_STOP); 3234 qm->status.stop_reason = r; 3235 3236 if (qm->status.stop_reason != QM_NORMAL) { 3237 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 3238 /* 3239 * When performing soft reset, the hardware will no longer 3240 * do tasks, and the tasks in the device will be flushed 3241 * out directly since the master ooo is closed. 
3242 */ 3243 if (test_bit(QM_SUPPORT_STOP_FUNC, &qm->caps) && 3244 r != QM_SOFT_RESET) { 3245 ret = qm_drain_qm(qm); 3246 if (ret) { 3247 dev_err(dev, "failed to drain qm!\n"); 3248 goto err_unlock; 3249 } 3250 } 3251 3252 qm_stop_started_qp(qm); 3253 3254 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 3255 } 3256 3257 qm_disable_eq_aeq_interrupts(qm); 3258 if (qm->fun_type == QM_HW_PF) { 3259 ret = hisi_qm_set_vft(qm, 0, 0, 0); 3260 if (ret < 0) { 3261 dev_err(dev, "Failed to set vft!\n"); 3262 ret = -EBUSY; 3263 goto err_unlock; 3264 } 3265 } 3266 3267 qm_clear_queues(qm); 3268 qm->status.stop_reason = QM_NORMAL; 3269 3270 err_unlock: 3271 up_write(&qm->qps_lock); 3272 return ret; 3273 } 3274 EXPORT_SYMBOL_GPL(hisi_qm_stop); 3275 3276 static void qm_hw_error_init(struct hisi_qm *qm) 3277 { 3278 if (!qm->ops->hw_error_init) { 3279 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); 3280 return; 3281 } 3282 3283 qm->ops->hw_error_init(qm); 3284 } 3285 3286 static void qm_hw_error_uninit(struct hisi_qm *qm) 3287 { 3288 if (!qm->ops->hw_error_uninit) { 3289 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n"); 3290 return; 3291 } 3292 3293 qm->ops->hw_error_uninit(qm); 3294 } 3295 3296 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) 3297 { 3298 if (!qm->ops->hw_error_handle) { 3299 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); 3300 return ACC_ERR_NONE; 3301 } 3302 3303 return qm->ops->hw_error_handle(qm); 3304 } 3305 3306 /** 3307 * hisi_qm_dev_err_init() - Initialize device error configuration. 3308 * @qm: The qm for which we want to do error initialization. 3309 * 3310 * Initialize QM and device error related configuration. 3311 */ 3312 void hisi_qm_dev_err_init(struct hisi_qm *qm) 3313 { 3314 if (qm->fun_type == QM_HW_VF) 3315 return; 3316 3317 qm_hw_error_init(qm); 3318 3319 if (!qm->err_ini->hw_err_enable) { 3320 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n"); 3321 return; 3322 } 3323 qm->err_ini->hw_err_enable(qm); 3324 } 3325 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init); 3326 3327 /** 3328 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration. 3329 * @qm: The qm for which we want to do error uninitialization. 3330 * 3331 * Uninitialize QM and device error related configuration. 3332 */ 3333 void hisi_qm_dev_err_uninit(struct hisi_qm *qm) 3334 { 3335 if (qm->fun_type == QM_HW_VF) 3336 return; 3337 3338 qm_hw_error_uninit(qm); 3339 3340 if (!qm->err_ini->hw_err_disable) { 3341 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n"); 3342 return; 3343 } 3344 qm->err_ini->hw_err_disable(qm); 3345 } 3346 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit); 3347 3348 /** 3349 * hisi_qm_free_qps() - free multiple queue pairs. 3350 * @qps: The queue pairs need to be freed. 3351 * @qp_num: The num of queue pairs. 
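 *
 * Illustrative sketch (assumed caller code, not taken from this file), with
 * a hypothetical my_qm_list and algorithm type 0:
 *
 *	struct hisi_qp *qps[2];
 *	int ret;
 *
 *	ret = hisi_qm_alloc_qps_node(&my_qm_list, 2, 0, numa_node_id(), qps);
 *	if (ret)
 *		return ret;
 *	...
 *	hisi_qm_free_qps(qps, 2);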
3352 */ 3353 void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num) 3354 { 3355 int i; 3356 3357 if (!qps || qp_num <= 0) 3358 return; 3359 3360 for (i = qp_num - 1; i >= 0; i--) 3361 hisi_qm_release_qp(qps[i]); 3362 } 3363 EXPORT_SYMBOL_GPL(hisi_qm_free_qps); 3364 3365 static void free_list(struct list_head *head) 3366 { 3367 struct hisi_qm_resource *res, *tmp; 3368 3369 list_for_each_entry_safe(res, tmp, head, list) { 3370 list_del(&res->list); 3371 kfree(res); 3372 } 3373 } 3374 3375 static int hisi_qm_sort_devices(int node, struct list_head *head, 3376 struct hisi_qm_list *qm_list) 3377 { 3378 struct hisi_qm_resource *res, *tmp; 3379 struct hisi_qm *qm; 3380 struct list_head *n; 3381 struct device *dev; 3382 int dev_node; 3383 3384 list_for_each_entry(qm, &qm_list->list, list) { 3385 dev = &qm->pdev->dev; 3386 3387 dev_node = dev_to_node(dev); 3388 if (dev_node < 0) 3389 dev_node = 0; 3390 3391 res = kzalloc(sizeof(*res), GFP_KERNEL); 3392 if (!res) 3393 return -ENOMEM; 3394 3395 res->qm = qm; 3396 res->distance = node_distance(dev_node, node); 3397 n = head; 3398 list_for_each_entry(tmp, head, list) { 3399 if (res->distance < tmp->distance) { 3400 n = &tmp->list; 3401 break; 3402 } 3403 } 3404 list_add_tail(&res->list, n); 3405 } 3406 3407 return 0; 3408 } 3409 3410 /** 3411 * hisi_qm_alloc_qps_node() - Create multiple queue pairs. 3412 * @qm_list: The list of all available devices. 3413 * @qp_num: The number of queue pairs need created. 3414 * @alg_type: The algorithm type. 3415 * @node: The numa node. 3416 * @qps: The queue pairs need created. 3417 * 3418 * This function will sort all available device according to numa distance. 3419 * Then try to create all queue pairs from one device, if all devices do 3420 * not meet the requirements will return error. 3421 */ 3422 int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, 3423 u8 alg_type, int node, struct hisi_qp **qps) 3424 { 3425 struct hisi_qm_resource *tmp; 3426 int ret = -ENODEV; 3427 LIST_HEAD(head); 3428 int i; 3429 3430 if (!qps || !qm_list || qp_num <= 0) 3431 return -EINVAL; 3432 3433 mutex_lock(&qm_list->lock); 3434 if (hisi_qm_sort_devices(node, &head, qm_list)) { 3435 mutex_unlock(&qm_list->lock); 3436 goto err; 3437 } 3438 3439 list_for_each_entry(tmp, &head, list) { 3440 for (i = 0; i < qp_num; i++) { 3441 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); 3442 if (IS_ERR(qps[i])) { 3443 hisi_qm_free_qps(qps, i); 3444 break; 3445 } 3446 } 3447 3448 if (i == qp_num) { 3449 ret = 0; 3450 break; 3451 } 3452 } 3453 3454 mutex_unlock(&qm_list->lock); 3455 if (ret) 3456 pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n", 3457 node, alg_type, qp_num); 3458 3459 err: 3460 free_list(&head); 3461 return ret; 3462 } 3463 EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node); 3464 3465 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) 3466 { 3467 u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j; 3468 u32 max_qp_num = qm->max_qp_num; 3469 u32 q_base = qm->qp_num; 3470 int ret; 3471 3472 if (!num_vfs) 3473 return -EINVAL; 3474 3475 vfs_q_num = qm->ctrl_qp_num - qm->qp_num; 3476 3477 /* If vfs_q_num is less than num_vfs, return error. */ 3478 if (vfs_q_num < num_vfs) 3479 return -EINVAL; 3480 3481 q_num = vfs_q_num / num_vfs; 3482 remain_q_num = vfs_q_num % num_vfs; 3483 3484 for (i = num_vfs; i > 0; i--) { 3485 /* 3486 * if q_num + remain_q_num > max_qp_num in last vf, divide the 3487 * remaining queues equally. 
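 *
 * Worked example with hypothetical numbers: vfs_q_num = 10 queues shared by
 * num_vfs = 3 with a large max_qp_num gives q_num = 3 and remain_q_num = 1,
 * so VF3 (the first iteration, i == num_vfs) gets 3 + 1 = 4 queues and VF2
 * and VF1 get 3 each. If q_num + remain_q_num did not fit in max_qp_num,
 * the remainder would instead be handed out one extra queue per VF in the
 * following iterations.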
3488 */ 3489 if (i == num_vfs && q_num + remain_q_num <= max_qp_num) { 3490 act_q_num = q_num + remain_q_num; 3491 remain_q_num = 0; 3492 } else if (remain_q_num > 0) { 3493 act_q_num = q_num + 1; 3494 remain_q_num--; 3495 } else { 3496 act_q_num = q_num; 3497 } 3498 3499 act_q_num = min(act_q_num, max_qp_num); 3500 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num); 3501 if (ret) { 3502 for (j = num_vfs; j > i; j--) 3503 hisi_qm_set_vft(qm, j, 0, 0); 3504 return ret; 3505 } 3506 q_base += act_q_num; 3507 } 3508 3509 return 0; 3510 } 3511 3512 static int qm_clear_vft_config(struct hisi_qm *qm) 3513 { 3514 int ret; 3515 u32 i; 3516 3517 for (i = 1; i <= qm->vfs_num; i++) { 3518 ret = hisi_qm_set_vft(qm, i, 0, 0); 3519 if (ret) 3520 return ret; 3521 } 3522 qm->vfs_num = 0; 3523 3524 return 0; 3525 } 3526 3527 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos) 3528 { 3529 struct device *dev = &qm->pdev->dev; 3530 u32 ir = qos * QM_QOS_RATE; 3531 int ret, total_vfs, i; 3532 3533 total_vfs = pci_sriov_get_totalvfs(qm->pdev); 3534 if (fun_index > total_vfs) 3535 return -EINVAL; 3536 3537 qm->factor[fun_index].func_qos = qos; 3538 3539 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]); 3540 if (ret) { 3541 dev_err(dev, "failed to calculate shaper parameter!\n"); 3542 return -EINVAL; 3543 } 3544 3545 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) { 3546 /* The base number of queue reuse for different alg type */ 3547 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1); 3548 if (ret) { 3549 dev_err(dev, "type: %d, failed to set shaper vft!\n", i); 3550 return -EINVAL; 3551 } 3552 } 3553 3554 return 0; 3555 } 3556 3557 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index) 3558 { 3559 u64 cir_u = 0, cir_b = 0, cir_s = 0; 3560 u64 shaper_vft, ir_calc, ir; 3561 unsigned int val; 3562 u32 error_rate; 3563 int ret; 3564 3565 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 3566 val & BIT(0), POLL_PERIOD, 3567 POLL_TIMEOUT); 3568 if (ret) 3569 return 0; 3570 3571 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); 3572 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE); 3573 writel(fun_index, qm->io_base + QM_VFT_CFG); 3574 3575 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); 3576 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); 3577 3578 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 3579 val & BIT(0), POLL_PERIOD, 3580 POLL_TIMEOUT); 3581 if (ret) 3582 return 0; 3583 3584 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | 3585 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32); 3586 3587 cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK; 3588 cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK; 3589 cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT; 3590 3591 cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK; 3592 cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT; 3593 3594 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s); 3595 3596 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE; 3597 3598 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; 3599 if (error_rate > QM_QOS_MIN_ERROR_RATE) { 3600 pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate); 3601 return 0; 3602 } 3603 3604 return ir; 3605 } 3606 3607 static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) 3608 { 3609 struct device *dev = &qm->pdev->dev; 3610 u64 mb_cmd; 3611 u32 qos; 3612 int ret; 3613 3614 qos = qm_get_shaper_vft_qos(qm, fun_num); 3615 if (!qos) { 3616 dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num); 3617 return; 3618 } 3619 3620 
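	/*
	 * The qos value is carried in the data field of the mailbox
	 * command, shifted up by QM_MB_CMD_DATA_SHIFT.
	 */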
mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT; 3621 ret = qm_ping_single_vf(qm, mb_cmd, fun_num); 3622 if (ret) 3623 dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num); 3624 } 3625 3626 static int qm_vf_read_qos(struct hisi_qm *qm) 3627 { 3628 int cnt = 0; 3629 int ret = -EINVAL; 3630 3631 /* reset mailbox qos val */ 3632 qm->mb_qos = 0; 3633 3634 /* vf ping pf to get function qos */ 3635 ret = qm_ping_pf(qm, QM_VF_GET_QOS); 3636 if (ret) { 3637 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); 3638 return ret; 3639 } 3640 3641 while (true) { 3642 msleep(QM_WAIT_DST_ACK); 3643 if (qm->mb_qos) 3644 break; 3645 3646 if (++cnt > QM_MAX_VF_WAIT_COUNT) { 3647 pci_err(qm->pdev, "PF ping VF timeout!\n"); 3648 return -ETIMEDOUT; 3649 } 3650 } 3651 3652 return ret; 3653 } 3654 3655 static ssize_t qm_algqos_read(struct file *filp, char __user *buf, 3656 size_t count, loff_t *pos) 3657 { 3658 struct hisi_qm *qm = filp->private_data; 3659 char tbuf[QM_DBG_READ_LEN]; 3660 u32 qos_val, ir; 3661 int ret; 3662 3663 ret = hisi_qm_get_dfx_access(qm); 3664 if (ret) 3665 return ret; 3666 3667 /* Mailbox and reset cannot be operated at the same time */ 3668 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { 3669 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n"); 3670 ret = -EAGAIN; 3671 goto err_put_dfx_access; 3672 } 3673 3674 if (qm->fun_type == QM_HW_PF) { 3675 ir = qm_get_shaper_vft_qos(qm, 0); 3676 } else { 3677 ret = qm_vf_read_qos(qm); 3678 if (ret) 3679 goto err_get_status; 3680 ir = qm->mb_qos; 3681 } 3682 3683 qos_val = ir / QM_QOS_RATE; 3684 ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val); 3685 3686 ret = simple_read_from_buffer(buf, count, pos, tbuf, ret); 3687 3688 err_get_status: 3689 clear_bit(QM_RESETTING, &qm->misc_ctl); 3690 err_put_dfx_access: 3691 hisi_qm_put_dfx_access(qm); 3692 return ret; 3693 } 3694 3695 static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf, 3696 unsigned long *val, 3697 unsigned int *fun_index) 3698 { 3699 const struct bus_type *bus_type = qm->pdev->dev.bus; 3700 char tbuf_bdf[QM_DBG_READ_LEN] = {0}; 3701 char val_buf[QM_DBG_READ_LEN] = {0}; 3702 struct pci_dev *pdev; 3703 struct device *dev; 3704 int ret; 3705 3706 ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf); 3707 if (ret != QM_QOS_PARAM_NUM) 3708 return -EINVAL; 3709 3710 ret = kstrtoul(val_buf, 10, val); 3711 if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) { 3712 pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n"); 3713 return -EINVAL; 3714 } 3715 3716 dev = bus_find_device_by_name(bus_type, NULL, tbuf_bdf); 3717 if (!dev) { 3718 pci_err(qm->pdev, "input pci bdf number is error!\n"); 3719 return -ENODEV; 3720 } 3721 3722 pdev = container_of(dev, struct pci_dev, dev); 3723 3724 *fun_index = pdev->devfn; 3725 3726 return 0; 3727 } 3728 3729 static ssize_t qm_algqos_write(struct file *filp, const char __user *buf, 3730 size_t count, loff_t *pos) 3731 { 3732 struct hisi_qm *qm = filp->private_data; 3733 char tbuf[QM_DBG_READ_LEN]; 3734 unsigned int fun_index; 3735 unsigned long val; 3736 int len, ret; 3737 3738 if (*pos != 0) 3739 return 0; 3740 3741 if (count >= QM_DBG_READ_LEN) 3742 return -ENOSPC; 3743 3744 len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count); 3745 if (len < 0) 3746 return len; 3747 3748 tbuf[len] = '\0'; 3749 ret = qm_get_qos_value(qm, tbuf, &val, &fun_index); 3750 if (ret) 3751 return ret; 3752 3753 /* Mailbox and reset cannot be operated at the same time */ 3754 if (test_and_set_bit(QM_RESETTING, 
&qm->misc_ctl)) { 3755 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); 3756 return -EAGAIN; 3757 } 3758 3759 ret = qm_pm_get_sync(qm); 3760 if (ret) { 3761 ret = -EINVAL; 3762 goto err_get_status; 3763 } 3764 3765 ret = qm_func_shaper_enable(qm, fun_index, val); 3766 if (ret) { 3767 pci_err(qm->pdev, "failed to enable function shaper!\n"); 3768 ret = -EINVAL; 3769 goto err_put_sync; 3770 } 3771 3772 pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n", 3773 fun_index, val); 3774 ret = count; 3775 3776 err_put_sync: 3777 qm_pm_put_sync(qm); 3778 err_get_status: 3779 clear_bit(QM_RESETTING, &qm->misc_ctl); 3780 return ret; 3781 } 3782 3783 static const struct file_operations qm_algqos_fops = { 3784 .owner = THIS_MODULE, 3785 .open = simple_open, 3786 .read = qm_algqos_read, 3787 .write = qm_algqos_write, 3788 }; 3789 3790 /** 3791 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files. 3792 * @qm: The qm for which we want to add debugfs files. 3793 * 3794 * Create function qos debugfs files, VF ping PF to get function qos. 3795 */ 3796 void hisi_qm_set_algqos_init(struct hisi_qm *qm) 3797 { 3798 if (qm->fun_type == QM_HW_PF) 3799 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, 3800 qm, &qm_algqos_fops); 3801 else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 3802 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root, 3803 qm, &qm_algqos_fops); 3804 } 3805 3806 static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func) 3807 { 3808 int i; 3809 3810 for (i = 1; i <= total_func; i++) 3811 qm->factor[i].func_qos = QM_QOS_MAX_VAL; 3812 } 3813 3814 /** 3815 * hisi_qm_sriov_enable() - enable virtual functions 3816 * @pdev: the PCIe device 3817 * @max_vfs: the number of virtual functions to enable 3818 * 3819 * Returns the number of enabled VFs. If there are VFs enabled already or 3820 * max_vfs is more than the total number of device can be enabled, returns 3821 * failure. 3822 */ 3823 int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs) 3824 { 3825 struct hisi_qm *qm = pci_get_drvdata(pdev); 3826 int pre_existing_vfs, num_vfs, total_vfs, ret; 3827 3828 ret = qm_pm_get_sync(qm); 3829 if (ret) 3830 return ret; 3831 3832 total_vfs = pci_sriov_get_totalvfs(pdev); 3833 pre_existing_vfs = pci_num_vf(pdev); 3834 if (pre_existing_vfs) { 3835 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n", 3836 pre_existing_vfs); 3837 goto err_put_sync; 3838 } 3839 3840 if (max_vfs > total_vfs) { 3841 pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs); 3842 ret = -ERANGE; 3843 goto err_put_sync; 3844 } 3845 3846 num_vfs = max_vfs; 3847 3848 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 3849 hisi_qm_init_vf_qos(qm, num_vfs); 3850 3851 ret = qm_vf_q_assign(qm, num_vfs); 3852 if (ret) { 3853 pci_err(pdev, "Can't assign queues for VF!\n"); 3854 goto err_put_sync; 3855 } 3856 3857 ret = pci_enable_sriov(pdev, num_vfs); 3858 if (ret) { 3859 pci_err(pdev, "Can't enable VF!\n"); 3860 qm_clear_vft_config(qm); 3861 goto err_put_sync; 3862 } 3863 qm->vfs_num = num_vfs; 3864 3865 pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs); 3866 3867 return num_vfs; 3868 3869 err_put_sync: 3870 qm_pm_put_sync(qm); 3871 return ret; 3872 } 3873 EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable); 3874 3875 /** 3876 * hisi_qm_sriov_disable - disable virtual functions 3877 * @pdev: the PCI device. 3878 * @is_frozen: true when all the VFs are frozen. 3879 * 3880 * Return failure if there are VFs assigned already or VF is in used. 
 */
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	if (pci_vfs_assigned(pdev)) {
		pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
		return -EPERM;
	}

	/* While a VF is in use, SRIOV cannot be disabled. */
	if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
		pci_err(pdev, "Task is using its VF!\n");
		return -EBUSY;
	}

	pci_disable_sriov(pdev);

	qm->vfs_num = 0;
	qm_pm_put_sync(qm);

	return qm_clear_vft_config(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);

/**
 * hisi_qm_sriov_configure - configure the number of VFs
 * @pdev: The PCI device
 * @num_vfs: The number of VFs to be enabled
 *
 * Enable SR-IOV according to num_vfs, 0 means disable.
 */
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs == 0)
		return hisi_qm_sriov_disable(pdev, false);
	else
		return hisi_qm_sriov_enable(pdev, num_vfs);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);

static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
{
	if (!qm->err_ini->get_err_result) {
		dev_err(&qm->pdev->dev, "Device doesn't support reset!\n");
		return ACC_ERR_NONE;
	}

	return qm->err_ini->get_err_result(qm);
}

static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
{
	enum acc_err_result qm_ret, dev_ret;

	/* log qm error */
	qm_ret = qm_hw_error_handle(qm);

	/* log device error */
	dev_ret = qm_dev_err_handle(qm);

	return (qm_ret == ACC_ERR_NEED_RESET ||
		dev_ret == ACC_ERR_NEED_RESET) ?
		ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
}

/**
 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
 * @pdev: The PCI device which needs to report the error.
 * @state: The connectivity between CPU and device.
 *
 * We register this function into the PCIe AER handlers. It reports the device
 * or qm hardware error status when an error occurs.
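 *
 * Illustrative sketch (assumed driver glue, not taken from this file):
 * accelerator drivers usually export these helpers through their
 * struct pci_driver, e.g. with a hypothetical my_err_handler and
 * my_pci_driver:
 *
 *	static const struct pci_error_handlers my_err_handler = {
 *		.error_detected	= hisi_qm_dev_err_detected,
 *		.slot_reset	= hisi_qm_dev_slot_reset,
 *		.reset_prepare	= hisi_qm_reset_prepare,
 *	};
 *
 *	static struct pci_driver my_pci_driver = {
 *		.err_handler	 = &my_err_handler,
 *		.sriov_configure = hisi_qm_sriov_configure,
 *		...
 *	};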
3954 */ 3955 pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, 3956 pci_channel_state_t state) 3957 { 3958 struct hisi_qm *qm = pci_get_drvdata(pdev); 3959 enum acc_err_result ret; 3960 3961 if (pdev->is_virtfn) 3962 return PCI_ERS_RESULT_NONE; 3963 3964 pci_info(pdev, "PCI error detected, state(=%u)!!\n", state); 3965 if (state == pci_channel_io_perm_failure) 3966 return PCI_ERS_RESULT_DISCONNECT; 3967 3968 ret = qm_process_dev_error(qm); 3969 if (ret == ACC_ERR_NEED_RESET) 3970 return PCI_ERS_RESULT_NEED_RESET; 3971 3972 return PCI_ERS_RESULT_RECOVERED; 3973 } 3974 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected); 3975 3976 static int qm_check_req_recv(struct hisi_qm *qm) 3977 { 3978 struct pci_dev *pdev = qm->pdev; 3979 int ret; 3980 u32 val; 3981 3982 if (qm->ver >= QM_HW_V3) 3983 return 0; 3984 3985 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); 3986 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, 3987 (val == ACC_VENDOR_ID_VALUE), 3988 POLL_PERIOD, POLL_TIMEOUT); 3989 if (ret) { 3990 dev_err(&pdev->dev, "Fails to read QM reg!\n"); 3991 return ret; 3992 } 3993 3994 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); 3995 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, 3996 (val == PCI_VENDOR_ID_HUAWEI), 3997 POLL_PERIOD, POLL_TIMEOUT); 3998 if (ret) 3999 dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); 4000 4001 return ret; 4002 } 4003 4004 static int qm_set_pf_mse(struct hisi_qm *qm, bool set) 4005 { 4006 struct pci_dev *pdev = qm->pdev; 4007 u16 cmd; 4008 int i; 4009 4010 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 4011 if (set) 4012 cmd |= PCI_COMMAND_MEMORY; 4013 else 4014 cmd &= ~PCI_COMMAND_MEMORY; 4015 4016 pci_write_config_word(pdev, PCI_COMMAND, cmd); 4017 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 4018 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 4019 if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1)) 4020 return 0; 4021 4022 udelay(1); 4023 } 4024 4025 return -ETIMEDOUT; 4026 } 4027 4028 static int qm_set_vf_mse(struct hisi_qm *qm, bool set) 4029 { 4030 struct pci_dev *pdev = qm->pdev; 4031 u16 sriov_ctrl; 4032 int pos; 4033 int i; 4034 4035 /* 4036 * Since function qm_set_vf_mse is called only after SRIOV is enabled, 4037 * pci_find_ext_capability cannot return 0, pos does not need to be 4038 * checked. 
4039 */ 4040 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 4041 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); 4042 if (set) 4043 sriov_ctrl |= PCI_SRIOV_CTRL_MSE; 4044 else 4045 sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE; 4046 pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl); 4047 4048 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 4049 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); 4050 if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >> 4051 ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT) 4052 return 0; 4053 4054 udelay(1); 4055 } 4056 4057 return -ETIMEDOUT; 4058 } 4059 4060 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) 4061 { 4062 u32 nfe_enb = 0; 4063 4064 /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ 4065 if (qm->ver >= QM_HW_V3) 4066 return; 4067 4068 if (!qm->err_status.is_dev_ecc_mbit && 4069 qm->err_status.is_qm_ecc_mbit && 4070 qm->err_ini->close_axi_master_ooo) { 4071 qm->err_ini->close_axi_master_ooo(qm); 4072 } else if (qm->err_status.is_dev_ecc_mbit && 4073 !qm->err_status.is_qm_ecc_mbit && 4074 !qm->err_ini->close_axi_master_ooo) { 4075 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); 4076 writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, 4077 qm->io_base + QM_RAS_NFE_ENABLE); 4078 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); 4079 } 4080 } 4081 4082 static int qm_vf_reset_prepare(struct hisi_qm *qm, 4083 enum qm_stop_reason stop_reason) 4084 { 4085 struct hisi_qm_list *qm_list = qm->qm_list; 4086 struct pci_dev *pdev = qm->pdev; 4087 struct pci_dev *virtfn; 4088 struct hisi_qm *vf_qm; 4089 int ret = 0; 4090 4091 mutex_lock(&qm_list->lock); 4092 list_for_each_entry(vf_qm, &qm_list->list, list) { 4093 virtfn = vf_qm->pdev; 4094 if (virtfn == pdev) 4095 continue; 4096 4097 if (pci_physfn(virtfn) == pdev) { 4098 /* save VFs PCIE BAR configuration */ 4099 pci_save_state(virtfn); 4100 4101 ret = hisi_qm_stop(vf_qm, stop_reason); 4102 if (ret) 4103 goto stop_fail; 4104 } 4105 } 4106 4107 stop_fail: 4108 mutex_unlock(&qm_list->lock); 4109 return ret; 4110 } 4111 4112 static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd, 4113 enum qm_stop_reason stop_reason) 4114 { 4115 struct pci_dev *pdev = qm->pdev; 4116 int ret; 4117 4118 if (!qm->vfs_num) 4119 return 0; 4120 4121 /* Kunpeng930 supports to notify VFs to stop before PF reset */ 4122 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { 4123 ret = qm_ping_all_vfs(qm, cmd); 4124 if (ret) 4125 pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n"); 4126 } else { 4127 ret = qm_vf_reset_prepare(qm, stop_reason); 4128 if (ret) 4129 pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret); 4130 } 4131 4132 return ret; 4133 } 4134 4135 static int qm_controller_reset_prepare(struct hisi_qm *qm) 4136 { 4137 struct pci_dev *pdev = qm->pdev; 4138 int ret; 4139 4140 ret = qm_reset_prepare_ready(qm); 4141 if (ret) { 4142 pci_err(pdev, "Controller reset not ready!\n"); 4143 return ret; 4144 } 4145 4146 qm_dev_ecc_mbit_handle(qm); 4147 4148 /* PF obtains the information of VF by querying the register. */ 4149 qm_cmd_uninit(qm); 4150 4151 /* Whether VFs stop successfully, soft reset will continue. 
*/ 4152 ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET); 4153 if (ret) 4154 pci_err(pdev, "failed to stop vfs by pf in soft reset.\n"); 4155 4156 ret = hisi_qm_stop(qm, QM_SOFT_RESET); 4157 if (ret) { 4158 pci_err(pdev, "Fails to stop QM!\n"); 4159 qm_reset_bit_clear(qm); 4160 return ret; 4161 } 4162 4163 if (qm->use_sva) { 4164 ret = qm_hw_err_isolate(qm); 4165 if (ret) 4166 pci_err(pdev, "failed to isolate hw err!\n"); 4167 } 4168 4169 ret = qm_wait_vf_prepare_finish(qm); 4170 if (ret) 4171 pci_err(pdev, "failed to stop by vfs in soft reset!\n"); 4172 4173 clear_bit(QM_RST_SCHED, &qm->misc_ctl); 4174 4175 return 0; 4176 } 4177 4178 static int qm_master_ooo_check(struct hisi_qm *qm) 4179 { 4180 u32 val; 4181 int ret; 4182 4183 /* Check the ooo register of the device before resetting the device. */ 4184 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL); 4185 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, 4186 val, (val == ACC_MASTER_TRANS_RETURN_RW), 4187 POLL_PERIOD, POLL_TIMEOUT); 4188 if (ret) 4189 pci_warn(qm->pdev, "Bus lock! Please reset system.\n"); 4190 4191 return ret; 4192 } 4193 4194 static int qm_soft_reset_prepare(struct hisi_qm *qm) 4195 { 4196 struct pci_dev *pdev = qm->pdev; 4197 int ret; 4198 4199 /* Ensure all doorbells and mailboxes received by QM */ 4200 ret = qm_check_req_recv(qm); 4201 if (ret) 4202 return ret; 4203 4204 if (qm->vfs_num) { 4205 ret = qm_set_vf_mse(qm, false); 4206 if (ret) { 4207 pci_err(pdev, "Fails to disable vf MSE bit.\n"); 4208 return ret; 4209 } 4210 } 4211 4212 ret = qm->ops->set_msi(qm, false); 4213 if (ret) { 4214 pci_err(pdev, "Fails to disable PEH MSI bit.\n"); 4215 return ret; 4216 } 4217 4218 ret = qm_master_ooo_check(qm); 4219 if (ret) 4220 return ret; 4221 4222 if (qm->err_ini->close_sva_prefetch) 4223 qm->err_ini->close_sva_prefetch(qm); 4224 4225 ret = qm_set_pf_mse(qm, false); 4226 if (ret) 4227 pci_err(pdev, "Fails to disable pf MSE bit.\n"); 4228 4229 return ret; 4230 } 4231 4232 static int qm_reset_device(struct hisi_qm *qm) 4233 { 4234 struct pci_dev *pdev = qm->pdev; 4235 4236 /* The reset related sub-control registers are not in PCI BAR */ 4237 if (ACPI_HANDLE(&pdev->dev)) { 4238 unsigned long long value = 0; 4239 acpi_status s; 4240 4241 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), 4242 qm->err_info.acpi_rst, 4243 NULL, &value); 4244 if (ACPI_FAILURE(s)) { 4245 pci_err(pdev, "NO controller reset method!\n"); 4246 return -EIO; 4247 } 4248 4249 if (value) { 4250 pci_err(pdev, "Reset step %llu failed!\n", value); 4251 return -EIO; 4252 } 4253 4254 return 0; 4255 } 4256 4257 pci_err(pdev, "No reset method!\n"); 4258 return -EINVAL; 4259 } 4260 4261 static int qm_soft_reset(struct hisi_qm *qm) 4262 { 4263 int ret; 4264 4265 ret = qm_soft_reset_prepare(qm); 4266 if (ret) 4267 return ret; 4268 4269 return qm_reset_device(qm); 4270 } 4271 4272 static int qm_vf_reset_done(struct hisi_qm *qm) 4273 { 4274 struct hisi_qm_list *qm_list = qm->qm_list; 4275 struct pci_dev *pdev = qm->pdev; 4276 struct pci_dev *virtfn; 4277 struct hisi_qm *vf_qm; 4278 int ret = 0; 4279 4280 mutex_lock(&qm_list->lock); 4281 list_for_each_entry(vf_qm, &qm_list->list, list) { 4282 virtfn = vf_qm->pdev; 4283 if (virtfn == pdev) 4284 continue; 4285 4286 if (pci_physfn(virtfn) == pdev) { 4287 /* enable VFs PCIE BAR configuration */ 4288 pci_restore_state(virtfn); 4289 4290 ret = qm_restart(vf_qm); 4291 if (ret) 4292 goto restart_fail; 4293 } 4294 } 4295 4296 restart_fail: 4297 
mutex_unlock(&qm_list->lock); 4298 return ret; 4299 } 4300 4301 static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd) 4302 { 4303 struct pci_dev *pdev = qm->pdev; 4304 int ret; 4305 4306 if (!qm->vfs_num) 4307 return 0; 4308 4309 ret = qm_vf_q_assign(qm, qm->vfs_num); 4310 if (ret) { 4311 pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret); 4312 return ret; 4313 } 4314 4315 /* Kunpeng930 supports to notify VFs to start after PF reset. */ 4316 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { 4317 ret = qm_ping_all_vfs(qm, cmd); 4318 if (ret) 4319 pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n"); 4320 } else { 4321 ret = qm_vf_reset_done(qm); 4322 if (ret) 4323 pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret); 4324 } 4325 4326 return ret; 4327 } 4328 4329 static int qm_dev_hw_init(struct hisi_qm *qm) 4330 { 4331 return qm->err_ini->hw_init(qm); 4332 } 4333 4334 static void qm_restart_prepare(struct hisi_qm *qm) 4335 { 4336 u32 value; 4337 4338 if (qm->err_ini->open_sva_prefetch) 4339 qm->err_ini->open_sva_prefetch(qm); 4340 4341 if (qm->ver >= QM_HW_V3) 4342 return; 4343 4344 if (!qm->err_status.is_qm_ecc_mbit && 4345 !qm->err_status.is_dev_ecc_mbit) 4346 return; 4347 4348 /* temporarily close the OOO port used for PEH to write out MSI */ 4349 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4350 writel(value & ~qm->err_info.msi_wr_port, 4351 qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4352 4353 /* clear dev ecc 2bit error source if having */ 4354 value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask; 4355 if (value && qm->err_ini->clear_dev_hw_err_status) 4356 qm->err_ini->clear_dev_hw_err_status(qm, value); 4357 4358 /* clear QM ecc mbit error source */ 4359 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); 4360 4361 /* clear AM Reorder Buffer ecc mbit source */ 4362 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); 4363 } 4364 4365 static void qm_restart_done(struct hisi_qm *qm) 4366 { 4367 u32 value; 4368 4369 if (qm->ver >= QM_HW_V3) 4370 goto clear_flags; 4371 4372 if (!qm->err_status.is_qm_ecc_mbit && 4373 !qm->err_status.is_dev_ecc_mbit) 4374 return; 4375 4376 /* open the OOO port for PEH to write out MSI */ 4377 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4378 value |= qm->err_info.msi_wr_port; 4379 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4380 4381 clear_flags: 4382 qm->err_status.is_qm_ecc_mbit = false; 4383 qm->err_status.is_dev_ecc_mbit = false; 4384 } 4385 4386 static int qm_controller_reset_done(struct hisi_qm *qm) 4387 { 4388 struct pci_dev *pdev = qm->pdev; 4389 int ret; 4390 4391 ret = qm->ops->set_msi(qm, true); 4392 if (ret) { 4393 pci_err(pdev, "Fails to enable PEH MSI bit!\n"); 4394 return ret; 4395 } 4396 4397 ret = qm_set_pf_mse(qm, true); 4398 if (ret) { 4399 pci_err(pdev, "Fails to enable pf MSE bit!\n"); 4400 return ret; 4401 } 4402 4403 if (qm->vfs_num) { 4404 ret = qm_set_vf_mse(qm, true); 4405 if (ret) { 4406 pci_err(pdev, "Fails to enable vf MSE bit!\n"); 4407 return ret; 4408 } 4409 } 4410 4411 ret = qm_dev_hw_init(qm); 4412 if (ret) { 4413 pci_err(pdev, "Failed to init device\n"); 4414 return ret; 4415 } 4416 4417 qm_restart_prepare(qm); 4418 hisi_qm_dev_err_init(qm); 4419 if (qm->err_ini->open_axi_master_ooo) 4420 qm->err_ini->open_axi_master_ooo(qm); 4421 4422 ret = qm_dev_mem_reset(qm); 4423 if (ret) { 4424 pci_err(pdev, "failed to reset device memory\n"); 4425 return ret; 4426 } 4427 4428 ret = qm_restart(qm); 4429 if (ret) { 4430 pci_err(pdev, "Failed to 
start QM!\n"); 4431 return ret; 4432 } 4433 4434 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); 4435 if (ret) 4436 pci_err(pdev, "failed to start vfs by pf in soft reset.\n"); 4437 4438 ret = qm_wait_vf_prepare_finish(qm); 4439 if (ret) 4440 pci_err(pdev, "failed to start by vfs in soft reset!\n"); 4441 4442 qm_cmd_init(qm); 4443 qm_restart_done(qm); 4444 4445 qm_reset_bit_clear(qm); 4446 4447 return 0; 4448 } 4449 4450 static int qm_controller_reset(struct hisi_qm *qm) 4451 { 4452 struct pci_dev *pdev = qm->pdev; 4453 int ret; 4454 4455 pci_info(pdev, "Controller resetting...\n"); 4456 4457 ret = qm_controller_reset_prepare(qm); 4458 if (ret) { 4459 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 4460 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 4461 clear_bit(QM_RST_SCHED, &qm->misc_ctl); 4462 return ret; 4463 } 4464 4465 hisi_qm_show_last_dfx_regs(qm); 4466 if (qm->err_ini->show_last_dfx_regs) 4467 qm->err_ini->show_last_dfx_regs(qm); 4468 4469 ret = qm_soft_reset(qm); 4470 if (ret) 4471 goto err_reset; 4472 4473 ret = qm_controller_reset_done(qm); 4474 if (ret) 4475 goto err_reset; 4476 4477 pci_info(pdev, "Controller reset complete\n"); 4478 4479 return 0; 4480 4481 err_reset: 4482 pci_err(pdev, "Controller reset failed (%d)\n", ret); 4483 qm_reset_bit_clear(qm); 4484 4485 /* if resetting fails, isolate the device */ 4486 if (qm->use_sva) 4487 qm->isolate_data.is_isolate = true; 4488 return ret; 4489 } 4490 4491 /** 4492 * hisi_qm_dev_slot_reset() - slot reset 4493 * @pdev: the PCIe device 4494 * 4495 * This function offers QM relate PCIe device reset interface. Drivers which 4496 * use QM can use this function as slot_reset in its struct pci_error_handlers. 4497 */ 4498 pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev) 4499 { 4500 struct hisi_qm *qm = pci_get_drvdata(pdev); 4501 int ret; 4502 4503 if (pdev->is_virtfn) 4504 return PCI_ERS_RESULT_RECOVERED; 4505 4506 /* reset pcie device controller */ 4507 ret = qm_controller_reset(qm); 4508 if (ret) { 4509 pci_err(pdev, "Controller reset failed (%d)\n", ret); 4510 return PCI_ERS_RESULT_DISCONNECT; 4511 } 4512 4513 return PCI_ERS_RESULT_RECOVERED; 4514 } 4515 EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset); 4516 4517 void hisi_qm_reset_prepare(struct pci_dev *pdev) 4518 { 4519 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); 4520 struct hisi_qm *qm = pci_get_drvdata(pdev); 4521 u32 delay = 0; 4522 int ret; 4523 4524 hisi_qm_dev_err_uninit(pf_qm); 4525 4526 /* 4527 * Check whether there is an ECC mbit error, If it occurs, need to 4528 * wait for soft reset to fix it. 4529 */ 4530 while (qm_check_dev_error(pf_qm)) { 4531 msleep(++delay); 4532 if (delay > QM_RESET_WAIT_TIMEOUT) 4533 return; 4534 } 4535 4536 ret = qm_reset_prepare_ready(qm); 4537 if (ret) { 4538 pci_err(pdev, "FLR not ready!\n"); 4539 return; 4540 } 4541 4542 /* PF obtains the information of VF by querying the register. 
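 * Mailbox command processing on the PF is stopped here (qm_cmd_uninit()) and re-enabled in hisi_qm_reset_done().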
*/ 4543 if (qm->fun_type == QM_HW_PF) 4544 qm_cmd_uninit(qm); 4545 4546 ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN); 4547 if (ret) 4548 pci_err(pdev, "failed to stop vfs by pf in FLR.\n"); 4549 4550 ret = hisi_qm_stop(qm, QM_DOWN); 4551 if (ret) { 4552 pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret); 4553 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 4554 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 4555 return; 4556 } 4557 4558 ret = qm_wait_vf_prepare_finish(qm); 4559 if (ret) 4560 pci_err(pdev, "failed to stop by vfs in FLR!\n"); 4561 4562 pci_info(pdev, "FLR resetting...\n"); 4563 } 4564 EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare); 4565 4566 static bool qm_flr_reset_complete(struct pci_dev *pdev) 4567 { 4568 struct pci_dev *pf_pdev = pci_physfn(pdev); 4569 struct hisi_qm *qm = pci_get_drvdata(pf_pdev); 4570 u32 id; 4571 4572 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); 4573 if (id == QM_PCI_COMMAND_INVALID) { 4574 pci_err(pdev, "Device can not be used!\n"); 4575 return false; 4576 } 4577 4578 return true; 4579 } 4580 4581 void hisi_qm_reset_done(struct pci_dev *pdev) 4582 { 4583 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); 4584 struct hisi_qm *qm = pci_get_drvdata(pdev); 4585 int ret; 4586 4587 if (qm->fun_type == QM_HW_PF) { 4588 ret = qm_dev_hw_init(qm); 4589 if (ret) { 4590 pci_err(pdev, "Failed to init PF, ret = %d.\n", ret); 4591 goto flr_done; 4592 } 4593 } 4594 4595 hisi_qm_dev_err_init(pf_qm); 4596 4597 ret = qm_restart(qm); 4598 if (ret) { 4599 pci_err(pdev, "Failed to start QM, ret = %d.\n", ret); 4600 goto flr_done; 4601 } 4602 4603 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); 4604 if (ret) 4605 pci_err(pdev, "failed to start vfs by pf in FLR.\n"); 4606 4607 ret = qm_wait_vf_prepare_finish(qm); 4608 if (ret) 4609 pci_err(pdev, "failed to start by vfs in FLR!\n"); 4610 4611 flr_done: 4612 if (qm->fun_type == QM_HW_PF) 4613 qm_cmd_init(qm); 4614 4615 if (qm_flr_reset_complete(pdev)) 4616 pci_info(pdev, "FLR reset complete\n"); 4617 4618 qm_reset_bit_clear(qm); 4619 } 4620 EXPORT_SYMBOL_GPL(hisi_qm_reset_done); 4621 4622 static irqreturn_t qm_abnormal_irq(int irq, void *data) 4623 { 4624 struct hisi_qm *qm = data; 4625 enum acc_err_result ret; 4626 4627 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); 4628 ret = qm_process_dev_error(qm); 4629 if (ret == ACC_ERR_NEED_RESET && 4630 !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) && 4631 !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl)) 4632 schedule_work(&qm->rst_work); 4633 4634 return IRQ_HANDLED; 4635 } 4636 4637 /** 4638 * hisi_qm_dev_shutdown() - Shutdown device. 4639 * @pdev: The device will be shutdown. 4640 * 4641 * This function will stop qm when OS shutdown or rebooting. 
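 * Drivers using the QM typically hook this up as the .shutdown callback of their struct pci_driver.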
4642 */ 4643 void hisi_qm_dev_shutdown(struct pci_dev *pdev) 4644 { 4645 struct hisi_qm *qm = pci_get_drvdata(pdev); 4646 int ret; 4647 4648 ret = hisi_qm_stop(qm, QM_DOWN); 4649 if (ret) 4650 dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n"); 4651 4652 hisi_qm_cache_wb(qm); 4653 } 4654 EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown); 4655 4656 static void hisi_qm_controller_reset(struct work_struct *rst_work) 4657 { 4658 struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work); 4659 int ret; 4660 4661 ret = qm_pm_get_sync(qm); 4662 if (ret) { 4663 clear_bit(QM_RST_SCHED, &qm->misc_ctl); 4664 return; 4665 } 4666 4667 /* reset pcie device controller */ 4668 ret = qm_controller_reset(qm); 4669 if (ret) 4670 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); 4671 4672 qm_pm_put_sync(qm); 4673 } 4674 4675 static void qm_pf_reset_vf_prepare(struct hisi_qm *qm, 4676 enum qm_stop_reason stop_reason) 4677 { 4678 enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE; 4679 struct pci_dev *pdev = qm->pdev; 4680 int ret; 4681 4682 ret = qm_reset_prepare_ready(qm); 4683 if (ret) { 4684 dev_err(&pdev->dev, "reset prepare not ready!\n"); 4685 atomic_set(&qm->status.flags, QM_STOP); 4686 cmd = QM_VF_PREPARE_FAIL; 4687 goto err_prepare; 4688 } 4689 4690 ret = hisi_qm_stop(qm, stop_reason); 4691 if (ret) { 4692 dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret); 4693 atomic_set(&qm->status.flags, QM_STOP); 4694 cmd = QM_VF_PREPARE_FAIL; 4695 goto err_prepare; 4696 } else { 4697 goto out; 4698 } 4699 4700 err_prepare: 4701 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 4702 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 4703 out: 4704 pci_save_state(pdev); 4705 ret = qm_ping_pf(qm, cmd); 4706 if (ret) 4707 dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n"); 4708 } 4709 4710 static void qm_pf_reset_vf_done(struct hisi_qm *qm) 4711 { 4712 enum qm_mb_cmd cmd = QM_VF_START_DONE; 4713 struct pci_dev *pdev = qm->pdev; 4714 int ret; 4715 4716 pci_restore_state(pdev); 4717 ret = hisi_qm_start(qm); 4718 if (ret) { 4719 dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret); 4720 cmd = QM_VF_START_FAIL; 4721 } 4722 4723 qm_cmd_init(qm); 4724 ret = qm_ping_pf(qm, cmd); 4725 if (ret) 4726 dev_warn(&pdev->dev, "PF responds timeout in reset done!\n"); 4727 4728 qm_reset_bit_clear(qm); 4729 } 4730 4731 static int qm_wait_pf_reset_finish(struct hisi_qm *qm) 4732 { 4733 struct device *dev = &qm->pdev->dev; 4734 u32 val, cmd; 4735 u64 msg; 4736 int ret; 4737 4738 /* Wait for reset to finish */ 4739 ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val, 4740 val == BIT(0), QM_VF_RESET_WAIT_US, 4741 QM_VF_RESET_WAIT_TIMEOUT_US); 4742 /* hardware completion status should be available by this time */ 4743 if (ret) { 4744 dev_err(dev, "couldn't get reset done status from PF, timeout!\n"); 4745 return -ETIMEDOUT; 4746 } 4747 4748 /* 4749 * Whether message is got successfully, 4750 * VF needs to ack PF by clearing the interrupt. 
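 * Clearing it unconditionally keeps the PF/VF IFC interrupt state consistent even if the mailbox read fails.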
*/ 4752 ret = qm_get_mb_cmd(qm, &msg, 0); 4753 qm_clear_cmd_interrupt(qm, 0); 4754 if (ret) { 4755 dev_err(dev, "failed to get msg from PF in reset done!\n"); 4756 return ret; 4757 } 4758 4759 cmd = msg & QM_MB_CMD_DATA_MASK; 4760 if (cmd != QM_PF_RESET_DONE) { 4761 dev_err(dev, "the cmd(%u) is not reset done!\n", cmd); 4762 ret = -EINVAL; 4763 } 4764 4765 return ret; 4766 } 4767 4768 static void qm_pf_reset_vf_process(struct hisi_qm *qm, 4769 enum qm_stop_reason stop_reason) 4770 { 4771 struct device *dev = &qm->pdev->dev; 4772 int ret; 4773 4774 dev_info(dev, "device reset start...\n"); 4775 4776 /* The message is obtained by querying the register during resetting */ 4777 qm_cmd_uninit(qm); 4778 qm_pf_reset_vf_prepare(qm, stop_reason); 4779 4780 ret = qm_wait_pf_reset_finish(qm); 4781 if (ret) 4782 goto err_get_status; 4783 4784 qm_pf_reset_vf_done(qm); 4785 4786 dev_info(dev, "device reset done.\n"); 4787 4788 return; 4789 4790 err_get_status: 4791 qm_cmd_init(qm); 4792 qm_reset_bit_clear(qm); 4793 } 4794 4795 static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) 4796 { 4797 struct device *dev = &qm->pdev->dev; 4798 u64 msg; 4799 u32 cmd; 4800 int ret; 4801 4802 /* 4803 * Get the msg from the source by mailbox. Whether or not the message is 4804 * received successfully, the destination must ack the source by clearing the interrupt. 4805 */ 4806 ret = qm_get_mb_cmd(qm, &msg, fun_num); 4807 qm_clear_cmd_interrupt(qm, BIT(fun_num)); 4808 if (ret) { 4809 dev_err(dev, "failed to get msg from source!\n"); 4810 return; 4811 } 4812 4813 cmd = msg & QM_MB_CMD_DATA_MASK; 4814 switch (cmd) { 4815 case QM_PF_FLR_PREPARE: 4816 qm_pf_reset_vf_process(qm, QM_DOWN); 4817 break; 4818 case QM_PF_SRST_PREPARE: 4819 qm_pf_reset_vf_process(qm, QM_SOFT_RESET); 4820 break; 4821 case QM_VF_GET_QOS: 4822 qm_vf_get_qos(qm, fun_num); 4823 break; 4824 case QM_PF_SET_QOS: 4825 qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT; 4826 break; 4827 default: 4828 dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num); 4829 break; 4830 } 4831 } 4832 4833 static void qm_cmd_process(struct work_struct *cmd_process) 4834 { 4835 struct hisi_qm *qm = container_of(cmd_process, 4836 struct hisi_qm, cmd_process); 4837 u32 vfs_num = qm->vfs_num; 4838 u64 val; 4839 u32 i; 4840 4841 if (qm->fun_type == QM_HW_PF) { 4842 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); 4843 if (!val) 4844 return; 4845 4846 for (i = 1; i <= vfs_num; i++) { 4847 if (val & BIT(i)) 4848 qm_handle_cmd_msg(qm, i); 4849 } 4850 4851 return; 4852 } 4853 4854 qm_handle_cmd_msg(qm, 0); 4855 } 4856 4857 /** 4858 * hisi_qm_alg_register() - Register alg to crypto. 4859 * @qm: The qm to be registered. 4860 * @qm_list: The qm list. 4861 * @guard: Guard of qp_num. 4862 * 4863 * Register the algorithm to crypto when the function satisfies the guard. 4864 */ 4865 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard) 4866 { 4867 struct device *dev = &qm->pdev->dev; 4868 4869 if (qm->ver <= QM_HW_V2 && qm->use_sva) { 4870 dev_info(dev, "HW V2 not both use uacce sva mode and hardware crypto algs.\n"); 4871 return 0; 4872 } 4873 4874 if (qm->qp_num < guard) { 4875 dev_info(dev, "qp_num is less than task need.\n"); 4876 return 0; 4877 } 4878 4879 return qm_list->register_to_crypto(qm); 4880 } 4881 EXPORT_SYMBOL_GPL(hisi_qm_alg_register); 4882 4883 /** 4884 * hisi_qm_alg_unregister() - Unregister alg from crypto. 4885 * @qm: The qm to be unregistered. 4886 * @qm_list: The qm list. 4887 * @guard: Guard of qp_num.
4888 * 4889 * Unregister the algorithm from crypto when the last function satisfies the guard. 4890 */ 4891 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard) 4892 { 4893 if (qm->ver <= QM_HW_V2 && qm->use_sva) 4894 return; 4895 4896 if (qm->qp_num < guard) 4897 return; 4898 4899 qm_list->unregister_from_crypto(qm); 4900 } 4901 EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister); 4902 4903 static void qm_unregister_abnormal_irq(struct hisi_qm *qm) 4904 { 4905 struct pci_dev *pdev = qm->pdev; 4906 u32 irq_vector, val; 4907 4908 if (qm->fun_type == QM_HW_VF) 4909 return; 4910 4911 val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val; 4912 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) 4913 return; 4914 4915 irq_vector = val & QM_IRQ_VECTOR_MASK; 4916 free_irq(pci_irq_vector(pdev, irq_vector), qm); 4917 } 4918 4919 static int qm_register_abnormal_irq(struct hisi_qm *qm) 4920 { 4921 struct pci_dev *pdev = qm->pdev; 4922 u32 irq_vector, val; 4923 int ret; 4924 4925 if (qm->fun_type == QM_HW_VF) 4926 return 0; 4927 4928 val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val; 4929 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) 4930 return 0; 4931 4932 irq_vector = val & QM_IRQ_VECTOR_MASK; 4933 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm); 4934 if (ret) 4935 dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret); 4936 4937 return ret; 4938 } 4939 4940 static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm) 4941 { 4942 struct pci_dev *pdev = qm->pdev; 4943 u32 irq_vector, val; 4944 4945 val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val; 4946 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 4947 return; 4948 4949 irq_vector = val & QM_IRQ_VECTOR_MASK; 4950 free_irq(pci_irq_vector(pdev, irq_vector), qm); 4951 } 4952 4953 static int qm_register_mb_cmd_irq(struct hisi_qm *qm) 4954 { 4955 struct pci_dev *pdev = qm->pdev; 4956 u32 irq_vector, val; 4957 int ret; 4958 4959 val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val; 4960 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 4961 return 0; 4962 4963 irq_vector = val & QM_IRQ_VECTOR_MASK; 4964 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm); 4965 if (ret) 4966 dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret); 4967 4968 return ret; 4969 } 4970 4971 static void qm_unregister_aeq_irq(struct hisi_qm *qm) 4972 { 4973 struct pci_dev *pdev = qm->pdev; 4974 u32 irq_vector, val; 4975 4976 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val; 4977 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 4978 return; 4979 4980 irq_vector = val & QM_IRQ_VECTOR_MASK; 4981 free_irq(pci_irq_vector(pdev, irq_vector), qm); 4982 } 4983 4984 static int qm_register_aeq_irq(struct hisi_qm *qm) 4985 { 4986 struct pci_dev *pdev = qm->pdev; 4987 u32 irq_vector, val; 4988 int ret; 4989 4990 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val; 4991 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 4992 return 0; 4993 4994 irq_vector = val & QM_IRQ_VECTOR_MASK; 4995 ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), NULL, 4996 qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm); 4997 if (ret) 4998 dev_err(&pdev->dev, "failed to request aeq irq, ret = %d", ret); 4999 5000 return ret; 5001 } 5002 5003 static void qm_unregister_eq_irq(struct hisi_qm *qm) 5004 { 5005 struct pci_dev *pdev = qm->pdev; 5006 u32 irq_vector, val; 5007 5008 val =
qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val; 5009 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 5010 return; 5011 5012 irq_vector = val & QM_IRQ_VECTOR_MASK; 5013 free_irq(pci_irq_vector(pdev, irq_vector), qm); 5014 } 5015 5016 static int qm_register_eq_irq(struct hisi_qm *qm) 5017 { 5018 struct pci_dev *pdev = qm->pdev; 5019 u32 irq_vector, val; 5020 int ret; 5021 5022 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val; 5023 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 5024 return 0; 5025 5026 irq_vector = val & QM_IRQ_VECTOR_MASK; 5027 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm); 5028 if (ret) 5029 dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); 5030 5031 return ret; 5032 } 5033 5034 static void qm_irqs_unregister(struct hisi_qm *qm) 5035 { 5036 qm_unregister_mb_cmd_irq(qm); 5037 qm_unregister_abnormal_irq(qm); 5038 qm_unregister_aeq_irq(qm); 5039 qm_unregister_eq_irq(qm); 5040 } 5041 5042 static int qm_irqs_register(struct hisi_qm *qm) 5043 { 5044 int ret; 5045 5046 ret = qm_register_eq_irq(qm); 5047 if (ret) 5048 return ret; 5049 5050 ret = qm_register_aeq_irq(qm); 5051 if (ret) 5052 goto free_eq_irq; 5053 5054 ret = qm_register_abnormal_irq(qm); 5055 if (ret) 5056 goto free_aeq_irq; 5057 5058 ret = qm_register_mb_cmd_irq(qm); 5059 if (ret) 5060 goto free_abnormal_irq; 5061 5062 return 0; 5063 5064 free_abnormal_irq: 5065 qm_unregister_abnormal_irq(qm); 5066 free_aeq_irq: 5067 qm_unregister_aeq_irq(qm); 5068 free_eq_irq: 5069 qm_unregister_eq_irq(qm); 5070 return ret; 5071 } 5072 5073 static int qm_get_qp_num(struct hisi_qm *qm) 5074 { 5075 struct device *dev = &qm->pdev->dev; 5076 bool is_db_isolation; 5077 5078 /* VF's qp_num assigned by PF in v2, and VF can get qp_num by vft. 
*/ 5079 if (qm->fun_type == QM_HW_VF) { 5080 if (qm->ver != QM_HW_V1) 5081 /* v2 starts to support get vft by mailbox */ 5082 return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); 5083 5084 return 0; 5085 } 5086 5087 is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); 5088 qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true); 5089 qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, 5090 QM_FUNC_MAX_QP_CAP, is_db_isolation); 5091 5092 if (qm->qp_num <= qm->max_qp_num) 5093 return 0; 5094 5095 if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) { 5096 /* Check whether the set qp number is valid */ 5097 dev_err(dev, "qp num(%u) is more than max qp num(%u)!\n", 5098 qm->qp_num, qm->max_qp_num); 5099 return -EINVAL; 5100 } 5101 5102 dev_info(dev, "Default qp num(%u) is too big, reset it to Function's max qp num(%u)!\n", 5103 qm->qp_num, qm->max_qp_num); 5104 qm->qp_num = qm->max_qp_num; 5105 qm->debug.curr_qm_qp_num = qm->qp_num; 5106 5107 return 0; 5108 } 5109 5110 static int qm_pre_store_caps(struct hisi_qm *qm) 5111 { 5112 struct hisi_qm_cap_record *qm_cap; 5113 struct pci_dev *pdev = qm->pdev; 5114 size_t i, size; 5115 5116 size = ARRAY_SIZE(qm_cap_query_info); 5117 qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL); 5118 if (!qm_cap) 5119 return -ENOMEM; 5120 5121 for (i = 0; i < size; i++) { 5122 qm_cap[i].type = qm_cap_query_info[i].type; 5123 qm_cap[i].name = qm_cap_query_info[i].name; 5124 qm_cap[i].cap_val = hisi_qm_get_cap_value(qm, qm_cap_query_info, 5125 i, qm->cap_ver); 5126 } 5127 5128 qm->cap_tables.qm_cap_table = qm_cap; 5129 qm->cap_tables.qm_cap_size = size; 5130 5131 return 0; 5132 } 5133 5134 static int qm_get_hw_caps(struct hisi_qm *qm) 5135 { 5136 const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ? 5137 qm_cap_info_pf : qm_cap_info_vf; 5138 u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) : 5139 ARRAY_SIZE(qm_cap_info_vf); 5140 u32 val, i; 5141 5142 /* Doorbell isolate register is a independent register. 
*/ 5143 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true); 5144 if (val) 5145 set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); 5146 5147 if (qm->ver >= QM_HW_V3) { 5148 val = readl(qm->io_base + QM_FUNC_CAPS_REG); 5149 qm->cap_ver = val & QM_CAPBILITY_VERSION; 5150 } 5151 5152 /* Get PF/VF common capbility */ 5153 for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) { 5154 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver); 5155 if (val) 5156 set_bit(qm_cap_info_comm[i].type, &qm->caps); 5157 } 5158 5159 /* Get PF/VF different capbility */ 5160 for (i = 0; i < size; i++) { 5161 val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver); 5162 if (val) 5163 set_bit(cap_info[i].type, &qm->caps); 5164 } 5165 5166 /* Fetch and save the value of qm capability registers */ 5167 return qm_pre_store_caps(qm); 5168 } 5169 5170 static int qm_get_pci_res(struct hisi_qm *qm) 5171 { 5172 struct pci_dev *pdev = qm->pdev; 5173 struct device *dev = &pdev->dev; 5174 int ret; 5175 5176 ret = pci_request_mem_regions(pdev, qm->dev_name); 5177 if (ret < 0) { 5178 dev_err(dev, "Failed to request mem regions!\n"); 5179 return ret; 5180 } 5181 5182 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); 5183 qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2)); 5184 if (!qm->io_base) { 5185 ret = -EIO; 5186 goto err_request_mem_regions; 5187 } 5188 5189 ret = qm_get_hw_caps(qm); 5190 if (ret) 5191 goto err_ioremap; 5192 5193 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { 5194 qm->db_interval = QM_QP_DB_INTERVAL; 5195 qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); 5196 qm->db_io_base = ioremap(qm->db_phys_base, 5197 pci_resource_len(pdev, PCI_BAR_4)); 5198 if (!qm->db_io_base) { 5199 ret = -EIO; 5200 goto err_ioremap; 5201 } 5202 } else { 5203 qm->db_phys_base = qm->phys_base; 5204 qm->db_io_base = qm->io_base; 5205 qm->db_interval = 0; 5206 } 5207 5208 ret = qm_get_qp_num(qm); 5209 if (ret) 5210 goto err_db_ioremap; 5211 5212 return 0; 5213 5214 err_db_ioremap: 5215 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) 5216 iounmap(qm->db_io_base); 5217 err_ioremap: 5218 iounmap(qm->io_base); 5219 err_request_mem_regions: 5220 pci_release_mem_regions(pdev); 5221 return ret; 5222 } 5223 5224 static int qm_clear_device(struct hisi_qm *qm) 5225 { 5226 acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev); 5227 int ret; 5228 5229 if (qm->fun_type == QM_HW_VF) 5230 return 0; 5231 5232 /* Device does not support reset, return */ 5233 if (!qm->err_ini->err_info_init) 5234 return 0; 5235 qm->err_ini->err_info_init(qm); 5236 5237 if (!handle) 5238 return 0; 5239 5240 /* No reset method, return */ 5241 if (!acpi_has_method(handle, qm->err_info.acpi_rst)) 5242 return 0; 5243 5244 ret = qm_master_ooo_check(qm); 5245 if (ret) { 5246 writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL); 5247 return ret; 5248 } 5249 5250 return qm_reset_device(qm); 5251 } 5252 5253 static int hisi_qm_pci_init(struct hisi_qm *qm) 5254 { 5255 struct pci_dev *pdev = qm->pdev; 5256 struct device *dev = &pdev->dev; 5257 unsigned int num_vec; 5258 int ret; 5259 5260 ret = pci_enable_device_mem(pdev); 5261 if (ret < 0) { 5262 dev_err(dev, "Failed to enable device mem!\n"); 5263 return ret; 5264 } 5265 5266 ret = qm_get_pci_res(qm); 5267 if (ret) 5268 goto err_disable_pcidev; 5269 5270 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 5271 if (ret < 0) 5272 goto err_get_pci_res; 5273 pci_set_master(pdev); 5274 5275 num_vec = qm_get_irq_num(qm); 5276 ret = pci_alloc_irq_vectors(pdev, num_vec, 
num_vec, PCI_IRQ_MSI); 5277 if (ret < 0) { 5278 dev_err(dev, "Failed to enable MSI vectors!\n"); 5279 goto err_get_pci_res; 5280 } 5281 5282 ret = qm_clear_device(qm); 5283 if (ret) 5284 goto err_free_vectors; 5285 5286 return 0; 5287 5288 err_free_vectors: 5289 pci_free_irq_vectors(pdev); 5290 err_get_pci_res: 5291 qm_put_pci_res(qm); 5292 err_disable_pcidev: 5293 pci_disable_device(pdev); 5294 return ret; 5295 } 5296 5297 static int hisi_qm_init_work(struct hisi_qm *qm) 5298 { 5299 int i; 5300 5301 for (i = 0; i < qm->qp_num; i++) 5302 INIT_WORK(&qm->poll_data[i].work, qm_work_process); 5303 5304 if (qm->fun_type == QM_HW_PF) 5305 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); 5306 5307 if (qm->ver > QM_HW_V2) 5308 INIT_WORK(&qm->cmd_process, qm_cmd_process); 5309 5310 qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | 5311 WQ_UNBOUND, num_online_cpus(), 5312 pci_name(qm->pdev)); 5313 if (!qm->wq) { 5314 pci_err(qm->pdev, "failed to alloc workqueue!\n"); 5315 return -ENOMEM; 5316 } 5317 5318 return 0; 5319 } 5320 5321 static int hisi_qp_alloc_memory(struct hisi_qm *qm) 5322 { 5323 struct device *dev = &qm->pdev->dev; 5324 u16 sq_depth, cq_depth; 5325 size_t qp_dma_size; 5326 int i, ret; 5327 5328 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); 5329 if (!qm->qp_array) 5330 return -ENOMEM; 5331 5332 qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); 5333 if (!qm->poll_data) { 5334 kfree(qm->qp_array); 5335 return -ENOMEM; 5336 } 5337 5338 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); 5339 5340 /* one more page for device or qp statuses */ 5341 qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth; 5342 qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE; 5343 for (i = 0; i < qm->qp_num; i++) { 5344 qm->poll_data[i].qm = qm; 5345 ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth); 5346 if (ret) 5347 goto err_init_qp_mem; 5348 5349 dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size); 5350 } 5351 5352 return 0; 5353 err_init_qp_mem: 5354 hisi_qp_memory_uninit(qm, i); 5355 5356 return ret; 5357 } 5358 5359 static int hisi_qm_alloc_rsv_buf(struct hisi_qm *qm) 5360 { 5361 struct qm_rsv_buf *xqc_buf = &qm->xqc_buf; 5362 struct qm_dma *xqc_dma = &xqc_buf->qcdma; 5363 struct device *dev = &qm->pdev->dev; 5364 size_t off = 0; 5365 5366 #define QM_XQC_BUF_INIT(xqc_buf, type) do { \ 5367 (xqc_buf)->type = ((xqc_buf)->qcdma.va + (off)); \ 5368 (xqc_buf)->type##_dma = (xqc_buf)->qcdma.dma + (off); \ 5369 off += QMC_ALIGN(sizeof(struct qm_##type)); \ 5370 } while (0) 5371 5372 xqc_dma->size = QMC_ALIGN(sizeof(struct qm_eqc)) + 5373 QMC_ALIGN(sizeof(struct qm_aeqc)) + 5374 QMC_ALIGN(sizeof(struct qm_sqc)) + 5375 QMC_ALIGN(sizeof(struct qm_cqc)); 5376 xqc_dma->va = dma_alloc_coherent(dev, xqc_dma->size, 5377 &xqc_dma->dma, GFP_KERNEL); 5378 if (!xqc_dma->va) 5379 return -ENOMEM; 5380 5381 QM_XQC_BUF_INIT(xqc_buf, eqc); 5382 QM_XQC_BUF_INIT(xqc_buf, aeqc); 5383 QM_XQC_BUF_INIT(xqc_buf, sqc); 5384 QM_XQC_BUF_INIT(xqc_buf, cqc); 5385 5386 return 0; 5387 } 5388 5389 static int hisi_qm_memory_init(struct hisi_qm *qm) 5390 { 5391 struct device *dev = &qm->pdev->dev; 5392 int ret, total_func; 5393 size_t off = 0; 5394 5395 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { 5396 total_func = pci_sriov_get_totalvfs(qm->pdev) + 1; 5397 qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL); 5398 if (!qm->factor) 5399 return -ENOMEM; 5400 5401 /* Only the PF value needs to 
be initialized */ 5402 qm->factor[0].func_qos = QM_QOS_MAX_VAL; 5403 } 5404 5405 #define QM_INIT_BUF(qm, type, num) do { \ 5406 (qm)->type = ((qm)->qdma.va + (off)); \ 5407 (qm)->type##_dma = (qm)->qdma.dma + (off); \ 5408 off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \ 5409 } while (0) 5410 5411 idr_init(&qm->qp_idr); 5412 qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); 5413 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) + 5414 QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) + 5415 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + 5416 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); 5417 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, 5418 GFP_ATOMIC); 5419 dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size); 5420 if (!qm->qdma.va) { 5421 ret = -ENOMEM; 5422 goto err_destroy_idr; 5423 } 5424 5425 QM_INIT_BUF(qm, eqe, qm->eq_depth); 5426 QM_INIT_BUF(qm, aeqe, qm->aeq_depth); 5427 QM_INIT_BUF(qm, sqc, qm->qp_num); 5428 QM_INIT_BUF(qm, cqc, qm->qp_num); 5429 5430 ret = hisi_qm_alloc_rsv_buf(qm); 5431 if (ret) 5432 goto err_free_qdma; 5433 5434 ret = hisi_qp_alloc_memory(qm); 5435 if (ret) 5436 goto err_free_reserve_buf; 5437 5438 return 0; 5439 5440 err_free_reserve_buf: 5441 hisi_qm_free_rsv_buf(qm); 5442 err_free_qdma: 5443 dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); 5444 err_destroy_idr: 5445 idr_destroy(&qm->qp_idr); 5446 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 5447 kfree(qm->factor); 5448 5449 return ret; 5450 } 5451 5452 /** 5453 * hisi_qm_init() - Initialize the qm configuration. 5454 * @qm: The qm needing init. 5455 * 5456 * This function initializes the qm; hisi_qm_start() can then be called to put the qm to work. 5457 */ 5458 int hisi_qm_init(struct hisi_qm *qm) 5459 { 5460 struct pci_dev *pdev = qm->pdev; 5461 struct device *dev = &pdev->dev; 5462 int ret; 5463 5464 hisi_qm_pre_init(qm); 5465 5466 ret = hisi_qm_pci_init(qm); 5467 if (ret) 5468 return ret; 5469 5470 ret = qm_irqs_register(qm); 5471 if (ret) 5472 goto err_pci_init; 5473 5474 if (qm->fun_type == QM_HW_PF) { 5475 /* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */ 5476 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); 5477 qm_disable_clock_gate(qm); 5478 ret = qm_dev_mem_reset(qm); 5479 if (ret) { 5480 dev_err(dev, "failed to reset device memory\n"); 5481 goto err_irq_register; 5482 } 5483 } 5484 5485 if (qm->mode == UACCE_MODE_SVA) { 5486 ret = qm_alloc_uacce(qm); 5487 if (ret < 0) 5488 dev_warn(dev, "fail to alloc uacce (%d)\n", ret); 5489 } 5490 5491 ret = hisi_qm_memory_init(qm); 5492 if (ret) 5493 goto err_alloc_uacce; 5494 5495 ret = hisi_qm_init_work(qm); 5496 if (ret) 5497 goto err_free_qm_memory; 5498 5499 qm_cmd_init(qm); 5500 5501 return 0; 5502 5503 err_free_qm_memory: 5504 hisi_qm_memory_uninit(qm); 5505 err_alloc_uacce: 5506 qm_remove_uacce(qm); 5507 err_irq_register: 5508 qm_irqs_unregister(qm); 5509 err_pci_init: 5510 hisi_qm_pci_uninit(qm); 5511 return ret; 5512 } 5513 EXPORT_SYMBOL_GPL(hisi_qm_init); 5514 5515 /** 5516 * hisi_qm_get_dfx_access() - Try to get dfx access. 5517 * @qm: pointer to accelerator device. 5518 * 5519 * Try to get dfx access, then the user can read the debug information. 5520 * 5521 * If the device is suspended, return failure; otherwise 5522 * bump up the runtime PM usage counter.
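 * The reference taken here should be released with hisi_qm_put_dfx_access().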
*/ 5524 int hisi_qm_get_dfx_access(struct hisi_qm *qm) 5525 { 5526 struct device *dev = &qm->pdev->dev; 5527 5528 if (pm_runtime_suspended(dev)) { 5529 dev_info(dev, "cannot read/write - device is suspended.\n"); 5530 return -EAGAIN; 5531 } 5532 5533 return qm_pm_get_sync(qm); 5534 } 5535 EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access); 5536 5537 /** 5538 * hisi_qm_put_dfx_access() - Put dfx access. 5539 * @qm: pointer to accelerator device. 5540 * 5541 * Put dfx access, drop runtime PM usage counter. 5542 */ 5543 void hisi_qm_put_dfx_access(struct hisi_qm *qm) 5544 { 5545 qm_pm_put_sync(qm); 5546 } 5547 EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access); 5548 5549 /** 5550 * hisi_qm_pm_init() - Initialize qm runtime PM. 5551 * @qm: pointer to accelerator device. 5552 * 5553 * Function that initializes qm runtime PM. 5554 */ 5555 void hisi_qm_pm_init(struct hisi_qm *qm) 5556 { 5557 struct device *dev = &qm->pdev->dev; 5558 5559 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) 5560 return; 5561 5562 pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY); 5563 pm_runtime_use_autosuspend(dev); 5564 pm_runtime_put_noidle(dev); 5565 } 5566 EXPORT_SYMBOL_GPL(hisi_qm_pm_init); 5567 5568 /** 5569 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM. 5570 * @qm: pointer to accelerator device. 5571 * 5572 * Function that uninitializes qm runtime PM. 5573 */ 5574 void hisi_qm_pm_uninit(struct hisi_qm *qm) 5575 { 5576 struct device *dev = &qm->pdev->dev; 5577 5578 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) 5579 return; 5580 5581 pm_runtime_get_noresume(dev); 5582 pm_runtime_dont_use_autosuspend(dev); 5583 } 5584 EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit); 5585 5586 static int qm_prepare_for_suspend(struct hisi_qm *qm) 5587 { 5588 struct pci_dev *pdev = qm->pdev; 5589 int ret; 5590 5591 ret = qm->ops->set_msi(qm, false); 5592 if (ret) { 5593 pci_err(pdev, "failed to disable MSI before suspending!\n"); 5594 return ret; 5595 } 5596 5597 ret = qm_master_ooo_check(qm); 5598 if (ret) 5599 return ret; 5600 5601 ret = qm_set_pf_mse(qm, false); 5602 if (ret) 5603 pci_err(pdev, "failed to disable MSE before suspending!\n"); 5604 5605 return ret; 5606 } 5607 5608 static int qm_rebuild_for_resume(struct hisi_qm *qm) 5609 { 5610 struct pci_dev *pdev = qm->pdev; 5611 int ret; 5612 5613 ret = qm_set_pf_mse(qm, true); 5614 if (ret) { 5615 pci_err(pdev, "failed to enable MSE after resuming!\n"); 5616 return ret; 5617 } 5618 5619 ret = qm->ops->set_msi(qm, true); 5620 if (ret) { 5621 pci_err(pdev, "failed to enable MSI after resuming!\n"); 5622 return ret; 5623 } 5624 5625 ret = qm_dev_hw_init(qm); 5626 if (ret) { 5627 pci_err(pdev, "failed to init device after resuming\n"); 5628 return ret; 5629 } 5630 5631 qm_cmd_init(qm); 5632 hisi_qm_dev_err_init(qm); 5633 /* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */ 5634 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); 5635 qm_disable_clock_gate(qm); 5636 ret = qm_dev_mem_reset(qm); 5637 if (ret) 5638 pci_err(pdev, "failed to reset device memory\n"); 5639 5640 return ret; 5641 } 5642 5643 /** 5644 * hisi_qm_suspend() - Runtime suspend of given device. 5645 * @dev: device to suspend. 5646 * 5647 * Function that suspends the device.
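 * Typically used together with hisi_qm_resume() as the driver's runtime PM callbacks.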
*/ 5649 int hisi_qm_suspend(struct device *dev) 5650 { 5651 struct pci_dev *pdev = to_pci_dev(dev); 5652 struct hisi_qm *qm = pci_get_drvdata(pdev); 5653 int ret; 5654 5655 pci_info(pdev, "entering suspended state\n"); 5656 5657 ret = hisi_qm_stop(qm, QM_NORMAL); 5658 if (ret) { 5659 pci_err(pdev, "failed to stop qm(%d)\n", ret); 5660 return ret; 5661 } 5662 5663 ret = qm_prepare_for_suspend(qm); 5664 if (ret) 5665 pci_err(pdev, "failed to prepare for suspend(%d)\n", ret); 5666 5667 return ret; 5668 } 5669 EXPORT_SYMBOL_GPL(hisi_qm_suspend); 5670 5671 /** 5672 * hisi_qm_resume() - Runtime resume of given device. 5673 * @dev: device to resume. 5674 * 5675 * Function that resumes the device. 5676 */ 5677 int hisi_qm_resume(struct device *dev) 5678 { 5679 struct pci_dev *pdev = to_pci_dev(dev); 5680 struct hisi_qm *qm = pci_get_drvdata(pdev); 5681 int ret; 5682 5683 pci_info(pdev, "resuming from suspend state\n"); 5684 5685 ret = qm_rebuild_for_resume(qm); 5686 if (ret) { 5687 pci_err(pdev, "failed to rebuild for resume(%d)\n", ret); 5688 return ret; 5689 } 5690 5691 ret = hisi_qm_start(qm); 5692 if (ret) { 5693 if (qm_check_dev_error(qm)) { 5694 pci_info(pdev, "failed to start qm due to device error, device will be reset!\n"); 5695 return 0; 5696 } 5697 5698 pci_err(pdev, "failed to start qm(%d)!\n", ret); 5699 } 5700 5701 return ret; 5702 } 5703 EXPORT_SYMBOL_GPL(hisi_qm_resume); 5704 5705 MODULE_LICENSE("GPL v2"); 5706 MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>"); 5707 MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver"); 5708
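/*
 * Usage sketch (illustrative only, not compiled here): the exported reset,
 * shutdown and runtime PM helpers above are intended to be plugged into an
 * accelerator driver's struct pci_driver, pci_error_handlers and dev_pm_ops.
 * The wiring below is a minimal sketch of that pattern; the "my_acc_*"
 * identifiers are hypothetical placeholders, not symbols defined by this file.
 *
 *	static const struct dev_pm_ops my_acc_pm_ops = {
 *		SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
 *	};
 *
 *	static const struct pci_error_handlers my_acc_err_handler = {
 *		.slot_reset	= hisi_qm_dev_slot_reset,
 *		.reset_prepare	= hisi_qm_reset_prepare,
 *		.reset_done	= hisi_qm_reset_done,
 *	};
 *
 *	static struct pci_driver my_acc_pci_driver = {
 *		.name		= "my_acc",
 *		.id_table	= my_acc_dev_ids,
 *		.probe		= my_acc_probe,
 *		.remove		= my_acc_remove,
 *		.shutdown	= hisi_qm_dev_shutdown,
 *		.err_handler	= &my_acc_err_handler,
 *		.driver.pm	= &my_acc_pm_ops,
 *	};
 */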