// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <asm/page.h>
#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uacce.h>
#include <linux/uaccess.h>
#include <uapi/misc/uacce/hisi_qm.h>
#include <linux/hisi_acc_qm.h>
#include "qm_common.h"

/* eq/aeq irq enable */
#define QM_VF_AEQ_INT_SOURCE		0x0
#define QM_VF_AEQ_INT_MASK		0x4
#define QM_VF_EQ_INT_SOURCE		0x8
#define QM_VF_EQ_INT_MASK		0xc

#define QM_IRQ_VECTOR_MASK		GENMASK(15, 0)
#define QM_IRQ_TYPE_MASK		GENMASK(15, 0)
#define QM_IRQ_TYPE_SHIFT		16
#define QM_ABN_IRQ_TYPE_MASK		GENMASK(7, 0)

/* mailbox */
#define QM_MB_PING_ALL_VFS		0xffff
#define QM_MB_CMD_DATA_SHIFT		32
#define QM_MB_CMD_DATA_MASK		GENMASK(31, 0)
#define QM_MB_STATUS_MASK		GENMASK(12, 9)

/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT		0
#define QM_SQ_PAGE_SIZE_SHIFT		4
#define QM_SQ_BUF_SIZE_SHIFT		8
#define QM_SQ_SQE_SIZE_SHIFT		12
#define QM_SQ_PRIORITY_SHIFT		0
#define QM_SQ_ORDERS_SHIFT		4
#define QM_SQ_TYPE_SHIFT		8
#define QM_QC_PASID_ENABLE		0x1
#define QM_QC_PASID_ENABLE_SHIFT	7

#define QM_SQ_TYPE_MASK			GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc)		((le16_to_cpu((sqc).w11) >> 6) & 0x1)

/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT		0
#define QM_CQ_PAGE_SIZE_SHIFT		4
#define QM_CQ_BUF_SIZE_SHIFT		8
#define QM_CQ_CQE_SIZE_SHIFT		12
#define QM_CQ_PHASE_SHIFT		0
#define QM_CQ_FLAG_SHIFT		1

#define QM_CQE_PHASE(cqe)		(le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE			4
#define QM_CQ_TAIL_IDX(cqc)		((le16_to_cpu((cqc).w11) >> 6) & 0x1)

/* eqc shift */
#define QM_EQE_AEQE_SIZE		(2UL << 12)
#define QM_EQC_PHASE_SHIFT		16

#define QM_EQE_PHASE(eqe)		((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK			GENMASK(15, 0)

#define QM_AEQE_PHASE(aeqe)		((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT		17
#define QM_AEQE_TYPE_MASK		0xf
#define QM_AEQE_CQN_MASK		GENMASK(15, 0)
#define QM_CQ_OVERFLOW			0
#define QM_EQ_OVERFLOW			1
#define QM_CQE_ERROR			2

#define QM_XQ_DEPTH_SHIFT		16
#define QM_XQ_DEPTH_MASK		GENMASK(15, 0)

#define QM_DOORBELL_CMD_SQ		0
#define QM_DOORBELL_CMD_CQ		1
#define QM_DOORBELL_CMD_EQ		2
#define QM_DOORBELL_CMD_AEQ		3

#define QM_DOORBELL_BASE_V1		0x340
#define QM_DB_CMD_SHIFT_V1		16
#define QM_DB_INDEX_SHIFT_V1		32
#define QM_DB_PRIORITY_SHIFT_V1		48
#define QM_PAGE_SIZE			0x0034
#define QM_QP_DB_INTERVAL		0x10000
#define QM_DB_TIMEOUT_CFG		0x100074
#define QM_DB_TIMEOUT_SET		0x1fffff

#define QM_MEM_START_INIT		0x100040
#define QM_MEM_INIT_DONE		0x100044
#define QM_VFT_CFG_RDY			0x10006c
#define QM_VFT_CFG_OP_WR		0x100058
#define QM_VFT_CFG_TYPE			0x10005c
#define QM_VFT_CFG			0x100060
#define QM_VFT_CFG_OP_ENABLE		0x100054
#define QM_PM_CTRL			0x100148
#define QM_IDLE_DISABLE			BIT(9)

#define QM_VFT_CFG_DATA_L		0x100064
#define QM_VFT_CFG_DATA_H		0x100068
#define QM_SQC_VFT_BUF_SIZE		(7ULL << 8)
#define QM_SQC_VFT_SQC_SIZE		(5ULL << 12)
#define QM_SQC_VFT_INDEX_NUMBER		(1ULL << 16)
#define QM_SQC_VFT_START_SQN_SHIFT	28
#define QM_SQC_VFT_VALID		(1ULL << 44)
#define QM_SQC_VFT_SQN_SHIFT		45
#define QM_CQC_VFT_BUF_SIZE		(7ULL << 8)
#define QM_CQC_VFT_SQC_SIZE		(5ULL << 12)
#define QM_CQC_VFT_INDEX_NUMBER		(1ULL << 16)
#define QM_CQC_VFT_VALID		(1ULL << 28)

#define QM_SQC_VFT_BASE_SHIFT_V2	28
#define QM_SQC_VFT_BASE_MASK_V2		GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2		45
#define QM_SQC_VFT_NUM_MASK_V2		GENMASK(9, 0)

#define QM_ABNORMAL_INT_SOURCE		0x100000
#define QM_ABNORMAL_INT_MASK		0x100004
#define QM_ABNORMAL_INT_MASK_VALUE	0x7fff
#define QM_ABNORMAL_INT_STATUS		0x100008
#define QM_ABNORMAL_INT_SET		0x10000c
#define QM_ABNORMAL_INF00		0x100010
#define QM_FIFO_OVERFLOW_TYPE		0xc0
#define QM_FIFO_OVERFLOW_TYPE_SHIFT	6
#define QM_FIFO_OVERFLOW_VF		0x3f
#define QM_FIFO_OVERFLOW_QP_SHIFT	16
#define QM_ABNORMAL_INF01		0x100014
#define QM_DB_TIMEOUT_TYPE		0xc0
#define QM_DB_TIMEOUT_TYPE_SHIFT	6
#define QM_DB_TIMEOUT_VF		0x3f
#define QM_DB_TIMEOUT_QP_SHIFT		16
#define QM_ABNORMAL_INF02		0x100018
#define QM_AXI_POISON_ERR		BIT(22)
#define QM_RAS_CE_ENABLE		0x1000ec
#define QM_RAS_FE_ENABLE		0x1000f0
#define QM_RAS_NFE_ENABLE		0x1000f4
#define QM_RAS_CE_THRESHOLD		0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ		1
#define QM_OOO_SHUTDOWN_SEL		0x1040f8
#define QM_AXI_RRESP_ERR		BIT(0)
#define QM_ECC_MBIT			BIT(2)
#define QM_DB_TIMEOUT			BIT(10)
#define QM_OF_FIFO_OF			BIT(11)

#define QM_RESET_WAIT_TIMEOUT		400
#define QM_PEH_VENDOR_ID		0x1000d8
#define ACC_VENDOR_ID_VALUE		0x5a5a
#define QM_PEH_DFX_INFO0		0x1000fc
#define QM_PEH_DFX_INFO1		0x100100
#define QM_PEH_DFX_MASK			(BIT(0) | BIT(2))
#define QM_PEH_MSI_FINISH_MASK		GENMASK(19, 16)
#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT	3
#define ACC_PEH_MSI_DISABLE		GENMASK(31, 0)
#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN	0x1
#define ACC_MASTER_TRANS_RETURN_RW	3
#define ACC_MASTER_TRANS_RETURN		0x300150
#define ACC_MASTER_GLOBAL_CTRL		0x300000
#define ACC_AM_CFG_PORT_WR_EN		0x30001c
#define QM_RAS_NFE_MBIT_DISABLE		~QM_ECC_MBIT
#define ACC_AM_ROB_ECC_INT_STS		0x300104
#define ACC_ROB_ECC_ERR_MULTPL		BIT(1)
#define QM_MSI_CAP_ENABLE		BIT(16)

/* interfunction communication */
#define QM_IFC_READY_STATUS		0x100128
#define QM_IFC_INT_SET_P		0x100130
#define QM_IFC_INT_CFG			0x100134
#define QM_IFC_INT_SOURCE_P		0x100138
#define QM_IFC_INT_SOURCE_V		0x0020
#define QM_IFC_INT_MASK			0x0024
#define QM_IFC_INT_STATUS		0x0028
#define QM_IFC_INT_SET_V		0x002C
#define QM_IFC_SEND_ALL_VFS		GENMASK(6, 0)
#define QM_IFC_INT_SOURCE_CLR		GENMASK(63, 0)
#define QM_IFC_INT_SOURCE_MASK		BIT(0)
#define QM_IFC_INT_DISABLE		BIT(0)
#define QM_IFC_INT_STATUS_MASK		BIT(0)
#define QM_IFC_INT_SET_MASK		BIT(0)
#define QM_WAIT_DST_ACK			10
#define QM_MAX_PF_WAIT_COUNT		10
#define QM_MAX_VF_WAIT_COUNT		40
#define QM_VF_RESET_WAIT_US		20000
#define QM_VF_RESET_WAIT_CNT		3000
#define QM_VF_RESET_WAIT_TIMEOUT_US	\
	(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)

#define POLL_PERIOD			10
#define POLL_TIMEOUT			1000
#define WAIT_PERIOD_US_MAX		200
#define WAIT_PERIOD_US_MIN		100
#define MAX_WAIT_COUNTS			1000
#define QM_CACHE_WB_START		0x204
#define QM_CACHE_WB_DONE		0x208
#define QM_FUNC_CAPS_REG		0x3100
#define QM_CAPBILITY_VERSION		GENMASK(7, 0)

#define PCI_BAR_2			2
#define PCI_BAR_4			4
#define QMC_ALIGN(sz)			ALIGN(sz, 32)

#define QM_DBG_READ_LEN			256
#define QM_PCI_COMMAND_INVALID		~0
#define QM_RESET_STOP_TX_OFFSET		1
#define QM_RESET_STOP_RX_OFFSET		2

#define WAIT_PERIOD			20
#define REMOVE_WAIT_DELAY		10

#define QM_QOS_PARAM_NUM		2
#define QM_QOS_MAX_VAL			1000
#define QM_QOS_RATE			100
#define QM_QOS_EXPAND_RATE		1000
#define QM_SHAPER_CIR_B_MASK		GENMASK(7, 0)
#define QM_SHAPER_CIR_U_MASK		GENMASK(10, 8)
#define QM_SHAPER_CIR_S_MASK		GENMASK(14, 11)
#define QM_SHAPER_FACTOR_CIR_U_SHIFT	8
#define QM_SHAPER_FACTOR_CIR_S_SHIFT	11
#define QM_SHAPER_FACTOR_CBS_B_SHIFT	15
#define QM_SHAPER_FACTOR_CBS_S_SHIFT	19
#define QM_SHAPER_CBS_B			1
#define QM_SHAPER_VFT_OFFSET		6
#define QM_QOS_MIN_ERROR_RATE		5
#define QM_SHAPER_MIN_CBS_S		8
#define QM_QOS_TICK			0x300U
#define QM_QOS_DIVISOR_CLK		0x1f40U
#define QM_QOS_MAX_CIR_B		200
#define QM_QOS_MIN_CIR_B		100
#define QM_QOS_MAX_CIR_U		6
#define QM_AUTOSUSPEND_DELAY		3000

#define QM_DEV_ALG_MAX_LEN		256

/* abnormal status value for stopping queue */
#define QM_STOP_QUEUE_FAIL		1
#define QM_DUMP_SQC_FAIL		3
#define QM_DUMP_CQC_FAIL		4
#define QM_FINISH_WAIT			5

#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
	(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
	((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
	((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
	((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
	((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_SQC_W13(priority, orders, alg_type) \
	(((priority) << QM_SQ_PRIORITY_SHIFT) | \
	((orders) << QM_SQ_ORDERS_SHIFT) | \
	(((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))

#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
	(((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
	((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
	((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
	((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
	((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
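
/*
 * Illustrative arithmetic for the V2 context macros above (assumed inputs,
 * not driver code): with a 128-byte sqe and a queue depth of 1024,
 * QM_MK_SQC_DW3_V2(128, 1024) evaluates to
 *
 *	(1024 - 1) | (ilog2(128) << QM_SQ_SQE_SIZE_SHIFT)
 *	= 0x3ff | (7 << 12) = 0x73ff
 *
 * so the depth is stored as "depth - 1" and the sqe size as a power of two.
 */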
enum vft_type {
	SQC_VFT = 0,
	CQC_VFT,
	SHAPER_VFT,
};

enum acc_err_result {
	ACC_ERR_NONE,
	ACC_ERR_NEED_RESET,
	ACC_ERR_RECOVERED,
};

enum qm_alg_type {
	ALG_TYPE_0,
	ALG_TYPE_1,
};

enum qm_mb_cmd {
	QM_PF_FLR_PREPARE = 0x01,
	QM_PF_SRST_PREPARE,
	QM_PF_RESET_DONE,
	QM_VF_PREPARE_DONE,
	QM_VF_PREPARE_FAIL,
	QM_VF_START_DONE,
	QM_VF_START_FAIL,
	QM_PF_SET_QOS,
	QM_VF_GET_QOS,
};

enum qm_basic_type {
	QM_TOTAL_QP_NUM_CAP = 0x0,
	QM_FUNC_MAX_QP_CAP,
	QM_XEQ_DEPTH_CAP,
	QM_QP_DEPTH_CAP,
	QM_EQ_IRQ_TYPE_CAP,
	QM_AEQ_IRQ_TYPE_CAP,
	QM_ABN_IRQ_TYPE_CAP,
	QM_PF2VF_IRQ_TYPE_CAP,
	QM_PF_IRQ_NUM_CAP,
	QM_VF_IRQ_NUM_CAP,
};

enum qm_pre_store_cap_idx {
	QM_EQ_IRQ_TYPE_CAP_IDX = 0x0,
	QM_AEQ_IRQ_TYPE_CAP_IDX,
	QM_ABN_IRQ_TYPE_CAP_IDX,
	QM_PF2VF_IRQ_TYPE_CAP_IDX,
};

static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
	{QM_SUPPORT_DB_ISOLATION, 0x30,   0, BIT(0),  0x0, 0x0, 0x0},
	{QM_SUPPORT_FUNC_QOS,     0x3100, 0, BIT(8),  0x0, 0x0, 0x1},
	{QM_SUPPORT_STOP_QP,      0x3100, 0, BIT(9),  0x0, 0x0, 0x1},
	{QM_SUPPORT_STOP_FUNC,    0x3100, 0, BIT(10), 0x0, 0x0, 0x1},
	{QM_SUPPORT_MB_COMMAND,   0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
	{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
};

static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
	{QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1},
};

static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
	{QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0},
};

static const struct hisi_qm_cap_info qm_basic_info[] = {
	{QM_TOTAL_QP_NUM_CAP,   0x100158, 0,  GENMASK(10, 0), 0x1000,    0x400,     0x400},
	{QM_FUNC_MAX_QP_CAP,    0x100158, 11, GENMASK(10, 0), 0x1000,    0x400,     0x400},
	{QM_XEQ_DEPTH_CAP,      0x3104,   0,  GENMASK(31, 0), 0x800,     0x4000800, 0x4000800},
	{QM_QP_DEPTH_CAP,       0x3108,   0,  GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
	{QM_EQ_IRQ_TYPE_CAP,    0x310c,   0,  GENMASK(31, 0), 0x10000,   0x10000,   0x10000},
	{QM_AEQ_IRQ_TYPE_CAP,   0x3110,   0,  GENMASK(31, 0), 0x0,       0x10001,   0x10001},
	{QM_ABN_IRQ_TYPE_CAP,   0x3114,   0,  GENMASK(31, 0), 0x0,       0x10003,   0x10003},
	{QM_PF2VF_IRQ_TYPE_CAP, 0x3118,   0,  GENMASK(31, 0), 0x0,       0x0,       0x10002},
	{QM_PF_IRQ_NUM_CAP,     0x311c,   16, GENMASK(15, 0), 0x1,       0x4,       0x4},
	{QM_VF_IRQ_NUM_CAP,     0x311c,   0,  GENMASK(15, 0), 0x1,       0x2,       0x3},
};

static const u32 qm_pre_store_caps[] = {
	QM_EQ_IRQ_TYPE_CAP,
	QM_AEQ_IRQ_TYPE_CAP,
	QM_ABN_IRQ_TYPE_CAP,
	QM_PF2VF_IRQ_TYPE_CAP,
};
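
/*
 * How to read the capability tables above (descriptive note): each row is
 * { type, offset, shift, mask, v1_val, v2_val, v3_val }. For example, on
 * QM_HW_V3 the QM_SUPPORT_STOP_QP row means the capability is taken from
 * bit 9 of register 0x3100, while on V1/V2 hardware the fixed per-version
 * value (0x0 here) is returned instead; see hisi_qm_get_hw_info() below.
 */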
struct qm_mailbox {
	__le16 w0;
	__le16 queue_num;
	__le32 base_l;
	__le32 base_h;
	__le32 rsvd;
};

struct qm_doorbell {
	__le16 queue_num;
	__le16 cmd;
	__le16 index;
	__le16 priority;
};

struct hisi_qm_resource {
	struct hisi_qm *qm;
	int distance;
	struct list_head list;
};

/**
 * struct qm_hw_err - Structure describing the device errors
 * @list: hardware error list
 * @timestamp: timestamp when the error occurred
 */
struct qm_hw_err {
	struct list_head list;
	unsigned long long timestamp;
};

struct hisi_qm_hw_ops {
	int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
	void (*qm_db)(struct hisi_qm *qm, u16 qn,
		      u8 cmd, u16 index, u8 priority);
	int (*debug_init)(struct hisi_qm *qm);
	void (*hw_error_init)(struct hisi_qm *qm);
	void (*hw_error_uninit)(struct hisi_qm *qm);
	enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
	int (*set_msi)(struct hisi_qm *qm, bool set);
};

struct hisi_qm_hw_error {
	u32 int_msk;
	const char *msg;
};

static const struct hisi_qm_hw_error qm_hw_error[] = {
	{ .int_msk = BIT(0),  .msg = "qm_axi_rresp" },
	{ .int_msk = BIT(1),  .msg = "qm_axi_bresp" },
	{ .int_msk = BIT(2),  .msg = "qm_ecc_mbit" },
	{ .int_msk = BIT(3),  .msg = "qm_ecc_1bit" },
	{ .int_msk = BIT(4),  .msg = "qm_acc_get_task_timeout" },
	{ .int_msk = BIT(5),  .msg = "qm_acc_do_task_timeout" },
	{ .int_msk = BIT(6),  .msg = "qm_acc_wb_not_ready_timeout" },
	{ .int_msk = BIT(7),  .msg = "qm_sq_cq_vf_invalid" },
	{ .int_msk = BIT(8),  .msg = "qm_cq_vf_invalid" },
	{ .int_msk = BIT(9),  .msg = "qm_sq_vf_invalid" },
	{ .int_msk = BIT(10), .msg = "qm_db_timeout" },
	{ .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
	{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
	{ .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
	{ .int_msk = BIT(14), .msg = "qm_flr_timeout" },
};

static const char * const qm_db_timeout[] = {
	"sq", "cq", "eq", "aeq",
};

static const char * const qm_fifo_overflow[] = {
	"cq", "eq", "aeq",
};

struct qm_typical_qos_table {
	u32 start;
	u32 end;
	u32 val;
};

/* the qos step is 100 */
static struct qm_typical_qos_table shaper_cir_s[] = {
	{100,   100,    4},
	{200,   200,    3},
	{300,   500,    2},
	{600,   1000,   1},
	{1100,  100000, 0},
};

static struct qm_typical_qos_table shaper_cbs_s[] = {
	{100,   200,    9},
	{300,   500,    11},
	{600,   1000,   12},
	{1100,  10000,  16},
	{10100, 25000,  17},
	{25100, 50000,  18},
	{50100, 100000, 19}
};

static void qm_irqs_unregister(struct hisi_qm *qm);

static u32 qm_get_hw_error_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
}

static u32 qm_get_dev_err_status(struct hisi_qm *qm)
{
	return qm->err_ini->get_dev_hw_err_status(qm);
}

/* Check whether the error causes the master OOO to be blocked */
static bool qm_check_dev_error(struct hisi_qm *qm)
{
	u32 val, dev_val;

	if (qm->fun_type == QM_HW_VF)
		return false;

	val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
	dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;

	return val || dev_val;
}

static int qm_wait_reset_finish(struct hisi_qm *qm)
{
	int delay = 0;

	/* All reset requests need to be queued for processing */
	while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return -EBUSY;
	}

	return 0;
}

static int qm_reset_prepare_ready(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	/*
	 * On Kunpeng920, the PF and a VF on the host do not support
	 * being reset at the same time.
	 */
	if (qm->ver < QM_HW_V3)
		return qm_wait_reset_finish(pf_qm);

	return qm_wait_reset_finish(qm);
}

static void qm_reset_bit_clear(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	if (qm->ver < QM_HW_V3)
		clear_bit(QM_RESETTING, &pf_qm->misc_ctl);

	clear_bit(QM_RESETTING, &qm->misc_ctl);
}

static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
			   u64 base, u16 queue, bool op)
{
	mailbox->w0 = cpu_to_le16((cmd) |
		((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
		(0x1 << QM_MB_BUSY_SHIFT));
	mailbox->queue_num = cpu_to_le16(queue);
	mailbox->base_l = cpu_to_le32(lower_32_bits(base));
	mailbox->base_h = cpu_to_le32(upper_32_bits(base));
	mailbox->rsvd = 0;
}
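
/*
 * Illustrative use of qm_mb_pre_init() (a sketch; sqc_dma and qp_id are
 * assumed to be a valid SQC DMA address and queue id):
 *
 *	struct qm_mailbox mb;
 *
 *	qm_mb_pre_init(&mb, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);
 *
 * builds a "read SQC" request: w0 carries the command with the op and busy
 * bits set, and base_l/base_h carry the DMA address as two 32-bit halves.
 */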
/* Return 0 if the mailbox is ready, or -ETIMEDOUT on hardware timeout. */
int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
					  val, !((val >> QM_MB_BUSY_SHIFT) & 0x1),
					  POLL_PERIOD, POLL_TIMEOUT);
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);

/* 128 bits should be written to hardware at one time to trigger a mailbox */
static void qm_mb_write(struct hisi_qm *qm, const void *src)
{
	void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;

#if IS_ENABLED(CONFIG_ARM64)
	unsigned long tmp0 = 0, tmp1 = 0;
#endif

	if (!IS_ENABLED(CONFIG_ARM64)) {
		memcpy_toio(fun_base, src, 16);
		dma_wmb();
		return;
	}

#if IS_ENABLED(CONFIG_ARM64)
	asm volatile("ldp %0, %1, %3\n"
		     "stp %0, %1, %2\n"
		     "dmb oshst\n"
		     : "=&r" (tmp0),
		       "=&r" (tmp1),
		       "+Q" (*((char __iomem *)fun_base))
		     : "Q" (*((char *)src))
		     : "memory");
#endif
}

static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
	int ret;
	u32 val;

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox is busy, cannot start operation!\n");
		ret = -EBUSY;
		goto mb_busy;
	}

	qm_mb_write(qm, mailbox);

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
		ret = -ETIMEDOUT;
		goto mb_busy;
	}

	val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
	if (val & QM_MB_STATUS_MASK) {
		dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
		ret = -EIO;
		goto mb_busy;
	}

	return 0;

mb_busy:
	atomic64_inc(&qm->debug.dfx.mb_err_cnt);
	return ret;
}

int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
	       bool op)
{
	struct qm_mailbox mailbox;
	int ret;

	qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);

	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	mutex_unlock(&qm->mailbox_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_mb);
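
/*
 * Illustrative caller flow for hisi_qm_mb() (a sketch, error handling
 * trimmed): flushing the QM, as qm_drain_qm() below does, would be
 *
 *	ret = hisi_qm_mb(qm, QM_MB_CMD_FLUSH_QM, 0, 0, 0);
 *	if (ret)
 *		dev_err(&qm->pdev->dev, "failed to flush qm!\n");
 *
 * mailbox_lock serializes senders; qm_mb_nolock() performs the ready-poll,
 * 128-bit write and status-check sequence.
 */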
/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
	struct qm_mailbox mailbox;
	dma_addr_t xqc_dma;
	void *tmp_xqc;
	size_t size;
	int ret;

	switch (cmd) {
	case QM_MB_CMD_SQC:
		size = sizeof(struct qm_sqc);
		tmp_xqc = qm->xqc_buf.sqc;
		xqc_dma = qm->xqc_buf.sqc_dma;
		break;
	case QM_MB_CMD_CQC:
		size = sizeof(struct qm_cqc);
		tmp_xqc = qm->xqc_buf.cqc;
		xqc_dma = qm->xqc_buf.cqc_dma;
		break;
	case QM_MB_CMD_EQC:
		size = sizeof(struct qm_eqc);
		tmp_xqc = qm->xqc_buf.eqc;
		xqc_dma = qm->xqc_buf.eqc_dma;
		break;
	case QM_MB_CMD_AEQC:
		size = sizeof(struct qm_aeqc);
		tmp_xqc = qm->xqc_buf.aeqc;
		xqc_dma = qm->xqc_buf.aeqc_dma;
		break;
	default:
		dev_err(&qm->pdev->dev, "unknown mailbox cmd %u\n", cmd);
		return -EINVAL;
	}

	/* Setting xqc will fail if master OOO is blocked. */
	if (qm_check_dev_error(pf_qm)) {
		dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stopped!\n");
		return -EIO;
	}

	mutex_lock(&qm->mailbox_lock);
	if (!op)
		memcpy(tmp_xqc, xqc, size);

	qm_mb_pre_init(&mailbox, cmd, xqc_dma, qp_id, op);
	ret = qm_mb_nolock(qm, &mailbox);
	if (!ret && op)
		memcpy(xqc, tmp_xqc, size);

	mutex_unlock(&qm->mailbox_lock);

	return ret;
}

static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	u64 doorbell;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V1) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);

	writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
}

static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	void __iomem *io_base = qm->io_base;
	u16 randata = 0;
	u64 doorbell;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		io_base = qm->db_io_base + (u64)qn * qm->db_interval +
			  QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, io_base);
}

static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
		qn, cmd, index);

	qm->ops->qm_db(qm, qn, cmd, index, priority);
}
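
/*
 * Illustrative doorbell encoding (assumed values, not driver code): ringing
 * the CQ doorbell for queue 5 at index 42 on V1 hardware writes the single
 * 64-bit value
 *
 *	5 | ((u64)QM_DOORBELL_CMD_CQ << 16) | (42ULL << 32) | (0ULL << 48)
 *
 * to QM_DOORBELL_BASE_V1. On V2, SQ/CQ doorbells go through the per-queue
 * window at qn * qm->db_interval in db_io_base, while EQ/AEQ doorbells stay
 * in the main register region.
 */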
static void qm_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	/* If the qm enables clock gating on Kunpeng930, qos will be inaccurate. */
	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + QM_PM_CTRL);
	val |= QM_IDLE_DISABLE;
	writel(val, qm->io_base + QM_PM_CTRL);
}

static int qm_dev_mem_reset(struct hisi_qm *qm)
{
	u32 val;

	writel(0x1, qm->io_base + QM_MEM_START_INIT);
	return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}

/**
 * hisi_qm_get_hw_info() - Get device information.
 * @qm: The qm from which to get the information.
 * @info_table: Array storing the device information.
 * @index: Index into info_table.
 * @is_read: Whether to read from the register; 0 means reading from the
 *	     register is not supported.
 *
 * This function returns the device information the caller needs.
 */
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
			const struct hisi_qm_cap_info *info_table,
			u32 index, bool is_read)
{
	u32 val;

	switch (qm->ver) {
	case QM_HW_V1:
		return info_table[index].v1_val;
	case QM_HW_V2:
		return info_table[index].v2_val;
	default:
		if (!is_read)
			return info_table[index].v3_val;

		val = readl(qm->io_base + info_table[index].offset);
		return (val >> info_table[index].shift) & info_table[index].mask;
	}
}
EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);

static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
			     u16 *high_bits, enum qm_basic_type type)
{
	u32 depth;

	depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
	*low_bits = depth & QM_XQ_DEPTH_MASK;
	*high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
}
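
/*
 * Illustrative unpacking for qm_get_xqc_depth() (derived from the
 * qm_basic_info table above): on V3, QM_XEQ_DEPTH_CAP reads 0x4000800, so
 *
 *	*low_bits  = 0x4000800 & 0xffff         = 0x800 (2048)
 *	*high_bits = (0x4000800 >> 16) & 0xffff = 0x400 (1024)
 *
 * Interpreting the two halves as the EQ and AEQ depths is an assumption
 * based on how the capability is named.
 */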
int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
		     u32 dev_algs_size)
{
	struct device *dev = &qm->pdev->dev;
	char *algs, *ptr;
	int i;

	if (!qm->uacce)
		return 0;

	if (dev_algs_size >= QM_DEV_ALG_MAX_LEN) {
		dev_err(dev, "algs size %u is equal to or larger than %d.\n",
			dev_algs_size, QM_DEV_ALG_MAX_LEN);
		return -EINVAL;
	}

	algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL);
	if (!algs)
		return -ENOMEM;

	for (i = 0; i < dev_algs_size; i++)
		if (alg_msk & dev_algs[i].alg_msk)
			strcat(algs, dev_algs[i].alg);

	ptr = strrchr(algs, '\n');
	if (ptr) {
		*ptr = '\0';
		qm->uacce->algs = algs;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_set_algs);

static u32 qm_get_irq_num(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_PF)
		return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);

	return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
}

static int qm_pm_get_sync(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return 0;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "failed to get_sync(%d).\n", ret);
		return ret;
	}

	return 0;
}

static void qm_pm_put_sync(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

static void qm_cq_head_update(struct hisi_qp *qp)
{
	if (qp->qp_status.cq_head == qp->cq_depth - 1) {
		qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
		qp->qp_status.cq_head = 0;
	} else {
		qp->qp_status.cq_head++;
	}
}

static void qm_poll_req_cb(struct hisi_qp *qp)
{
	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
	struct hisi_qm *qm = qp->qm;

	while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
		dma_rmb();
		qp->req_cb(qp, qp->sqe + qm->sqe_size *
			   le16_to_cpu(cqe->sq_head));
		qm_cq_head_update(qp);
		cqe = qp->cqe + qp->qp_status.cq_head;
		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
		      qp->qp_status.cq_head, 0);
		atomic_dec(&qp->qp_status.used);

		cond_resched();
	}

	/* set c_flag */
	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
}

static void qm_work_process(struct work_struct *work)
{
	struct hisi_qm_poll_data *poll_data =
		container_of(work, struct hisi_qm_poll_data, work);
	struct hisi_qm *qm = poll_data->qm;
	u16 eqe_num = poll_data->eqe_num;
	struct hisi_qp *qp;
	int i;

	for (i = eqe_num - 1; i >= 0; i--) {
		qp = &qm->qp_array[poll_data->qp_finish_id[i]];
		if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
			continue;

		if (qp->event_cb) {
			qp->event_cb(qp);
			continue;
		}

		if (likely(qp->req_cb))
			qm_poll_req_cb(qp);
	}
}

static void qm_get_complete_eqe_num(struct hisi_qm *qm)
{
	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
	struct hisi_qm_poll_data *poll_data = NULL;
	u16 eq_depth = qm->eq_depth;
	u16 cqn, eqe_num = 0;

	if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) {
		atomic64_inc(&qm->debug.dfx.err_irq_cnt);
		qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
		return;
	}

	cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
	if (unlikely(cqn >= qm->qp_num))
		return;
	poll_data = &qm->poll_data[cqn];

	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
		cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
		poll_data->qp_finish_id[eqe_num] = cqn;
		eqe_num++;

		if (qm->status.eq_head == eq_depth - 1) {
			qm->status.eqc_phase = !qm->status.eqc_phase;
			eqe = qm->eqe;
			qm->status.eq_head = 0;
		} else {
			eqe++;
			qm->status.eq_head++;
		}

		if (eqe_num == (eq_depth >> 1) - 1)
			break;
	}

	poll_data->eqe_num = eqe_num;
	queue_work(qm->wq, &poll_data->work);
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
}
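
/*
 * Note on the phase-bit protocol used above (a descriptive sketch): a ring
 * entry is "new" while the phase bit the hardware wrote into it matches the
 * phase the driver expects (eqc_phase/cqc_phase). Every time the head wraps
 * back to slot 0 the expected phase flips, which is why
 * qm_get_complete_eqe_num() and qm_cq_head_update() toggle the phase on
 * wrap-around.
 */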
static irqreturn_t qm_eq_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;

	/* Get qp id of completed tasks and re-enable the interrupt */
	qm_get_complete_eqe_num(qm);

	return IRQ_HANDLED;
}

static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_STATUS);
	val &= QM_IFC_INT_STATUS_MASK;
	if (!val)
		return IRQ_NONE;

	if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) {
		dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n");
		return IRQ_HANDLED;
	}

	schedule_work(&qm->cmd_process);

	return IRQ_HANDLED;
}

static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
{
	u32 *addr;

	if (qp->is_in_kernel)
		return;

	addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
	*addr = 1;

	/* make sure setup is completed */
	smp_wmb();
}

static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
{
	struct hisi_qp *qp = &qm->qp_array[qp_id];

	qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET);
	hisi_qm_stop_qp(qp);
	qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET);
}

static void qm_reset_function(struct hisi_qm *qm)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
	struct device *dev = &qm->pdev->dev;
	int ret;

	if (qm_check_dev_error(pf_qm))
		return;

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		dev_err(dev, "reset function not ready\n");
		return;
	}

	ret = hisi_qm_stop(qm, QM_DOWN);
	if (ret) {
		dev_err(dev, "failed to stop qm when reset function\n");
		goto clear_bit;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		dev_err(dev, "failed to start qm when reset function\n");

clear_bit:
	qm_reset_bit_clear(qm);
}

static irqreturn_t qm_aeq_thread(int irq, void *data)
{
	struct hisi_qm *qm = data;
	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
	u16 aeq_depth = qm->aeq_depth;
	u32 type, qp_id;

	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);

	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
		type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) &
			QM_AEQE_TYPE_MASK;
		qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;

		switch (type) {
		case QM_EQ_OVERFLOW:
			dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
			qm_reset_function(qm);
			return IRQ_HANDLED;
		case QM_CQ_OVERFLOW:
			dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
				qp_id);
			fallthrough;
		case QM_CQE_ERROR:
			qm_disable_qp(qm, qp_id);
			break;
		default:
			dev_err(&qm->pdev->dev, "unknown error type %u\n",
				type);
			break;
		}

		if (qm->status.aeq_head == aeq_depth - 1) {
			qm->status.aeqc_phase = !qm->status.aeqc_phase;
			aeqe = qm->aeqe;
			qm->status.aeq_head = 0;
		} else {
			aeqe++;
			qm->status.aeq_head++;
		}
	}

	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);

	return IRQ_HANDLED;
}

static void qm_init_qp_status(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;

	qp_status->sq_tail = 0;
	qp_status->cq_head = 0;
	qp_status->cqc_phase = true;
	atomic_set(&qp_status->used, 0);
}

static void qm_init_prefetch(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 page_type = 0x0;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	switch (PAGE_SIZE) {
	case SZ_4K:
		page_type = 0x0;
		break;
	case SZ_16K:
		page_type = 0x1;
		break;
	case SZ_64K:
		page_type = 0x2;
		break;
	default:
		dev_err(dev, "system page size %lu is not supported, default set to 4KB\n",
			PAGE_SIZE);
	}

	writel(page_type, qm->io_base + QM_PAGE_SIZE);
}
/*
 * acc_shaper_para_calc() gets the IR value from the QoS formula; the return
 * value is the expected calculated QoS.
 *
 * IR = X Mbps: if ir = 1, IR = 100 Mbps; if ir = 10000, IR = 10 Gbps.
 *
 *		    IR_b * (2 ^ IR_u) * 8000
 *	IR(Mbps) = --------------------------
 *		      Tick * (2 ^ IR_s)
 */
static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
{
	return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
		(QM_QOS_TICK * (1 << cir_s));
}

static u32 acc_shaper_calc_cbs_s(u32 ir)
{
	int table_size = ARRAY_SIZE(shaper_cbs_s);
	int i;

	for (i = 0; i < table_size; i++) {
		if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end)
			return shaper_cbs_s[i].val;
	}

	return QM_SHAPER_MIN_CBS_S;
}

static u32 acc_shaper_calc_cir_s(u32 ir)
{
	int table_size = ARRAY_SIZE(shaper_cir_s);
	int i;

	for (i = 0; i < table_size; i++) {
		if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end)
			return shaper_cir_s[i].val;
	}

	return 0;
}

static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
{
	u32 cir_b, cir_u, cir_s, ir_calc;
	u32 error_rate;

	factor->cbs_s = acc_shaper_calc_cbs_s(ir);
	cir_s = acc_shaper_calc_cir_s(ir);

	for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
		for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
			ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);

			error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
			if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
				factor->cir_b = cir_b;
				factor->cir_u = cir_u;
				factor->cir_s = cir_s;
				return 0;
			}
		}
	}

	return -EINVAL;
}
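
/*
 * Worked example for the search above (illustrative, with assumed inputs):
 * for a target of ir = 10000 (10 Gbps), the shaper_cir_s table gives
 * cir_s = 0, and the cir_b/cir_u scan first hits cir_b = 120, cir_u = 3:
 *
 *	ir_calc = (120 * 0x1f40 * (1 << 3)) / (0x300 * (1 << 0))
 *		= 7680000 / 768 = 10000
 *
 * which matches the target exactly (error_rate = 0), so the scan stops there.
 */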
static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
			    u32 number, struct qm_shaper_factor *factor)
{
	u64 tmp = 0;

	if (number > 0) {
		switch (type) {
		case SQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_SQC_VFT_BUF_SIZE |
				      QM_SQC_VFT_SQC_SIZE |
				      QM_SQC_VFT_INDEX_NUMBER |
				      QM_SQC_VFT_VALID |
				      (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
			} else {
				tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
				      QM_SQC_VFT_VALID |
				      (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
			}
			break;
		case CQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_CQC_VFT_BUF_SIZE |
				      QM_CQC_VFT_SQC_SIZE |
				      QM_CQC_VFT_INDEX_NUMBER |
				      QM_CQC_VFT_VALID;
			} else {
				tmp = QM_CQC_VFT_VALID;
			}
			break;
		case SHAPER_VFT:
			if (factor) {
				tmp = factor->cir_b |
				      (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
				      (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
				      (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
				      (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
			}
			break;
		}
	}

	writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
	writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
}

static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
			     u32 fun_num, u32 base, u32 number)
{
	struct qm_shaper_factor *factor = NULL;
	unsigned int val;
	int ret;

	if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
		factor = &qm->factor[fun_num];

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), POLL_PERIOD,
					 POLL_TIMEOUT);
	if (ret)
		return ret;

	writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
	writel(type, qm->io_base + QM_VFT_CFG_TYPE);
	if (type == SHAPER_VFT)
		fun_num |= base << QM_SHAPER_VFT_OFFSET;

	writel(fun_num, qm->io_base + QM_VFT_CFG);

	qm_vft_data_cfg(qm, type, base, number, factor);

	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

	return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}

static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
{
	u32 qos = qm->factor[fun_num].func_qos;
	int ret, i;

	ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
	if (ret) {
		dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
		return ret;
	}
	writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
	for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
		/* The same base number of queues is reused for different alg types */
		ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/* The config should be conducted after qm_dev_mem_reset() */
static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
			      u32 number)
{
	int ret, i;

	for (i = SQC_VFT; i <= CQC_VFT; i++) {
		ret = qm_set_vft_common(qm, i, fun_num, base, number);
		if (ret)
			return ret;
	}

	/* init default shaper qos val */
	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
		ret = qm_shaper_init_vft(qm, fun_num);
		if (ret)
			goto back_sqc_cqc;
	}

	return 0;
back_sqc_cqc:
	for (i = SQC_VFT; i <= CQC_VFT; i++)
		qm_set_vft_common(qm, i, fun_num, 0, 0);

	return ret;
}

static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
{
	u64 sqc_vft;
	int ret;

	ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	*number = (QM_SQC_VFT_NUM_MASK_V2 &
		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return 0;
}
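
/*
 * Illustrative decoding for qm_get_vft_v2() (assumed raw value): if the
 * mailbox returns sqc_vft = (7ULL << QM_SQC_VFT_NUM_SHIFT_V2) |
 * (64ULL << QM_SQC_VFT_BASE_SHIFT_V2), then
 *
 *	*base   = (sqc_vft >> 28) & 0xffff      = 64
 *	*number = ((sqc_vft >> 45) & 0x3ff) + 1 = 8
 *
 * i.e. this function owns queues 64..71.
 */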
static void qm_hw_error_init_v1(struct hisi_qm *qm)
{
	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_cfg(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
	/* clear QM hw residual error source */
	writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);

	/* configure error type */
	writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
	writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
	writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
	writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
}

static void qm_hw_error_init_v2(struct hisi_qm *qm)
{
	u32 irq_unmask;

	qm_hw_error_cfg(qm);

	irq_unmask = ~qm->error_mask;
	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
{
	u32 irq_mask = qm->error_mask;

	irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_init_v3(struct hisi_qm *qm)
{
	u32 irq_unmask;

	qm_hw_error_cfg(qm);

	/* enable stopping master OOO when a hardware error happens */
	writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);

	irq_unmask = ~qm->error_mask;
	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
{
	u32 irq_mask = qm->error_mask;

	irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);

	/* disable stopping master OOO when a hardware error happens */
	writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
}

static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
{
	const struct hisi_qm_hw_error *err;
	struct device *dev = &qm->pdev->dev;
	u32 reg_val, type, vf_num, qp_id;
	int i;

	for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
		err = &qm_hw_error[i];
		if (!(err->int_msk & error_status))
			continue;

		dev_err(dev, "%s [error status=0x%x] found\n",
			err->msg, err->int_msk);

		if (err->int_msk & QM_DB_TIMEOUT) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
			type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
			       QM_DB_TIMEOUT_TYPE_SHIFT;
			vf_num = reg_val & QM_DB_TIMEOUT_VF;
			qp_id = reg_val >> QM_DB_TIMEOUT_QP_SHIFT;
			dev_err(dev, "qm %s doorbell timeout in function %u qp %u\n",
				qm_db_timeout[type], vf_num, qp_id);
		} else if (err->int_msk & QM_OF_FIFO_OF) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
			type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
			       QM_FIFO_OVERFLOW_TYPE_SHIFT;
			vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
			qp_id = reg_val >> QM_FIFO_OVERFLOW_QP_SHIFT;
			if (type < ARRAY_SIZE(qm_fifo_overflow))
				dev_err(dev, "qm %s fifo overflow in function %u qp %u\n",
					qm_fifo_overflow[type], vf_num, qp_id);
			else
				dev_err(dev, "unknown error type\n");
		} else if (err->int_msk & QM_AXI_RRESP_ERR) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF02);
			if (reg_val & QM_AXI_POISON_ERR)
				dev_err(dev, "qm axi poison error happened\n");
		}
	}
}

static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
	u32 error_status, tmp;

	/* read err sts */
	tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
	error_status = qm->error_mask & tmp;

	if (error_status) {
		if (error_status & QM_ECC_MBIT)
			qm->err_status.is_qm_ecc_mbit = true;

		qm_log_hw_error(qm, error_status);
		if (error_status & qm->err_info.qm_reset_mask)
			return ACC_ERR_NEED_RESET;

		writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
		writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
	}

	return ACC_ERR_RECOVERED;
}

static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
{
	struct qm_mailbox mailbox;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret)
		goto err_unlock;

	*msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
	       ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);

err_unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}
static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
{
	u32 val;

	if (qm->fun_type == QM_HW_PF)
		writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);

	val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
	val |= QM_IFC_INT_SOURCE_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
}

static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
{
	struct device *dev = &qm->pdev->dev;
	u32 cmd;
	u64 msg;
	int ret;

	ret = qm_get_mb_cmd(qm, &msg, vf_id);
	if (ret) {
		dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
		return;
	}

	cmd = msg & QM_MB_CMD_DATA_MASK;
	switch (cmd) {
	case QM_VF_PREPARE_FAIL:
		dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
		break;
	case QM_VF_START_FAIL:
		dev_err(dev, "failed to start VF(%u)!\n", vf_id);
		break;
	case QM_VF_PREPARE_DONE:
	case QM_VF_START_DONE:
		break;
	default:
		dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
		break;
	}
}

static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 vfs_num = qm->vfs_num;
	int cnt = 0;
	int ret = 0;
	u64 val;
	u32 i;

	if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
		return 0;

	while (true) {
		val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
		/* All VFs have sent their command to the PF, break */
		if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1))
			break;

		if (++cnt > QM_MAX_PF_WAIT_COUNT) {
			ret = -EBUSY;
			break;
		}

		msleep(QM_WAIT_DST_ACK);
	}

	/* PF checks the VFs' messages */
	for (i = 1; i <= vfs_num; i++) {
		if (val & BIT(i))
			qm_handle_vf_msg(qm, i);
		else
			dev_err(dev, "VF(%u) did not ping PF!\n", i);
	}

	/* PF clears the interrupt to ack the VFs */
	qm_clear_cmd_interrupt(qm, val);

	return ret;
}

static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
{
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_CFG);
	val &= ~QM_IFC_SEND_ALL_VFS;
	val |= fun_num;
	writel(val, qm->io_base + QM_IFC_INT_CFG);

	val = readl(qm->io_base + QM_IFC_INT_SET_P);
	val |= QM_IFC_INT_SET_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SET_P);
}

static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_SET_V);
	val |= QM_IFC_INT_SET_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SET_V);
}

static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
{
	struct device *dev = &qm->pdev->dev;
	struct qm_mailbox mailbox;
	int cnt = 0;
	u64 val;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret) {
		dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
		goto err_unlock;
	}

	qm_trigger_vf_interrupt(qm, fun_num);
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* If the VF responds, the PF notified the VF successfully. */
		if (!(val & BIT(fun_num)))
			goto err_unlock;

		if (++cnt > QM_MAX_PF_WAIT_COUNT) {
			dev_err(dev, "failed to get response from VF(%u)!\n", fun_num);
			ret = -ETIMEDOUT;
			break;
		}
	}

err_unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}
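
/*
 * Summary of the PF -> VF handshake above (descriptive note): the PF puts
 * the command in the mailbox (QM_MB_CMD_SRC), selects the destination in
 * QM_IFC_INT_CFG, raises the interrupt via QM_IFC_INT_SET_P, then polls
 * QM_IFC_READY_STATUS. Reading the loop above, the target's ready bit
 * clearing is taken to mean the VF has consumed the message; otherwise the
 * PF gives up after QM_MAX_PF_WAIT_COUNT polls of QM_WAIT_DST_ACK ms each.
 */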
static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
{
	struct device *dev = &qm->pdev->dev;
	u32 vfs_num = qm->vfs_num;
	struct qm_mailbox mailbox;
	u64 val = 0;
	int cnt = 0;
	int ret;
	u32 i;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
	mutex_lock(&qm->mailbox_lock);
	/* PF sends command to all VFs by mailbox */
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret) {
		dev_err(dev, "failed to send command to VFs!\n");
		mutex_unlock(&qm->mailbox_lock);
		return ret;
	}

	qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* If all VFs acked, PF notified the VFs successfully. */
		if (!(val & GENMASK(vfs_num, 1))) {
			mutex_unlock(&qm->mailbox_lock);
			return 0;
		}

		if (++cnt > QM_MAX_PF_WAIT_COUNT)
			break;
	}

	mutex_unlock(&qm->mailbox_lock);

	/* Check which VF responses timed out. */
	for (i = 1; i <= vfs_num; i++) {
		if (val & BIT(i))
			dev_err(dev, "failed to get response from VF(%u)!\n", i);
	}

	return -ETIMEDOUT;
}

static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
{
	struct qm_mailbox mailbox;
	int cnt = 0;
	u32 val;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret) {
		dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
		goto unlock;
	}

	qm_trigger_pf_interrupt(qm);
	/* Waiting for PF response */
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readl(qm->io_base + QM_IFC_INT_SET_V);
		if (!(val & QM_IFC_INT_STATUS_MASK))
			break;

		if (++cnt > QM_MAX_VF_WAIT_COUNT) {
			ret = -ETIMEDOUT;
			break;
		}
	}

unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}

static int qm_drain_qm(struct hisi_qm *qm)
{
	return hisi_qm_mb(qm, QM_MB_CMD_FLUSH_QM, 0, 0, 0);
}

static int qm_stop_qp(struct hisi_qp *qp)
{
	return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
}

static int qm_set_msi(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;

	if (set) {
		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
				       0);
	} else {
		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
				       ACC_PEH_MSI_DISABLE);
		if (qm->err_status.is_qm_ecc_mbit ||
		    qm->err_status.is_dev_ecc_mbit)
			return 0;

		mdelay(1);
		if (readl(qm->io_base + QM_PEH_DFX_INFO0))
			return -EFAULT;
	}

	return 0;
}

static void qm_wait_msi_finish(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 cmd = ~0;
	int cnt = 0;
	u32 val;
	int ret;

	while (true) {
		pci_read_config_dword(pdev, pdev->msi_cap +
				      PCI_MSI_PENDING_64, &cmd);
		if (!cmd)
			break;

		if (++cnt > MAX_WAIT_COUNTS) {
			pci_warn(pdev, "failed to empty MSI PENDING!\n");
			break;
		}

		udelay(1);
	}

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
					 val, !(val & QM_PEH_DFX_MASK),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret)
		pci_warn(pdev, "failed to empty PEH MSI!\n");

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
					 val, !(val & QM_PEH_MSI_FINISH_MASK),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret)
		pci_warn(pdev, "failed to finish MSI operation!\n");
}
static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;
	int ret = -ETIMEDOUT;
	u32 cmd, i;

	pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
	if (set)
		cmd |= QM_MSI_CAP_ENABLE;
	else
		cmd &= ~QM_MSI_CAP_ENABLE;

	pci_write_config_dword(pdev, pdev->msi_cap, cmd);
	if (set) {
		for (i = 0; i < MAX_WAIT_COUNTS; i++) {
			pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
			if (cmd & QM_MSI_CAP_ENABLE)
				return 0;

			udelay(1);
		}
	} else {
		udelay(WAIT_PERIOD_US_MIN);
		qm_wait_msi_finish(qm);
		ret = 0;
	}

	return ret;
}

static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
	.qm_db = qm_db_v1,
	.hw_error_init = qm_hw_error_init_v1,
	.set_msi = qm_set_msi,
};

static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
	.get_vft = qm_get_vft_v2,
	.qm_db = qm_db_v2,
	.hw_error_init = qm_hw_error_init_v2,
	.hw_error_uninit = qm_hw_error_uninit_v2,
	.hw_error_handle = qm_hw_error_handle_v2,
	.set_msi = qm_set_msi,
};

static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
	.get_vft = qm_get_vft_v2,
	.qm_db = qm_db_v2,
	.hw_error_init = qm_hw_error_init_v3,
	.hw_error_uninit = qm_hw_error_uninit_v3,
	.hw_error_handle = qm_hw_error_handle_v2,
	.set_msi = qm_set_msi_v3,
};

static void *qm_get_avail_sqe(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;
	u16 sq_tail = qp_status->sq_tail;

	if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1))
		return NULL;

	return qp->sqe + sq_tail * qp->qm->sqe_size;
}

static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
{
	u64 *addr;

	/* Use the last 64 bits of DUS to store the reset status. */
	addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET;
	*addr = 0;
}
static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
{
	struct device *dev = &qm->pdev->dev;
	struct hisi_qp *qp;
	int qp_id;

	if (atomic_read(&qm->status.flags) == QM_STOP) {
		dev_info_ratelimited(dev, "failed to create qp as qm is stopped!\n");
		return ERR_PTR(-EPERM);
	}

	if (qm->qp_in_used == qm->qp_num) {
		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
				     qm->qp_num);
		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
		return ERR_PTR(-EBUSY);
	}

	qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
	if (qp_id < 0) {
		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
				     qm->qp_num);
		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
		return ERR_PTR(-EBUSY);
	}

	qp = &qm->qp_array[qp_id];
	hisi_qm_unset_hw_reset(qp);
	memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth);

	qp->event_cb = NULL;
	qp->req_cb = NULL;
	qp->qp_id = qp_id;
	qp->alg_type = alg_type;
	qp->is_in_kernel = true;
	qm->qp_in_used++;

	return qp;
}

/**
 * hisi_qm_create_qp() - Create a queue pair from qm.
 * @qm: The qm we create a qp from.
 * @alg_type: Accelerator specific algorithm type in sqc.
 *
 * Return the created qp, or a negative error code on failure.
 */
static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
{
	struct hisi_qp *qp;
	int ret;

	ret = qm_pm_get_sync(qm);
	if (ret)
		return ERR_PTR(ret);

	down_write(&qm->qps_lock);
	qp = qm_create_qp_nolock(qm, alg_type);
	up_write(&qm->qps_lock);

	if (IS_ERR(qp))
		qm_pm_put_sync(qm);

	return qp;
}
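
/*
 * Illustrative qp lifecycle built from the helpers in this file (a sketch;
 * error handling trimmed, and the calling context is hypothetical):
 *
 *	qp = hisi_qm_create_qp(qm, ALG_TYPE_0);
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 *	ret = hisi_qm_start_qp(qp, 0);     (0: no PASID)
 *	ret = hisi_qp_send(qp, msg);       (-EBUSY if the sq is full)
 *	hisi_qm_stop_qp(qp);
 *	hisi_qm_release_qp(qp);
 */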
/**
 * hisi_qm_release_qp() - Release a qp back to its qm.
 * @qp: The qp we want to release.
 *
 * This function releases the resources of a qp.
 */
static void hisi_qm_release_qp(struct hisi_qp *qp)
{
	struct hisi_qm *qm = qp->qm;

	down_write(&qm->qps_lock);

	qm->qp_in_used--;
	idr_remove(&qm->qp_idr, qp->qp_id);

	up_write(&qm->qps_lock);

	qm_pm_put_sync(qm);
}

static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	struct hisi_qm *qm = qp->qm;
	enum qm_hw_ver ver = qm->ver;
	struct qm_sqc sqc = {0};

	if (ver == QM_HW_V1) {
		sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
		sqc.w8 = cpu_to_le16(qp->sq_depth - 1);
	} else {
		sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
		sqc.w8 = 0; /* rand_qc */
	}
	sqc.w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
	sqc.base_l = cpu_to_le32(lower_32_bits(qp->sqe_dma));
	sqc.base_h = cpu_to_le32(upper_32_bits(qp->sqe_dma));
	sqc.cq_num = cpu_to_le16(qp_id);
	sqc.pasid = cpu_to_le16(pasid);

	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
		sqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
				      QM_QC_PASID_ENABLE_SHIFT);

	return qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 0);
}

static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	struct hisi_qm *qm = qp->qm;
	enum qm_hw_ver ver = qm->ver;
	struct qm_cqc cqc = {0};

	if (ver == QM_HW_V1) {
		cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, QM_QC_CQE_SIZE));
		cqc.w8 = cpu_to_le16(qp->cq_depth - 1);
	} else {
		cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
		cqc.w8 = 0; /* rand_qc */
	}
	/*
	 * Enable request-finishing interrupts by default, so interrupts
	 * will be generated until this is disabled.
	 */
	cqc.dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
	cqc.base_l = cpu_to_le32(lower_32_bits(qp->cqe_dma));
	cqc.base_h = cpu_to_le32(upper_32_bits(qp->cqe_dma));
	cqc.pasid = cpu_to_le16(pasid);

	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
		cqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE);

	return qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 0);
}

static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	int ret;

	qm_init_qp_status(qp);

	ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
	if (ret)
		return ret;

	return qm_cq_ctx_cfg(qp, qp_id, pasid);
}
*/ 2013 int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) 2014 { 2015 struct hisi_qm *qm = qp->qm; 2016 int ret; 2017 2018 down_write(&qm->qps_lock); 2019 ret = qm_start_qp_nolock(qp, arg); 2020 up_write(&qm->qps_lock); 2021 2022 return ret; 2023 } 2024 EXPORT_SYMBOL_GPL(hisi_qm_start_qp); 2025 2026 /** 2027 * qp_stop_fail_cb() - Call the request callback for pending sqes. 2028 * @qp: The qp that failed to stop. 2029 * 2030 * The request callback should be called for each pending sqe, whether the task completed or not. 2031 */ 2032 static void qp_stop_fail_cb(struct hisi_qp *qp) 2033 { 2034 int qp_used = atomic_read(&qp->qp_status.used); 2035 u16 cur_tail = qp->qp_status.sq_tail; 2036 u16 sq_depth = qp->sq_depth; 2037 u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth; 2038 struct hisi_qm *qm = qp->qm; 2039 u16 pos; 2040 int i; 2041 2042 for (i = 0; i < qp_used; i++) { 2043 pos = (i + cur_head) % sq_depth; 2044 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); 2045 atomic_dec(&qp->qp_status.used); 2046 } 2047 } 2048 2049 static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id) 2050 { 2051 struct device *dev = &qm->pdev->dev; 2052 struct qm_sqc sqc; 2053 struct qm_cqc cqc; 2054 int ret, i = 0; 2055 2056 while (++i) { 2057 ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1); 2058 if (ret) { 2059 dev_err_ratelimited(dev, "Failed to dump sqc!\n"); 2060 *state = QM_DUMP_SQC_FAIL; 2061 return ret; 2062 } 2063 2064 ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1); 2065 if (ret) { 2066 dev_err_ratelimited(dev, "Failed to dump cqc!\n"); 2067 *state = QM_DUMP_CQC_FAIL; 2068 return ret; 2069 } 2070 2071 if ((sqc.tail == cqc.tail) && 2072 (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc))) 2073 break; 2074 2075 if (i == MAX_WAIT_COUNTS) { 2076 dev_err(dev, "Failed to empty queue %u!\n", qp_id); 2077 *state = QM_STOP_QUEUE_FAIL; 2078 return -ETIMEDOUT; 2079 } 2080 2081 usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); 2082 } 2083 2084 return 0; 2085 } 2086 2087 /** 2088 * qm_drain_qp() - Drain a qp. 2089 * @qp: The qp we want to drain. 2090 * 2091 * If the device does not support stopping a queue via mailbox, 2092 * determine whether the queue has drained by comparing the tail pointers of 2093 * the sq and cq. 2094 */ 2095 static int qm_drain_qp(struct hisi_qp *qp) 2096 { 2097 struct hisi_qm *qm = qp->qm; 2098 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); 2099 u32 state = 0; 2100 int ret; 2101 2102 /* No need to judge if master OOO is blocked. */ 2103 if (qm_check_dev_error(pf_qm)) 2104 return 0; 2105 2106 /* HW V3 supports draining a qp by the device */ 2107 if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) { 2108 ret = qm_stop_qp(qp); 2109 if (ret) { 2110 dev_err(&qm->pdev->dev, "Failed to stop qp!\n"); 2111 state = QM_STOP_QUEUE_FAIL; 2112 goto set_dev_state; 2113 } 2114 return ret; 2115 } 2116 2117 ret = qm_wait_qp_empty(qm, &state, qp->qp_id); 2118 if (ret) 2119 goto set_dev_state; 2120 2121 return 0; 2122 2123 set_dev_state: 2124 if (qm->debug.dev_dfx.dev_timeout) 2125 qm->debug.dev_dfx.dev_state = state; 2126 2127 return ret; 2128 } 2129 2130 static void qm_stop_qp_nolock(struct hisi_qp *qp) 2131 { 2132 struct hisi_qm *qm = qp->qm; 2133 struct device *dev = &qm->pdev->dev; 2134 int ret; 2135 2136 /* 2137 * It is allowed to stop and release a qp during reset. If the qp is 2138 * stopped during reset but still needs to be released afterwards, the 2139 * is_resetting flag should be set to false so that this qp will not 2140 * be restarted after the reset.
*/ 2142 if (atomic_read(&qp->qp_status.flags) != QP_START) { 2143 qp->is_resetting = false; 2144 return; 2145 } 2146 2147 atomic_set(&qp->qp_status.flags, QP_STOP); 2148 2149 /* V3 supports stopping the qp directly when preparing for FLR */ 2150 if (qm->ver < QM_HW_V3 || qm->status.stop_reason == QM_NORMAL) { 2151 ret = qm_drain_qp(qp); 2152 if (ret) 2153 dev_err(dev, "Failed to drain out data for stopping qp(%u)!\n", qp->qp_id); 2154 } 2155 2156 flush_workqueue(qm->wq); 2157 if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used))) 2158 qp_stop_fail_cb(qp); 2159 2160 dev_dbg(dev, "stop queue %u!\n", qp->qp_id); 2161 } 2162 2163 /** 2164 * hisi_qm_stop_qp() - Stop a qp in qm. 2165 * @qp: The qp we want to stop. 2166 * 2167 * This function is the reverse of hisi_qm_start_qp. 2168 */ 2169 void hisi_qm_stop_qp(struct hisi_qp *qp) 2170 { 2171 down_write(&qp->qm->qps_lock); 2172 qm_stop_qp_nolock(qp); 2173 up_write(&qp->qm->qps_lock); 2174 } 2175 EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); 2176 2177 /** 2178 * hisi_qp_send() - Queue up a task in the hardware queue. 2179 * @qp: The qp in which to put the message. 2180 * @msg: The message. 2181 * 2182 * This function will return -EBUSY if the qp is currently full, and -EAGAIN 2183 * if the qp's related qm is resetting. 2184 * 2185 * Note: This function may run concurrently with qm_irq_thread and ACC reset; 2186 * it has no race with qm_irq_thread. However, an ACC reset may happen during 2187 * hisi_qp_send; for performance reasons no lock is taken here. This may cause 2188 * the current qm_db send to fail, or the sent sqe to not be received. The QM 2189 * sync/async receive functions should handle the erroneous sqe, and the ACC 2190 * reset done function should clear the used sqes to 0. 2191 */ 2192 int hisi_qp_send(struct hisi_qp *qp, const void *msg) 2193 { 2194 struct hisi_qp_status *qp_status = &qp->qp_status; 2195 u16 sq_tail = qp_status->sq_tail; 2196 u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth; 2197 void *sqe = qm_get_avail_sqe(qp); 2198 2199 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || 2200 atomic_read(&qp->qm->status.flags) == QM_STOP || 2201 qp->is_resetting)) { 2202 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); 2203 return -EAGAIN; 2204 } 2205 2206 if (!sqe) 2207 return -EBUSY; 2208 2209 memcpy(sqe, msg, qp->qm->sqe_size); 2210 2211 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); 2212 atomic_inc(&qp->qp_status.used); 2213 qp_status->sq_tail = sq_tail_next; 2214 2215 return 0; 2216 } 2217 EXPORT_SYMBOL_GPL(hisi_qp_send); 2218 2219 static void hisi_qm_cache_wb(struct hisi_qm *qm) 2220 { 2221 unsigned int val; 2222 2223 if (qm->ver == QM_HW_V1) 2224 return; 2225 2226 writel(0x1, qm->io_base + QM_CACHE_WB_START); 2227 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, 2228 val, val & BIT(0), POLL_PERIOD, 2229 POLL_TIMEOUT)) 2230 dev_err(&qm->pdev->dev, "QM writeback sqc cache failed!\n"); 2231 } 2232 2233 static void qm_qp_event_notifier(struct hisi_qp *qp) 2234 { 2235 wake_up_interruptible(&qp->uacce_q->wait); 2236 } 2237 2238 /* This function returns the number of free qps in the qm.
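 * The value backs uacce's "available_instances" sysfs attribute.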
*/ 2239 static int hisi_qm_get_available_instances(struct uacce_device *uacce) 2240 { 2241 struct hisi_qm *qm = uacce->priv; 2242 int ret; 2243 2244 down_read(&qm->qps_lock); 2245 ret = qm->qp_num - qm->qp_in_used; 2246 up_read(&qm->qps_lock); 2247 2248 return ret; 2249 } 2250 2251 static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset) 2252 { 2253 int i; 2254 2255 for (i = 0; i < qm->qp_num; i++) 2256 qm_set_qp_disable(&qm->qp_array[i], offset); 2257 } 2258 2259 static int hisi_qm_uacce_get_queue(struct uacce_device *uacce, 2260 unsigned long arg, 2261 struct uacce_queue *q) 2262 { 2263 struct hisi_qm *qm = uacce->priv; 2264 struct hisi_qp *qp; 2265 u8 alg_type = 0; 2266 2267 qp = hisi_qm_create_qp(qm, alg_type); 2268 if (IS_ERR(qp)) 2269 return PTR_ERR(qp); 2270 2271 q->priv = qp; 2272 q->uacce = uacce; 2273 qp->uacce_q = q; 2274 qp->event_cb = qm_qp_event_notifier; 2275 qp->pasid = arg; 2276 qp->is_in_kernel = false; 2277 2278 return 0; 2279 } 2280 2281 static void hisi_qm_uacce_put_queue(struct uacce_queue *q) 2282 { 2283 struct hisi_qp *qp = q->priv; 2284 2285 hisi_qm_release_qp(qp); 2286 } 2287 2288 /* map sq/cq/doorbell to user space */ 2289 static int hisi_qm_uacce_mmap(struct uacce_queue *q, 2290 struct vm_area_struct *vma, 2291 struct uacce_qfile_region *qfr) 2292 { 2293 struct hisi_qp *qp = q->priv; 2294 struct hisi_qm *qm = qp->qm; 2295 resource_size_t phys_base = qm->db_phys_base + 2296 qp->qp_id * qm->db_interval; 2297 size_t sz = vma->vm_end - vma->vm_start; 2298 struct pci_dev *pdev = qm->pdev; 2299 struct device *dev = &pdev->dev; 2300 unsigned long vm_pgoff; 2301 int ret; 2302 2303 switch (qfr->type) { 2304 case UACCE_QFRT_MMIO: 2305 if (qm->ver == QM_HW_V1) { 2306 if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR) 2307 return -EINVAL; 2308 } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { 2309 if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR + 2310 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE)) 2311 return -EINVAL; 2312 } else { 2313 if (sz > qm->db_interval) 2314 return -EINVAL; 2315 } 2316 2317 vm_flags_set(vma, VM_IO); 2318 2319 return remap_pfn_range(vma, vma->vm_start, 2320 phys_base >> PAGE_SHIFT, 2321 sz, pgprot_noncached(vma->vm_page_prot)); 2322 case UACCE_QFRT_DUS: 2323 if (sz != qp->qdma.size) 2324 return -EINVAL; 2325 2326 /* 2327 * dma_mmap_coherent() requires vm_pgoff to be 0; 2328 * restore vm_pgoff to its initial value after mmap() 2329 */ 2330 vm_pgoff = vma->vm_pgoff; 2331 vma->vm_pgoff = 0; 2332 ret = dma_mmap_coherent(dev, vma, qp->qdma.va, 2333 qp->qdma.dma, sz); 2334 vma->vm_pgoff = vm_pgoff; 2335 return ret; 2336 2337 default: 2338 return -EINVAL; 2339 } 2340 } 2341 2342 static int hisi_qm_uacce_start_queue(struct uacce_queue *q) 2343 { 2344 struct hisi_qp *qp = q->priv; 2345 2346 return hisi_qm_start_qp(qp, qp->pasid); 2347 } 2348 2349 static void hisi_qm_uacce_stop_queue(struct uacce_queue *q) 2350 { 2351 struct hisi_qp *qp = q->priv; 2352 struct hisi_qm *qm = qp->qm; 2353 struct qm_dev_dfx *dev_dfx = &qm->debug.dev_dfx; 2354 u32 i = 0; 2355 2356 hisi_qm_stop_qp(qp); 2357 2358 if (!dev_dfx->dev_timeout || !dev_dfx->dev_state) 2359 return; 2360 2361 /* 2362 * After the queue fails to be stopped, 2363 * wait for a period of time before releasing the queue.
*/ 2365 while (++i) { 2366 msleep(WAIT_PERIOD); 2367 2368 /* Since dev_timeout may be modified, check i >= dev_timeout */ 2369 if (i >= dev_dfx->dev_timeout) { 2370 dev_err(&qm->pdev->dev, "Stop q %u timeout, state %u\n", 2371 qp->qp_id, dev_dfx->dev_state); 2372 dev_dfx->dev_state = QM_FINISH_WAIT; 2373 break; 2374 } 2375 } 2376 } 2377 2378 static int hisi_qm_is_q_updated(struct uacce_queue *q) 2379 { 2380 struct hisi_qp *qp = q->priv; 2381 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; 2382 int updated = 0; 2383 2384 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { 2385 /* make sure to read data from memory */ 2386 dma_rmb(); 2387 qm_cq_head_update(qp); 2388 cqe = qp->cqe + qp->qp_status.cq_head; 2389 updated = 1; 2390 } 2391 2392 return updated; 2393 } 2394 2395 static void qm_set_sqctype(struct uacce_queue *q, u16 type) 2396 { 2397 struct hisi_qm *qm = q->uacce->priv; 2398 struct hisi_qp *qp = q->priv; 2399 2400 down_write(&qm->qps_lock); 2401 qp->alg_type = type; 2402 up_write(&qm->qps_lock); 2403 } 2404 2405 static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd, 2406 unsigned long arg) 2407 { 2408 struct hisi_qp *qp = q->priv; 2409 struct hisi_qp_info qp_info; 2410 struct hisi_qp_ctx qp_ctx; 2411 2412 if (cmd == UACCE_CMD_QM_SET_QP_CTX) { 2413 if (copy_from_user(&qp_ctx, (void __user *)arg, 2414 sizeof(struct hisi_qp_ctx))) 2415 return -EFAULT; 2416 2417 if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1) 2418 return -EINVAL; 2419 2420 qm_set_sqctype(q, qp_ctx.qc_type); 2421 qp_ctx.id = qp->qp_id; 2422 2423 if (copy_to_user((void __user *)arg, &qp_ctx, 2424 sizeof(struct hisi_qp_ctx))) 2425 return -EFAULT; 2426 2427 return 0; 2428 } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) { 2429 if (copy_from_user(&qp_info, (void __user *)arg, 2430 sizeof(struct hisi_qp_info))) 2431 return -EFAULT; 2432 2433 qp_info.sqe_size = qp->qm->sqe_size; 2434 qp_info.sq_depth = qp->sq_depth; 2435 qp_info.cq_depth = qp->cq_depth; 2436 2437 if (copy_to_user((void __user *)arg, &qp_info, 2438 sizeof(struct hisi_qp_info))) 2439 return -EFAULT; 2440 2441 return 0; 2442 } 2443 2444 return -EINVAL; 2445 } 2446 2447 /** 2448 * qm_hw_err_isolate() - Try to set the isolation status of the uacce device 2449 * according to user's configuration of the error threshold. 2450 * @qm: the uacce device 2451 */ 2452 static int qm_hw_err_isolate(struct hisi_qm *qm) 2453 { 2454 struct qm_hw_err *err, *tmp, *hw_err; 2455 struct qm_err_isolate *isolate; 2456 u32 count = 0; 2457 2458 isolate = &qm->isolate_data; 2459 2460 #define SECONDS_PER_HOUR 3600 2461 2462 /* All the hw errs are processed by the PF driver */ 2463 if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold) 2464 return 0; 2465 2466 hw_err = kzalloc(sizeof(*hw_err), GFP_KERNEL); 2467 if (!hw_err) 2468 return -ENOMEM; 2469 2470 /* 2471 * Time-stamp every slot AER error, then check the AER error log when the 2472 * next device AER error occurs. If the number of slot AER errors within 2473 * one hour exceeds the configured error threshold, the isolated state is 2474 * set to true, and AER error logs older than one hour are cleared.
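 * For example, with err_threshold = 3, a new AER error that finds three or
 * more earlier errors logged within the last hour marks the device as
 * isolated.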
2475 */ 2476 mutex_lock(&isolate->isolate_lock); 2477 hw_err->timestamp = jiffies; 2478 list_for_each_entry_safe(err, tmp, &isolate->qm_hw_errs, list) { 2479 if ((hw_err->timestamp - err->timestamp) / HZ > 2480 SECONDS_PER_HOUR) { 2481 list_del(&err->list); 2482 kfree(err); 2483 } else { 2484 count++; 2485 } 2486 } 2487 list_add(&hw_err->list, &isolate->qm_hw_errs); 2488 mutex_unlock(&isolate->isolate_lock); 2489 2490 if (count >= isolate->err_threshold) 2491 isolate->is_isolate = true; 2492 2493 return 0; 2494 } 2495 2496 static void qm_hw_err_destroy(struct hisi_qm *qm) 2497 { 2498 struct qm_hw_err *err, *tmp; 2499 2500 mutex_lock(&qm->isolate_data.isolate_lock); 2501 list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) { 2502 list_del(&err->list); 2503 kfree(err); 2504 } 2505 mutex_unlock(&qm->isolate_data.isolate_lock); 2506 } 2507 2508 static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce) 2509 { 2510 struct hisi_qm *qm = uacce->priv; 2511 struct hisi_qm *pf_qm; 2512 2513 if (uacce->is_vf) 2514 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); 2515 else 2516 pf_qm = qm; 2517 2518 return pf_qm->isolate_data.is_isolate ? 2519 UACCE_DEV_ISOLATE : UACCE_DEV_NORMAL; 2520 } 2521 2522 static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num) 2523 { 2524 struct hisi_qm *qm = uacce->priv; 2525 2526 /* Must be set by PF */ 2527 if (uacce->is_vf) 2528 return -EPERM; 2529 2530 if (qm->isolate_data.is_isolate) 2531 return -EPERM; 2532 2533 qm->isolate_data.err_threshold = num; 2534 2535 /* After the policy is updated, need to reset the hardware err list */ 2536 qm_hw_err_destroy(qm); 2537 2538 return 0; 2539 } 2540 2541 static u32 hisi_qm_isolate_threshold_read(struct uacce_device *uacce) 2542 { 2543 struct hisi_qm *qm = uacce->priv; 2544 struct hisi_qm *pf_qm; 2545 2546 if (uacce->is_vf) { 2547 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); 2548 return pf_qm->isolate_data.err_threshold; 2549 } 2550 2551 return qm->isolate_data.err_threshold; 2552 } 2553 2554 static const struct uacce_ops uacce_qm_ops = { 2555 .get_available_instances = hisi_qm_get_available_instances, 2556 .get_queue = hisi_qm_uacce_get_queue, 2557 .put_queue = hisi_qm_uacce_put_queue, 2558 .start_queue = hisi_qm_uacce_start_queue, 2559 .stop_queue = hisi_qm_uacce_stop_queue, 2560 .mmap = hisi_qm_uacce_mmap, 2561 .ioctl = hisi_qm_uacce_ioctl, 2562 .is_q_updated = hisi_qm_is_q_updated, 2563 .get_isolate_state = hisi_qm_get_isolate_state, 2564 .isolate_err_threshold_write = hisi_qm_isolate_threshold_write, 2565 .isolate_err_threshold_read = hisi_qm_isolate_threshold_read, 2566 }; 2567 2568 static void qm_remove_uacce(struct hisi_qm *qm) 2569 { 2570 struct uacce_device *uacce = qm->uacce; 2571 2572 if (qm->use_sva) { 2573 qm_hw_err_destroy(qm); 2574 uacce_remove(uacce); 2575 qm->uacce = NULL; 2576 } 2577 } 2578 2579 static int qm_alloc_uacce(struct hisi_qm *qm) 2580 { 2581 struct pci_dev *pdev = qm->pdev; 2582 struct uacce_device *uacce; 2583 unsigned long mmio_page_nr; 2584 unsigned long dus_page_nr; 2585 u16 sq_depth, cq_depth; 2586 struct uacce_interface interface = { 2587 .flags = UACCE_DEV_SVA, 2588 .ops = &uacce_qm_ops, 2589 }; 2590 int ret; 2591 2592 ret = strscpy(interface.name, dev_driver_string(&pdev->dev), 2593 sizeof(interface.name)); 2594 if (ret < 0) 2595 return -ENAMETOOLONG; 2596 2597 uacce = uacce_alloc(&pdev->dev, &interface); 2598 if (IS_ERR(uacce)) 2599 return PTR_ERR(uacce); 2600 2601 if (uacce->flags & UACCE_DEV_SVA) { 2602 qm->use_sva = true; 2603 
} else { 2604 /* only consider sva case */ 2605 qm_remove_uacce(qm); 2606 return -EINVAL; 2607 } 2608 2609 uacce->is_vf = pdev->is_virtfn; 2610 uacce->priv = qm; 2611 2612 if (qm->ver == QM_HW_V1) 2613 uacce->api_ver = HISI_QM_API_VER_BASE; 2614 else if (qm->ver == QM_HW_V2) 2615 uacce->api_ver = HISI_QM_API_VER2_BASE; 2616 else 2617 uacce->api_ver = HISI_QM_API_VER3_BASE; 2618 2619 if (qm->ver == QM_HW_V1) 2620 mmio_page_nr = QM_DOORBELL_PAGE_NR; 2621 else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) 2622 mmio_page_nr = QM_DOORBELL_PAGE_NR + 2623 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE; 2624 else 2625 mmio_page_nr = qm->db_interval / PAGE_SIZE; 2626 2627 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); 2628 2629 /* Add one more page for device or qp status */ 2630 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth + 2631 sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >> 2632 PAGE_SHIFT; 2633 2634 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr; 2635 uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr; 2636 2637 qm->uacce = uacce; 2638 INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs); 2639 mutex_init(&qm->isolate_data.isolate_lock); 2640 2641 return 0; 2642 } 2643 2644 /** 2645 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If 2646 * there are users on the QM, return failure without doing anything. 2647 * @qm: The qm to be frozen. 2648 * 2649 * This function freezes the QM, after which SRIOV can be disabled. 2650 */ 2651 static int qm_frozen(struct hisi_qm *qm) 2652 { 2653 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) 2654 return 0; 2655 2656 down_write(&qm->qps_lock); 2657 2658 if (!qm->qp_in_used) { 2659 qm->qp_in_used = qm->qp_num; 2660 up_write(&qm->qps_lock); 2661 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl); 2662 return 0; 2663 } 2664 2665 up_write(&qm->qps_lock); 2666 2667 return -EBUSY; 2668 } 2669 2670 static int qm_try_frozen_vfs(struct pci_dev *pdev, 2671 struct hisi_qm_list *qm_list) 2672 { 2673 struct hisi_qm *qm, *vf_qm; 2674 struct pci_dev *dev; 2675 int ret = 0; 2676 2677 if (!qm_list || !pdev) 2678 return -EINVAL; 2679 2680 /* Try to freeze all the VFs when disabling SRIOV */ 2681 mutex_lock(&qm_list->lock); 2682 list_for_each_entry(qm, &qm_list->list, list) { 2683 dev = qm->pdev; 2684 if (dev == pdev) 2685 continue; 2686 if (pci_physfn(dev) == pdev) { 2687 vf_qm = pci_get_drvdata(dev); 2688 ret = qm_frozen(vf_qm); 2689 if (ret) 2690 goto frozen_fail; 2691 } 2692 } 2693 2694 frozen_fail: 2695 mutex_unlock(&qm_list->lock); 2696 2697 return ret; 2698 } 2699 2700 /** 2701 * hisi_qm_wait_task_finish() - Wait until the tasks are finished 2702 * when removing the driver. 2703 * @qm: The qm that needs to wait for its tasks to finish. 2704 * @qm_list: The list of all available devices.
2705 */ 2706 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list) 2707 { 2708 while (qm_frozen(qm) || 2709 ((qm->fun_type == QM_HW_PF) && 2710 qm_try_frozen_vfs(qm->pdev, qm_list))) { 2711 msleep(WAIT_PERIOD); 2712 } 2713 2714 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) || 2715 test_bit(QM_RESETTING, &qm->misc_ctl)) 2716 msleep(WAIT_PERIOD); 2717 2718 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 2719 flush_work(&qm->cmd_process); 2720 2721 udelay(REMOVE_WAIT_DELAY); 2722 } 2723 EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish); 2724 2725 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) 2726 { 2727 struct device *dev = &qm->pdev->dev; 2728 struct qm_dma *qdma; 2729 int i; 2730 2731 for (i = num - 1; i >= 0; i--) { 2732 qdma = &qm->qp_array[i].qdma; 2733 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); 2734 kfree(qm->poll_data[i].qp_finish_id); 2735 } 2736 2737 kfree(qm->poll_data); 2738 kfree(qm->qp_array); 2739 } 2740 2741 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, 2742 u16 sq_depth, u16 cq_depth) 2743 { 2744 struct device *dev = &qm->pdev->dev; 2745 size_t off = qm->sqe_size * sq_depth; 2746 struct hisi_qp *qp; 2747 int ret = -ENOMEM; 2748 2749 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16), 2750 GFP_KERNEL); 2751 if (!qm->poll_data[id].qp_finish_id) 2752 return -ENOMEM; 2753 2754 qp = &qm->qp_array[id]; 2755 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, 2756 GFP_KERNEL); 2757 if (!qp->qdma.va) 2758 goto err_free_qp_finish_id; 2759 2760 qp->sqe = qp->qdma.va; 2761 qp->sqe_dma = qp->qdma.dma; 2762 qp->cqe = qp->qdma.va + off; 2763 qp->cqe_dma = qp->qdma.dma + off; 2764 qp->qdma.size = dma_size; 2765 qp->sq_depth = sq_depth; 2766 qp->cq_depth = cq_depth; 2767 qp->qm = qm; 2768 qp->qp_id = id; 2769 2770 return 0; 2771 2772 err_free_qp_finish_id: 2773 kfree(qm->poll_data[id].qp_finish_id); 2774 return ret; 2775 } 2776 2777 static void hisi_qm_pre_init(struct hisi_qm *qm) 2778 { 2779 struct pci_dev *pdev = qm->pdev; 2780 2781 if (qm->ver == QM_HW_V1) 2782 qm->ops = &qm_hw_ops_v1; 2783 else if (qm->ver == QM_HW_V2) 2784 qm->ops = &qm_hw_ops_v2; 2785 else 2786 qm->ops = &qm_hw_ops_v3; 2787 2788 pci_set_drvdata(pdev, qm); 2789 mutex_init(&qm->mailbox_lock); 2790 init_rwsem(&qm->qps_lock); 2791 qm->qp_in_used = 0; 2792 if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { 2793 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) 2794 dev_info(&pdev->dev, "_PS0 and _PR0 are not defined"); 2795 } 2796 } 2797 2798 static void qm_cmd_uninit(struct hisi_qm *qm) 2799 { 2800 u32 val; 2801 2802 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 2803 return; 2804 2805 val = readl(qm->io_base + QM_IFC_INT_MASK); 2806 val |= QM_IFC_INT_DISABLE; 2807 writel(val, qm->io_base + QM_IFC_INT_MASK); 2808 } 2809 2810 static void qm_cmd_init(struct hisi_qm *qm) 2811 { 2812 u32 val; 2813 2814 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 2815 return; 2816 2817 /* Clear communication interrupt source */ 2818 qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR); 2819 2820 /* Enable pf to vf communication reg. 
*/ 2821 val = readl(qm->io_base + QM_IFC_INT_MASK); 2822 val &= ~QM_IFC_INT_DISABLE; 2823 writel(val, qm->io_base + QM_IFC_INT_MASK); 2824 } 2825 2826 static void qm_put_pci_res(struct hisi_qm *qm) 2827 { 2828 struct pci_dev *pdev = qm->pdev; 2829 2830 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) 2831 iounmap(qm->db_io_base); 2832 2833 iounmap(qm->io_base); 2834 pci_release_mem_regions(pdev); 2835 } 2836 2837 static void hisi_qm_pci_uninit(struct hisi_qm *qm) 2838 { 2839 struct pci_dev *pdev = qm->pdev; 2840 2841 pci_free_irq_vectors(pdev); 2842 qm_put_pci_res(qm); 2843 pci_disable_device(pdev); 2844 } 2845 2846 static void hisi_qm_set_state(struct hisi_qm *qm, u8 state) 2847 { 2848 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF) 2849 writel(state, qm->io_base + QM_VF_STATE); 2850 } 2851 2852 static void hisi_qm_uninit_work(struct hisi_qm *qm) 2853 { 2854 destroy_workqueue(qm->wq); 2855 } 2856 2857 static void hisi_qm_free_rsv_buf(struct hisi_qm *qm) 2858 { 2859 struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma; 2860 struct device *dev = &qm->pdev->dev; 2861 2862 dma_free_coherent(dev, xqc_dma->size, xqc_dma->va, xqc_dma->dma); 2863 } 2864 2865 static void hisi_qm_memory_uninit(struct hisi_qm *qm) 2866 { 2867 struct device *dev = &qm->pdev->dev; 2868 2869 hisi_qp_memory_uninit(qm, qm->qp_num); 2870 hisi_qm_free_rsv_buf(qm); 2871 if (qm->qdma.va) { 2872 hisi_qm_cache_wb(qm); 2873 dma_free_coherent(dev, qm->qdma.size, 2874 qm->qdma.va, qm->qdma.dma); 2875 } 2876 2877 idr_destroy(&qm->qp_idr); 2878 2879 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 2880 kfree(qm->factor); 2881 } 2882 2883 /** 2884 * hisi_qm_uninit() - Uninitialize qm. 2885 * @qm: The qm to be uninitialized. 2886 * 2887 * This function uninitializes the qm's related device resources. 2888 */ 2889 void hisi_qm_uninit(struct hisi_qm *qm) 2890 { 2891 qm_cmd_uninit(qm); 2892 hisi_qm_uninit_work(qm); 2893 2894 down_write(&qm->qps_lock); 2895 hisi_qm_memory_uninit(qm); 2896 hisi_qm_set_state(qm, QM_NOT_READY); 2897 up_write(&qm->qps_lock); 2898 2899 qm_remove_uacce(qm); 2900 qm_irqs_unregister(qm); 2901 hisi_qm_pci_uninit(qm); 2902 } 2903 EXPORT_SYMBOL_GPL(hisi_qm_uninit); 2904 2905 /** 2906 * hisi_qm_get_vft() - Get vft from a qm. 2907 * @qm: The qm we want to get its vft. 2908 * @base: The base number of queue in vft. 2909 * @number: The number of queues in vft. 2910 * 2911 * We can allocate multiple queues to a qm by configuring the virtual function 2912 * table. We get the related configuration by this function. Normally, we call this 2913 * function in the VF driver to get the queue information. 2914 * 2915 * qm hw v1 does not support this interface. 2916 */ 2917 static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) 2918 { 2919 if (!base || !number) 2920 return -EINVAL; 2921 2922 if (!qm->ops->get_vft) { 2923 dev_err(&qm->pdev->dev, "vft read is not supported!\n"); 2924 return -EINVAL; 2925 } 2926 2927 return qm->ops->get_vft(qm, base, number); 2928 } 2929 2930 /** 2931 * hisi_qm_set_vft() - Set vft to a qm. 2932 * @qm: The qm we want to set its vft. 2933 * @fun_num: The function number. 2934 * @base: The base number of queue in vft. 2935 * @number: The number of queues in vft. 2936 * 2937 * This function is always called in the PF driver; it is used to assign queues 2938 * among the PF and VFs.
2939 * 2940 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1) 2941 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1) 2942 * (VF function number 0x2) 2943 */ 2944 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, 2945 u32 number) 2946 { 2947 u32 max_q_num = qm->ctrl_qp_num; 2948 2949 if (base >= max_q_num || number > max_q_num || 2950 (base + number) > max_q_num) 2951 return -EINVAL; 2952 2953 return qm_set_sqc_cqc_vft(qm, fun_num, base, number); 2954 } 2955 2956 static void qm_init_eq_aeq_status(struct hisi_qm *qm) 2957 { 2958 struct hisi_qm_status *status = &qm->status; 2959 2960 status->eq_head = 0; 2961 status->aeq_head = 0; 2962 status->eqc_phase = true; 2963 status->aeqc_phase = true; 2964 } 2965 2966 static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm) 2967 { 2968 /* Clear eq/aeq interrupt source */ 2969 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); 2970 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); 2971 2972 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); 2973 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); 2974 } 2975 2976 static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm) 2977 { 2978 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); 2979 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); 2980 } 2981 2982 static int qm_eq_ctx_cfg(struct hisi_qm *qm) 2983 { 2984 struct qm_eqc eqc = {0}; 2985 2986 eqc.base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); 2987 eqc.base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); 2988 if (qm->ver == QM_HW_V1) 2989 eqc.dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); 2990 eqc.dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); 2991 2992 return qm_set_and_get_xqc(qm, QM_MB_CMD_EQC, &eqc, 0, 0); 2993 } 2994 2995 static int qm_aeq_ctx_cfg(struct hisi_qm *qm) 2996 { 2997 struct qm_aeqc aeqc = {0}; 2998 2999 aeqc.base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); 3000 aeqc.base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); 3001 aeqc.dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); 3002 3003 return qm_set_and_get_xqc(qm, QM_MB_CMD_AEQC, &aeqc, 0, 0); 3004 } 3005 3006 static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm) 3007 { 3008 struct device *dev = &qm->pdev->dev; 3009 int ret; 3010 3011 qm_init_eq_aeq_status(qm); 3012 3013 ret = qm_eq_ctx_cfg(qm); 3014 if (ret) { 3015 dev_err(dev, "Set eqc failed!\n"); 3016 return ret; 3017 } 3018 3019 return qm_aeq_ctx_cfg(qm); 3020 } 3021 3022 static int __hisi_qm_start(struct hisi_qm *qm) 3023 { 3024 int ret; 3025 3026 WARN_ON(!qm->qdma.va); 3027 3028 if (qm->fun_type == QM_HW_PF) { 3029 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); 3030 if (ret) 3031 return ret; 3032 } 3033 3034 ret = qm_eq_aeq_ctx_cfg(qm); 3035 if (ret) 3036 return ret; 3037 3038 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); 3039 if (ret) 3040 return ret; 3041 3042 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); 3043 if (ret) 3044 return ret; 3045 3046 qm_init_prefetch(qm); 3047 qm_enable_eq_aeq_interrupts(qm); 3048 3049 return 0; 3050 } 3051 3052 /** 3053 * hisi_qm_start() - start qm 3054 * @qm: The qm to be started. 3055 * 3056 * This function starts a qm, then we can allocate qp from this qm. 
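 *
 * Typical driver-side ordering (sketch; driver-specific setup and error
 * handling omitted):
 *
 *	hisi_qm_init(qm);
 *	/* ... driver-specific configuration ... */
 *	hisi_qm_start(qm);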
3057 */ 3058 int hisi_qm_start(struct hisi_qm *qm) 3059 { 3060 struct device *dev = &qm->pdev->dev; 3061 int ret = 0; 3062 3063 down_write(&qm->qps_lock); 3064 3065 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num); 3066 3067 if (!qm->qp_num) { 3068 dev_err(dev, "qp_num should not be 0\n"); 3069 ret = -EINVAL; 3070 goto err_unlock; 3071 } 3072 3073 ret = __hisi_qm_start(qm); 3074 if (ret) 3075 goto err_unlock; 3076 3077 atomic_set(&qm->status.flags, QM_WORK); 3078 hisi_qm_set_state(qm, QM_READY); 3079 3080 err_unlock: 3081 up_write(&qm->qps_lock); 3082 return ret; 3083 } 3084 EXPORT_SYMBOL_GPL(hisi_qm_start); 3085 3086 static int qm_restart(struct hisi_qm *qm) 3087 { 3088 struct device *dev = &qm->pdev->dev; 3089 struct hisi_qp *qp; 3090 int ret, i; 3091 3092 ret = hisi_qm_start(qm); 3093 if (ret < 0) 3094 return ret; 3095 3096 down_write(&qm->qps_lock); 3097 for (i = 0; i < qm->qp_num; i++) { 3098 qp = &qm->qp_array[i]; 3099 if (atomic_read(&qp->qp_status.flags) == QP_STOP && 3100 qp->is_resetting == true) { 3101 ret = qm_start_qp_nolock(qp, 0); 3102 if (ret < 0) { 3103 dev_err(dev, "Failed to start qp%d!\n", i); 3104 3105 up_write(&qm->qps_lock); 3106 return ret; 3107 } 3108 qp->is_resetting = false; 3109 } 3110 } 3111 up_write(&qm->qps_lock); 3112 3113 return 0; 3114 } 3115 3116 /* Stop started qps in reset flow */ 3117 static void qm_stop_started_qp(struct hisi_qm *qm) 3118 { 3119 struct hisi_qp *qp; 3120 int i; 3121 3122 for (i = 0; i < qm->qp_num; i++) { 3123 qp = &qm->qp_array[i]; 3124 if (atomic_read(&qp->qp_status.flags) == QP_START) { 3125 qp->is_resetting = true; 3126 qm_stop_qp_nolock(qp); 3127 } 3128 } 3129 } 3130 3131 /** 3132 * qm_clear_queues() - Clear all queues memory in a qm. 3133 * @qm: The qm in which the queues will be cleared. 3134 * 3135 * This function clears all queues memory in a qm. Reset of accelerator can 3136 * use this to clear queues. 3137 */ 3138 static void qm_clear_queues(struct hisi_qm *qm) 3139 { 3140 struct hisi_qp *qp; 3141 int i; 3142 3143 for (i = 0; i < qm->qp_num; i++) { 3144 qp = &qm->qp_array[i]; 3145 if (qp->is_in_kernel && qp->is_resetting) 3146 memset(qp->qdma.va, 0, qp->qdma.size); 3147 } 3148 3149 memset(qm->qdma.va, 0, qm->qdma.size); 3150 } 3151 3152 /** 3153 * hisi_qm_stop() - Stop a qm. 3154 * @qm: The qm which will be stopped. 3155 * @r: The reason to stop qm. 3156 * 3157 * This function stops qm and its qps, then qm can not accept request. 3158 * Related resources are not released at this state, we can use hisi_qm_start 3159 * to let qm start again. 3160 */ 3161 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) 3162 { 3163 struct device *dev = &qm->pdev->dev; 3164 int ret = 0; 3165 3166 down_write(&qm->qps_lock); 3167 3168 if (atomic_read(&qm->status.flags) == QM_STOP) 3169 goto err_unlock; 3170 3171 /* Stop all the request sending at first. */ 3172 atomic_set(&qm->status.flags, QM_STOP); 3173 qm->status.stop_reason = r; 3174 3175 if (qm->status.stop_reason != QM_NORMAL) { 3176 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 3177 /* 3178 * When performing soft reset, the hardware will no longer 3179 * do tasks, and the tasks in the device will be flushed 3180 * out directly since the master ooo is closed. 
3181 */ 3182 if (test_bit(QM_SUPPORT_STOP_FUNC, &qm->caps) && 3183 r != QM_SOFT_RESET) { 3184 ret = qm_drain_qm(qm); 3185 if (ret) { 3186 dev_err(dev, "failed to drain qm!\n"); 3187 goto err_unlock; 3188 } 3189 } 3190 3191 qm_stop_started_qp(qm); 3192 3193 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 3194 } 3195 3196 qm_disable_eq_aeq_interrupts(qm); 3197 if (qm->fun_type == QM_HW_PF) { 3198 ret = hisi_qm_set_vft(qm, 0, 0, 0); 3199 if (ret < 0) { 3200 dev_err(dev, "Failed to set vft!\n"); 3201 ret = -EBUSY; 3202 goto err_unlock; 3203 } 3204 } 3205 3206 qm_clear_queues(qm); 3207 qm->status.stop_reason = QM_NORMAL; 3208 3209 err_unlock: 3210 up_write(&qm->qps_lock); 3211 return ret; 3212 } 3213 EXPORT_SYMBOL_GPL(hisi_qm_stop); 3214 3215 static void qm_hw_error_init(struct hisi_qm *qm) 3216 { 3217 if (!qm->ops->hw_error_init) { 3218 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); 3219 return; 3220 } 3221 3222 qm->ops->hw_error_init(qm); 3223 } 3224 3225 static void qm_hw_error_uninit(struct hisi_qm *qm) 3226 { 3227 if (!qm->ops->hw_error_uninit) { 3228 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n"); 3229 return; 3230 } 3231 3232 qm->ops->hw_error_uninit(qm); 3233 } 3234 3235 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) 3236 { 3237 if (!qm->ops->hw_error_handle) { 3238 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); 3239 return ACC_ERR_NONE; 3240 } 3241 3242 return qm->ops->hw_error_handle(qm); 3243 } 3244 3245 /** 3246 * hisi_qm_dev_err_init() - Initialize device error configuration. 3247 * @qm: The qm for which we want to do error initialization. 3248 * 3249 * Initialize QM and device error related configuration. 3250 */ 3251 void hisi_qm_dev_err_init(struct hisi_qm *qm) 3252 { 3253 if (qm->fun_type == QM_HW_VF) 3254 return; 3255 3256 qm_hw_error_init(qm); 3257 3258 if (!qm->err_ini->hw_err_enable) { 3259 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n"); 3260 return; 3261 } 3262 qm->err_ini->hw_err_enable(qm); 3263 } 3264 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init); 3265 3266 /** 3267 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration. 3268 * @qm: The qm for which we want to do error uninitialization. 3269 * 3270 * Uninitialize QM and device error related configuration. 3271 */ 3272 void hisi_qm_dev_err_uninit(struct hisi_qm *qm) 3273 { 3274 if (qm->fun_type == QM_HW_VF) 3275 return; 3276 3277 qm_hw_error_uninit(qm); 3278 3279 if (!qm->err_ini->hw_err_disable) { 3280 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n"); 3281 return; 3282 } 3283 qm->err_ini->hw_err_disable(qm); 3284 } 3285 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit); 3286 3287 /** 3288 * hisi_qm_free_qps() - free multiple queue pairs. 3289 * @qps: The queue pairs need to be freed. 3290 * @qp_num: The num of queue pairs. 
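 *
 * Typically paired with hisi_qm_alloc_qps_node() (sketch; "qm_list" is the
 * accelerator driver's device list and "node" a NUMA node id):
 *
 *	struct hisi_qp *qps[2];
 *
 *	if (hisi_qm_alloc_qps_node(qm_list, 2, 0, node, qps))
 *		return -ENODEV;
 *	/* ... use the qps ... */
 *	hisi_qm_free_qps(qps, 2);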
*/ 3292 void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num) 3293 { 3294 int i; 3295 3296 if (!qps || qp_num <= 0) 3297 return; 3298 3299 for (i = qp_num - 1; i >= 0; i--) 3300 hisi_qm_release_qp(qps[i]); 3301 } 3302 EXPORT_SYMBOL_GPL(hisi_qm_free_qps); 3303 3304 static void free_list(struct list_head *head) 3305 { 3306 struct hisi_qm_resource *res, *tmp; 3307 3308 list_for_each_entry_safe(res, tmp, head, list) { 3309 list_del(&res->list); 3310 kfree(res); 3311 } 3312 } 3313 3314 static int hisi_qm_sort_devices(int node, struct list_head *head, 3315 struct hisi_qm_list *qm_list) 3316 { 3317 struct hisi_qm_resource *res, *tmp; 3318 struct hisi_qm *qm; 3319 struct list_head *n; 3320 struct device *dev; 3321 int dev_node; 3322 3323 list_for_each_entry(qm, &qm_list->list, list) { 3324 dev = &qm->pdev->dev; 3325 3326 dev_node = dev_to_node(dev); 3327 if (dev_node < 0) 3328 dev_node = 0; 3329 3330 res = kzalloc(sizeof(*res), GFP_KERNEL); 3331 if (!res) 3332 return -ENOMEM; 3333 3334 res->qm = qm; 3335 res->distance = node_distance(dev_node, node); 3336 n = head; 3337 list_for_each_entry(tmp, head, list) { 3338 if (res->distance < tmp->distance) { 3339 n = &tmp->list; 3340 break; 3341 } 3342 } 3343 list_add_tail(&res->list, n); 3344 } 3345 3346 return 0; 3347 } 3348 3349 /** 3350 * hisi_qm_alloc_qps_node() - Create multiple queue pairs. 3351 * @qm_list: The list of all available devices. 3352 * @qp_num: The number of queue pairs to be created. 3353 * @alg_type: The algorithm type. 3354 * @node: The numa node. 3355 * @qps: The created queue pairs. 3356 * 3357 * This function sorts all available devices according to NUMA distance, then 3358 * tries to create all the queue pairs from one device. If no device meets 3359 * the requirements, an error is returned. 3360 */ 3361 int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, 3362 u8 alg_type, int node, struct hisi_qp **qps) 3363 { 3364 struct hisi_qm_resource *tmp; 3365 int ret = -ENODEV; 3366 LIST_HEAD(head); 3367 int i; 3368 3369 if (!qps || !qm_list || qp_num <= 0) 3370 return -EINVAL; 3371 3372 mutex_lock(&qm_list->lock); 3373 if (hisi_qm_sort_devices(node, &head, qm_list)) { 3374 mutex_unlock(&qm_list->lock); 3375 goto err; 3376 } 3377 3378 list_for_each_entry(tmp, &head, list) { 3379 for (i = 0; i < qp_num; i++) { 3380 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); 3381 if (IS_ERR(qps[i])) { 3382 hisi_qm_free_qps(qps, i); 3383 break; 3384 } 3385 } 3386 3387 if (i == qp_num) { 3388 ret = 0; 3389 break; 3390 } 3391 } 3392 3393 mutex_unlock(&qm_list->lock); 3394 if (ret) 3395 pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n", 3396 node, alg_type, qp_num); 3397 3398 err: 3399 free_list(&head); 3400 return ret; 3401 } 3402 EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node); 3403 3404 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) 3405 { 3406 u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j; 3407 u32 max_qp_num = qm->max_qp_num; 3408 u32 q_base = qm->qp_num; 3409 int ret; 3410 3411 if (!num_vfs) 3412 return -EINVAL; 3413 3414 vfs_q_num = qm->ctrl_qp_num - qm->qp_num; 3415 3416 /* If vfs_q_num is less than num_vfs, return error. */ 3417 if (vfs_q_num < num_vfs) 3418 return -EINVAL; 3419 3420 q_num = vfs_q_num / num_vfs; 3421 remain_q_num = vfs_q_num % num_vfs; 3422 3423 for (i = num_vfs; i > 0; i--) { 3424 /* 3425 * Give the last vf all the remaining queues if q_num + remain_q_num 3426 * does not exceed max_qp_num; otherwise divide the remainder one per vf.
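 * e.g. with vfs_q_num = 10 and num_vfs = 3 (q_num = 3, remain_q_num = 1),
 * the last vf gets 3 + 1 = 4 queues and the other two vfs get 3 each,
 * provided max_qp_num >= 4.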
3427 */ 3428 if (i == num_vfs && q_num + remain_q_num <= max_qp_num) { 3429 act_q_num = q_num + remain_q_num; 3430 remain_q_num = 0; 3431 } else if (remain_q_num > 0) { 3432 act_q_num = q_num + 1; 3433 remain_q_num--; 3434 } else { 3435 act_q_num = q_num; 3436 } 3437 3438 act_q_num = min(act_q_num, max_qp_num); 3439 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num); 3440 if (ret) { 3441 for (j = num_vfs; j > i; j--) 3442 hisi_qm_set_vft(qm, j, 0, 0); 3443 return ret; 3444 } 3445 q_base += act_q_num; 3446 } 3447 3448 return 0; 3449 } 3450 3451 static int qm_clear_vft_config(struct hisi_qm *qm) 3452 { 3453 int ret; 3454 u32 i; 3455 3456 for (i = 1; i <= qm->vfs_num; i++) { 3457 ret = hisi_qm_set_vft(qm, i, 0, 0); 3458 if (ret) 3459 return ret; 3460 } 3461 qm->vfs_num = 0; 3462 3463 return 0; 3464 } 3465 3466 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos) 3467 { 3468 struct device *dev = &qm->pdev->dev; 3469 u32 ir = qos * QM_QOS_RATE; 3470 int ret, total_vfs, i; 3471 3472 total_vfs = pci_sriov_get_totalvfs(qm->pdev); 3473 if (fun_index > total_vfs) 3474 return -EINVAL; 3475 3476 qm->factor[fun_index].func_qos = qos; 3477 3478 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]); 3479 if (ret) { 3480 dev_err(dev, "failed to calculate shaper parameter!\n"); 3481 return -EINVAL; 3482 } 3483 3484 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) { 3485 /* The base number of queue reuse for different alg type */ 3486 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1); 3487 if (ret) { 3488 dev_err(dev, "type: %d, failed to set shaper vft!\n", i); 3489 return -EINVAL; 3490 } 3491 } 3492 3493 return 0; 3494 } 3495 3496 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index) 3497 { 3498 u64 cir_u = 0, cir_b = 0, cir_s = 0; 3499 u64 shaper_vft, ir_calc, ir; 3500 unsigned int val; 3501 u32 error_rate; 3502 int ret; 3503 3504 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 3505 val & BIT(0), POLL_PERIOD, 3506 POLL_TIMEOUT); 3507 if (ret) 3508 return 0; 3509 3510 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); 3511 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE); 3512 writel(fun_index, qm->io_base + QM_VFT_CFG); 3513 3514 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); 3515 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); 3516 3517 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 3518 val & BIT(0), POLL_PERIOD, 3519 POLL_TIMEOUT); 3520 if (ret) 3521 return 0; 3522 3523 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | 3524 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32); 3525 3526 cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK; 3527 cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK; 3528 cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT; 3529 3530 cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK; 3531 cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT; 3532 3533 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s); 3534 3535 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE; 3536 3537 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; 3538 if (error_rate > QM_QOS_MIN_ERROR_RATE) { 3539 pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate); 3540 return 0; 3541 } 3542 3543 return ir; 3544 } 3545 3546 static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) 3547 { 3548 struct device *dev = &qm->pdev->dev; 3549 u64 mb_cmd; 3550 u32 qos; 3551 int ret; 3552 3553 qos = qm_get_shaper_vft_qos(qm, fun_num); 3554 if (!qos) { 3555 dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num); 3556 return; 3557 } 3558 3559 
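/* Pack the qos value into the upper 32 bits of the mailbox command. */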
mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT; 3560 ret = qm_ping_single_vf(qm, mb_cmd, fun_num); 3561 if (ret) 3562 dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num); 3563 } 3564 3565 static int qm_vf_read_qos(struct hisi_qm *qm) 3566 { 3567 int cnt = 0; 3568 int ret = -EINVAL; 3569 3570 /* reset mailbox qos val */ 3571 qm->mb_qos = 0; 3572 3573 /* vf ping pf to get function qos */ 3574 ret = qm_ping_pf(qm, QM_VF_GET_QOS); 3575 if (ret) { 3576 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); 3577 return ret; 3578 } 3579 3580 while (true) { 3581 msleep(QM_WAIT_DST_ACK); 3582 if (qm->mb_qos) 3583 break; 3584 3585 if (++cnt > QM_MAX_VF_WAIT_COUNT) { 3586 pci_err(qm->pdev, "VF ping PF timeout!\n"); 3587 return -ETIMEDOUT; 3588 } 3589 } 3590 3591 return ret; 3592 } 3593 3594 static ssize_t qm_algqos_read(struct file *filp, char __user *buf, 3595 size_t count, loff_t *pos) 3596 { 3597 struct hisi_qm *qm = filp->private_data; 3598 char tbuf[QM_DBG_READ_LEN]; 3599 u32 qos_val, ir; 3600 int ret; 3601 3602 ret = hisi_qm_get_dfx_access(qm); 3603 if (ret) 3604 return ret; 3605 3606 /* Mailbox and reset cannot be operated at the same time */ 3607 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { 3608 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n"); 3609 ret = -EAGAIN; 3610 goto err_put_dfx_access; 3611 } 3612 3613 if (qm->fun_type == QM_HW_PF) { 3614 ir = qm_get_shaper_vft_qos(qm, 0); 3615 } else { 3616 ret = qm_vf_read_qos(qm); 3617 if (ret) 3618 goto err_get_status; 3619 ir = qm->mb_qos; 3620 } 3621 3622 qos_val = ir / QM_QOS_RATE; 3623 ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val); 3624 3625 ret = simple_read_from_buffer(buf, count, pos, tbuf, ret); 3626 3627 err_get_status: 3628 clear_bit(QM_RESETTING, &qm->misc_ctl); 3629 err_put_dfx_access: 3630 hisi_qm_put_dfx_access(qm); 3631 return ret; 3632 } 3633 3634 static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf, 3635 unsigned long *val, 3636 unsigned int *fun_index) 3637 { 3638 const struct bus_type *bus_type = qm->pdev->dev.bus; 3639 char tbuf_bdf[QM_DBG_READ_LEN] = {0}; 3640 char val_buf[QM_DBG_READ_LEN] = {0}; 3641 struct pci_dev *pdev; 3642 struct device *dev; 3643 int ret; 3644 3645 ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf); 3646 if (ret != QM_QOS_PARAM_NUM) 3647 return -EINVAL; 3648 3649 ret = kstrtoul(val_buf, 10, val); 3650 if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) { 3651 pci_err(qm->pdev, "invalid qos value, please set a value from 1 to 1000!\n"); 3652 return -EINVAL; 3653 } 3654 3655 dev = bus_find_device_by_name(bus_type, NULL, tbuf_bdf); 3656 if (!dev) { 3657 pci_err(qm->pdev, "invalid pci bdf number!\n"); 3658 return -ENODEV; 3659 } 3660 3661 pdev = container_of(dev, struct pci_dev, dev); 3662 3663 *fun_index = pdev->devfn; 3664 3665 return 0; 3666 } 3667 3668 static ssize_t qm_algqos_write(struct file *filp, const char __user *buf, 3669 size_t count, loff_t *pos) 3670 { 3671 struct hisi_qm *qm = filp->private_data; 3672 char tbuf[QM_DBG_READ_LEN]; 3673 unsigned int fun_index; 3674 unsigned long val; 3675 int len, ret; 3676 3677 if (*pos != 0) 3678 return 0; 3679 3680 if (count >= QM_DBG_READ_LEN) 3681 return -ENOSPC; 3682 3683 len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count); 3684 if (len < 0) 3685 return len; 3686 3687 tbuf[len] = '\0'; 3688 ret = qm_get_qos_value(qm, tbuf, &val, &fun_index); 3689 if (ret) 3690 return ret; 3691 3692 /* Mailbox and reset cannot be operated at the same time */ 3693 if (test_and_set_bit(QM_RESETTING,
&qm->misc_ctl)) { 3694 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); 3695 return -EAGAIN; 3696 } 3697 3698 ret = qm_pm_get_sync(qm); 3699 if (ret) { 3700 ret = -EINVAL; 3701 goto err_get_status; 3702 } 3703 3704 ret = qm_func_shaper_enable(qm, fun_index, val); 3705 if (ret) { 3706 pci_err(qm->pdev, "failed to enable function shaper!\n"); 3707 ret = -EINVAL; 3708 goto err_put_sync; 3709 } 3710 3711 pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n", 3712 fun_index, val); 3713 ret = count; 3714 3715 err_put_sync: 3716 qm_pm_put_sync(qm); 3717 err_get_status: 3718 clear_bit(QM_RESETTING, &qm->misc_ctl); 3719 return ret; 3720 } 3721 3722 static const struct file_operations qm_algqos_fops = { 3723 .owner = THIS_MODULE, 3724 .open = simple_open, 3725 .read = qm_algqos_read, 3726 .write = qm_algqos_write, 3727 }; 3728 3729 /** 3730 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files. 3731 * @qm: The qm for which we want to add debugfs files. 3732 * 3733 * Create the function qos debugfs files; a VF pings the PF to get its function qos. 3734 */ 3735 void hisi_qm_set_algqos_init(struct hisi_qm *qm) 3736 { 3737 if (qm->fun_type == QM_HW_PF) 3738 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, 3739 qm, &qm_algqos_fops); 3740 else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 3741 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root, 3742 qm, &qm_algqos_fops); 3743 } 3744 3745 static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func) 3746 { 3747 int i; 3748 3749 for (i = 1; i <= total_func; i++) 3750 qm->factor[i].func_qos = QM_QOS_MAX_VAL; 3751 } 3752 3753 /** 3754 * hisi_qm_sriov_enable() - enable virtual functions 3755 * @pdev: the PCIe device 3756 * @max_vfs: the number of virtual functions to enable 3757 * 3758 * Returns the number of enabled VFs. If VFs are already enabled, or 3759 * max_vfs is more than the total number of VFs the device can enable, returns 3760 * failure. 3761 */ 3762 int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs) 3763 { 3764 struct hisi_qm *qm = pci_get_drvdata(pdev); 3765 int pre_existing_vfs, num_vfs, total_vfs, ret; 3766 3767 ret = qm_pm_get_sync(qm); 3768 if (ret) 3769 return ret; 3770 3771 total_vfs = pci_sriov_get_totalvfs(pdev); 3772 pre_existing_vfs = pci_num_vf(pdev); 3773 if (pre_existing_vfs) { 3774 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n", 3775 pre_existing_vfs); ret = -EEXIST; 3776 goto err_put_sync; 3777 } 3778 3779 if (max_vfs > total_vfs) { 3780 pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs); 3781 ret = -ERANGE; 3782 goto err_put_sync; 3783 } 3784 3785 num_vfs = max_vfs; 3786 3787 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 3788 hisi_qm_init_vf_qos(qm, num_vfs); 3789 3790 ret = qm_vf_q_assign(qm, num_vfs); 3791 if (ret) { 3792 pci_err(pdev, "Can't assign queues for VF!\n"); 3793 goto err_put_sync; 3794 } 3795 3796 ret = pci_enable_sriov(pdev, num_vfs); 3797 if (ret) { 3798 pci_err(pdev, "Can't enable VF!\n"); 3799 qm_clear_vft_config(qm); 3800 goto err_put_sync; 3801 } 3802 qm->vfs_num = num_vfs; 3803 3804 pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs); 3805 3806 return num_vfs; 3807 3808 err_put_sync: 3809 qm_pm_put_sync(qm); 3810 return ret; 3811 } 3812 EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable); 3813 3814 /** 3815 * hisi_qm_sriov_disable - disable virtual functions 3816 * @pdev: the PCI device. 3817 * @is_frozen: true when all the VFs are frozen. 3818 * 3819 * Return failure if there are VFs assigned already or a VF is in use.
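 *
 * Normally reached through the standard SR-IOV sysfs interface, e.g.:
 *
 *	echo 0 > /sys/bus/pci/devices/<bdf>/sriov_numvfs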
*/ 3821 int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen) 3822 { 3823 struct hisi_qm *qm = pci_get_drvdata(pdev); 3824 3825 if (pci_vfs_assigned(pdev)) { 3826 pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n"); 3827 return -EPERM; 3828 } 3829 3830 /* While the VFs are in use, SR-IOV cannot be disabled. */ 3831 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) { 3832 pci_err(pdev, "Task is using its VF!\n"); 3833 return -EBUSY; 3834 } 3835 3836 pci_disable_sriov(pdev); 3837 3838 qm->vfs_num = 0; 3839 qm_pm_put_sync(qm); 3840 3841 return qm_clear_vft_config(qm); 3842 } 3843 EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable); 3844 3845 /** 3846 * hisi_qm_sriov_configure - configure the number of VFs 3847 * @pdev: The PCI device 3848 * @num_vfs: The number of VFs to be enabled 3849 * 3850 * Enable SR-IOV according to num_vfs, 0 means disable. 3851 */ 3852 int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs) 3853 { 3854 if (num_vfs == 0) 3855 return hisi_qm_sriov_disable(pdev, false); 3856 else 3857 return hisi_qm_sriov_enable(pdev, num_vfs); 3858 } 3859 EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure); 3860 3861 static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) 3862 { 3863 u32 err_sts; 3864 3865 if (!qm->err_ini->get_dev_hw_err_status) { 3866 dev_err(&qm->pdev->dev, "Device doesn't support getting hw error status!\n"); 3867 return ACC_ERR_NONE; 3868 } 3869 3870 /* get device hardware error status */ 3871 err_sts = qm->err_ini->get_dev_hw_err_status(qm); 3872 if (err_sts) { 3873 if (err_sts & qm->err_info.ecc_2bits_mask) 3874 qm->err_status.is_dev_ecc_mbit = true; 3875 3876 if (qm->err_ini->log_dev_hw_err) 3877 qm->err_ini->log_dev_hw_err(qm, err_sts); 3878 3879 if (err_sts & qm->err_info.dev_reset_mask) 3880 return ACC_ERR_NEED_RESET; 3881 3882 if (qm->err_ini->clear_dev_hw_err_status) 3883 qm->err_ini->clear_dev_hw_err_status(qm, err_sts); 3884 } 3885 3886 return ACC_ERR_RECOVERED; 3887 } 3888 3889 static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm) 3890 { 3891 enum acc_err_result qm_ret, dev_ret; 3892 3893 /* log qm error */ 3894 qm_ret = qm_hw_error_handle(qm); 3895 3896 /* log device error */ 3897 dev_ret = qm_dev_err_handle(qm); 3898 3899 return (qm_ret == ACC_ERR_NEED_RESET || 3900 dev_ret == ACC_ERR_NEED_RESET) ? 3901 ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED; 3902 } 3903 3904 /** 3905 * hisi_qm_dev_err_detected() - Get device and qm error status then log it. 3906 * @pdev: The PCI device which needs to report the error. 3907 * @state: The connectivity between CPU and device. 3908 * 3909 * We register this function into the PCIe AER handlers. It will report the 3910 * device or qm hardware error status when an error occurs.
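 *
 * Accelerator drivers hook it up through struct pci_error_handlers
 * (sketch; "xxx" is a placeholder driver prefix):
 *
 *	static const struct pci_error_handlers xxx_err_handler = {
 *		.error_detected = hisi_qm_dev_err_detected,
 *	};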
3911 */ 3912 pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, 3913 pci_channel_state_t state) 3914 { 3915 struct hisi_qm *qm = pci_get_drvdata(pdev); 3916 enum acc_err_result ret; 3917 3918 if (pdev->is_virtfn) 3919 return PCI_ERS_RESULT_NONE; 3920 3921 pci_info(pdev, "PCI error detected, state(=%u)!!\n", state); 3922 if (state == pci_channel_io_perm_failure) 3923 return PCI_ERS_RESULT_DISCONNECT; 3924 3925 ret = qm_process_dev_error(qm); 3926 if (ret == ACC_ERR_NEED_RESET) 3927 return PCI_ERS_RESULT_NEED_RESET; 3928 3929 return PCI_ERS_RESULT_RECOVERED; 3930 } 3931 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected); 3932 3933 static int qm_check_req_recv(struct hisi_qm *qm) 3934 { 3935 struct pci_dev *pdev = qm->pdev; 3936 int ret; 3937 u32 val; 3938 3939 if (qm->ver >= QM_HW_V3) 3940 return 0; 3941 3942 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); 3943 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, 3944 (val == ACC_VENDOR_ID_VALUE), 3945 POLL_PERIOD, POLL_TIMEOUT); 3946 if (ret) { 3947 dev_err(&pdev->dev, "Fails to read QM reg!\n"); 3948 return ret; 3949 } 3950 3951 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); 3952 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, 3953 (val == PCI_VENDOR_ID_HUAWEI), 3954 POLL_PERIOD, POLL_TIMEOUT); 3955 if (ret) 3956 dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); 3957 3958 return ret; 3959 } 3960 3961 static int qm_set_pf_mse(struct hisi_qm *qm, bool set) 3962 { 3963 struct pci_dev *pdev = qm->pdev; 3964 u16 cmd; 3965 int i; 3966 3967 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 3968 if (set) 3969 cmd |= PCI_COMMAND_MEMORY; 3970 else 3971 cmd &= ~PCI_COMMAND_MEMORY; 3972 3973 pci_write_config_word(pdev, PCI_COMMAND, cmd); 3974 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 3975 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 3976 if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1)) 3977 return 0; 3978 3979 udelay(1); 3980 } 3981 3982 return -ETIMEDOUT; 3983 } 3984 3985 static int qm_set_vf_mse(struct hisi_qm *qm, bool set) 3986 { 3987 struct pci_dev *pdev = qm->pdev; 3988 u16 sriov_ctrl; 3989 int pos; 3990 int i; 3991 3992 /* 3993 * Since function qm_set_vf_mse is called only after SRIOV is enabled, 3994 * pci_find_ext_capability cannot return 0, pos does not need to be 3995 * checked. 
3996 */ 3997 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 3998 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); 3999 if (set) 4000 sriov_ctrl |= PCI_SRIOV_CTRL_MSE; 4001 else 4002 sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE; 4003 pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl); 4004 4005 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 4006 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); 4007 if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >> 4008 ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT) 4009 return 0; 4010 4011 udelay(1); 4012 } 4013 4014 return -ETIMEDOUT; 4015 } 4016 4017 static int qm_vf_reset_prepare(struct hisi_qm *qm, 4018 enum qm_stop_reason stop_reason) 4019 { 4020 struct hisi_qm_list *qm_list = qm->qm_list; 4021 struct pci_dev *pdev = qm->pdev; 4022 struct pci_dev *virtfn; 4023 struct hisi_qm *vf_qm; 4024 int ret = 0; 4025 4026 mutex_lock(&qm_list->lock); 4027 list_for_each_entry(vf_qm, &qm_list->list, list) { 4028 virtfn = vf_qm->pdev; 4029 if (virtfn == pdev) 4030 continue; 4031 4032 if (pci_physfn(virtfn) == pdev) { 4033 /* save VFs PCIE BAR configuration */ 4034 pci_save_state(virtfn); 4035 4036 ret = hisi_qm_stop(vf_qm, stop_reason); 4037 if (ret) 4038 goto stop_fail; 4039 } 4040 } 4041 4042 stop_fail: 4043 mutex_unlock(&qm_list->lock); 4044 return ret; 4045 } 4046 4047 static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd, 4048 enum qm_stop_reason stop_reason) 4049 { 4050 struct pci_dev *pdev = qm->pdev; 4051 int ret; 4052 4053 if (!qm->vfs_num) 4054 return 0; 4055 4056 /* Kunpeng930 supports to notify VFs to stop before PF reset */ 4057 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { 4058 ret = qm_ping_all_vfs(qm, cmd); 4059 if (ret) 4060 pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n"); 4061 } else { 4062 ret = qm_vf_reset_prepare(qm, stop_reason); 4063 if (ret) 4064 pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret); 4065 } 4066 4067 return ret; 4068 } 4069 4070 static int qm_controller_reset_prepare(struct hisi_qm *qm) 4071 { 4072 struct pci_dev *pdev = qm->pdev; 4073 int ret; 4074 4075 ret = qm_reset_prepare_ready(qm); 4076 if (ret) { 4077 pci_err(pdev, "Controller reset not ready!\n"); 4078 return ret; 4079 } 4080 4081 /* PF obtains the information of VF by querying the register. */ 4082 qm_cmd_uninit(qm); 4083 4084 /* Whether VFs stop successfully, soft reset will continue. 
*/ 4085 ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET); 4086 if (ret) 4087 pci_err(pdev, "failed to stop vfs by pf in soft reset.\n"); 4088 4089 ret = hisi_qm_stop(qm, QM_SOFT_RESET); 4090 if (ret) { 4091 pci_err(pdev, "Fails to stop QM!\n"); 4092 qm_reset_bit_clear(qm); 4093 return ret; 4094 } 4095 4096 if (qm->use_sva) { 4097 ret = qm_hw_err_isolate(qm); 4098 if (ret) 4099 pci_err(pdev, "failed to isolate hw err!\n"); 4100 } 4101 4102 ret = qm_wait_vf_prepare_finish(qm); 4103 if (ret) 4104 pci_err(pdev, "failed to stop by vfs in soft reset!\n"); 4105 4106 clear_bit(QM_RST_SCHED, &qm->misc_ctl); 4107 4108 return 0; 4109 } 4110 4111 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) 4112 { 4113 u32 nfe_enb = 0; 4114 4115 /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ 4116 if (qm->ver >= QM_HW_V3) 4117 return; 4118 4119 if (!qm->err_status.is_dev_ecc_mbit && 4120 qm->err_status.is_qm_ecc_mbit && 4121 qm->err_ini->close_axi_master_ooo) { 4122 qm->err_ini->close_axi_master_ooo(qm); 4123 } else if (qm->err_status.is_dev_ecc_mbit && 4124 !qm->err_status.is_qm_ecc_mbit && 4125 !qm->err_ini->close_axi_master_ooo) { 4126 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); 4127 writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, 4128 qm->io_base + QM_RAS_NFE_ENABLE); 4129 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); 4130 } 4131 } 4132 4133 static int qm_soft_reset(struct hisi_qm *qm) 4134 { 4135 struct pci_dev *pdev = qm->pdev; 4136 int ret; 4137 u32 val; 4138 4139 /* Ensure all doorbells and mailboxes received by QM */ 4140 ret = qm_check_req_recv(qm); 4141 if (ret) 4142 return ret; 4143 4144 if (qm->vfs_num) { 4145 ret = qm_set_vf_mse(qm, false); 4146 if (ret) { 4147 pci_err(pdev, "Fails to disable vf MSE bit.\n"); 4148 return ret; 4149 } 4150 } 4151 4152 ret = qm->ops->set_msi(qm, false); 4153 if (ret) { 4154 pci_err(pdev, "Fails to disable PEH MSI bit.\n"); 4155 return ret; 4156 } 4157 4158 qm_dev_ecc_mbit_handle(qm); 4159 4160 /* OOO register set and check */ 4161 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, 4162 qm->io_base + ACC_MASTER_GLOBAL_CTRL); 4163 4164 /* If bus lock, reset chip */ 4165 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, 4166 val, 4167 (val == ACC_MASTER_TRANS_RETURN_RW), 4168 POLL_PERIOD, POLL_TIMEOUT); 4169 if (ret) { 4170 pci_emerg(pdev, "Bus lock! 
static int qm_soft_reset(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;
	u32 val;

	/* Ensure all doorbells and mailboxes have been received by the QM */
	ret = qm_check_req_recv(qm);
	if (ret)
		return ret;

	if (qm->vfs_num) {
		ret = qm_set_vf_mse(qm, false);
		if (ret) {
			pci_err(pdev, "Failed to disable VF MSE bit.\n");
			return ret;
		}
	}

	ret = qm->ops->set_msi(qm, false);
	if (ret) {
		pci_err(pdev, "Failed to disable PEH MSI bit.\n");
		return ret;
	}

	qm_dev_ecc_mbit_handle(qm);

	/* OOO register set and check */
	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);

	/* If the bus is locked, reset the chip */
	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
					 val,
					 (val == ACC_MASTER_TRANS_RETURN_RW),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		pci_emerg(pdev, "Bus lock! Please reset system.\n");
		return ret;
	}

	if (qm->err_ini->close_sva_prefetch)
		qm->err_ini->close_sva_prefetch(qm);

	ret = qm_set_pf_mse(qm, false);
	if (ret) {
		pci_err(pdev, "Failed to disable PF MSE bit.\n");
		return ret;
	}

	/* The reset related sub-control registers are not in PCI BAR */
	if (ACPI_HANDLE(&pdev->dev)) {
		unsigned long long value = 0;
		acpi_status s;

		s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
					  qm->err_info.acpi_rst,
					  NULL, &value);
		if (ACPI_FAILURE(s)) {
			pci_err(pdev, "No controller reset method!\n");
			return -EIO;
		}

		if (value) {
			pci_err(pdev, "Reset step %llu failed!\n", value);
			return -EIO;
		}
	} else {
		pci_err(pdev, "No reset method!\n");
		return -EINVAL;
	}

	return 0;
}

static int qm_vf_reset_done(struct hisi_qm *qm)
{
	struct hisi_qm_list *qm_list = qm->qm_list;
	struct pci_dev *pdev = qm->pdev;
	struct pci_dev *virtfn;
	struct hisi_qm *vf_qm;
	int ret = 0;

	mutex_lock(&qm_list->lock);
	list_for_each_entry(vf_qm, &qm_list->list, list) {
		virtfn = vf_qm->pdev;
		if (virtfn == pdev)
			continue;

		if (pci_physfn(virtfn) == pdev) {
			/* restore the VF's PCIe BAR configuration */
			pci_restore_state(virtfn);

			ret = qm_restart(vf_qm);
			if (ret)
				goto restart_fail;
		}
	}

restart_fail:
	mutex_unlock(&qm_list->lock);
	return ret;
}
static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	if (!qm->vfs_num)
		return 0;

	ret = qm_vf_q_assign(qm, qm->vfs_num);
	if (ret) {
		pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret);
		return ret;
	}

	/* Kunpeng930 supports notifying VFs to start after PF reset. */
	if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) {
		ret = qm_ping_all_vfs(qm, cmd);
		if (ret)
			pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
	} else {
		ret = qm_vf_reset_done(qm);
		if (ret)
			pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret);
	}

	return ret;
}

static int qm_dev_hw_init(struct hisi_qm *qm)
{
	return qm->err_ini->hw_init(qm);
}

static void qm_restart_prepare(struct hisi_qm *qm)
{
	u32 value;

	if (qm->err_ini->open_sva_prefetch)
		qm->err_ini->open_sva_prefetch(qm);

	if (qm->ver >= QM_HW_V3)
		return;

	if (!qm->err_status.is_qm_ecc_mbit &&
	    !qm->err_status.is_dev_ecc_mbit)
		return;

	/* temporarily close the OOO port used for PEH to write out MSI */
	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
	writel(value & ~qm->err_info.msi_wr_port,
	       qm->io_base + ACC_AM_CFG_PORT_WR_EN);

	/* clear the device ECC 2-bit error source if present */
	value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
	if (value && qm->err_ini->clear_dev_hw_err_status)
		qm->err_ini->clear_dev_hw_err_status(qm, value);

	/* clear the QM ECC mbit error source */
	writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);

	/* clear the AM reorder buffer ECC mbit source */
	writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
}

static void qm_restart_done(struct hisi_qm *qm)
{
	u32 value;

	if (qm->ver >= QM_HW_V3)
		goto clear_flags;

	if (!qm->err_status.is_qm_ecc_mbit &&
	    !qm->err_status.is_dev_ecc_mbit)
		return;

	/* open the OOO port for PEH to write out MSI */
	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
	value |= qm->err_info.msi_wr_port;
	writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);

clear_flags:
	qm->err_status.is_qm_ecc_mbit = false;
	qm->err_status.is_dev_ecc_mbit = false;
}

static int qm_controller_reset_done(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm->ops->set_msi(qm, true);
	if (ret) {
		pci_err(pdev, "Failed to enable PEH MSI bit!\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "Failed to enable PF MSE bit!\n");
		return ret;
	}

	if (qm->vfs_num) {
		ret = qm_set_vf_mse(qm, true);
		if (ret) {
			pci_err(pdev, "Failed to enable VF MSE bit!\n");
			return ret;
		}
	}

	ret = qm_dev_hw_init(qm);
	if (ret) {
		pci_err(pdev, "Failed to init device\n");
		return ret;
	}

	qm_restart_prepare(qm);
	hisi_qm_dev_err_init(qm);
	if (qm->err_ini->open_axi_master_ooo)
		qm->err_ini->open_axi_master_ooo(qm);

	ret = qm_dev_mem_reset(qm);
	if (ret) {
		pci_err(pdev, "failed to reset device memory\n");
		return ret;
	}

	ret = qm_restart(qm);
	if (ret) {
		pci_err(pdev, "Failed to start QM!\n");
		return ret;
	}

	ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
	if (ret)
		pci_err(pdev, "failed to start vfs by pf in soft reset.\n");

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to start by vfs in soft reset!\n");

	qm_cmd_init(qm);
	qm_restart_done(qm);

	qm_reset_bit_clear(qm);

	return 0;
}
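/*
 * Controller reset is a three-phase flow: qm_controller_reset_prepare()
 * quiesces the PF and its VFs, qm_soft_reset() performs the hardware reset
 * itself, and qm_controller_reset_done() re-initializes the device and
 * restarts the function(s).
 */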
static int qm_controller_reset(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	pci_info(pdev, "Controller resetting...\n");

	ret = qm_controller_reset_prepare(qm);
	if (ret) {
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
		clear_bit(QM_RST_SCHED, &qm->misc_ctl);
		return ret;
	}

	hisi_qm_show_last_dfx_regs(qm);
	if (qm->err_ini->show_last_dfx_regs)
		qm->err_ini->show_last_dfx_regs(qm);

	ret = qm_soft_reset(qm);
	if (ret)
		goto err_reset;

	ret = qm_controller_reset_done(qm);
	if (ret)
		goto err_reset;

	pci_info(pdev, "Controller reset complete\n");

	return 0;

err_reset:
	pci_err(pdev, "Controller reset failed (%d)\n", ret);
	qm_reset_bit_clear(qm);

	/* if resetting fails, isolate the device */
	if (qm->use_sva)
		qm->isolate_data.is_isolate = true;
	return ret;
}

/**
 * hisi_qm_dev_slot_reset() - slot reset
 * @pdev: the PCIe device
 *
 * This function offers the QM-related PCIe device reset interface. Drivers
 * that use the QM can use this function as the slot_reset callback in their
 * struct pci_error_handlers.
 */
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	if (pdev->is_virtfn)
		return PCI_ERS_RESULT_RECOVERED;

	/* reset pcie device controller */
	ret = qm_controller_reset(qm);
	if (ret) {
		pci_err(pdev, "Controller reset failed (%d)\n", ret);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
void hisi_qm_reset_prepare(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	u32 delay = 0;
	int ret;

	hisi_qm_dev_err_uninit(pf_qm);

	/*
	 * Check whether there is an ECC mbit error. If there is, wait for the
	 * soft reset to fix it.
	 */
	while (qm_check_dev_error(pf_qm)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return;
	}

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		pci_err(pdev, "FLR not ready!\n");
		return;
	}

	/* The PF obtains the information of the VF by querying the register. */
	if (qm->fun_type == QM_HW_PF)
		qm_cmd_uninit(qm);

	ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN);
	if (ret)
		pci_err(pdev, "failed to stop vfs by pf in FLR.\n");

	ret = hisi_qm_stop(qm, QM_DOWN);
	if (ret) {
		pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
		return;
	}

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to stop by vfs in FLR!\n");

	pci_info(pdev, "FLR resetting...\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);

static bool qm_flr_reset_complete(struct pci_dev *pdev)
{
	struct pci_dev *pf_pdev = pci_physfn(pdev);
	struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
	u32 id;

	pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
	if (id == QM_PCI_COMMAND_INVALID) {
		pci_err(pdev, "Device cannot be used!\n");
		return false;
	}

	return true;
}

void hisi_qm_reset_done(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = qm_dev_hw_init(qm);
		if (ret) {
			pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
			goto flr_done;
		}
	}

	hisi_qm_dev_err_init(pf_qm);

	ret = qm_restart(qm);
	if (ret) {
		pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
		goto flr_done;
	}

	ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
	if (ret)
		pci_err(pdev, "failed to start vfs by pf in FLR.\n");

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to start by vfs in FLR!\n");

flr_done:
	if (qm->fun_type == QM_HW_PF)
		qm_cmd_init(qm);

	if (qm_flr_reset_complete(pdev))
		pci_info(pdev, "FLR reset complete\n");

	qm_reset_bit_clear(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
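/*
 * A minimal sketch of how a driver built on the QM might wire the reset
 * hooks above into its PCI error handlers; the "foo" names are hypothetical,
 * only the hisi_qm_* callbacks come from this file:
 *
 *	static const struct pci_error_handlers foo_err_handler = {
 *		.slot_reset	= hisi_qm_dev_slot_reset,
 *		.reset_prepare	= hisi_qm_reset_prepare,
 *		.reset_done	= hisi_qm_reset_done,
 *	};
 *
 *	static struct pci_driver foo_pci_driver = {
 *		...
 *		.err_handler	= &foo_err_handler,
 *	};
 */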
static irqreturn_t qm_abnormal_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	enum acc_err_result ret;

	atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
	ret = qm_process_dev_error(qm);
	if (ret == ACC_ERR_NEED_RESET &&
	    !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
	    !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
		schedule_work(&qm->rst_work);

	return IRQ_HANDLED;
}

/**
 * hisi_qm_dev_shutdown() - Shutdown device.
 * @pdev: The device to be shut down.
 *
 * This function stops the qm when the OS shuts down or reboots.
 */
void hisi_qm_dev_shutdown(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	ret = hisi_qm_stop(qm, QM_DOWN);
	if (ret)
		dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n");

	hisi_qm_cache_wb(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
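/*
 * hisi_qm_dev_shutdown() is meant to be plugged into the driver's
 * struct pci_driver; a sketch with a hypothetical "foo" driver:
 *
 *	static struct pci_driver foo_pci_driver = {
 *		...
 *		.shutdown = hisi_qm_dev_shutdown,
 *	};
 */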
4687 */ 4688 ret = qm_get_mb_cmd(qm, &msg, 0); 4689 qm_clear_cmd_interrupt(qm, 0); 4690 if (ret) { 4691 dev_err(dev, "failed to get msg from PF in reset done!\n"); 4692 return ret; 4693 } 4694 4695 cmd = msg & QM_MB_CMD_DATA_MASK; 4696 if (cmd != QM_PF_RESET_DONE) { 4697 dev_err(dev, "the cmd(%u) is not reset done!\n", cmd); 4698 ret = -EINVAL; 4699 } 4700 4701 return ret; 4702 } 4703 4704 static void qm_pf_reset_vf_process(struct hisi_qm *qm, 4705 enum qm_stop_reason stop_reason) 4706 { 4707 struct device *dev = &qm->pdev->dev; 4708 int ret; 4709 4710 dev_info(dev, "device reset start...\n"); 4711 4712 /* The message is obtained by querying the register during resetting */ 4713 qm_cmd_uninit(qm); 4714 qm_pf_reset_vf_prepare(qm, stop_reason); 4715 4716 ret = qm_wait_pf_reset_finish(qm); 4717 if (ret) 4718 goto err_get_status; 4719 4720 qm_pf_reset_vf_done(qm); 4721 4722 dev_info(dev, "device reset done.\n"); 4723 4724 return; 4725 4726 err_get_status: 4727 qm_cmd_init(qm); 4728 qm_reset_bit_clear(qm); 4729 } 4730 4731 static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) 4732 { 4733 struct device *dev = &qm->pdev->dev; 4734 u64 msg; 4735 u32 cmd; 4736 int ret; 4737 4738 /* 4739 * Get the msg from source by sending mailbox. Whether message is got 4740 * successfully, destination needs to ack source by clearing the interrupt. 4741 */ 4742 ret = qm_get_mb_cmd(qm, &msg, fun_num); 4743 qm_clear_cmd_interrupt(qm, BIT(fun_num)); 4744 if (ret) { 4745 dev_err(dev, "failed to get msg from source!\n"); 4746 return; 4747 } 4748 4749 cmd = msg & QM_MB_CMD_DATA_MASK; 4750 switch (cmd) { 4751 case QM_PF_FLR_PREPARE: 4752 qm_pf_reset_vf_process(qm, QM_DOWN); 4753 break; 4754 case QM_PF_SRST_PREPARE: 4755 qm_pf_reset_vf_process(qm, QM_SOFT_RESET); 4756 break; 4757 case QM_VF_GET_QOS: 4758 qm_vf_get_qos(qm, fun_num); 4759 break; 4760 case QM_PF_SET_QOS: 4761 qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT; 4762 break; 4763 default: 4764 dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num); 4765 break; 4766 } 4767 } 4768 4769 static void qm_cmd_process(struct work_struct *cmd_process) 4770 { 4771 struct hisi_qm *qm = container_of(cmd_process, 4772 struct hisi_qm, cmd_process); 4773 u32 vfs_num = qm->vfs_num; 4774 u64 val; 4775 u32 i; 4776 4777 if (qm->fun_type == QM_HW_PF) { 4778 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); 4779 if (!val) 4780 return; 4781 4782 for (i = 1; i <= vfs_num; i++) { 4783 if (val & BIT(i)) 4784 qm_handle_cmd_msg(qm, i); 4785 } 4786 4787 return; 4788 } 4789 4790 qm_handle_cmd_msg(qm, 0); 4791 } 4792 4793 /** 4794 * hisi_qm_alg_register() - Register alg to crypto. 4795 * @qm: The qm needs add. 4796 * @qm_list: The qm list. 4797 * @guard: Guard of qp_num. 4798 * 4799 * Register algorithm to crypto when the function is satisfy guard. 4800 */ 4801 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard) 4802 { 4803 struct device *dev = &qm->pdev->dev; 4804 4805 if (qm->ver <= QM_HW_V2 && qm->use_sva) { 4806 dev_info(dev, "HW V2 not both use uacce sva mode and hardware crypto algs.\n"); 4807 return 0; 4808 } 4809 4810 if (qm->qp_num < guard) { 4811 dev_info(dev, "qp_num is less than task need.\n"); 4812 return 0; 4813 } 4814 4815 return qm_list->register_to_crypto(qm); 4816 } 4817 EXPORT_SYMBOL_GPL(hisi_qm_alg_register); 4818 4819 /** 4820 * hisi_qm_alg_unregister() - Unregister alg from crypto. 4821 * @qm: The qm needs delete. 4822 * @qm_list: The qm list. 4823 * @guard: Guard of qp_num. 
/**
 * hisi_qm_alg_unregister() - Unregister alg from crypto.
 * @qm: The qm to delete.
 * @qm_list: The qm list.
 * @guard: Guard of qp_num.
 *
 * Unregister the algorithm from crypto when the function satisfies the guard.
 */
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard)
{
	if (qm->ver <= QM_HW_V2 && qm->use_sva)
		return;

	if (qm->qp_num < guard)
		return;

	qm_list->unregister_from_crypto(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);

static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	if (qm->fun_type == QM_HW_VF)
		return;

	val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
		return;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	free_irq(pci_irq_vector(pdev, irq_vector), qm);
}

static int qm_register_abnormal_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;
	int ret;

	if (qm->fun_type == QM_HW_VF)
		return 0;

	val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d\n", ret);

	return ret;
}

static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	free_irq(pci_irq_vector(pdev, irq_vector), qm);
}

static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;
	int ret;

	val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm);
	if (ret)
		dev_err(&pdev->dev, "failed to request function communication irq, ret = %d\n", ret);

	return ret;
}

static void qm_unregister_aeq_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	free_irq(pci_irq_vector(pdev, irq_vector), qm);
}

static int qm_register_aeq_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;
	int ret;

	val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), NULL,
				   qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm);
	if (ret)
		dev_err(&pdev->dev, "failed to request aeq irq, ret = %d\n", ret);

	return ret;
}
static void qm_unregister_eq_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;

	val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	free_irq(pci_irq_vector(pdev, irq_vector), qm);
}

static int qm_register_eq_irq(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 irq_vector, val;
	int ret;

	val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val;
	if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
		return 0;

	irq_vector = val & QM_IRQ_VECTOR_MASK;
	ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm);
	if (ret)
		dev_err(&pdev->dev, "failed to request eq irq, ret = %d\n", ret);

	return ret;
}

static void qm_irqs_unregister(struct hisi_qm *qm)
{
	qm_unregister_mb_cmd_irq(qm);
	qm_unregister_abnormal_irq(qm);
	qm_unregister_aeq_irq(qm);
	qm_unregister_eq_irq(qm);
}

static int qm_irqs_register(struct hisi_qm *qm)
{
	int ret;

	ret = qm_register_eq_irq(qm);
	if (ret)
		return ret;

	ret = qm_register_aeq_irq(qm);
	if (ret)
		goto free_eq_irq;

	ret = qm_register_abnormal_irq(qm);
	if (ret)
		goto free_aeq_irq;

	ret = qm_register_mb_cmd_irq(qm);
	if (ret)
		goto free_abnormal_irq;

	return 0;

free_abnormal_irq:
	qm_unregister_abnormal_irq(qm);
free_aeq_irq:
	qm_unregister_aeq_irq(qm);
free_eq_irq:
	qm_unregister_eq_irq(qm);
	return ret;
}
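/*
 * Each *_IRQ_TYPE_CAP_IDX value consulted above packs the MSI vector index
 * into bits [15:0] (QM_IRQ_VECTOR_MASK) and the irq type into the bits above
 * QM_IRQ_TYPE_SHIFT; a type of zero means the irq is not implemented. For
 * example (illustrative value only), a cap_val of 0x00010003 selects MSI
 * vector 3 with type 1:
 *
 *	irq_vector = 0x00010003 & QM_IRQ_VECTOR_MASK;			// 3
 *	type = (0x00010003 >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK;	// 1
 */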
static int qm_get_qp_num(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	bool is_db_isolation;

	/*
	 * The VF's qp_num is assigned by the PF in v2, and the VF can get its
	 * qp_num from the vft.
	 */
	if (qm->fun_type == QM_HW_VF) {
		if (qm->ver != QM_HW_V1)
			/* v2 and later support getting the vft by mailbox */
			return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);

		return 0;
	}

	is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);
	qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true);
	qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info,
					     QM_FUNC_MAX_QP_CAP, is_db_isolation);

	if (qm->qp_num <= qm->max_qp_num)
		return 0;

	if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) {
		/* Check whether the qp number set by the module parameter is valid */
		dev_err(dev, "qp num(%u) is more than max qp num(%u)!\n",
			qm->qp_num, qm->max_qp_num);
		return -EINVAL;
	}

	dev_info(dev, "Default qp num(%u) is too big, reset it to Function's max qp num(%u)!\n",
		 qm->qp_num, qm->max_qp_num);
	qm->qp_num = qm->max_qp_num;
	qm->debug.curr_qm_qp_num = qm->qp_num;

	return 0;
}

static int qm_pre_store_irq_type_caps(struct hisi_qm *qm)
{
	struct hisi_qm_cap_record *qm_cap;
	struct pci_dev *pdev = qm->pdev;
	size_t i, size;

	size = ARRAY_SIZE(qm_pre_store_caps);
	qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL);
	if (!qm_cap)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		qm_cap[i].type = qm_pre_store_caps[i];
		qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info,
							qm_pre_store_caps[i], qm->cap_ver);
	}

	qm->cap_tables.qm_cap_table = qm_cap;

	return 0;
}
static int qm_get_hw_caps(struct hisi_qm *qm)
{
	const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ?
						  qm_cap_info_pf : qm_cap_info_vf;
	u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) :
		   ARRAY_SIZE(qm_cap_info_vf);
	u32 val, i;

	/* The doorbell isolation register is an independent register. */
	val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true);
	if (val)
		set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps);

	if (qm->ver >= QM_HW_V3) {
		val = readl(qm->io_base + QM_FUNC_CAPS_REG);
		qm->cap_ver = val & QM_CAPBILITY_VERSION;
	}

	/* Get the PF/VF common capabilities */
	for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) {
		val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver);
		if (val)
			set_bit(qm_cap_info_comm[i].type, &qm->caps);
	}

	/* Get the PF/VF specific capabilities */
	for (i = 0; i < size; i++) {
		val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver);
		if (val)
			set_bit(cap_info[i].type, &qm->caps);
	}

	/* Fetch and save the value of the irq type related capability registers */
	return qm_pre_store_irq_type_caps(qm);
}

static int qm_get_pci_res(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	ret = pci_request_mem_regions(pdev, qm->dev_name);
	if (ret < 0) {
		dev_err(dev, "Failed to request mem regions!\n");
		return ret;
	}

	qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
	qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2));
	if (!qm->io_base) {
		ret = -EIO;
		goto err_request_mem_regions;
	}

	ret = qm_get_hw_caps(qm);
	if (ret)
		goto err_ioremap;

	if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) {
		qm->db_interval = QM_QP_DB_INTERVAL;
		qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
		qm->db_io_base = ioremap(qm->db_phys_base,
					 pci_resource_len(pdev, PCI_BAR_4));
		if (!qm->db_io_base) {
			ret = -EIO;
			goto err_ioremap;
		}
	} else {
		qm->db_phys_base = qm->phys_base;
		qm->db_io_base = qm->io_base;
		qm->db_interval = 0;
	}

	ret = qm_get_qp_num(qm);
	if (ret)
		goto err_db_ioremap;

	return 0;

err_db_ioremap:
	if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
		iounmap(qm->db_io_base);
err_ioremap:
	iounmap(qm->io_base);
err_request_mem_regions:
	pci_release_mem_regions(pdev);
	return ret;
}

static int hisi_qm_pci_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	unsigned int num_vec;
	int ret;

	ret = pci_enable_device_mem(pdev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable device mem!\n");
		return ret;
	}

	ret = qm_get_pci_res(qm);
	if (ret)
		goto err_disable_pcidev;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0)
		goto err_get_pci_res;
	pci_set_master(pdev);

	num_vec = qm_get_irq_num(qm);
	ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
	if (ret < 0) {
		dev_err(dev, "Failed to enable MSI vectors!\n");
		goto err_get_pci_res;
	}

	return 0;

err_get_pci_res:
	qm_put_pci_res(qm);
err_disable_pcidev:
	pci_disable_device(pdev);
	return ret;
}
static int hisi_qm_init_work(struct hisi_qm *qm)
{
	int i;

	for (i = 0; i < qm->qp_num; i++)
		INIT_WORK(&qm->poll_data[i].work, qm_work_process);

	if (qm->fun_type == QM_HW_PF)
		INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);

	if (qm->ver > QM_HW_V2)
		INIT_WORK(&qm->cmd_process, qm_cmd_process);

	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
				 WQ_UNBOUND, num_online_cpus(),
				 pci_name(qm->pdev));
	if (!qm->wq) {
		pci_err(qm->pdev, "failed to alloc workqueue!\n");
		return -ENOMEM;
	}

	return 0;
}

static int hisi_qp_alloc_memory(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u16 sq_depth, cq_depth;
	size_t qp_dma_size;
	int i, ret;

	qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
	if (!qm->qp_array)
		return -ENOMEM;

	qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL);
	if (!qm->poll_data) {
		kfree(qm->qp_array);
		return -ENOMEM;
	}

	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);

	/* one more page for device or qp statuses */
	qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth;
	qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
	for (i = 0; i < qm->qp_num; i++) {
		qm->poll_data[i].qm = qm;
		ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth);
		if (ret)
			goto err_init_qp_mem;

		dev_dbg(dev, "allocate qp dma buf size=%zx\n", qp_dma_size);
	}

	return 0;
err_init_qp_mem:
	hisi_qp_memory_uninit(qm, i);

	return ret;
}

static int hisi_qm_alloc_rsv_buf(struct hisi_qm *qm)
{
	struct qm_rsv_buf *xqc_buf = &qm->xqc_buf;
	struct qm_dma *xqc_dma = &xqc_buf->qcdma;
	struct device *dev = &qm->pdev->dev;
	size_t off = 0;

#define QM_XQC_BUF_INIT(xqc_buf, type) do { \
	(xqc_buf)->type = ((xqc_buf)->qcdma.va + (off)); \
	(xqc_buf)->type##_dma = (xqc_buf)->qcdma.dma + (off); \
	off += QMC_ALIGN(sizeof(struct qm_##type)); \
} while (0)

	xqc_dma->size = QMC_ALIGN(sizeof(struct qm_eqc)) +
			QMC_ALIGN(sizeof(struct qm_aeqc)) +
			QMC_ALIGN(sizeof(struct qm_sqc)) +
			QMC_ALIGN(sizeof(struct qm_cqc));
	xqc_dma->va = dma_alloc_coherent(dev, xqc_dma->size,
					 &xqc_dma->dma, GFP_KERNEL);
	if (!xqc_dma->va)
		return -ENOMEM;

	QM_XQC_BUF_INIT(xqc_buf, eqc);
	QM_XQC_BUF_INIT(xqc_buf, aeqc);
	QM_XQC_BUF_INIT(xqc_buf, sqc);
	QM_XQC_BUF_INIT(xqc_buf, cqc);

	return 0;
}
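/*
 * For reference, QM_XQC_BUF_INIT(xqc_buf, eqc) above expands to:
 *
 *	xqc_buf->eqc = xqc_buf->qcdma.va + off;
 *	xqc_buf->eqc_dma = xqc_buf->qcdma.dma + off;
 *	off += QMC_ALIGN(sizeof(struct qm_eqc));
 *
 * so the four reserved contexts are carved sequentially out of one coherent
 * DMA allocation, each 32-byte aligned via QMC_ALIGN().
 */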
static int hisi_qm_memory_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret, total_func;
	size_t off = 0;

	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) {
		total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
		qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
		if (!qm->factor)
			return -ENOMEM;

		/* Only the PF value needs to be initialized */
		qm->factor[0].func_qos = QM_QOS_MAX_VAL;
	}

#define QM_INIT_BUF(qm, type, num) do { \
	(qm)->type = ((qm)->qdma.va + (off)); \
	(qm)->type##_dma = (qm)->qdma.dma + (off); \
	off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
} while (0)

	idr_init(&qm->qp_idr);
	qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP);
	qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) +
			QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) +
			QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
			QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
	qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
					 GFP_ATOMIC);
	dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size);
	if (!qm->qdma.va) {
		ret = -ENOMEM;
		goto err_destroy_idr;
	}

	QM_INIT_BUF(qm, eqe, qm->eq_depth);
	QM_INIT_BUF(qm, aeqe, qm->aeq_depth);
	QM_INIT_BUF(qm, sqc, qm->qp_num);
	QM_INIT_BUF(qm, cqc, qm->qp_num);

	ret = hisi_qm_alloc_rsv_buf(qm);
	if (ret)
		goto err_free_qdma;

	ret = hisi_qp_alloc_memory(qm);
	if (ret)
		goto err_free_reserve_buf;

	return 0;

err_free_reserve_buf:
	hisi_qm_free_rsv_buf(qm);
err_free_qdma:
	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
err_destroy_idr:
	idr_destroy(&qm->qp_idr);
	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
		kfree(qm->factor);

	return ret;
}

/**
 * hisi_qm_init() - Initialize the qm's configuration.
 * @qm: The qm needing init.
 *
 * This function initializes the qm; hisi_qm_start() can then be called to put
 * the qm to work.
 */
int hisi_qm_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	hisi_qm_pre_init(qm);

	ret = hisi_qm_pci_init(qm);
	if (ret)
		return ret;

	ret = qm_irqs_register(qm);
	if (ret)
		goto err_pci_init;

	if (qm->fun_type == QM_HW_PF) {
		/* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */
		writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
		qm_disable_clock_gate(qm);
		ret = qm_dev_mem_reset(qm);
		if (ret) {
			dev_err(dev, "failed to reset device memory\n");
			goto err_irq_register;
		}
	}

	if (qm->mode == UACCE_MODE_SVA) {
		ret = qm_alloc_uacce(qm);
		if (ret < 0)
			dev_warn(dev, "failed to alloc uacce (%d)\n", ret);
	}

	ret = hisi_qm_memory_init(qm);
	if (ret)
		goto err_alloc_uacce;

	ret = hisi_qm_init_work(qm);
	if (ret)
		goto err_free_qm_memory;

	qm_cmd_init(qm);

	return 0;

err_free_qm_memory:
	hisi_qm_memory_uninit(qm);
err_alloc_uacce:
	qm_remove_uacce(qm);
err_irq_register:
	qm_irqs_unregister(qm);
err_pci_init:
	hisi_qm_pci_uninit(qm);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_init);

/**
 * hisi_qm_get_dfx_access() - Try to get dfx access.
 * @qm: pointer to accelerator device.
 *
 * Try to get dfx access; the user can then read the debug messages.
 *
 * If the device is suspended, return failure; otherwise bump up the runtime
 * PM usage counter.
 */
int hisi_qm_get_dfx_access(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (pm_runtime_suspended(dev)) {
		dev_info(dev, "cannot read/write - device is suspended.\n");
		return -EAGAIN;
	}

	return qm_pm_get_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access);

/**
 * hisi_qm_put_dfx_access() - Put dfx access.
 * @qm: pointer to accelerator device.
 *
 * Put dfx access and drop the runtime PM usage counter.
 */
void hisi_qm_put_dfx_access(struct hisi_qm *qm)
{
	qm_pm_put_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access);
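/*
 * A sketch of the intended debugfs usage (the "foo" read handler is
 * hypothetical): take dfx access before touching registers and drop it
 * afterwards so the device cannot runtime-suspend underneath the read.
 *
 *	static ssize_t foo_debug_read(struct file *filp, char __user *buf,
 *				      size_t count, loff_t *pos)
 *	{
 *		struct hisi_qm *qm = filp->private_data;
 *		int ret;
 *
 *		ret = hisi_qm_get_dfx_access(qm);
 *		if (ret)
 *			return ret;
 *
 *		ret = ...;	// read the DFX registers
 *
 *		hisi_qm_put_dfx_access(qm);
 *		return ret;
 *	}
 */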
/**
 * hisi_qm_pm_init() - Initialize qm runtime PM.
 * @qm: pointer to accelerator device.
 *
 * Initialize the qm runtime PM.
 */
void hisi_qm_pm_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_put_noidle(dev);
}
EXPORT_SYMBOL_GPL(hisi_qm_pm_init);

/**
 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
 * @qm: pointer to accelerator device.
 *
 * Uninitialize the qm runtime PM.
 */
void hisi_qm_pm_uninit(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_get_noresume(dev);
	pm_runtime_dont_use_autosuspend(dev);
}
EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit);

static int qm_prepare_for_suspend(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;
	u32 val;

	ret = qm->ops->set_msi(qm, false);
	if (ret) {
		pci_err(pdev, "failed to disable MSI before suspending!\n");
		return ret;
	}

	/* shut down the OOO register */
	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);

	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
					 val,
					 (val == ACC_MASTER_TRANS_RETURN_RW),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		pci_emerg(pdev, "Bus lock! Please reset system.\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, false);
	if (ret)
		pci_err(pdev, "failed to disable MSE before suspending!\n");

	return ret;
}

static int qm_rebuild_for_resume(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSE after resuming!\n");
		return ret;
	}

	ret = qm->ops->set_msi(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSI after resuming!\n");
		return ret;
	}

	ret = qm_dev_hw_init(qm);
	if (ret) {
		pci_err(pdev, "failed to init device after resuming\n");
		return ret;
	}

	qm_cmd_init(qm);
	hisi_qm_dev_err_init(qm);
	/* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */
	writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
	qm_disable_clock_gate(qm);
	ret = qm_dev_mem_reset(qm);
	if (ret)
		pci_err(pdev, "failed to reset device memory\n");

	return ret;
}

/**
 * hisi_qm_suspend() - Runtime suspend of given device.
 * @dev: device to suspend.
 *
 * Suspend the device.
 */
int hisi_qm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	pci_info(pdev, "entering suspended state\n");

	ret = hisi_qm_stop(qm, QM_NORMAL);
	if (ret) {
		pci_err(pdev, "failed to stop qm(%d)\n", ret);
		return ret;
	}

	ret = qm_prepare_for_suspend(qm);
	if (ret)
		pci_err(pdev, "failed to prepare for suspend (%d)\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_suspend);
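/*
 * A sketch of plugging these helpers into runtime PM (hypothetical "foo"
 * names); hisi_qm_resume() is defined just below:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
 *	};
 *
 *	static struct pci_driver foo_pci_driver = {
 *		...
 *		.driver.pm = &foo_pm_ops,
 *	};
 */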
5585 */ 5586 int hisi_qm_resume(struct device *dev) 5587 { 5588 struct pci_dev *pdev = to_pci_dev(dev); 5589 struct hisi_qm *qm = pci_get_drvdata(pdev); 5590 int ret; 5591 5592 pci_info(pdev, "resuming from suspend state\n"); 5593 5594 ret = qm_rebuild_for_resume(qm); 5595 if (ret) { 5596 pci_err(pdev, "failed to rebuild resume(%d)\n", ret); 5597 return ret; 5598 } 5599 5600 ret = hisi_qm_start(qm); 5601 if (ret) { 5602 if (qm_check_dev_error(qm)) { 5603 pci_info(pdev, "failed to start qm due to device error, device will be reset!\n"); 5604 return 0; 5605 } 5606 5607 pci_err(pdev, "failed to start qm(%d)!\n", ret); 5608 } 5609 5610 return ret; 5611 } 5612 EXPORT_SYMBOL_GPL(hisi_qm_resume); 5613 5614 MODULE_LICENSE("GPL v2"); 5615 MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>"); 5616 MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver"); 5617