1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright (c) 2019 HiSilicon Limited. */ 3 #include <asm/page.h> 4 #include <linux/acpi.h> 5 #include <linux/bitmap.h> 6 #include <linux/dma-mapping.h> 7 #include <linux/idr.h> 8 #include <linux/io.h> 9 #include <linux/irqreturn.h> 10 #include <linux/log2.h> 11 #include <linux/pm_runtime.h> 12 #include <linux/seq_file.h> 13 #include <linux/slab.h> 14 #include <linux/uacce.h> 15 #include <linux/uaccess.h> 16 #include <uapi/misc/uacce/hisi_qm.h> 17 #include <linux/hisi_acc_qm.h> 18 #include "qm_common.h" 19 20 /* eq/aeq irq enable */ 21 #define QM_VF_AEQ_INT_SOURCE 0x0 22 #define QM_VF_AEQ_INT_MASK 0x4 23 #define QM_VF_EQ_INT_SOURCE 0x8 24 #define QM_VF_EQ_INT_MASK 0xc 25 26 #define QM_IRQ_VECTOR_MASK GENMASK(15, 0) 27 #define QM_IRQ_TYPE_MASK GENMASK(15, 0) 28 #define QM_IRQ_TYPE_SHIFT 16 29 #define QM_ABN_IRQ_TYPE_MASK GENMASK(7, 0) 30 31 /* mailbox */ 32 #define QM_MB_PING_ALL_VFS 0xffff 33 #define QM_MB_CMD_DATA_SHIFT 32 34 #define QM_MB_CMD_DATA_MASK GENMASK(31, 0) 35 #define QM_MB_STATUS_MASK GENMASK(12, 9) 36 37 /* sqc shift */ 38 #define QM_SQ_HOP_NUM_SHIFT 0 39 #define QM_SQ_PAGE_SIZE_SHIFT 4 40 #define QM_SQ_BUF_SIZE_SHIFT 8 41 #define QM_SQ_SQE_SIZE_SHIFT 12 42 #define QM_SQ_PRIORITY_SHIFT 0 43 #define QM_SQ_ORDERS_SHIFT 4 44 #define QM_SQ_TYPE_SHIFT 8 45 #define QM_QC_PASID_ENABLE 0x1 46 #define QM_QC_PASID_ENABLE_SHIFT 7 47 48 #define QM_SQ_TYPE_MASK GENMASK(3, 0) 49 #define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc).w11) >> 6) & 0x1) 50 51 /* cqc shift */ 52 #define QM_CQ_HOP_NUM_SHIFT 0 53 #define QM_CQ_PAGE_SIZE_SHIFT 4 54 #define QM_CQ_BUF_SIZE_SHIFT 8 55 #define QM_CQ_CQE_SIZE_SHIFT 12 56 #define QM_CQ_PHASE_SHIFT 0 57 #define QM_CQ_FLAG_SHIFT 1 58 59 #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1) 60 #define QM_QC_CQE_SIZE 4 61 #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc).w11) >> 6) & 0x1) 62 63 /* eqc shift */ 64 #define QM_EQE_AEQE_SIZE (2UL << 12) 65 #define QM_EQC_PHASE_SHIFT 16 66 67 #define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1) 68 #define QM_EQE_CQN_MASK GENMASK(15, 0) 69 70 #define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1) 71 #define QM_AEQE_TYPE_SHIFT 17 72 #define QM_AEQE_TYPE_MASK 0xf 73 #define QM_AEQE_CQN_MASK GENMASK(15, 0) 74 #define QM_CQ_OVERFLOW 0 75 #define QM_EQ_OVERFLOW 1 76 #define QM_CQE_ERROR 2 77 78 #define QM_XQ_DEPTH_SHIFT 16 79 #define QM_XQ_DEPTH_MASK GENMASK(15, 0) 80 81 #define QM_DOORBELL_CMD_SQ 0 82 #define QM_DOORBELL_CMD_CQ 1 83 #define QM_DOORBELL_CMD_EQ 2 84 #define QM_DOORBELL_CMD_AEQ 3 85 86 #define QM_DOORBELL_BASE_V1 0x340 87 #define QM_DB_CMD_SHIFT_V1 16 88 #define QM_DB_INDEX_SHIFT_V1 32 89 #define QM_DB_PRIORITY_SHIFT_V1 48 90 #define QM_PAGE_SIZE 0x0034 91 #define QM_QP_DB_INTERVAL 0x10000 92 #define QM_DB_TIMEOUT_CFG 0x100074 93 #define QM_DB_TIMEOUT_SET 0x1fffff 94 95 #define QM_MEM_START_INIT 0x100040 96 #define QM_MEM_INIT_DONE 0x100044 97 #define QM_VFT_CFG_RDY 0x10006c 98 #define QM_VFT_CFG_OP_WR 0x100058 99 #define QM_VFT_CFG_TYPE 0x10005c 100 #define QM_VFT_CFG 0x100060 101 #define QM_VFT_CFG_OP_ENABLE 0x100054 102 #define QM_PM_CTRL 0x100148 103 #define QM_IDLE_DISABLE BIT(9) 104 105 #define QM_VFT_CFG_DATA_L 0x100064 106 #define QM_VFT_CFG_DATA_H 0x100068 107 #define QM_SQC_VFT_BUF_SIZE (7ULL << 8) 108 #define QM_SQC_VFT_SQC_SIZE (5ULL << 12) 109 #define QM_SQC_VFT_INDEX_NUMBER (1ULL << 16) 110 #define QM_SQC_VFT_START_SQN_SHIFT 28 111 #define QM_SQC_VFT_VALID (1ULL << 44) 112 #define QM_SQC_VFT_SQN_SHIFT 45 113 #define 
QM_CQC_VFT_BUF_SIZE (7ULL << 8) 114 #define QM_CQC_VFT_SQC_SIZE (5ULL << 12) 115 #define QM_CQC_VFT_INDEX_NUMBER (1ULL << 16) 116 #define QM_CQC_VFT_VALID (1ULL << 28) 117 118 #define QM_SQC_VFT_BASE_SHIFT_V2 28 119 #define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0) 120 #define QM_SQC_VFT_NUM_SHIFT_V2 45 121 #define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0) 122 123 #define QM_ABNORMAL_INT_SOURCE 0x100000 124 #define QM_ABNORMAL_INT_MASK 0x100004 125 #define QM_ABNORMAL_INT_MASK_VALUE 0x7fff 126 #define QM_ABNORMAL_INT_STATUS 0x100008 127 #define QM_ABNORMAL_INT_SET 0x10000c 128 #define QM_ABNORMAL_INF00 0x100010 129 #define QM_FIFO_OVERFLOW_TYPE 0xc0 130 #define QM_FIFO_OVERFLOW_TYPE_SHIFT 6 131 #define QM_FIFO_OVERFLOW_VF 0x3f 132 #define QM_FIFO_OVERFLOW_QP_SHIFT 16 133 #define QM_ABNORMAL_INF01 0x100014 134 #define QM_DB_TIMEOUT_TYPE 0xc0 135 #define QM_DB_TIMEOUT_TYPE_SHIFT 6 136 #define QM_DB_TIMEOUT_VF 0x3f 137 #define QM_DB_TIMEOUT_QP_SHIFT 16 138 #define QM_ABNORMAL_INF02 0x100018 139 #define QM_AXI_POISON_ERR BIT(22) 140 #define QM_RAS_CE_ENABLE 0x1000ec 141 #define QM_RAS_FE_ENABLE 0x1000f0 142 #define QM_RAS_NFE_ENABLE 0x1000f4 143 #define QM_RAS_CE_THRESHOLD 0x1000f8 144 #define QM_RAS_CE_TIMES_PER_IRQ 1 145 #define QM_OOO_SHUTDOWN_SEL 0x1040f8 146 #define QM_AXI_RRESP_ERR BIT(0) 147 #define QM_ECC_MBIT BIT(2) 148 #define QM_DB_TIMEOUT BIT(10) 149 #define QM_OF_FIFO_OF BIT(11) 150 151 #define QM_RESET_WAIT_TIMEOUT 400 152 #define QM_PEH_VENDOR_ID 0x1000d8 153 #define ACC_VENDOR_ID_VALUE 0x5a5a 154 #define QM_PEH_DFX_INFO0 0x1000fc 155 #define QM_PEH_DFX_INFO1 0x100100 156 #define QM_PEH_DFX_MASK (BIT(0) | BIT(2)) 157 #define QM_PEH_MSI_FINISH_MASK GENMASK(19, 16) 158 #define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3 159 #define ACC_PEH_MSI_DISABLE GENMASK(31, 0) 160 #define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1 161 #define ACC_MASTER_TRANS_RETURN_RW 3 162 #define ACC_MASTER_TRANS_RETURN 0x300150 163 #define ACC_MASTER_GLOBAL_CTRL 0x300000 164 #define ACC_AM_CFG_PORT_WR_EN 0x30001c 165 #define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT 166 #define ACC_AM_ROB_ECC_INT_STS 0x300104 167 #define ACC_ROB_ECC_ERR_MULTPL BIT(1) 168 #define QM_MSI_CAP_ENABLE BIT(16) 169 170 /* interfunction communication */ 171 #define QM_IFC_READY_STATUS 0x100128 172 #define QM_IFC_INT_SET_P 0x100130 173 #define QM_IFC_INT_CFG 0x100134 174 #define QM_IFC_INT_SOURCE_P 0x100138 175 #define QM_IFC_INT_SOURCE_V 0x0020 176 #define QM_IFC_INT_MASK 0x0024 177 #define QM_IFC_INT_STATUS 0x0028 178 #define QM_IFC_INT_SET_V 0x002C 179 #define QM_IFC_SEND_ALL_VFS GENMASK(6, 0) 180 #define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0) 181 #define QM_IFC_INT_SOURCE_MASK BIT(0) 182 #define QM_IFC_INT_DISABLE BIT(0) 183 #define QM_IFC_INT_STATUS_MASK BIT(0) 184 #define QM_IFC_INT_SET_MASK BIT(0) 185 #define QM_WAIT_DST_ACK 10 186 #define QM_MAX_PF_WAIT_COUNT 10 187 #define QM_MAX_VF_WAIT_COUNT 40 188 #define QM_VF_RESET_WAIT_US 20000 189 #define QM_VF_RESET_WAIT_CNT 3000 190 #define QM_VF_RESET_WAIT_TIMEOUT_US \ 191 (QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT) 192 193 #define POLL_PERIOD 10 194 #define POLL_TIMEOUT 1000 195 #define WAIT_PERIOD_US_MAX 200 196 #define WAIT_PERIOD_US_MIN 100 197 #define MAX_WAIT_COUNTS 1000 198 #define QM_CACHE_WB_START 0x204 199 #define QM_CACHE_WB_DONE 0x208 200 #define QM_FUNC_CAPS_REG 0x3100 201 #define QM_CAPBILITY_VERSION GENMASK(7, 0) 202 203 #define PCI_BAR_2 2 204 #define PCI_BAR_4 4 205 #define QMC_ALIGN(sz) ALIGN(sz, 32) 206 207 #define QM_DBG_READ_LEN 256 208 #define QM_PCI_COMMAND_INVALID ~0 209 #define 
QM_RESET_STOP_TX_OFFSET 1 210 #define QM_RESET_STOP_RX_OFFSET 2 211 212 #define WAIT_PERIOD 20 213 #define REMOVE_WAIT_DELAY 10 214 215 #define QM_QOS_PARAM_NUM 2 216 #define QM_QOS_MAX_VAL 1000 217 #define QM_QOS_RATE 100 218 #define QM_QOS_EXPAND_RATE 1000 219 #define QM_SHAPER_CIR_B_MASK GENMASK(7, 0) 220 #define QM_SHAPER_CIR_U_MASK GENMASK(10, 8) 221 #define QM_SHAPER_CIR_S_MASK GENMASK(14, 11) 222 #define QM_SHAPER_FACTOR_CIR_U_SHIFT 8 223 #define QM_SHAPER_FACTOR_CIR_S_SHIFT 11 224 #define QM_SHAPER_FACTOR_CBS_B_SHIFT 15 225 #define QM_SHAPER_FACTOR_CBS_S_SHIFT 19 226 #define QM_SHAPER_CBS_B 1 227 #define QM_SHAPER_VFT_OFFSET 6 228 #define QM_QOS_MIN_ERROR_RATE 5 229 #define QM_SHAPER_MIN_CBS_S 8 230 #define QM_QOS_TICK 0x300U 231 #define QM_QOS_DIVISOR_CLK 0x1f40U 232 #define QM_QOS_MAX_CIR_B 200 233 #define QM_QOS_MIN_CIR_B 100 234 #define QM_QOS_MAX_CIR_U 6 235 #define QM_AUTOSUSPEND_DELAY 3000 236 237 #define QM_DEV_ALG_MAX_LEN 256 238 239 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ 240 (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ 241 ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \ 242 ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \ 243 ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) 244 245 #define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \ 246 ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) 247 248 #define QM_MK_SQC_W13(priority, orders, alg_type) \ 249 (((priority) << QM_SQ_PRIORITY_SHIFT) | \ 250 ((orders) << QM_SQ_ORDERS_SHIFT) | \ 251 (((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT)) 252 253 #define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \ 254 (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \ 255 ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \ 256 ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \ 257 ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) 258 259 #define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \ 260 ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) 261 262 enum vft_type { 263 SQC_VFT = 0, 264 CQC_VFT, 265 SHAPER_VFT, 266 }; 267 268 enum acc_err_result { 269 ACC_ERR_NONE, 270 ACC_ERR_NEED_RESET, 271 ACC_ERR_RECOVERED, 272 }; 273 274 enum qm_alg_type { 275 ALG_TYPE_0, 276 ALG_TYPE_1, 277 }; 278 279 enum qm_mb_cmd { 280 QM_PF_FLR_PREPARE = 0x01, 281 QM_PF_SRST_PREPARE, 282 QM_PF_RESET_DONE, 283 QM_VF_PREPARE_DONE, 284 QM_VF_PREPARE_FAIL, 285 QM_VF_START_DONE, 286 QM_VF_START_FAIL, 287 QM_PF_SET_QOS, 288 QM_VF_GET_QOS, 289 }; 290 291 enum qm_basic_type { 292 QM_TOTAL_QP_NUM_CAP = 0x0, 293 QM_FUNC_MAX_QP_CAP, 294 QM_XEQ_DEPTH_CAP, 295 QM_QP_DEPTH_CAP, 296 QM_EQ_IRQ_TYPE_CAP, 297 QM_AEQ_IRQ_TYPE_CAP, 298 QM_ABN_IRQ_TYPE_CAP, 299 QM_PF2VF_IRQ_TYPE_CAP, 300 QM_PF_IRQ_NUM_CAP, 301 QM_VF_IRQ_NUM_CAP, 302 }; 303 304 enum qm_pre_store_cap_idx { 305 QM_EQ_IRQ_TYPE_CAP_IDX = 0x0, 306 QM_AEQ_IRQ_TYPE_CAP_IDX, 307 QM_ABN_IRQ_TYPE_CAP_IDX, 308 QM_PF2VF_IRQ_TYPE_CAP_IDX, 309 }; 310 311 static const struct hisi_qm_cap_info qm_cap_info_comm[] = { 312 {QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0}, 313 {QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1}, 314 {QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1}, 315 {QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1}, 316 {QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1}, 317 }; 318 319 static const struct hisi_qm_cap_info qm_cap_info_pf[] = { 320 {QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1}, 321 }; 322 323 static const struct hisi_qm_cap_info qm_cap_info_vf[] = { 324 {QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0}, 325 }; 326 327 static const struct hisi_qm_cap_info qm_basic_info[] = { 328 
{QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400}, 329 {QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400}, 330 {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(31, 0), 0x800, 0x4000800, 0x4000800}, 331 {QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400}, 332 {QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000}, 333 {QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001}, 334 {QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003}, 335 {QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002}, 336 {QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4}, 337 {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3}, 338 }; 339 340 static const u32 qm_pre_store_caps[] = { 341 QM_EQ_IRQ_TYPE_CAP, 342 QM_AEQ_IRQ_TYPE_CAP, 343 QM_ABN_IRQ_TYPE_CAP, 344 QM_PF2VF_IRQ_TYPE_CAP, 345 }; 346 347 struct qm_mailbox { 348 __le16 w0; 349 __le16 queue_num; 350 __le32 base_l; 351 __le32 base_h; 352 __le32 rsvd; 353 }; 354 355 struct qm_doorbell { 356 __le16 queue_num; 357 __le16 cmd; 358 __le16 index; 359 __le16 priority; 360 }; 361 362 struct hisi_qm_resource { 363 struct hisi_qm *qm; 364 int distance; 365 struct list_head list; 366 }; 367 368 /** 369 * struct qm_hw_err - Structure describing the device errors 370 * @list: hardware error list 371 * @timestamp: timestamp when the error occurred 372 */ 373 struct qm_hw_err { 374 struct list_head list; 375 unsigned long long timestamp; 376 }; 377 378 struct hisi_qm_hw_ops { 379 int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number); 380 void (*qm_db)(struct hisi_qm *qm, u16 qn, 381 u8 cmd, u16 index, u8 priority); 382 int (*debug_init)(struct hisi_qm *qm); 383 void (*hw_error_init)(struct hisi_qm *qm); 384 void (*hw_error_uninit)(struct hisi_qm *qm); 385 enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm); 386 int (*set_msi)(struct hisi_qm *qm, bool set); 387 }; 388 389 struct hisi_qm_hw_error { 390 u32 int_msk; 391 const char *msg; 392 }; 393 394 static const struct hisi_qm_hw_error qm_hw_error[] = { 395 { .int_msk = BIT(0), .msg = "qm_axi_rresp" }, 396 { .int_msk = BIT(1), .msg = "qm_axi_bresp" }, 397 { .int_msk = BIT(2), .msg = "qm_ecc_mbit" }, 398 { .int_msk = BIT(3), .msg = "qm_ecc_1bit" }, 399 { .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" }, 400 { .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" }, 401 { .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" }, 402 { .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" }, 403 { .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" }, 404 { .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" }, 405 { .int_msk = BIT(10), .msg = "qm_db_timeout" }, 406 { .int_msk = BIT(11), .msg = "qm_of_fifo_of" }, 407 { .int_msk = BIT(12), .msg = "qm_db_random_invalid" }, 408 { .int_msk = BIT(13), .msg = "qm_mailbox_timeout" }, 409 { .int_msk = BIT(14), .msg = "qm_flr_timeout" }, 410 }; 411 412 static const char * const qm_db_timeout[] = { 413 "sq", "cq", "eq", "aeq", 414 }; 415 416 static const char * const qm_fifo_overflow[] = { 417 "cq", "eq", "aeq", 418 }; 419 420 struct qm_typical_qos_table { 421 u32 start; 422 u32 end; 423 u32 val; 424 }; 425 426 /* the qos step is 100 */ 427 static struct qm_typical_qos_table shaper_cir_s[] = { 428 {100, 100, 4}, 429 {200, 200, 3}, 430 {300, 500, 2}, 431 {600, 1000, 1}, 432 {1100, 100000, 0}, 433 }; 434 435 static struct qm_typical_qos_table shaper_cbs_s[] = { 436 {100, 200, 9}, 437 {300, 500, 11}, 438 {600, 1000, 12}, 439 {1100, 10000, 16}, 440 
	{10100, 25000, 17},
	{25100, 50000, 18},
	{50100, 100000, 19}
};

static void qm_irqs_unregister(struct hisi_qm *qm);

static u32 qm_get_hw_error_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
}

static u32 qm_get_dev_err_status(struct hisi_qm *qm)
{
	return qm->err_ini->get_dev_hw_err_status(qm);
}

/* Check if the error causes the master ooo block */
static bool qm_check_dev_error(struct hisi_qm *qm)
{
	u32 val, dev_val;

	if (qm->fun_type == QM_HW_VF)
		return false;

	val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask;
	dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask;

	return val || dev_val;
}

static int qm_wait_reset_finish(struct hisi_qm *qm)
{
	int delay = 0;

	/* All reset requests need to be queued for processing */
	while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return -EBUSY;
	}

	return 0;
}

static int qm_reset_prepare_ready(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	/*
	 * The PF and its VFs on the host do not support being reset at the
	 * same time on Kunpeng920.
	 */
	if (qm->ver < QM_HW_V3)
		return qm_wait_reset_finish(pf_qm);

	return qm_wait_reset_finish(qm);
}

static void qm_reset_bit_clear(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	if (qm->ver < QM_HW_V3)
		clear_bit(QM_RESETTING, &pf_qm->misc_ctl);

	clear_bit(QM_RESETTING, &qm->misc_ctl);
}

static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
			   u64 base, u16 queue, bool op)
{
	mailbox->w0 = cpu_to_le16((cmd) |
		((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
		(0x1 << QM_MB_BUSY_SHIFT));
	mailbox->queue_num = cpu_to_le16(queue);
	mailbox->base_l = cpu_to_le32(lower_32_bits(base));
	mailbox->base_h = cpu_to_le32(upper_32_bits(base));
	mailbox->rsvd = 0;
}

/* Return 0 if the mailbox is ready, -ETIMEDOUT on hardware timeout. */
int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
					  val, !((val >> QM_MB_BUSY_SHIFT) & 0x1),
					  POLL_PERIOD, POLL_TIMEOUT);
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);

/* All 128 bits must be written to the hardware at one time to trigger a mailbox */
static void qm_mb_write(struct hisi_qm *qm, const void *src)
{
	void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;

#if IS_ENABLED(CONFIG_ARM64)
	unsigned long tmp0 = 0, tmp1 = 0;
#endif

	if (!IS_ENABLED(CONFIG_ARM64)) {
		memcpy_toio(fun_base, src, 16);
		dma_wmb();
		return;
	}

#if IS_ENABLED(CONFIG_ARM64)
	asm volatile("ldp %0, %1, %3\n"
		     "stp %0, %1, %2\n"
		     "dmb oshst\n"
		     : "=&r" (tmp0),
		       "=&r" (tmp1),
		       "+Q" (*((char __iomem *)fun_base))
		     : "Q" (*((char *)src))
		     : "memory");
#endif
}

static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
	int ret;
	u32 val;

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox is busy, cannot start operation!\n");
		ret = -EBUSY;
		goto mb_busy;
	}

	qm_mb_write(qm, mailbox);

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
		ret = -ETIMEDOUT;
		goto mb_busy;
	}

	val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
	if (val & QM_MB_STATUS_MASK) {
		dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
		ret = -EIO;
		goto mb_busy;
	}

	return 0;

mb_busy:
	atomic64_inc(&qm->debug.dfx.mb_err_cnt);
	return ret;
}

int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
	       bool op)
{
	struct qm_mailbox mailbox;
	int ret;

	qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);

	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	mutex_unlock(&qm->mailbox_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_mb);

/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
	struct qm_mailbox mailbox;
	dma_addr_t xqc_dma;
	void *tmp_xqc;
	size_t size;
	int ret;

	switch (cmd) {
	case QM_MB_CMD_SQC:
		size = sizeof(struct qm_sqc);
		tmp_xqc = qm->xqc_buf.sqc;
		xqc_dma = qm->xqc_buf.sqc_dma;
		break;
	case QM_MB_CMD_CQC:
		size = sizeof(struct qm_cqc);
		tmp_xqc = qm->xqc_buf.cqc;
		xqc_dma = qm->xqc_buf.cqc_dma;
		break;
	case QM_MB_CMD_EQC:
		size = sizeof(struct qm_eqc);
		tmp_xqc = qm->xqc_buf.eqc;
		xqc_dma = qm->xqc_buf.eqc_dma;
		break;
	case QM_MB_CMD_AEQC:
		size = sizeof(struct qm_aeqc);
		tmp_xqc = qm->xqc_buf.aeqc;
		xqc_dma = qm->xqc_buf.aeqc_dma;
		break;
	}

	/* Setting the xqc will fail if master OOO is blocked. */
	if (qm_check_dev_error(pf_qm)) {
		dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stopped!\n");
		return -EIO;
	}

	mutex_lock(&qm->mailbox_lock);
	if (!op)
		memcpy(tmp_xqc, xqc, size);

	qm_mb_pre_init(&mailbox, cmd, xqc_dma, qp_id, op);
	ret = qm_mb_nolock(qm, &mailbox);
	if (!ret && op)
		memcpy(xqc, tmp_xqc, size);

	mutex_unlock(&qm->mailbox_lock);

	return ret;
}

static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	u64 doorbell;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V1) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);

	writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
}

static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	void __iomem *io_base = qm->io_base;
	u16 randata = 0;
	u64 doorbell;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		io_base = qm->db_io_base + (u64)qn * qm->db_interval +
			  QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, io_base);
}

static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
		qn, cmd, index);

	qm->ops->qm_db(qm, qn, cmd, index, priority);
}

static void qm_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	/* If the QM enables clock gating on Kunpeng930, QoS will be inaccurate. */
	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + QM_PM_CTRL);
	val |= QM_IDLE_DISABLE;
	writel(val, qm->io_base + QM_PM_CTRL);
}

static int qm_dev_mem_reset(struct hisi_qm *qm)
{
	u32 val;

	writel(0x1, qm->io_base + QM_MEM_START_INIT);
	return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}

/**
 * hisi_qm_get_hw_info() - Get device information.
 * @qm: The qm from which to get the information.
 * @info_table: Array for storing device information.
 * @index: Index in info_table.
 * @is_read: Whether to read the value from the register; 0 means reading
 *	     from the register is not supported.
 *
 * This function returns the device information the caller needs.
733 */ 734 u32 hisi_qm_get_hw_info(struct hisi_qm *qm, 735 const struct hisi_qm_cap_info *info_table, 736 u32 index, bool is_read) 737 { 738 u32 val; 739 740 switch (qm->ver) { 741 case QM_HW_V1: 742 return info_table[index].v1_val; 743 case QM_HW_V2: 744 return info_table[index].v2_val; 745 default: 746 if (!is_read) 747 return info_table[index].v3_val; 748 749 val = readl(qm->io_base + info_table[index].offset); 750 return (val >> info_table[index].shift) & info_table[index].mask; 751 } 752 } 753 EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info); 754 755 static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits, 756 u16 *high_bits, enum qm_basic_type type) 757 { 758 u32 depth; 759 760 depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver); 761 *low_bits = depth & QM_XQ_DEPTH_MASK; 762 *high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK; 763 } 764 765 int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs, 766 u32 dev_algs_size) 767 { 768 struct device *dev = &qm->pdev->dev; 769 char *algs, *ptr; 770 int i; 771 772 if (!qm->uacce) 773 return 0; 774 775 if (dev_algs_size >= QM_DEV_ALG_MAX_LEN) { 776 dev_err(dev, "algs size %u is equal or larger than %d.\n", 777 dev_algs_size, QM_DEV_ALG_MAX_LEN); 778 return -EINVAL; 779 } 780 781 algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); 782 if (!algs) 783 return -ENOMEM; 784 785 for (i = 0; i < dev_algs_size; i++) 786 if (alg_msk & dev_algs[i].alg_msk) 787 strcat(algs, dev_algs[i].alg); 788 789 ptr = strrchr(algs, '\n'); 790 if (ptr) { 791 *ptr = '\0'; 792 qm->uacce->algs = algs; 793 } 794 795 return 0; 796 } 797 EXPORT_SYMBOL_GPL(hisi_qm_set_algs); 798 799 static u32 qm_get_irq_num(struct hisi_qm *qm) 800 { 801 if (qm->fun_type == QM_HW_PF) 802 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver); 803 804 return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver); 805 } 806 807 static int qm_pm_get_sync(struct hisi_qm *qm) 808 { 809 struct device *dev = &qm->pdev->dev; 810 int ret; 811 812 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) 813 return 0; 814 815 ret = pm_runtime_resume_and_get(dev); 816 if (ret < 0) { 817 dev_err(dev, "failed to get_sync(%d).\n", ret); 818 return ret; 819 } 820 821 return 0; 822 } 823 824 static void qm_pm_put_sync(struct hisi_qm *qm) 825 { 826 struct device *dev = &qm->pdev->dev; 827 828 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) 829 return; 830 831 pm_runtime_mark_last_busy(dev); 832 pm_runtime_put_autosuspend(dev); 833 } 834 835 static void qm_cq_head_update(struct hisi_qp *qp) 836 { 837 if (qp->qp_status.cq_head == qp->cq_depth - 1) { 838 qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase; 839 qp->qp_status.cq_head = 0; 840 } else { 841 qp->qp_status.cq_head++; 842 } 843 } 844 845 static void qm_poll_req_cb(struct hisi_qp *qp) 846 { 847 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; 848 struct hisi_qm *qm = qp->qm; 849 850 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { 851 dma_rmb(); 852 qp->req_cb(qp, qp->sqe + qm->sqe_size * 853 le16_to_cpu(cqe->sq_head)); 854 qm_cq_head_update(qp); 855 cqe = qp->cqe + qp->qp_status.cq_head; 856 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, 857 qp->qp_status.cq_head, 0); 858 atomic_dec(&qp->qp_status.used); 859 860 cond_resched(); 861 } 862 863 /* set c_flag */ 864 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1); 865 } 866 867 static void qm_work_process(struct work_struct *work) 868 { 869 struct hisi_qm_poll_data *poll_data = 870 
container_of(work, struct hisi_qm_poll_data, work); 871 struct hisi_qm *qm = poll_data->qm; 872 u16 eqe_num = poll_data->eqe_num; 873 struct hisi_qp *qp; 874 int i; 875 876 for (i = eqe_num - 1; i >= 0; i--) { 877 qp = &qm->qp_array[poll_data->qp_finish_id[i]]; 878 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP)) 879 continue; 880 881 if (qp->event_cb) { 882 qp->event_cb(qp); 883 continue; 884 } 885 886 if (likely(qp->req_cb)) 887 qm_poll_req_cb(qp); 888 } 889 } 890 891 static void qm_get_complete_eqe_num(struct hisi_qm *qm) 892 { 893 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; 894 struct hisi_qm_poll_data *poll_data = NULL; 895 u16 eq_depth = qm->eq_depth; 896 u16 cqn, eqe_num = 0; 897 898 if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) { 899 atomic64_inc(&qm->debug.dfx.err_irq_cnt); 900 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); 901 return; 902 } 903 904 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; 905 if (unlikely(cqn >= qm->qp_num)) 906 return; 907 poll_data = &qm->poll_data[cqn]; 908 909 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { 910 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; 911 poll_data->qp_finish_id[eqe_num] = cqn; 912 eqe_num++; 913 914 if (qm->status.eq_head == eq_depth - 1) { 915 qm->status.eqc_phase = !qm->status.eqc_phase; 916 eqe = qm->eqe; 917 qm->status.eq_head = 0; 918 } else { 919 eqe++; 920 qm->status.eq_head++; 921 } 922 923 if (eqe_num == (eq_depth >> 1) - 1) 924 break; 925 } 926 927 poll_data->eqe_num = eqe_num; 928 queue_work(qm->wq, &poll_data->work); 929 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); 930 } 931 932 static irqreturn_t qm_eq_irq(int irq, void *data) 933 { 934 struct hisi_qm *qm = data; 935 936 /* Get qp id of completed tasks and re-enable the interrupt */ 937 qm_get_complete_eqe_num(qm); 938 939 return IRQ_HANDLED; 940 } 941 942 static irqreturn_t qm_mb_cmd_irq(int irq, void *data) 943 { 944 struct hisi_qm *qm = data; 945 u32 val; 946 947 val = readl(qm->io_base + QM_IFC_INT_STATUS); 948 val &= QM_IFC_INT_STATUS_MASK; 949 if (!val) 950 return IRQ_NONE; 951 952 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) { 953 dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n"); 954 return IRQ_HANDLED; 955 } 956 957 schedule_work(&qm->cmd_process); 958 959 return IRQ_HANDLED; 960 } 961 962 static void qm_set_qp_disable(struct hisi_qp *qp, int offset) 963 { 964 u32 *addr; 965 966 if (qp->is_in_kernel) 967 return; 968 969 addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset; 970 *addr = 1; 971 972 /* make sure setup is completed */ 973 smp_wmb(); 974 } 975 976 static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id) 977 { 978 struct hisi_qp *qp = &qm->qp_array[qp_id]; 979 980 qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET); 981 hisi_qm_stop_qp(qp); 982 qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET); 983 } 984 985 static void qm_reset_function(struct hisi_qm *qm) 986 { 987 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); 988 struct device *dev = &qm->pdev->dev; 989 int ret; 990 991 if (qm_check_dev_error(pf_qm)) 992 return; 993 994 ret = qm_reset_prepare_ready(qm); 995 if (ret) { 996 dev_err(dev, "reset function not ready\n"); 997 return; 998 } 999 1000 ret = hisi_qm_stop(qm, QM_DOWN); 1001 if (ret) { 1002 dev_err(dev, "failed to stop qm when reset function\n"); 1003 goto clear_bit; 1004 } 1005 1006 ret = hisi_qm_start(qm); 1007 if (ret) 1008 dev_err(dev, "failed to start qm when reset function\n"); 1009 1010 clear_bit: 1011 qm_reset_bit_clear(qm); 1012 } 1013 1014 static 
irqreturn_t qm_aeq_thread(int irq, void *data)
{
	struct hisi_qm *qm = data;
	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
	u16 aeq_depth = qm->aeq_depth;
	u32 type, qp_id;

	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);

	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
		type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) &
			QM_AEQE_TYPE_MASK;
		qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;

		switch (type) {
		case QM_EQ_OVERFLOW:
			dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
			qm_reset_function(qm);
			return IRQ_HANDLED;
		case QM_CQ_OVERFLOW:
			dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
				qp_id);
			fallthrough;
		case QM_CQE_ERROR:
			qm_disable_qp(qm, qp_id);
			break;
		default:
			dev_err(&qm->pdev->dev, "unknown error type %u\n",
				type);
			break;
		}

		if (qm->status.aeq_head == aeq_depth - 1) {
			qm->status.aeqc_phase = !qm->status.aeqc_phase;
			aeqe = qm->aeqe;
			qm->status.aeq_head = 0;
		} else {
			aeqe++;
			qm->status.aeq_head++;
		}
	}

	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);

	return IRQ_HANDLED;
}

static void qm_init_qp_status(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;

	qp_status->sq_tail = 0;
	qp_status->cq_head = 0;
	qp_status->cqc_phase = true;
	atomic_set(&qp_status->used, 0);
}

static void qm_init_prefetch(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 page_type = 0x0;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	switch (PAGE_SIZE) {
	case SZ_4K:
		page_type = 0x0;
		break;
	case SZ_16K:
		page_type = 0x1;
		break;
	case SZ_64K:
		page_type = 0x2;
		break;
	default:
		dev_err(dev, "system page size is not supported: %lu, default set to 4KB",
			PAGE_SIZE);
	}

	writel(page_type, qm->io_base + QM_PAGE_SIZE);
}

/*
 * acc_shaper_para_calc() - Get the IR value from the QoS formula; the return
 * value is the expected QoS. IR = X Mbps: ir = 1 means IR = 100 Mbps and
 * ir = 10000 means IR = 10 Gbps.
 *
 * The formula:
 *
 *               IR_b * (2 ^ IR_u) * 8000
 *   IR(Mbps) = --------------------------
 *                  Tick * (2 ^ IR_s)
 */
static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
{
	return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
	       (QM_QOS_TICK * (1 << cir_s));
}

static u32 acc_shaper_calc_cbs_s(u32 ir)
{
	int table_size = ARRAY_SIZE(shaper_cbs_s);
	int i;

	for (i = 0; i < table_size; i++) {
		if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end)
			return shaper_cbs_s[i].val;
	}

	return QM_SHAPER_MIN_CBS_S;
}

static u32 acc_shaper_calc_cir_s(u32 ir)
{
	int table_size = ARRAY_SIZE(shaper_cir_s);
	int i;

	for (i = 0; i < table_size; i++) {
		if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end)
			return shaper_cir_s[i].val;
	}

	return 0;
}

static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
{
	u32 cir_b, cir_u, cir_s, ir_calc;
	u32 error_rate;

	factor->cbs_s = acc_shaper_calc_cbs_s(ir);
	cir_s = acc_shaper_calc_cir_s(ir);

	for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
		for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
			ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);

			error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
			if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
				factor->cir_b = cir_b;
				factor->cir_u = cir_u;
				factor->cir_s = cir_s;
				return 0;
			}
		}
	}

	return -EINVAL;
}

static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
			    u32 number, struct qm_shaper_factor *factor)
{
	u64 tmp = 0;

	if (number > 0) {
		switch (type) {
		case SQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_SQC_VFT_BUF_SIZE |
				      QM_SQC_VFT_SQC_SIZE |
				      QM_SQC_VFT_INDEX_NUMBER |
				      QM_SQC_VFT_VALID |
				      (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
			} else {
				tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
				      QM_SQC_VFT_VALID |
				      (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
			}
			break;
		case CQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_CQC_VFT_BUF_SIZE |
				      QM_CQC_VFT_SQC_SIZE |
				      QM_CQC_VFT_INDEX_NUMBER |
				      QM_CQC_VFT_VALID;
			} else {
				tmp = QM_CQC_VFT_VALID;
			}
			break;
		case SHAPER_VFT:
			if (factor) {
				tmp = factor->cir_b |
				      (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
				      (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
				      (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
				      (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
			}
			break;
		}
	}

	writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
	writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
}

static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
			     u32 fun_num, u32 base, u32 number)
{
	struct qm_shaper_factor *factor = NULL;
	unsigned int val;
	int ret;

	if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
		factor = &qm->factor[fun_num];

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), POLL_PERIOD,
					 POLL_TIMEOUT);
	if (ret)
		return ret;

	writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
	writel(type,
qm->io_base + QM_VFT_CFG_TYPE); 1228 if (type == SHAPER_VFT) 1229 fun_num |= base << QM_SHAPER_VFT_OFFSET; 1230 1231 writel(fun_num, qm->io_base + QM_VFT_CFG); 1232 1233 qm_vft_data_cfg(qm, type, base, number, factor); 1234 1235 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); 1236 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); 1237 1238 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 1239 val & BIT(0), POLL_PERIOD, 1240 POLL_TIMEOUT); 1241 } 1242 1243 static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num) 1244 { 1245 u32 qos = qm->factor[fun_num].func_qos; 1246 int ret, i; 1247 1248 ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]); 1249 if (ret) { 1250 dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n"); 1251 return ret; 1252 } 1253 writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG); 1254 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) { 1255 /* The base number of queue reuse for different alg type */ 1256 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1); 1257 if (ret) 1258 return ret; 1259 } 1260 1261 return 0; 1262 } 1263 1264 /* The config should be conducted after qm_dev_mem_reset() */ 1265 static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base, 1266 u32 number) 1267 { 1268 int ret, i; 1269 1270 for (i = SQC_VFT; i <= CQC_VFT; i++) { 1271 ret = qm_set_vft_common(qm, i, fun_num, base, number); 1272 if (ret) 1273 return ret; 1274 } 1275 1276 /* init default shaper qos val */ 1277 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { 1278 ret = qm_shaper_init_vft(qm, fun_num); 1279 if (ret) 1280 goto back_sqc_cqc; 1281 } 1282 1283 return 0; 1284 back_sqc_cqc: 1285 for (i = SQC_VFT; i <= CQC_VFT; i++) 1286 qm_set_vft_common(qm, i, fun_num, 0, 0); 1287 1288 return ret; 1289 } 1290 1291 static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) 1292 { 1293 u64 sqc_vft; 1294 int ret; 1295 1296 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); 1297 if (ret) 1298 return ret; 1299 1300 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | 1301 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); 1302 *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); 1303 *number = (QM_SQC_VFT_NUM_MASK_V2 & 1304 (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; 1305 1306 return 0; 1307 } 1308 1309 static void qm_hw_error_init_v1(struct hisi_qm *qm) 1310 { 1311 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); 1312 } 1313 1314 static void qm_hw_error_cfg(struct hisi_qm *qm) 1315 { 1316 struct hisi_qm_err_info *err_info = &qm->err_info; 1317 1318 qm->error_mask = err_info->nfe | err_info->ce | err_info->fe; 1319 /* clear QM hw residual error source */ 1320 writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE); 1321 1322 /* configure error type */ 1323 writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE); 1324 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); 1325 writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE); 1326 writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE); 1327 } 1328 1329 static void qm_hw_error_init_v2(struct hisi_qm *qm) 1330 { 1331 u32 irq_unmask; 1332 1333 qm_hw_error_cfg(qm); 1334 1335 irq_unmask = ~qm->error_mask; 1336 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); 1337 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); 1338 } 1339 1340 static void qm_hw_error_uninit_v2(struct hisi_qm *qm) 1341 { 1342 u32 irq_mask = qm->error_mask; 1343 1344 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); 1345 writel(irq_mask, 
qm->io_base + QM_ABNORMAL_INT_MASK); 1346 } 1347 1348 static void qm_hw_error_init_v3(struct hisi_qm *qm) 1349 { 1350 u32 irq_unmask; 1351 1352 qm_hw_error_cfg(qm); 1353 1354 /* enable close master ooo when hardware error happened */ 1355 writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL); 1356 1357 irq_unmask = ~qm->error_mask; 1358 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); 1359 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); 1360 } 1361 1362 static void qm_hw_error_uninit_v3(struct hisi_qm *qm) 1363 { 1364 u32 irq_mask = qm->error_mask; 1365 1366 irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); 1367 writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); 1368 1369 /* disable close master ooo when hardware error happened */ 1370 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL); 1371 } 1372 1373 static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status) 1374 { 1375 const struct hisi_qm_hw_error *err; 1376 struct device *dev = &qm->pdev->dev; 1377 u32 reg_val, type, vf_num, qp_id; 1378 int i; 1379 1380 for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) { 1381 err = &qm_hw_error[i]; 1382 if (!(err->int_msk & error_status)) 1383 continue; 1384 1385 dev_err(dev, "%s [error status=0x%x] found\n", 1386 err->msg, err->int_msk); 1387 1388 if (err->int_msk & QM_DB_TIMEOUT) { 1389 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01); 1390 type = (reg_val & QM_DB_TIMEOUT_TYPE) >> 1391 QM_DB_TIMEOUT_TYPE_SHIFT; 1392 vf_num = reg_val & QM_DB_TIMEOUT_VF; 1393 qp_id = reg_val >> QM_DB_TIMEOUT_QP_SHIFT; 1394 dev_err(dev, "qm %s doorbell timeout in function %u qp %u\n", 1395 qm_db_timeout[type], vf_num, qp_id); 1396 } else if (err->int_msk & QM_OF_FIFO_OF) { 1397 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00); 1398 type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >> 1399 QM_FIFO_OVERFLOW_TYPE_SHIFT; 1400 vf_num = reg_val & QM_FIFO_OVERFLOW_VF; 1401 qp_id = reg_val >> QM_FIFO_OVERFLOW_QP_SHIFT; 1402 if (type < ARRAY_SIZE(qm_fifo_overflow)) 1403 dev_err(dev, "qm %s fifo overflow in function %u qp %u\n", 1404 qm_fifo_overflow[type], vf_num, qp_id); 1405 else 1406 dev_err(dev, "unknown error type\n"); 1407 } else if (err->int_msk & QM_AXI_RRESP_ERR) { 1408 reg_val = readl(qm->io_base + QM_ABNORMAL_INF02); 1409 if (reg_val & QM_AXI_POISON_ERR) 1410 dev_err(dev, "qm axi poison error happened\n"); 1411 } 1412 } 1413 } 1414 1415 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) 1416 { 1417 u32 error_status, tmp; 1418 1419 /* read err sts */ 1420 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); 1421 error_status = qm->error_mask & tmp; 1422 1423 if (error_status) { 1424 if (error_status & QM_ECC_MBIT) 1425 qm->err_status.is_qm_ecc_mbit = true; 1426 1427 qm_log_hw_error(qm, error_status); 1428 if (error_status & qm->err_info.qm_reset_mask) 1429 return ACC_ERR_NEED_RESET; 1430 1431 writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); 1432 writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE); 1433 } 1434 1435 return ACC_ERR_RECOVERED; 1436 } 1437 1438 static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num) 1439 { 1440 struct qm_mailbox mailbox; 1441 int ret; 1442 1443 qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0); 1444 mutex_lock(&qm->mailbox_lock); 1445 ret = qm_mb_nolock(qm, &mailbox); 1446 if (ret) 1447 goto err_unlock; 1448 1449 *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | 1450 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); 1451 1452 err_unlock: 1453 mutex_unlock(&qm->mailbox_lock); 1454 return ret; 1455 } 
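
/*
 * Interfunction communication (IFC) between PF and VFs, as used by the
 * helpers below: the sender writes its message with the QM_MB_CMD_SRC
 * mailbox command and raises the peer's interrupt through QM_IFC_INT_SET_P
 * (PF -> VF) or QM_IFC_INT_SET_V (VF -> PF). The receiver reads the message
 * back with QM_MB_CMD_DST. The PF polls QM_IFC_READY_STATUS for VF
 * acknowledgements, a VF polls QM_IFC_INT_SET_V for the PF acknowledgement,
 * and qm_clear_cmd_interrupt() clears the QM_IFC_INT_SOURCE_P/V source bits.
 */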
1456 1457 static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask) 1458 { 1459 u32 val; 1460 1461 if (qm->fun_type == QM_HW_PF) 1462 writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P); 1463 1464 val = readl(qm->io_base + QM_IFC_INT_SOURCE_V); 1465 val |= QM_IFC_INT_SOURCE_MASK; 1466 writel(val, qm->io_base + QM_IFC_INT_SOURCE_V); 1467 } 1468 1469 static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id) 1470 { 1471 struct device *dev = &qm->pdev->dev; 1472 u32 cmd; 1473 u64 msg; 1474 int ret; 1475 1476 ret = qm_get_mb_cmd(qm, &msg, vf_id); 1477 if (ret) { 1478 dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id); 1479 return; 1480 } 1481 1482 cmd = msg & QM_MB_CMD_DATA_MASK; 1483 switch (cmd) { 1484 case QM_VF_PREPARE_FAIL: 1485 dev_err(dev, "failed to stop VF(%u)!\n", vf_id); 1486 break; 1487 case QM_VF_START_FAIL: 1488 dev_err(dev, "failed to start VF(%u)!\n", vf_id); 1489 break; 1490 case QM_VF_PREPARE_DONE: 1491 case QM_VF_START_DONE: 1492 break; 1493 default: 1494 dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id); 1495 break; 1496 } 1497 } 1498 1499 static int qm_wait_vf_prepare_finish(struct hisi_qm *qm) 1500 { 1501 struct device *dev = &qm->pdev->dev; 1502 u32 vfs_num = qm->vfs_num; 1503 int cnt = 0; 1504 int ret = 0; 1505 u64 val; 1506 u32 i; 1507 1508 if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 1509 return 0; 1510 1511 while (true) { 1512 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); 1513 /* All VFs send command to PF, break */ 1514 if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1)) 1515 break; 1516 1517 if (++cnt > QM_MAX_PF_WAIT_COUNT) { 1518 ret = -EBUSY; 1519 break; 1520 } 1521 1522 msleep(QM_WAIT_DST_ACK); 1523 } 1524 1525 /* PF check VFs msg */ 1526 for (i = 1; i <= vfs_num; i++) { 1527 if (val & BIT(i)) 1528 qm_handle_vf_msg(qm, i); 1529 else 1530 dev_err(dev, "VF(%u) not ping PF!\n", i); 1531 } 1532 1533 /* PF clear interrupt to ack VFs */ 1534 qm_clear_cmd_interrupt(qm, val); 1535 1536 return ret; 1537 } 1538 1539 static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num) 1540 { 1541 u32 val; 1542 1543 val = readl(qm->io_base + QM_IFC_INT_CFG); 1544 val &= ~QM_IFC_SEND_ALL_VFS; 1545 val |= fun_num; 1546 writel(val, qm->io_base + QM_IFC_INT_CFG); 1547 1548 val = readl(qm->io_base + QM_IFC_INT_SET_P); 1549 val |= QM_IFC_INT_SET_MASK; 1550 writel(val, qm->io_base + QM_IFC_INT_SET_P); 1551 } 1552 1553 static void qm_trigger_pf_interrupt(struct hisi_qm *qm) 1554 { 1555 u32 val; 1556 1557 val = readl(qm->io_base + QM_IFC_INT_SET_V); 1558 val |= QM_IFC_INT_SET_MASK; 1559 writel(val, qm->io_base + QM_IFC_INT_SET_V); 1560 } 1561 1562 static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num) 1563 { 1564 struct device *dev = &qm->pdev->dev; 1565 struct qm_mailbox mailbox; 1566 int cnt = 0; 1567 u64 val; 1568 int ret; 1569 1570 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0); 1571 mutex_lock(&qm->mailbox_lock); 1572 ret = qm_mb_nolock(qm, &mailbox); 1573 if (ret) { 1574 dev_err(dev, "failed to send command to vf(%u)!\n", fun_num); 1575 goto err_unlock; 1576 } 1577 1578 qm_trigger_vf_interrupt(qm, fun_num); 1579 while (true) { 1580 msleep(QM_WAIT_DST_ACK); 1581 val = readq(qm->io_base + QM_IFC_READY_STATUS); 1582 /* if VF respond, PF notifies VF successfully. 
*/ 1583 if (!(val & BIT(fun_num))) 1584 goto err_unlock; 1585 1586 if (++cnt > QM_MAX_PF_WAIT_COUNT) { 1587 dev_err(dev, "failed to get response from VF(%u)!\n", fun_num); 1588 ret = -ETIMEDOUT; 1589 break; 1590 } 1591 } 1592 1593 err_unlock: 1594 mutex_unlock(&qm->mailbox_lock); 1595 return ret; 1596 } 1597 1598 static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd) 1599 { 1600 struct device *dev = &qm->pdev->dev; 1601 u32 vfs_num = qm->vfs_num; 1602 struct qm_mailbox mailbox; 1603 u64 val = 0; 1604 int cnt = 0; 1605 int ret; 1606 u32 i; 1607 1608 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0); 1609 mutex_lock(&qm->mailbox_lock); 1610 /* PF sends command to all VFs by mailbox */ 1611 ret = qm_mb_nolock(qm, &mailbox); 1612 if (ret) { 1613 dev_err(dev, "failed to send command to VFs!\n"); 1614 mutex_unlock(&qm->mailbox_lock); 1615 return ret; 1616 } 1617 1618 qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS); 1619 while (true) { 1620 msleep(QM_WAIT_DST_ACK); 1621 val = readq(qm->io_base + QM_IFC_READY_STATUS); 1622 /* If all VFs acked, PF notifies VFs successfully. */ 1623 if (!(val & GENMASK(vfs_num, 1))) { 1624 mutex_unlock(&qm->mailbox_lock); 1625 return 0; 1626 } 1627 1628 if (++cnt > QM_MAX_PF_WAIT_COUNT) 1629 break; 1630 } 1631 1632 mutex_unlock(&qm->mailbox_lock); 1633 1634 /* Check which vf respond timeout. */ 1635 for (i = 1; i <= vfs_num; i++) { 1636 if (val & BIT(i)) 1637 dev_err(dev, "failed to get response from VF(%u)!\n", i); 1638 } 1639 1640 return -ETIMEDOUT; 1641 } 1642 1643 static int qm_ping_pf(struct hisi_qm *qm, u64 cmd) 1644 { 1645 struct qm_mailbox mailbox; 1646 int cnt = 0; 1647 u32 val; 1648 int ret; 1649 1650 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0); 1651 mutex_lock(&qm->mailbox_lock); 1652 ret = qm_mb_nolock(qm, &mailbox); 1653 if (ret) { 1654 dev_err(&qm->pdev->dev, "failed to send command to PF!\n"); 1655 goto unlock; 1656 } 1657 1658 qm_trigger_pf_interrupt(qm); 1659 /* Waiting for PF response */ 1660 while (true) { 1661 msleep(QM_WAIT_DST_ACK); 1662 val = readl(qm->io_base + QM_IFC_INT_SET_V); 1663 if (!(val & QM_IFC_INT_STATUS_MASK)) 1664 break; 1665 1666 if (++cnt > QM_MAX_VF_WAIT_COUNT) { 1667 ret = -ETIMEDOUT; 1668 break; 1669 } 1670 } 1671 1672 unlock: 1673 mutex_unlock(&qm->mailbox_lock); 1674 return ret; 1675 } 1676 1677 static int qm_stop_qp(struct hisi_qp *qp) 1678 { 1679 return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); 1680 } 1681 1682 static int qm_set_msi(struct hisi_qm *qm, bool set) 1683 { 1684 struct pci_dev *pdev = qm->pdev; 1685 1686 if (set) { 1687 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, 1688 0); 1689 } else { 1690 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, 1691 ACC_PEH_MSI_DISABLE); 1692 if (qm->err_status.is_qm_ecc_mbit || 1693 qm->err_status.is_dev_ecc_mbit) 1694 return 0; 1695 1696 mdelay(1); 1697 if (readl(qm->io_base + QM_PEH_DFX_INFO0)) 1698 return -EFAULT; 1699 } 1700 1701 return 0; 1702 } 1703 1704 static void qm_wait_msi_finish(struct hisi_qm *qm) 1705 { 1706 struct pci_dev *pdev = qm->pdev; 1707 u32 cmd = ~0; 1708 int cnt = 0; 1709 u32 val; 1710 int ret; 1711 1712 while (true) { 1713 pci_read_config_dword(pdev, pdev->msi_cap + 1714 PCI_MSI_PENDING_64, &cmd); 1715 if (!cmd) 1716 break; 1717 1718 if (++cnt > MAX_WAIT_COUNTS) { 1719 pci_warn(pdev, "failed to empty MSI PENDING!\n"); 1720 break; 1721 } 1722 1723 udelay(1); 1724 } 1725 1726 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0, 1727 val, !(val & QM_PEH_DFX_MASK), 1728 
POLL_PERIOD, POLL_TIMEOUT); 1729 if (ret) 1730 pci_warn(pdev, "failed to empty PEH MSI!\n"); 1731 1732 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1, 1733 val, !(val & QM_PEH_MSI_FINISH_MASK), 1734 POLL_PERIOD, POLL_TIMEOUT); 1735 if (ret) 1736 pci_warn(pdev, "failed to finish MSI operation!\n"); 1737 } 1738 1739 static int qm_set_msi_v3(struct hisi_qm *qm, bool set) 1740 { 1741 struct pci_dev *pdev = qm->pdev; 1742 int ret = -ETIMEDOUT; 1743 u32 cmd, i; 1744 1745 pci_read_config_dword(pdev, pdev->msi_cap, &cmd); 1746 if (set) 1747 cmd |= QM_MSI_CAP_ENABLE; 1748 else 1749 cmd &= ~QM_MSI_CAP_ENABLE; 1750 1751 pci_write_config_dword(pdev, pdev->msi_cap, cmd); 1752 if (set) { 1753 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 1754 pci_read_config_dword(pdev, pdev->msi_cap, &cmd); 1755 if (cmd & QM_MSI_CAP_ENABLE) 1756 return 0; 1757 1758 udelay(1); 1759 } 1760 } else { 1761 udelay(WAIT_PERIOD_US_MIN); 1762 qm_wait_msi_finish(qm); 1763 ret = 0; 1764 } 1765 1766 return ret; 1767 } 1768 1769 static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { 1770 .qm_db = qm_db_v1, 1771 .hw_error_init = qm_hw_error_init_v1, 1772 .set_msi = qm_set_msi, 1773 }; 1774 1775 static const struct hisi_qm_hw_ops qm_hw_ops_v2 = { 1776 .get_vft = qm_get_vft_v2, 1777 .qm_db = qm_db_v2, 1778 .hw_error_init = qm_hw_error_init_v2, 1779 .hw_error_uninit = qm_hw_error_uninit_v2, 1780 .hw_error_handle = qm_hw_error_handle_v2, 1781 .set_msi = qm_set_msi, 1782 }; 1783 1784 static const struct hisi_qm_hw_ops qm_hw_ops_v3 = { 1785 .get_vft = qm_get_vft_v2, 1786 .qm_db = qm_db_v2, 1787 .hw_error_init = qm_hw_error_init_v3, 1788 .hw_error_uninit = qm_hw_error_uninit_v3, 1789 .hw_error_handle = qm_hw_error_handle_v2, 1790 .set_msi = qm_set_msi_v3, 1791 }; 1792 1793 static void *qm_get_avail_sqe(struct hisi_qp *qp) 1794 { 1795 struct hisi_qp_status *qp_status = &qp->qp_status; 1796 u16 sq_tail = qp_status->sq_tail; 1797 1798 if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1)) 1799 return NULL; 1800 1801 return qp->sqe + sq_tail * qp->qm->sqe_size; 1802 } 1803 1804 static void hisi_qm_unset_hw_reset(struct hisi_qp *qp) 1805 { 1806 u64 *addr; 1807 1808 /* Use last 64 bits of DUS to reset status. */ 1809 addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET; 1810 *addr = 0; 1811 } 1812 1813 static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) 1814 { 1815 struct device *dev = &qm->pdev->dev; 1816 struct hisi_qp *qp; 1817 int qp_id; 1818 1819 if (atomic_read(&qm->status.flags) == QM_STOP) { 1820 dev_info_ratelimited(dev, "failed to create qp as qm is stop!\n"); 1821 return ERR_PTR(-EPERM); 1822 } 1823 1824 if (qm->qp_in_used == qm->qp_num) { 1825 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", 1826 qm->qp_num); 1827 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); 1828 return ERR_PTR(-EBUSY); 1829 } 1830 1831 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC); 1832 if (qp_id < 0) { 1833 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", 1834 qm->qp_num); 1835 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); 1836 return ERR_PTR(-EBUSY); 1837 } 1838 1839 qp = &qm->qp_array[qp_id]; 1840 hisi_qm_unset_hw_reset(qp); 1841 memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth); 1842 1843 qp->event_cb = NULL; 1844 qp->req_cb = NULL; 1845 qp->qp_id = qp_id; 1846 qp->alg_type = alg_type; 1847 qp->is_in_kernel = true; 1848 qm->qp_in_used++; 1849 1850 return qp; 1851 } 1852 1853 /** 1854 * hisi_qm_create_qp() - Create a queue pair from qm. 
1855 * @qm: The qm we create a qp from. 1856 * @alg_type: Accelerator specific algorithm type in sqc. 1857 * 1858 * Return created qp, negative error code if failed. 1859 */ 1860 static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) 1861 { 1862 struct hisi_qp *qp; 1863 int ret; 1864 1865 ret = qm_pm_get_sync(qm); 1866 if (ret) 1867 return ERR_PTR(ret); 1868 1869 down_write(&qm->qps_lock); 1870 qp = qm_create_qp_nolock(qm, alg_type); 1871 up_write(&qm->qps_lock); 1872 1873 if (IS_ERR(qp)) 1874 qm_pm_put_sync(qm); 1875 1876 return qp; 1877 } 1878 1879 /** 1880 * hisi_qm_release_qp() - Release a qp back to its qm. 1881 * @qp: The qp we want to release. 1882 * 1883 * This function releases the resource of a qp. 1884 */ 1885 static void hisi_qm_release_qp(struct hisi_qp *qp) 1886 { 1887 struct hisi_qm *qm = qp->qm; 1888 1889 down_write(&qm->qps_lock); 1890 1891 qm->qp_in_used--; 1892 idr_remove(&qm->qp_idr, qp->qp_id); 1893 1894 up_write(&qm->qps_lock); 1895 1896 qm_pm_put_sync(qm); 1897 } 1898 1899 static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) 1900 { 1901 struct hisi_qm *qm = qp->qm; 1902 enum qm_hw_ver ver = qm->ver; 1903 struct qm_sqc sqc = {0}; 1904 1905 if (ver == QM_HW_V1) { 1906 sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); 1907 sqc.w8 = cpu_to_le16(qp->sq_depth - 1); 1908 } else { 1909 sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth)); 1910 sqc.w8 = 0; /* rand_qc */ 1911 } 1912 sqc.w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type)); 1913 sqc.base_l = cpu_to_le32(lower_32_bits(qp->sqe_dma)); 1914 sqc.base_h = cpu_to_le32(upper_32_bits(qp->sqe_dma)); 1915 sqc.cq_num = cpu_to_le16(qp_id); 1916 sqc.pasid = cpu_to_le16(pasid); 1917 1918 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) 1919 sqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE << 1920 QM_QC_PASID_ENABLE_SHIFT); 1921 1922 return qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 0); 1923 } 1924 1925 static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) 1926 { 1927 struct hisi_qm *qm = qp->qm; 1928 enum qm_hw_ver ver = qm->ver; 1929 struct qm_cqc cqc = {0}; 1930 1931 if (ver == QM_HW_V1) { 1932 cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, QM_QC_CQE_SIZE)); 1933 cqc.w8 = cpu_to_le16(qp->cq_depth - 1); 1934 } else { 1935 cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth)); 1936 cqc.w8 = 0; /* rand_qc */ 1937 } 1938 /* 1939 * Enable request finishing interrupts defaultly. 1940 * So, there will be some interrupts until disabling 1941 * this. 
1942 */ 1943 cqc.dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT); 1944 cqc.base_l = cpu_to_le32(lower_32_bits(qp->cqe_dma)); 1945 cqc.base_h = cpu_to_le32(upper_32_bits(qp->cqe_dma)); 1946 cqc.pasid = cpu_to_le16(pasid); 1947 1948 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) 1949 cqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE); 1950 1951 return qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 0); 1952 } 1953 1954 static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) 1955 { 1956 int ret; 1957 1958 qm_init_qp_status(qp); 1959 1960 ret = qm_sq_ctx_cfg(qp, qp_id, pasid); 1961 if (ret) 1962 return ret; 1963 1964 return qm_cq_ctx_cfg(qp, qp_id, pasid); 1965 } 1966 1967 static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg) 1968 { 1969 struct hisi_qm *qm = qp->qm; 1970 struct device *dev = &qm->pdev->dev; 1971 int qp_id = qp->qp_id; 1972 u32 pasid = arg; 1973 int ret; 1974 1975 if (atomic_read(&qm->status.flags) == QM_STOP) { 1976 dev_info_ratelimited(dev, "failed to start qp as qm is stop!\n"); 1977 return -EPERM; 1978 } 1979 1980 ret = qm_qp_ctx_cfg(qp, qp_id, pasid); 1981 if (ret) 1982 return ret; 1983 1984 atomic_set(&qp->qp_status.flags, QP_START); 1985 dev_dbg(dev, "queue %d started\n", qp_id); 1986 1987 return 0; 1988 } 1989 1990 /** 1991 * hisi_qm_start_qp() - Start a qp into running. 1992 * @qp: The qp we want to start to run. 1993 * @arg: Accelerator specific argument. 1994 * 1995 * After this function, qp can receive request from user. Return 0 if 1996 * successful, negative error code if failed. 1997 */ 1998 int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) 1999 { 2000 struct hisi_qm *qm = qp->qm; 2001 int ret; 2002 2003 down_write(&qm->qps_lock); 2004 ret = qm_start_qp_nolock(qp, arg); 2005 up_write(&qm->qps_lock); 2006 2007 return ret; 2008 } 2009 EXPORT_SYMBOL_GPL(hisi_qm_start_qp); 2010 2011 /** 2012 * qp_stop_fail_cb() - call request cb. 2013 * @qp: stopped failed qp. 2014 * 2015 * Callback function should be called whether task completed or not. 2016 */ 2017 static void qp_stop_fail_cb(struct hisi_qp *qp) 2018 { 2019 int qp_used = atomic_read(&qp->qp_status.used); 2020 u16 cur_tail = qp->qp_status.sq_tail; 2021 u16 sq_depth = qp->sq_depth; 2022 u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth; 2023 struct hisi_qm *qm = qp->qm; 2024 u16 pos; 2025 int i; 2026 2027 for (i = 0; i < qp_used; i++) { 2028 pos = (i + cur_head) % sq_depth; 2029 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); 2030 atomic_dec(&qp->qp_status.used); 2031 } 2032 } 2033 2034 /** 2035 * qm_drain_qp() - Drain a qp. 2036 * @qp: The qp we want to drain. 2037 * 2038 * Determine whether the queue is cleared by judging the tail pointers of 2039 * sq and cq. 2040 */ 2041 static int qm_drain_qp(struct hisi_qp *qp) 2042 { 2043 struct hisi_qm *qm = qp->qm; 2044 struct device *dev = &qm->pdev->dev; 2045 struct qm_sqc sqc; 2046 struct qm_cqc cqc; 2047 int ret, i = 0; 2048 2049 /* No need to judge if master OOO is blocked. 
*/ 2050 if (qm_check_dev_error(qm)) 2051 return 0; 2052 2053 /* Kunpeng930 supports drain qp by device */ 2054 if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) { 2055 ret = qm_stop_qp(qp); 2056 if (ret) 2057 dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id); 2058 return ret; 2059 } 2060 2061 while (++i) { 2062 ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp->qp_id, 1); 2063 if (ret) { 2064 dev_err_ratelimited(dev, "Failed to dump sqc!\n"); 2065 return ret; 2066 } 2067 2068 ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp->qp_id, 1); 2069 if (ret) { 2070 dev_err_ratelimited(dev, "Failed to dump cqc!\n"); 2071 return ret; 2072 } 2073 2074 if ((sqc.tail == cqc.tail) && 2075 (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc))) 2076 break; 2077 2078 if (i == MAX_WAIT_COUNTS) { 2079 dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id); 2080 return -EBUSY; 2081 } 2082 2083 usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); 2084 } 2085 2086 return 0; 2087 } 2088 2089 static int qm_stop_qp_nolock(struct hisi_qp *qp) 2090 { 2091 struct device *dev = &qp->qm->pdev->dev; 2092 int ret; 2093 2094 /* 2095 * Stopping and releasing a qp during reset is allowed. If a qp that 2096 * was stopped by the reset still needs to be released afterwards, 2097 * clear the is_resetting flag here so that this qp will not be 2098 * restarted after the reset completes. 2099 */ 2100 if (atomic_read(&qp->qp_status.flags) != QP_START) { 2101 qp->is_resetting = false; 2102 return 0; 2103 } 2104 2105 atomic_set(&qp->qp_status.flags, QP_STOP); 2106 2107 ret = qm_drain_qp(qp); 2108 if (ret) 2109 dev_err(dev, "Failed to drain out data for stopping!\n"); 2110 2111 flush_workqueue(qp->qm->wq); 2112 if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used))) 2113 qp_stop_fail_cb(qp); 2114 2115 dev_dbg(dev, "stop queue %u!", qp->qp_id); 2116 2117 return 0; 2118 } 2119 2120 /** 2121 * hisi_qm_stop_qp() - Stop a qp in qm. 2122 * @qp: The qp we want to stop. 2123 * 2124 * This function is the reverse of hisi_qm_start_qp(). Return 0 if successful. 2125 */ 2126 int hisi_qm_stop_qp(struct hisi_qp *qp) 2127 { 2128 int ret; 2129 2130 down_write(&qp->qm->qps_lock); 2131 ret = qm_stop_qp_nolock(qp); 2132 up_write(&qp->qm->qps_lock); 2133 2134 return ret; 2135 } 2136 EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); 2137 2138 /** 2139 * hisi_qp_send() - Queue up a task in the hardware queue. 2140 * @qp: The qp in which to put the message. 2141 * @msg: The message. 2142 * 2143 * This function will return -EBUSY if the qp is currently full, and -EAGAIN 2144 * if the qp or its qm is stopped or resetting. 2145 * 2146 * Note: This function may run concurrently with qm_irq_thread and ACC reset. 2147 * It has no race with qm_irq_thread. However, an ACC reset may happen during 2148 * hisi_qp_send; for performance reasons no lock is taken here. In that case 2149 * the current qm_db write may fail, or the sent sqe may never be received. 2150 * The QM sync/async receive paths should handle the erroneous sqe, and the 2151 * ACC reset-done path should clear the used sqe count to 0.
2152 */ 2153 int hisi_qp_send(struct hisi_qp *qp, const void *msg) 2154 { 2155 struct hisi_qp_status *qp_status = &qp->qp_status; 2156 u16 sq_tail = qp_status->sq_tail; 2157 u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth; 2158 void *sqe = qm_get_avail_sqe(qp); 2159 2160 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || 2161 atomic_read(&qp->qm->status.flags) == QM_STOP || 2162 qp->is_resetting)) { 2163 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); 2164 return -EAGAIN; 2165 } 2166 2167 if (!sqe) 2168 return -EBUSY; 2169 2170 memcpy(sqe, msg, qp->qm->sqe_size); 2171 2172 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); 2173 atomic_inc(&qp->qp_status.used); 2174 qp_status->sq_tail = sq_tail_next; 2175 2176 return 0; 2177 } 2178 EXPORT_SYMBOL_GPL(hisi_qp_send); 2179 2180 static void hisi_qm_cache_wb(struct hisi_qm *qm) 2181 { 2182 unsigned int val; 2183 2184 if (qm->ver == QM_HW_V1) 2185 return; 2186 2187 writel(0x1, qm->io_base + QM_CACHE_WB_START); 2188 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, 2189 val, val & BIT(0), POLL_PERIOD, 2190 POLL_TIMEOUT)) 2191 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); 2192 } 2193 2194 static void qm_qp_event_notifier(struct hisi_qp *qp) 2195 { 2196 wake_up_interruptible(&qp->uacce_q->wait); 2197 } 2198 2199 /* This function returns free number of qp in qm. */ 2200 static int hisi_qm_get_available_instances(struct uacce_device *uacce) 2201 { 2202 struct hisi_qm *qm = uacce->priv; 2203 int ret; 2204 2205 down_read(&qm->qps_lock); 2206 ret = qm->qp_num - qm->qp_in_used; 2207 up_read(&qm->qps_lock); 2208 2209 return ret; 2210 } 2211 2212 static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset) 2213 { 2214 int i; 2215 2216 for (i = 0; i < qm->qp_num; i++) 2217 qm_set_qp_disable(&qm->qp_array[i], offset); 2218 } 2219 2220 static int hisi_qm_uacce_get_queue(struct uacce_device *uacce, 2221 unsigned long arg, 2222 struct uacce_queue *q) 2223 { 2224 struct hisi_qm *qm = uacce->priv; 2225 struct hisi_qp *qp; 2226 u8 alg_type = 0; 2227 2228 qp = hisi_qm_create_qp(qm, alg_type); 2229 if (IS_ERR(qp)) 2230 return PTR_ERR(qp); 2231 2232 q->priv = qp; 2233 q->uacce = uacce; 2234 qp->uacce_q = q; 2235 qp->event_cb = qm_qp_event_notifier; 2236 qp->pasid = arg; 2237 qp->is_in_kernel = false; 2238 2239 return 0; 2240 } 2241 2242 static void hisi_qm_uacce_put_queue(struct uacce_queue *q) 2243 { 2244 struct hisi_qp *qp = q->priv; 2245 2246 hisi_qm_release_qp(qp); 2247 } 2248 2249 /* map sq/cq/doorbell to user space */ 2250 static int hisi_qm_uacce_mmap(struct uacce_queue *q, 2251 struct vm_area_struct *vma, 2252 struct uacce_qfile_region *qfr) 2253 { 2254 struct hisi_qp *qp = q->priv; 2255 struct hisi_qm *qm = qp->qm; 2256 resource_size_t phys_base = qm->db_phys_base + 2257 qp->qp_id * qm->db_interval; 2258 size_t sz = vma->vm_end - vma->vm_start; 2259 struct pci_dev *pdev = qm->pdev; 2260 struct device *dev = &pdev->dev; 2261 unsigned long vm_pgoff; 2262 int ret; 2263 2264 switch (qfr->type) { 2265 case UACCE_QFRT_MMIO: 2266 if (qm->ver == QM_HW_V1) { 2267 if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR) 2268 return -EINVAL; 2269 } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { 2270 if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR + 2271 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE)) 2272 return -EINVAL; 2273 } else { 2274 if (sz > qm->db_interval) 2275 return -EINVAL; 2276 } 2277 2278 vm_flags_set(vma, VM_IO); 2279 2280 return remap_pfn_range(vma, vma->vm_start, 2281 phys_base >> PAGE_SHIFT, 
2282 sz, pgprot_noncached(vma->vm_page_prot)); 2283 case UACCE_QFRT_DUS: 2284 if (sz != qp->qdma.size) 2285 return -EINVAL; 2286 2287 /* 2288 * dma_mmap_coherent() requires vm_pgoff as 0 2289 * restore vm_pfoff to initial value for mmap() 2290 */ 2291 vm_pgoff = vma->vm_pgoff; 2292 vma->vm_pgoff = 0; 2293 ret = dma_mmap_coherent(dev, vma, qp->qdma.va, 2294 qp->qdma.dma, sz); 2295 vma->vm_pgoff = vm_pgoff; 2296 return ret; 2297 2298 default: 2299 return -EINVAL; 2300 } 2301 } 2302 2303 static int hisi_qm_uacce_start_queue(struct uacce_queue *q) 2304 { 2305 struct hisi_qp *qp = q->priv; 2306 2307 return hisi_qm_start_qp(qp, qp->pasid); 2308 } 2309 2310 static void hisi_qm_uacce_stop_queue(struct uacce_queue *q) 2311 { 2312 hisi_qm_stop_qp(q->priv); 2313 } 2314 2315 static int hisi_qm_is_q_updated(struct uacce_queue *q) 2316 { 2317 struct hisi_qp *qp = q->priv; 2318 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; 2319 int updated = 0; 2320 2321 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { 2322 /* make sure to read data from memory */ 2323 dma_rmb(); 2324 qm_cq_head_update(qp); 2325 cqe = qp->cqe + qp->qp_status.cq_head; 2326 updated = 1; 2327 } 2328 2329 return updated; 2330 } 2331 2332 static void qm_set_sqctype(struct uacce_queue *q, u16 type) 2333 { 2334 struct hisi_qm *qm = q->uacce->priv; 2335 struct hisi_qp *qp = q->priv; 2336 2337 down_write(&qm->qps_lock); 2338 qp->alg_type = type; 2339 up_write(&qm->qps_lock); 2340 } 2341 2342 static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd, 2343 unsigned long arg) 2344 { 2345 struct hisi_qp *qp = q->priv; 2346 struct hisi_qp_info qp_info; 2347 struct hisi_qp_ctx qp_ctx; 2348 2349 if (cmd == UACCE_CMD_QM_SET_QP_CTX) { 2350 if (copy_from_user(&qp_ctx, (void __user *)arg, 2351 sizeof(struct hisi_qp_ctx))) 2352 return -EFAULT; 2353 2354 if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1) 2355 return -EINVAL; 2356 2357 qm_set_sqctype(q, qp_ctx.qc_type); 2358 qp_ctx.id = qp->qp_id; 2359 2360 if (copy_to_user((void __user *)arg, &qp_ctx, 2361 sizeof(struct hisi_qp_ctx))) 2362 return -EFAULT; 2363 2364 return 0; 2365 } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) { 2366 if (copy_from_user(&qp_info, (void __user *)arg, 2367 sizeof(struct hisi_qp_info))) 2368 return -EFAULT; 2369 2370 qp_info.sqe_size = qp->qm->sqe_size; 2371 qp_info.sq_depth = qp->sq_depth; 2372 qp_info.cq_depth = qp->cq_depth; 2373 2374 if (copy_to_user((void __user *)arg, &qp_info, 2375 sizeof(struct hisi_qp_info))) 2376 return -EFAULT; 2377 2378 return 0; 2379 } 2380 2381 return -EINVAL; 2382 } 2383 2384 /** 2385 * qm_hw_err_isolate() - Try to set the isolation status of the uacce device 2386 * according to user's configuration of error threshold. 2387 * @qm: the uacce device 2388 */ 2389 static int qm_hw_err_isolate(struct hisi_qm *qm) 2390 { 2391 struct qm_hw_err *err, *tmp, *hw_err; 2392 struct qm_err_isolate *isolate; 2393 u32 count = 0; 2394 2395 isolate = &qm->isolate_data; 2396 2397 #define SECONDS_PER_HOUR 3600 2398 2399 /* All the hw errs are processed by PF driver */ 2400 if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold) 2401 return 0; 2402 2403 hw_err = kzalloc(sizeof(*hw_err), GFP_KERNEL); 2404 if (!hw_err) 2405 return -ENOMEM; 2406 2407 /* 2408 * Time-stamp every slot AER error. Then check the AER error log when the 2409 * next device AER error occurred. if the device slot AER error count exceeds 2410 * the setting error threshold in one hour, the isolated state will be set 2411 * to true. 
And the AER error logs that exceed one hour will be cleared. 2412 */ 2413 mutex_lock(&isolate->isolate_lock); 2414 hw_err->timestamp = jiffies; 2415 list_for_each_entry_safe(err, tmp, &isolate->qm_hw_errs, list) { 2416 if ((hw_err->timestamp - err->timestamp) / HZ > 2417 SECONDS_PER_HOUR) { 2418 list_del(&err->list); 2419 kfree(err); 2420 } else { 2421 count++; 2422 } 2423 } 2424 list_add(&hw_err->list, &isolate->qm_hw_errs); 2425 mutex_unlock(&isolate->isolate_lock); 2426 2427 if (count >= isolate->err_threshold) 2428 isolate->is_isolate = true; 2429 2430 return 0; 2431 } 2432 2433 static void qm_hw_err_destroy(struct hisi_qm *qm) 2434 { 2435 struct qm_hw_err *err, *tmp; 2436 2437 mutex_lock(&qm->isolate_data.isolate_lock); 2438 list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) { 2439 list_del(&err->list); 2440 kfree(err); 2441 } 2442 mutex_unlock(&qm->isolate_data.isolate_lock); 2443 } 2444 2445 static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce) 2446 { 2447 struct hisi_qm *qm = uacce->priv; 2448 struct hisi_qm *pf_qm; 2449 2450 if (uacce->is_vf) 2451 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); 2452 else 2453 pf_qm = qm; 2454 2455 return pf_qm->isolate_data.is_isolate ? 2456 UACCE_DEV_ISOLATE : UACCE_DEV_NORMAL; 2457 } 2458 2459 static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num) 2460 { 2461 struct hisi_qm *qm = uacce->priv; 2462 2463 /* Must be set by PF */ 2464 if (uacce->is_vf) 2465 return -EPERM; 2466 2467 if (qm->isolate_data.is_isolate) 2468 return -EPERM; 2469 2470 qm->isolate_data.err_threshold = num; 2471 2472 /* After the policy is updated, need to reset the hardware err list */ 2473 qm_hw_err_destroy(qm); 2474 2475 return 0; 2476 } 2477 2478 static u32 hisi_qm_isolate_threshold_read(struct uacce_device *uacce) 2479 { 2480 struct hisi_qm *qm = uacce->priv; 2481 struct hisi_qm *pf_qm; 2482 2483 if (uacce->is_vf) { 2484 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); 2485 return pf_qm->isolate_data.err_threshold; 2486 } 2487 2488 return qm->isolate_data.err_threshold; 2489 } 2490 2491 static const struct uacce_ops uacce_qm_ops = { 2492 .get_available_instances = hisi_qm_get_available_instances, 2493 .get_queue = hisi_qm_uacce_get_queue, 2494 .put_queue = hisi_qm_uacce_put_queue, 2495 .start_queue = hisi_qm_uacce_start_queue, 2496 .stop_queue = hisi_qm_uacce_stop_queue, 2497 .mmap = hisi_qm_uacce_mmap, 2498 .ioctl = hisi_qm_uacce_ioctl, 2499 .is_q_updated = hisi_qm_is_q_updated, 2500 .get_isolate_state = hisi_qm_get_isolate_state, 2501 .isolate_err_threshold_write = hisi_qm_isolate_threshold_write, 2502 .isolate_err_threshold_read = hisi_qm_isolate_threshold_read, 2503 }; 2504 2505 static void qm_remove_uacce(struct hisi_qm *qm) 2506 { 2507 struct uacce_device *uacce = qm->uacce; 2508 2509 if (qm->use_sva) { 2510 qm_hw_err_destroy(qm); 2511 uacce_remove(uacce); 2512 qm->uacce = NULL; 2513 } 2514 } 2515 2516 static int qm_alloc_uacce(struct hisi_qm *qm) 2517 { 2518 struct pci_dev *pdev = qm->pdev; 2519 struct uacce_device *uacce; 2520 unsigned long mmio_page_nr; 2521 unsigned long dus_page_nr; 2522 u16 sq_depth, cq_depth; 2523 struct uacce_interface interface = { 2524 .flags = UACCE_DEV_SVA, 2525 .ops = &uacce_qm_ops, 2526 }; 2527 int ret; 2528 2529 ret = strscpy(interface.name, dev_driver_string(&pdev->dev), 2530 sizeof(interface.name)); 2531 if (ret < 0) 2532 return -ENAMETOOLONG; 2533 2534 uacce = uacce_alloc(&pdev->dev, &interface); 2535 if (IS_ERR(uacce)) 2536 return PTR_ERR(uacce); 2537 2538 if 
(uacce->flags & UACCE_DEV_SVA) { 2539 qm->use_sva = true; 2540 } else { 2541 /* only consider sva case */ 2542 qm_remove_uacce(qm); 2543 return -EINVAL; 2544 } 2545 2546 uacce->is_vf = pdev->is_virtfn; 2547 uacce->priv = qm; 2548 2549 if (qm->ver == QM_HW_V1) 2550 uacce->api_ver = HISI_QM_API_VER_BASE; 2551 else if (qm->ver == QM_HW_V2) 2552 uacce->api_ver = HISI_QM_API_VER2_BASE; 2553 else 2554 uacce->api_ver = HISI_QM_API_VER3_BASE; 2555 2556 if (qm->ver == QM_HW_V1) 2557 mmio_page_nr = QM_DOORBELL_PAGE_NR; 2558 else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) 2559 mmio_page_nr = QM_DOORBELL_PAGE_NR + 2560 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE; 2561 else 2562 mmio_page_nr = qm->db_interval / PAGE_SIZE; 2563 2564 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); 2565 2566 /* Add one more page for device or qp status */ 2567 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth + 2568 sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >> 2569 PAGE_SHIFT; 2570 2571 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr; 2572 uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr; 2573 2574 qm->uacce = uacce; 2575 INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs); 2576 mutex_init(&qm->isolate_data.isolate_lock); 2577 2578 return 0; 2579 } 2580 2581 /** 2582 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If 2583 * there are still users on the QM, return failure without doing anything. 2584 * @qm: The qm to be frozen. 2585 * 2586 * This function freezes the QM so that SRIOV can then be disabled. 2587 */ 2588 static int qm_frozen(struct hisi_qm *qm) 2589 { 2590 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) 2591 return 0; 2592 2593 down_write(&qm->qps_lock); 2594 2595 if (!qm->qp_in_used) { 2596 qm->qp_in_used = qm->qp_num; 2597 up_write(&qm->qps_lock); 2598 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl); 2599 return 0; 2600 } 2601 2602 up_write(&qm->qps_lock); 2603 2604 return -EBUSY; 2605 } 2606 2607 static int qm_try_frozen_vfs(struct pci_dev *pdev, 2608 struct hisi_qm_list *qm_list) 2609 { 2610 struct hisi_qm *qm, *vf_qm; 2611 struct pci_dev *dev; 2612 int ret = 0; 2613 2614 if (!qm_list || !pdev) 2615 return -EINVAL; 2616 2617 /* Try to freeze all the VFs before disabling SRIOV */ 2618 mutex_lock(&qm_list->lock); 2619 list_for_each_entry(qm, &qm_list->list, list) { 2620 dev = qm->pdev; 2621 if (dev == pdev) 2622 continue; 2623 if (pci_physfn(dev) == pdev) { 2624 vf_qm = pci_get_drvdata(dev); 2625 ret = qm_frozen(vf_qm); 2626 if (ret) 2627 goto frozen_fail; 2628 } 2629 } 2630 2631 frozen_fail: 2632 mutex_unlock(&qm_list->lock); 2633 2634 return ret; 2635 } 2636 2637 /** 2638 * hisi_qm_wait_task_finish() - Wait until the task is finished 2639 * when removing the driver. 2640 * @qm: The qm that needs to wait for its task to finish. 2641 * @qm_list: The list of all available devices.
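 * This function busy-waits until the function (and, for a PF, all of its VFs) is frozen and any scheduled or in-progress reset work has finished.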
2642 */ 2643 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list) 2644 { 2645 while (qm_frozen(qm) || 2646 ((qm->fun_type == QM_HW_PF) && 2647 qm_try_frozen_vfs(qm->pdev, qm_list))) { 2648 msleep(WAIT_PERIOD); 2649 } 2650 2651 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) || 2652 test_bit(QM_RESETTING, &qm->misc_ctl)) 2653 msleep(WAIT_PERIOD); 2654 2655 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 2656 flush_work(&qm->cmd_process); 2657 2658 udelay(REMOVE_WAIT_DELAY); 2659 } 2660 EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish); 2661 2662 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) 2663 { 2664 struct device *dev = &qm->pdev->dev; 2665 struct qm_dma *qdma; 2666 int i; 2667 2668 for (i = num - 1; i >= 0; i--) { 2669 qdma = &qm->qp_array[i].qdma; 2670 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); 2671 kfree(qm->poll_data[i].qp_finish_id); 2672 } 2673 2674 kfree(qm->poll_data); 2675 kfree(qm->qp_array); 2676 } 2677 2678 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, 2679 u16 sq_depth, u16 cq_depth) 2680 { 2681 struct device *dev = &qm->pdev->dev; 2682 size_t off = qm->sqe_size * sq_depth; 2683 struct hisi_qp *qp; 2684 int ret = -ENOMEM; 2685 2686 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16), 2687 GFP_KERNEL); 2688 if (!qm->poll_data[id].qp_finish_id) 2689 return -ENOMEM; 2690 2691 qp = &qm->qp_array[id]; 2692 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, 2693 GFP_KERNEL); 2694 if (!qp->qdma.va) 2695 goto err_free_qp_finish_id; 2696 2697 qp->sqe = qp->qdma.va; 2698 qp->sqe_dma = qp->qdma.dma; 2699 qp->cqe = qp->qdma.va + off; 2700 qp->cqe_dma = qp->qdma.dma + off; 2701 qp->qdma.size = dma_size; 2702 qp->sq_depth = sq_depth; 2703 qp->cq_depth = cq_depth; 2704 qp->qm = qm; 2705 qp->qp_id = id; 2706 2707 return 0; 2708 2709 err_free_qp_finish_id: 2710 kfree(qm->poll_data[id].qp_finish_id); 2711 return ret; 2712 } 2713 2714 static void hisi_qm_pre_init(struct hisi_qm *qm) 2715 { 2716 struct pci_dev *pdev = qm->pdev; 2717 2718 if (qm->ver == QM_HW_V1) 2719 qm->ops = &qm_hw_ops_v1; 2720 else if (qm->ver == QM_HW_V2) 2721 qm->ops = &qm_hw_ops_v2; 2722 else 2723 qm->ops = &qm_hw_ops_v3; 2724 2725 pci_set_drvdata(pdev, qm); 2726 mutex_init(&qm->mailbox_lock); 2727 init_rwsem(&qm->qps_lock); 2728 qm->qp_in_used = 0; 2729 if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { 2730 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) 2731 dev_info(&pdev->dev, "_PS0 and _PR0 are not defined"); 2732 } 2733 } 2734 2735 static void qm_cmd_uninit(struct hisi_qm *qm) 2736 { 2737 u32 val; 2738 2739 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 2740 return; 2741 2742 val = readl(qm->io_base + QM_IFC_INT_MASK); 2743 val |= QM_IFC_INT_DISABLE; 2744 writel(val, qm->io_base + QM_IFC_INT_MASK); 2745 } 2746 2747 static void qm_cmd_init(struct hisi_qm *qm) 2748 { 2749 u32 val; 2750 2751 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 2752 return; 2753 2754 /* Clear communication interrupt source */ 2755 qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR); 2756 2757 /* Enable pf to vf communication reg. 
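This is done by clearing QM_IFC_INT_DISABLE in the QM_IFC_INT_MASK register below.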
*/ 2758 val = readl(qm->io_base + QM_IFC_INT_MASK); 2759 val &= ~QM_IFC_INT_DISABLE; 2760 writel(val, qm->io_base + QM_IFC_INT_MASK); 2761 } 2762 2763 static void qm_put_pci_res(struct hisi_qm *qm) 2764 { 2765 struct pci_dev *pdev = qm->pdev; 2766 2767 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) 2768 iounmap(qm->db_io_base); 2769 2770 iounmap(qm->io_base); 2771 pci_release_mem_regions(pdev); 2772 } 2773 2774 static void hisi_qm_pci_uninit(struct hisi_qm *qm) 2775 { 2776 struct pci_dev *pdev = qm->pdev; 2777 2778 pci_free_irq_vectors(pdev); 2779 qm_put_pci_res(qm); 2780 pci_disable_device(pdev); 2781 } 2782 2783 static void hisi_qm_set_state(struct hisi_qm *qm, u8 state) 2784 { 2785 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF) 2786 writel(state, qm->io_base + QM_VF_STATE); 2787 } 2788 2789 static void hisi_qm_unint_work(struct hisi_qm *qm) 2790 { 2791 destroy_workqueue(qm->wq); 2792 } 2793 2794 static void hisi_qm_free_rsv_buf(struct hisi_qm *qm) 2795 { 2796 struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma; 2797 struct device *dev = &qm->pdev->dev; 2798 2799 dma_free_coherent(dev, xqc_dma->size, xqc_dma->va, xqc_dma->dma); 2800 } 2801 2802 static void hisi_qm_memory_uninit(struct hisi_qm *qm) 2803 { 2804 struct device *dev = &qm->pdev->dev; 2805 2806 hisi_qp_memory_uninit(qm, qm->qp_num); 2807 hisi_qm_free_rsv_buf(qm); 2808 if (qm->qdma.va) { 2809 hisi_qm_cache_wb(qm); 2810 dma_free_coherent(dev, qm->qdma.size, 2811 qm->qdma.va, qm->qdma.dma); 2812 } 2813 2814 idr_destroy(&qm->qp_idr); 2815 2816 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 2817 kfree(qm->factor); 2818 } 2819 2820 /** 2821 * hisi_qm_uninit() - Uninitialize qm. 2822 * @qm: The qm needed uninit. 2823 * 2824 * This function uninits qm related device resources. 2825 */ 2826 void hisi_qm_uninit(struct hisi_qm *qm) 2827 { 2828 qm_cmd_uninit(qm); 2829 hisi_qm_unint_work(qm); 2830 2831 down_write(&qm->qps_lock); 2832 hisi_qm_memory_uninit(qm); 2833 hisi_qm_set_state(qm, QM_NOT_READY); 2834 up_write(&qm->qps_lock); 2835 2836 qm_irqs_unregister(qm); 2837 hisi_qm_pci_uninit(qm); 2838 if (qm->use_sva) { 2839 uacce_remove(qm->uacce); 2840 qm->uacce = NULL; 2841 } 2842 } 2843 EXPORT_SYMBOL_GPL(hisi_qm_uninit); 2844 2845 /** 2846 * hisi_qm_get_vft() - Get vft from a qm. 2847 * @qm: The qm we want to get its vft. 2848 * @base: The base number of queue in vft. 2849 * @number: The number of queues in vft. 2850 * 2851 * We can allocate multiple queues to a qm by configuring virtual function 2852 * table. We get related configures by this function. Normally, we call this 2853 * function in VF driver to get the queue information. 2854 * 2855 * qm hw v1 does not support this interface. 2856 */ 2857 static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) 2858 { 2859 if (!base || !number) 2860 return -EINVAL; 2861 2862 if (!qm->ops->get_vft) { 2863 dev_err(&qm->pdev->dev, "Don't support vft read!\n"); 2864 return -EINVAL; 2865 } 2866 2867 return qm->ops->get_vft(qm, base, number); 2868 } 2869 2870 /** 2871 * hisi_qm_set_vft() - Set vft to a qm. 2872 * @qm: The qm we want to set its vft. 2873 * @fun_num: The function number. 2874 * @base: The base number of queue in vft. 2875 * @number: The number of queues in vft. 2876 * 2877 * This function is alway called in PF driver, it is used to assign queues 2878 * among PF and VFs. 
2879 * 2880 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1) 2881 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1) 2882 * (VF function number 0x2) 2883 */ 2884 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, 2885 u32 number) 2886 { 2887 u32 max_q_num = qm->ctrl_qp_num; 2888 2889 if (base >= max_q_num || number > max_q_num || 2890 (base + number) > max_q_num) 2891 return -EINVAL; 2892 2893 return qm_set_sqc_cqc_vft(qm, fun_num, base, number); 2894 } 2895 2896 static void qm_init_eq_aeq_status(struct hisi_qm *qm) 2897 { 2898 struct hisi_qm_status *status = &qm->status; 2899 2900 status->eq_head = 0; 2901 status->aeq_head = 0; 2902 status->eqc_phase = true; 2903 status->aeqc_phase = true; 2904 } 2905 2906 static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm) 2907 { 2908 /* Clear eq/aeq interrupt source */ 2909 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); 2910 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); 2911 2912 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); 2913 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); 2914 } 2915 2916 static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm) 2917 { 2918 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); 2919 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); 2920 } 2921 2922 static int qm_eq_ctx_cfg(struct hisi_qm *qm) 2923 { 2924 struct qm_eqc eqc = {0}; 2925 2926 eqc.base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); 2927 eqc.base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); 2928 if (qm->ver == QM_HW_V1) 2929 eqc.dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); 2930 eqc.dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); 2931 2932 return qm_set_and_get_xqc(qm, QM_MB_CMD_EQC, &eqc, 0, 0); 2933 } 2934 2935 static int qm_aeq_ctx_cfg(struct hisi_qm *qm) 2936 { 2937 struct qm_aeqc aeqc = {0}; 2938 2939 aeqc.base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); 2940 aeqc.base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); 2941 aeqc.dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); 2942 2943 return qm_set_and_get_xqc(qm, QM_MB_CMD_AEQC, &aeqc, 0, 0); 2944 } 2945 2946 static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm) 2947 { 2948 struct device *dev = &qm->pdev->dev; 2949 int ret; 2950 2951 qm_init_eq_aeq_status(qm); 2952 2953 ret = qm_eq_ctx_cfg(qm); 2954 if (ret) { 2955 dev_err(dev, "Set eqc failed!\n"); 2956 return ret; 2957 } 2958 2959 return qm_aeq_ctx_cfg(qm); 2960 } 2961 2962 static int __hisi_qm_start(struct hisi_qm *qm) 2963 { 2964 int ret; 2965 2966 WARN_ON(!qm->qdma.va); 2967 2968 if (qm->fun_type == QM_HW_PF) { 2969 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); 2970 if (ret) 2971 return ret; 2972 } 2973 2974 ret = qm_eq_aeq_ctx_cfg(qm); 2975 if (ret) 2976 return ret; 2977 2978 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); 2979 if (ret) 2980 return ret; 2981 2982 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); 2983 if (ret) 2984 return ret; 2985 2986 qm_init_prefetch(qm); 2987 qm_enable_eq_aeq_interrupts(qm); 2988 2989 return 0; 2990 } 2991 2992 /** 2993 * hisi_qm_start() - start qm 2994 * @qm: The qm to be started. 2995 * 2996 * This function starts a qm, then we can allocate qp from this qm. 
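 * Return: 0 on success, negative error code otherwise.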
2997 */ 2998 int hisi_qm_start(struct hisi_qm *qm) 2999 { 3000 struct device *dev = &qm->pdev->dev; 3001 int ret = 0; 3002 3003 down_write(&qm->qps_lock); 3004 3005 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num); 3006 3007 if (!qm->qp_num) { 3008 dev_err(dev, "qp_num should not be 0\n"); 3009 ret = -EINVAL; 3010 goto err_unlock; 3011 } 3012 3013 ret = __hisi_qm_start(qm); 3014 if (ret) 3015 goto err_unlock; 3016 3017 atomic_set(&qm->status.flags, QM_WORK); 3018 hisi_qm_set_state(qm, QM_READY); 3019 3020 err_unlock: 3021 up_write(&qm->qps_lock); 3022 return ret; 3023 } 3024 EXPORT_SYMBOL_GPL(hisi_qm_start); 3025 3026 static int qm_restart(struct hisi_qm *qm) 3027 { 3028 struct device *dev = &qm->pdev->dev; 3029 struct hisi_qp *qp; 3030 int ret, i; 3031 3032 ret = hisi_qm_start(qm); 3033 if (ret < 0) 3034 return ret; 3035 3036 down_write(&qm->qps_lock); 3037 for (i = 0; i < qm->qp_num; i++) { 3038 qp = &qm->qp_array[i]; 3039 if (atomic_read(&qp->qp_status.flags) == QP_STOP && 3040 qp->is_resetting == true) { 3041 ret = qm_start_qp_nolock(qp, 0); 3042 if (ret < 0) { 3043 dev_err(dev, "Failed to start qp%d!\n", i); 3044 3045 up_write(&qm->qps_lock); 3046 return ret; 3047 } 3048 qp->is_resetting = false; 3049 } 3050 } 3051 up_write(&qm->qps_lock); 3052 3053 return 0; 3054 } 3055 3056 /* Stop started qps in reset flow */ 3057 static int qm_stop_started_qp(struct hisi_qm *qm) 3058 { 3059 struct device *dev = &qm->pdev->dev; 3060 struct hisi_qp *qp; 3061 int i, ret; 3062 3063 for (i = 0; i < qm->qp_num; i++) { 3064 qp = &qm->qp_array[i]; 3065 if (qp && atomic_read(&qp->qp_status.flags) == QP_START) { 3066 qp->is_resetting = true; 3067 ret = qm_stop_qp_nolock(qp); 3068 if (ret < 0) { 3069 dev_err(dev, "Failed to stop qp%d!\n", i); 3070 return ret; 3071 } 3072 } 3073 } 3074 3075 return 0; 3076 } 3077 3078 /** 3079 * qm_clear_queues() - Clear all queues memory in a qm. 3080 * @qm: The qm in which the queues will be cleared. 3081 * 3082 * This function clears all queues memory in a qm. Reset of accelerator can 3083 * use this to clear queues. 3084 */ 3085 static void qm_clear_queues(struct hisi_qm *qm) 3086 { 3087 struct hisi_qp *qp; 3088 int i; 3089 3090 for (i = 0; i < qm->qp_num; i++) { 3091 qp = &qm->qp_array[i]; 3092 if (qp->is_in_kernel && qp->is_resetting) 3093 memset(qp->qdma.va, 0, qp->qdma.size); 3094 } 3095 3096 memset(qm->qdma.va, 0, qm->qdma.size); 3097 } 3098 3099 /** 3100 * hisi_qm_stop() - Stop a qm. 3101 * @qm: The qm which will be stopped. 3102 * @r: The reason to stop qm. 3103 * 3104 * This function stops qm and its qps, then qm can not accept request. 3105 * Related resources are not released at this state, we can use hisi_qm_start 3106 * to let qm start again. 3107 */ 3108 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) 3109 { 3110 struct device *dev = &qm->pdev->dev; 3111 int ret = 0; 3112 3113 down_write(&qm->qps_lock); 3114 3115 qm->status.stop_reason = r; 3116 if (atomic_read(&qm->status.flags) == QM_STOP) 3117 goto err_unlock; 3118 3119 /* Stop all the request sending at first. 
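Once the state below is set to QM_STOP, hisi_qp_send() and new qp start requests are rejected.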
*/ 3120 atomic_set(&qm->status.flags, QM_STOP); 3121 3122 if (qm->status.stop_reason == QM_SOFT_RESET || 3123 qm->status.stop_reason == QM_DOWN) { 3124 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 3125 ret = qm_stop_started_qp(qm); 3126 if (ret < 0) { 3127 dev_err(dev, "Failed to stop started qp!\n"); 3128 goto err_unlock; 3129 } 3130 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 3131 } 3132 3133 qm_disable_eq_aeq_interrupts(qm); 3134 if (qm->fun_type == QM_HW_PF) { 3135 ret = hisi_qm_set_vft(qm, 0, 0, 0); 3136 if (ret < 0) { 3137 dev_err(dev, "Failed to set vft!\n"); 3138 ret = -EBUSY; 3139 goto err_unlock; 3140 } 3141 } 3142 3143 qm_clear_queues(qm); 3144 3145 err_unlock: 3146 up_write(&qm->qps_lock); 3147 return ret; 3148 } 3149 EXPORT_SYMBOL_GPL(hisi_qm_stop); 3150 3151 static void qm_hw_error_init(struct hisi_qm *qm) 3152 { 3153 if (!qm->ops->hw_error_init) { 3154 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); 3155 return; 3156 } 3157 3158 qm->ops->hw_error_init(qm); 3159 } 3160 3161 static void qm_hw_error_uninit(struct hisi_qm *qm) 3162 { 3163 if (!qm->ops->hw_error_uninit) { 3164 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n"); 3165 return; 3166 } 3167 3168 qm->ops->hw_error_uninit(qm); 3169 } 3170 3171 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) 3172 { 3173 if (!qm->ops->hw_error_handle) { 3174 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); 3175 return ACC_ERR_NONE; 3176 } 3177 3178 return qm->ops->hw_error_handle(qm); 3179 } 3180 3181 /** 3182 * hisi_qm_dev_err_init() - Initialize device error configuration. 3183 * @qm: The qm for which we want to do error initialization. 3184 * 3185 * Initialize QM and device error related configuration. 3186 */ 3187 void hisi_qm_dev_err_init(struct hisi_qm *qm) 3188 { 3189 if (qm->fun_type == QM_HW_VF) 3190 return; 3191 3192 qm_hw_error_init(qm); 3193 3194 if (!qm->err_ini->hw_err_enable) { 3195 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n"); 3196 return; 3197 } 3198 qm->err_ini->hw_err_enable(qm); 3199 } 3200 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init); 3201 3202 /** 3203 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration. 3204 * @qm: The qm for which we want to do error uninitialization. 3205 * 3206 * Uninitialize QM and device error related configuration. 3207 */ 3208 void hisi_qm_dev_err_uninit(struct hisi_qm *qm) 3209 { 3210 if (qm->fun_type == QM_HW_VF) 3211 return; 3212 3213 qm_hw_error_uninit(qm); 3214 3215 if (!qm->err_ini->hw_err_disable) { 3216 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n"); 3217 return; 3218 } 3219 qm->err_ini->hw_err_disable(qm); 3220 } 3221 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit); 3222 3223 /** 3224 * hisi_qm_free_qps() - free multiple queue pairs. 3225 * @qps: The queue pairs need to be freed. 3226 * @qp_num: The num of queue pairs. 
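 * Typically used to release queue pairs obtained from hisi_qm_alloc_qps_node().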
3227 */ 3228 void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num) 3229 { 3230 int i; 3231 3232 if (!qps || qp_num <= 0) 3233 return; 3234 3235 for (i = qp_num - 1; i >= 0; i--) 3236 hisi_qm_release_qp(qps[i]); 3237 } 3238 EXPORT_SYMBOL_GPL(hisi_qm_free_qps); 3239 3240 static void free_list(struct list_head *head) 3241 { 3242 struct hisi_qm_resource *res, *tmp; 3243 3244 list_for_each_entry_safe(res, tmp, head, list) { 3245 list_del(&res->list); 3246 kfree(res); 3247 } 3248 } 3249 3250 static int hisi_qm_sort_devices(int node, struct list_head *head, 3251 struct hisi_qm_list *qm_list) 3252 { 3253 struct hisi_qm_resource *res, *tmp; 3254 struct hisi_qm *qm; 3255 struct list_head *n; 3256 struct device *dev; 3257 int dev_node; 3258 3259 list_for_each_entry(qm, &qm_list->list, list) { 3260 dev = &qm->pdev->dev; 3261 3262 dev_node = dev_to_node(dev); 3263 if (dev_node < 0) 3264 dev_node = 0; 3265 3266 res = kzalloc(sizeof(*res), GFP_KERNEL); 3267 if (!res) 3268 return -ENOMEM; 3269 3270 res->qm = qm; 3271 res->distance = node_distance(dev_node, node); 3272 n = head; 3273 list_for_each_entry(tmp, head, list) { 3274 if (res->distance < tmp->distance) { 3275 n = &tmp->list; 3276 break; 3277 } 3278 } 3279 list_add_tail(&res->list, n); 3280 } 3281 3282 return 0; 3283 } 3284 3285 /** 3286 * hisi_qm_alloc_qps_node() - Create multiple queue pairs. 3287 * @qm_list: The list of all available devices. 3288 * @qp_num: The number of queue pairs need created. 3289 * @alg_type: The algorithm type. 3290 * @node: The numa node. 3291 * @qps: The queue pairs need created. 3292 * 3293 * This function will sort all available device according to numa distance. 3294 * Then try to create all queue pairs from one device, if all devices do 3295 * not meet the requirements will return error. 3296 */ 3297 int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, 3298 u8 alg_type, int node, struct hisi_qp **qps) 3299 { 3300 struct hisi_qm_resource *tmp; 3301 int ret = -ENODEV; 3302 LIST_HEAD(head); 3303 int i; 3304 3305 if (!qps || !qm_list || qp_num <= 0) 3306 return -EINVAL; 3307 3308 mutex_lock(&qm_list->lock); 3309 if (hisi_qm_sort_devices(node, &head, qm_list)) { 3310 mutex_unlock(&qm_list->lock); 3311 goto err; 3312 } 3313 3314 list_for_each_entry(tmp, &head, list) { 3315 for (i = 0; i < qp_num; i++) { 3316 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); 3317 if (IS_ERR(qps[i])) { 3318 hisi_qm_free_qps(qps, i); 3319 break; 3320 } 3321 } 3322 3323 if (i == qp_num) { 3324 ret = 0; 3325 break; 3326 } 3327 } 3328 3329 mutex_unlock(&qm_list->lock); 3330 if (ret) 3331 pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n", 3332 node, alg_type, qp_num); 3333 3334 err: 3335 free_list(&head); 3336 return ret; 3337 } 3338 EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node); 3339 3340 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) 3341 { 3342 u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j; 3343 u32 max_qp_num = qm->max_qp_num; 3344 u32 q_base = qm->qp_num; 3345 int ret; 3346 3347 if (!num_vfs) 3348 return -EINVAL; 3349 3350 vfs_q_num = qm->ctrl_qp_num - qm->qp_num; 3351 3352 /* If vfs_q_num is less than num_vfs, return error. */ 3353 if (vfs_q_num < num_vfs) 3354 return -EINVAL; 3355 3356 q_num = vfs_q_num / num_vfs; 3357 remain_q_num = vfs_q_num % num_vfs; 3358 3359 for (i = num_vfs; i > 0; i--) { 3360 /* 3361 * if q_num + remain_q_num > max_qp_num in last vf, divide the 3362 * remaining queues equally. 
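 * For example, assuming max_qp_num is large enough: with vfs_q_num = 10 and num_vfs = 3, q_num = 3 and remain_q_num = 1, so VF3 is assigned 3 + 1 = 4 queues while VF2 and VF1 get 3 each.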
3363 */ 3364 if (i == num_vfs && q_num + remain_q_num <= max_qp_num) { 3365 act_q_num = q_num + remain_q_num; 3366 remain_q_num = 0; 3367 } else if (remain_q_num > 0) { 3368 act_q_num = q_num + 1; 3369 remain_q_num--; 3370 } else { 3371 act_q_num = q_num; 3372 } 3373 3374 act_q_num = min(act_q_num, max_qp_num); 3375 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num); 3376 if (ret) { 3377 for (j = num_vfs; j > i; j--) 3378 hisi_qm_set_vft(qm, j, 0, 0); 3379 return ret; 3380 } 3381 q_base += act_q_num; 3382 } 3383 3384 return 0; 3385 } 3386 3387 static int qm_clear_vft_config(struct hisi_qm *qm) 3388 { 3389 int ret; 3390 u32 i; 3391 3392 for (i = 1; i <= qm->vfs_num; i++) { 3393 ret = hisi_qm_set_vft(qm, i, 0, 0); 3394 if (ret) 3395 return ret; 3396 } 3397 qm->vfs_num = 0; 3398 3399 return 0; 3400 } 3401 3402 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos) 3403 { 3404 struct device *dev = &qm->pdev->dev; 3405 u32 ir = qos * QM_QOS_RATE; 3406 int ret, total_vfs, i; 3407 3408 total_vfs = pci_sriov_get_totalvfs(qm->pdev); 3409 if (fun_index > total_vfs) 3410 return -EINVAL; 3411 3412 qm->factor[fun_index].func_qos = qos; 3413 3414 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]); 3415 if (ret) { 3416 dev_err(dev, "failed to calculate shaper parameter!\n"); 3417 return -EINVAL; 3418 } 3419 3420 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) { 3421 /* The base number of queue reuse for different alg type */ 3422 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1); 3423 if (ret) { 3424 dev_err(dev, "type: %d, failed to set shaper vft!\n", i); 3425 return -EINVAL; 3426 } 3427 } 3428 3429 return 0; 3430 } 3431 3432 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index) 3433 { 3434 u64 cir_u = 0, cir_b = 0, cir_s = 0; 3435 u64 shaper_vft, ir_calc, ir; 3436 unsigned int val; 3437 u32 error_rate; 3438 int ret; 3439 3440 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 3441 val & BIT(0), POLL_PERIOD, 3442 POLL_TIMEOUT); 3443 if (ret) 3444 return 0; 3445 3446 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); 3447 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE); 3448 writel(fun_index, qm->io_base + QM_VFT_CFG); 3449 3450 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); 3451 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); 3452 3453 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 3454 val & BIT(0), POLL_PERIOD, 3455 POLL_TIMEOUT); 3456 if (ret) 3457 return 0; 3458 3459 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | 3460 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32); 3461 3462 cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK; 3463 cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK; 3464 cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT; 3465 3466 cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK; 3467 cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT; 3468 3469 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s); 3470 3471 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE; 3472 3473 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; 3474 if (error_rate > QM_QOS_MIN_ERROR_RATE) { 3475 pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate); 3476 return 0; 3477 } 3478 3479 return ir; 3480 } 3481 3482 static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) 3483 { 3484 struct device *dev = &qm->pdev->dev; 3485 u64 mb_cmd; 3486 u32 qos; 3487 int ret; 3488 3489 qos = qm_get_shaper_vft_qos(qm, fun_num); 3490 if (!qos) { 3491 dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num); 3492 return; 3493 } 3494 3495 
mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT; 3496 ret = qm_ping_single_vf(qm, mb_cmd, fun_num); 3497 if (ret) 3498 dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num); 3499 } 3500 3501 static int qm_vf_read_qos(struct hisi_qm *qm) 3502 { 3503 int cnt = 0; 3504 int ret = -EINVAL; 3505 3506 /* reset mailbox qos val */ 3507 qm->mb_qos = 0; 3508 3509 /* vf ping pf to get function qos */ 3510 ret = qm_ping_pf(qm, QM_VF_GET_QOS); 3511 if (ret) { 3512 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); 3513 return ret; 3514 } 3515 3516 while (true) { 3517 msleep(QM_WAIT_DST_ACK); 3518 if (qm->mb_qos) 3519 break; 3520 3521 if (++cnt > QM_MAX_VF_WAIT_COUNT) { 3522 pci_err(qm->pdev, "PF ping VF timeout!\n"); 3523 return -ETIMEDOUT; 3524 } 3525 } 3526 3527 return ret; 3528 } 3529 3530 static ssize_t qm_algqos_read(struct file *filp, char __user *buf, 3531 size_t count, loff_t *pos) 3532 { 3533 struct hisi_qm *qm = filp->private_data; 3534 char tbuf[QM_DBG_READ_LEN]; 3535 u32 qos_val, ir; 3536 int ret; 3537 3538 ret = hisi_qm_get_dfx_access(qm); 3539 if (ret) 3540 return ret; 3541 3542 /* Mailbox and reset cannot be operated at the same time */ 3543 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { 3544 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n"); 3545 ret = -EAGAIN; 3546 goto err_put_dfx_access; 3547 } 3548 3549 if (qm->fun_type == QM_HW_PF) { 3550 ir = qm_get_shaper_vft_qos(qm, 0); 3551 } else { 3552 ret = qm_vf_read_qos(qm); 3553 if (ret) 3554 goto err_get_status; 3555 ir = qm->mb_qos; 3556 } 3557 3558 qos_val = ir / QM_QOS_RATE; 3559 ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val); 3560 3561 ret = simple_read_from_buffer(buf, count, pos, tbuf, ret); 3562 3563 err_get_status: 3564 clear_bit(QM_RESETTING, &qm->misc_ctl); 3565 err_put_dfx_access: 3566 hisi_qm_put_dfx_access(qm); 3567 return ret; 3568 } 3569 3570 static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf, 3571 unsigned long *val, 3572 unsigned int *fun_index) 3573 { 3574 const struct bus_type *bus_type = qm->pdev->dev.bus; 3575 char tbuf_bdf[QM_DBG_READ_LEN] = {0}; 3576 char val_buf[QM_DBG_READ_LEN] = {0}; 3577 struct pci_dev *pdev; 3578 struct device *dev; 3579 int ret; 3580 3581 ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf); 3582 if (ret != QM_QOS_PARAM_NUM) 3583 return -EINVAL; 3584 3585 ret = kstrtoul(val_buf, 10, val); 3586 if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) { 3587 pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n"); 3588 return -EINVAL; 3589 } 3590 3591 dev = bus_find_device_by_name(bus_type, NULL, tbuf_bdf); 3592 if (!dev) { 3593 pci_err(qm->pdev, "input pci bdf number is error!\n"); 3594 return -ENODEV; 3595 } 3596 3597 pdev = container_of(dev, struct pci_dev, dev); 3598 3599 *fun_index = pdev->devfn; 3600 3601 return 0; 3602 } 3603 3604 static ssize_t qm_algqos_write(struct file *filp, const char __user *buf, 3605 size_t count, loff_t *pos) 3606 { 3607 struct hisi_qm *qm = filp->private_data; 3608 char tbuf[QM_DBG_READ_LEN]; 3609 unsigned int fun_index; 3610 unsigned long val; 3611 int len, ret; 3612 3613 if (*pos != 0) 3614 return 0; 3615 3616 if (count >= QM_DBG_READ_LEN) 3617 return -ENOSPC; 3618 3619 len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count); 3620 if (len < 0) 3621 return len; 3622 3623 tbuf[len] = '\0'; 3624 ret = qm_get_qos_value(qm, tbuf, &val, &fun_index); 3625 if (ret) 3626 return ret; 3627 3628 /* Mailbox and reset cannot be operated at the same time */ 3629 if (test_and_set_bit(QM_RESETTING, 
&qm->misc_ctl)) { 3630 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); 3631 return -EAGAIN; 3632 } 3633 3634 ret = qm_pm_get_sync(qm); 3635 if (ret) { 3636 ret = -EINVAL; 3637 goto err_get_status; 3638 } 3639 3640 ret = qm_func_shaper_enable(qm, fun_index, val); 3641 if (ret) { 3642 pci_err(qm->pdev, "failed to enable function shaper!\n"); 3643 ret = -EINVAL; 3644 goto err_put_sync; 3645 } 3646 3647 pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n", 3648 fun_index, val); 3649 ret = count; 3650 3651 err_put_sync: 3652 qm_pm_put_sync(qm); 3653 err_get_status: 3654 clear_bit(QM_RESETTING, &qm->misc_ctl); 3655 return ret; 3656 } 3657 3658 static const struct file_operations qm_algqos_fops = { 3659 .owner = THIS_MODULE, 3660 .open = simple_open, 3661 .read = qm_algqos_read, 3662 .write = qm_algqos_write, 3663 }; 3664 3665 /** 3666 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files. 3667 * @qm: The qm for which we want to add debugfs files. 3668 * 3669 * Create function qos debugfs files, VF ping PF to get function qos. 3670 */ 3671 void hisi_qm_set_algqos_init(struct hisi_qm *qm) 3672 { 3673 if (qm->fun_type == QM_HW_PF) 3674 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, 3675 qm, &qm_algqos_fops); 3676 else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 3677 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root, 3678 qm, &qm_algqos_fops); 3679 } 3680 3681 static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func) 3682 { 3683 int i; 3684 3685 for (i = 1; i <= total_func; i++) 3686 qm->factor[i].func_qos = QM_QOS_MAX_VAL; 3687 } 3688 3689 /** 3690 * hisi_qm_sriov_enable() - enable virtual functions 3691 * @pdev: the PCIe device 3692 * @max_vfs: the number of virtual functions to enable 3693 * 3694 * Returns the number of enabled VFs. If there are VFs enabled already or 3695 * max_vfs is more than the total number of device can be enabled, returns 3696 * failure. 3697 */ 3698 int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs) 3699 { 3700 struct hisi_qm *qm = pci_get_drvdata(pdev); 3701 int pre_existing_vfs, num_vfs, total_vfs, ret; 3702 3703 ret = qm_pm_get_sync(qm); 3704 if (ret) 3705 return ret; 3706 3707 total_vfs = pci_sriov_get_totalvfs(pdev); 3708 pre_existing_vfs = pci_num_vf(pdev); 3709 if (pre_existing_vfs) { 3710 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n", 3711 pre_existing_vfs); 3712 goto err_put_sync; 3713 } 3714 3715 if (max_vfs > total_vfs) { 3716 pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs); 3717 ret = -ERANGE; 3718 goto err_put_sync; 3719 } 3720 3721 num_vfs = max_vfs; 3722 3723 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 3724 hisi_qm_init_vf_qos(qm, num_vfs); 3725 3726 ret = qm_vf_q_assign(qm, num_vfs); 3727 if (ret) { 3728 pci_err(pdev, "Can't assign queues for VF!\n"); 3729 goto err_put_sync; 3730 } 3731 3732 qm->vfs_num = num_vfs; 3733 3734 ret = pci_enable_sriov(pdev, num_vfs); 3735 if (ret) { 3736 pci_err(pdev, "Can't enable VF!\n"); 3737 qm_clear_vft_config(qm); 3738 goto err_put_sync; 3739 } 3740 3741 pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs); 3742 3743 return num_vfs; 3744 3745 err_put_sync: 3746 qm_pm_put_sync(qm); 3747 return ret; 3748 } 3749 EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable); 3750 3751 /** 3752 * hisi_qm_sriov_disable - disable virtual functions 3753 * @pdev: the PCI device. 3754 * @is_frozen: true when all the VFs are frozen. 3755 * 3756 * Return failure if there are VFs assigned already or VF is in used. 
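 * (-EPERM if the VFs are assigned to guests, -EBUSY if a VF is still in use).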
3757 */ 3758 int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen) 3759 { 3760 struct hisi_qm *qm = pci_get_drvdata(pdev); 3761 int ret; 3762 3763 if (pci_vfs_assigned(pdev)) { 3764 pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n"); 3765 return -EPERM; 3766 } 3767 3768 /* While VF is in used, SRIOV cannot be disabled. */ 3769 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) { 3770 pci_err(pdev, "Task is using its VF!\n"); 3771 return -EBUSY; 3772 } 3773 3774 pci_disable_sriov(pdev); 3775 3776 ret = qm_clear_vft_config(qm); 3777 if (ret) 3778 return ret; 3779 3780 qm_pm_put_sync(qm); 3781 3782 return 0; 3783 } 3784 EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable); 3785 3786 /** 3787 * hisi_qm_sriov_configure - configure the number of VFs 3788 * @pdev: The PCI device 3789 * @num_vfs: The number of VFs need enabled 3790 * 3791 * Enable SR-IOV according to num_vfs, 0 means disable. 3792 */ 3793 int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs) 3794 { 3795 if (num_vfs == 0) 3796 return hisi_qm_sriov_disable(pdev, false); 3797 else 3798 return hisi_qm_sriov_enable(pdev, num_vfs); 3799 } 3800 EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure); 3801 3802 static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) 3803 { 3804 u32 err_sts; 3805 3806 if (!qm->err_ini->get_dev_hw_err_status) { 3807 dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n"); 3808 return ACC_ERR_NONE; 3809 } 3810 3811 /* get device hardware error status */ 3812 err_sts = qm->err_ini->get_dev_hw_err_status(qm); 3813 if (err_sts) { 3814 if (err_sts & qm->err_info.ecc_2bits_mask) 3815 qm->err_status.is_dev_ecc_mbit = true; 3816 3817 if (qm->err_ini->log_dev_hw_err) 3818 qm->err_ini->log_dev_hw_err(qm, err_sts); 3819 3820 if (err_sts & qm->err_info.dev_reset_mask) 3821 return ACC_ERR_NEED_RESET; 3822 3823 if (qm->err_ini->clear_dev_hw_err_status) 3824 qm->err_ini->clear_dev_hw_err_status(qm, err_sts); 3825 } 3826 3827 return ACC_ERR_RECOVERED; 3828 } 3829 3830 static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm) 3831 { 3832 enum acc_err_result qm_ret, dev_ret; 3833 3834 /* log qm error */ 3835 qm_ret = qm_hw_error_handle(qm); 3836 3837 /* log device error */ 3838 dev_ret = qm_dev_err_handle(qm); 3839 3840 return (qm_ret == ACC_ERR_NEED_RESET || 3841 dev_ret == ACC_ERR_NEED_RESET) ? 3842 ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED; 3843 } 3844 3845 /** 3846 * hisi_qm_dev_err_detected() - Get device and qm error status then log it. 3847 * @pdev: The PCI device which need report error. 3848 * @state: The connectivity between CPU and device. 3849 * 3850 * We register this function into PCIe AER handlers, It will report device or 3851 * qm hardware error status when error occur. 
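 * Return: PCI_ERS_RESULT_NONE for a VF, PCI_ERS_RESULT_DISCONNECT on permanent failure, PCI_ERS_RESULT_NEED_RESET if a controller reset is required, PCI_ERS_RESULT_RECOVERED otherwise.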
3852 */ 3853 pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, 3854 pci_channel_state_t state) 3855 { 3856 struct hisi_qm *qm = pci_get_drvdata(pdev); 3857 enum acc_err_result ret; 3858 3859 if (pdev->is_virtfn) 3860 return PCI_ERS_RESULT_NONE; 3861 3862 pci_info(pdev, "PCI error detected, state(=%u)!!\n", state); 3863 if (state == pci_channel_io_perm_failure) 3864 return PCI_ERS_RESULT_DISCONNECT; 3865 3866 ret = qm_process_dev_error(qm); 3867 if (ret == ACC_ERR_NEED_RESET) 3868 return PCI_ERS_RESULT_NEED_RESET; 3869 3870 return PCI_ERS_RESULT_RECOVERED; 3871 } 3872 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected); 3873 3874 static int qm_check_req_recv(struct hisi_qm *qm) 3875 { 3876 struct pci_dev *pdev = qm->pdev; 3877 int ret; 3878 u32 val; 3879 3880 if (qm->ver >= QM_HW_V3) 3881 return 0; 3882 3883 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); 3884 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, 3885 (val == ACC_VENDOR_ID_VALUE), 3886 POLL_PERIOD, POLL_TIMEOUT); 3887 if (ret) { 3888 dev_err(&pdev->dev, "Fails to read QM reg!\n"); 3889 return ret; 3890 } 3891 3892 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); 3893 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, 3894 (val == PCI_VENDOR_ID_HUAWEI), 3895 POLL_PERIOD, POLL_TIMEOUT); 3896 if (ret) 3897 dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); 3898 3899 return ret; 3900 } 3901 3902 static int qm_set_pf_mse(struct hisi_qm *qm, bool set) 3903 { 3904 struct pci_dev *pdev = qm->pdev; 3905 u16 cmd; 3906 int i; 3907 3908 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 3909 if (set) 3910 cmd |= PCI_COMMAND_MEMORY; 3911 else 3912 cmd &= ~PCI_COMMAND_MEMORY; 3913 3914 pci_write_config_word(pdev, PCI_COMMAND, cmd); 3915 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 3916 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 3917 if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1)) 3918 return 0; 3919 3920 udelay(1); 3921 } 3922 3923 return -ETIMEDOUT; 3924 } 3925 3926 static int qm_set_vf_mse(struct hisi_qm *qm, bool set) 3927 { 3928 struct pci_dev *pdev = qm->pdev; 3929 u16 sriov_ctrl; 3930 int pos; 3931 int i; 3932 3933 /* 3934 * Since function qm_set_vf_mse is called only after SRIOV is enabled, 3935 * pci_find_ext_capability cannot return 0, pos does not need to be 3936 * checked. 
3937 */ 3938 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 3939 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); 3940 if (set) 3941 sriov_ctrl |= PCI_SRIOV_CTRL_MSE; 3942 else 3943 sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE; 3944 pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl); 3945 3946 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 3947 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); 3948 if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >> 3949 ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT) 3950 return 0; 3951 3952 udelay(1); 3953 } 3954 3955 return -ETIMEDOUT; 3956 } 3957 3958 static int qm_vf_reset_prepare(struct hisi_qm *qm, 3959 enum qm_stop_reason stop_reason) 3960 { 3961 struct hisi_qm_list *qm_list = qm->qm_list; 3962 struct pci_dev *pdev = qm->pdev; 3963 struct pci_dev *virtfn; 3964 struct hisi_qm *vf_qm; 3965 int ret = 0; 3966 3967 mutex_lock(&qm_list->lock); 3968 list_for_each_entry(vf_qm, &qm_list->list, list) { 3969 virtfn = vf_qm->pdev; 3970 if (virtfn == pdev) 3971 continue; 3972 3973 if (pci_physfn(virtfn) == pdev) { 3974 /* save VFs PCIE BAR configuration */ 3975 pci_save_state(virtfn); 3976 3977 ret = hisi_qm_stop(vf_qm, stop_reason); 3978 if (ret) 3979 goto stop_fail; 3980 } 3981 } 3982 3983 stop_fail: 3984 mutex_unlock(&qm_list->lock); 3985 return ret; 3986 } 3987 3988 static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd, 3989 enum qm_stop_reason stop_reason) 3990 { 3991 struct pci_dev *pdev = qm->pdev; 3992 int ret; 3993 3994 if (!qm->vfs_num) 3995 return 0; 3996 3997 /* Kunpeng930 supports to notify VFs to stop before PF reset */ 3998 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { 3999 ret = qm_ping_all_vfs(qm, cmd); 4000 if (ret) 4001 pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n"); 4002 } else { 4003 ret = qm_vf_reset_prepare(qm, stop_reason); 4004 if (ret) 4005 pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret); 4006 } 4007 4008 return ret; 4009 } 4010 4011 static int qm_controller_reset_prepare(struct hisi_qm *qm) 4012 { 4013 struct pci_dev *pdev = qm->pdev; 4014 int ret; 4015 4016 ret = qm_reset_prepare_ready(qm); 4017 if (ret) { 4018 pci_err(pdev, "Controller reset not ready!\n"); 4019 return ret; 4020 } 4021 4022 /* PF obtains the information of VF by querying the register. */ 4023 qm_cmd_uninit(qm); 4024 4025 /* Whether VFs stop successfully, soft reset will continue. 
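A failure to stop the VFs is only logged; the PF soft reset proceeds regardless.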
*/ 4026 ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET); 4027 if (ret) 4028 pci_err(pdev, "failed to stop vfs by pf in soft reset.\n"); 4029 4030 ret = hisi_qm_stop(qm, QM_SOFT_RESET); 4031 if (ret) { 4032 pci_err(pdev, "Fails to stop QM!\n"); 4033 qm_reset_bit_clear(qm); 4034 return ret; 4035 } 4036 4037 if (qm->use_sva) { 4038 ret = qm_hw_err_isolate(qm); 4039 if (ret) 4040 pci_err(pdev, "failed to isolate hw err!\n"); 4041 } 4042 4043 ret = qm_wait_vf_prepare_finish(qm); 4044 if (ret) 4045 pci_err(pdev, "failed to stop by vfs in soft reset!\n"); 4046 4047 clear_bit(QM_RST_SCHED, &qm->misc_ctl); 4048 4049 return 0; 4050 } 4051 4052 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) 4053 { 4054 u32 nfe_enb = 0; 4055 4056 /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ 4057 if (qm->ver >= QM_HW_V3) 4058 return; 4059 4060 if (!qm->err_status.is_dev_ecc_mbit && 4061 qm->err_status.is_qm_ecc_mbit && 4062 qm->err_ini->close_axi_master_ooo) { 4063 qm->err_ini->close_axi_master_ooo(qm); 4064 } else if (qm->err_status.is_dev_ecc_mbit && 4065 !qm->err_status.is_qm_ecc_mbit && 4066 !qm->err_ini->close_axi_master_ooo) { 4067 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); 4068 writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, 4069 qm->io_base + QM_RAS_NFE_ENABLE); 4070 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); 4071 } 4072 } 4073 4074 static int qm_soft_reset(struct hisi_qm *qm) 4075 { 4076 struct pci_dev *pdev = qm->pdev; 4077 int ret; 4078 u32 val; 4079 4080 /* Ensure all doorbells and mailboxes received by QM */ 4081 ret = qm_check_req_recv(qm); 4082 if (ret) 4083 return ret; 4084 4085 if (qm->vfs_num) { 4086 ret = qm_set_vf_mse(qm, false); 4087 if (ret) { 4088 pci_err(pdev, "Fails to disable vf MSE bit.\n"); 4089 return ret; 4090 } 4091 } 4092 4093 ret = qm->ops->set_msi(qm, false); 4094 if (ret) { 4095 pci_err(pdev, "Fails to disable PEH MSI bit.\n"); 4096 return ret; 4097 } 4098 4099 qm_dev_ecc_mbit_handle(qm); 4100 4101 /* OOO register set and check */ 4102 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, 4103 qm->io_base + ACC_MASTER_GLOBAL_CTRL); 4104 4105 /* If bus lock, reset chip */ 4106 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, 4107 val, 4108 (val == ACC_MASTER_TRANS_RETURN_RW), 4109 POLL_PERIOD, POLL_TIMEOUT); 4110 if (ret) { 4111 pci_emerg(pdev, "Bus lock! 
Please reset system.\n"); 4112 return ret; 4113 } 4114 4115 if (qm->err_ini->close_sva_prefetch) 4116 qm->err_ini->close_sva_prefetch(qm); 4117 4118 ret = qm_set_pf_mse(qm, false); 4119 if (ret) { 4120 pci_err(pdev, "Fails to disable pf MSE bit.\n"); 4121 return ret; 4122 } 4123 4124 /* The reset related sub-control registers are not in PCI BAR */ 4125 if (ACPI_HANDLE(&pdev->dev)) { 4126 unsigned long long value = 0; 4127 acpi_status s; 4128 4129 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), 4130 qm->err_info.acpi_rst, 4131 NULL, &value); 4132 if (ACPI_FAILURE(s)) { 4133 pci_err(pdev, "NO controller reset method!\n"); 4134 return -EIO; 4135 } 4136 4137 if (value) { 4138 pci_err(pdev, "Reset step %llu failed!\n", value); 4139 return -EIO; 4140 } 4141 } else { 4142 pci_err(pdev, "No reset method!\n"); 4143 return -EINVAL; 4144 } 4145 4146 return 0; 4147 } 4148 4149 static int qm_vf_reset_done(struct hisi_qm *qm) 4150 { 4151 struct hisi_qm_list *qm_list = qm->qm_list; 4152 struct pci_dev *pdev = qm->pdev; 4153 struct pci_dev *virtfn; 4154 struct hisi_qm *vf_qm; 4155 int ret = 0; 4156 4157 mutex_lock(&qm_list->lock); 4158 list_for_each_entry(vf_qm, &qm_list->list, list) { 4159 virtfn = vf_qm->pdev; 4160 if (virtfn == pdev) 4161 continue; 4162 4163 if (pci_physfn(virtfn) == pdev) { 4164 /* enable VFs PCIE BAR configuration */ 4165 pci_restore_state(virtfn); 4166 4167 ret = qm_restart(vf_qm); 4168 if (ret) 4169 goto restart_fail; 4170 } 4171 } 4172 4173 restart_fail: 4174 mutex_unlock(&qm_list->lock); 4175 return ret; 4176 } 4177 4178 static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd) 4179 { 4180 struct pci_dev *pdev = qm->pdev; 4181 int ret; 4182 4183 if (!qm->vfs_num) 4184 return 0; 4185 4186 ret = qm_vf_q_assign(qm, qm->vfs_num); 4187 if (ret) { 4188 pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret); 4189 return ret; 4190 } 4191 4192 /* Kunpeng930 supports to notify VFs to start after PF reset. 
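On hardware without mailbox command support, the PF restarts each VF directly via qm_vf_reset_done() instead.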
*/ 4193 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { 4194 ret = qm_ping_all_vfs(qm, cmd); 4195 if (ret) 4196 pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n"); 4197 } else { 4198 ret = qm_vf_reset_done(qm); 4199 if (ret) 4200 pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret); 4201 } 4202 4203 return ret; 4204 } 4205 4206 static int qm_dev_hw_init(struct hisi_qm *qm) 4207 { 4208 return qm->err_ini->hw_init(qm); 4209 } 4210 4211 static void qm_restart_prepare(struct hisi_qm *qm) 4212 { 4213 u32 value; 4214 4215 if (qm->err_ini->open_sva_prefetch) 4216 qm->err_ini->open_sva_prefetch(qm); 4217 4218 if (qm->ver >= QM_HW_V3) 4219 return; 4220 4221 if (!qm->err_status.is_qm_ecc_mbit && 4222 !qm->err_status.is_dev_ecc_mbit) 4223 return; 4224 4225 /* temporarily close the OOO port used for PEH to write out MSI */ 4226 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4227 writel(value & ~qm->err_info.msi_wr_port, 4228 qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4229 4230 /* clear dev ecc 2bit error source if having */ 4231 value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask; 4232 if (value && qm->err_ini->clear_dev_hw_err_status) 4233 qm->err_ini->clear_dev_hw_err_status(qm, value); 4234 4235 /* clear QM ecc mbit error source */ 4236 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); 4237 4238 /* clear AM Reorder Buffer ecc mbit source */ 4239 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); 4240 } 4241 4242 static void qm_restart_done(struct hisi_qm *qm) 4243 { 4244 u32 value; 4245 4246 if (qm->ver >= QM_HW_V3) 4247 goto clear_flags; 4248 4249 if (!qm->err_status.is_qm_ecc_mbit && 4250 !qm->err_status.is_dev_ecc_mbit) 4251 return; 4252 4253 /* open the OOO port for PEH to write out MSI */ 4254 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4255 value |= qm->err_info.msi_wr_port; 4256 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4257 4258 clear_flags: 4259 qm->err_status.is_qm_ecc_mbit = false; 4260 qm->err_status.is_dev_ecc_mbit = false; 4261 } 4262 4263 static int qm_controller_reset_done(struct hisi_qm *qm) 4264 { 4265 struct pci_dev *pdev = qm->pdev; 4266 int ret; 4267 4268 ret = qm->ops->set_msi(qm, true); 4269 if (ret) { 4270 pci_err(pdev, "Fails to enable PEH MSI bit!\n"); 4271 return ret; 4272 } 4273 4274 ret = qm_set_pf_mse(qm, true); 4275 if (ret) { 4276 pci_err(pdev, "Fails to enable pf MSE bit!\n"); 4277 return ret; 4278 } 4279 4280 if (qm->vfs_num) { 4281 ret = qm_set_vf_mse(qm, true); 4282 if (ret) { 4283 pci_err(pdev, "Fails to enable vf MSE bit!\n"); 4284 return ret; 4285 } 4286 } 4287 4288 ret = qm_dev_hw_init(qm); 4289 if (ret) { 4290 pci_err(pdev, "Failed to init device\n"); 4291 return ret; 4292 } 4293 4294 qm_restart_prepare(qm); 4295 hisi_qm_dev_err_init(qm); 4296 if (qm->err_ini->open_axi_master_ooo) 4297 qm->err_ini->open_axi_master_ooo(qm); 4298 4299 ret = qm_dev_mem_reset(qm); 4300 if (ret) { 4301 pci_err(pdev, "failed to reset device memory\n"); 4302 return ret; 4303 } 4304 4305 ret = qm_restart(qm); 4306 if (ret) { 4307 pci_err(pdev, "Failed to start QM!\n"); 4308 return ret; 4309 } 4310 4311 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); 4312 if (ret) 4313 pci_err(pdev, "failed to start vfs by pf in soft reset.\n"); 4314 4315 ret = qm_wait_vf_prepare_finish(qm); 4316 if (ret) 4317 pci_err(pdev, "failed to start by vfs in soft reset!\n"); 4318 4319 qm_cmd_init(qm); 4320 qm_restart_done(qm); 4321 4322 qm_reset_bit_clear(qm); 4323 4324 return 0; 4325 } 4326 4327 static int qm_controller_reset(struct 
hisi_qm *qm) 4328 { 4329 struct pci_dev *pdev = qm->pdev; 4330 int ret; 4331 4332 pci_info(pdev, "Controller resetting...\n"); 4333 4334 ret = qm_controller_reset_prepare(qm); 4335 if (ret) { 4336 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 4337 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 4338 clear_bit(QM_RST_SCHED, &qm->misc_ctl); 4339 return ret; 4340 } 4341 4342 hisi_qm_show_last_dfx_regs(qm); 4343 if (qm->err_ini->show_last_dfx_regs) 4344 qm->err_ini->show_last_dfx_regs(qm); 4345 4346 ret = qm_soft_reset(qm); 4347 if (ret) 4348 goto err_reset; 4349 4350 ret = qm_controller_reset_done(qm); 4351 if (ret) 4352 goto err_reset; 4353 4354 pci_info(pdev, "Controller reset complete\n"); 4355 4356 return 0; 4357 4358 err_reset: 4359 pci_err(pdev, "Controller reset failed (%d)\n", ret); 4360 qm_reset_bit_clear(qm); 4361 4362 /* if resetting fails, isolate the device */ 4363 if (qm->use_sva) 4364 qm->isolate_data.is_isolate = true; 4365 return ret; 4366 } 4367 4368 /** 4369 * hisi_qm_dev_slot_reset() - slot reset 4370 * @pdev: the PCIe device 4371 * 4372 * This function offers QM relate PCIe device reset interface. Drivers which 4373 * use QM can use this function as slot_reset in its struct pci_error_handlers. 4374 */ 4375 pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev) 4376 { 4377 struct hisi_qm *qm = pci_get_drvdata(pdev); 4378 int ret; 4379 4380 if (pdev->is_virtfn) 4381 return PCI_ERS_RESULT_RECOVERED; 4382 4383 /* reset pcie device controller */ 4384 ret = qm_controller_reset(qm); 4385 if (ret) { 4386 pci_err(pdev, "Controller reset failed (%d)\n", ret); 4387 return PCI_ERS_RESULT_DISCONNECT; 4388 } 4389 4390 return PCI_ERS_RESULT_RECOVERED; 4391 } 4392 EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset); 4393 4394 void hisi_qm_reset_prepare(struct pci_dev *pdev) 4395 { 4396 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); 4397 struct hisi_qm *qm = pci_get_drvdata(pdev); 4398 u32 delay = 0; 4399 int ret; 4400 4401 hisi_qm_dev_err_uninit(pf_qm); 4402 4403 /* 4404 * Check whether there is an ECC mbit error, If it occurs, need to 4405 * wait for soft reset to fix it. 4406 */ 4407 while (qm_check_dev_error(pf_qm)) { 4408 msleep(++delay); 4409 if (delay > QM_RESET_WAIT_TIMEOUT) 4410 return; 4411 } 4412 4413 ret = qm_reset_prepare_ready(qm); 4414 if (ret) { 4415 pci_err(pdev, "FLR not ready!\n"); 4416 return; 4417 } 4418 4419 /* PF obtains the information of VF by querying the register. 
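 * Mailbox command processing is shut down on the PF here and re-enabled by
 * qm_cmd_init() in hisi_qm_reset_done() once the FLR has completed.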
*/ 4420 if (qm->fun_type == QM_HW_PF) 4421 qm_cmd_uninit(qm); 4422 4423 ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN); 4424 if (ret) 4425 pci_err(pdev, "failed to stop vfs by pf in FLR.\n"); 4426 4427 ret = hisi_qm_stop(qm, QM_DOWN); 4428 if (ret) { 4429 pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret); 4430 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 4431 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 4432 return; 4433 } 4434 4435 ret = qm_wait_vf_prepare_finish(qm); 4436 if (ret) 4437 pci_err(pdev, "failed to stop by vfs in FLR!\n"); 4438 4439 pci_info(pdev, "FLR resetting...\n"); 4440 } 4441 EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare); 4442 4443 static bool qm_flr_reset_complete(struct pci_dev *pdev) 4444 { 4445 struct pci_dev *pf_pdev = pci_physfn(pdev); 4446 struct hisi_qm *qm = pci_get_drvdata(pf_pdev); 4447 u32 id; 4448 4449 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); 4450 if (id == QM_PCI_COMMAND_INVALID) { 4451 pci_err(pdev, "Device can not be used!\n"); 4452 return false; 4453 } 4454 4455 return true; 4456 } 4457 4458 void hisi_qm_reset_done(struct pci_dev *pdev) 4459 { 4460 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); 4461 struct hisi_qm *qm = pci_get_drvdata(pdev); 4462 int ret; 4463 4464 if (qm->fun_type == QM_HW_PF) { 4465 ret = qm_dev_hw_init(qm); 4466 if (ret) { 4467 pci_err(pdev, "Failed to init PF, ret = %d.\n", ret); 4468 goto flr_done; 4469 } 4470 } 4471 4472 hisi_qm_dev_err_init(pf_qm); 4473 4474 ret = qm_restart(qm); 4475 if (ret) { 4476 pci_err(pdev, "Failed to start QM, ret = %d.\n", ret); 4477 goto flr_done; 4478 } 4479 4480 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); 4481 if (ret) 4482 pci_err(pdev, "failed to start vfs by pf in FLR.\n"); 4483 4484 ret = qm_wait_vf_prepare_finish(qm); 4485 if (ret) 4486 pci_err(pdev, "failed to start by vfs in FLR!\n"); 4487 4488 flr_done: 4489 if (qm->fun_type == QM_HW_PF) 4490 qm_cmd_init(qm); 4491 4492 if (qm_flr_reset_complete(pdev)) 4493 pci_info(pdev, "FLR reset complete\n"); 4494 4495 qm_reset_bit_clear(qm); 4496 } 4497 EXPORT_SYMBOL_GPL(hisi_qm_reset_done); 4498 4499 static irqreturn_t qm_abnormal_irq(int irq, void *data) 4500 { 4501 struct hisi_qm *qm = data; 4502 enum acc_err_result ret; 4503 4504 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); 4505 ret = qm_process_dev_error(qm); 4506 if (ret == ACC_ERR_NEED_RESET && 4507 !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) && 4508 !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl)) 4509 schedule_work(&qm->rst_work); 4510 4511 return IRQ_HANDLED; 4512 } 4513 4514 /** 4515 * hisi_qm_dev_shutdown() - Shutdown device. 4516 * @pdev: The device will be shutdown. 4517 * 4518 * This function will stop qm when OS shutdown or rebooting. 
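 * The QM cache is written back before the device goes down.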
4519 */ 4520 void hisi_qm_dev_shutdown(struct pci_dev *pdev) 4521 { 4522 struct hisi_qm *qm = pci_get_drvdata(pdev); 4523 int ret; 4524 4525 ret = hisi_qm_stop(qm, QM_DOWN); 4526 if (ret) 4527 dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n"); 4528 4529 hisi_qm_cache_wb(qm); 4530 } 4531 EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown); 4532 4533 static void hisi_qm_controller_reset(struct work_struct *rst_work) 4534 { 4535 struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work); 4536 int ret; 4537 4538 ret = qm_pm_get_sync(qm); 4539 if (ret) { 4540 clear_bit(QM_RST_SCHED, &qm->misc_ctl); 4541 return; 4542 } 4543 4544 /* reset pcie device controller */ 4545 ret = qm_controller_reset(qm); 4546 if (ret) 4547 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); 4548 4549 qm_pm_put_sync(qm); 4550 } 4551 4552 static void qm_pf_reset_vf_prepare(struct hisi_qm *qm, 4553 enum qm_stop_reason stop_reason) 4554 { 4555 enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE; 4556 struct pci_dev *pdev = qm->pdev; 4557 int ret; 4558 4559 ret = qm_reset_prepare_ready(qm); 4560 if (ret) { 4561 dev_err(&pdev->dev, "reset prepare not ready!\n"); 4562 atomic_set(&qm->status.flags, QM_STOP); 4563 cmd = QM_VF_PREPARE_FAIL; 4564 goto err_prepare; 4565 } 4566 4567 ret = hisi_qm_stop(qm, stop_reason); 4568 if (ret) { 4569 dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret); 4570 atomic_set(&qm->status.flags, QM_STOP); 4571 cmd = QM_VF_PREPARE_FAIL; 4572 goto err_prepare; 4573 } else { 4574 goto out; 4575 } 4576 4577 err_prepare: 4578 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 4579 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 4580 out: 4581 pci_save_state(pdev); 4582 ret = qm_ping_pf(qm, cmd); 4583 if (ret) 4584 dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n"); 4585 } 4586 4587 static void qm_pf_reset_vf_done(struct hisi_qm *qm) 4588 { 4589 enum qm_mb_cmd cmd = QM_VF_START_DONE; 4590 struct pci_dev *pdev = qm->pdev; 4591 int ret; 4592 4593 pci_restore_state(pdev); 4594 ret = hisi_qm_start(qm); 4595 if (ret) { 4596 dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret); 4597 cmd = QM_VF_START_FAIL; 4598 } 4599 4600 qm_cmd_init(qm); 4601 ret = qm_ping_pf(qm, cmd); 4602 if (ret) 4603 dev_warn(&pdev->dev, "PF responds timeout in reset done!\n"); 4604 4605 qm_reset_bit_clear(qm); 4606 } 4607 4608 static int qm_wait_pf_reset_finish(struct hisi_qm *qm) 4609 { 4610 struct device *dev = &qm->pdev->dev; 4611 u32 val, cmd; 4612 u64 msg; 4613 int ret; 4614 4615 /* Wait for reset to finish */ 4616 ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val, 4617 val == BIT(0), QM_VF_RESET_WAIT_US, 4618 QM_VF_RESET_WAIT_TIMEOUT_US); 4619 /* hardware completion status should be available by this time */ 4620 if (ret) { 4621 dev_err(dev, "couldn't get reset done status from PF, timeout!\n"); 4622 return -ETIMEDOUT; 4623 } 4624 4625 /* 4626 * Whether message is got successfully, 4627 * VF needs to ack PF by clearing the interrupt. 
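 * The interrupt source is therefore cleared before the mailbox return value is checked.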
4628 */
4629 ret = qm_get_mb_cmd(qm, &msg, 0);
4630 qm_clear_cmd_interrupt(qm, 0);
4631 if (ret) {
4632 dev_err(dev, "failed to get msg from PF in reset done!\n");
4633 return ret;
4634 }
4635
4636 cmd = msg & QM_MB_CMD_DATA_MASK;
4637 if (cmd != QM_PF_RESET_DONE) {
4638 dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
4639 ret = -EINVAL;
4640 }
4641
4642 return ret;
4643 }
4644
4645 static void qm_pf_reset_vf_process(struct hisi_qm *qm,
4646 enum qm_stop_reason stop_reason)
4647 {
4648 struct device *dev = &qm->pdev->dev;
4649 int ret;
4650
4651 dev_info(dev, "device reset start...\n");
4652
4653 /* The message is obtained by querying the register during the reset */
4654 qm_cmd_uninit(qm);
4655 qm_pf_reset_vf_prepare(qm, stop_reason);
4656
4657 ret = qm_wait_pf_reset_finish(qm);
4658 if (ret)
4659 goto err_get_status;
4660
4661 qm_pf_reset_vf_done(qm);
4662
4663 dev_info(dev, "device reset done.\n");
4664
4665 return;
4666
4667 err_get_status:
4668 qm_cmd_init(qm);
4669 qm_reset_bit_clear(qm);
4670 }
4671
4672 static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
4673 {
4674 struct device *dev = &qm->pdev->dev;
4675 u64 msg;
4676 u32 cmd;
4677 int ret;
4678
4679 /*
4680 * Get the message from the source function via the mailbox. Whether or not the
4681 * message is received successfully, the destination must ack the source by clearing the interrupt.
4682 */
4683 ret = qm_get_mb_cmd(qm, &msg, fun_num);
4684 qm_clear_cmd_interrupt(qm, BIT(fun_num));
4685 if (ret) {
4686 dev_err(dev, "failed to get msg from source!\n");
4687 return;
4688 }
4689
4690 cmd = msg & QM_MB_CMD_DATA_MASK;
4691 switch (cmd) {
4692 case QM_PF_FLR_PREPARE:
4693 qm_pf_reset_vf_process(qm, QM_DOWN);
4694 break;
4695 case QM_PF_SRST_PREPARE:
4696 qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
4697 break;
4698 case QM_VF_GET_QOS:
4699 qm_vf_get_qos(qm, fun_num);
4700 break;
4701 case QM_PF_SET_QOS:
4702 qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
4703 break;
4704 default:
4705 dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
4706 break;
4707 }
4708 }
4709
4710 static void qm_cmd_process(struct work_struct *cmd_process)
4711 {
4712 struct hisi_qm *qm = container_of(cmd_process,
4713 struct hisi_qm, cmd_process);
4714 u32 vfs_num = qm->vfs_num;
4715 u64 val;
4716 u32 i;
4717
4718 if (qm->fun_type == QM_HW_PF) {
4719 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
4720 if (!val)
4721 return;
4722
4723 for (i = 1; i <= vfs_num; i++) {
4724 if (val & BIT(i))
4725 qm_handle_cmd_msg(qm, i);
4726 }
4727
4728 return;
4729 }
4730
4731 qm_handle_cmd_msg(qm, 0);
4732 }
4733
4734 /**
4735 * hisi_qm_alg_register() - Register algorithms with crypto.
4736 * @qm: The qm to be registered.
4737 * @qm_list: The qm list this qm belongs to.
4738 * @guard: Minimum qp_num required for registration.
4739 *
4740 * Register the function's algorithms with crypto only when it satisfies the guard.
4741 */
4742 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard)
4743 {
4744 struct device *dev = &qm->pdev->dev;
4745
4746 if (qm->ver <= QM_HW_V2 && qm->use_sva) {
4747 dev_info(dev, "HW V2 does not support using uacce sva mode together with hardware crypto algs.\n");
4748 return 0;
4749 }
4750
4751 if (qm->qp_num < guard) {
4752 dev_info(dev, "qp_num is less than what the tasks need.\n");
4753 return 0;
4754 }
4755
4756 return qm_list->register_to_crypto(qm);
4757 }
4758 EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
4759
4760 /**
4761 * hisi_qm_alg_unregister() - Unregister algorithms from crypto.
4762 * @qm: The qm to be unregistered.
4763 * @qm_list: The qm list this qm belongs to.
4764 * @guard: Minimum qp_num guard, as used at registration.
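 *	  (the call is a no-op when qp_num is below this guard)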
4765 *
4766 * Unregister the function's algorithms from crypto when it satisfies the guard.
4767 */
4768 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard)
4769 {
4770 if (qm->ver <= QM_HW_V2 && qm->use_sva)
4771 return;
4772
4773 if (qm->qp_num < guard)
4774 return;
4775
4776 qm_list->unregister_from_crypto(qm);
4777 }
4778 EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
4779
4780 static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
4781 {
4782 struct pci_dev *pdev = qm->pdev;
4783 u32 irq_vector, val;
4784
4785 if (qm->fun_type == QM_HW_VF)
4786 return;
4787
4788 val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
4789 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
4790 return;
4791
4792 irq_vector = val & QM_IRQ_VECTOR_MASK;
4793 free_irq(pci_irq_vector(pdev, irq_vector), qm);
4794 }
4795
4796 static int qm_register_abnormal_irq(struct hisi_qm *qm)
4797 {
4798 struct pci_dev *pdev = qm->pdev;
4799 u32 irq_vector, val;
4800 int ret;
4801
4802 if (qm->fun_type == QM_HW_VF)
4803 return 0;
4804
4805 val = qm->cap_tables.qm_cap_table[QM_ABN_IRQ_TYPE_CAP_IDX].cap_val;
4806 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
4807 return 0;
4808
4809 irq_vector = val & QM_IRQ_VECTOR_MASK;
4810 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
4811 if (ret)
4812 dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);
4813
4814 return ret;
4815 }
4816
4817 static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm)
4818 {
4819 struct pci_dev *pdev = qm->pdev;
4820 u32 irq_vector, val;
4821
4822 val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
4823 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
4824 return;
4825
4826 irq_vector = val & QM_IRQ_VECTOR_MASK;
4827 free_irq(pci_irq_vector(pdev, irq_vector), qm);
4828 }
4829
4830 static int qm_register_mb_cmd_irq(struct hisi_qm *qm)
4831 {
4832 struct pci_dev *pdev = qm->pdev;
4833 u32 irq_vector, val;
4834 int ret;
4835
4836 val = qm->cap_tables.qm_cap_table[QM_PF2VF_IRQ_TYPE_CAP_IDX].cap_val;
4837 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
4838 return 0;
4839
4840 irq_vector = val & QM_IRQ_VECTOR_MASK;
4841 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm);
4842 if (ret)
4843 dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret);
4844
4845 return ret;
4846 }
4847
4848 static void qm_unregister_aeq_irq(struct hisi_qm *qm)
4849 {
4850 struct pci_dev *pdev = qm->pdev;
4851 u32 irq_vector, val;
4852
4853 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
4854 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
4855 return;
4856
4857 irq_vector = val & QM_IRQ_VECTOR_MASK;
4858 free_irq(pci_irq_vector(pdev, irq_vector), qm);
4859 }
4860
4861 static int qm_register_aeq_irq(struct hisi_qm *qm)
4862 {
4863 struct pci_dev *pdev = qm->pdev;
4864 u32 irq_vector, val;
4865 int ret;
4866
4867 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ_TYPE_CAP_IDX].cap_val;
4868 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK))
4869 return 0;
4870
4871 irq_vector = val & QM_IRQ_VECTOR_MASK;
4872 ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), NULL,
4873 qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm);
4874 if (ret)
4875 dev_err(&pdev->dev, "failed to request aeq irq, ret = %d", ret);
4876
4877 return ret;
4878 }
4879
4880 static void qm_unregister_eq_irq(struct hisi_qm *qm)
4881 {
4882 struct pci_dev *pdev = qm->pdev;
4883 u32 irq_vector, val; 4884 4885 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val; 4886 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 4887 return; 4888 4889 irq_vector = val & QM_IRQ_VECTOR_MASK; 4890 free_irq(pci_irq_vector(pdev, irq_vector), qm); 4891 } 4892 4893 static int qm_register_eq_irq(struct hisi_qm *qm) 4894 { 4895 struct pci_dev *pdev = qm->pdev; 4896 u32 irq_vector, val; 4897 int ret; 4898 4899 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ_TYPE_CAP_IDX].cap_val; 4900 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 4901 return 0; 4902 4903 irq_vector = val & QM_IRQ_VECTOR_MASK; 4904 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm); 4905 if (ret) 4906 dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); 4907 4908 return ret; 4909 } 4910 4911 static void qm_irqs_unregister(struct hisi_qm *qm) 4912 { 4913 qm_unregister_mb_cmd_irq(qm); 4914 qm_unregister_abnormal_irq(qm); 4915 qm_unregister_aeq_irq(qm); 4916 qm_unregister_eq_irq(qm); 4917 } 4918 4919 static int qm_irqs_register(struct hisi_qm *qm) 4920 { 4921 int ret; 4922 4923 ret = qm_register_eq_irq(qm); 4924 if (ret) 4925 return ret; 4926 4927 ret = qm_register_aeq_irq(qm); 4928 if (ret) 4929 goto free_eq_irq; 4930 4931 ret = qm_register_abnormal_irq(qm); 4932 if (ret) 4933 goto free_aeq_irq; 4934 4935 ret = qm_register_mb_cmd_irq(qm); 4936 if (ret) 4937 goto free_abnormal_irq; 4938 4939 return 0; 4940 4941 free_abnormal_irq: 4942 qm_unregister_abnormal_irq(qm); 4943 free_aeq_irq: 4944 qm_unregister_aeq_irq(qm); 4945 free_eq_irq: 4946 qm_unregister_eq_irq(qm); 4947 return ret; 4948 } 4949 4950 static int qm_get_qp_num(struct hisi_qm *qm) 4951 { 4952 struct device *dev = &qm->pdev->dev; 4953 bool is_db_isolation; 4954 4955 /* VF's qp_num assigned by PF in v2, and VF can get qp_num by vft. 
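 * On QM_HW_V1 there is no such mailbox query, so the qp_num already configured
 * for the VF is kept as is.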
*/ 4956 if (qm->fun_type == QM_HW_VF) { 4957 if (qm->ver != QM_HW_V1) 4958 /* v2 starts to support get vft by mailbox */ 4959 return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); 4960 4961 return 0; 4962 } 4963 4964 is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); 4965 qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true); 4966 qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, 4967 QM_FUNC_MAX_QP_CAP, is_db_isolation); 4968 4969 if (qm->qp_num <= qm->max_qp_num) 4970 return 0; 4971 4972 if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) { 4973 /* Check whether the set qp number is valid */ 4974 dev_err(dev, "qp num(%u) is more than max qp num(%u)!\n", 4975 qm->qp_num, qm->max_qp_num); 4976 return -EINVAL; 4977 } 4978 4979 dev_info(dev, "Default qp num(%u) is too big, reset it to Function's max qp num(%u)!\n", 4980 qm->qp_num, qm->max_qp_num); 4981 qm->qp_num = qm->max_qp_num; 4982 qm->debug.curr_qm_qp_num = qm->qp_num; 4983 4984 return 0; 4985 } 4986 4987 static int qm_pre_store_irq_type_caps(struct hisi_qm *qm) 4988 { 4989 struct hisi_qm_cap_record *qm_cap; 4990 struct pci_dev *pdev = qm->pdev; 4991 size_t i, size; 4992 4993 size = ARRAY_SIZE(qm_pre_store_caps); 4994 qm_cap = devm_kzalloc(&pdev->dev, sizeof(*qm_cap) * size, GFP_KERNEL); 4995 if (!qm_cap) 4996 return -ENOMEM; 4997 4998 for (i = 0; i < size; i++) { 4999 qm_cap[i].type = qm_pre_store_caps[i]; 5000 qm_cap[i].cap_val = hisi_qm_get_hw_info(qm, qm_basic_info, 5001 qm_pre_store_caps[i], qm->cap_ver); 5002 } 5003 5004 qm->cap_tables.qm_cap_table = qm_cap; 5005 5006 return 0; 5007 } 5008 5009 static int qm_get_hw_caps(struct hisi_qm *qm) 5010 { 5011 const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ? 5012 qm_cap_info_pf : qm_cap_info_vf; 5013 u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) : 5014 ARRAY_SIZE(qm_cap_info_vf); 5015 u32 val, i; 5016 5017 /* Doorbell isolate register is a independent register. 
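 * It is checked here, before qm->cap_ver is read back from QM_FUNC_CAPS_REG below.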
*/ 5018 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true); 5019 if (val) 5020 set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); 5021 5022 if (qm->ver >= QM_HW_V3) { 5023 val = readl(qm->io_base + QM_FUNC_CAPS_REG); 5024 qm->cap_ver = val & QM_CAPBILITY_VERSION; 5025 } 5026 5027 /* Get PF/VF common capbility */ 5028 for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) { 5029 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver); 5030 if (val) 5031 set_bit(qm_cap_info_comm[i].type, &qm->caps); 5032 } 5033 5034 /* Get PF/VF different capbility */ 5035 for (i = 0; i < size; i++) { 5036 val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver); 5037 if (val) 5038 set_bit(cap_info[i].type, &qm->caps); 5039 } 5040 5041 /* Fetch and save the value of irq type related capability registers */ 5042 return qm_pre_store_irq_type_caps(qm); 5043 } 5044 5045 static int qm_get_pci_res(struct hisi_qm *qm) 5046 { 5047 struct pci_dev *pdev = qm->pdev; 5048 struct device *dev = &pdev->dev; 5049 int ret; 5050 5051 ret = pci_request_mem_regions(pdev, qm->dev_name); 5052 if (ret < 0) { 5053 dev_err(dev, "Failed to request mem regions!\n"); 5054 return ret; 5055 } 5056 5057 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); 5058 qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2)); 5059 if (!qm->io_base) { 5060 ret = -EIO; 5061 goto err_request_mem_regions; 5062 } 5063 5064 ret = qm_get_hw_caps(qm); 5065 if (ret) 5066 goto err_ioremap; 5067 5068 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { 5069 qm->db_interval = QM_QP_DB_INTERVAL; 5070 qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); 5071 qm->db_io_base = ioremap(qm->db_phys_base, 5072 pci_resource_len(pdev, PCI_BAR_4)); 5073 if (!qm->db_io_base) { 5074 ret = -EIO; 5075 goto err_ioremap; 5076 } 5077 } else { 5078 qm->db_phys_base = qm->phys_base; 5079 qm->db_io_base = qm->io_base; 5080 qm->db_interval = 0; 5081 } 5082 5083 ret = qm_get_qp_num(qm); 5084 if (ret) 5085 goto err_db_ioremap; 5086 5087 return 0; 5088 5089 err_db_ioremap: 5090 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) 5091 iounmap(qm->db_io_base); 5092 err_ioremap: 5093 iounmap(qm->io_base); 5094 err_request_mem_regions: 5095 pci_release_mem_regions(pdev); 5096 return ret; 5097 } 5098 5099 static int hisi_qm_pci_init(struct hisi_qm *qm) 5100 { 5101 struct pci_dev *pdev = qm->pdev; 5102 struct device *dev = &pdev->dev; 5103 unsigned int num_vec; 5104 int ret; 5105 5106 ret = pci_enable_device_mem(pdev); 5107 if (ret < 0) { 5108 dev_err(dev, "Failed to enable device mem!\n"); 5109 return ret; 5110 } 5111 5112 ret = qm_get_pci_res(qm); 5113 if (ret) 5114 goto err_disable_pcidev; 5115 5116 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 5117 if (ret < 0) 5118 goto err_get_pci_res; 5119 pci_set_master(pdev); 5120 5121 num_vec = qm_get_irq_num(qm); 5122 ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); 5123 if (ret < 0) { 5124 dev_err(dev, "Failed to enable MSI vectors!\n"); 5125 goto err_get_pci_res; 5126 } 5127 5128 return 0; 5129 5130 err_get_pci_res: 5131 qm_put_pci_res(qm); 5132 err_disable_pcidev: 5133 pci_disable_device(pdev); 5134 return ret; 5135 } 5136 5137 static int hisi_qm_init_work(struct hisi_qm *qm) 5138 { 5139 int i; 5140 5141 for (i = 0; i < qm->qp_num; i++) 5142 INIT_WORK(&qm->poll_data[i].work, qm_work_process); 5143 5144 if (qm->fun_type == QM_HW_PF) 5145 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); 5146 5147 if (qm->ver > QM_HW_V2) 5148 INIT_WORK(&qm->cmd_process, qm_cmd_process); 5149 
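	/* One unbound, high-priority workqueue per QM device, with max_active set to num_online_cpus(). */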
5150 qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | 5151 WQ_UNBOUND, num_online_cpus(), 5152 pci_name(qm->pdev)); 5153 if (!qm->wq) { 5154 pci_err(qm->pdev, "failed to alloc workqueue!\n"); 5155 return -ENOMEM; 5156 } 5157 5158 return 0; 5159 } 5160 5161 static int hisi_qp_alloc_memory(struct hisi_qm *qm) 5162 { 5163 struct device *dev = &qm->pdev->dev; 5164 u16 sq_depth, cq_depth; 5165 size_t qp_dma_size; 5166 int i, ret; 5167 5168 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); 5169 if (!qm->qp_array) 5170 return -ENOMEM; 5171 5172 qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); 5173 if (!qm->poll_data) { 5174 kfree(qm->qp_array); 5175 return -ENOMEM; 5176 } 5177 5178 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); 5179 5180 /* one more page for device or qp statuses */ 5181 qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth; 5182 qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE; 5183 for (i = 0; i < qm->qp_num; i++) { 5184 qm->poll_data[i].qm = qm; 5185 ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth); 5186 if (ret) 5187 goto err_init_qp_mem; 5188 5189 dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size); 5190 } 5191 5192 return 0; 5193 err_init_qp_mem: 5194 hisi_qp_memory_uninit(qm, i); 5195 5196 return ret; 5197 } 5198 5199 static int hisi_qm_alloc_rsv_buf(struct hisi_qm *qm) 5200 { 5201 struct qm_rsv_buf *xqc_buf = &qm->xqc_buf; 5202 struct qm_dma *xqc_dma = &xqc_buf->qcdma; 5203 struct device *dev = &qm->pdev->dev; 5204 size_t off = 0; 5205 5206 #define QM_XQC_BUF_INIT(xqc_buf, type) do { \ 5207 (xqc_buf)->type = ((xqc_buf)->qcdma.va + (off)); \ 5208 (xqc_buf)->type##_dma = (xqc_buf)->qcdma.dma + (off); \ 5209 off += QMC_ALIGN(sizeof(struct qm_##type)); \ 5210 } while (0) 5211 5212 xqc_dma->size = QMC_ALIGN(sizeof(struct qm_eqc)) + 5213 QMC_ALIGN(sizeof(struct qm_aeqc)) + 5214 QMC_ALIGN(sizeof(struct qm_sqc)) + 5215 QMC_ALIGN(sizeof(struct qm_cqc)); 5216 xqc_dma->va = dma_alloc_coherent(dev, xqc_dma->size, 5217 &xqc_dma->dma, GFP_KERNEL); 5218 if (!xqc_dma->va) 5219 return -ENOMEM; 5220 5221 QM_XQC_BUF_INIT(xqc_buf, eqc); 5222 QM_XQC_BUF_INIT(xqc_buf, aeqc); 5223 QM_XQC_BUF_INIT(xqc_buf, sqc); 5224 QM_XQC_BUF_INIT(xqc_buf, cqc); 5225 5226 return 0; 5227 } 5228 5229 static int hisi_qm_memory_init(struct hisi_qm *qm) 5230 { 5231 struct device *dev = &qm->pdev->dev; 5232 int ret, total_func; 5233 size_t off = 0; 5234 5235 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { 5236 total_func = pci_sriov_get_totalvfs(qm->pdev) + 1; 5237 qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL); 5238 if (!qm->factor) 5239 return -ENOMEM; 5240 5241 /* Only the PF value needs to be initialized */ 5242 qm->factor[0].func_qos = QM_QOS_MAX_VAL; 5243 } 5244 5245 #define QM_INIT_BUF(qm, type, num) do { \ 5246 (qm)->type = ((qm)->qdma.va + (off)); \ 5247 (qm)->type##_dma = (qm)->qdma.dma + (off); \ 5248 off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \ 5249 } while (0) 5250 5251 idr_init(&qm->qp_idr); 5252 qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); 5253 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) + 5254 QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) + 5255 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + 5256 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); 5257 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, 5258 GFP_ATOMIC); 5259 dev_dbg(dev, "allocate qm dma buf size=%zx)\n", 
qm->qdma.size); 5260 if (!qm->qdma.va) { 5261 ret = -ENOMEM; 5262 goto err_destroy_idr; 5263 } 5264 5265 QM_INIT_BUF(qm, eqe, qm->eq_depth); 5266 QM_INIT_BUF(qm, aeqe, qm->aeq_depth); 5267 QM_INIT_BUF(qm, sqc, qm->qp_num); 5268 QM_INIT_BUF(qm, cqc, qm->qp_num); 5269 5270 ret = hisi_qm_alloc_rsv_buf(qm); 5271 if (ret) 5272 goto err_free_qdma; 5273 5274 ret = hisi_qp_alloc_memory(qm); 5275 if (ret) 5276 goto err_free_reserve_buf; 5277 5278 return 0; 5279 5280 err_free_reserve_buf: 5281 hisi_qm_free_rsv_buf(qm); 5282 err_free_qdma: 5283 dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); 5284 err_destroy_idr: 5285 idr_destroy(&qm->qp_idr); 5286 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 5287 kfree(qm->factor); 5288 5289 return ret; 5290 } 5291 5292 /** 5293 * hisi_qm_init() - Initialize configures about qm. 5294 * @qm: The qm needing init. 5295 * 5296 * This function init qm, then we can call hisi_qm_start to put qm into work. 5297 */ 5298 int hisi_qm_init(struct hisi_qm *qm) 5299 { 5300 struct pci_dev *pdev = qm->pdev; 5301 struct device *dev = &pdev->dev; 5302 int ret; 5303 5304 hisi_qm_pre_init(qm); 5305 5306 ret = hisi_qm_pci_init(qm); 5307 if (ret) 5308 return ret; 5309 5310 ret = qm_irqs_register(qm); 5311 if (ret) 5312 goto err_pci_init; 5313 5314 if (qm->fun_type == QM_HW_PF) { 5315 /* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */ 5316 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); 5317 qm_disable_clock_gate(qm); 5318 ret = qm_dev_mem_reset(qm); 5319 if (ret) { 5320 dev_err(dev, "failed to reset device memory\n"); 5321 goto err_irq_register; 5322 } 5323 } 5324 5325 if (qm->mode == UACCE_MODE_SVA) { 5326 ret = qm_alloc_uacce(qm); 5327 if (ret < 0) 5328 dev_warn(dev, "fail to alloc uacce (%d)\n", ret); 5329 } 5330 5331 ret = hisi_qm_memory_init(qm); 5332 if (ret) 5333 goto err_alloc_uacce; 5334 5335 ret = hisi_qm_init_work(qm); 5336 if (ret) 5337 goto err_free_qm_memory; 5338 5339 qm_cmd_init(qm); 5340 5341 return 0; 5342 5343 err_free_qm_memory: 5344 hisi_qm_memory_uninit(qm); 5345 err_alloc_uacce: 5346 qm_remove_uacce(qm); 5347 err_irq_register: 5348 qm_irqs_unregister(qm); 5349 err_pci_init: 5350 hisi_qm_pci_uninit(qm); 5351 return ret; 5352 } 5353 EXPORT_SYMBOL_GPL(hisi_qm_init); 5354 5355 /** 5356 * hisi_qm_get_dfx_access() - Try to get dfx access. 5357 * @qm: pointer to accelerator device. 5358 * 5359 * Try to get dfx access, then user can get message. 5360 * 5361 * If device is in suspended, return failure, otherwise 5362 * bump up the runtime PM usage counter. 5363 */ 5364 int hisi_qm_get_dfx_access(struct hisi_qm *qm) 5365 { 5366 struct device *dev = &qm->pdev->dev; 5367 5368 if (pm_runtime_suspended(dev)) { 5369 dev_info(dev, "can not read/write - device in suspended.\n"); 5370 return -EAGAIN; 5371 } 5372 5373 return qm_pm_get_sync(qm); 5374 } 5375 EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access); 5376 5377 /** 5378 * hisi_qm_put_dfx_access() - Put dfx access. 5379 * @qm: pointer to accelerator device. 5380 * 5381 * Put dfx access, drop runtime PM usage counter. 5382 */ 5383 void hisi_qm_put_dfx_access(struct hisi_qm *qm) 5384 { 5385 qm_pm_put_sync(qm); 5386 } 5387 EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access); 5388 5389 /** 5390 * hisi_qm_pm_init() - Initialize qm runtime PM. 5391 * @qm: pointer to accelerator device. 5392 * 5393 * Function that initialize qm runtime PM. 
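 * Runtime PM is only enabled when the device advertises QM_SUPPORT_RPM.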
5394 */ 5395 void hisi_qm_pm_init(struct hisi_qm *qm) 5396 { 5397 struct device *dev = &qm->pdev->dev; 5398 5399 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) 5400 return; 5401 5402 pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY); 5403 pm_runtime_use_autosuspend(dev); 5404 pm_runtime_put_noidle(dev); 5405 } 5406 EXPORT_SYMBOL_GPL(hisi_qm_pm_init); 5407 5408 /** 5409 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM. 5410 * @qm: pointer to accelerator device. 5411 * 5412 * Function that uninitialize qm runtime PM. 5413 */ 5414 void hisi_qm_pm_uninit(struct hisi_qm *qm) 5415 { 5416 struct device *dev = &qm->pdev->dev; 5417 5418 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) 5419 return; 5420 5421 pm_runtime_get_noresume(dev); 5422 pm_runtime_dont_use_autosuspend(dev); 5423 } 5424 EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit); 5425 5426 static int qm_prepare_for_suspend(struct hisi_qm *qm) 5427 { 5428 struct pci_dev *pdev = qm->pdev; 5429 int ret; 5430 u32 val; 5431 5432 ret = qm->ops->set_msi(qm, false); 5433 if (ret) { 5434 pci_err(pdev, "failed to disable MSI before suspending!\n"); 5435 return ret; 5436 } 5437 5438 /* shutdown OOO register */ 5439 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, 5440 qm->io_base + ACC_MASTER_GLOBAL_CTRL); 5441 5442 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, 5443 val, 5444 (val == ACC_MASTER_TRANS_RETURN_RW), 5445 POLL_PERIOD, POLL_TIMEOUT); 5446 if (ret) { 5447 pci_emerg(pdev, "Bus lock! Please reset system.\n"); 5448 return ret; 5449 } 5450 5451 ret = qm_set_pf_mse(qm, false); 5452 if (ret) 5453 pci_err(pdev, "failed to disable MSE before suspending!\n"); 5454 5455 return ret; 5456 } 5457 5458 static int qm_rebuild_for_resume(struct hisi_qm *qm) 5459 { 5460 struct pci_dev *pdev = qm->pdev; 5461 int ret; 5462 5463 ret = qm_set_pf_mse(qm, true); 5464 if (ret) { 5465 pci_err(pdev, "failed to enable MSE after resuming!\n"); 5466 return ret; 5467 } 5468 5469 ret = qm->ops->set_msi(qm, true); 5470 if (ret) { 5471 pci_err(pdev, "failed to enable MSI after resuming!\n"); 5472 return ret; 5473 } 5474 5475 ret = qm_dev_hw_init(qm); 5476 if (ret) { 5477 pci_err(pdev, "failed to init device after resuming\n"); 5478 return ret; 5479 } 5480 5481 qm_cmd_init(qm); 5482 hisi_qm_dev_err_init(qm); 5483 /* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */ 5484 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); 5485 qm_disable_clock_gate(qm); 5486 ret = qm_dev_mem_reset(qm); 5487 if (ret) 5488 pci_err(pdev, "failed to reset device memory\n"); 5489 5490 return ret; 5491 } 5492 5493 /** 5494 * hisi_qm_suspend() - Runtime suspend of given device. 5495 * @dev: device to suspend. 5496 * 5497 * Function that suspend the device. 5498 */ 5499 int hisi_qm_suspend(struct device *dev) 5500 { 5501 struct pci_dev *pdev = to_pci_dev(dev); 5502 struct hisi_qm *qm = pci_get_drvdata(pdev); 5503 int ret; 5504 5505 pci_info(pdev, "entering suspended state\n"); 5506 5507 ret = hisi_qm_stop(qm, QM_NORMAL); 5508 if (ret) { 5509 pci_err(pdev, "failed to stop qm(%d)\n", ret); 5510 return ret; 5511 } 5512 5513 ret = qm_prepare_for_suspend(qm); 5514 if (ret) 5515 pci_err(pdev, "failed to prepare suspended(%d)\n", ret); 5516 5517 return ret; 5518 } 5519 EXPORT_SYMBOL_GPL(hisi_qm_suspend); 5520 5521 /** 5522 * hisi_qm_resume() - Runtime resume of given device. 5523 * @dev: device to resume. 5524 * 5525 * Function that resume the device. 
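 * It rebuilds the PF configuration (MSE, MSI and device init) and then restarts the QM.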
5526 */ 5527 int hisi_qm_resume(struct device *dev) 5528 { 5529 struct pci_dev *pdev = to_pci_dev(dev); 5530 struct hisi_qm *qm = pci_get_drvdata(pdev); 5531 int ret; 5532 5533 pci_info(pdev, "resuming from suspend state\n"); 5534 5535 ret = qm_rebuild_for_resume(qm); 5536 if (ret) { 5537 pci_err(pdev, "failed to rebuild resume(%d)\n", ret); 5538 return ret; 5539 } 5540 5541 ret = hisi_qm_start(qm); 5542 if (ret) { 5543 if (qm_check_dev_error(qm)) { 5544 pci_info(pdev, "failed to start qm due to device error, device will be reset!\n"); 5545 return 0; 5546 } 5547 5548 pci_err(pdev, "failed to start qm(%d)!\n", ret); 5549 } 5550 5551 return ret; 5552 } 5553 EXPORT_SYMBOL_GPL(hisi_qm_resume); 5554 5555 MODULE_LICENSE("GPL v2"); 5556 MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>"); 5557 MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver"); 5558