// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <asm/page.h>
#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uacce.h>
#include <linux/uaccess.h>
#include <uapi/misc/uacce/hisi_qm.h>
#include <linux/hisi_acc_qm.h>
#include "qm_common.h"

/* eq/aeq irq enable */
#define QM_VF_AEQ_INT_SOURCE		0x0
#define QM_VF_AEQ_INT_MASK		0x4
#define QM_VF_EQ_INT_SOURCE		0x8
#define QM_VF_EQ_INT_MASK		0xc

#define QM_IRQ_VECTOR_MASK		GENMASK(15, 0)
#define QM_IRQ_TYPE_MASK		GENMASK(15, 0)
#define QM_IRQ_TYPE_SHIFT		16
#define QM_ABN_IRQ_TYPE_MASK		GENMASK(7, 0)

/* mailbox */
#define QM_MB_PING_ALL_VFS		0xffff
#define QM_MB_STATUS_MASK		GENMASK(12, 9)

/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT		0
#define QM_SQ_PAGE_SIZE_SHIFT		4
#define QM_SQ_BUF_SIZE_SHIFT		8
#define QM_SQ_SQE_SIZE_SHIFT		12
#define QM_SQ_PRIORITY_SHIFT		0
#define QM_SQ_ORDERS_SHIFT		4
#define QM_SQ_TYPE_SHIFT		8
#define QM_QC_PASID_ENABLE		0x1
#define QM_QC_PASID_ENABLE_SHIFT	7

#define QM_SQ_TYPE_MASK			GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc)		((le16_to_cpu((sqc).w11) >> 6) & 0x1)
#define QM_SQC_DISABLE_QP		(1U << 6)
#define QM_XQC_RANDOM_DATA		0xaaaa

/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT		0
#define QM_CQ_PAGE_SIZE_SHIFT		4
#define QM_CQ_BUF_SIZE_SHIFT		8
#define QM_CQ_CQE_SIZE_SHIFT		12
#define QM_CQ_PHASE_SHIFT		0
#define QM_CQ_FLAG_SHIFT		1

#define QM_CQE_PHASE(cqe)		(le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE			4
#define QM_CQ_TAIL_IDX(cqc)		((le16_to_cpu((cqc).w11) >> 6) & 0x1)

/* eqc shift */
#define QM_EQE_AEQE_SIZE		(2UL << 12)
#define QM_EQC_PHASE_SHIFT		16

#define QM_EQE_PHASE(dw0)		(((dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK			GENMASK(15, 0)

#define QM_AEQE_PHASE(dw0)		(((dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT		17
#define QM_AEQE_TYPE_MASK		0xf
#define QM_AEQE_CQN_MASK		GENMASK(15, 0)
#define QM_CQ_OVERFLOW			0
#define QM_EQ_OVERFLOW			1
#define QM_CQE_ERROR			2

#define QM_XQ_DEPTH_SHIFT		16
#define QM_XQ_DEPTH_MASK		GENMASK(15, 0)

#define QM_DOORBELL_CMD_SQ		0
#define QM_DOORBELL_CMD_CQ		1
#define QM_DOORBELL_CMD_EQ		2
#define QM_DOORBELL_CMD_AEQ		3

#define QM_DOORBELL_BASE_V1		0x340
#define QM_DB_CMD_SHIFT_V1		16
#define QM_DB_INDEX_SHIFT_V1		32
#define QM_DB_PRIORITY_SHIFT_V1		48
#define QM_PAGE_SIZE			0x0034
#define QM_QP_DB_INTERVAL		0x10000
#define QM_DB_TIMEOUT_CFG		0x100074
#define QM_DB_TIMEOUT_SET		0x1fffff

#define QM_MEM_START_INIT		0x100040
#define QM_MEM_INIT_DONE		0x100044
#define QM_VFT_CFG_RDY			0x10006c
#define QM_VFT_CFG_OP_WR		0x100058
#define QM_VFT_CFG_TYPE			0x10005c
#define QM_VFT_CFG			0x100060
#define QM_VFT_CFG_OP_ENABLE		0x100054
#define QM_PM_CTRL			0x100148
#define QM_IDLE_DISABLE			BIT(9)

#define QM_SUB_VERSION_ID		0x210

#define QM_VFT_CFG_DATA_L		0x100064
#define QM_VFT_CFG_DATA_H		0x100068
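
/*
 * Editorial note: the following QM_SQC_VFT_*/QM_CQC_VFT_* macros are the
 * fields of the 64-bit VFT entry written through QM_VFT_CFG_DATA_L/H above;
 * see qm_vft_data_cfg() below for how they are combined per VFT type.
 */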

#define QM_SQC_VFT_BUF_SIZE		(7ULL << 8)
#define QM_SQC_VFT_SQC_SIZE		(5ULL << 12)
#define QM_SQC_VFT_INDEX_NUMBER		(1ULL << 16)
#define QM_SQC_VFT_START_SQN_SHIFT	28
#define QM_SQC_VFT_VALID		(1ULL << 44)
#define QM_SQC_VFT_SQN_SHIFT		45
#define QM_CQC_VFT_BUF_SIZE		(7ULL << 8)
#define QM_CQC_VFT_SQC_SIZE		(5ULL << 12)
#define QM_CQC_VFT_INDEX_NUMBER		(1ULL << 16)
#define QM_CQC_VFT_VALID		(1ULL << 28)

#define QM_SQC_VFT_BASE_SHIFT_V2	28
#define QM_SQC_VFT_BASE_MASK_V2		GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2		45
#define QM_SQC_VFT_NUM_MASK_V2		GENMASK(9, 0)
#define QM_MAX_QC_TYPE			2

#define QM_ABNORMAL_INT_SOURCE		0x100000
#define QM_ABNORMAL_INT_MASK		0x100004
#define QM_ABNORMAL_INT_MASK_VALUE	0x7fff
#define QM_ABNORMAL_INT_STATUS		0x100008
#define QM_ABNORMAL_INT_SET		0x10000c
#define QM_ABNORMAL_INF00		0x100010
#define QM_FIFO_OVERFLOW_TYPE		0xc0
#define QM_FIFO_OVERFLOW_TYPE_SHIFT	6
#define QM_FIFO_OVERFLOW_VF		0x3f
#define QM_FIFO_OVERFLOW_QP_SHIFT	16
#define QM_ABNORMAL_INF01		0x100014
#define QM_DB_TIMEOUT_TYPE		0xc0
#define QM_DB_TIMEOUT_TYPE_SHIFT	6
#define QM_DB_TIMEOUT_VF		0x3f
#define QM_DB_TIMEOUT_QP_SHIFT		16
#define QM_ABNORMAL_INF02		0x100018
#define QM_AXI_POISON_ERR		BIT(22)
#define QM_RAS_CE_ENABLE		0x1000ec
#define QM_RAS_FE_ENABLE		0x1000f0
#define QM_RAS_NFE_ENABLE		0x1000f4
#define QM_RAS_CE_THRESHOLD		0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ		1
#define QM_OOO_SHUTDOWN_SEL		0x1040f8
#define QM_AXI_RRESP_ERR		BIT(0)
#define QM_DB_TIMEOUT			BIT(10)
#define QM_OF_FIFO_OF			BIT(11)
#define QM_RAS_AXI_ERROR		(BIT(0) | BIT(1) | BIT(12))

#define QM_RESET_WAIT_TIMEOUT		400
#define QM_PEH_VENDOR_ID		0x1000d8
#define ACC_VENDOR_ID_VALUE		0x5a5a
#define QM_PEH_DFX_INFO0		0x1000fc
#define QM_PEH_DFX_INFO1		0x100100
#define QM_PEH_DFX_MASK			(BIT(0) | BIT(2))
#define QM_PEH_MSI_FINISH_MASK		GENMASK(19, 16)
#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT	3
#define ACC_PEH_MSI_DISABLE		GENMASK(31, 0)
#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN	0x1
#define ACC_MASTER_TRANS_RETURN_RW	3
#define ACC_MASTER_TRANS_RETURN		0x300150
#define ACC_MASTER_GLOBAL_CTRL		0x300000
#define ACC_AM_CFG_PORT_WR_EN		0x30001c
#define ACC_AM_ROB_ECC_INT_STS		0x300104
#define ACC_ROB_ECC_ERR_MULTPL		BIT(1)
#define QM_MSI_CAP_ENABLE		BIT(16)

/* interfunction communication */
#define QM_IFC_READY_STATUS		0x100128
#define QM_IFC_INT_SET_P		0x100130
#define QM_IFC_INT_CFG			0x100134
#define QM_IFC_INT_SOURCE_P		0x100138
#define QM_IFC_INT_SOURCE_V		0x0020
#define QM_IFC_INT_MASK			0x0024
#define QM_IFC_INT_STATUS		0x0028
#define QM_IFC_INT_SET_V		0x002C
#define QM_PF2VF_PF_W			0x104700
#define QM_VF2PF_PF_R			0x104800
#define QM_VF2PF_VF_W			0x320
#define QM_PF2VF_VF_R			0x380
#define QM_IFC_SEND_ALL_VFS		GENMASK(6, 0)
#define QM_IFC_INT_SOURCE_CLR		GENMASK(63, 0)
#define QM_IFC_INT_SOURCE_MASK		BIT(0)
#define QM_IFC_INT_DISABLE		BIT(0)
#define QM_IFC_INT_STATUS_MASK		BIT(0)
#define QM_IFC_INT_SET_MASK		BIT(0)
#define QM_WAIT_DST_ACK			10
#define QM_MAX_PF_WAIT_COUNT		10
#define QM_MAX_VF_WAIT_COUNT		40
#define QM_VF_RESET_WAIT_US		20000
#define QM_VF_RESET_WAIT_CNT		3000
#define QM_VF2PF_REG_SIZE		4
#define QM_IFC_CMD_MASK			GENMASK(31, 0)
#define QM_IFC_DATA_SHIFT		32
#define QM_VF_RESET_WAIT_TIMEOUT_US	\
	(QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)

#define POLL_PERIOD			10
#define POLL_TIMEOUT			1000
#define WAIT_PERIOD_US_MAX		200
#define WAIT_PERIOD_US_MIN		100
#define MAX_WAIT_COUNTS			1000
#define QM_CACHE_WB_START		0x204
#define QM_CACHE_WB_DONE		0x208
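
/*
 * Editorial note: QM_FUNC_CAPS_REG below is the function capability register.
 * Its low byte (QM_CAPBILITY_VERSION) holds the capability version, while the
 * bits above it carry feature flags; see qm_cap_info_comm[] further down.
 */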

#define QM_FUNC_CAPS_REG		0x3100
#define QM_CAPBILITY_VERSION		GENMASK(7, 0)

#define PCI_BAR_2			2
#define PCI_BAR_4			4
#define QMC_ALIGN(sz)			ALIGN(sz, 32)

#define QM_DBG_READ_LEN			256
#define QM_PCI_COMMAND_INVALID		~0
#define QM_RESET_STOP_TX_OFFSET		1
#define QM_RESET_STOP_RX_OFFSET		2

#define WAIT_PERIOD			20
#define REMOVE_WAIT_DELAY		10

#define QM_QOS_PARAM_NUM		2
#define QM_QOS_MAX_VAL			1000
#define QM_QOS_RATE			100
#define QM_QOS_EXPAND_RATE		1000
#define QM_SHAPER_CIR_B_MASK		GENMASK(7, 0)
#define QM_SHAPER_CIR_U_MASK		GENMASK(10, 8)
#define QM_SHAPER_CIR_S_MASK		GENMASK(14, 11)
#define QM_SHAPER_FACTOR_CIR_U_SHIFT	8
#define QM_SHAPER_FACTOR_CIR_S_SHIFT	11
#define QM_SHAPER_FACTOR_CBS_B_SHIFT	15
#define QM_SHAPER_FACTOR_CBS_S_SHIFT	19
#define QM_SHAPER_CBS_B			1
#define QM_SHAPER_VFT_OFFSET		6
#define QM_QOS_MIN_ERROR_RATE		5
#define QM_SHAPER_MIN_CBS_S		8
#define QM_QOS_TICK			0x300U
#define QM_QOS_DIVISOR_CLK		0x1f40U
#define QM_QOS_MAX_CIR_B		200
#define QM_QOS_MIN_CIR_B		100
#define QM_QOS_MAX_CIR_U		6
#define QM_AUTOSUSPEND_DELAY		3000

/* abnormal status value for stopping queue */
#define QM_STOP_QUEUE_FAIL		1
#define QM_DUMP_SQC_FAIL		3
#define QM_DUMP_CQC_FAIL		4
#define QM_FINISH_WAIT			5

#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
	(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
	((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
	((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
	((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
	((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_SQC_W13(priority, orders, alg_type) \
	(((priority) << QM_SQ_PRIORITY_SHIFT) | \
	((orders) << QM_SQ_ORDERS_SHIFT) | \
	(((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))

#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
	(((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
	((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
	((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
	((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
	((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

enum vft_type {
	SQC_VFT = 0,
	CQC_VFT,
	SHAPER_VFT,
};

enum qm_alg_type {
	ALG_TYPE_0,
	ALG_TYPE_1,
};

enum qm_ifc_cmd {
	QM_PF_FLR_PREPARE = 0x01,
	QM_PF_SRST_PREPARE,
	QM_PF_RESET_DONE,
	QM_VF_PREPARE_DONE,
	QM_VF_PREPARE_FAIL,
	QM_VF_START_DONE,
	QM_VF_START_FAIL,
	QM_PF_SET_QOS,
	QM_VF_GET_QOS,
};

enum qm_basic_type {
	QM_TOTAL_QP_NUM_CAP = 0x0,
	QM_FUNC_MAX_QP_CAP,
	QM_XEQ_DEPTH_CAP,
	QM_QP_DEPTH_CAP,
	QM_EQ_IRQ_TYPE_CAP,
	QM_AEQ_IRQ_TYPE_CAP,
	QM_ABN_IRQ_TYPE_CAP,
	QM_PF2VF_IRQ_TYPE_CAP,
	QM_PF_IRQ_NUM_CAP,
	QM_VF_IRQ_NUM_CAP,
};

enum qm_cap_table_type {
	QM_CAP_VF = 0x0,
	QM_AEQE_NUM,
	QM_SCQE_NUM,
	QM_EQ_IRQ,
	QM_AEQ_IRQ,
	QM_ABNORMAL_IRQ,
	QM_MB_IRQ,
	MAX_IRQ_NUM,
	EXT_BAR_INDEX,
};

static const struct hisi_qm_cap_query_info qm_cap_query_info[] = {
	{QM_CAP_VF, "QM_CAP_VF ", 0x3100, 0x0, 0x0, 0x6F01},
	{QM_AEQE_NUM, "QM_AEQE_NUM ", 0x3104, 0x800, 0x4000800, 0x4000800},
	{QM_SCQE_NUM, "QM_SCQE_NUM ",
					0x3108, 0x4000400, 0x4000400, 0x4000400},
	{QM_EQ_IRQ, "QM_EQ_IRQ ", 0x310c, 0x10000, 0x10000, 0x10000},
	{QM_AEQ_IRQ, "QM_AEQ_IRQ ", 0x3110, 0x0, 0x10001, 0x10001},
	{QM_ABNORMAL_IRQ, "QM_ABNORMAL_IRQ ", 0x3114, 0x0, 0x10003, 0x10003},
	{QM_MB_IRQ, "QM_MB_IRQ ", 0x3118, 0x0, 0x0, 0x10002},
	{MAX_IRQ_NUM, "MAX_IRQ_NUM ", 0x311c, 0x10001, 0x40002, 0x40003},
	{EXT_BAR_INDEX, "EXT_BAR_INDEX ", 0x3120, 0x0, 0x0, 0x14},
};

static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
	{QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
	{QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
	{QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1},
	{QM_SUPPORT_STOP_FUNC, 0x3100, 0, BIT(10), 0x0, 0x0, 0x1},
	{QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
	{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
	{QM_SUPPORT_DAE, 0x3100, 0, BIT(15), 0x0, 0x0, 0x0},
};

static const struct hisi_qm_cap_info qm_cap_info_pf[] = {
	{QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1},
};

static const struct hisi_qm_cap_info qm_cap_info_vf[] = {
	{QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0},
};

static const struct hisi_qm_cap_info qm_basic_info[] = {
	{QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400},
	{QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400},
	{QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(31, 0), 0x800, 0x4000800, 0x4000800},
	{QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
	{QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000},
	{QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001},
	{QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003},
	{QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002},
	{QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4},
	{QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
};

struct qm_mailbox {
	__le16 w0;
	__le16 queue_num;
	__le32 base_l;
	__le32 base_h;
	__le32 rsvd;
};

struct qm_doorbell {
	__le16 queue_num;
	__le16 cmd;
	__le16 index;
	__le16 priority;
};

struct hisi_qm_resource {
	struct hisi_qm *qm;
	int distance;
	struct list_head list;
};

/**
 * struct qm_hw_err - Structure describing the device errors
 * @list: hardware error list
 * @timestamp: timestamp when the error occurred
 */
struct qm_hw_err {
	struct list_head list;
	unsigned long long timestamp;
};

struct hisi_qm_hw_ops {
	int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
	void (*qm_db)(struct hisi_qm *qm, u16 qn,
		      u8 cmd, u16 index, u8 priority);
	int (*debug_init)(struct hisi_qm *qm);
	void (*hw_error_init)(struct hisi_qm *qm);
	void (*hw_error_uninit)(struct hisi_qm *qm);
	enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
	int (*set_msi)(struct hisi_qm *qm, bool set);

	/* (u64)msg = (u32)data << 32 | (enum qm_ifc_cmd)cmd */
	int (*set_ifc_begin)(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num);
	void (*set_ifc_end)(struct hisi_qm *qm);
	int (*get_ifc)(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num);
};

struct hisi_qm_hw_error {
	u32 int_msk;
	const char *msg;
};

static const struct hisi_qm_hw_error qm_hw_error[] = {
	{ .int_msk = BIT(0), .msg = "qm_axi_rresp" },
	{ .int_msk = BIT(1), .msg = "qm_axi_bresp" },
	{ .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
	{ .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
"qm_ecc_1bit" }, 424 { .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" }, 425 { .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" }, 426 { .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" }, 427 { .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" }, 428 { .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" }, 429 { .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" }, 430 { .int_msk = BIT(10), .msg = "qm_db_timeout" }, 431 { .int_msk = BIT(11), .msg = "qm_of_fifo_of" }, 432 { .int_msk = BIT(12), .msg = "qm_db_random_invalid" }, 433 { .int_msk = BIT(13), .msg = "qm_mailbox_timeout" }, 434 { .int_msk = BIT(14), .msg = "qm_flr_timeout" }, 435 }; 436 437 static const char * const qm_db_timeout[] = { 438 "sq", "cq", "eq", "aeq", 439 }; 440 441 static const char * const qm_fifo_overflow[] = { 442 "cq", "eq", "aeq", 443 }; 444 445 struct qm_typical_qos_table { 446 u32 start; 447 u32 end; 448 u32 val; 449 }; 450 451 /* the qos step is 100 */ 452 static struct qm_typical_qos_table shaper_cir_s[] = { 453 {100, 100, 4}, 454 {200, 200, 3}, 455 {300, 500, 2}, 456 {600, 1000, 1}, 457 {1100, 100000, 0}, 458 }; 459 460 static struct qm_typical_qos_table shaper_cbs_s[] = { 461 {100, 200, 9}, 462 {300, 500, 11}, 463 {600, 1000, 12}, 464 {1100, 10000, 16}, 465 {10100, 25000, 17}, 466 {25100, 50000, 18}, 467 {50100, 100000, 19} 468 }; 469 470 static void qm_irqs_unregister(struct hisi_qm *qm); 471 static int qm_reset_device(struct hisi_qm *qm); 472 int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp, 473 unsigned int device) 474 { 475 struct pci_dev *pdev; 476 u32 n, q_num; 477 int ret; 478 479 if (!val) 480 return -EINVAL; 481 482 pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL); 483 if (!pdev) { 484 q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2); 485 pr_info("No device found currently, suppose queue number is %u\n", 486 q_num); 487 } else { 488 if (pdev->revision == QM_HW_V1) 489 q_num = QM_QNUM_V1; 490 else 491 q_num = QM_QNUM_V2; 492 493 pci_dev_put(pdev); 494 } 495 496 ret = kstrtou32(val, 10, &n); 497 if (ret || n < QM_MIN_QNUM || n > q_num) 498 return -EINVAL; 499 500 return param_set_int(val, kp); 501 } 502 EXPORT_SYMBOL_GPL(hisi_qm_q_num_set); 503 504 static u32 qm_get_hw_error_status(struct hisi_qm *qm) 505 { 506 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS); 507 } 508 509 static u32 qm_get_dev_err_status(struct hisi_qm *qm) 510 { 511 return qm->err_ini->get_dev_hw_err_status(qm); 512 } 513 514 /* Check if the error causes the master ooo block */ 515 static bool qm_check_dev_error(struct hisi_qm *qm) 516 { 517 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); 518 u32 err_status; 519 520 if (pf_qm->fun_type == QM_HW_VF) 521 return false; 522 523 err_status = qm_get_hw_error_status(pf_qm); 524 if (err_status & pf_qm->err_info.qm_err.shutdown_mask) 525 return true; 526 527 if (pf_qm->err_ini->dev_is_abnormal) 528 return pf_qm->err_ini->dev_is_abnormal(pf_qm); 529 530 return false; 531 } 532 533 static int qm_wait_reset_finish(struct hisi_qm *qm) 534 { 535 int delay = 0; 536 537 /* All reset requests need to be queued for processing */ 538 while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { 539 msleep(++delay); 540 if (delay > QM_RESET_WAIT_TIMEOUT) 541 return -EBUSY; 542 } 543 544 return 0; 545 } 546 547 static int qm_reset_prepare_ready(struct hisi_qm *qm) 548 { 549 struct pci_dev *pdev = qm->pdev; 550 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); 551 552 /* 553 * PF and VF on host doesnot support resetting at the 554 * same time on Kunpeng920. 

static int qm_reset_prepare_ready(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	/*
	 * On Kunpeng920, the PF and the VFs on the host do not support
	 * being reset at the same time.
	 */
	if (qm->ver < QM_HW_V3)
		return qm_wait_reset_finish(pf_qm);

	return qm_wait_reset_finish(qm);
}

static void qm_reset_bit_clear(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));

	if (qm->ver < QM_HW_V3)
		clear_bit(QM_RESETTING, &pf_qm->misc_ctl);

	clear_bit(QM_RESETTING, &qm->misc_ctl);
}

static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
			   u64 base, u16 queue, bool op)
{
	mailbox->w0 = cpu_to_le16((cmd) |
		      ((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
		      (0x1 << QM_MB_BUSY_SHIFT));
	mailbox->queue_num = cpu_to_le16(queue);
	mailbox->base_l = cpu_to_le32(lower_32_bits(base));
	mailbox->base_h = cpu_to_le32(upper_32_bits(base));
	mailbox->rsvd = 0;
}

/* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
					  val, !((val >> QM_MB_BUSY_SHIFT) & 0x1),
					  POLL_PERIOD, POLL_TIMEOUT);
}
EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);

/* 128 bit should be written to hardware at one time to trigger a mailbox */
static void qm_mb_write(struct hisi_qm *qm, const void *src)
{
	void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;

#if IS_ENABLED(CONFIG_ARM64)
	unsigned long tmp0 = 0, tmp1 = 0;
#endif

	if (!IS_ENABLED(CONFIG_ARM64)) {
		memcpy_toio(fun_base, src, 16);
		dma_wmb();
		return;
	}

#if IS_ENABLED(CONFIG_ARM64)
	asm volatile("ldp %0, %1, %3\n"
		     "stp %0, %1, %2\n"
		     "dmb oshst\n"
		     : "=&r" (tmp0),
		       "=&r" (tmp1),
		       "+Q" (*((char __iomem *)fun_base))
		     : "Q" (*((char *)src))
		     : "memory");
#endif
}

static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
{
	int ret;
	u32 val;

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
		ret = -EBUSY;
		goto mb_busy;
	}

	qm_mb_write(qm, mailbox);

	if (unlikely(hisi_qm_wait_mb_ready(qm))) {
		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
		ret = -ETIMEDOUT;
		goto mb_busy;
	}

	val = readl(qm->io_base + QM_MB_CMD_SEND_BASE);
	if (val & QM_MB_STATUS_MASK) {
		dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n");
		ret = -EIO;
		goto mb_busy;
	}

	return 0;

mb_busy:
	atomic64_inc(&qm->debug.dfx.mb_err_cnt);
	return ret;
}

int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
	       bool op)
{
	struct qm_mailbox mailbox;
	int ret;

	qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);

	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	mutex_unlock(&qm->mailbox_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_mb);
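
/*
 * Editorial note on the mailbox handshake implemented above: wait for the
 * busy bit to clear, write the whole 16-byte command in one shot via
 * qm_mb_write(), wait for busy to clear again, then check QM_MB_STATUS_MASK
 * in the command register for a hardware-reported error.
 */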

/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
{
	struct qm_mailbox mailbox;
	dma_addr_t xqc_dma;
	void *tmp_xqc;
	size_t size;
	int ret;

	switch (cmd) {
	case QM_MB_CMD_SQC:
		size = sizeof(struct qm_sqc);
		tmp_xqc = qm->xqc_buf.sqc;
		xqc_dma = qm->xqc_buf.sqc_dma;
		break;
	case QM_MB_CMD_CQC:
		size = sizeof(struct qm_cqc);
		tmp_xqc = qm->xqc_buf.cqc;
		xqc_dma = qm->xqc_buf.cqc_dma;
		break;
	case QM_MB_CMD_EQC:
		size = sizeof(struct qm_eqc);
		tmp_xqc = qm->xqc_buf.eqc;
		xqc_dma = qm->xqc_buf.eqc_dma;
		break;
	case QM_MB_CMD_AEQC:
		size = sizeof(struct qm_aeqc);
		tmp_xqc = qm->xqc_buf.aeqc;
		xqc_dma = qm->xqc_buf.aeqc_dma;
		break;
	default:
		dev_err(&qm->pdev->dev, "unknown mailbox cmd %u\n", cmd);
		return -EINVAL;
	}

	/* Setting xqc will fail if master OOO is blocked. */
	if (qm_check_dev_error(qm)) {
		dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stopped!\n");
		return -EIO;
	}

	mutex_lock(&qm->mailbox_lock);
	if (!op)
		memcpy(tmp_xqc, xqc, size);

	qm_mb_pre_init(&mailbox, cmd, xqc_dma, qp_id, op);
	ret = qm_mb_nolock(qm, &mailbox);
	if (!ret && op)
		memcpy(xqc, tmp_xqc, size);

	mutex_unlock(&qm->mailbox_lock);

	return ret;
}

static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	u64 doorbell;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V1) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);

	writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
}

static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	void __iomem *io_base = qm->io_base;
	u16 randata = 0;
	u64 doorbell;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		io_base = qm->db_io_base + (u64)qn * qm->db_interval +
			  QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2) |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, io_base);
}

static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
		qn, cmd, index);

	qm->ops->qm_db(qm, qn, cmd, index, priority);
}

static void qm_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	/* if qm enables clock gating in Kunpeng930, qos will be inaccurate. */
	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + QM_PM_CTRL);
	val |= QM_IDLE_DISABLE;
	writel(val, qm->io_base + QM_PM_CTRL);
}

static int qm_dev_mem_reset(struct hisi_qm *qm)
{
	u32 val;

	writel(0x1, qm->io_base + QM_MEM_START_INIT);
	return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
					  val & BIT(0), POLL_PERIOD,
					  POLL_TIMEOUT);
}
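
/*
 * Editorial note: the capability helpers below return fixed per-version
 * values for V1/V2 hardware and, from V3 on, can read the live value from
 * the device registers described by the cap tables above.
 */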

/**
 * hisi_qm_get_hw_info() - Get device information.
 * @qm: The qm which wants to get information.
 * @info_table: Array for storing device information.
 * @index: Index in info_table.
 * @is_read: Whether read from reg, 0: not support read from reg.
 *
 * This function returns device information the caller needs.
 */
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
			const struct hisi_qm_cap_info *info_table,
			u32 index, bool is_read)
{
	u32 val;

	switch (qm->ver) {
	case QM_HW_V1:
		return info_table[index].v1_val;
	case QM_HW_V2:
		return info_table[index].v2_val;
	default:
		if (!is_read)
			return info_table[index].v3_val;

		val = readl(qm->io_base + info_table[index].offset);
		return (val >> info_table[index].shift) & info_table[index].mask;
	}
}
EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info);

u32 hisi_qm_get_cap_value(struct hisi_qm *qm,
			  const struct hisi_qm_cap_query_info *info_table,
			  u32 index, bool is_read)
{
	u32 val;

	switch (qm->ver) {
	case QM_HW_V1:
		return info_table[index].v1_val;
	case QM_HW_V2:
		return info_table[index].v2_val;
	default:
		if (!is_read)
			return info_table[index].v3_val;

		val = readl(qm->io_base + info_table[index].offset);
		return val;
	}
}
EXPORT_SYMBOL_GPL(hisi_qm_get_cap_value);

static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits,
			     u16 *high_bits, enum qm_basic_type type)
{
	u32 depth;

	depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver);
	*low_bits = depth & QM_XQ_DEPTH_MASK;
	*high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK;
}

int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
		     u32 dev_algs_size)
{
	struct device *dev = &qm->pdev->dev;
	char *algs, *ptr;
	int i;

	if (!qm->uacce)
		return 0;

	if (dev_algs_size >= QM_DEV_ALG_MAX_LEN) {
		dev_err(dev, "algs size %u is equal or larger than %d.\n",
			dev_algs_size, QM_DEV_ALG_MAX_LEN);
		return -EINVAL;
	}

	algs = devm_kzalloc(dev, QM_DEV_ALG_MAX_LEN, GFP_KERNEL);
	if (!algs)
		return -ENOMEM;

	for (i = 0; i < dev_algs_size; i++)
		if (alg_msk & dev_algs[i].alg_msk)
			strcat(algs, dev_algs[i].alg);

	ptr = strrchr(algs, '\n');
	if (ptr)
		*ptr = '\0';

	qm->uacce->algs = algs;

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_set_algs);

static u32 qm_get_irq_num(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_PF)
		return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver);

	return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver);
}

static int qm_pm_get_sync(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return 0;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "failed to get_sync(%d).\n", ret);
		return ret;
	}

	return 0;
}

static void qm_pm_put_sync(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (!test_bit(QM_SUPPORT_RPM, &qm->caps))
		return;

	pm_runtime_put_autosuspend(dev);
}
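
/*
 * Editorial note: completion polling below relies on the CQE phase bit.
 * The driver toggles its expected phase each time the CQ wraps (see
 * qm_cq_head_update()), so a CQE is treated as valid only while
 * QM_CQE_PHASE(cqe) matches qp->qp_status.cqc_phase.
 */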

static void qm_cq_head_update(struct hisi_qp *qp)
{
	if (qp->qp_status.cq_head == qp->cq_depth - 1) {
		qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
		qp->qp_status.cq_head = 0;
	} else {
		qp->qp_status.cq_head++;
	}
}

static void qm_poll_req_cb(struct hisi_qp *qp)
{
	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
	struct hisi_qm *qm = qp->qm;

	while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
		dma_rmb();
		qp->req_cb(qp, qp->sqe + qm->sqe_size *
			   le16_to_cpu(cqe->sq_head));
		qm_cq_head_update(qp);
		cqe = qp->cqe + qp->qp_status.cq_head;
		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
		      qp->qp_status.cq_head, 0);
		atomic_dec(&qp->qp_status.used);

		cond_resched();
	}

	/* set c_flag */
	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
}

static void qm_work_process(struct work_struct *work)
{
	struct hisi_qm_poll_data *poll_data =
		container_of(work, struct hisi_qm_poll_data, work);
	struct hisi_qm *qm = poll_data->qm;
	u16 eqe_num = poll_data->eqe_num;
	struct hisi_qp *qp;
	int i;

	for (i = eqe_num - 1; i >= 0; i--) {
		qp = &qm->qp_array[poll_data->qp_finish_id[i]];
		if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
			continue;

		if (qp->event_cb) {
			qp->event_cb(qp);
			continue;
		}

		if (likely(qp->req_cb))
			qm_poll_req_cb(qp);
	}
}

static void qm_get_complete_eqe_num(struct hisi_qm *qm)
{
	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
	struct hisi_qm_poll_data *poll_data = NULL;
	u32 dw0 = le32_to_cpu(eqe->dw0);
	u16 eq_depth = qm->eq_depth;
	u16 cqn, eqe_num = 0;

	if (QM_EQE_PHASE(dw0) != qm->status.eqc_phase) {
		atomic64_inc(&qm->debug.dfx.err_irq_cnt);
		qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
		return;
	}

	cqn = dw0 & QM_EQE_CQN_MASK;
	if (unlikely(cqn >= qm->qp_num))
		return;
	poll_data = &qm->poll_data[cqn];

	/*
	 * Note: the loop must run while the phase still matches; the original
	 * "!=" condition here could never iterate after the guard above.
	 */
	while (QM_EQE_PHASE(dw0) == qm->status.eqc_phase) {
		poll_data->qp_finish_id[eqe_num] = dw0 & QM_EQE_CQN_MASK;
		eqe_num++;

		if (qm->status.eq_head == eq_depth - 1) {
			qm->status.eqc_phase = !qm->status.eqc_phase;
			eqe = qm->eqe;
			qm->status.eq_head = 0;
		} else {
			eqe++;
			qm->status.eq_head++;
		}

		if (eqe_num == (eq_depth >> 1) - 1)
			break;

		dw0 = le32_to_cpu(eqe->dw0);
	}

	poll_data->eqe_num = eqe_num;
	queue_work(qm->wq, &poll_data->work);
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
}

static irqreturn_t qm_eq_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;

	/* Get qp id of completed tasks and re-enable the interrupt */
	qm_get_complete_eqe_num(qm);

	return IRQ_HANDLED;
}

static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_STATUS);
	val &= QM_IFC_INT_STATUS_MASK;
	if (!val)
		return IRQ_NONE;

	if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) {
		dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n");
		return IRQ_HANDLED;
	}

	schedule_work(&qm->cmd_process);

	return IRQ_HANDLED;
}

static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
{
	u32 *addr;

	if (qp->is_in_kernel)
		return;

	addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
	*addr = 1;

	/* make sure setup is completed */
	smp_wmb();
}

static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
{
	struct hisi_qp *qp = &qm->qp_array[qp_id];

	qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET);
	hisi_qm_stop_qp(qp);
	qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET);
}
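
/*
 * Editorial note: qm_reset_function() below is the recovery path used by the
 * AEQ thread on EQ overflow. It stops and restarts the whole QM, with the
 * reset bits serialized by qm_reset_prepare_ready()/qm_reset_bit_clear().
 */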

static void qm_reset_function(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	if (qm_check_dev_error(qm))
		return;

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		dev_err(dev, "reset function not ready\n");
		return;
	}

	ret = hisi_qm_stop(qm, QM_DOWN);
	if (ret) {
		dev_err(dev, "failed to stop qm when reset function\n");
		goto clear_bit;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		dev_err(dev, "failed to start qm when reset function\n");

clear_bit:
	qm_reset_bit_clear(qm);
}

static irqreturn_t qm_aeq_thread(int irq, void *data)
{
	struct hisi_qm *qm = data;
	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
	u32 dw0 = le32_to_cpu(aeqe->dw0);
	u16 aeq_depth = qm->aeq_depth;
	u32 type, qp_id;

	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);

	while (QM_AEQE_PHASE(dw0) == qm->status.aeqc_phase) {
		type = (dw0 >> QM_AEQE_TYPE_SHIFT) & QM_AEQE_TYPE_MASK;
		qp_id = dw0 & QM_AEQE_CQN_MASK;

		switch (type) {
		case QM_EQ_OVERFLOW:
			dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
			qm_reset_function(qm);
			return IRQ_HANDLED;
		case QM_CQ_OVERFLOW:
			dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
				qp_id);
			fallthrough;
		case QM_CQE_ERROR:
			qm_disable_qp(qm, qp_id);
			break;
		default:
			dev_err(&qm->pdev->dev, "unknown error type %u\n",
				type);
			break;
		}

		if (qm->status.aeq_head == aeq_depth - 1) {
			qm->status.aeqc_phase = !qm->status.aeqc_phase;
			aeqe = qm->aeqe;
			qm->status.aeq_head = 0;
		} else {
			aeqe++;
			qm->status.aeq_head++;
		}
		dw0 = le32_to_cpu(aeqe->dw0);
	}

	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);

	return IRQ_HANDLED;
}

static void qm_init_qp_status(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;

	qp_status->sq_tail = 0;
	qp_status->cq_head = 0;
	qp_status->cqc_phase = true;
	atomic_set(&qp_status->used, 0);
}

static void qm_init_prefetch(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 page_type = 0x0;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	switch (PAGE_SIZE) {
	case SZ_4K:
		page_type = 0x0;
		break;
	case SZ_16K:
		page_type = 0x1;
		break;
	case SZ_64K:
		page_type = 0x2;
		break;
	default:
		dev_err(dev, "system page size is not supported: %lu, default set to 4KB",
			PAGE_SIZE);
	}

	writel(page_type, qm->io_base + QM_PAGE_SIZE);
}
1186 * the formula: 1187 * IR = X Mbps if ir = 1 means IR = 100 Mbps, if ir = 10000 means = 10Gbps 1188 * 1189 * IR_b * (2 ^ IR_u) * 8000 1190 * IR(Mbps) = ------------------------- 1191 * Tick * (2 ^ IR_s) 1192 */ 1193 static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s) 1194 { 1195 return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) / 1196 (QM_QOS_TICK * (1 << cir_s)); 1197 } 1198 1199 static u32 acc_shaper_calc_cbs_s(u32 ir) 1200 { 1201 int table_size = ARRAY_SIZE(shaper_cbs_s); 1202 int i; 1203 1204 for (i = 0; i < table_size; i++) { 1205 if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end) 1206 return shaper_cbs_s[i].val; 1207 } 1208 1209 return QM_SHAPER_MIN_CBS_S; 1210 } 1211 1212 static u32 acc_shaper_calc_cir_s(u32 ir) 1213 { 1214 int table_size = ARRAY_SIZE(shaper_cir_s); 1215 int i; 1216 1217 for (i = 0; i < table_size; i++) { 1218 if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end) 1219 return shaper_cir_s[i].val; 1220 } 1221 1222 return 0; 1223 } 1224 1225 static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor) 1226 { 1227 u32 cir_b, cir_u, cir_s, ir_calc; 1228 u32 error_rate; 1229 1230 factor->cbs_s = acc_shaper_calc_cbs_s(ir); 1231 cir_s = acc_shaper_calc_cir_s(ir); 1232 1233 for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) { 1234 for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) { 1235 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s); 1236 1237 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; 1238 if (error_rate <= QM_QOS_MIN_ERROR_RATE) { 1239 factor->cir_b = cir_b; 1240 factor->cir_u = cir_u; 1241 factor->cir_s = cir_s; 1242 return 0; 1243 } 1244 } 1245 } 1246 1247 return -EINVAL; 1248 } 1249 1250 static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, 1251 u32 number, struct qm_shaper_factor *factor) 1252 { 1253 u64 tmp = 0; 1254 1255 if (number > 0) { 1256 switch (type) { 1257 case SQC_VFT: 1258 if (qm->ver == QM_HW_V1) { 1259 tmp = QM_SQC_VFT_BUF_SIZE | 1260 QM_SQC_VFT_SQC_SIZE | 1261 QM_SQC_VFT_INDEX_NUMBER | 1262 QM_SQC_VFT_VALID | 1263 (u64)base << QM_SQC_VFT_START_SQN_SHIFT; 1264 } else { 1265 tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT | 1266 QM_SQC_VFT_VALID | 1267 (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT; 1268 } 1269 break; 1270 case CQC_VFT: 1271 if (qm->ver == QM_HW_V1) { 1272 tmp = QM_CQC_VFT_BUF_SIZE | 1273 QM_CQC_VFT_SQC_SIZE | 1274 QM_CQC_VFT_INDEX_NUMBER | 1275 QM_CQC_VFT_VALID; 1276 } else { 1277 tmp = QM_CQC_VFT_VALID; 1278 } 1279 break; 1280 case SHAPER_VFT: 1281 if (factor) { 1282 tmp = factor->cir_b | 1283 (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) | 1284 (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) | 1285 (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) | 1286 (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT); 1287 } 1288 break; 1289 /* 1290 * Note: The current logic only needs to handle the above three types 1291 * If new types are added, they need to be supplemented here, 1292 * otherwise undefined behavior may occur. 
1293 */ 1294 default: 1295 break; 1296 } 1297 } 1298 1299 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); 1300 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); 1301 } 1302 1303 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type, 1304 u32 fun_num, u32 base, u32 number) 1305 { 1306 struct qm_shaper_factor *factor = NULL; 1307 unsigned int val; 1308 int ret; 1309 1310 if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 1311 factor = &qm->factor[fun_num]; 1312 1313 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 1314 val & BIT(0), POLL_PERIOD, 1315 POLL_TIMEOUT); 1316 if (ret) 1317 return ret; 1318 1319 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); 1320 writel(type, qm->io_base + QM_VFT_CFG_TYPE); 1321 if (type == SHAPER_VFT) 1322 fun_num |= base << QM_SHAPER_VFT_OFFSET; 1323 1324 writel(fun_num, qm->io_base + QM_VFT_CFG); 1325 1326 qm_vft_data_cfg(qm, type, base, number, factor); 1327 1328 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); 1329 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); 1330 1331 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 1332 val & BIT(0), POLL_PERIOD, 1333 POLL_TIMEOUT); 1334 } 1335 1336 static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num) 1337 { 1338 u32 qos = qm->factor[fun_num].func_qos; 1339 int ret, i; 1340 1341 ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]); 1342 if (ret) { 1343 dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n"); 1344 return ret; 1345 } 1346 writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG); 1347 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) { 1348 /* The base number of queue reuse for different alg type */ 1349 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1); 1350 if (ret) 1351 return ret; 1352 } 1353 1354 return 0; 1355 } 1356 1357 /* The config should be conducted after qm_dev_mem_reset() */ 1358 static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base, 1359 u32 number) 1360 { 1361 int ret, i; 1362 1363 for (i = SQC_VFT; i <= CQC_VFT; i++) { 1364 ret = qm_set_vft_common(qm, i, fun_num, base, number); 1365 if (ret) 1366 return ret; 1367 } 1368 1369 /* init default shaper qos val */ 1370 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { 1371 ret = qm_shaper_init_vft(qm, fun_num); 1372 if (ret) 1373 goto back_sqc_cqc; 1374 } 1375 1376 return 0; 1377 back_sqc_cqc: 1378 for (i = SQC_VFT; i <= CQC_VFT; i++) 1379 qm_set_vft_common(qm, i, fun_num, 0, 0); 1380 1381 return ret; 1382 } 1383 1384 static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) 1385 { 1386 u64 sqc_vft; 1387 int ret; 1388 1389 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); 1390 if (ret) 1391 return ret; 1392 1393 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | 1394 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); 1395 *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); 1396 *number = (QM_SQC_VFT_NUM_MASK_V2 & 1397 (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; 1398 1399 return 0; 1400 } 1401 1402 static void qm_hw_error_init_v1(struct hisi_qm *qm) 1403 { 1404 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); 1405 } 1406 1407 static void qm_hw_error_cfg(struct hisi_qm *qm) 1408 { 1409 struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err; 1410 1411 qm->error_mask = qm_err->nfe | qm_err->ce | qm_err->fe; 1412 /* clear QM hw residual error source */ 1413 writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE); 1414 1415 /* configure error type */ 1416 

static void qm_hw_error_cfg(struct hisi_qm *qm)
{
	struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;

	qm->error_mask = qm_err->nfe | qm_err->ce | qm_err->fe;
	/* clear QM hw residual error source */
	writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);

	/* configure error type */
	writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE);
	writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
	writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
	writel(qm_err->fe, qm->io_base + QM_RAS_FE_ENABLE);
}

static void qm_hw_error_init_v2(struct hisi_qm *qm)
{
	u32 irq_unmask;

	qm_hw_error_cfg(qm);

	irq_unmask = ~qm->error_mask;
	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
{
	u32 irq_mask = qm->error_mask;

	irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_init_v3(struct hisi_qm *qm)
{
	u32 irq_unmask;

	qm_hw_error_cfg(qm);

	/* enable close master ooo when hardware error happened */
	writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);

	irq_unmask = ~qm->error_mask;
	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
}

static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
{
	u32 irq_mask = qm->error_mask;

	irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
	writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK);

	/* disable close master ooo when hardware error happened */
	writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
}

static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
{
	const struct hisi_qm_hw_error *err;
	struct device *dev = &qm->pdev->dev;
	u32 reg_val, type, vf_num, qp_id;
	int i;

	for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
		err = &qm_hw_error[i];
		if (!(err->int_msk & error_status))
			continue;

		dev_err(dev, "%s [error status=0x%x] found\n",
			err->msg, err->int_msk);

		if (err->int_msk & QM_DB_TIMEOUT) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
			type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
			       QM_DB_TIMEOUT_TYPE_SHIFT;
			vf_num = reg_val & QM_DB_TIMEOUT_VF;
			qp_id = reg_val >> QM_DB_TIMEOUT_QP_SHIFT;
			dev_err(dev, "qm %s doorbell timeout in function %u qp %u\n",
				qm_db_timeout[type], vf_num, qp_id);
		} else if (err->int_msk & QM_OF_FIFO_OF) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
			type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
			       QM_FIFO_OVERFLOW_TYPE_SHIFT;
			vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
			qp_id = reg_val >> QM_FIFO_OVERFLOW_QP_SHIFT;
			if (type < ARRAY_SIZE(qm_fifo_overflow))
				dev_err(dev, "qm %s fifo overflow in function %u qp %u\n",
					qm_fifo_overflow[type], vf_num, qp_id);
			else
				dev_err(dev, "unknown error type\n");
		} else if (err->int_msk & QM_AXI_RRESP_ERR) {
			reg_val = readl(qm->io_base + QM_ABNORMAL_INF02);
			if (reg_val & QM_AXI_POISON_ERR)
				dev_err(dev, "qm axi poison error happened\n");
		}
	}
}
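
/*
 * Editorial note: the handler below returns ACC_ERR_NEED_RESET when any
 * logged error is in reset_mask (reporting of that error is muted until
 * the device recovers); otherwise it clears the error source and reports
 * ACC_ERR_RECOVERED.
 */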

static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
	struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
	u32 error_status;

	error_status = qm_get_hw_error_status(qm);
	if (error_status & qm->error_mask) {
		if (error_status & QM_ECC_MBIT)
			qm->err_status.is_qm_ecc_mbit = true;

		qm_log_hw_error(qm, error_status);
		if (error_status & qm_err->reset_mask) {
			/* Disable the same error reporting until device is recovered. */
			writel(qm_err->nfe & (~error_status), qm->io_base + QM_RAS_NFE_ENABLE);
			return ACC_ERR_NEED_RESET;
		}

		/* Clear error source if not need reset. */
		writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
		writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
		writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE);
	}

	return ACC_ERR_RECOVERED;
}

static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
{
	struct qm_mailbox mailbox;
	int ret;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mailbox);
	if (ret)
		goto err_unlock;

	*msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
	       ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);

err_unlock:
	mutex_unlock(&qm->mailbox_lock);
	return ret;
}

static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
{
	u32 val;

	if (qm->fun_type == QM_HW_PF)
		writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);

	val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
	val |= QM_IFC_INT_SOURCE_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
}

static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
{
	struct device *dev = &qm->pdev->dev;
	enum qm_ifc_cmd cmd;
	int ret;

	ret = qm->ops->get_ifc(qm, &cmd, NULL, vf_id);
	if (ret) {
		dev_err(dev, "failed to get command from VF(%u)!\n", vf_id);
		return;
	}

	switch (cmd) {
	case QM_VF_PREPARE_FAIL:
		dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
		break;
	case QM_VF_START_FAIL:
		dev_err(dev, "failed to start VF(%u)!\n", vf_id);
		break;
	case QM_VF_PREPARE_DONE:
	case QM_VF_START_DONE:
		break;
	default:
		dev_err(dev, "unsupported command(0x%x) sent by VF(%u)!\n", cmd, vf_id);
		break;
	}
}

static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 vfs_num = qm->vfs_num;
	int cnt = 0;
	int ret = 0;
	u64 val;
	u32 i;

	if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
		return 0;

	while (true) {
		val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
		/* Break once all VFs have sent their commands to the PF. */
		if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1))
			break;

		if (++cnt > QM_MAX_PF_WAIT_COUNT) {
			ret = -EBUSY;
			break;
		}

		msleep(QM_WAIT_DST_ACK);
	}

	/* PF check VFs msg */
	for (i = 1; i <= vfs_num; i++) {
		if (val & BIT(i))
			qm_handle_vf_msg(qm, i);
		else
			dev_err(dev, "VF(%u) not ping PF!\n", i);
	}

	/* PF clear interrupt to ack VFs */
	qm_clear_cmd_interrupt(qm, val);

	return ret;
}

static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
{
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_CFG);
	val &= ~QM_IFC_SEND_ALL_VFS;
	val |= fun_num;
	writel(val, qm->io_base + QM_IFC_INT_CFG);

	val = readl(qm->io_base + QM_IFC_INT_SET_P);
	val |= QM_IFC_INT_SET_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SET_P);
}
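
/*
 * Editorial note: counterpart of qm_trigger_vf_interrupt() above. The PF
 * uses the *_P set register to signal VFs; a VF uses the *_V set register
 * below to signal the PF.
 */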

static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + QM_IFC_INT_SET_V);
	val |= QM_IFC_INT_SET_MASK;
	writel(val, qm->io_base + QM_IFC_INT_SET_V);
}

static int qm_ping_single_vf(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
{
	struct device *dev = &qm->pdev->dev;
	int cnt = 0;
	u64 val;
	int ret;

	ret = qm->ops->set_ifc_begin(qm, cmd, data, fun_num);
	if (ret) {
		dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
		goto err_unlock;
	}

	qm_trigger_vf_interrupt(qm, fun_num);
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* If the VF has acked, the PF notified it successfully. */
		if (!(val & BIT(fun_num)))
			goto err_unlock;

		if (++cnt > QM_MAX_PF_WAIT_COUNT) {
			dev_err(dev, "failed to get response from VF(%u)!\n", fun_num);
			ret = -ETIMEDOUT;
			break;
		}
	}

err_unlock:
	qm->ops->set_ifc_end(qm);
	return ret;
}
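
/*
 * Editorial note: broadcast variant of qm_ping_single_vf(). It targets
 * QM_IFC_SEND_ALL_VFS and waits for every VF's bit in QM_IFC_READY_STATUS
 * to clear.
 */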

static int qm_ping_all_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
	struct device *dev = &qm->pdev->dev;
	u32 vfs_num = qm->vfs_num;
	u64 val = 0;
	int cnt = 0;
	int ret;
	u32 i;

	ret = qm->ops->set_ifc_begin(qm, cmd, 0, QM_MB_PING_ALL_VFS);
	if (ret) {
		dev_err(dev, "failed to send command(0x%x) to all vfs!\n", cmd);
		qm->ops->set_ifc_end(qm);
		return ret;
	}

	qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readq(qm->io_base + QM_IFC_READY_STATUS);
		/* If all VFs acked, PF notifies VFs successfully. */
		if (!(val & GENMASK(vfs_num, 1))) {
			qm->ops->set_ifc_end(qm);
			return 0;
		}

		if (++cnt > QM_MAX_PF_WAIT_COUNT)
			break;
	}

	qm->ops->set_ifc_end(qm);

	/* Check which VFs failed to respond in time. */
	for (i = 1; i <= vfs_num; i++) {
		if (val & BIT(i))
			dev_err(dev, "failed to get response from VF(%u)!\n", i);
	}

	return -ETIMEDOUT;
}

static int qm_ping_pf(struct hisi_qm *qm, enum qm_ifc_cmd cmd)
{
	int cnt = 0;
	u32 val;
	int ret;

	ret = qm->ops->set_ifc_begin(qm, cmd, 0, 0);
	if (ret) {
		dev_err(&qm->pdev->dev, "failed to send command(0x%x) to PF!\n", cmd);
		goto unlock;
	}

	qm_trigger_pf_interrupt(qm);
	/* Waiting for PF response */
	while (true) {
		msleep(QM_WAIT_DST_ACK);
		val = readl(qm->io_base + QM_IFC_INT_SET_V);
		if (!(val & QM_IFC_INT_STATUS_MASK))
			break;

		if (++cnt > QM_MAX_VF_WAIT_COUNT) {
			ret = -ETIMEDOUT;
			break;
		}
	}

unlock:
	qm->ops->set_ifc_end(qm);

	return ret;
}

static int qm_drain_qm(struct hisi_qm *qm)
{
	return hisi_qm_mb(qm, QM_MB_CMD_FLUSH_QM, 0, 0, 0);
}

static int qm_stop_qp(struct hisi_qp *qp)
{
	return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
}

static int qm_set_msi(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;

	if (set) {
		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
				       0);
	} else {
		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
				       ACC_PEH_MSI_DISABLE);
		if (qm->err_status.is_qm_ecc_mbit ||
		    qm->err_status.is_dev_ecc_mbit)
			return 0;

		mdelay(1);
		if (readl(qm->io_base + QM_PEH_DFX_INFO0))
			return -EFAULT;
	}

	return 0;
}

static void qm_wait_msi_finish(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	u32 cmd = ~0;
	int cnt = 0;
	u32 val;
	int ret;

	while (true) {
		pci_read_config_dword(pdev, pdev->msi_cap +
				      PCI_MSI_PENDING_64, &cmd);
		if (!cmd)
			break;

		if (++cnt > MAX_WAIT_COUNTS) {
			pci_warn(pdev, "failed to empty MSI PENDING!\n");
			break;
		}

		udelay(1);
	}

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
					 val, !(val & QM_PEH_DFX_MASK),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret)
		pci_warn(pdev, "failed to empty PEH MSI!\n");

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
					 val, !(val & QM_PEH_MSI_FINISH_MASK),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret)
		pci_warn(pdev, "failed to finish MSI operation!\n");
}

static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
{
	struct pci_dev *pdev = qm->pdev;
	int ret = -ETIMEDOUT;
	u32 cmd, i;

	pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
	if (set)
		cmd |= QM_MSI_CAP_ENABLE;
	else
		cmd &= ~QM_MSI_CAP_ENABLE;

	pci_write_config_dword(pdev, pdev->msi_cap, cmd);
	if (set) {
		for (i = 0; i < MAX_WAIT_COUNTS; i++) {
			pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
			if (cmd & QM_MSI_CAP_ENABLE)
				return 0;

			udelay(1);
		}
	} else {
		udelay(WAIT_PERIOD_US_MIN);
		qm_wait_msi_finish(qm);
		ret = 0;
	}

	return ret;
}
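
/*
 * Editorial note: two IFC transports follow. The v3 ops pass the
 * (cmd | data << 32) message through the QM_MB_CMD_SRC/QM_MB_CMD_DST
 * mailbox, while the v4 ops write it directly to the dedicated
 * PF2VF/VF2PF registers.
 */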

static int qm_set_ifc_begin_v3(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
{
	struct qm_mailbox mailbox;
	u64 msg;

	msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;

	qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, msg, fun_num, 0);
	mutex_lock(&qm->mailbox_lock);
	return qm_mb_nolock(qm, &mailbox);
}

static void qm_set_ifc_end_v3(struct hisi_qm *qm)
{
	mutex_unlock(&qm->mailbox_lock);
}

static int qm_get_ifc_v3(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
{
	u64 msg;
	int ret;

	ret = qm_get_mb_cmd(qm, &msg, fun_num);
	if (ret)
		return ret;

	*cmd = msg & QM_IFC_CMD_MASK;

	if (data)
		*data = msg >> QM_IFC_DATA_SHIFT;

	return 0;
}

static int qm_set_ifc_begin_v4(struct hisi_qm *qm, enum qm_ifc_cmd cmd, u32 data, u32 fun_num)
{
	uintptr_t offset;
	u64 msg;

	if (qm->fun_type == QM_HW_PF)
		offset = QM_PF2VF_PF_W;
	else
		offset = QM_VF2PF_VF_W;

	msg = cmd | (u64)data << QM_IFC_DATA_SHIFT;

	mutex_lock(&qm->ifc_lock);
	writeq(msg, qm->io_base + offset);

	return 0;
}

static void qm_set_ifc_end_v4(struct hisi_qm *qm)
{
	mutex_unlock(&qm->ifc_lock);
}

static u64 qm_get_ifc_pf(struct hisi_qm *qm, u32 fun_num)
{
	uintptr_t offset;

	offset = QM_VF2PF_PF_R + QM_VF2PF_REG_SIZE * fun_num;

	return (u64)readl(qm->io_base + offset);
}

static u64 qm_get_ifc_vf(struct hisi_qm *qm)
{
	return readq(qm->io_base + QM_PF2VF_VF_R);
}

static int qm_get_ifc_v4(struct hisi_qm *qm, enum qm_ifc_cmd *cmd, u32 *data, u32 fun_num)
{
	u64 msg;

	if (qm->fun_type == QM_HW_PF)
		msg = qm_get_ifc_pf(qm, fun_num);
	else
		msg = qm_get_ifc_vf(qm);

	*cmd = msg & QM_IFC_CMD_MASK;

	if (data)
		*data = msg >> QM_IFC_DATA_SHIFT;

	return 0;
}

static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
	.qm_db = qm_db_v1,
	.hw_error_init = qm_hw_error_init_v1,
	.set_msi = qm_set_msi,
};

static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
	.get_vft = qm_get_vft_v2,
	.qm_db = qm_db_v2,
	.hw_error_init = qm_hw_error_init_v2,
	.hw_error_uninit = qm_hw_error_uninit_v2,
	.hw_error_handle = qm_hw_error_handle_v2,
	.set_msi = qm_set_msi,
};

static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
	.get_vft = qm_get_vft_v2,
	.qm_db = qm_db_v2,
	.hw_error_init = qm_hw_error_init_v3,
	.hw_error_uninit = qm_hw_error_uninit_v3,
	.hw_error_handle = qm_hw_error_handle_v2,
	.set_msi = qm_set_msi_v3,
	.set_ifc_begin = qm_set_ifc_begin_v3,
	.set_ifc_end = qm_set_ifc_end_v3,
	.get_ifc = qm_get_ifc_v3,
};

static const struct hisi_qm_hw_ops qm_hw_ops_v4 = {
	.get_vft = qm_get_vft_v2,
	.qm_db = qm_db_v2,
	.hw_error_init = qm_hw_error_init_v3,
	.hw_error_uninit = qm_hw_error_uninit_v3,
	.hw_error_handle = qm_hw_error_handle_v2,
	.set_msi = qm_set_msi_v3,
	.set_ifc_begin = qm_set_ifc_begin_v4,
	.set_ifc_end = qm_set_ifc_end_v4,
	.get_ifc = qm_get_ifc_v4,
};

static void *qm_get_avail_sqe(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;
	u16 sq_tail = qp_status->sq_tail;

	if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1))
		return NULL;

	return qp->sqe + sq_tail * qp->qm->sqe_size;
}

static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
{
	u64 *addr;

	/* Use the last 64 bits of DUS to store the reset status. */
	addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET;
	*addr = 0;
}

static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
{
	struct device *dev = &qm->pdev->dev;
	struct hisi_qp *qp;
	int qp_id;

	if (atomic_read(&qm->status.flags) == QM_STOP) {
		dev_info_ratelimited(dev, "failed to create qp as qm is stopped!\n");
		return ERR_PTR(-EPERM);
	}

	if (qm->qp_in_used == qm->qp_num) {
		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
				     qm->qp_num);
		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
		return ERR_PTR(-EBUSY);
	}

	qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
	if (qp_id < 0) {
		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
				     qm->qp_num);
		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
		return ERR_PTR(-EBUSY);
	}

	qp = &qm->qp_array[qp_id];
	hisi_qm_unset_hw_reset(qp);
	memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth);

	qp->event_cb = NULL;
	qp->req_cb = NULL;
	qp->qp_id = qp_id;
	qp->alg_type = alg_type;
	qp->is_in_kernel = true;
	qm->qp_in_used++;

	return qp;
}

/**
 * hisi_qm_create_qp() - Create a queue pair from qm.
 * @qm: The qm we create a qp from.
 * @alg_type: Accelerator specific algorithm type in sqc.
 *
 * Return created qp, negative error code if failed.
 */
static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
{
	struct hisi_qp *qp;
	int ret;

	ret = qm_pm_get_sync(qm);
	if (ret)
		return ERR_PTR(ret);

	down_write(&qm->qps_lock);
	qp = qm_create_qp_nolock(qm, alg_type);
	up_write(&qm->qps_lock);

	if (IS_ERR(qp))
		qm_pm_put_sync(qm);

	return qp;
}
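
/*
 * Editorial note: hisi_qm_create_qp() above takes a runtime-PM reference via
 * qm_pm_get_sync(); hisi_qm_release_qp() below drops it again together with
 * the qp slot.
 */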
 */
static void hisi_qm_release_qp(struct hisi_qp *qp)
{
	struct hisi_qm *qm = qp->qm;

	down_write(&qm->qps_lock);

	qm->qp_in_used--;
	idr_remove(&qm->qp_idr, qp->qp_id);

	up_write(&qm->qps_lock);

	qm_pm_put_sync(qm);
}

static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	struct hisi_qm *qm = qp->qm;
	enum qm_hw_ver ver = qm->ver;
	struct qm_sqc sqc = {0};

	if (ver == QM_HW_V1) {
		sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
		sqc.w8 = cpu_to_le16(qp->sq_depth - 1);
	} else {
		sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
		sqc.w8 = 0; /* rand_qc */
	}
	sqc.w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
	sqc.base_l = cpu_to_le32(lower_32_bits(qp->sqe_dma));
	sqc.base_h = cpu_to_le32(upper_32_bits(qp->sqe_dma));
	sqc.cq_num = cpu_to_le16(qp_id);
	sqc.pasid = cpu_to_le16(pasid);

	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
		sqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
				      QM_QC_PASID_ENABLE_SHIFT);

	return qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 0);
}

static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	struct hisi_qm *qm = qp->qm;
	enum qm_hw_ver ver = qm->ver;
	struct qm_cqc cqc = {0};

	if (ver == QM_HW_V1) {
		cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, QM_QC_CQE_SIZE));
		cqc.w8 = cpu_to_le16(qp->cq_depth - 1);
	} else {
		cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
		cqc.w8 = 0; /* rand_qc */
	}
	/*
	 * Enable the completion interrupt by default, so interrupts are
	 * generated until it is explicitly disabled.
	 */
	cqc.dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
	cqc.base_l = cpu_to_le32(lower_32_bits(qp->cqe_dma));
	cqc.base_h = cpu_to_le32(upper_32_bits(qp->cqe_dma));
	cqc.pasid = cpu_to_le16(pasid);

	if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
		cqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE);

	return qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 0);
}

static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
	int ret;

	qm_init_qp_status(qp);

	ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
	if (ret)
		return ret;

	return qm_cq_ctx_cfg(qp, qp_id, pasid);
}

static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
{
	struct hisi_qm *qm = qp->qm;
	struct device *dev = &qm->pdev->dev;
	int qp_id = qp->qp_id;
	u32 pasid = arg;
	int ret;

	if (atomic_read(&qm->status.flags) == QM_STOP) {
		dev_info_ratelimited(dev, "failed to start qp as qm is stopped!\n");
		return -EPERM;
	}

	ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
	if (ret)
		return ret;

	atomic_set(&qp->qp_status.flags, QP_START);
	dev_dbg(dev, "queue %d started\n", qp_id);

	return 0;
}

/**
 * hisi_qm_start_qp() - Start a qp into running.
 * @qp: The qp we want to start to run.
 * @arg: Accelerator specific argument.
 *
 * After this function, the qp can receive requests from the user. Return 0
 * if successful, or a negative error code on failure.
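 *
 * A minimal in-kernel usage sketch (the qm_list, alg_type and node values
 * are illustrative):
 *
 *	struct hisi_qp *qps[1];
 *	int ret;
 *
 *	ret = hisi_qm_alloc_qps_node(qm_list, 1, alg_type, numa_node_id(), qps);
 *	if (!ret)
 *		ret = hisi_qm_start_qp(qps[0], 0);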
2190 */ 2191 int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) 2192 { 2193 struct hisi_qm *qm = qp->qm; 2194 int ret; 2195 2196 down_write(&qm->qps_lock); 2197 ret = qm_start_qp_nolock(qp, arg); 2198 up_write(&qm->qps_lock); 2199 2200 return ret; 2201 } 2202 EXPORT_SYMBOL_GPL(hisi_qm_start_qp); 2203 2204 /** 2205 * qp_stop_fail_cb() - call request cb. 2206 * @qp: stopped failed qp. 2207 * 2208 * Callback function should be called whether task completed or not. 2209 */ 2210 static void qp_stop_fail_cb(struct hisi_qp *qp) 2211 { 2212 int qp_used = atomic_read(&qp->qp_status.used); 2213 u16 cur_tail = qp->qp_status.sq_tail; 2214 u16 sq_depth = qp->sq_depth; 2215 u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth; 2216 struct hisi_qm *qm = qp->qm; 2217 u16 pos; 2218 int i; 2219 2220 for (i = 0; i < qp_used; i++) { 2221 pos = (i + cur_head) % sq_depth; 2222 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); 2223 atomic_dec(&qp->qp_status.used); 2224 } 2225 } 2226 2227 static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id) 2228 { 2229 struct device *dev = &qm->pdev->dev; 2230 struct qm_sqc sqc; 2231 struct qm_cqc cqc; 2232 int ret, i = 0; 2233 2234 while (++i) { 2235 ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1); 2236 if (ret) { 2237 dev_err_ratelimited(dev, "Failed to dump sqc!\n"); 2238 *state = QM_DUMP_SQC_FAIL; 2239 return ret; 2240 } 2241 2242 ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1); 2243 if (ret) { 2244 dev_err_ratelimited(dev, "Failed to dump cqc!\n"); 2245 *state = QM_DUMP_CQC_FAIL; 2246 return ret; 2247 } 2248 2249 if ((sqc.tail == cqc.tail) && 2250 (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc))) 2251 break; 2252 2253 if (i == MAX_WAIT_COUNTS) { 2254 dev_err(dev, "Fail to empty queue %u!\n", qp_id); 2255 *state = QM_STOP_QUEUE_FAIL; 2256 return -ETIMEDOUT; 2257 } 2258 2259 usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); 2260 } 2261 2262 return 0; 2263 } 2264 2265 /** 2266 * qm_drain_qp() - Drain a qp. 2267 * @qp: The qp we want to drain. 2268 * 2269 * If the device does not support stopping queue by sending mailbox, 2270 * determine whether the queue is cleared by judging the tail pointers of 2271 * sq and cq. 2272 */ 2273 static int qm_drain_qp(struct hisi_qp *qp) 2274 { 2275 struct hisi_qm *qm = qp->qm; 2276 u32 state = 0; 2277 int ret; 2278 2279 /* No need to judge if master OOO is blocked. */ 2280 if (qm_check_dev_error(qm)) 2281 return 0; 2282 2283 /* HW V3 supports drain qp by device */ 2284 if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) { 2285 ret = qm_stop_qp(qp); 2286 if (ret) { 2287 dev_err(&qm->pdev->dev, "Failed to stop qp!\n"); 2288 state = QM_STOP_QUEUE_FAIL; 2289 goto set_dev_state; 2290 } 2291 return ret; 2292 } 2293 2294 ret = qm_wait_qp_empty(qm, &state, qp->qp_id); 2295 if (ret) 2296 goto set_dev_state; 2297 2298 return 0; 2299 2300 set_dev_state: 2301 if (qm->debug.dev_dfx.dev_timeout) 2302 qm->debug.dev_dfx.dev_state = state; 2303 2304 return ret; 2305 } 2306 2307 static void qm_stop_qp_nolock(struct hisi_qp *qp) 2308 { 2309 struct hisi_qm *qm = qp->qm; 2310 struct device *dev = &qm->pdev->dev; 2311 int ret; 2312 2313 /* 2314 * It is allowed to stop and release qp when reset, If the qp is 2315 * stopped when reset but still want to be released then, the 2316 * is_resetting flag should be set negative so that this qp will not 2317 * be restarted after reset. 
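 * In other words, calling qm_stop_qp_nolock() on a qp that is not in the
 * QP_START state only clears is_resetting, so qm_restart() will skip the qp
 * instead of restarting it.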
2318 */ 2319 if (atomic_read(&qp->qp_status.flags) != QP_START) { 2320 qp->is_resetting = false; 2321 return; 2322 } 2323 2324 atomic_set(&qp->qp_status.flags, QP_STOP); 2325 2326 /* V3 supports direct stop function when FLR prepare */ 2327 if (qm->ver < QM_HW_V3 || qm->status.stop_reason == QM_NORMAL) { 2328 ret = qm_drain_qp(qp); 2329 if (ret) 2330 dev_err(dev, "Failed to drain out data for stopping qp(%u)!\n", qp->qp_id); 2331 } 2332 2333 flush_workqueue(qm->wq); 2334 if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used))) 2335 qp_stop_fail_cb(qp); 2336 2337 dev_dbg(dev, "stop queue %u!", qp->qp_id); 2338 } 2339 2340 /** 2341 * hisi_qm_stop_qp() - Stop a qp in qm. 2342 * @qp: The qp we want to stop. 2343 * 2344 * This function is reverse of hisi_qm_start_qp. 2345 */ 2346 void hisi_qm_stop_qp(struct hisi_qp *qp) 2347 { 2348 down_write(&qp->qm->qps_lock); 2349 qm_stop_qp_nolock(qp); 2350 up_write(&qp->qm->qps_lock); 2351 } 2352 EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); 2353 2354 /** 2355 * hisi_qp_send() - Queue up a task in the hardware queue. 2356 * @qp: The qp in which to put the message. 2357 * @msg: The message. 2358 * 2359 * This function will return -EBUSY if qp is currently full, and -EAGAIN 2360 * if qp related qm is resetting. 2361 * 2362 * Note: This function may run with qm_irq_thread and ACC reset at same time. 2363 * It has no race with qm_irq_thread. However, during hisi_qp_send, ACC 2364 * reset may happen, we have no lock here considering performance. This 2365 * causes current qm_db sending fail or can not receive sended sqe. QM 2366 * sync/async receive function should handle the error sqe. ACC reset 2367 * done function should clear used sqe to 0. 2368 */ 2369 int hisi_qp_send(struct hisi_qp *qp, const void *msg) 2370 { 2371 struct hisi_qp_status *qp_status = &qp->qp_status; 2372 u16 sq_tail = qp_status->sq_tail; 2373 u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth; 2374 void *sqe = qm_get_avail_sqe(qp); 2375 2376 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || 2377 atomic_read(&qp->qm->status.flags) == QM_STOP || 2378 qp->is_resetting)) { 2379 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); 2380 return -EAGAIN; 2381 } 2382 2383 if (!sqe) 2384 return -EBUSY; 2385 2386 memcpy(sqe, msg, qp->qm->sqe_size); 2387 2388 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); 2389 atomic_inc(&qp->qp_status.used); 2390 qp_status->sq_tail = sq_tail_next; 2391 2392 return 0; 2393 } 2394 EXPORT_SYMBOL_GPL(hisi_qp_send); 2395 2396 static void hisi_qm_cache_wb(struct hisi_qm *qm) 2397 { 2398 unsigned int val; 2399 2400 if (qm->ver == QM_HW_V1) 2401 return; 2402 2403 writel(0x1, qm->io_base + QM_CACHE_WB_START); 2404 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, 2405 val, val & BIT(0), POLL_PERIOD, 2406 POLL_TIMEOUT)) 2407 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); 2408 } 2409 2410 static void qm_qp_event_notifier(struct hisi_qp *qp) 2411 { 2412 wake_up_interruptible(&qp->uacce_q->wait); 2413 } 2414 2415 /* This function returns free number of qp in qm. 
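 * It is the uacce get_available_instances hook: qp_num minus qp_in_used,
 * sampled under qps_lock.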
*/ 2416 static int hisi_qm_get_available_instances(struct uacce_device *uacce) 2417 { 2418 struct hisi_qm *qm = uacce->priv; 2419 int ret; 2420 2421 down_read(&qm->qps_lock); 2422 ret = qm->qp_num - qm->qp_in_used; 2423 up_read(&qm->qps_lock); 2424 2425 return ret; 2426 } 2427 2428 static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset) 2429 { 2430 int i; 2431 2432 for (i = 0; i < qm->qp_num; i++) 2433 qm_set_qp_disable(&qm->qp_array[i], offset); 2434 } 2435 2436 static int hisi_qm_uacce_get_queue(struct uacce_device *uacce, 2437 unsigned long arg, 2438 struct uacce_queue *q) 2439 { 2440 struct hisi_qm *qm = uacce->priv; 2441 struct hisi_qp *qp; 2442 u8 alg_type = 0; 2443 2444 qp = hisi_qm_create_qp(qm, alg_type); 2445 if (IS_ERR(qp)) 2446 return PTR_ERR(qp); 2447 2448 q->priv = qp; 2449 q->uacce = uacce; 2450 qp->uacce_q = q; 2451 qp->event_cb = qm_qp_event_notifier; 2452 qp->pasid = arg; 2453 qp->is_in_kernel = false; 2454 2455 return 0; 2456 } 2457 2458 static void hisi_qm_uacce_put_queue(struct uacce_queue *q) 2459 { 2460 struct hisi_qp *qp = q->priv; 2461 2462 hisi_qm_release_qp(qp); 2463 } 2464 2465 /* map sq/cq/doorbell to user space */ 2466 static int hisi_qm_uacce_mmap(struct uacce_queue *q, 2467 struct vm_area_struct *vma, 2468 struct uacce_qfile_region *qfr) 2469 { 2470 struct hisi_qp *qp = q->priv; 2471 struct hisi_qm *qm = qp->qm; 2472 resource_size_t phys_base = qm->db_phys_base + 2473 qp->qp_id * qm->db_interval; 2474 size_t sz = vma->vm_end - vma->vm_start; 2475 struct pci_dev *pdev = qm->pdev; 2476 struct device *dev = &pdev->dev; 2477 unsigned long vm_pgoff; 2478 int ret; 2479 2480 switch (qfr->type) { 2481 case UACCE_QFRT_MMIO: 2482 if (qm->ver == QM_HW_V1) { 2483 if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR) 2484 return -EINVAL; 2485 } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { 2486 if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR + 2487 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE)) 2488 return -EINVAL; 2489 } else { 2490 if (sz > qm->db_interval) 2491 return -EINVAL; 2492 } 2493 2494 vm_flags_set(vma, VM_IO); 2495 2496 return remap_pfn_range(vma, vma->vm_start, 2497 phys_base >> PAGE_SHIFT, 2498 sz, pgprot_noncached(vma->vm_page_prot)); 2499 case UACCE_QFRT_DUS: 2500 if (sz != qp->qdma.size) 2501 return -EINVAL; 2502 2503 /* 2504 * dma_mmap_coherent() requires vm_pgoff as 0 2505 * restore vm_pfoff to initial value for mmap() 2506 */ 2507 vm_pgoff = vma->vm_pgoff; 2508 vma->vm_pgoff = 0; 2509 ret = dma_mmap_coherent(dev, vma, qp->qdma.va, 2510 qp->qdma.dma, sz); 2511 vma->vm_pgoff = vm_pgoff; 2512 return ret; 2513 2514 default: 2515 return -EINVAL; 2516 } 2517 } 2518 2519 static int hisi_qm_uacce_start_queue(struct uacce_queue *q) 2520 { 2521 struct hisi_qp *qp = q->priv; 2522 2523 return hisi_qm_start_qp(qp, qp->pasid); 2524 } 2525 2526 static void hisi_qm_uacce_stop_queue(struct uacce_queue *q) 2527 { 2528 struct hisi_qp *qp = q->priv; 2529 struct hisi_qm *qm = qp->qm; 2530 struct qm_dev_dfx *dev_dfx = &qm->debug.dev_dfx; 2531 u32 i = 0; 2532 2533 hisi_qm_stop_qp(qp); 2534 2535 if (!dev_dfx->dev_timeout || !dev_dfx->dev_state) 2536 return; 2537 2538 /* 2539 * After the queue fails to be stopped, 2540 * wait for a period of time before releasing the queue. 
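 * The wait below polls in steps of WAIT_PERIOD milliseconds for up to
 * dev_timeout iterations, then marks the device state as QM_FINISH_WAIT.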
2541 */ 2542 while (++i) { 2543 msleep(WAIT_PERIOD); 2544 2545 /* Since dev_timeout maybe modified, check i >= dev_timeout */ 2546 if (i >= dev_dfx->dev_timeout) { 2547 dev_err(&qm->pdev->dev, "Stop q %u timeout, state %u\n", 2548 qp->qp_id, dev_dfx->dev_state); 2549 dev_dfx->dev_state = QM_FINISH_WAIT; 2550 break; 2551 } 2552 } 2553 } 2554 2555 static int hisi_qm_is_q_updated(struct uacce_queue *q) 2556 { 2557 struct hisi_qp *qp = q->priv; 2558 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; 2559 int updated = 0; 2560 2561 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { 2562 /* make sure to read data from memory */ 2563 dma_rmb(); 2564 qm_cq_head_update(qp); 2565 cqe = qp->cqe + qp->qp_status.cq_head; 2566 updated = 1; 2567 } 2568 2569 return updated; 2570 } 2571 2572 static void qm_set_sqctype(struct uacce_queue *q, u16 type) 2573 { 2574 struct hisi_qm *qm = q->uacce->priv; 2575 struct hisi_qp *qp = q->priv; 2576 2577 down_write(&qm->qps_lock); 2578 qp->alg_type = type; 2579 up_write(&qm->qps_lock); 2580 } 2581 2582 static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd, 2583 unsigned long arg) 2584 { 2585 struct hisi_qp *qp = q->priv; 2586 struct hisi_qp_info qp_info; 2587 struct hisi_qp_ctx qp_ctx; 2588 2589 if (cmd == UACCE_CMD_QM_SET_QP_CTX) { 2590 if (copy_from_user(&qp_ctx, (void __user *)arg, 2591 sizeof(struct hisi_qp_ctx))) 2592 return -EFAULT; 2593 2594 if (qp_ctx.qc_type > QM_MAX_QC_TYPE) 2595 return -EINVAL; 2596 2597 qm_set_sqctype(q, qp_ctx.qc_type); 2598 qp_ctx.id = qp->qp_id; 2599 2600 if (copy_to_user((void __user *)arg, &qp_ctx, 2601 sizeof(struct hisi_qp_ctx))) 2602 return -EFAULT; 2603 2604 return 0; 2605 } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) { 2606 if (copy_from_user(&qp_info, (void __user *)arg, 2607 sizeof(struct hisi_qp_info))) 2608 return -EFAULT; 2609 2610 qp_info.sqe_size = qp->qm->sqe_size; 2611 qp_info.sq_depth = qp->sq_depth; 2612 qp_info.cq_depth = qp->cq_depth; 2613 2614 if (copy_to_user((void __user *)arg, &qp_info, 2615 sizeof(struct hisi_qp_info))) 2616 return -EFAULT; 2617 2618 return 0; 2619 } 2620 2621 return -EINVAL; 2622 } 2623 2624 /** 2625 * qm_hw_err_isolate() - Try to set the isolation status of the uacce device 2626 * according to user's configuration of error threshold. 2627 * @qm: the uacce device 2628 */ 2629 static int qm_hw_err_isolate(struct hisi_qm *qm) 2630 { 2631 struct qm_hw_err *err, *tmp, *hw_err; 2632 struct qm_err_isolate *isolate; 2633 u32 count = 0; 2634 2635 isolate = &qm->isolate_data; 2636 2637 #define SECONDS_PER_HOUR 3600 2638 2639 /* All the hw errs are processed by PF driver */ 2640 if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold) 2641 return 0; 2642 2643 hw_err = kzalloc(sizeof(*hw_err), GFP_KERNEL); 2644 if (!hw_err) 2645 return -ENOMEM; 2646 2647 /* 2648 * Time-stamp every slot AER error. Then check the AER error log when the 2649 * next device AER error occurred. if the device slot AER error count exceeds 2650 * the setting error threshold in one hour, the isolated state will be set 2651 * to true. And the AER error logs that exceed one hour will be cleared. 
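 * For example, with err_threshold set to 3, a new AER error recorded while
 * three earlier errors are still less than one hour old brings count up to
 * the threshold and sets is_isolate to true.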
2652 */ 2653 mutex_lock(&isolate->isolate_lock); 2654 hw_err->timestamp = jiffies; 2655 list_for_each_entry_safe(err, tmp, &isolate->qm_hw_errs, list) { 2656 if ((hw_err->timestamp - err->timestamp) / HZ > 2657 SECONDS_PER_HOUR) { 2658 list_del(&err->list); 2659 kfree(err); 2660 } else { 2661 count++; 2662 } 2663 } 2664 list_add(&hw_err->list, &isolate->qm_hw_errs); 2665 2666 if (count >= isolate->err_threshold) 2667 isolate->is_isolate = true; 2668 mutex_unlock(&isolate->isolate_lock); 2669 2670 return 0; 2671 } 2672 2673 static void qm_hw_err_destroy(struct hisi_qm *qm) 2674 { 2675 struct qm_hw_err *err, *tmp; 2676 2677 list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) { 2678 list_del(&err->list); 2679 kfree(err); 2680 } 2681 } 2682 2683 static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce) 2684 { 2685 struct hisi_qm *qm = uacce->priv; 2686 struct hisi_qm *pf_qm; 2687 2688 if (uacce->is_vf) 2689 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); 2690 else 2691 pf_qm = qm; 2692 2693 return pf_qm->isolate_data.is_isolate ? 2694 UACCE_DEV_ISOLATE : UACCE_DEV_NORMAL; 2695 } 2696 2697 static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num) 2698 { 2699 struct hisi_qm *qm = uacce->priv; 2700 2701 /* Must be set by PF */ 2702 if (uacce->is_vf) 2703 return -EPERM; 2704 2705 if (qm->isolate_data.is_isolate) 2706 return -EPERM; 2707 2708 mutex_lock(&qm->isolate_data.isolate_lock); 2709 qm->isolate_data.err_threshold = num; 2710 2711 /* After the policy is updated, need to reset the hardware err list */ 2712 qm_hw_err_destroy(qm); 2713 mutex_unlock(&qm->isolate_data.isolate_lock); 2714 2715 return 0; 2716 } 2717 2718 static u32 hisi_qm_isolate_threshold_read(struct uacce_device *uacce) 2719 { 2720 struct hisi_qm *qm = uacce->priv; 2721 struct hisi_qm *pf_qm; 2722 2723 if (uacce->is_vf) { 2724 pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); 2725 return pf_qm->isolate_data.err_threshold; 2726 } 2727 2728 return qm->isolate_data.err_threshold; 2729 } 2730 2731 static const struct uacce_ops uacce_qm_ops = { 2732 .get_available_instances = hisi_qm_get_available_instances, 2733 .get_queue = hisi_qm_uacce_get_queue, 2734 .put_queue = hisi_qm_uacce_put_queue, 2735 .start_queue = hisi_qm_uacce_start_queue, 2736 .stop_queue = hisi_qm_uacce_stop_queue, 2737 .mmap = hisi_qm_uacce_mmap, 2738 .ioctl = hisi_qm_uacce_ioctl, 2739 .is_q_updated = hisi_qm_is_q_updated, 2740 .get_isolate_state = hisi_qm_get_isolate_state, 2741 .isolate_err_threshold_write = hisi_qm_isolate_threshold_write, 2742 .isolate_err_threshold_read = hisi_qm_isolate_threshold_read, 2743 }; 2744 2745 static void qm_remove_uacce(struct hisi_qm *qm) 2746 { 2747 struct uacce_device *uacce = qm->uacce; 2748 2749 if (qm->use_sva) { 2750 mutex_lock(&qm->isolate_data.isolate_lock); 2751 qm_hw_err_destroy(qm); 2752 mutex_unlock(&qm->isolate_data.isolate_lock); 2753 2754 uacce_remove(uacce); 2755 qm->uacce = NULL; 2756 } 2757 } 2758 2759 static void qm_uacce_api_ver_init(struct hisi_qm *qm) 2760 { 2761 struct uacce_device *uacce = qm->uacce; 2762 2763 switch (qm->ver) { 2764 case QM_HW_V1: 2765 uacce->api_ver = HISI_QM_API_VER_BASE; 2766 break; 2767 case QM_HW_V2: 2768 uacce->api_ver = HISI_QM_API_VER2_BASE; 2769 break; 2770 case QM_HW_V3: 2771 case QM_HW_V4: 2772 uacce->api_ver = HISI_QM_API_VER3_BASE; 2773 break; 2774 default: 2775 uacce->api_ver = HISI_QM_API_VER5_BASE; 2776 break; 2777 } 2778 } 2779 2780 static int qm_alloc_uacce(struct hisi_qm *qm) 2781 { 2782 struct pci_dev *pdev = 
qm->pdev;
	struct uacce_device *uacce;
	unsigned long mmio_page_nr;
	unsigned long dus_page_nr;
	u16 sq_depth, cq_depth;
	struct uacce_interface interface = {
		.flags = UACCE_DEV_SVA,
		.ops = &uacce_qm_ops,
	};
	int ret;

	ret = strscpy(interface.name, dev_driver_string(&pdev->dev),
		      sizeof(interface.name));
	if (ret < 0)
		return -ENAMETOOLONG;

	uacce = uacce_alloc(&pdev->dev, &interface);
	if (IS_ERR(uacce))
		return PTR_ERR(uacce);

	if (uacce->flags & UACCE_DEV_SVA) {
		qm->use_sva = true;
	} else {
		/* only consider sva case */
		qm_remove_uacce(qm);
		return -EINVAL;
	}

	uacce->is_vf = pdev->is_virtfn;
	uacce->priv = qm;

	if (qm->ver == QM_HW_V1)
		mmio_page_nr = QM_DOORBELL_PAGE_NR;
	else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
		mmio_page_nr = QM_DOORBELL_PAGE_NR +
			QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
	else
		mmio_page_nr = qm->db_interval / PAGE_SIZE;

	qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP);

	/* Add one more page for device or qp status */
	dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth +
		       sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >>
		      PAGE_SHIFT;

	uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
	uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;

	qm->uacce = uacce;
	qm_uacce_api_ver_init(qm);
	INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs);
	mutex_init(&qm->isolate_data.isolate_lock);

	return 0;
}

/**
 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests.
 * If there is a user on the QM, return failure without doing anything.
 * @qm: The qm to be frozen.
 *
 * This function freezes the QM so that SR-IOV disabling can proceed.
 */
static int qm_frozen(struct hisi_qm *qm)
{
	if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
		return 0;

	down_write(&qm->qps_lock);

	if (!qm->qp_in_used) {
		qm->qp_in_used = qm->qp_num;
		up_write(&qm->qps_lock);
		set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
		return 0;
	}

	up_write(&qm->qps_lock);

	return -EBUSY;
}

static int qm_try_frozen_vfs(struct pci_dev *pdev,
			     struct hisi_qm_list *qm_list)
{
	struct hisi_qm *qm, *vf_qm;
	struct pci_dev *dev;
	int ret = 0;

	if (!qm_list || !pdev)
		return -EINVAL;

	/* Try to freeze all the VFs when disabling SR-IOV */
	mutex_lock(&qm_list->lock);
	list_for_each_entry(qm, &qm_list->list, list) {
		dev = qm->pdev;
		if (dev == pdev)
			continue;
		if (pci_physfn(dev) == pdev) {
			vf_qm = pci_get_drvdata(dev);
			ret = qm_frozen(vf_qm);
			if (ret)
				goto frozen_fail;
		}
	}

frozen_fail:
	mutex_unlock(&qm_list->lock);

	return ret;
}

/**
 * hisi_qm_wait_task_finish() - Wait until all tasks are finished
 * when removing the driver.
 * @qm: The qm that needs to wait for its tasks to finish.
 * @qm_list: The list of all available devices.
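 *
 * This busy-waits until the function (and, on a PF, all of its VFs) is
 * frozen and any scheduled reset work has finished, then flushes pending
 * mailbox command work if the hardware supports it.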
2900 */ 2901 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list) 2902 { 2903 while (qm_frozen(qm) || 2904 ((qm->fun_type == QM_HW_PF) && 2905 qm_try_frozen_vfs(qm->pdev, qm_list))) { 2906 msleep(WAIT_PERIOD); 2907 } 2908 2909 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) || 2910 test_bit(QM_RESETTING, &qm->misc_ctl)) 2911 msleep(WAIT_PERIOD); 2912 2913 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 2914 flush_work(&qm->cmd_process); 2915 2916 udelay(REMOVE_WAIT_DELAY); 2917 } 2918 EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish); 2919 2920 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) 2921 { 2922 struct device *dev = &qm->pdev->dev; 2923 struct qm_dma *qdma; 2924 int i; 2925 2926 for (i = num - 1; i >= 0; i--) { 2927 qdma = &qm->qp_array[i].qdma; 2928 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); 2929 kfree(qm->poll_data[i].qp_finish_id); 2930 } 2931 2932 kfree(qm->poll_data); 2933 kfree(qm->qp_array); 2934 } 2935 2936 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, 2937 u16 sq_depth, u16 cq_depth) 2938 { 2939 struct device *dev = &qm->pdev->dev; 2940 size_t off = qm->sqe_size * sq_depth; 2941 struct hisi_qp *qp; 2942 int ret = -ENOMEM; 2943 2944 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16), 2945 GFP_KERNEL); 2946 if (!qm->poll_data[id].qp_finish_id) 2947 return -ENOMEM; 2948 2949 qp = &qm->qp_array[id]; 2950 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, 2951 GFP_KERNEL); 2952 if (!qp->qdma.va) 2953 goto err_free_qp_finish_id; 2954 2955 qp->sqe = qp->qdma.va; 2956 qp->sqe_dma = qp->qdma.dma; 2957 qp->cqe = qp->qdma.va + off; 2958 qp->cqe_dma = qp->qdma.dma + off; 2959 qp->qdma.size = dma_size; 2960 qp->sq_depth = sq_depth; 2961 qp->cq_depth = cq_depth; 2962 qp->qm = qm; 2963 qp->qp_id = id; 2964 2965 return 0; 2966 2967 err_free_qp_finish_id: 2968 kfree(qm->poll_data[id].qp_finish_id); 2969 return ret; 2970 } 2971 2972 static void hisi_qm_pre_init(struct hisi_qm *qm) 2973 { 2974 struct pci_dev *pdev = qm->pdev; 2975 2976 if (qm->ver == QM_HW_V1) 2977 qm->ops = &qm_hw_ops_v1; 2978 else if (qm->ver == QM_HW_V2) 2979 qm->ops = &qm_hw_ops_v2; 2980 else if (qm->ver == QM_HW_V3) 2981 qm->ops = &qm_hw_ops_v3; 2982 else 2983 qm->ops = &qm_hw_ops_v4; 2984 2985 pci_set_drvdata(pdev, qm); 2986 mutex_init(&qm->mailbox_lock); 2987 mutex_init(&qm->ifc_lock); 2988 init_rwsem(&qm->qps_lock); 2989 qm->qp_in_used = 0; 2990 if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { 2991 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) 2992 dev_info(&pdev->dev, "_PS0 and _PR0 are not defined"); 2993 } 2994 } 2995 2996 static void qm_cmd_uninit(struct hisi_qm *qm) 2997 { 2998 u32 val; 2999 3000 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 3001 return; 3002 3003 val = readl(qm->io_base + QM_IFC_INT_MASK); 3004 val |= QM_IFC_INT_DISABLE; 3005 writel(val, qm->io_base + QM_IFC_INT_MASK); 3006 } 3007 3008 static void qm_cmd_init(struct hisi_qm *qm) 3009 { 3010 u32 val; 3011 3012 if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) 3013 return; 3014 3015 /* Clear communication interrupt source */ 3016 qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR); 3017 3018 /* Enable pf to vf communication reg. 
*/ 3019 val = readl(qm->io_base + QM_IFC_INT_MASK); 3020 val &= ~QM_IFC_INT_DISABLE; 3021 writel(val, qm->io_base + QM_IFC_INT_MASK); 3022 } 3023 3024 static void qm_put_pci_res(struct hisi_qm *qm) 3025 { 3026 struct pci_dev *pdev = qm->pdev; 3027 3028 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) 3029 iounmap(qm->db_io_base); 3030 3031 iounmap(qm->io_base); 3032 pci_release_mem_regions(pdev); 3033 } 3034 3035 static void hisi_mig_region_clear(struct hisi_qm *qm) 3036 { 3037 u32 val; 3038 3039 /* Clear migration region set of PF */ 3040 if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V3) { 3041 val = readl(qm->io_base + QM_MIG_REGION_SEL); 3042 val &= ~QM_MIG_REGION_EN; 3043 writel(val, qm->io_base + QM_MIG_REGION_SEL); 3044 } 3045 } 3046 3047 static void hisi_mig_region_enable(struct hisi_qm *qm) 3048 { 3049 u32 val; 3050 3051 /* Select migration region of PF */ 3052 if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V3) { 3053 val = readl(qm->io_base + QM_MIG_REGION_SEL); 3054 val |= QM_MIG_REGION_EN; 3055 writel(val, qm->io_base + QM_MIG_REGION_SEL); 3056 } 3057 } 3058 3059 static void hisi_qm_pci_uninit(struct hisi_qm *qm) 3060 { 3061 struct pci_dev *pdev = qm->pdev; 3062 3063 pci_free_irq_vectors(pdev); 3064 hisi_mig_region_clear(qm); 3065 qm_put_pci_res(qm); 3066 pci_disable_device(pdev); 3067 } 3068 3069 static void hisi_qm_set_state(struct hisi_qm *qm, u8 state) 3070 { 3071 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF) 3072 writel(state, qm->io_base + QM_VF_STATE); 3073 } 3074 3075 static void hisi_qm_unint_work(struct hisi_qm *qm) 3076 { 3077 destroy_workqueue(qm->wq); 3078 } 3079 3080 static void hisi_qm_free_rsv_buf(struct hisi_qm *qm) 3081 { 3082 struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma; 3083 struct device *dev = &qm->pdev->dev; 3084 3085 dma_free_coherent(dev, xqc_dma->size, xqc_dma->va, xqc_dma->dma); 3086 } 3087 3088 static void hisi_qm_memory_uninit(struct hisi_qm *qm) 3089 { 3090 struct device *dev = &qm->pdev->dev; 3091 3092 hisi_qp_memory_uninit(qm, qm->qp_num); 3093 hisi_qm_free_rsv_buf(qm); 3094 if (qm->qdma.va) { 3095 hisi_qm_cache_wb(qm); 3096 dma_free_coherent(dev, qm->qdma.size, 3097 qm->qdma.va, qm->qdma.dma); 3098 } 3099 3100 idr_destroy(&qm->qp_idr); 3101 3102 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 3103 kfree(qm->factor); 3104 } 3105 3106 /** 3107 * hisi_qm_uninit() - Uninitialize qm. 3108 * @qm: The qm needed uninit. 3109 * 3110 * This function uninits qm related device resources. 3111 */ 3112 void hisi_qm_uninit(struct hisi_qm *qm) 3113 { 3114 qm_cmd_uninit(qm); 3115 hisi_qm_unint_work(qm); 3116 3117 down_write(&qm->qps_lock); 3118 hisi_qm_memory_uninit(qm); 3119 hisi_qm_set_state(qm, QM_NOT_READY); 3120 up_write(&qm->qps_lock); 3121 3122 qm_remove_uacce(qm); 3123 qm_irqs_unregister(qm); 3124 hisi_qm_pci_uninit(qm); 3125 } 3126 EXPORT_SYMBOL_GPL(hisi_qm_uninit); 3127 3128 /** 3129 * hisi_qm_get_vft() - Get vft from a qm. 3130 * @qm: The qm we want to get its vft. 3131 * @base: The base number of queue in vft. 3132 * @number: The number of queues in vft. 3133 * 3134 * We can allocate multiple queues to a qm by configuring virtual function 3135 * table. We get related configures by this function. Normally, we call this 3136 * function in VF driver to get the queue information. 3137 * 3138 * qm hw v1 does not support this interface. 
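 *
 * A typical probe-time sketch (illustrative):
 *
 *	u32 qp_base, qp_num;
 *	int ret;
 *
 *	ret = hisi_qm_get_vft(qm, &qp_base, &qp_num);
 *	if (ret)
 *		return ret;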
 */
static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
{
	if (!base || !number)
		return -EINVAL;

	if (!qm->ops->get_vft) {
		dev_err(&qm->pdev->dev, "Don't support vft read!\n");
		return -EINVAL;
	}

	return qm->ops->get_vft(qm, base, number);
}

/**
 * hisi_qm_set_vft() - Set vft to a qm.
 * @qm: The qm we want to set its vft.
 * @fun_num: The function number.
 * @base: The base number of queue in vft.
 * @number: The number of queues in vft.
 *
 * This function is always called in the PF driver; it is used to assign
 * queues among the PF and VFs.
 *
 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
 * (VF function number 0x2)
 */
static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
			   u32 number)
{
	u32 max_q_num = qm->ctrl_qp_num;

	if (base >= max_q_num || number > max_q_num ||
	    (base + number) > max_q_num)
		return -EINVAL;

	return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
}

static void qm_init_eq_aeq_status(struct hisi_qm *qm)
{
	struct hisi_qm_status *status = &qm->status;

	status->eq_head = 0;
	status->aeq_head = 0;
	status->eqc_phase = true;
	status->aeqc_phase = true;
}

static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm)
{
	/* Clear eq/aeq interrupt source */
	qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
	writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
}

static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm)
{
	writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
	writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
}

static int qm_eq_ctx_cfg(struct hisi_qm *qm)
{
	struct qm_eqc eqc = {0};

	eqc.base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
	eqc.base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
	if (qm->ver == QM_HW_V1)
		eqc.dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
	eqc.dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));

	return qm_set_and_get_xqc(qm, QM_MB_CMD_EQC, &eqc, 0, 0);
}

static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
{
	struct qm_aeqc aeqc = {0};

	aeqc.base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
	aeqc.base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
	aeqc.dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));

	return qm_set_and_get_xqc(qm, QM_MB_CMD_AEQC, &aeqc, 0, 0);
}

static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm_init_eq_aeq_status(qm);

	/*
	 * Before starting the device, clear the qdma memory, then configure
	 * it to the device.
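	 * Stale EQE/AEQE phase bits left over from a previous run would
	 * otherwise be mistaken for new events once the queues are restarted.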
*/ 3237 memset(qm->qdma.va, 0, qm->qdma.size); 3238 3239 ret = qm_eq_ctx_cfg(qm); 3240 if (ret) { 3241 dev_err(dev, "Set eqc failed!\n"); 3242 return ret; 3243 } 3244 3245 return qm_aeq_ctx_cfg(qm); 3246 } 3247 3248 static int __hisi_qm_start(struct hisi_qm *qm) 3249 { 3250 struct device *dev = &qm->pdev->dev; 3251 int ret; 3252 3253 if (!qm->qdma.va) { 3254 dev_err(dev, "qm qdma is NULL!\n"); 3255 return -EINVAL; 3256 } 3257 3258 if (qm->fun_type == QM_HW_PF) { 3259 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); 3260 if (ret) 3261 return ret; 3262 } 3263 3264 ret = qm_eq_aeq_ctx_cfg(qm); 3265 if (ret) 3266 return ret; 3267 3268 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); 3269 if (ret) 3270 return ret; 3271 3272 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); 3273 if (ret) 3274 return ret; 3275 3276 qm_init_prefetch(qm); 3277 qm_enable_eq_aeq_interrupts(qm); 3278 3279 return 0; 3280 } 3281 3282 /** 3283 * hisi_qm_start() - start qm 3284 * @qm: The qm to be started. 3285 * 3286 * This function starts a qm, then we can allocate qp from this qm. 3287 */ 3288 int hisi_qm_start(struct hisi_qm *qm) 3289 { 3290 struct device *dev = &qm->pdev->dev; 3291 int ret = 0; 3292 3293 down_write(&qm->qps_lock); 3294 3295 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num); 3296 3297 if (!qm->qp_num) { 3298 dev_err(dev, "qp_num should not be 0\n"); 3299 ret = -EINVAL; 3300 goto err_unlock; 3301 } 3302 3303 ret = __hisi_qm_start(qm); 3304 if (ret) 3305 goto err_unlock; 3306 3307 atomic_set(&qm->status.flags, QM_WORK); 3308 hisi_qm_set_state(qm, QM_READY); 3309 3310 err_unlock: 3311 up_write(&qm->qps_lock); 3312 return ret; 3313 } 3314 EXPORT_SYMBOL_GPL(hisi_qm_start); 3315 3316 static int qm_restart(struct hisi_qm *qm) 3317 { 3318 struct device *dev = &qm->pdev->dev; 3319 struct hisi_qp *qp; 3320 int ret, i; 3321 3322 ret = hisi_qm_start(qm); 3323 if (ret < 0) 3324 return ret; 3325 3326 down_write(&qm->qps_lock); 3327 for (i = 0; i < qm->qp_num; i++) { 3328 qp = &qm->qp_array[i]; 3329 if (atomic_read(&qp->qp_status.flags) == QP_STOP && 3330 qp->is_resetting == true && qp->is_in_kernel == true) { 3331 ret = qm_start_qp_nolock(qp, 0); 3332 if (ret < 0) { 3333 dev_err(dev, "Failed to start qp%d!\n", i); 3334 3335 up_write(&qm->qps_lock); 3336 return ret; 3337 } 3338 qp->is_resetting = false; 3339 } 3340 } 3341 up_write(&qm->qps_lock); 3342 3343 return 0; 3344 } 3345 3346 /* Stop started qps in reset flow */ 3347 static void qm_stop_started_qp(struct hisi_qm *qm) 3348 { 3349 struct hisi_qp *qp; 3350 int i; 3351 3352 for (i = 0; i < qm->qp_num; i++) { 3353 qp = &qm->qp_array[i]; 3354 if (atomic_read(&qp->qp_status.flags) == QP_START) { 3355 qp->is_resetting = true; 3356 qm_stop_qp_nolock(qp); 3357 } 3358 } 3359 } 3360 3361 /** 3362 * qm_invalid_queues() - invalid all queues in use. 3363 * @qm: The qm in which the queues will be invalidated. 3364 * 3365 * This function invalid all queues in use. If the doorbell command is sent 3366 * to device in user space after the device is reset, the device discards 3367 * the doorbell command. 3368 */ 3369 static void qm_invalid_queues(struct hisi_qm *qm) 3370 { 3371 struct hisi_qp *qp; 3372 struct qm_sqc *sqc; 3373 struct qm_cqc *cqc; 3374 int i; 3375 3376 /* 3377 * Normal stop queues is no longer used and does not need to be 3378 * invalid queues. 
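 * That is, when stop_reason is QM_NORMAL the function returns early and the
 * contexts are left untouched.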
3379 */ 3380 if (qm->status.stop_reason == QM_NORMAL) 3381 return; 3382 3383 if (qm->status.stop_reason == QM_DOWN) 3384 hisi_qm_cache_wb(qm); 3385 3386 for (i = 0; i < qm->qp_num; i++) { 3387 qp = &qm->qp_array[i]; 3388 if (!qp->is_resetting) 3389 continue; 3390 3391 /* Modify random data and set sqc close bit to invalid queue. */ 3392 sqc = qm->sqc + i; 3393 cqc = qm->cqc + i; 3394 sqc->w8 = cpu_to_le16(QM_XQC_RANDOM_DATA); 3395 sqc->w13 = cpu_to_le16(QM_SQC_DISABLE_QP); 3396 cqc->w8 = cpu_to_le16(QM_XQC_RANDOM_DATA); 3397 if (qp->is_in_kernel) 3398 memset(qp->qdma.va, 0, qp->qdma.size); 3399 } 3400 } 3401 3402 /** 3403 * hisi_qm_stop() - Stop a qm. 3404 * @qm: The qm which will be stopped. 3405 * @r: The reason to stop qm. 3406 * 3407 * This function stops qm and its qps, then qm can not accept request. 3408 * Related resources are not released at this state, we can use hisi_qm_start 3409 * to let qm start again. 3410 */ 3411 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) 3412 { 3413 struct device *dev = &qm->pdev->dev; 3414 int ret = 0; 3415 3416 down_write(&qm->qps_lock); 3417 3418 if (atomic_read(&qm->status.flags) == QM_STOP) 3419 goto err_unlock; 3420 3421 /* Stop all the request sending at first. */ 3422 atomic_set(&qm->status.flags, QM_STOP); 3423 qm->status.stop_reason = r; 3424 3425 if (qm->status.stop_reason != QM_NORMAL) { 3426 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 3427 /* 3428 * When performing soft reset, the hardware will no longer 3429 * do tasks, and the tasks in the device will be flushed 3430 * out directly since the master ooo is closed. 3431 */ 3432 if (test_bit(QM_SUPPORT_STOP_FUNC, &qm->caps) && 3433 r != QM_SOFT_RESET) { 3434 ret = qm_drain_qm(qm); 3435 if (ret) { 3436 dev_err(dev, "failed to drain qm!\n"); 3437 goto err_unlock; 3438 } 3439 } 3440 3441 qm_stop_started_qp(qm); 3442 3443 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 3444 } 3445 3446 qm_disable_eq_aeq_interrupts(qm); 3447 if (qm->fun_type == QM_HW_PF) { 3448 ret = hisi_qm_set_vft(qm, 0, 0, 0); 3449 if (ret < 0) { 3450 dev_err(dev, "Failed to set vft!\n"); 3451 ret = -EBUSY; 3452 goto err_unlock; 3453 } 3454 } 3455 3456 qm_invalid_queues(qm); 3457 qm->status.stop_reason = QM_NORMAL; 3458 3459 err_unlock: 3460 up_write(&qm->qps_lock); 3461 return ret; 3462 } 3463 EXPORT_SYMBOL_GPL(hisi_qm_stop); 3464 3465 static void qm_hw_error_init(struct hisi_qm *qm) 3466 { 3467 if (!qm->ops->hw_error_init) { 3468 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); 3469 return; 3470 } 3471 3472 qm->ops->hw_error_init(qm); 3473 } 3474 3475 static void qm_hw_error_uninit(struct hisi_qm *qm) 3476 { 3477 if (!qm->ops->hw_error_uninit) { 3478 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n"); 3479 return; 3480 } 3481 3482 qm->ops->hw_error_uninit(qm); 3483 } 3484 3485 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) 3486 { 3487 if (!qm->ops->hw_error_handle) { 3488 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); 3489 return ACC_ERR_NONE; 3490 } 3491 3492 return qm->ops->hw_error_handle(qm); 3493 } 3494 3495 /** 3496 * hisi_qm_dev_err_init() - Initialize device error configuration. 3497 * @qm: The qm for which we want to do error initialization. 3498 * 3499 * Initialize QM and device error related configuration. 
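 * This is a PF-only operation: on a VF it returns immediately, and it
 * requires the device-specific err_ini->hw_err_enable hook.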
3500 */ 3501 void hisi_qm_dev_err_init(struct hisi_qm *qm) 3502 { 3503 if (qm->fun_type == QM_HW_VF) 3504 return; 3505 3506 qm_hw_error_init(qm); 3507 3508 if (!qm->err_ini->hw_err_enable) { 3509 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n"); 3510 return; 3511 } 3512 qm->err_ini->hw_err_enable(qm); 3513 } 3514 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init); 3515 3516 /** 3517 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration. 3518 * @qm: The qm for which we want to do error uninitialization. 3519 * 3520 * Uninitialize QM and device error related configuration. 3521 */ 3522 void hisi_qm_dev_err_uninit(struct hisi_qm *qm) 3523 { 3524 if (qm->fun_type == QM_HW_VF) 3525 return; 3526 3527 qm_hw_error_uninit(qm); 3528 3529 if (!qm->err_ini->hw_err_disable) { 3530 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n"); 3531 return; 3532 } 3533 qm->err_ini->hw_err_disable(qm); 3534 } 3535 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit); 3536 3537 /** 3538 * hisi_qm_free_qps() - free multiple queue pairs. 3539 * @qps: The queue pairs need to be freed. 3540 * @qp_num: The num of queue pairs. 3541 */ 3542 void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num) 3543 { 3544 int i; 3545 3546 if (!qps || qp_num <= 0) 3547 return; 3548 3549 for (i = qp_num - 1; i >= 0; i--) 3550 hisi_qm_release_qp(qps[i]); 3551 } 3552 EXPORT_SYMBOL_GPL(hisi_qm_free_qps); 3553 3554 static void free_list(struct list_head *head) 3555 { 3556 struct hisi_qm_resource *res, *tmp; 3557 3558 list_for_each_entry_safe(res, tmp, head, list) { 3559 list_del(&res->list); 3560 kfree(res); 3561 } 3562 } 3563 3564 static int hisi_qm_sort_devices(int node, struct list_head *head, 3565 struct hisi_qm_list *qm_list) 3566 { 3567 struct hisi_qm_resource *res, *tmp; 3568 struct hisi_qm *qm; 3569 struct list_head *n; 3570 struct device *dev; 3571 int dev_node; 3572 3573 list_for_each_entry(qm, &qm_list->list, list) { 3574 dev = &qm->pdev->dev; 3575 3576 dev_node = dev_to_node(dev); 3577 if (dev_node < 0) 3578 dev_node = 0; 3579 3580 res = kzalloc(sizeof(*res), GFP_KERNEL); 3581 if (!res) 3582 return -ENOMEM; 3583 3584 res->qm = qm; 3585 res->distance = node_distance(dev_node, node); 3586 n = head; 3587 list_for_each_entry(tmp, head, list) { 3588 if (res->distance < tmp->distance) { 3589 n = &tmp->list; 3590 break; 3591 } 3592 } 3593 list_add_tail(&res->list, n); 3594 } 3595 3596 return 0; 3597 } 3598 3599 /** 3600 * hisi_qm_alloc_qps_node() - Create multiple queue pairs. 3601 * @qm_list: The list of all available devices. 3602 * @qp_num: The number of queue pairs need created. 3603 * @alg_type: The algorithm type. 3604 * @node: The numa node. 3605 * @qps: The queue pairs need created. 3606 * 3607 * This function will sort all available device according to numa distance. 3608 * Then try to create all queue pairs from one device, if all devices do 3609 * not meet the requirements will return error. 
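 *
 * Example (illustrative; qm_list is the driver's registered device list):
 *
 *	struct hisi_qp *qps[2];
 *	int ret;
 *
 *	ret = hisi_qm_alloc_qps_node(qm_list, 2, 0, numa_node_id(), qps);
 *	if (ret)
 *		return ret;
 *	...
 *	hisi_qm_free_qps(qps, 2);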
3610 */ 3611 int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, 3612 u8 alg_type, int node, struct hisi_qp **qps) 3613 { 3614 struct hisi_qm_resource *tmp; 3615 int ret = -ENODEV; 3616 LIST_HEAD(head); 3617 int i; 3618 3619 if (!qps || !qm_list || qp_num <= 0) 3620 return -EINVAL; 3621 3622 mutex_lock(&qm_list->lock); 3623 if (hisi_qm_sort_devices(node, &head, qm_list)) { 3624 mutex_unlock(&qm_list->lock); 3625 goto err; 3626 } 3627 3628 list_for_each_entry(tmp, &head, list) { 3629 for (i = 0; i < qp_num; i++) { 3630 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); 3631 if (IS_ERR(qps[i])) { 3632 hisi_qm_free_qps(qps, i); 3633 break; 3634 } 3635 } 3636 3637 if (i == qp_num) { 3638 ret = 0; 3639 break; 3640 } 3641 } 3642 3643 mutex_unlock(&qm_list->lock); 3644 if (ret) 3645 pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n", 3646 node, alg_type, qp_num); 3647 3648 err: 3649 free_list(&head); 3650 return ret; 3651 } 3652 EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node); 3653 3654 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) 3655 { 3656 u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j; 3657 u32 max_qp_num = qm->max_qp_num; 3658 u32 q_base = qm->qp_num; 3659 int ret; 3660 3661 if (!num_vfs) 3662 return -EINVAL; 3663 3664 vfs_q_num = qm->ctrl_qp_num - qm->qp_num; 3665 3666 /* If vfs_q_num is less than num_vfs, return error. */ 3667 if (vfs_q_num < num_vfs) 3668 return -EINVAL; 3669 3670 q_num = vfs_q_num / num_vfs; 3671 remain_q_num = vfs_q_num % num_vfs; 3672 3673 for (i = num_vfs; i > 0; i--) { 3674 /* 3675 * if q_num + remain_q_num > max_qp_num in last vf, divide the 3676 * remaining queues equally. 3677 */ 3678 if (i == num_vfs && q_num + remain_q_num <= max_qp_num) { 3679 act_q_num = q_num + remain_q_num; 3680 remain_q_num = 0; 3681 } else if (remain_q_num > 0) { 3682 act_q_num = q_num + 1; 3683 remain_q_num--; 3684 } else { 3685 act_q_num = q_num; 3686 } 3687 3688 act_q_num = min(act_q_num, max_qp_num); 3689 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num); 3690 if (ret) { 3691 for (j = num_vfs; j > i; j--) 3692 hisi_qm_set_vft(qm, j, 0, 0); 3693 return ret; 3694 } 3695 q_base += act_q_num; 3696 } 3697 3698 return 0; 3699 } 3700 3701 static void qm_clear_vft_config(struct hisi_qm *qm) 3702 { 3703 u32 i; 3704 3705 /* 3706 * When disabling SR-IOV, clear the configuration of each VF in the hardware 3707 * sequentially. Failure to clear a single VF should not affect the clearing 3708 * operation of other VFs. 
3709 */ 3710 for (i = 1; i <= qm->vfs_num; i++) 3711 (void)hisi_qm_set_vft(qm, i, 0, 0); 3712 3713 qm->vfs_num = 0; 3714 } 3715 3716 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos) 3717 { 3718 struct device *dev = &qm->pdev->dev; 3719 struct qm_shaper_factor t_factor; 3720 u32 ir = qos * QM_QOS_RATE; 3721 int ret, total_vfs, i; 3722 3723 total_vfs = pci_sriov_get_totalvfs(qm->pdev); 3724 if (fun_index > total_vfs) 3725 return -EINVAL; 3726 3727 memcpy(&t_factor, &qm->factor[fun_index], sizeof(t_factor)); 3728 qm->factor[fun_index].func_qos = qos; 3729 3730 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]); 3731 if (ret) { 3732 dev_err(dev, "failed to calculate shaper parameter!\n"); 3733 return -EINVAL; 3734 } 3735 3736 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) { 3737 /* The base number of queue reuse for different alg type */ 3738 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1); 3739 if (ret) { 3740 dev_err(dev, "type: %d, failed to set shaper vft!\n", i); 3741 goto back_func_qos; 3742 } 3743 } 3744 3745 return 0; 3746 3747 back_func_qos: 3748 memcpy(&qm->factor[fun_index], &t_factor, sizeof(t_factor)); 3749 for (i--; i >= ALG_TYPE_0; i--) { 3750 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1); 3751 if (ret) 3752 dev_err(dev, "failed to restore shaper vft during rollback!\n"); 3753 } 3754 3755 return -EINVAL; 3756 } 3757 3758 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index) 3759 { 3760 u64 cir_u = 0, cir_b = 0, cir_s = 0; 3761 u64 shaper_vft, ir_calc, ir; 3762 unsigned int val; 3763 u32 error_rate; 3764 int ret; 3765 3766 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 3767 val & BIT(0), POLL_PERIOD, 3768 POLL_TIMEOUT); 3769 if (ret) 3770 return 0; 3771 3772 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); 3773 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE); 3774 writel(fun_index, qm->io_base + QM_VFT_CFG); 3775 3776 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); 3777 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); 3778 3779 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, 3780 val & BIT(0), POLL_PERIOD, 3781 POLL_TIMEOUT); 3782 if (ret) 3783 return 0; 3784 3785 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | 3786 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32); 3787 3788 cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK; 3789 cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK; 3790 cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT; 3791 3792 cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK; 3793 cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT; 3794 3795 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s); 3796 3797 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE; 3798 3799 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; 3800 if (error_rate > QM_QOS_MIN_ERROR_RATE) { 3801 pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate); 3802 return 0; 3803 } 3804 3805 return ir; 3806 } 3807 3808 static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) 3809 { 3810 struct device *dev = &qm->pdev->dev; 3811 u32 qos; 3812 int ret; 3813 3814 qos = qm_get_shaper_vft_qos(qm, fun_num); 3815 if (!qos) { 3816 dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num); 3817 return; 3818 } 3819 3820 ret = qm_ping_single_vf(qm, QM_PF_SET_QOS, qos, fun_num); 3821 if (ret) 3822 dev_err(dev, "failed to send command(0x%x) to VF(%u)!\n", QM_PF_SET_QOS, fun_num); 3823 } 3824 3825 static int qm_vf_read_qos(struct hisi_qm *qm) 3826 { 3827 int cnt = 0; 3828 int ret = -EINVAL; 3829 3830 /* 
reset mailbox qos val */ 3831 qm->mb_qos = 0; 3832 3833 /* vf ping pf to get function qos */ 3834 ret = qm_ping_pf(qm, QM_VF_GET_QOS); 3835 if (ret) { 3836 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); 3837 return ret; 3838 } 3839 3840 while (true) { 3841 msleep(QM_WAIT_DST_ACK); 3842 if (qm->mb_qos) 3843 break; 3844 3845 if (++cnt > QM_MAX_VF_WAIT_COUNT) { 3846 pci_err(qm->pdev, "PF ping VF timeout!\n"); 3847 return -ETIMEDOUT; 3848 } 3849 } 3850 3851 return ret; 3852 } 3853 3854 static ssize_t qm_algqos_read(struct file *filp, char __user *buf, 3855 size_t count, loff_t *pos) 3856 { 3857 struct hisi_qm *qm = filp->private_data; 3858 char tbuf[QM_DBG_READ_LEN]; 3859 u32 qos_val, ir; 3860 int ret; 3861 3862 ret = hisi_qm_get_dfx_access(qm); 3863 if (ret) 3864 return ret; 3865 3866 /* Mailbox and reset cannot be operated at the same time */ 3867 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { 3868 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n"); 3869 ret = -EAGAIN; 3870 goto err_put_dfx_access; 3871 } 3872 3873 if (qm->fun_type == QM_HW_PF) { 3874 ir = qm_get_shaper_vft_qos(qm, 0); 3875 } else { 3876 ret = qm_vf_read_qos(qm); 3877 if (ret) 3878 goto err_get_status; 3879 ir = qm->mb_qos; 3880 } 3881 3882 qos_val = ir / QM_QOS_RATE; 3883 ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val); 3884 3885 ret = simple_read_from_buffer(buf, count, pos, tbuf, ret); 3886 3887 err_get_status: 3888 clear_bit(QM_RESETTING, &qm->misc_ctl); 3889 err_put_dfx_access: 3890 hisi_qm_put_dfx_access(qm); 3891 return ret; 3892 } 3893 3894 static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf, 3895 unsigned long *val, 3896 unsigned int *fun_index) 3897 { 3898 const struct bus_type *bus_type = qm->pdev->dev.bus; 3899 char tbuf_bdf[QM_DBG_READ_LEN] = {0}; 3900 char val_buf[QM_DBG_READ_LEN] = {0}; 3901 struct pci_dev *pdev; 3902 struct device *dev; 3903 int ret; 3904 3905 ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf); 3906 if (ret != QM_QOS_PARAM_NUM) 3907 return -EINVAL; 3908 3909 ret = kstrtoul(val_buf, 10, val); 3910 if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) { 3911 pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n"); 3912 return -EINVAL; 3913 } 3914 3915 dev = bus_find_device_by_name(bus_type, NULL, tbuf_bdf); 3916 if (!dev) { 3917 pci_err(qm->pdev, "input pci bdf number is error!\n"); 3918 return -ENODEV; 3919 } 3920 3921 pdev = container_of(dev, struct pci_dev, dev); 3922 if (pci_physfn(pdev) != qm->pdev) { 3923 pci_err(qm->pdev, "the pdev input does not match the pf!\n"); 3924 put_device(dev); 3925 return -EINVAL; 3926 } 3927 3928 *fun_index = pdev->devfn; 3929 put_device(dev); 3930 3931 return 0; 3932 } 3933 3934 static ssize_t qm_algqos_write(struct file *filp, const char __user *buf, 3935 size_t count, loff_t *pos) 3936 { 3937 struct hisi_qm *qm = filp->private_data; 3938 char tbuf[QM_DBG_READ_LEN]; 3939 unsigned int fun_index; 3940 unsigned long val; 3941 int len, ret; 3942 3943 if (*pos != 0) 3944 return 0; 3945 3946 if (count >= QM_DBG_READ_LEN) 3947 return -ENOSPC; 3948 3949 len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count); 3950 if (len < 0) 3951 return len; 3952 3953 tbuf[len] = '\0'; 3954 ret = qm_get_qos_value(qm, tbuf, &val, &fun_index); 3955 if (ret) 3956 return ret; 3957 3958 /* Mailbox and reset cannot be operated at the same time */ 3959 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { 3960 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); 3961 return -EAGAIN; 3962 } 3963 3964 ret = 
qm_pm_get_sync(qm);
	if (ret) {
		ret = -EINVAL;
		goto err_get_status;
	}

	ret = qm_func_shaper_enable(qm, fun_index, val);
	if (ret) {
		pci_err(qm->pdev, "failed to enable function shaper!\n");
		ret = -EINVAL;
		goto err_put_sync;
	}

	pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n",
		 fun_index, val);
	ret = count;

err_put_sync:
	qm_pm_put_sync(qm);
err_get_status:
	clear_bit(QM_RESETTING, &qm->misc_ctl);
	return ret;
}

static const struct file_operations qm_algqos_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_algqos_read,
	.write = qm_algqos_write,
};

/**
 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
 * @qm: The qm for which we want to add debugfs files.
 *
 * Create function qos debugfs files; a VF pings the PF to get its function
 * qos.
 */
void hisi_qm_set_algqos_init(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_PF)
		debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
				    qm, &qm_algqos_fops);
	else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps))
		debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
				    qm, &qm_algqos_fops);
}

static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func)
{
	int i;

	for (i = 1; i <= total_func; i++)
		qm->factor[i].func_qos = QM_QOS_MAX_VAL;
}

/**
 * hisi_qm_sriov_enable() - enable virtual functions
 * @pdev: the PCIe device
 * @max_vfs: the number of virtual functions to enable
 *
 * Returns the number of enabled VFs. If VFs are already enabled, or max_vfs
 * exceeds the total number of VFs the device supports, a failure is
 * returned.
 */
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int pre_existing_vfs, num_vfs, total_vfs, ret;

	ret = qm_pm_get_sync(qm);
	if (ret)
		return ret;

	total_vfs = pci_sriov_get_totalvfs(pdev);
	pre_existing_vfs = pci_num_vf(pdev);
	if (pre_existing_vfs) {
		pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
			pre_existing_vfs);
		goto err_put_sync;
	}

	if (max_vfs > total_vfs) {
		pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs);
		ret = -ERANGE;
		goto err_put_sync;
	}

	num_vfs = max_vfs;

	if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps))
		hisi_qm_init_vf_qos(qm, num_vfs);

	ret = qm_vf_q_assign(qm, num_vfs);
	if (ret) {
		pci_err(pdev, "Can't assign queues for VF!\n");
		goto err_put_sync;
	}

	qm->vfs_num = num_vfs;
	ret = pci_enable_sriov(pdev, num_vfs);
	if (ret) {
		pci_err(pdev, "Can't enable VF!\n");
		qm_clear_vft_config(qm);
		goto err_put_sync;
	}

	pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);

	return num_vfs;

err_put_sync:
	qm_pm_put_sync(qm);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);

/**
 * hisi_qm_sriov_disable - disable virtual functions
 * @pdev: the PCI device.
 * @is_frozen: true when all the VFs are frozen.
 *
 * Return failure if VFs are assigned already or a VF is in use.
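 *
 * Pass is_frozen as true only when the VFs are known to be frozen already,
 * for example on the driver remove path after hisi_qm_wait_task_finish();
 * otherwise the VFs are checked via qm_try_frozen_vfs() here.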
 */
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	if (pci_vfs_assigned(pdev)) {
		pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
		return -EPERM;
	}

	/* While a VF is in use, SR-IOV cannot be disabled. */
	if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
		pci_err(pdev, "Task is using its VF!\n");
		return -EBUSY;
	}

	pci_disable_sriov(pdev);
	qm_clear_vft_config(qm);
	qm_pm_put_sync(qm);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);

/**
 * hisi_qm_sriov_configure - configure the number of VFs
 * @pdev: The PCI device
 * @num_vfs: The number of VFs to be enabled
 *
 * Enable SR-IOV according to num_vfs, 0 means disable.
 */
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	if (num_vfs == 0)
		return hisi_qm_sriov_disable(pdev, false);
	else
		return hisi_qm_sriov_enable(pdev, num_vfs);
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);

static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
{
	if (!qm->err_ini->get_err_result) {
		dev_err(&qm->pdev->dev, "Device doesn't support reset!\n");
		return ACC_ERR_NONE;
	}

	return qm->err_ini->get_err_result(qm);
}

static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
{
	enum acc_err_result qm_ret, dev_ret;

	/* log qm error */
	qm_ret = qm_hw_error_handle(qm);

	/* log device error */
	dev_ret = qm_dev_err_handle(qm);

	return (qm_ret == ACC_ERR_NEED_RESET ||
		dev_ret == ACC_ERR_NEED_RESET) ?
	       ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
}

/**
 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
 * @pdev: The PCI device which needs to report the error.
 * @state: The connectivity between CPU and device.
 *
 * We register this function with the PCIe AER handlers; it reports the
 * device or qm hardware error status when an error occurs.
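 * Returning PCI_ERS_RESULT_NEED_RESET asks the PCI error recovery core to
 * go on and invoke the driver's slot_reset handler; otherwise the error is
 * reported as recovered.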
4158 */ 4159 pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, 4160 pci_channel_state_t state) 4161 { 4162 struct hisi_qm *qm = pci_get_drvdata(pdev); 4163 enum acc_err_result ret; 4164 4165 if (pdev->is_virtfn) 4166 return PCI_ERS_RESULT_NONE; 4167 4168 pci_info(pdev, "PCI error detected, state(=%u)!!\n", state); 4169 if (state == pci_channel_io_perm_failure) 4170 return PCI_ERS_RESULT_DISCONNECT; 4171 4172 ret = qm_process_dev_error(qm); 4173 if (ret == ACC_ERR_NEED_RESET) 4174 return PCI_ERS_RESULT_NEED_RESET; 4175 4176 return PCI_ERS_RESULT_RECOVERED; 4177 } 4178 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected); 4179 4180 static int qm_check_req_recv(struct hisi_qm *qm) 4181 { 4182 struct pci_dev *pdev = qm->pdev; 4183 int ret; 4184 u32 val; 4185 4186 if (qm->ver >= QM_HW_V3) 4187 return 0; 4188 4189 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); 4190 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, 4191 (val == ACC_VENDOR_ID_VALUE), 4192 POLL_PERIOD, POLL_TIMEOUT); 4193 if (ret) { 4194 dev_err(&pdev->dev, "Fails to read QM reg!\n"); 4195 return ret; 4196 } 4197 4198 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); 4199 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, 4200 (val == PCI_VENDOR_ID_HUAWEI), 4201 POLL_PERIOD, POLL_TIMEOUT); 4202 if (ret) 4203 dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); 4204 4205 return ret; 4206 } 4207 4208 static int qm_set_pf_mse(struct hisi_qm *qm, bool set) 4209 { 4210 struct pci_dev *pdev = qm->pdev; 4211 u16 cmd; 4212 int i; 4213 4214 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 4215 if (set) 4216 cmd |= PCI_COMMAND_MEMORY; 4217 else 4218 cmd &= ~PCI_COMMAND_MEMORY; 4219 4220 pci_write_config_word(pdev, PCI_COMMAND, cmd); 4221 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 4222 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 4223 if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1)) 4224 return 0; 4225 4226 udelay(1); 4227 } 4228 4229 return -ETIMEDOUT; 4230 } 4231 4232 static int qm_set_vf_mse(struct hisi_qm *qm, bool set) 4233 { 4234 struct pci_dev *pdev = qm->pdev; 4235 u16 sriov_ctrl; 4236 int pos; 4237 int i; 4238 4239 /* 4240 * Since function qm_set_vf_mse is called only after SRIOV is enabled, 4241 * pci_find_ext_capability cannot return 0, pos does not need to be 4242 * checked. 
4243 */ 4244 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 4245 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); 4246 if (set) 4247 sriov_ctrl |= PCI_SRIOV_CTRL_MSE; 4248 else 4249 sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE; 4250 pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl); 4251 4252 for (i = 0; i < MAX_WAIT_COUNTS; i++) { 4253 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); 4254 if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >> 4255 ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT) 4256 return 0; 4257 4258 udelay(1); 4259 } 4260 4261 return -ETIMEDOUT; 4262 } 4263 4264 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) 4265 { 4266 u32 nfe_enb = 0; 4267 4268 /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ 4269 if (qm->ver >= QM_HW_V3) 4270 return; 4271 4272 if (!qm->err_status.is_dev_ecc_mbit && 4273 qm->err_status.is_qm_ecc_mbit && 4274 qm->err_ini->close_axi_master_ooo) { 4275 qm->err_ini->close_axi_master_ooo(qm); 4276 } else if (qm->err_status.is_dev_ecc_mbit && 4277 !qm->err_status.is_qm_ecc_mbit && 4278 !qm->err_ini->close_axi_master_ooo) { 4279 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); 4280 writel(nfe_enb & ~qm->err_info.qm_err.ecc_2bits_mask, 4281 qm->io_base + QM_RAS_NFE_ENABLE); 4282 writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SET); 4283 } 4284 } 4285 4286 static int qm_vf_reset_prepare(struct hisi_qm *qm, 4287 enum qm_stop_reason stop_reason) 4288 { 4289 struct hisi_qm_list *qm_list = qm->qm_list; 4290 struct pci_dev *pdev = qm->pdev; 4291 struct pci_dev *virtfn; 4292 struct hisi_qm *vf_qm; 4293 int ret = 0; 4294 4295 mutex_lock(&qm_list->lock); 4296 list_for_each_entry(vf_qm, &qm_list->list, list) { 4297 virtfn = vf_qm->pdev; 4298 if (virtfn == pdev) 4299 continue; 4300 4301 if (pci_physfn(virtfn) == pdev) { 4302 /* save VFs PCIE BAR configuration */ 4303 pci_save_state(virtfn); 4304 4305 ret = hisi_qm_stop(vf_qm, stop_reason); 4306 if (ret) 4307 goto stop_fail; 4308 } 4309 } 4310 4311 stop_fail: 4312 mutex_unlock(&qm_list->lock); 4313 return ret; 4314 } 4315 4316 static int qm_try_stop_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd, 4317 enum qm_stop_reason stop_reason) 4318 { 4319 struct pci_dev *pdev = qm->pdev; 4320 int ret; 4321 4322 if (!qm->vfs_num) 4323 return 0; 4324 4325 /* Kunpeng930 supports to notify VFs to stop before PF reset */ 4326 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { 4327 ret = qm_ping_all_vfs(qm, cmd); 4328 if (ret) 4329 pci_err(pdev, "failed to send command to all VFs before PF reset!\n"); 4330 } else { 4331 ret = qm_vf_reset_prepare(qm, stop_reason); 4332 if (ret) 4333 pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret); 4334 } 4335 4336 return ret; 4337 } 4338 4339 static int qm_controller_reset_prepare(struct hisi_qm *qm) 4340 { 4341 struct pci_dev *pdev = qm->pdev; 4342 int ret; 4343 4344 if (qm->err_ini->set_priv_status) { 4345 ret = qm->err_ini->set_priv_status(qm); 4346 if (ret) 4347 return ret; 4348 } 4349 4350 ret = qm_reset_prepare_ready(qm); 4351 if (ret) { 4352 pci_err(pdev, "Controller reset not ready!\n"); 4353 return ret; 4354 } 4355 4356 qm_dev_ecc_mbit_handle(qm); 4357 4358 /* PF obtains the information of VF by querying the register. */ 4359 qm_cmd_uninit(qm); 4360 4361 /* Whether VFs stop successfully, soft reset will continue. 
*/ 4362 ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET); 4363 if (ret) 4364 pci_err(pdev, "failed to stop vfs by pf in soft reset.\n"); 4365 4366 ret = hisi_qm_stop(qm, QM_SOFT_RESET); 4367 if (ret) { 4368 pci_err(pdev, "Fails to stop QM!\n"); 4369 qm_reset_bit_clear(qm); 4370 return ret; 4371 } 4372 4373 if (qm->use_sva) { 4374 ret = qm_hw_err_isolate(qm); 4375 if (ret) 4376 pci_err(pdev, "failed to isolate hw err!\n"); 4377 } 4378 4379 ret = qm_wait_vf_prepare_finish(qm); 4380 if (ret) 4381 pci_err(pdev, "failed to stop by vfs in soft reset!\n"); 4382 4383 clear_bit(QM_RST_SCHED, &qm->misc_ctl); 4384 4385 return 0; 4386 } 4387 4388 static int qm_master_ooo_check(struct hisi_qm *qm) 4389 { 4390 u32 val; 4391 int ret; 4392 4393 /* Check the ooo register of the device before resetting the device. */ 4394 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL); 4395 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, 4396 val, (val == ACC_MASTER_TRANS_RETURN_RW), 4397 POLL_PERIOD, POLL_TIMEOUT); 4398 if (ret) 4399 pci_warn(qm->pdev, "Bus lock! Please reset system.\n"); 4400 4401 return ret; 4402 } 4403 4404 static int qm_soft_reset_prepare(struct hisi_qm *qm) 4405 { 4406 struct pci_dev *pdev = qm->pdev; 4407 int ret; 4408 4409 /* Ensure all doorbells and mailboxes received by QM */ 4410 ret = qm_check_req_recv(qm); 4411 if (ret) 4412 return ret; 4413 4414 if (qm->vfs_num) { 4415 ret = qm_set_vf_mse(qm, false); 4416 if (ret) { 4417 pci_err(pdev, "Fails to disable vf MSE bit.\n"); 4418 return ret; 4419 } 4420 } 4421 4422 ret = qm->ops->set_msi(qm, false); 4423 if (ret) { 4424 pci_err(pdev, "Fails to disable PEH MSI bit.\n"); 4425 return ret; 4426 } 4427 4428 ret = qm_master_ooo_check(qm); 4429 if (ret) 4430 return ret; 4431 4432 if (qm->err_ini->close_sva_prefetch) 4433 qm->err_ini->close_sva_prefetch(qm); 4434 4435 ret = qm_set_pf_mse(qm, false); 4436 if (ret) 4437 pci_err(pdev, "Fails to disable pf MSE bit.\n"); 4438 4439 return ret; 4440 } 4441 4442 static int qm_reset_device(struct hisi_qm *qm) 4443 { 4444 struct pci_dev *pdev = qm->pdev; 4445 4446 /* The reset related sub-control registers are not in PCI BAR */ 4447 if (ACPI_HANDLE(&pdev->dev)) { 4448 unsigned long long value = 0; 4449 acpi_status s; 4450 4451 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), 4452 qm->err_info.acpi_rst, 4453 NULL, &value); 4454 if (ACPI_FAILURE(s)) { 4455 pci_err(pdev, "NO controller reset method!\n"); 4456 return -EIO; 4457 } 4458 4459 if (value) { 4460 pci_err(pdev, "Reset step %llu failed!\n", value); 4461 return -EIO; 4462 } 4463 4464 return 0; 4465 } 4466 4467 pci_err(pdev, "No reset method!\n"); 4468 return -EINVAL; 4469 } 4470 4471 static int qm_soft_reset(struct hisi_qm *qm) 4472 { 4473 int ret; 4474 4475 ret = qm_soft_reset_prepare(qm); 4476 if (ret) 4477 return ret; 4478 4479 return qm_reset_device(qm); 4480 } 4481 4482 static int qm_vf_reset_done(struct hisi_qm *qm) 4483 { 4484 struct hisi_qm_list *qm_list = qm->qm_list; 4485 struct pci_dev *pdev = qm->pdev; 4486 struct pci_dev *virtfn; 4487 struct hisi_qm *vf_qm; 4488 int ret = 0; 4489 4490 mutex_lock(&qm_list->lock); 4491 list_for_each_entry(vf_qm, &qm_list->list, list) { 4492 virtfn = vf_qm->pdev; 4493 if (virtfn == pdev) 4494 continue; 4495 4496 if (pci_physfn(virtfn) == pdev) { 4497 /* enable VFs PCIE BAR configuration */ 4498 pci_restore_state(virtfn); 4499 4500 ret = qm_restart(vf_qm); 4501 if (ret) 4502 goto restart_fail; 4503 } 4504 } 4505 4506 restart_fail: 4507 
mutex_unlock(&qm_list->lock); 4508 return ret; 4509 } 4510 4511 static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_ifc_cmd cmd) 4512 { 4513 struct pci_dev *pdev = qm->pdev; 4514 int ret; 4515 4516 if (!qm->vfs_num) 4517 return 0; 4518 4519 ret = qm_vf_q_assign(qm, qm->vfs_num); 4520 if (ret) { 4521 pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret); 4522 return ret; 4523 } 4524 4525 /* Kunpeng930 supports to notify VFs to start after PF reset. */ 4526 if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { 4527 ret = qm_ping_all_vfs(qm, cmd); 4528 if (ret) 4529 pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n"); 4530 } else { 4531 ret = qm_vf_reset_done(qm); 4532 if (ret) 4533 pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret); 4534 } 4535 4536 return ret; 4537 } 4538 4539 static int qm_dev_hw_init(struct hisi_qm *qm) 4540 { 4541 return qm->err_ini->hw_init(qm); 4542 } 4543 4544 static void qm_restart_prepare(struct hisi_qm *qm) 4545 { 4546 u32 value; 4547 4548 if (qm->ver >= QM_HW_V3) 4549 return; 4550 4551 if (!qm->err_status.is_qm_ecc_mbit && 4552 !qm->err_status.is_dev_ecc_mbit) 4553 return; 4554 4555 /* temporarily close the OOO port used for PEH to write out MSI */ 4556 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4557 writel(value & ~qm->err_info.msi_wr_port, 4558 qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4559 4560 /* clear dev ecc 2bit error source if having */ 4561 value = qm_get_dev_err_status(qm) & qm->err_info.dev_err.ecc_2bits_mask; 4562 if (value && qm->err_ini->clear_dev_hw_err_status) 4563 qm->err_ini->clear_dev_hw_err_status(qm, value); 4564 4565 /* clear QM ecc mbit error source */ 4566 writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE); 4567 4568 /* clear AM Reorder Buffer ecc mbit source */ 4569 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); 4570 } 4571 4572 static void qm_restart_done(struct hisi_qm *qm) 4573 { 4574 u32 value; 4575 4576 if (qm->ver >= QM_HW_V3) 4577 goto clear_flags; 4578 4579 if (!qm->err_status.is_qm_ecc_mbit && 4580 !qm->err_status.is_dev_ecc_mbit) 4581 return; 4582 4583 /* open the OOO port for PEH to write out MSI */ 4584 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4585 value |= qm->err_info.msi_wr_port; 4586 writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); 4587 4588 clear_flags: 4589 qm->err_status.is_qm_ecc_mbit = false; 4590 qm->err_status.is_dev_ecc_mbit = false; 4591 } 4592 4593 static void qm_disable_axi_error(struct hisi_qm *qm) 4594 { 4595 struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err; 4596 u32 val; 4597 4598 val = ~(qm->error_mask & (~QM_RAS_AXI_ERROR)); 4599 writel(val, qm->io_base + QM_ABNORMAL_INT_MASK); 4600 if (qm->ver > QM_HW_V2) 4601 writel(qm_err->shutdown_mask & (~QM_RAS_AXI_ERROR), 4602 qm->io_base + QM_OOO_SHUTDOWN_SEL); 4603 4604 if (qm->err_ini->disable_axi_error) 4605 qm->err_ini->disable_axi_error(qm); 4606 } 4607 4608 static void qm_enable_axi_error(struct hisi_qm *qm) 4609 { 4610 /* clear axi error source */ 4611 writel(QM_RAS_AXI_ERROR, qm->io_base + QM_ABNORMAL_INT_SOURCE); 4612 4613 writel(~qm->error_mask, qm->io_base + QM_ABNORMAL_INT_MASK); 4614 if (qm->ver > QM_HW_V2) 4615 writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL); 4616 4617 if (qm->err_ini->enable_axi_error) 4618 qm->err_ini->enable_axi_error(qm); 4619 } 4620 4621 static int qm_controller_reset_done(struct hisi_qm *qm) 4622 { 4623 struct pci_dev *pdev = qm->pdev; 4624 int ret; 4625 4626 ret = qm->ops->set_msi(qm, true); 4627 if 
(ret) { 4628 pci_err(pdev, "Fails to enable PEH MSI bit!\n"); 4629 return ret; 4630 } 4631 4632 ret = qm_set_pf_mse(qm, true); 4633 if (ret) { 4634 pci_err(pdev, "Fails to enable pf MSE bit!\n"); 4635 return ret; 4636 } 4637 4638 if (qm->vfs_num) { 4639 ret = qm_set_vf_mse(qm, true); 4640 if (ret) { 4641 pci_err(pdev, "Fails to enable vf MSE bit!\n"); 4642 return ret; 4643 } 4644 } 4645 4646 ret = qm_dev_hw_init(qm); 4647 if (ret) { 4648 pci_err(pdev, "Failed to init device\n"); 4649 return ret; 4650 } 4651 4652 qm_restart_prepare(qm); 4653 hisi_qm_dev_err_init(qm); 4654 qm_disable_axi_error(qm); 4655 if (qm->err_ini->open_axi_master_ooo) 4656 qm->err_ini->open_axi_master_ooo(qm); 4657 4658 ret = qm_dev_mem_reset(qm); 4659 if (ret) { 4660 pci_err(pdev, "failed to reset device memory\n"); 4661 return ret; 4662 } 4663 4664 ret = qm_restart(qm); 4665 if (ret) { 4666 pci_err(pdev, "Failed to start QM!\n"); 4667 return ret; 4668 } 4669 4670 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); 4671 if (ret) 4672 pci_err(pdev, "failed to start vfs by pf in soft reset.\n"); 4673 4674 ret = qm_wait_vf_prepare_finish(qm); 4675 if (ret) 4676 pci_err(pdev, "failed to start by vfs in soft reset!\n"); 4677 qm_enable_axi_error(qm); 4678 qm_cmd_init(qm); 4679 qm_restart_done(qm); 4680 4681 qm_reset_bit_clear(qm); 4682 4683 return 0; 4684 } 4685 4686 static int qm_controller_reset(struct hisi_qm *qm) 4687 { 4688 struct pci_dev *pdev = qm->pdev; 4689 int ret; 4690 4691 pci_info(pdev, "Controller resetting...\n"); 4692 4693 ret = qm_controller_reset_prepare(qm); 4694 if (ret) { 4695 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 4696 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 4697 clear_bit(QM_RST_SCHED, &qm->misc_ctl); 4698 return ret; 4699 } 4700 4701 hisi_qm_show_last_dfx_regs(qm); 4702 if (qm->err_ini->show_last_dfx_regs) 4703 qm->err_ini->show_last_dfx_regs(qm); 4704 4705 ret = qm_soft_reset(qm); 4706 if (ret) 4707 goto err_reset; 4708 4709 ret = qm_controller_reset_done(qm); 4710 if (ret) 4711 goto err_reset; 4712 4713 pci_info(pdev, "Controller reset complete\n"); 4714 4715 return 0; 4716 4717 err_reset: 4718 pci_err(pdev, "Controller reset failed (%d)\n", ret); 4719 qm_reset_bit_clear(qm); 4720 4721 /* if resetting fails, isolate the device */ 4722 if (qm->use_sva) 4723 qm->isolate_data.is_isolate = true; 4724 return ret; 4725 } 4726 4727 /** 4728 * hisi_qm_dev_slot_reset() - slot reset 4729 * @pdev: the PCIe device 4730 * 4731 * This function offers QM relate PCIe device reset interface. Drivers which 4732 * use QM can use this function as slot_reset in its struct pci_error_handlers. 4733 */ 4734 pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev) 4735 { 4736 struct hisi_qm *qm = pci_get_drvdata(pdev); 4737 int ret; 4738 4739 if (pdev->is_virtfn) 4740 return PCI_ERS_RESULT_RECOVERED; 4741 4742 /* reset pcie device controller */ 4743 ret = qm_controller_reset(qm); 4744 if (ret) { 4745 pci_err(pdev, "Controller reset failed (%d)\n", ret); 4746 return PCI_ERS_RESULT_DISCONNECT; 4747 } 4748 4749 return PCI_ERS_RESULT_RECOVERED; 4750 } 4751 EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset); 4752 4753 void hisi_qm_reset_prepare(struct pci_dev *pdev) 4754 { 4755 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); 4756 struct hisi_qm *qm = pci_get_drvdata(pdev); 4757 u32 delay = 0; 4758 int ret; 4759 4760 hisi_qm_dev_err_uninit(pf_qm); 4761 4762 /* 4763 * Check whether there is an ECC mbit error, If it occurs, need to 4764 * wait for soft reset to fix it. 
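 * The wait below is bounded: msleep(++delay) sleeps 1 ms, then 2 ms, and
 * so on, so the loop gives up after roughly QM_RESET_WAIT_TIMEOUT^2 / 2
 * milliseconds in the worst case.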
4765 */ 4766 while (qm_check_dev_error(qm)) { 4767 msleep(++delay); 4768 if (delay > QM_RESET_WAIT_TIMEOUT) 4769 return; 4770 } 4771 4772 ret = qm_reset_prepare_ready(qm); 4773 if (ret) { 4774 pci_err(pdev, "FLR not ready!\n"); 4775 return; 4776 } 4777 4778 /* PF obtains the information of VF by querying the register. */ 4779 if (qm->fun_type == QM_HW_PF) 4780 qm_cmd_uninit(qm); 4781 4782 ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN); 4783 if (ret) 4784 pci_err(pdev, "failed to stop vfs by pf in FLR.\n"); 4785 4786 ret = hisi_qm_stop(qm, QM_DOWN); 4787 if (ret) { 4788 pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret); 4789 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 4790 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 4791 return; 4792 } 4793 4794 ret = qm_wait_vf_prepare_finish(qm); 4795 if (ret) 4796 pci_err(pdev, "failed to stop by vfs in FLR!\n"); 4797 4798 pci_info(pdev, "FLR resetting...\n"); 4799 } 4800 EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare); 4801 4802 static bool qm_flr_reset_complete(struct pci_dev *pdev) 4803 { 4804 struct pci_dev *pf_pdev = pci_physfn(pdev); 4805 struct hisi_qm *qm = pci_get_drvdata(pf_pdev); 4806 u32 id; 4807 4808 pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); 4809 if (id == QM_PCI_COMMAND_INVALID) { 4810 pci_err(pdev, "Device can not be used!\n"); 4811 return false; 4812 } 4813 4814 return true; 4815 } 4816 4817 void hisi_qm_reset_done(struct pci_dev *pdev) 4818 { 4819 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); 4820 struct hisi_qm *qm = pci_get_drvdata(pdev); 4821 int ret; 4822 4823 if (qm->fun_type == QM_HW_PF) { 4824 ret = qm_dev_hw_init(qm); 4825 if (ret) { 4826 pci_err(pdev, "Failed to init PF, ret = %d.\n", ret); 4827 goto flr_done; 4828 } 4829 } 4830 4831 hisi_qm_dev_err_init(pf_qm); 4832 4833 ret = qm_restart(qm); 4834 if (ret) { 4835 pci_err(pdev, "Failed to start QM, ret = %d.\n", ret); 4836 goto flr_done; 4837 } 4838 4839 ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); 4840 if (ret) 4841 pci_err(pdev, "failed to start vfs by pf in FLR.\n"); 4842 4843 ret = qm_wait_vf_prepare_finish(qm); 4844 if (ret) 4845 pci_err(pdev, "failed to start by vfs in FLR!\n"); 4846 4847 flr_done: 4848 if (qm->fun_type == QM_HW_PF) 4849 qm_cmd_init(qm); 4850 4851 if (qm_flr_reset_complete(pdev)) 4852 pci_info(pdev, "FLR reset complete\n"); 4853 4854 qm_reset_bit_clear(qm); 4855 } 4856 EXPORT_SYMBOL_GPL(hisi_qm_reset_done); 4857 4858 static irqreturn_t qm_rsvd_irq(int irq, void *data) 4859 { 4860 struct hisi_qm *qm = data; 4861 4862 dev_info(&qm->pdev->dev, "Reserved interrupt, ignore!\n"); 4863 4864 return IRQ_HANDLED; 4865 } 4866 4867 static irqreturn_t qm_abnormal_irq(int irq, void *data) 4868 { 4869 struct hisi_qm *qm = data; 4870 enum acc_err_result ret; 4871 4872 atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); 4873 ret = qm_process_dev_error(qm); 4874 if (ret == ACC_ERR_NEED_RESET && 4875 !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) && 4876 !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl)) 4877 schedule_work(&qm->rst_work); 4878 4879 return IRQ_HANDLED; 4880 } 4881 4882 /** 4883 * hisi_qm_dev_shutdown() - Shutdown device. 4884 * @pdev: The device will be shutdown. 4885 * 4886 * This function will stop qm when OS shutdown or rebooting. 
4887 */ 4888 void hisi_qm_dev_shutdown(struct pci_dev *pdev) 4889 { 4890 struct hisi_qm *qm = pci_get_drvdata(pdev); 4891 int ret; 4892 4893 ret = hisi_qm_stop(qm, QM_DOWN); 4894 if (ret) 4895 dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n"); 4896 } 4897 EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown); 4898 4899 static void hisi_qm_controller_reset(struct work_struct *rst_work) 4900 { 4901 struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work); 4902 int ret; 4903 4904 ret = qm_pm_get_sync(qm); 4905 if (ret) { 4906 clear_bit(QM_RST_SCHED, &qm->misc_ctl); 4907 return; 4908 } 4909 4910 /* reset pcie device controller */ 4911 ret = qm_controller_reset(qm); 4912 if (ret) 4913 dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); 4914 4915 qm_pm_put_sync(qm); 4916 } 4917 4918 static void qm_pf_reset_vf_prepare(struct hisi_qm *qm, 4919 enum qm_stop_reason stop_reason) 4920 { 4921 enum qm_ifc_cmd cmd = QM_VF_PREPARE_DONE; 4922 struct pci_dev *pdev = qm->pdev; 4923 int ret; 4924 4925 ret = qm_reset_prepare_ready(qm); 4926 if (ret) { 4927 dev_err(&pdev->dev, "reset prepare not ready!\n"); 4928 atomic_set(&qm->status.flags, QM_STOP); 4929 cmd = QM_VF_PREPARE_FAIL; 4930 goto err_prepare; 4931 } 4932 4933 ret = hisi_qm_stop(qm, stop_reason); 4934 if (ret) { 4935 dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret); 4936 atomic_set(&qm->status.flags, QM_STOP); 4937 cmd = QM_VF_PREPARE_FAIL; 4938 goto err_prepare; 4939 } else { 4940 goto out; 4941 } 4942 4943 err_prepare: 4944 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); 4945 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); 4946 out: 4947 pci_save_state(pdev); 4948 ret = qm_ping_pf(qm, cmd); 4949 if (ret) 4950 dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n"); 4951 } 4952 4953 static void qm_pf_reset_vf_done(struct hisi_qm *qm) 4954 { 4955 enum qm_ifc_cmd cmd = QM_VF_START_DONE; 4956 struct pci_dev *pdev = qm->pdev; 4957 int ret; 4958 4959 pci_restore_state(pdev); 4960 ret = hisi_qm_start(qm); 4961 if (ret) { 4962 dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret); 4963 cmd = QM_VF_START_FAIL; 4964 } 4965 4966 qm_cmd_init(qm); 4967 ret = qm_ping_pf(qm, cmd); 4968 if (ret) 4969 dev_warn(&pdev->dev, "PF responds timeout in reset done!\n"); 4970 4971 qm_reset_bit_clear(qm); 4972 } 4973 4974 static int qm_wait_pf_reset_finish(struct hisi_qm *qm) 4975 { 4976 struct device *dev = &qm->pdev->dev; 4977 u32 val, cmd; 4978 int ret; 4979 4980 /* Wait for reset to finish */ 4981 ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val, 4982 val == BIT(0), QM_VF_RESET_WAIT_US, 4983 QM_VF_RESET_WAIT_TIMEOUT_US); 4984 /* hardware completion status should be available by this time */ 4985 if (ret) { 4986 dev_err(dev, "couldn't get reset done status from PF, timeout!\n"); 4987 return -ETIMEDOUT; 4988 } 4989 4990 /* 4991 * Whether message is got successfully, 4992 * VF needs to ack PF by clearing the interrupt. 
4993 */ 4994 ret = qm->ops->get_ifc(qm, &cmd, NULL, 0); 4995 qm_clear_cmd_interrupt(qm, 0); 4996 if (ret) { 4997 dev_err(dev, "failed to get command from PF in reset done!\n"); 4998 return ret; 4999 } 5000 5001 if (cmd != QM_PF_RESET_DONE) { 5002 dev_err(dev, "the command(0x%x) is not reset done!\n", cmd); 5003 ret = -EINVAL; 5004 } 5005 5006 return ret; 5007 } 5008 5009 static void qm_pf_reset_vf_process(struct hisi_qm *qm, 5010 enum qm_stop_reason stop_reason) 5011 { 5012 struct device *dev = &qm->pdev->dev; 5013 int ret; 5014 5015 dev_info(dev, "device reset start...\n"); 5016 5017 /* The message is obtained by querying the register during resetting */ 5018 qm_cmd_uninit(qm); 5019 qm_pf_reset_vf_prepare(qm, stop_reason); 5020 5021 ret = qm_wait_pf_reset_finish(qm); 5022 if (ret) 5023 goto err_get_status; 5024 5025 qm_pf_reset_vf_done(qm); 5026 5027 dev_info(dev, "device reset done.\n"); 5028 5029 return; 5030 5031 err_get_status: 5032 qm_cmd_init(qm); 5033 qm_reset_bit_clear(qm); 5034 } 5035 5036 static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) 5037 { 5038 struct device *dev = &qm->pdev->dev; 5039 enum qm_ifc_cmd cmd; 5040 u32 data; 5041 int ret; 5042 5043 /* 5044 * Get the msg from the source by mailbox. Whether or not the message is 5045 * fetched successfully, the destination must ack the source by clearing the interrupt. 5046 */ 5047 ret = qm->ops->get_ifc(qm, &cmd, &data, fun_num); 5048 qm_clear_cmd_interrupt(qm, BIT(fun_num)); 5049 if (ret) { 5050 dev_err(dev, "failed to get command from source!\n"); 5051 return; 5052 } 5053 5054 switch (cmd) { 5055 case QM_PF_FLR_PREPARE: 5056 qm_pf_reset_vf_process(qm, QM_DOWN); 5057 break; 5058 case QM_PF_SRST_PREPARE: 5059 qm_pf_reset_vf_process(qm, QM_SOFT_RESET); 5060 break; 5061 case QM_VF_GET_QOS: 5062 qm_vf_get_qos(qm, fun_num); 5063 break; 5064 case QM_PF_SET_QOS: 5065 qm->mb_qos = data; 5066 break; 5067 default: 5068 dev_err(dev, "unsupported command(0x%x) sent by function(%u)!\n", cmd, fun_num); 5069 break; 5070 } 5071 } 5072 5073 static void qm_cmd_process(struct work_struct *cmd_process) 5074 { 5075 struct hisi_qm *qm = container_of(cmd_process, 5076 struct hisi_qm, cmd_process); 5077 u32 vfs_num = qm->vfs_num; 5078 u64 val; 5079 u32 i; 5080 5081 if (qm->fun_type == QM_HW_PF) { 5082 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); 5083 if (!val) 5084 return; 5085 5086 for (i = 1; i <= vfs_num; i++) { 5087 if (val & BIT(i)) 5088 qm_handle_cmd_msg(qm, i); 5089 } 5090 5091 return; 5092 } 5093 5094 qm_handle_cmd_msg(qm, 0); 5095 } 5096 5097 /** 5098 * hisi_qm_alg_register() - Register alg to crypto. 5099 * @qm: The qm to be registered. 5100 * @qm_list: The qm list. 5101 * @guard: The minimum qp_num required. 5102 * 5103 * Register the algorithm with crypto when the function satisfies the guard. 5104 */ 5105 int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard) 5106 { 5107 struct device *dev = &qm->pdev->dev; 5108 5109 if (qm->ver <= QM_HW_V2 && qm->use_sva) { 5110 dev_info(dev, "HW V2 can not use uacce sva mode and hardware crypto algs at the same time.\n"); 5111 return 0; 5112 } 5113 5114 if (qm->qp_num < guard) { 5115 dev_info(dev, "qp_num is less than the task needs.\n"); 5116 return 0; 5117 } 5118 5119 return qm_list->register_to_crypto(qm); 5120 } 5121 EXPORT_SYMBOL_GPL(hisi_qm_alg_register); 5122 5123 /** 5124 * hisi_qm_alg_unregister() - Unregister alg from crypto. 5125 * @qm: The qm to be unregistered. 5126 * @qm_list: The qm list. 5127 * @guard: The minimum qp_num required. 5128 * 5129 * Unregister the algorithm from crypto when the last function satisfies the guard.
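 *
 * Registration and unregistration are symmetric; an accelerator driver
 * might pair them like this (sketch, names hypothetical):
 *
 *	ret = hisi_qm_alg_register(qm, &xxx_devices, XXX_CTX_Q_NUM_DEF);
 *	if (ret)
 *		goto err_qm_stop;
 *	...
 *	hisi_qm_alg_unregister(qm, &xxx_devices, XXX_CTX_Q_NUM_DEF);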
5130 */ 5131 void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard) 5132 { 5133 if (qm->ver <= QM_HW_V2 && qm->use_sva) 5134 return; 5135 5136 if (qm->qp_num < guard) 5137 return; 5138 5139 qm_list->unregister_from_crypto(qm); 5140 } 5141 EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister); 5142 5143 static void qm_unregister_abnormal_irq(struct hisi_qm *qm) 5144 { 5145 struct pci_dev *pdev = qm->pdev; 5146 u32 irq_vector, val; 5147 5148 if (qm->fun_type == QM_HW_VF && qm->ver < QM_HW_V3) 5149 return; 5150 5151 val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val; 5152 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) 5153 return; 5154 5155 irq_vector = val & QM_IRQ_VECTOR_MASK; 5156 free_irq(pci_irq_vector(pdev, irq_vector), qm); 5157 } 5158 5159 static int qm_register_abnormal_irq(struct hisi_qm *qm) 5160 { 5161 struct pci_dev *pdev = qm->pdev; 5162 u32 irq_vector, val; 5163 int ret; 5164 5165 val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val; 5166 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) 5167 return 0; 5168 irq_vector = val & QM_IRQ_VECTOR_MASK; 5169 5170 /* For VF, this is a reserved interrupt in V3 version. */ 5171 if (qm->fun_type == QM_HW_VF) { 5172 if (qm->ver < QM_HW_V3) 5173 return 0; 5174 5175 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_rsvd_irq, 5176 IRQF_NO_AUTOEN, qm->dev_name, qm); 5177 if (ret) { 5178 dev_err(&pdev->dev, "failed to request reserved irq, ret = %d!\n", ret); 5179 return ret; 5180 } 5181 return 0; 5182 } 5183 5184 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm); 5185 if (ret) 5186 dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d!\n", ret); 5187 5188 return ret; 5189 } 5190 5191 static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm) 5192 { 5193 struct pci_dev *pdev = qm->pdev; 5194 u32 irq_vector, val; 5195 5196 val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val; 5197 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 5198 return; 5199 5200 irq_vector = val & QM_IRQ_VECTOR_MASK; 5201 free_irq(pci_irq_vector(pdev, irq_vector), qm); 5202 } 5203 5204 static int qm_register_mb_cmd_irq(struct hisi_qm *qm) 5205 { 5206 struct pci_dev *pdev = qm->pdev; 5207 u32 irq_vector, val; 5208 int ret; 5209 5210 val = qm->cap_tables.qm_cap_table[QM_MB_IRQ].cap_val; 5211 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 5212 return 0; 5213 5214 irq_vector = val & QM_IRQ_VECTOR_MASK; 5215 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm); 5216 if (ret) 5217 dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret); 5218 5219 return ret; 5220 } 5221 5222 static void qm_unregister_aeq_irq(struct hisi_qm *qm) 5223 { 5224 struct pci_dev *pdev = qm->pdev; 5225 u32 irq_vector, val; 5226 5227 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val; 5228 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 5229 return; 5230 5231 irq_vector = val & QM_IRQ_VECTOR_MASK; 5232 free_irq(pci_irq_vector(pdev, irq_vector), qm); 5233 } 5234 5235 static int qm_register_aeq_irq(struct hisi_qm *qm) 5236 { 5237 struct pci_dev *pdev = qm->pdev; 5238 u32 irq_vector, val; 5239 int ret; 5240 5241 val = qm->cap_tables.qm_cap_table[QM_AEQ_IRQ].cap_val; 5242 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 5243 return 0; 5244 5245 irq_vector = val & QM_IRQ_VECTOR_MASK; 5246 ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), NULL, 5247 qm_aeq_thread, IRQF_ONESHOT, qm->dev_name, qm); 5248 if 
(ret) 5249 dev_err(&pdev->dev, "failed to request aeq irq, ret = %d", ret); 5250 5251 return ret; 5252 } 5253 5254 static void qm_unregister_eq_irq(struct hisi_qm *qm) 5255 { 5256 struct pci_dev *pdev = qm->pdev; 5257 u32 irq_vector, val; 5258 5259 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val; 5260 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 5261 return; 5262 5263 irq_vector = val & QM_IRQ_VECTOR_MASK; 5264 free_irq(pci_irq_vector(pdev, irq_vector), qm); 5265 } 5266 5267 static int qm_register_eq_irq(struct hisi_qm *qm) 5268 { 5269 struct pci_dev *pdev = qm->pdev; 5270 u32 irq_vector, val; 5271 int ret; 5272 5273 val = qm->cap_tables.qm_cap_table[QM_EQ_IRQ].cap_val; 5274 if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) 5275 return 0; 5276 5277 irq_vector = val & QM_IRQ_VECTOR_MASK; 5278 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm); 5279 if (ret) 5280 dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); 5281 5282 return ret; 5283 } 5284 5285 static void qm_irqs_unregister(struct hisi_qm *qm) 5286 { 5287 qm_unregister_mb_cmd_irq(qm); 5288 qm_unregister_abnormal_irq(qm); 5289 qm_unregister_aeq_irq(qm); 5290 qm_unregister_eq_irq(qm); 5291 } 5292 5293 static int qm_irqs_register(struct hisi_qm *qm) 5294 { 5295 int ret; 5296 5297 ret = qm_register_eq_irq(qm); 5298 if (ret) 5299 return ret; 5300 5301 ret = qm_register_aeq_irq(qm); 5302 if (ret) 5303 goto free_eq_irq; 5304 5305 ret = qm_register_abnormal_irq(qm); 5306 if (ret) 5307 goto free_aeq_irq; 5308 5309 ret = qm_register_mb_cmd_irq(qm); 5310 if (ret) 5311 goto free_abnormal_irq; 5312 5313 return 0; 5314 5315 free_abnormal_irq: 5316 qm_unregister_abnormal_irq(qm); 5317 free_aeq_irq: 5318 qm_unregister_aeq_irq(qm); 5319 free_eq_irq: 5320 qm_unregister_eq_irq(qm); 5321 return ret; 5322 } 5323 5324 static int qm_get_qp_num(struct hisi_qm *qm) 5325 { 5326 struct device *dev = &qm->pdev->dev; 5327 bool is_db_isolation; 5328 5329 /* The VF's qp_num is assigned by the PF in v2, and the VF can get its qp_num via the vft.
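 * On v1 the function below simply returns 0 and leaves qp_num as it was
 * configured at init time.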
*/ 5330 if (qm->fun_type == QM_HW_VF) { 5331 if (qm->ver != QM_HW_V1) 5332 /* v2 and later support getting the vft by mailbox */ 5333 return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); 5334 5335 return 0; 5336 } 5337 5338 is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); 5339 qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true); 5340 qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, 5341 QM_FUNC_MAX_QP_CAP, is_db_isolation); 5342 5343 if (qm->qp_num <= qm->max_qp_num) 5344 return 0; 5345 5346 if (test_bit(QM_MODULE_PARAM, &qm->misc_ctl)) { 5347 /* Check whether the set qp number is valid */ 5348 dev_err(dev, "qp num(%u) is more than max qp num(%u)!\n", 5349 qm->qp_num, qm->max_qp_num); 5350 return -EINVAL; 5351 } 5352 5353 dev_info(dev, "Default qp num(%u) is too big, reset it to Function's max qp num(%u)!\n", 5354 qm->qp_num, qm->max_qp_num); 5355 qm->qp_num = qm->max_qp_num; 5356 qm->debug.curr_qm_qp_num = qm->qp_num; 5357 5358 return 0; 5359 } 5360 5361 static int qm_pre_store_caps(struct hisi_qm *qm) 5362 { 5363 struct hisi_qm_cap_record *qm_cap; 5364 struct pci_dev *pdev = qm->pdev; 5365 size_t i, size; 5366 5367 size = ARRAY_SIZE(qm_cap_query_info); 5368 qm_cap = devm_kcalloc(&pdev->dev, sizeof(*qm_cap), size, GFP_KERNEL); 5369 if (!qm_cap) 5370 return -ENOMEM; 5371 5372 for (i = 0; i < size; i++) { 5373 qm_cap[i].type = qm_cap_query_info[i].type; 5374 qm_cap[i].name = qm_cap_query_info[i].name; 5375 qm_cap[i].cap_val = hisi_qm_get_cap_value(qm, qm_cap_query_info, 5376 i, qm->cap_ver); 5377 } 5378 5379 qm->cap_tables.qm_cap_table = qm_cap; 5380 qm->cap_tables.qm_cap_size = size; 5381 5382 return 0; 5383 } 5384 5385 static int qm_get_hw_caps(struct hisi_qm *qm) 5386 { 5387 const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ? 5388 qm_cap_info_pf : qm_cap_info_vf; 5389 u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) : 5390 ARRAY_SIZE(qm_cap_info_vf); 5391 u32 val, i; 5392 5393 /* The doorbell isolation register is an independent register.
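 * When QM_SUPPORT_DB_ISOLATION is set, doorbells go through a separate
 * PCI_BAR_4 mapping with QM_QP_DB_INTERVAL bytes between queues' doorbell
 * pages (see qm_get_pci_res() below); otherwise they share the BAR_2
 * mapping and db_interval stays 0.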
*/ 5394 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true); 5395 if (val) 5396 set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); 5397 5398 if (qm->ver >= QM_HW_V3) { 5399 val = readl(qm->io_base + QM_FUNC_CAPS_REG); 5400 qm->cap_ver = val & QM_CAPBILITY_VERSION; 5401 } 5402 5403 /* Get PF/VF common capbility */ 5404 for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) { 5405 val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver); 5406 if (val) 5407 set_bit(qm_cap_info_comm[i].type, &qm->caps); 5408 } 5409 5410 /* Get PF/VF different capbility */ 5411 for (i = 0; i < size; i++) { 5412 val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver); 5413 if (val) 5414 set_bit(cap_info[i].type, &qm->caps); 5415 } 5416 5417 /* Fetch and save the value of qm capability registers */ 5418 return qm_pre_store_caps(qm); 5419 } 5420 5421 static void qm_get_version(struct hisi_qm *qm) 5422 { 5423 struct pci_dev *pdev = qm->pdev; 5424 u32 sub_version_id; 5425 5426 qm->ver = pdev->revision; 5427 5428 if (pdev->revision == QM_HW_V3) { 5429 sub_version_id = readl(qm->io_base + QM_SUB_VERSION_ID); 5430 if (sub_version_id) 5431 qm->ver = sub_version_id; 5432 } 5433 } 5434 5435 static int qm_get_pci_res(struct hisi_qm *qm) 5436 { 5437 struct pci_dev *pdev = qm->pdev; 5438 struct device *dev = &pdev->dev; 5439 int ret; 5440 5441 ret = pci_request_mem_regions(pdev, qm->dev_name); 5442 if (ret < 0) { 5443 dev_err(dev, "Failed to request mem regions!\n"); 5444 return ret; 5445 } 5446 5447 qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); 5448 qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2)); 5449 if (!qm->io_base) { 5450 ret = -EIO; 5451 goto err_request_mem_regions; 5452 } 5453 5454 qm_get_version(qm); 5455 5456 ret = qm_get_hw_caps(qm); 5457 if (ret) 5458 goto err_ioremap; 5459 5460 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { 5461 qm->db_interval = QM_QP_DB_INTERVAL; 5462 qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); 5463 qm->db_io_base = ioremap(qm->db_phys_base, 5464 pci_resource_len(pdev, PCI_BAR_4)); 5465 if (!qm->db_io_base) { 5466 ret = -EIO; 5467 goto err_ioremap; 5468 } 5469 } else { 5470 qm->db_phys_base = qm->phys_base; 5471 qm->db_io_base = qm->io_base; 5472 qm->db_interval = 0; 5473 } 5474 5475 hisi_qm_pre_init(qm); 5476 ret = qm_get_qp_num(qm); 5477 if (ret) 5478 goto err_db_ioremap; 5479 5480 return 0; 5481 5482 err_db_ioremap: 5483 if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) 5484 iounmap(qm->db_io_base); 5485 err_ioremap: 5486 iounmap(qm->io_base); 5487 err_request_mem_regions: 5488 pci_release_mem_regions(pdev); 5489 return ret; 5490 } 5491 5492 static int qm_clear_device(struct hisi_qm *qm) 5493 { 5494 acpi_handle handle = ACPI_HANDLE(&qm->pdev->dev); 5495 int ret; 5496 5497 if (qm->fun_type == QM_HW_VF) 5498 return 0; 5499 5500 /* Device does not support reset, return */ 5501 if (!qm->err_ini->err_info_init) 5502 return 0; 5503 qm->err_ini->err_info_init(qm); 5504 5505 if (!handle) 5506 return 0; 5507 5508 /* No reset method, return */ 5509 if (!acpi_has_method(handle, qm->err_info.acpi_rst)) 5510 return 0; 5511 5512 ret = qm_master_ooo_check(qm); 5513 if (ret) { 5514 writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL); 5515 return ret; 5516 } 5517 5518 if (qm->err_ini->set_priv_status) { 5519 ret = qm->err_ini->set_priv_status(qm); 5520 if (ret) { 5521 writel(0x0, qm->io_base + ACC_MASTER_GLOBAL_CTRL); 5522 return ret; 5523 } 5524 } 5525 5526 return qm_reset_device(qm); 5527 } 5528 5529 static int hisi_qm_pci_init(struct 
hisi_qm *qm) 5530 { 5531 struct pci_dev *pdev = qm->pdev; 5532 struct device *dev = &pdev->dev; 5533 unsigned int num_vec; 5534 int ret; 5535 5536 ret = pci_enable_device_mem(pdev); 5537 if (ret < 0) { 5538 dev_err(dev, "Failed to enable device mem!\n"); 5539 return ret; 5540 } 5541 5542 ret = qm_get_pci_res(qm); 5543 if (ret) 5544 goto err_disable_pcidev; 5545 5546 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); 5547 if (ret < 0) 5548 goto err_get_pci_res; 5549 pci_set_master(pdev); 5550 5551 num_vec = qm_get_irq_num(qm); 5552 if (!num_vec) { 5553 dev_err(dev, "Device irq num is zero!\n"); 5554 ret = -EINVAL; 5555 goto err_get_pci_res; 5556 } 5557 num_vec = roundup_pow_of_two(num_vec); 5558 ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); 5559 if (ret < 0) { 5560 dev_err(dev, "Failed to enable MSI vectors!\n"); 5561 goto err_get_pci_res; 5562 } 5563 5564 ret = qm_clear_device(qm); 5565 if (ret) 5566 goto err_free_vectors; 5567 5568 return 0; 5569 5570 err_free_vectors: 5571 pci_free_irq_vectors(pdev); 5572 err_get_pci_res: 5573 qm_put_pci_res(qm); 5574 err_disable_pcidev: 5575 pci_disable_device(pdev); 5576 return ret; 5577 } 5578 5579 static int hisi_qm_init_work(struct hisi_qm *qm) 5580 { 5581 int i; 5582 5583 for (i = 0; i < qm->qp_num; i++) 5584 INIT_WORK(&qm->poll_data[i].work, qm_work_process); 5585 5586 if (qm->fun_type == QM_HW_PF) 5587 INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); 5588 5589 if (qm->ver > QM_HW_V2) 5590 INIT_WORK(&qm->cmd_process, qm_cmd_process); 5591 5592 qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | 5593 WQ_UNBOUND, num_online_cpus(), 5594 pci_name(qm->pdev)); 5595 if (!qm->wq) { 5596 pci_err(qm->pdev, "failed to alloc workqueue!\n"); 5597 return -ENOMEM; 5598 } 5599 5600 return 0; 5601 } 5602 5603 static int hisi_qp_alloc_memory(struct hisi_qm *qm) 5604 { 5605 struct device *dev = &qm->pdev->dev; 5606 u16 sq_depth, cq_depth; 5607 size_t qp_dma_size; 5608 int i, ret; 5609 5610 qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); 5611 if (!qm->qp_array) 5612 return -ENOMEM; 5613 5614 qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); 5615 if (!qm->poll_data) { 5616 kfree(qm->qp_array); 5617 return -ENOMEM; 5618 } 5619 5620 qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); 5621 5622 /* one more page for device or qp statuses */ 5623 qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth; 5624 qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE; 5625 for (i = 0; i < qm->qp_num; i++) { 5626 qm->poll_data[i].qm = qm; 5627 ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth); 5628 if (ret) 5629 goto err_init_qp_mem; 5630 5631 dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size); 5632 } 5633 5634 return 0; 5635 err_init_qp_mem: 5636 hisi_qp_memory_uninit(qm, i); 5637 5638 return ret; 5639 } 5640 5641 static int hisi_qm_alloc_rsv_buf(struct hisi_qm *qm) 5642 { 5643 struct qm_rsv_buf *xqc_buf = &qm->xqc_buf; 5644 struct qm_dma *xqc_dma = &xqc_buf->qcdma; 5645 struct device *dev = &qm->pdev->dev; 5646 size_t off = 0; 5647 5648 #define QM_XQC_BUF_INIT(xqc_buf, type) do { \ 5649 (xqc_buf)->type = ((xqc_buf)->qcdma.va + (off)); \ 5650 (xqc_buf)->type##_dma = (xqc_buf)->qcdma.dma + (off); \ 5651 off += QMC_ALIGN(sizeof(struct qm_##type)); \ 5652 } while (0) 5653 5654 xqc_dma->size = QMC_ALIGN(sizeof(struct qm_eqc)) + 5655 QMC_ALIGN(sizeof(struct qm_aeqc)) + 5656 QMC_ALIGN(sizeof(struct qm_sqc)) + 5657 QMC_ALIGN(sizeof(struct 
qm_cqc)); 5658 xqc_dma->va = dma_alloc_coherent(dev, xqc_dma->size, 5659 &xqc_dma->dma, GFP_KERNEL); 5660 if (!xqc_dma->va) 5661 return -ENOMEM; 5662 5663 QM_XQC_BUF_INIT(xqc_buf, eqc); 5664 QM_XQC_BUF_INIT(xqc_buf, aeqc); 5665 QM_XQC_BUF_INIT(xqc_buf, sqc); 5666 QM_XQC_BUF_INIT(xqc_buf, cqc); 5667 5668 return 0; 5669 } 5670 5671 static int hisi_qm_memory_init(struct hisi_qm *qm) 5672 { 5673 struct device *dev = &qm->pdev->dev; 5674 int ret, total_func; 5675 size_t off = 0; 5676 5677 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { 5678 total_func = pci_sriov_get_totalvfs(qm->pdev) + 1; 5679 qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL); 5680 if (!qm->factor) 5681 return -ENOMEM; 5682 5683 /* Only the PF value needs to be initialized */ 5684 qm->factor[0].func_qos = QM_QOS_MAX_VAL; 5685 } 5686 5687 #define QM_INIT_BUF(qm, type, num) do { \ 5688 (qm)->type = ((qm)->qdma.va + (off)); \ 5689 (qm)->type##_dma = (qm)->qdma.dma + (off); \ 5690 off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \ 5691 } while (0) 5692 5693 idr_init(&qm->qp_idr); 5694 qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); 5695 qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) + 5696 QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) + 5697 QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + 5698 QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); 5699 qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, 5700 GFP_ATOMIC); 5701 dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size); 5702 if (!qm->qdma.va) { 5703 ret = -ENOMEM; 5704 goto err_destroy_idr; 5705 } 5706 5707 QM_INIT_BUF(qm, eqe, qm->eq_depth); 5708 QM_INIT_BUF(qm, aeqe, qm->aeq_depth); 5709 QM_INIT_BUF(qm, sqc, qm->qp_num); 5710 QM_INIT_BUF(qm, cqc, qm->qp_num); 5711 5712 ret = hisi_qm_alloc_rsv_buf(qm); 5713 if (ret) 5714 goto err_free_qdma; 5715 5716 ret = hisi_qp_alloc_memory(qm); 5717 if (ret) 5718 goto err_free_reserve_buf; 5719 5720 return 0; 5721 5722 err_free_reserve_buf: 5723 hisi_qm_free_rsv_buf(qm); 5724 err_free_qdma: 5725 dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); 5726 err_destroy_idr: 5727 idr_destroy(&qm->qp_idr); 5728 if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) 5729 kfree(qm->factor); 5730 5731 return ret; 5732 } 5733 5734 /** 5735 * hisi_qm_init() - Initialize configures about qm. 5736 * @qm: The qm needing init. 5737 * 5738 * This function init qm, then we can call hisi_qm_start to put qm into work. 5739 */ 5740 int hisi_qm_init(struct hisi_qm *qm) 5741 { 5742 struct pci_dev *pdev = qm->pdev; 5743 struct device *dev = &pdev->dev; 5744 int ret; 5745 5746 ret = hisi_qm_pci_init(qm); 5747 if (ret) 5748 return ret; 5749 5750 ret = qm_irqs_register(qm); 5751 if (ret) 5752 goto err_pci_init; 5753 5754 if (qm->fun_type == QM_HW_PF) { 5755 /* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. 
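 * (QM_DB_TIMEOUT_SET is the timeout value being written; QM_DB_TIMEOUT_CFG
 * is the register it is written to.)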
*/ 5756 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); 5757 qm_disable_clock_gate(qm); 5758 ret = qm_dev_mem_reset(qm); 5759 if (ret) { 5760 dev_err(dev, "failed to reset device memory\n"); 5761 goto err_irq_register; 5762 } 5763 } 5764 5765 if (qm->mode == UACCE_MODE_SVA) { 5766 ret = qm_alloc_uacce(qm); 5767 if (ret < 0) 5768 dev_warn(dev, "fail to alloc uacce (%d)\n", ret); 5769 } 5770 5771 ret = hisi_qm_memory_init(qm); 5772 if (ret) 5773 goto err_alloc_uacce; 5774 5775 ret = hisi_qm_init_work(qm); 5776 if (ret) 5777 goto err_free_qm_memory; 5778 5779 qm_cmd_init(qm); 5780 hisi_mig_region_enable(qm); 5781 5782 return 0; 5783 5784 err_free_qm_memory: 5785 hisi_qm_memory_uninit(qm); 5786 err_alloc_uacce: 5787 qm_remove_uacce(qm); 5788 err_irq_register: 5789 qm_irqs_unregister(qm); 5790 err_pci_init: 5791 hisi_qm_pci_uninit(qm); 5792 return ret; 5793 } 5794 EXPORT_SYMBOL_GPL(hisi_qm_init); 5795 5796 /** 5797 * hisi_qm_get_dfx_access() - Try to get dfx access. 5798 * @qm: pointer to accelerator device. 5799 * 5800 * Try to get dfx access, then user can get message. 5801 * 5802 * If device is in suspended, return failure, otherwise 5803 * bump up the runtime PM usage counter. 5804 */ 5805 int hisi_qm_get_dfx_access(struct hisi_qm *qm) 5806 { 5807 struct device *dev = &qm->pdev->dev; 5808 5809 if (pm_runtime_suspended(dev)) { 5810 dev_info(dev, "can not read/write - device in suspended.\n"); 5811 return -EAGAIN; 5812 } 5813 5814 return qm_pm_get_sync(qm); 5815 } 5816 EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access); 5817 5818 /** 5819 * hisi_qm_put_dfx_access() - Put dfx access. 5820 * @qm: pointer to accelerator device. 5821 * 5822 * Put dfx access, drop runtime PM usage counter. 5823 */ 5824 void hisi_qm_put_dfx_access(struct hisi_qm *qm) 5825 { 5826 qm_pm_put_sync(qm); 5827 } 5828 EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access); 5829 5830 /** 5831 * hisi_qm_pm_init() - Initialize qm runtime PM. 5832 * @qm: pointer to accelerator device. 5833 * 5834 * Function that initialize qm runtime PM. 5835 */ 5836 void hisi_qm_pm_init(struct hisi_qm *qm) 5837 { 5838 struct device *dev = &qm->pdev->dev; 5839 5840 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) 5841 return; 5842 5843 pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY); 5844 pm_runtime_use_autosuspend(dev); 5845 pm_runtime_put_noidle(dev); 5846 } 5847 EXPORT_SYMBOL_GPL(hisi_qm_pm_init); 5848 5849 /** 5850 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM. 5851 * @qm: pointer to accelerator device. 5852 * 5853 * Function that uninitialize qm runtime PM. 
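 *
 * This undoes hisi_qm_pm_init(): pm_runtime_get_noresume() takes back the
 * usage count dropped there, so runtime autosuspend no longer triggers.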
5854 */ 5855 void hisi_qm_pm_uninit(struct hisi_qm *qm) 5856 { 5857 struct device *dev = &qm->pdev->dev; 5858 5859 if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) 5860 return; 5861 5862 pm_runtime_get_noresume(dev); 5863 pm_runtime_dont_use_autosuspend(dev); 5864 } 5865 EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit); 5866 5867 static int qm_prepare_for_suspend(struct hisi_qm *qm) 5868 { 5869 struct pci_dev *pdev = qm->pdev; 5870 int ret; 5871 5872 ret = qm->ops->set_msi(qm, false); 5873 if (ret) { 5874 pci_err(pdev, "failed to disable MSI before suspending!\n"); 5875 return ret; 5876 } 5877 5878 ret = qm_master_ooo_check(qm); 5879 if (ret) 5880 return ret; 5881 5882 if (qm->err_ini->set_priv_status) { 5883 ret = qm->err_ini->set_priv_status(qm); 5884 if (ret) 5885 return ret; 5886 } 5887 5888 ret = qm_set_pf_mse(qm, false); 5889 if (ret) 5890 pci_err(pdev, "failed to disable MSE before suspending!\n"); 5891 5892 return ret; 5893 } 5894 5895 static int qm_rebuild_for_resume(struct hisi_qm *qm) 5896 { 5897 struct pci_dev *pdev = qm->pdev; 5898 int ret; 5899 5900 ret = qm_set_pf_mse(qm, true); 5901 if (ret) { 5902 pci_err(pdev, "failed to enable MSE after resuming!\n"); 5903 return ret; 5904 } 5905 5906 ret = qm->ops->set_msi(qm, true); 5907 if (ret) { 5908 pci_err(pdev, "failed to enable MSI after resuming!\n"); 5909 return ret; 5910 } 5911 5912 ret = qm_dev_hw_init(qm); 5913 if (ret) { 5914 pci_err(pdev, "failed to init device after resuming\n"); 5915 return ret; 5916 } 5917 5918 qm_cmd_init(qm); 5919 hisi_mig_region_enable(qm); 5920 hisi_qm_dev_err_init(qm); 5921 /* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */ 5922 writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); 5923 qm_disable_clock_gate(qm); 5924 ret = qm_dev_mem_reset(qm); 5925 if (ret) 5926 pci_err(pdev, "failed to reset device memory\n"); 5927 5928 return ret; 5929 } 5930 5931 /** 5932 * hisi_qm_suspend() - Runtime suspend of given device. 5933 * @dev: device to suspend. 5934 * 5935 * Function that suspend the device. 5936 */ 5937 int hisi_qm_suspend(struct device *dev) 5938 { 5939 struct pci_dev *pdev = to_pci_dev(dev); 5940 struct hisi_qm *qm = pci_get_drvdata(pdev); 5941 int ret; 5942 5943 pci_info(pdev, "entering suspended state\n"); 5944 5945 ret = hisi_qm_stop(qm, QM_NORMAL); 5946 if (ret) { 5947 pci_err(pdev, "failed to stop qm(%d)\n", ret); 5948 return ret; 5949 } 5950 5951 ret = qm_prepare_for_suspend(qm); 5952 if (ret) 5953 pci_err(pdev, "failed to prepare suspended(%d)\n", ret); 5954 5955 return ret; 5956 } 5957 EXPORT_SYMBOL_GPL(hisi_qm_suspend); 5958 5959 /** 5960 * hisi_qm_resume() - Runtime resume of given device. 5961 * @dev: device to resume. 5962 * 5963 * Function that resume the device. 
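 *
 * Together with hisi_qm_suspend() this is normally exposed through a
 * driver's dev_pm_ops (sketch, structure name hypothetical):
 *
 *	static const struct dev_pm_ops xxx_pm_ops = {
 *		SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
 *	};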
5964 */ 5965 int hisi_qm_resume(struct device *dev) 5966 { 5967 struct pci_dev *pdev = to_pci_dev(dev); 5968 struct hisi_qm *qm = pci_get_drvdata(pdev); 5969 int ret; 5970 5971 pci_info(pdev, "resuming from suspend state\n"); 5972 5973 ret = qm_rebuild_for_resume(qm); 5974 if (ret) { 5975 pci_err(pdev, "failed to rebuild resume(%d)\n", ret); 5976 return ret; 5977 } 5978 5979 ret = hisi_qm_start(qm); 5980 if (ret) { 5981 if (qm_check_dev_error(qm)) { 5982 pci_info(pdev, "failed to start qm due to device error, device will be reset!\n"); 5983 return 0; 5984 } 5985 5986 pci_err(pdev, "failed to start qm(%d)!\n", ret); 5987 } 5988 5989 return ret; 5990 } 5991 EXPORT_SYMBOL_GPL(hisi_qm_resume); 5992 5993 MODULE_LICENSE("GPL v2"); 5994 MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>"); 5995 MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver"); 5996