/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */
#ifndef HISI_ACC_QM_H
#define HISI_ACC_QM_H

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>

#define QM_QNUM_V1			4096
#define QM_QNUM_V2			1024
#define QM_MAX_VFS_NUM_V2		63

/* qm user domain */
#define QM_ARUSER_M_CFG_1		0x100088
#define AXUSER_SNOOP_ENABLE		BIT(30)
#define AXUSER_CMD_TYPE			GENMASK(14, 12)
#define AXUSER_CMD_SMMU_NORMAL		1
#define AXUSER_NS			BIT(6)
#define AXUSER_NO			BIT(5)
#define AXUSER_FP			BIT(4)
#define AXUSER_SSV			BIT(0)
#define AXUSER_BASE			(AXUSER_SNOOP_ENABLE |		\
					FIELD_PREP(AXUSER_CMD_TYPE,	\
					AXUSER_CMD_SMMU_NORMAL) |	\
					AXUSER_NS | AXUSER_NO | AXUSER_FP)
#define QM_ARUSER_M_CFG_ENABLE		0x100090
#define ARUSER_M_CFG_ENABLE		0xfffffffe
#define QM_AWUSER_M_CFG_1		0x100098
#define QM_AWUSER_M_CFG_ENABLE		0x1000a0
#define AWUSER_M_CFG_ENABLE		0xfffffffe
#define QM_WUSER_M_CFG_ENABLE		0x1000a8
#define WUSER_M_CFG_ENABLE		0xffffffff

/* mailbox */
#define QM_MB_CMD_SQC			0x0
#define QM_MB_CMD_CQC			0x1
#define QM_MB_CMD_EQC			0x2
#define QM_MB_CMD_AEQC			0x3
#define QM_MB_CMD_SQC_BT		0x4
#define QM_MB_CMD_CQC_BT		0x5
#define QM_MB_CMD_SQC_VFT_V2		0x6
#define QM_MB_CMD_STOP_QP		0x8
#define QM_MB_CMD_FLUSH_QM		0x9
#define QM_MB_CMD_SRC			0xc
#define QM_MB_CMD_DST			0xd

#define QM_MB_CMD_SEND_BASE		0x300
#define QM_MB_EVENT_SHIFT		8
#define QM_MB_BUSY_SHIFT		13
#define QM_MB_OP_SHIFT			14
#define QM_MB_CMD_DATA_ADDR_L		0x304
#define QM_MB_CMD_DATA_ADDR_H		0x308
#define QM_MB_MAX_WAIT_CNT		6000

/* doorbell */
#define QM_DOORBELL_CMD_SQ		0
#define QM_DOORBELL_CMD_CQ		1
#define QM_DOORBELL_CMD_EQ		2
#define QM_DOORBELL_CMD_AEQ		3

#define QM_DOORBELL_SQ_CQ_BASE_V2	0x1000
#define QM_DOORBELL_EQ_AEQ_BASE_V2	0x2000
#define QM_QP_MAX_NUM_SHIFT		11
#define QM_DB_CMD_SHIFT_V2		12
#define QM_DB_RAND_SHIFT_V2		16
#define QM_DB_INDEX_SHIFT_V2		32
#define QM_DB_PRIORITY_SHIFT_V2		48
#define QM_VF_STATE			0x60

/* qm cache */
#define QM_CACHE_CTL			0x100050
#define SQC_CACHE_ENABLE		BIT(0)
#define CQC_CACHE_ENABLE		BIT(1)
#define SQC_CACHE_WB_ENABLE		BIT(4)
#define SQC_CACHE_WB_THRD		GENMASK(10, 5)
#define CQC_CACHE_WB_ENABLE		BIT(11)
#define CQC_CACHE_WB_THRD		GENMASK(17, 12)
#define QM_AXI_M_CFG			0x1000ac
#define AXI_M_CFG			0xffff
#define QM_AXI_M_CFG_ENABLE		0x1000b0
#define AM_CFG_SINGLE_PORT_MAX_TRANS	0x300014
#define AXI_M_CFG_ENABLE		0xffffffff
#define QM_PEH_AXUSER_CFG		0x1000cc
#define QM_PEH_AXUSER_CFG_ENABLE	0x1000d0
#define PEH_AXUSER_CFG			0x401001
#define PEH_AXUSER_CFG_ENABLE		0xffffffff

#define QM_MIN_QNUM			2
#define HISI_ACC_SGL_SGE_NR_MAX		255
#define QM_SHAPER_CFG			0x100164
#define QM_SHAPER_ENABLE		BIT(30)
#define QM_SHAPER_TYPE1_OFFSET		10

/* page number for queue file region */
#define QM_DOORBELL_PAGE_NR		1

#define QM_DEV_ALG_MAX_LEN		256

#define QM_MIG_REGION_SEL		0x100198
#define QM_MIG_REGION_EN		BIT(0)

/* uacce mode of the driver */
#define UACCE_MODE_NOUACCE		0 /* don't use uacce */
#define UACCE_MODE_SVA			1 /* use uacce sva mode */
#define UACCE_MODE_DESC	"0(default) means only register to crypto, 1 means both register to crypto and uacce"

#define QM_ECC_MBIT			BIT(2)

enum qm_stop_reason {
	QM_NORMAL,
	QM_SOFT_RESET,
	QM_DOWN,
};

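/*
 * Editor's sketch (illustrative, not part of this header's API): on V2 and
 * later hardware a doorbell is a single 64-bit write whose fields are placed
 * by the QM_DB_*_SHIFT_V2 values defined above. Assuming local variables
 * qn, cmd, randata, index and priority, the encoding looks roughly like:
 *
 *	u64 doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
 *		       ((u64)randata << QM_DB_RAND_SHIFT_V2) |
 *		       ((u64)index << QM_DB_INDEX_SHIFT_V2) |
 *		       ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
 *
 * The authoritative encoding lives in the QM core (qm.c); the variable
 * names here are hypothetical.
 */
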
enum qm_state {
	QM_WORK = 0,
	QM_STOP,
};

enum qp_state {
	QP_START = 1,
	QP_STOP,
};

enum qm_hw_ver {
	QM_HW_V1 = 0x20,
	QM_HW_V2 = 0x21,
	QM_HW_V3 = 0x30,
	QM_HW_V4 = 0x50,
	QM_HW_V5 = 0x51,
};

enum qm_fun_type {
	QM_HW_PF,
	QM_HW_VF,
};

enum qm_debug_file {
	CURRENT_QM,
	CURRENT_Q,
	CLEAR_ENABLE,
	DEBUG_FILE_NUM,
};

enum qm_vf_state {
	QM_READY = 0,
	QM_NOT_READY,
};

enum qm_misc_ctl_bits {
	QM_DRIVER_REMOVING = 0x0,
	QM_RST_SCHED,
	QM_RESETTING,
	QM_MODULE_PARAM,
};

enum qm_cap_bits {
	QM_SUPPORT_DB_ISOLATION = 0x0,
	QM_SUPPORT_FUNC_QOS,
	QM_SUPPORT_STOP_QP,
	QM_SUPPORT_STOP_FUNC,
	QM_SUPPORT_MB_COMMAND,
	QM_SUPPORT_SVA_PREFETCH,
	QM_SUPPORT_RPM,
	QM_SUPPORT_DAE,
};

struct qm_dev_alg {
	u64 alg_msk;
	const char *alg;
};

struct qm_dev_dfx {
	u32 dev_state;
	u32 dev_timeout;
};

struct dfx_diff_registers {
	u32 *regs;
	u32 reg_offset;
	u32 reg_len;
};

struct qm_dfx {
	atomic64_t err_irq_cnt;
	atomic64_t aeq_irq_cnt;
	atomic64_t abnormal_irq_cnt;
	atomic64_t create_qp_err_cnt;
	atomic64_t mb_err_cnt;
};

struct debugfs_file {
	enum qm_debug_file index;
	struct mutex lock;
	struct qm_debug *debug;
};

struct qm_debug {
	u32 curr_qm_qp_num;
	u32 sqe_mask_offset;
	u32 sqe_mask_len;
	struct qm_dfx dfx;
	struct dentry *debug_root;
	struct dentry *qm_d;
	struct debugfs_file files[DEBUG_FILE_NUM];
	struct qm_dev_dfx dev_dfx;
	unsigned int *qm_last_words;
	/* ACC engines recording last regs */
	unsigned int *last_words;
	struct dfx_diff_registers *qm_diff_regs;
	struct dfx_diff_registers *acc_diff_regs;
};

struct qm_shaper_factor {
	u32 func_qos;
	u64 cir_b;
	u64 cir_u;
	u64 cir_s;
	u64 cbs_s;
};

struct qm_dma {
	void *va;
	dma_addr_t dma;
	size_t size;
};

struct hisi_qm_status {
	u32 eq_head;
	bool eqc_phase;
	u32 aeq_head;
	bool aeqc_phase;
	atomic_t flags;
	int stop_reason;
};

struct hisi_qm;

enum acc_err_result {
	ACC_ERR_NONE,
	ACC_ERR_NEED_RESET,
	ACC_ERR_RECOVERED,
};

struct hisi_qm_err_mask {
	u32 ecc_2bits_mask;
	u32 shutdown_mask;
	u32 reset_mask;
	u32 ce;
	u32 nfe;
	u32 fe;
};

struct hisi_qm_err_info {
	char *acpi_rst;
	u32 msi_wr_port;
	struct hisi_qm_err_mask qm_err;
	struct hisi_qm_err_mask dev_err;
};

struct hisi_qm_err_status {
	u32 is_qm_ecc_mbit;
	u32 is_dev_ecc_mbit;
};

struct hisi_qm_err_ini {
	int (*hw_init)(struct hisi_qm *qm);
	void (*hw_err_enable)(struct hisi_qm *qm);
	void (*hw_err_disable)(struct hisi_qm *qm);
	u32 (*get_dev_hw_err_status)(struct hisi_qm *qm);
	void (*clear_dev_hw_err_status)(struct hisi_qm *qm, u32 err_sts);
	void (*open_axi_master_ooo)(struct hisi_qm *qm);
	void (*close_axi_master_ooo)(struct hisi_qm *qm);
	void (*open_sva_prefetch)(struct hisi_qm *qm);
	void (*close_sva_prefetch)(struct hisi_qm *qm);
	void (*show_last_dfx_regs)(struct hisi_qm *qm);
	void (*err_info_init)(struct hisi_qm *qm);
	enum acc_err_result (*get_err_result)(struct hisi_qm *qm);
	bool (*dev_is_abnormal)(struct hisi_qm *qm);
	int (*set_priv_status)(struct hisi_qm *qm);
	void (*disable_axi_error)(struct hisi_qm *qm);
	void (*enable_axi_error)(struct hisi_qm *qm);
};

struct hisi_qm_cap_info {
	u32 type;
	/* Register offset */
	u32 offset;
	/* Bit offset in register */
	u32 shift;
	u32 mask;
	u32 v1_val;
	u32 v2_val;
	u32 v3_val;
};

struct hisi_qm_cap_query_info {
	u32 type;
	const char *name;
	u32 offset;
	u32 v1_val;
	u32 v2_val;
	u32 v3_val;
};

struct hisi_qm_cap_record {
	u32 type;
	const char *name;
	u32 cap_val;
};

struct hisi_qm_cap_tables {
	u32 qm_cap_size;
	struct hisi_qm_cap_record *qm_cap_table;
	u32 dev_cap_size;
	struct hisi_qm_cap_record *dev_cap_table;
};

struct hisi_qm_list {
	struct mutex lock;
	struct list_head list;
	int (*register_to_crypto)(struct hisi_qm *qm);
	void (*unregister_from_crypto)(struct hisi_qm *qm);
};

struct hisi_qm_poll_data {
	struct hisi_qm *qm;
	struct work_struct work;
	u16 *qp_finish_id;
	u16 eqe_num;
};

/**
 * struct qm_err_isolate
 * @isolate_lock: protects device error log
 * @err_threshold: user config error threshold which triggers isolation
 * @is_isolate: device isolation state
 * @qm_hw_errs: list of recorded device hardware errors
 */
struct qm_err_isolate {
	struct mutex isolate_lock;
	u32 err_threshold;
	bool is_isolate;
	struct list_head qm_hw_errs;
};

struct qm_rsv_buf {
	struct qm_sqc *sqc;
	struct qm_cqc *cqc;
	struct qm_eqc *eqc;
	struct qm_aeqc *aeqc;
	dma_addr_t sqc_dma;
	dma_addr_t cqc_dma;
	dma_addr_t eqc_dma;
	dma_addr_t aeqc_dma;
	struct qm_dma qcdma;
};

struct hisi_qm {
	enum qm_hw_ver ver;
	enum qm_fun_type fun_type;
	const char *dev_name;
	struct pci_dev *pdev;
	void __iomem *io_base;
	void __iomem *db_io_base;

	/* Capability version, 0: not supported */
	u32 cap_ver;
	u32 sqe_size;
	u32 qp_base;
	u32 qp_num;
	u32 qp_in_used;
	u32 ctrl_qp_num;
	u32 max_qp_num;
	u32 vfs_num;
	u32 db_interval;
	u16 eq_depth;
	u16 aeq_depth;
	struct list_head list;
	struct hisi_qm_list *qm_list;

	struct qm_dma qdma;
	struct qm_sqc *sqc;
	struct qm_cqc *cqc;
	struct qm_eqe *eqe;
	struct qm_aeqe *aeqe;
	dma_addr_t sqc_dma;
	dma_addr_t cqc_dma;
	dma_addr_t eqe_dma;
	dma_addr_t aeqe_dma;
	struct qm_rsv_buf xqc_buf;

	struct hisi_qm_status status;
	const struct hisi_qm_err_ini *err_ini;
	struct hisi_qm_err_info err_info;
	struct hisi_qm_err_status err_status;
	/* driver removing and reset sched */
	unsigned long misc_ctl;
	/* Device capability bit */
	unsigned long caps;

	struct rw_semaphore qps_lock;
	struct idr qp_idr;
	struct hisi_qp *qp_array;
	struct hisi_qm_poll_data *poll_data;

	struct mutex mailbox_lock;

	struct mutex ifc_lock;

	const struct hisi_qm_hw_ops *ops;

	struct qm_debug debug;

	u32 error_mask;

	struct workqueue_struct *wq;
	struct work_struct rst_work;
	struct work_struct cmd_process;

	bool use_sva;

	resource_size_t phys_base;
	resource_size_t db_phys_base;
	struct uacce_device *uacce;
	int mode;
	struct qm_shaper_factor *factor;
	u32 mb_qos;
	u32 type_rate;
	struct qm_err_isolate isolate_data;

	struct hisi_qm_cap_tables cap_tables;
};

struct hisi_qp_status {
	atomic_t used;
	u16 sq_tail;
	u16 cq_head;
	bool cqc_phase;
	atomic_t flags;
};

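/*
 * Editor's sketch (illustrative): each accelerator driver supplies a
 * hisi_qm_err_ini table and points qm->err_ini at it before calling
 * hisi_qm_dev_err_init(), so the QM core can drive device-specific error
 * handling through these callbacks. All "example_*" names below are
 * hypothetical:
 *
 *	static const struct hisi_qm_err_ini example_err_ini = {
 *		.hw_init		 = example_hw_err_init,
 *		.hw_err_enable		 = example_hw_error_enable,
 *		.hw_err_disable		 = example_hw_error_disable,
 *		.get_dev_hw_err_status	 = example_get_hw_err_status,
 *		.clear_dev_hw_err_status = example_clear_hw_err_status,
 *		.err_info_init		 = example_err_info_init,
 *	};
 *
 *	qm->err_ini = &example_err_ini;
 *	hisi_qm_dev_err_init(qm);
 */
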
struct hisi_qp_ops {
	int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
};

struct instance_backlog {
	struct list_head list;
	spinlock_t lock;
};

struct hisi_qp {
	u32 qp_id;
	u16 sq_depth;
	u16 cq_depth;
	u8 alg_type;

	struct qm_dma qdma;
	void *sqe;
	struct qm_cqe *cqe;
	dma_addr_t sqe_dma;
	dma_addr_t cqe_dma;

	struct hisi_qp_status qp_status;
	struct hisi_qp_ops *hw_ops;
	void (*req_cb)(struct hisi_qp *qp, void *data);
	void (*event_cb)(struct hisi_qp *qp);

	struct hisi_qm *qm;
	bool is_resetting;
	bool is_in_kernel;
	u16 pasid;
	struct uacce_queue *uacce_q;

	u32 ref_count;
	spinlock_t qp_lock;
	struct instance_backlog backlog;
	const void **msg;
};

static inline int vfs_num_set(const char *val, const struct kernel_param *kp)
{
	u32 n;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &n);
	if (ret < 0)
		return ret;

	if (n > QM_MAX_VFS_NUM_V2)
		return -EINVAL;

	return param_set_int(val, kp);
}

static inline int mode_set(const char *val, const struct kernel_param *kp)
{
	u32 n;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &n);
	if (ret != 0 || (n != UACCE_MODE_SVA &&
			 n != UACCE_MODE_NOUACCE))
		return -EINVAL;

	return param_set_int(val, kp);
}

static inline int uacce_mode_set(const char *val, const struct kernel_param *kp)
{
	return mode_set(val, kp);
}

static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
{
	INIT_LIST_HEAD(&qm_list->list);
	mutex_init(&qm_list->lock);
}

static inline void hisi_qm_add_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	mutex_lock(&qm_list->lock);
	list_add_tail(&qm->list, &qm_list->list);
	mutex_unlock(&qm_list->lock);
}

static inline void hisi_qm_del_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	mutex_lock(&qm_list->lock);
	list_del(&qm->list);
	mutex_unlock(&qm_list->lock);
}

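/*
 * Editor's sketch (illustrative): the param setters above are meant to be
 * wired up as module parameter callbacks, so the value is validated before
 * it is stored. The names "vfs_num" and "vfs_num_ops" are hypothetical:
 *
 *	static u32 vfs_num;
 *	static const struct kernel_param_ops vfs_num_ops = {
 *		.set = vfs_num_set,
 *		.get = param_get_int,
 *	};
 *	module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
 *
 * uacce_mode_set() can be wired up the same way, with UACCE_MODE_DESC as
 * the MODULE_PARM_DESC() string.
 */
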
int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp,
		      unsigned int device);
int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r);
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg);
void hisi_qm_stop_qp(struct hisi_qp *qp);
int hisi_qp_send(struct hisi_qp *qp, const void *msg);
void hisi_qm_debug_init(struct hisi_qm *qm);
void hisi_qm_debug_regs_clear(struct hisi_qm *qm);
int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs);
int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen);
int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs);
void hisi_qm_dev_err_init(struct hisi_qm *qm);
void hisi_qm_dev_err_uninit(struct hisi_qm *qm);
int hisi_qm_regs_debugfs_init(struct hisi_qm *qm,
			      struct dfx_diff_registers *dregs, u32 reg_len);
void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len);
void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
				struct dfx_diff_registers *dregs, u32 regs_len);

pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
					  pci_channel_state_t state);
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev);
void hisi_qm_reset_prepare(struct pci_dev *pdev);
void hisi_qm_reset_done(struct pci_dev *pdev);

int hisi_qm_wait_mb_ready(struct hisi_qm *qm);
int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
	       bool op);
int hisi_qm_mb_read(struct hisi_qm *qm, u64 *base, u8 cmd, u16 queue);

struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
	struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
	u32 index, dma_addr_t *hw_sgl_dma, enum dma_data_direction dir);
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
			   struct hisi_acc_hw_sgl *hw_sgl,
			   enum dma_data_direction dir);
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
						   u32 count, u32 sge_nr);
void hisi_acc_free_sgl_pool(struct device *dev,
			    struct hisi_acc_sgl_pool *pool);
int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
			   u8 *alg_type, int node, struct hisi_qp **qps);
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
void hisi_qm_dev_shutdown(struct pci_dev *pdev);
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard);
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard);
int hisi_qm_resume(struct device *dev);
int hisi_qm_suspend(struct device *dev);
void hisi_qm_pm_uninit(struct hisi_qm *qm);
void hisi_qm_pm_init(struct hisi_qm *qm);
int hisi_qm_get_dfx_access(struct hisi_qm *qm);
void hisi_qm_put_dfx_access(struct hisi_qm *qm);
void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset);
u32 hisi_qm_get_hw_info(struct hisi_qm *qm,
			const struct hisi_qm_cap_info *info_table,
			u32 index, bool is_read);
u32 hisi_qm_get_cap_value(struct hisi_qm *qm,
			  const struct hisi_qm_cap_query_info *info_table,
			  u32 index, bool is_read);
int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs,
		     u32 dev_algs_size);

/* Used by VFIO ACC live migration driver */
struct pci_driver *hisi_sec_get_pf_driver(void);
struct pci_driver *hisi_hpre_get_pf_driver(void);
struct pci_driver *hisi_zip_get_pf_driver(void);
#endif
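/*
 * Editor's sketch (illustrative): typical lifetime of the hardware SGL
 * helpers declared in this header. "dev" and "sgl" are hypothetical; both
 * hisi_acc_create_sgl_pool() and hisi_acc_sg_buf_map_to_hw_sgl() report
 * failure via ERR_PTR values.
 *
 *	struct hisi_acc_sgl_pool *pool;
 *	struct hisi_acc_hw_sgl *hw_sgl;
 *	dma_addr_t hw_sgl_dma;
 *
 *	pool = hisi_acc_create_sgl_pool(dev, 16, HISI_ACC_SGL_SGE_NR_MAX);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *
 *	hw_sgl = hisi_acc_sg_buf_map_to_hw_sgl(dev, sgl, pool, 0,
 *					       &hw_sgl_dma, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(hw_sgl))
 *		goto err_free_pool;
 *
 *	// ... hand hw_sgl_dma to the device in an SQE, wait for completion ...
 *
 *	hisi_acc_sg_buf_unmap(dev, sgl, hw_sgl, DMA_BIDIRECTIONAL);
 *	hisi_acc_free_sgl_pool(dev, pool);
 */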