// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>

#include "sec.h"

#define SEC_VF_NUM			63
#define SEC_QUEUE_NUM_V1		4096
#define PCI_DEVICE_ID_HUAWEI_SEC_PF	0xa255

#define SEC_BD_ERR_CHK_EN0		0xEFFFFFFF
#define SEC_BD_ERR_CHK_EN1		0x7ffff7fd
#define SEC_BD_ERR_CHK_EN3		0xffffbfff

#define SEC_SQE_SIZE			128
#define SEC_SQ_SIZE			(SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM		256
#define SEC_PF_DEF_Q_BASE		0
#define SEC_CTX_Q_NUM_DEF		2
#define SEC_CTX_Q_NUM_MAX		32

#define SEC_CTRL_CNT_CLR_CE		0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT		BIT(0)
#define SEC_CORE_INT_SOURCE		0x301010
#define SEC_CORE_INT_MASK		0x301000
#define SEC_CORE_INT_STATUS		0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO	0x301C14
#define SEC_ECC_NUM			16
#define SEC_ECC_MASH			0xFF
#define SEC_CORE_INT_DISABLE		0x0
#define SEC_CORE_INT_ENABLE		0x7c1ff
#define SEC_CORE_INT_CLEAR		0x7c1ff
#define SEC_SAA_ENABLE			0x17f

#define SEC_RAS_CE_REG			0x301050
#define SEC_RAS_FE_REG			0x301054
#define SEC_RAS_NFE_REG			0x301058
#define SEC_RAS_CE_ENB_MSK		0x88
#define SEC_RAS_FE_ENB_MSK		0x0
#define SEC_RAS_NFE_ENB_MSK		0x7c177
#define SEC_OOO_SHUTDOWN_SEL		0x301014
#define SEC_RAS_DISABLE			0x0
#define SEC_MEM_START_INIT_REG		0x301100
#define SEC_MEM_INIT_DONE_REG		0x301104

/* clock gating */
#define SEC_CONTROL_REG			0x301200
#define SEC_DYNAMIC_GATE_REG		0x30121c
#define SEC_CORE_AUTO_GATE		0x30212c
#define SEC_DYNAMIC_GATE_EN		0x7bff
#define SEC_CORE_AUTO_GATE_EN		GENMASK(3, 0)
#define SEC_CLK_GATE_ENABLE		BIT(3)
#define SEC_CLK_GATE_DISABLE		(~BIT(3))

#define SEC_TRNG_EN_SHIFT		8
#define SEC_AXI_SHUTDOWN_ENABLE		BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE	0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG	0x301220
#define SEC_INTERFACE_USER_CTRL1_REG	0x301224
#define SEC_SAA_EN_REG			0x301270
#define SEC_BD_ERR_CHK_EN_REG0		0x301380
#define SEC_BD_ERR_CHK_EN_REG1		0x301384
#define SEC_BD_ERR_CHK_EN_REG3		0x30138c

#define SEC_USER0_SMMU_NORMAL		(BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL		(BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_USER1_ENABLE_CONTEXT_SSV	BIT(24)
#define SEC_USER1_ENABLE_DATA_SSV	BIT(16)
#define SEC_USER1_WB_CONTEXT_SSV	BIT(8)
#define SEC_USER1_WB_DATA_SSV		BIT(0)
#define SEC_USER1_SVA_SET		(SEC_USER1_ENABLE_CONTEXT_SSV | \
					 SEC_USER1_ENABLE_DATA_SSV | \
					 SEC_USER1_WB_CONTEXT_SSV | \
					 SEC_USER1_WB_DATA_SSV)
#define SEC_USER1_SMMU_SVA		(SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
#define SEC_USER1_SMMU_MASK		(~SEC_USER1_SVA_SET)
#define SEC_INTERFACE_USER_CTRL0_REG_V3	0x302220
#define SEC_INTERFACE_USER_CTRL1_REG_V3	0x302224
#define SEC_USER1_SMMU_NORMAL_V3	(BIT(23) | BIT(17) | BIT(11) | BIT(5))
#define SEC_USER1_SMMU_MASK_V3		0xFF79E79E
#define SEC_CORE_INT_STATUS_M_ECC	BIT(2)

#define SEC_PREFETCH_CFG		0x301130
#define SEC_SVA_TRANS			0x301EC4
#define SEC_PREFETCH_ENABLE		(~(BIT(0) | BIT(1) | BIT(11)))
#define SEC_PREFETCH_DISABLE		BIT(1)
#define SEC_SVA_DISABLE_READY		(BIT(7) | BIT(11))
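
/*
 * Illustrative sketch (not part of the driver logic): the USER1 SVA bits
 * above are applied with a read-modify-write of the interface-user control
 * register, as sec_engine_sva_config() below does for HW v2:
 *
 *	reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
 *	reg &= SEC_USER1_SMMU_MASK;
 *	reg |= qm->use_sva ? SEC_USER1_SMMU_SVA : SEC_USER1_SMMU_NORMAL;
 *	writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG);
 */
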
#define SEC_DELAY_10_US			10
#define SEC_POLL_TIMEOUT_US		1000
#define SEC_DBGFS_VAL_MAX_LEN		20
#define SEC_SINGLE_PORT_MAX_TRANS	0x2060

#define SEC_SQE_MASK_OFFSET		64
#define SEC_SQE_MASK_LEN		48
#define SEC_SHAPER_TYPE_RATE		400

#define SEC_DFX_BASE			0x301000
#define SEC_DFX_CORE			0x302100
#define SEC_DFX_COMMON1			0x301600
#define SEC_DFX_COMMON2			0x301C00
#define SEC_DFX_BASE_LEN		0x9D
#define SEC_DFX_CORE_LEN		0x32B
#define SEC_DFX_COMMON1_LEN		0x45
#define SEC_DFX_COMMON2_LEN		0xBA

struct sec_hw_error {
	u32 int_msk;
	const char *msg;
};

struct sec_dfx_item {
	const char *name;
	u32 offset;
};

static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;

static struct hisi_qm_list sec_devices = {
	.register_to_crypto	= sec_register_to_crypto,
	.unregister_from_crypto	= sec_unregister_from_crypto,
};

static const struct sec_hw_error sec_hw_errors[] = {
	{
		.int_msk = BIT(0),
		.msg = "sec_axi_rresp_err_rint"
	},
	{
		.int_msk = BIT(1),
		.msg = "sec_axi_bresp_err_rint"
	},
	{
		.int_msk = BIT(2),
		.msg = "sec_ecc_2bit_err_rint"
	},
	{
		.int_msk = BIT(3),
		.msg = "sec_ecc_1bit_err_rint"
	},
	{
		.int_msk = BIT(4),
		.msg = "sec_req_trng_timeout_rint"
	},
	{
		.int_msk = BIT(5),
		.msg = "sec_fsm_hbeat_rint"
	},
	{
		.int_msk = BIT(6),
		.msg = "sec_channel_req_rng_timeout_rint"
	},
	{
		.int_msk = BIT(7),
		.msg = "sec_bd_err_rint"
	},
	{
		.int_msk = BIT(8),
		.msg = "sec_chain_buff_err_rint"
	},
	{
		.int_msk = BIT(14),
		.msg = "sec_no_secure_access"
	},
	{
		.int_msk = BIT(15),
		.msg = "sec_wrapping_key_auth_err"
	},
	{
		.int_msk = BIT(16),
		.msg = "sec_km_key_crc_fail"
	},
	{
		.int_msk = BIT(17),
		.msg = "sec_axi_poison_err"
	},
	{
		.int_msk = BIT(18),
		.msg = "sec_sva_err"
	},
	{}
};

static const char * const sec_dbg_file_name[] = {
	[SEC_CLEAR_ENABLE] = "clear_enable",
};

static struct sec_dfx_item sec_dfx_labels[] = {
	{"send_cnt", offsetof(struct sec_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
	{"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)},
	{"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
	{"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
	{"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
};

static const struct debugfs_reg32 sec_dfx_regs[] = {
	{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
	{"SEC_SAA_EN ", 0x301270},
	{"SEC_BD_LATENCY_MIN ", 0x301600},
	{"SEC_BD_LATENCY_MAX ", 0x301608},
	{"SEC_BD_LATENCY_AVG ", 0x30160C},
	{"SEC_BD_NUM_IN_SAA0 ", 0x301670},
	{"SEC_BD_NUM_IN_SAA1 ", 0x301674},
	{"SEC_BD_NUM_IN_SEC ", 0x301680},
	{"SEC_ECC_1BIT_CNT ", 0x301C00},
	{"SEC_ECC_1BIT_INFO ", 0x301C04},
	{"SEC_ECC_2BIT_CNT ", 0x301C10},
	{"SEC_ECC_2BIT_INFO ", 0x301C14},
	{"SEC_BD_SAA0 ", 0x301C20},
	{"SEC_BD_SAA1 ", 0x301C24},
	{"SEC_BD_SAA2 ", 0x301C28},
	{"SEC_BD_SAA3 ", 0x301C2C},
	{"SEC_BD_SAA4 ", 0x301C30},
	{"SEC_BD_SAA5 ", 0x301C34},
	{"SEC_BD_SAA6 ", 0x301C38},
	{"SEC_BD_SAA7 ", 0x301C3C},
	{"SEC_BD_SAA8 ", 0x301C40},
};
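
/*
 * Usage note (illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug): the registers above are exported read-only by
 * sec_core_debug_init() and can be dumped from user space with e.g.
 *
 *   cat /sys/kernel/debug/hisi_sec2/<pci-bdf>/sec_dfx/regs
 */
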
/* define the SEC's dfx regs region and region length */
static struct dfx_diff_registers sec_diff_regs[] = {
	{
		.reg_offset = SEC_DFX_BASE,
		.reg_len = SEC_DFX_BASE_LEN,
	}, {
		.reg_offset = SEC_DFX_COMMON1,
		.reg_len = SEC_DFX_COMMON1_LEN,
	}, {
		.reg_offset = SEC_DFX_COMMON2,
		.reg_len = SEC_DFX_COMMON2_LEN,
	}, {
		.reg_offset = SEC_DFX_CORE,
		.reg_len = SEC_DFX_CORE_LEN,
	},
};

static int sec_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
				   ARRAY_SIZE(sec_diff_regs));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(sec_diff_regs);

static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
	.set = sec_pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 2-4096, v2 2-1024)");

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
	u32 ctx_q_num;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &ctx_q_num);
	if (ret)
		return -EINVAL;

	if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
		pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
		return -EINVAL;
	}

	return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_ctx_q_num_ops = {
	.set = sec_ctx_q_num_set,
	.get = param_get_int,
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
	hisi_qm_free_qps(qps, qp_num);
	kfree(qps);
}

struct hisi_qp **sec_create_qps(void)
{
	int node = cpu_to_node(smp_processor_id());
	u32 ctx_num = ctx_q_num;
	struct hisi_qp **qps;
	int ret;

	qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
	if (!qps)
		return NULL;

	ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
	if (!ret)
		return qps;

	kfree(qps);
	return NULL;
}

static const struct kernel_param_ops sec_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means SEC registers to crypto only;
 * uacce_mode = 1 means SEC registers to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
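
/*
 * Example (illustrative): loading the module with an explicit queue
 * configuration; values must satisfy the checks above (ctx_q_num even,
 * at most SEC_CTX_Q_NUM_MAX; vfs_num 0-63):
 *
 *   modprobe hisi_sec2 pf_q_num=256 ctx_q_num=2 vfs_num=0 uacce_mode=0
 */
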
static const struct pci_device_id sec_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);

static void sec_set_endian(struct hisi_qm *qm)
{
	u32 reg;

	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg &= ~(BIT(1) | BIT(0));
	if (!IS_ENABLED(CONFIG_64BIT))
		reg |= BIT(1);

	if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
		reg |= BIT(0);

	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}

static void sec_engine_sva_config(struct hisi_qm *qm)
{
	u32 reg;

	if (qm->ver > QM_HW_V2) {
		reg = readl_relaxed(qm->io_base +
				    SEC_INTERFACE_USER_CTRL0_REG_V3);
		reg |= SEC_USER0_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
			       SEC_INTERFACE_USER_CTRL0_REG_V3);

		reg = readl_relaxed(qm->io_base +
				    SEC_INTERFACE_USER_CTRL1_REG_V3);
		reg &= SEC_USER1_SMMU_MASK_V3;
		reg |= SEC_USER1_SMMU_NORMAL_V3;
		writel_relaxed(reg, qm->io_base +
			       SEC_INTERFACE_USER_CTRL1_REG_V3);
	} else {
		reg = readl_relaxed(qm->io_base +
				    SEC_INTERFACE_USER_CTRL0_REG);
		reg |= SEC_USER0_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
			       SEC_INTERFACE_USER_CTRL0_REG);
		reg = readl_relaxed(qm->io_base +
				    SEC_INTERFACE_USER_CTRL1_REG);
		reg &= SEC_USER1_SMMU_MASK;
		if (qm->use_sva)
			reg |= SEC_USER1_SMMU_SVA;
		else
			reg |= SEC_USER1_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
			       SEC_INTERFACE_USER_CTRL1_REG);
	}
}

static void sec_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (qm->ver < QM_HW_V3)
		return;

	/* Enable prefetch */
	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
	val &= SEC_PREFETCH_ENABLE;
	writel(val, qm->io_base + SEC_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
					 val, !(val & SEC_PREFETCH_DISABLE),
					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to open sva prefetch\n");
}

static void sec_close_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
	val |= SEC_PREFETCH_DISABLE;
	writel(val, qm->io_base + SEC_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
					 val, !(val & SEC_SVA_DISABLE_READY),
					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to close sva prefetch\n");
}

static void sec_enable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	val |= SEC_CLK_GATE_ENABLE;
	writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);

	val = readl(qm->io_base + SEC_DYNAMIC_GATE_REG);
	val |= SEC_DYNAMIC_GATE_EN;
	writel(val, qm->io_base + SEC_DYNAMIC_GATE_REG);

	val = readl(qm->io_base + SEC_CORE_AUTO_GATE);
	val |= SEC_CORE_AUTO_GATE_EN;
	writel(val, qm->io_base + SEC_CORE_AUTO_GATE);
}

static void sec_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	/* Kunpeng920 requires clock gating to be disabled */
	val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	val &= SEC_CLK_GATE_DISABLE;
	writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
}
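
/*
 * Note on the polling idiom used below: readl_relaxed_poll_timeout()
 * (from <linux/iopoll.h>) re-reads the register into its value argument
 * every SEC_DELAY_10_US microseconds until the given condition becomes
 * true, and returns -ETIMEDOUT once SEC_POLL_TIMEOUT_US has elapsed.
 */
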
static int sec_engine_init(struct hisi_qm *qm)
{
	int ret;
	u32 reg;

	/* disable clock gate control before mem init */
	sec_disable_clock_gate(qm);

	writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG,
					 reg, reg & 0x1, SEC_DELAY_10_US,
					 SEC_POLL_TIMEOUT_US);
	if (ret) {
		pci_err(qm->pdev, "fail to init sec mem\n");
		return ret;
	}

	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg |= (0x1 << SEC_TRNG_EN_SHIFT);
	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);

	sec_engine_sva_config(qm);

	writel(SEC_SINGLE_PORT_MAX_TRANS,
	       qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);

	writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG);

	if (qm->ver < QM_HW_V3) {
		/* HW V2 enables SM4 extra modes such as CTR/ECB */
		writel_relaxed(SEC_BD_ERR_CHK_EN0,
			       qm->io_base + SEC_BD_ERR_CHK_EN_REG0);

		/* HW V2 enables multiple IVs in SM4 XTS mode */
		writel_relaxed(SEC_BD_ERR_CHK_EN1,
			       qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
		writel_relaxed(SEC_BD_ERR_CHK_EN3,
			       qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
	}

	/* config endian */
	sec_set_endian(qm);

	sec_enable_clock_gate(qm);

	return 0;
}

static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
	/* qm user domain */
	writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);

	/* disable FLR triggered by BME (bus master enable) */
	writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);

	/* enable sqc, cqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);

	return sec_engine_init(qm);
}

/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
	int i;

	/* clear sec dfx regs */
	writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
		readl(qm->io_base + sec_dfx_regs[i].offset);

	/* clear rdclr_en */
	writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 val1, val2;

	val1 = readl(qm->io_base + SEC_CONTROL_REG);
	if (enable) {
		val1 |= SEC_AXI_SHUTDOWN_ENABLE;
		val2 = SEC_RAS_NFE_ENB_MSK;
	} else {
		val1 &= SEC_AXI_SHUTDOWN_DISABLE;
		val2 = 0x0;
	}

	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + SEC_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + SEC_CONTROL_REG);
}
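
/*
 * RAS error classes, for reference: CE (correctable), NFE (non-fatal)
 * and FE (fatal) each have their own enable register above. On HW v3,
 * sec_master_ooo_ctrl() additionally ties the NFE mask to the AXI
 * master out-of-order shutdown select, so a non-fatal error stops
 * outstanding AXI transactions.
 */
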
static void sec_hw_error_enable(struct hisi_qm *qm)
{
	if (qm->ver == QM_HW_V1) {
		writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
		pci_info(qm->pdev, "V1 not support hw error handle\n");
		return;
	}

	/* clear SEC hw error source if any */
	writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);

	/* enable RAS int */
	writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);

	/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
	sec_master_ooo_ctrl(qm, true);

	/* enable SEC hw error interrupts */
	writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
}

static void sec_hw_error_disable(struct hisi_qm *qm)
{
	/* disable SEC hw error interrupts */
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* disable SEC block master OOO when nfe occurs on Kunpeng930 */
	sec_master_ooo_ctrl(qm, false);

	/* disable RAS int */
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
}

static u32 sec_clear_enable_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
		SEC_CTRL_CNT_CLR_CE_BIT;
}

static int sec_clear_enable_write(struct hisi_qm *qm, u32 val)
{
	u32 tmp;

	if (val != 1 && val)
		return -EINVAL;

	tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	return 0;
}

static ssize_t sec_debug_read(struct file *filp, char __user *buf,
			      size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	struct hisi_qm *qm = file->qm;
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		val = sec_clear_enable_read(qm);
		break;
	default:
		goto err_input;
	}

	spin_unlock_irq(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}

static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	struct hisi_qm *qm = file->qm;
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= SEC_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		ret = sec_clear_enable_write(qm, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	ret = count;

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return ret;
}
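
/*
 * Example (illustrative, PF only, debugfs mounted at /sys/kernel/debug):
 * the clear_enable file created in sec_debug_init() accepts 0 or 1 and
 * controls the read-clear behaviour of the counter registers:
 *
 *   echo 1 > /sys/kernel/debug/hisi_sec2/<pci-bdf>/clear_enable
 *   cat /sys/kernel/debug/hisi_sec2/<pci-bdf>/clear_enable
 */
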
static const struct file_operations sec_dbg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = sec_debug_read,
	.write = sec_debug_write,
};

static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

static int sec_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
			 sec_debugfs_atomic64_set, "%lld\n");

static int sec_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(sec_regs);

static int sec_core_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *sec_regs = qm->debug.acc_diff_regs;
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	struct device *dev = &qm->pdev->dev;
	struct sec_dfx *dfx = &sec->debug.dfx;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	int i;

	tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = sec_dfx_regs;
	regset->nregs = ARRAY_SIZE(sec_dfx_regs);
	regset->base = qm->io_base;
	regset->dev = dev;

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF)
		debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops);
	if (qm->fun_type == QM_HW_PF && sec_regs)
		debugfs_create_file("diff_regs", 0444, tmp_d,
				    qm, &sec_diff_regs_fops);

	for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
		atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
						  sec_dfx_labels[i].offset);
		debugfs_create_file(sec_dfx_labels[i].name, 0644,
				    tmp_d, data, &sec_atomic64_ops);
	}

	return 0;
}

static int sec_debug_init(struct hisi_qm *qm)
{
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	int i;

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) {
		for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) {
			spin_lock_init(&sec->debug.files[i].lock);
			sec->debug.files[i].index = i;
			sec->debug.files[i].qm = qm;

			debugfs_create_file(sec_dbg_file_name[i], 0600,
					    qm->debug.debug_root,
					    sec->debug.files + i,
					    &sec_dbg_fops);
		}
	}

	return sec_core_debug_init(qm);
}

static int sec_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  sec_debugfs_root);
	qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;

	ret = hisi_qm_diff_regs_init(qm, sec_diff_regs,
				     ARRAY_SIZE(sec_diff_regs));
	if (ret) {
		dev_warn(dev, "Failed to init SEC diff regs!\n");
		goto debugfs_remove;
	}

	hisi_qm_debug_init(qm);

	ret = sec_debug_init(qm);
	if (ret)
		goto failed_to_create;

	return 0;

failed_to_create:
	hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
debugfs_remove:
	debugfs_remove_recursive(sec_debugfs_root);
	return ret;
}

static void sec_debugfs_exit(struct hisi_qm *qm)
{
	hisi_qm_diff_regs_uninit(qm, ARRAY_SIZE(sec_diff_regs));

	debugfs_remove_recursive(qm->debug.debug_root);
}
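
/*
 * Usage note (illustrative): the atomic64 counters in sec_dfx_labels are
 * exposed as individual debugfs files; reading returns the count, and
 * writing 0 resets it (any other value is rejected with -EINVAL), e.g.
 *
 *   cat /sys/kernel/debug/hisi_sec2/<pci-bdf>/sec_dfx/send_cnt
 *   echo 0 > /sys/kernel/debug/hisi_sec2/<pci-bdf>/sec_dfx/send_cnt
 */
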
static int sec_show_last_regs_init(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	int i;

	debug->last_words = kcalloc(ARRAY_SIZE(sec_dfx_regs),
				    sizeof(unsigned int), GFP_KERNEL);
	if (!debug->last_words)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
		debug->last_words[i] = readl_relaxed(qm->io_base +
						     sec_dfx_regs[i].offset);

	return 0;
}

static void sec_show_last_regs_uninit(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	kfree(debug->last_words);
	debug->last_words = NULL;
}

static void sec_show_last_dfx_regs(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	struct pci_dev *pdev = qm->pdev;
	u32 val;
	int i;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	/* dump the last words of the debugging registers during controller reset */
	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) {
		val = readl_relaxed(qm->io_base + sec_dfx_regs[i].offset);
		if (val != debug->last_words[i])
			pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
				 sec_dfx_regs[i].name, debug->last_words[i], val);
	}
}

static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct sec_hw_error *errs = sec_hw_errors;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (errs->msg) {
		if (errs->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				errs->msg, errs->int_msk);

			if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
				err_val = readl(qm->io_base +
						SEC_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "multi ecc sram num=0x%x\n",
					((err_val) >> SEC_ECC_NUM) &
					SEC_ECC_MASH);
			}
		}
		errs++;
	}
}

static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CORE_INT_STATUS);
}

static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}

static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + SEC_CONTROL_REG);
	writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG);
	writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
}

static void sec_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	err_info->ce = QM_BASE_CE;
	err_info->fe = 0;
	err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
	err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK;
	err_info->msi_wr_port = BIT(0);
	err_info->acpi_rst = "SRST";
	err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
			QM_ACC_WB_NOT_READY_TIMEOUT;
}

static const struct hisi_qm_err_ini sec_err_ini = {
	.hw_init		= sec_set_user_domain_and_cache,
	.hw_err_enable		= sec_hw_error_enable,
	.hw_err_disable		= sec_hw_error_disable,
	.get_dev_hw_err_status	= sec_get_hw_err_status,
	.clear_dev_hw_err_status = sec_clear_hw_err_status,
	.log_dev_hw_err		= sec_log_hw_error,
	.open_axi_master_ooo	= sec_open_axi_master_ooo,
	.open_sva_prefetch	= sec_open_sva_prefetch,
	.close_sva_prefetch	= sec_close_sva_prefetch,
	.show_last_dfx_regs	= sec_show_last_dfx_regs,
	.err_info_init		= sec_err_info_init,
};
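
/*
 * For reference: the hisi_qm core drives the callbacks above during RAS
 * handling, e.g. get_dev_hw_err_status()/log_dev_hw_err() on an error
 * interrupt and open_axi_master_ooo() when recovering from a master
 * out-of-order shutdown; err_info_init() is invoked once at PF probe
 * time (see sec_pf_probe_init() below).
 */
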
static int sec_pf_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	qm->err_ini = &sec_err_ini;
	qm->err_ini->err_info_init(qm);

	ret = sec_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	sec_open_sva_prefetch(qm);
	hisi_qm_dev_err_init(qm);
	sec_debug_regs_clear(qm);
	ret = sec_show_last_regs_init(qm);
	if (ret)
		pci_err(qm->pdev, "Failed to init last word regs!\n");

	return ret;
}

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->algs = "cipher\ndigest\naead";
	qm->mode = uacce_mode;
	qm->sqe_size = SEC_SQE_SIZE;
	qm->dev_name = sec_name;

	qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = SEC_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &sec_devices;
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * A VF has no way to get the QM configuration on v1
		 * hardware, so the PF is forced to use SEC_PF_DEF_Q_NUM
		 * and only one VF is brought up on v1 hardware.
		 * v2 hardware has no such problem.
		 */
		qm->qp_base = SEC_PF_DEF_Q_NUM;
		qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
	}

	return hisi_qm_init(qm);
}

static void sec_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}

static int sec_probe_init(struct sec_dev *sec)
{
	u32 type_rate = SEC_SHAPER_TYPE_RATE;
	struct hisi_qm *qm = &sec->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = sec_pf_probe_init(sec);
		if (ret)
			return ret;
		/* enable shaper type 0 */
		if (qm->ver >= QM_HW_V3) {
			type_rate |= QM_SHAPER_ENABLE;
			qm->type_rate = type_rate;
		}
	}

	return 0;
}

static void sec_probe_uninit(struct hisi_qm *qm)
{
	hisi_qm_dev_err_uninit(qm);
}

static void sec_iommu_used_check(struct sec_dev *sec)
{
	struct iommu_domain *domain;
	struct device *dev = &sec->qm.pdev->dev;

	domain = iommu_get_domain_for_dev(dev);

	/* Check if iommu is used */
	sec->iommu_used = false;
	if (domain) {
		if (domain->type & __IOMMU_DOMAIN_PAGING)
			sec->iommu_used = true;
		dev_info(dev, "SMMU Opened, the iommu type = %u\n",
			 domain->type);
	}
}

static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sec_dev *sec;
	struct hisi_qm *qm;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	qm = &sec->qm;
	ret = sec_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
		return ret;
	}

	sec->ctx_q_num = ctx_q_num;
	sec_iommu_used_check(sec);

	ret = sec_probe_init(sec);
	if (ret) {
		pci_err(pdev, "Failed to probe!\n");
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		pci_err(pdev, "Failed to start sec qm!\n");
		goto err_probe_uninit;
	}

	ret = sec_debugfs_init(qm);
	if (ret)
		pci_warn(pdev, "Failed to init debugfs!\n");

	if (qm->qp_num >= ctx_q_num) {
		ret = hisi_qm_alg_register(qm, &sec_devices);
		if (ret < 0) {
			pr_err("Failed to register driver to crypto.\n");
			goto err_qm_stop;
		}
	} else {
		pci_warn(qm->pdev,
			 "Failed to use kernel mode, qp not enough!\n");
	}
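
	/*
	 * Note (descriptive): the uacce device is registered only after
	 * the kernel crypto registration above; if uacce_register()
	 * fails, the algorithms are unregistered again via
	 * err_alg_unregister.
	 */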
	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_alg_unregister;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_alg_unregister;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_alg_unregister:
	if (qm->qp_num >= ctx_q_num)
		hisi_qm_alg_unregister(qm, &sec_devices);
err_qm_stop:
	sec_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
	sec_show_last_regs_uninit(qm);
	sec_probe_uninit(qm);
err_qm_uninit:
	sec_qm_uninit(qm);
	return ret;
}

static void sec_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &sec_devices);
	if (qm->qp_num >= ctx_q_num)
		hisi_qm_alg_unregister(qm, &sec_devices);

	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	sec_debugfs_exit(qm);

	(void)hisi_qm_stop(qm, QM_NORMAL);

	if (qm->fun_type == QM_HW_PF)
		sec_debug_regs_clear(qm);
	sec_show_last_regs_uninit(qm);

	sec_probe_uninit(qm);

	sec_qm_uninit(qm);
}

static const struct dev_pm_ops sec_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};

static const struct pci_error_handlers sec_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset = hisi_qm_dev_slot_reset,
	.reset_prepare = hisi_qm_reset_prepare,
	.reset_done = hisi_qm_reset_done,
};

static struct pci_driver sec_pci_driver = {
	.name = "hisi_sec2",
	.id_table = sec_dev_ids,
	.probe = sec_probe,
	.remove = sec_remove,
	.err_handler = &sec_err_handler,
	.sriov_configure = hisi_qm_sriov_configure,
	.shutdown = hisi_qm_dev_shutdown,
	.driver.pm = &sec_pm_ops,
};

struct pci_driver *hisi_sec_get_pf_driver(void)
{
	return &sec_pci_driver;
}
EXPORT_SYMBOL_GPL(hisi_sec_get_pf_driver);

static void sec_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

static void sec_unregister_debugfs(void)
{
	debugfs_remove_recursive(sec_debugfs_root);
}

static int __init sec_init(void)
{
	int ret;

	hisi_qm_init_list(&sec_devices);
	sec_register_debugfs();

	ret = pci_register_driver(&sec_pci_driver);
	if (ret < 0) {
		sec_unregister_debugfs();
		pr_err("Failed to register pci driver.\n");
		return ret;
	}

	return 0;
}

static void __exit sec_exit(void)
{
	pci_unregister_driver(&sec_pci_driver);
	sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");
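
/*
 * Usage note (illustrative): besides the vfs_num module parameter, VFs
 * can be toggled at runtime through the standard PCI sysfs interface,
 * which ends up in hisi_qm_sriov_configure():
 *
 *   echo 63 > /sys/bus/pci/devices/<pci-bdf>/sriov_numvfs
 *   echo 0 > /sys/bus/pci/devices/<pci-bdf>/sriov_numvfs
 */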