// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>

#include "sec.h"

#define SEC_VF_NUM			63
#define SEC_QUEUE_NUM_V1		4096
#define SEC_PF_PCI_DEVICE_ID		0xa255
#define SEC_VF_PCI_DEVICE_ID		0xa256

#define SEC_BD_ERR_CHK_EN0		0xEFFFFFFF
#define SEC_BD_ERR_CHK_EN1		0x7ffff7fd
#define SEC_BD_ERR_CHK_EN3		0xffffbfff

#define SEC_SQE_SIZE			128
#define SEC_SQ_SIZE			(SEC_SQE_SIZE * QM_Q_DEPTH)
#define SEC_PF_DEF_Q_NUM		256
#define SEC_PF_DEF_Q_BASE		0
#define SEC_CTX_Q_NUM_DEF		2
#define SEC_CTX_Q_NUM_MAX		32

#define SEC_CTRL_CNT_CLR_CE		0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT		BIT(0)
#define SEC_CORE_INT_SOURCE		0x301010
#define SEC_CORE_INT_MASK		0x301000
#define SEC_CORE_INT_STATUS		0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO	0x301C14
#define SEC_ECC_NUM			16
#define SEC_ECC_MASH			0xFF
#define SEC_CORE_INT_DISABLE		0x0
#define SEC_CORE_INT_ENABLE		0x7c1ff
#define SEC_CORE_INT_CLEAR		0x7c1ff
#define SEC_SAA_ENABLE			0x17f

#define SEC_RAS_CE_REG			0x301050
#define SEC_RAS_FE_REG			0x301054
#define SEC_RAS_NFE_REG			0x301058
#define SEC_RAS_CE_ENB_MSK		0x88
#define SEC_RAS_FE_ENB_MSK		0x0
#define SEC_RAS_NFE_ENB_MSK		0x7c177
#define SEC_OOO_SHUTDOWN_SEL		0x301014
#define SEC_RAS_DISABLE			0x0
#define SEC_MEM_START_INIT_REG		0x301100
#define SEC_MEM_INIT_DONE_REG		0x301104

#define SEC_CONTROL_REG			0x301200
#define SEC_TRNG_EN_SHIFT		8
#define SEC_CLK_GATE_ENABLE		BIT(3)
#define SEC_CLK_GATE_DISABLE		(~BIT(3))
#define SEC_AXI_SHUTDOWN_ENABLE		BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE	0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG	0x301220
#define SEC_INTERFACE_USER_CTRL1_REG	0x301224
#define SEC_SAA_EN_REG			0x301270
#define SEC_BD_ERR_CHK_EN_REG0		0x301380
#define SEC_BD_ERR_CHK_EN_REG1		0x301384
#define SEC_BD_ERR_CHK_EN_REG3		0x30138c

#define SEC_USER0_SMMU_NORMAL		(BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL		(BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_USER1_ENABLE_CONTEXT_SSV	BIT(24)
#define SEC_USER1_ENABLE_DATA_SSV	BIT(16)
#define SEC_USER1_WB_CONTEXT_SSV	BIT(8)
#define SEC_USER1_WB_DATA_SSV		BIT(0)
#define SEC_USER1_SVA_SET		(SEC_USER1_ENABLE_CONTEXT_SSV | \
					 SEC_USER1_ENABLE_DATA_SSV | \
					 SEC_USER1_WB_CONTEXT_SSV | \
					 SEC_USER1_WB_DATA_SSV)
#define SEC_USER1_SMMU_SVA		(SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
#define SEC_USER1_SMMU_MASK		(~SEC_USER1_SVA_SET)
#define SEC_CORE_INT_STATUS_M_ECC	BIT(2)

#define SEC_PREFETCH_CFG		0x301130
#define SEC_SVA_TRANS			0x301EC4
#define SEC_PREFETCH_ENABLE		(~(BIT(0) | BIT(1) | BIT(11)))
#define SEC_PREFETCH_DISABLE		BIT(1)
#define SEC_SVA_DISABLE_READY		(BIT(7) | BIT(11))

#define SEC_DELAY_10_US			10
#define SEC_POLL_TIMEOUT_US		1000
#define SEC_DBGFS_VAL_MAX_LEN		20
#define SEC_SINGLE_PORT_MAX_TRANS	0x2060

#define SEC_SQE_MASK_OFFSET		64
#define SEC_SQE_MASK_LEN		48
#define SEC_SHAPER_TYPE_RATE		128

struct sec_hw_error {
	u32 int_msk;
	const char *msg;
};

struct sec_dfx_item {
	const char *name;
	u32 offset;
};

static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;

static struct hisi_qm_list sec_devices = {
	.register_to_crypto = sec_register_to_crypto,
	.unregister_from_crypto = sec_unregister_from_crypto,
};

static const struct sec_hw_error sec_hw_errors[] = {
	{
		.int_msk = BIT(0),
		.msg = "sec_axi_rresp_err_rint"
	},
	{
		.int_msk = BIT(1),
		.msg = "sec_axi_bresp_err_rint"
	},
	{
		.int_msk = BIT(2),
		.msg = "sec_ecc_2bit_err_rint"
	},
	{
		.int_msk = BIT(3),
		.msg = "sec_ecc_1bit_err_rint"
	},
	{
		.int_msk = BIT(4),
		.msg = "sec_req_trng_timeout_rint"
	},
	{
		.int_msk = BIT(5),
		.msg = "sec_fsm_hbeat_rint"
	},
	{
		.int_msk = BIT(6),
		.msg = "sec_channel_req_rng_timeout_rint"
	},
	{
		.int_msk = BIT(7),
		.msg = "sec_bd_err_rint"
	},
	{
		.int_msk = BIT(8),
		.msg = "sec_chain_buff_err_rint"
	},
	{
		.int_msk = BIT(14),
		.msg = "sec_no_secure_access"
	},
	{
		.int_msk = BIT(15),
		.msg = "sec_wrapping_key_auth_err"
	},
	{
		.int_msk = BIT(16),
		.msg = "sec_km_key_crc_fail"
	},
	{
		.int_msk = BIT(17),
		.msg = "sec_axi_poison_err"
	},
	{
		.int_msk = BIT(18),
		.msg = "sec_sva_err"
	},
	{}
};

static const char * const sec_dbg_file_name[] = {
	[SEC_CLEAR_ENABLE] = "clear_enable",
};

static struct sec_dfx_item sec_dfx_labels[] = {
	{"send_cnt", offsetof(struct sec_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
	{"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)},
	{"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
	{"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
	{"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
};

static const struct debugfs_reg32 sec_dfx_regs[] = {
	{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
	{"SEC_SAA_EN ", 0x301270},
	{"SEC_BD_LATENCY_MIN ", 0x301600},
	{"SEC_BD_LATENCY_MAX ", 0x301608},
	{"SEC_BD_LATENCY_AVG ", 0x30160C},
	{"SEC_BD_NUM_IN_SAA0 ", 0x301670},
	{"SEC_BD_NUM_IN_SAA1 ", 0x301674},
	{"SEC_BD_NUM_IN_SEC ", 0x301680},
	{"SEC_ECC_1BIT_CNT ", 0x301C00},
	{"SEC_ECC_1BIT_INFO ", 0x301C04},
	{"SEC_ECC_2BIT_CNT ", 0x301C10},
	{"SEC_ECC_2BIT_INFO ", 0x301C14},
	{"SEC_BD_SAA0 ", 0x301C20},
	{"SEC_BD_SAA1 ", 0x301C24},
	{"SEC_BD_SAA2 ", 0x301C28},
	{"SEC_BD_SAA3 ", 0x301C2C},
	{"SEC_BD_SAA4 ", 0x301C30},
	{"SEC_BD_SAA5 ", 0x301C34},
	{"SEC_BD_SAA6 ", 0x301C38},
	{"SEC_BD_SAA7 ", 0x301C3C},
	{"SEC_BD_SAA8 ", 0x301C40},
};

static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	return q_num_set(val, kp, SEC_PF_PCI_DEVICE_ID);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
	.set = sec_pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 2-4096, v2 2-1024)");

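/*
 * ctx_q_num is the number of queues bound to each crypto context. The
 * setter below only accepts a non-zero even value no larger than
 * SEC_CTX_Q_NUM_MAX.
 */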
static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
	u32 ctx_q_num;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &ctx_q_num);
	if (ret)
		return -EINVAL;

	if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
		pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
		return -EINVAL;
	}

	return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_ctx_q_num_ops = {
	.set = sec_ctx_q_num_set,
	.get = param_get_int,
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
	hisi_qm_free_qps(qps, qp_num);
	kfree(qps);
}

struct hisi_qp **sec_create_qps(void)
{
	int node = cpu_to_node(smp_processor_id());
	u32 ctx_num = ctx_q_num;
	struct hisi_qp **qps;
	int ret;

	qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
	if (!qps)
		return NULL;

	ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
	if (!ret)
		return qps;

	kfree(qps);
	return NULL;
}

static const struct kernel_param_ops sec_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means SEC registers to crypto only,
 * uacce_mode = 1 means SEC registers to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);

static const struct pci_device_id sec_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_PF_PCI_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, SEC_VF_PCI_DEVICE_ID) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);

static u8 sec_get_endian(struct hisi_qm *qm)
{
	u32 reg;

	/*
	 * A VF has no access to the engine registers, so the endian
	 * setting cannot be read from the engine on a VF.
	 */
	if (qm->pdev->is_virtfn) {
		dev_err_ratelimited(&qm->pdev->dev,
				    "cannot access a register in VF!\n");
		return SEC_LE;
	}
	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	/* BD little endian mode */
	if (!(reg & BIT(0)))
		return SEC_LE;

	/* BD 32-bits big endian mode */
	else if (!(reg & BIT(1)))
		return SEC_32BE;

	/* BD 64-bits big endian mode */
	else
		return SEC_64BE;
}

static void sec_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (qm->ver < QM_HW_V3)
		return;

	/* Enable prefetch */
	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
	val &= SEC_PREFETCH_ENABLE;
	writel(val, qm->io_base + SEC_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
					 val, !(val & SEC_PREFETCH_DISABLE),
					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to open sva prefetch\n");
}

static void sec_close_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
	val |= SEC_PREFETCH_DISABLE;
	writel(val, qm->io_base + SEC_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
					 val, !(val & SEC_SVA_DISABLE_READY),
					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
	if (ret)
pci_err(qm->pdev, "failed to close sva prefetch\n"); 379 } 380 381 static int sec_engine_init(struct hisi_qm *qm) 382 { 383 int ret; 384 u32 reg; 385 386 /* disable clock gate control */ 387 reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); 388 reg &= SEC_CLK_GATE_DISABLE; 389 writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); 390 391 writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG); 392 393 ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG, 394 reg, reg & 0x1, SEC_DELAY_10_US, 395 SEC_POLL_TIMEOUT_US); 396 if (ret) { 397 pci_err(qm->pdev, "fail to init sec mem\n"); 398 return ret; 399 } 400 401 reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); 402 reg |= (0x1 << SEC_TRNG_EN_SHIFT); 403 writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); 404 405 reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); 406 reg |= SEC_USER0_SMMU_NORMAL; 407 writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); 408 409 reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); 410 reg &= SEC_USER1_SMMU_MASK; 411 if (qm->use_sva && qm->ver == QM_HW_V2) 412 reg |= SEC_USER1_SMMU_SVA; 413 else 414 reg |= SEC_USER1_SMMU_NORMAL; 415 writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); 416 417 writel(SEC_SINGLE_PORT_MAX_TRANS, 418 qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); 419 420 writel(SEC_SAA_ENABLE, qm->io_base + SEC_SAA_EN_REG); 421 422 /* Enable sm4 extra mode, as ctr/ecb */ 423 writel_relaxed(SEC_BD_ERR_CHK_EN0, 424 qm->io_base + SEC_BD_ERR_CHK_EN_REG0); 425 /* Enable sm4 xts mode multiple iv */ 426 writel_relaxed(SEC_BD_ERR_CHK_EN1, 427 qm->io_base + SEC_BD_ERR_CHK_EN_REG1); 428 writel_relaxed(SEC_BD_ERR_CHK_EN3, 429 qm->io_base + SEC_BD_ERR_CHK_EN_REG3); 430 431 /* config endian */ 432 reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); 433 reg |= sec_get_endian(qm); 434 writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); 435 436 return 0; 437 } 438 439 static int sec_set_user_domain_and_cache(struct hisi_qm *qm) 440 { 441 /* qm user domain */ 442 writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1); 443 writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE); 444 writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1); 445 writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE); 446 writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE); 447 448 /* qm cache */ 449 writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG); 450 writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE); 451 452 /* disable FLR triggered by BME(bus master enable) */ 453 writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG); 454 writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE); 455 456 /* enable sqc,cqc writeback */ 457 writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE | 458 CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | 459 FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL); 460 461 return sec_engine_init(qm); 462 } 463 464 /* sec_debug_regs_clear() - clear the sec debug regs */ 465 static void sec_debug_regs_clear(struct hisi_qm *qm) 466 { 467 int i; 468 469 /* clear sec dfx regs */ 470 writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE); 471 for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) 472 readl(qm->io_base + sec_dfx_regs[i].offset); 473 474 /* clear rdclr_en */ 475 writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE); 476 477 hisi_qm_debug_regs_clear(qm); 478 } 479 480 static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable) 481 { 482 u32 val1, val2; 483 484 val1 = 
static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 val1, val2;

	val1 = readl(qm->io_base + SEC_CONTROL_REG);
	if (enable) {
		val1 |= SEC_AXI_SHUTDOWN_ENABLE;
		val2 = SEC_RAS_NFE_ENB_MSK;
	} else {
		val1 &= SEC_AXI_SHUTDOWN_DISABLE;
		val2 = 0x0;
	}

	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + SEC_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + SEC_CONTROL_REG);
}

static void sec_hw_error_enable(struct hisi_qm *qm)
{
	if (qm->ver == QM_HW_V1) {
		writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
		pci_info(qm->pdev, "V1 does not support hw error handling\n");
		return;
	}

	/* clear SEC hw error source if any */
	writel(SEC_CORE_INT_CLEAR, qm->io_base + SEC_CORE_INT_SOURCE);

	/* enable RAS int */
	writel(SEC_RAS_CE_ENB_MSK, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_NFE_ENB_MSK, qm->io_base + SEC_RAS_NFE_REG);

	/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
	sec_master_ooo_ctrl(qm, true);

	/* enable SEC hw error interrupts */
	writel(SEC_CORE_INT_ENABLE, qm->io_base + SEC_CORE_INT_MASK);
}

static void sec_hw_error_disable(struct hisi_qm *qm)
{
	/* disable SEC hw error interrupts */
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* disable SEC block master OOO when nfe occurs on Kunpeng930 */
	sec_master_ooo_ctrl(qm, false);

	/* disable RAS int */
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
}

static u32 sec_clear_enable_read(struct sec_debug_file *file)
{
	struct hisi_qm *qm = file->qm;

	return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
			SEC_CTRL_CNT_CLR_CE_BIT;
}

static int sec_clear_enable_write(struct sec_debug_file *file, u32 val)
{
	struct hisi_qm *qm = file->qm;
	u32 tmp;

	if (val != 1 && val)
		return -EINVAL;

	tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	return 0;
}

static ssize_t sec_debug_read(struct file *filp, char __user *buf,
			      size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	u32 val;
	int ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		val = sec_clear_enable_read(file);
		break;
	default:
		spin_unlock_irq(&file->lock);
		return -EINVAL;
	}

	spin_unlock_irq(&file->lock);
	ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);

	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= SEC_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		ret = sec_clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	spin_unlock_irq(&file->lock);

	return count;

err_input:
	spin_unlock_irq(&file->lock);
	return ret;
}

static const struct file_operations sec_dbg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = sec_debug_read,
	.write = sec_debug_write,
};

static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

static int sec_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
			 sec_debugfs_atomic64_set, "%lld\n");

static int sec_core_debug_init(struct hisi_qm *qm)
{
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	struct device *dev = &qm->pdev->dev;
	struct sec_dfx *dfx = &sec->debug.dfx;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	int i;

	tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = sec_dfx_regs;
	regset->nregs = ARRAY_SIZE(sec_dfx_regs);
	regset->base = qm->io_base;

	if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID)
		debugfs_create_regset32("regs", 0444, tmp_d, regset);

	for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
		atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
						  sec_dfx_labels[i].offset);
		debugfs_create_file(sec_dfx_labels[i].name, 0644,
				    tmp_d, data, &sec_atomic64_ops);
	}

	return 0;
}

static int sec_debug_init(struct hisi_qm *qm)
{
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	int i;

	if (qm->pdev->device == SEC_PF_PCI_DEVICE_ID) {
		for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) {
			spin_lock_init(&sec->debug.files[i].lock);
			sec->debug.files[i].index = i;
			sec->debug.files[i].qm = qm;

			debugfs_create_file(sec_dbg_file_name[i], 0600,
					    qm->debug.debug_root,
					    sec->debug.files + i,
					    &sec_dbg_fops);
		}
	}

	return sec_core_debug_init(qm);
}

static int sec_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  sec_debugfs_root);
	qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;
	hisi_qm_debug_init(qm);

	ret = sec_debug_init(qm);
	if (ret)
		goto failed_to_create;

	return 0;

failed_to_create:
	debugfs_remove_recursive(sec_debugfs_root);
	return ret;
}

static void sec_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);
}

static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct sec_hw_error *errs = sec_hw_errors;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (errs->msg) {
		if (errs->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				errs->msg, errs->int_msk);

			if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
				err_val = readl(qm->io_base +
						SEC_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "multi ecc sram num=0x%x\n",
					((err_val) >> SEC_ECC_NUM) &
					SEC_ECC_MASH);
			}
		}
		errs++;
	}
}

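/*
 * The helpers below report, clear and recover from device error status;
 * they are invoked by the QM error handling framework through the
 * sec_err_ini callbacks.
 */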
static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CORE_INT_STATUS);
}

static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}

static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + SEC_CONTROL_REG);
	writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG);
	writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
}

static void sec_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;

	err_info->ce = QM_BASE_CE;
	err_info->fe = 0;
	err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
	err_info->dev_ce_mask = SEC_RAS_CE_ENB_MSK;
	err_info->msi_wr_port = BIT(0);
	err_info->acpi_rst = "SRST";
	err_info->nfe = QM_BASE_NFE | QM_ACC_DO_TASK_TIMEOUT |
			QM_ACC_WB_NOT_READY_TIMEOUT;
}

static const struct hisi_qm_err_ini sec_err_ini = {
	.hw_init = sec_set_user_domain_and_cache,
	.hw_err_enable = sec_hw_error_enable,
	.hw_err_disable = sec_hw_error_disable,
	.get_dev_hw_err_status = sec_get_hw_err_status,
	.clear_dev_hw_err_status = sec_clear_hw_err_status,
	.log_dev_hw_err = sec_log_hw_error,
	.open_axi_master_ooo = sec_open_axi_master_ooo,
	.open_sva_prefetch = sec_open_sva_prefetch,
	.close_sva_prefetch = sec_close_sva_prefetch,
	.err_info_init = sec_err_info_init,
};

static int sec_pf_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	qm->err_ini = &sec_err_ini;
	qm->err_ini->err_info_init(qm);

	ret = sec_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	sec_open_sva_prefetch(qm);
	hisi_qm_dev_err_init(qm);
	sec_debug_regs_clear(qm);

	return 0;
}

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	int ret;

	qm->pdev = pdev;
	qm->ver = pdev->revision;
	qm->algs = "cipher\ndigest\naead";
	qm->mode = uacce_mode;
	qm->sqe_size = SEC_SQE_SIZE;
	qm->dev_name = sec_name;

	qm->fun_type = (pdev->device == SEC_PF_PCI_DEVICE_ID) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = SEC_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &sec_devices;
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * There is no way to get the QM configuration from a VM on
		 * v1 hardware, so the PF is forced to use SEC_PF_DEF_Q_NUM
		 * and only one VF is supported on v1 hardware.
		 * v2 hardware has no such problem.
		 */
		qm->qp_base = SEC_PF_DEF_Q_NUM;
		qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
	}

	/*
	 * WQ_HIGHPRI: SEC requests must be handled with low latency,
	 * so a high-priority workqueue is needed.
	 * WQ_UNBOUND: SEC tasks are likely to be long-running,
	 * CPU-intensive workloads.
	 */
	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
				 WQ_UNBOUND, num_online_cpus(),
				 pci_name(qm->pdev));
	if (!qm->wq) {
		pci_err(qm->pdev, "fail to alloc workqueue\n");
		return -ENOMEM;
	}

	ret = hisi_qm_init(qm);
	if (ret)
		destroy_workqueue(qm->wq);

	return ret;
}

static void sec_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}

static int sec_probe_init(struct sec_dev *sec)
{
	u32 type_rate = SEC_SHAPER_TYPE_RATE;
	struct hisi_qm *qm = &sec->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = sec_pf_probe_init(sec);
		if (ret)
			return ret;
		/* enable shaper type 0 */
		if (qm->ver >= QM_HW_V3) {
			type_rate |= QM_SHAPER_ENABLE;
			qm->type_rate = type_rate;
		}
	}

	return 0;
}

static void sec_probe_uninit(struct hisi_qm *qm)
{
	hisi_qm_dev_err_uninit(qm);

	destroy_workqueue(qm->wq);
}

static void sec_iommu_used_check(struct sec_dev *sec)
{
	struct iommu_domain *domain;
	struct device *dev = &sec->qm.pdev->dev;

	domain = iommu_get_domain_for_dev(dev);

	/* Check if iommu is used */
	sec->iommu_used = false;
	if (domain) {
		if (domain->type & __IOMMU_DOMAIN_PAGING)
			sec->iommu_used = true;
		dev_info(dev, "SMMU Opened, the iommu type = %u\n",
			 domain->type);
	}
}

static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sec_dev *sec;
	struct hisi_qm *qm;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	qm = &sec->qm;
	ret = sec_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
		return ret;
	}

	sec->ctx_q_num = ctx_q_num;
	sec_iommu_used_check(sec);

	ret = sec_probe_init(sec);
	if (ret) {
		pci_err(pdev, "Failed to probe!\n");
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		pci_err(pdev, "Failed to start sec qm!\n");
		goto err_probe_uninit;
	}

	ret = sec_debugfs_init(qm);
	if (ret)
		pci_warn(pdev, "Failed to init debugfs!\n");

	if (qm->qp_num >= ctx_q_num) {
		ret = hisi_qm_alg_register(qm, &sec_devices);
		if (ret < 0) {
			pr_err("Failed to register driver to crypto.\n");
			goto err_qm_stop;
		}
	} else {
		pci_warn(qm->pdev,
			 "Failed to use kernel mode, qp not enough!\n");
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_alg_unregister;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_alg_unregister;
	}

	return 0;

err_alg_unregister:
	hisi_qm_alg_unregister(qm, &sec_devices);
err_qm_stop:
	sec_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
	sec_probe_uninit(qm);
err_qm_uninit:
	sec_qm_uninit(qm);
	return ret;
}

static void sec_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_wait_task_finish(qm, &sec_devices);
	if (qm->qp_num >= ctx_q_num)
		hisi_qm_alg_unregister(qm, &sec_devices);

	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	sec_debugfs_exit(qm);

	(void)hisi_qm_stop(qm, QM_NORMAL);

	if (qm->fun_type == QM_HW_PF)
		sec_debug_regs_clear(qm);

	sec_probe_uninit(qm);

	sec_qm_uninit(qm);
}

static const struct pci_error_handlers sec_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset = hisi_qm_dev_slot_reset,
	.reset_prepare = hisi_qm_reset_prepare,
	.reset_done = hisi_qm_reset_done,
};

static struct pci_driver sec_pci_driver = {
	.name = "hisi_sec2",
	.id_table = sec_dev_ids,
	.probe = sec_probe,
	.remove = sec_remove,
	.err_handler = &sec_err_handler,
	.sriov_configure = hisi_qm_sriov_configure,
	.shutdown = hisi_qm_dev_shutdown,
};

static void sec_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

static void sec_unregister_debugfs(void)
{
	debugfs_remove_recursive(sec_debugfs_root);
}

static int __init sec_init(void)
{
	int ret;

	hisi_qm_init_list(&sec_devices);
	sec_register_debugfs();

	ret = pci_register_driver(&sec_pci_driver);
	if (ret < 0) {
		sec_unregister_debugfs();
		pr_err("Failed to register pci driver.\n");
		return ret;
	}

	return 0;
}

static void __exit sec_exit(void)
{
	pci_unregister_driver(&sec_pci_driver);
	sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");