// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/topology.h>
#include <linux/uacce.h>
#include "sec.h"

#define CAP_FILE_PERMISSION		0444
#define SEC_VF_NUM			63
#define SEC_QUEUE_NUM_V1		4096
#define PCI_DEVICE_ID_HUAWEI_SEC_PF	0xa255

#define SEC_BD_ERR_CHK_EN0		0xEFFFFFFF
#define SEC_BD_ERR_CHK_EN1		0x7ffff7fd
#define SEC_BD_ERR_CHK_EN3		0xffffbfff

#define SEC_SQE_SIZE			128
#define SEC_PF_DEF_Q_NUM		256
#define SEC_PF_DEF_Q_BASE		0
#define SEC_CTX_Q_NUM_DEF		2
#define SEC_CTX_Q_NUM_MAX		32

#define SEC_CTRL_CNT_CLR_CE		0x301120
#define SEC_CTRL_CNT_CLR_CE_BIT		BIT(0)
#define SEC_CORE_INT_SOURCE		0x301010
#define SEC_CORE_INT_MASK		0x301000
#define SEC_CORE_INT_STATUS		0x301008
#define SEC_CORE_SRAM_ECC_ERR_INFO	0x301C14
#define SEC_ECC_NUM			16
#define SEC_ECC_MASH			0xFF
#define SEC_CORE_INT_DISABLE		0x0

#define SEC_RAS_CE_REG			0x301050
#define SEC_RAS_FE_REG			0x301054
#define SEC_RAS_NFE_REG			0x301058
#define SEC_RAS_FE_ENB_MSK		0x0
#define SEC_OOO_SHUTDOWN_SEL		0x301014
#define SEC_RAS_DISABLE			0x0
#define SEC_AXI_ERROR_MASK		(BIT(0) | BIT(1))

#define SEC_MEM_START_INIT_REG		0x301100
#define SEC_MEM_INIT_DONE_REG		0x301104

/* clock gating */
#define SEC_CONTROL_REG			0x301200
#define SEC_DYNAMIC_GATE_REG		0x30121c
#define SEC_CORE_AUTO_GATE		0x30212c
#define SEC_DYNAMIC_GATE_EN		0x7fff
#define SEC_CORE_AUTO_GATE_EN		GENMASK(3, 0)
#define SEC_CLK_GATE_ENABLE		BIT(3)
#define SEC_CLK_GATE_DISABLE		(~BIT(3))

#define SEC_TRNG_EN_SHIFT		8
#define SEC_AXI_SHUTDOWN_ENABLE		BIT(12)
#define SEC_AXI_SHUTDOWN_DISABLE	0xFFFFEFFF

#define SEC_INTERFACE_USER_CTRL0_REG	0x301220
#define SEC_INTERFACE_USER_CTRL1_REG	0x301224
#define SEC_SAA_EN_REG			0x301270
#define SEC_BD_ERR_CHK_EN_REG0		0x301380
#define SEC_BD_ERR_CHK_EN_REG1		0x301384
#define SEC_BD_ERR_CHK_EN_REG3		0x30138c

#define SEC_USER0_SMMU_NORMAL		(BIT(23) | BIT(15))
#define SEC_USER1_SMMU_NORMAL		(BIT(31) | BIT(23) | BIT(15) | BIT(7))
#define SEC_USER1_ENABLE_CONTEXT_SSV	BIT(24)
#define SEC_USER1_ENABLE_DATA_SSV	BIT(16)
#define SEC_USER1_WB_CONTEXT_SSV	BIT(8)
#define SEC_USER1_WB_DATA_SSV		BIT(0)
#define SEC_USER1_SVA_SET		(SEC_USER1_ENABLE_CONTEXT_SSV | \
					SEC_USER1_ENABLE_DATA_SSV | \
					SEC_USER1_WB_CONTEXT_SSV | \
					SEC_USER1_WB_DATA_SSV)
#define SEC_USER1_SMMU_SVA		(SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET)
#define SEC_USER1_SMMU_MASK		(~SEC_USER1_SVA_SET)
#define SEC_INTERFACE_USER_CTRL0_REG_V3	0x302220
#define SEC_INTERFACE_USER_CTRL1_REG_V3	0x302224
#define SEC_USER1_SMMU_NORMAL_V3	(BIT(23) | BIT(17) | BIT(11) | BIT(5))
#define SEC_USER1_SMMU_MASK_V3		0xFF79E79E
#define SEC_CORE_INT_STATUS_M_ECC	BIT(2)

#define SEC_PREFETCH_CFG		0x301130
#define SEC_SVA_TRANS			0x301EC4
#define SEC_PREFETCH_ENABLE		(~(BIT(0) | BIT(1) | BIT(11)))
#define SEC_PREFETCH_DISABLE		BIT(1)
#define SEC_SVA_DISABLE_READY		(BIT(7) | BIT(11))
#define SEC_SVA_PREFETCH_INFO		0x301ED4
#define SEC_SVA_STALL_NUM		GENMASK(23, 8)
#define SEC_SVA_PREFETCH_NUM		GENMASK(2, 0)
#define SEC_WAIT_SVA_READY		500000
#define SEC_READ_SVA_STATUS_TIMES	3
#define SEC_WAIT_US_MIN			10
#define SEC_WAIT_US_MAX			20
#define SEC_WAIT_QP_US_MIN		1000
#define SEC_WAIT_QP_US_MAX		2000
#define SEC_MAX_WAIT_TIMES		2000

#define SEC_DELAY_10_US			10
#define SEC_POLL_TIMEOUT_US		1000
#define SEC_DBGFS_VAL_MAX_LEN		20
#define SEC_SINGLE_PORT_MAX_TRANS	0x2060

#define SEC_SQE_MASK_OFFSET		16
#define SEC_SQE_MASK_LEN		108
#define SEC_SHAPER_TYPE_RATE		400

#define SEC_DFX_BASE			0x301000
#define SEC_DFX_CORE			0x302100
#define SEC_DFX_COMMON1			0x301600
#define SEC_DFX_COMMON2			0x301C00
#define SEC_DFX_BASE_LEN		0x9D
#define SEC_DFX_CORE_LEN		0x32B
#define SEC_DFX_COMMON1_LEN		0x45
#define SEC_DFX_COMMON2_LEN		0xBA

#define SEC_ALG_BITMAP_SHIFT		32

#define SEC_CIPHER_BITMAP		(GENMASK_ULL(5, 0) | GENMASK_ULL(16, 12) | \
					GENMASK(24, 21))
#define SEC_DIGEST_BITMAP		(GENMASK_ULL(11, 8) | GENMASK_ULL(20, 19) | \
					GENMASK_ULL(42, 25))
#define SEC_AEAD_BITMAP			(GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \
					GENMASK_ULL(45, 43))

struct sec_hw_error {
	u32 int_msk;
	const char *msg;
};

struct sec_dfx_item {
	const char *name;
	u32 offset;
};

static const char sec_name[] = "hisi_sec2";
static struct dentry *sec_debugfs_root;

static struct hisi_qm_list sec_devices = {
	.register_to_crypto = sec_register_to_crypto,
	.unregister_from_crypto = sec_unregister_from_crypto,
};

static const struct hisi_qm_cap_info sec_basic_info[] = {
	{SEC_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C77, 0x7C77},
	{SEC_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC77, 0x6C77},
	{SEC_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77},
	{SEC_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
	{SEC_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x177, 0x60177},
	{SEC_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x177, 0x177},
	{SEC_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x4, 0x177},
	{SEC_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x88, 0xC088},
	{SEC_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x1, 0x1, 0x1},
	{SEC_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1},
	{SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
	{SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
	{SEC_CORE_ENABLE_BITMAP, 0x3140, 0, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
	{SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x18670CF},
	{SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
	{SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_DEV_ALG_BITMAP_HIGH, 0x3150, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE1_ALG_BITMAP_LOW, 0x3154, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE1_ALG_BITMAP_HIGH, 0x3158, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE2_ALG_BITMAP_LOW, 0x315c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE2_ALG_BITMAP_HIGH, 0x3160, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE3_ALG_BITMAP_LOW, 0x3164, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE3_ALG_BITMAP_HIGH, 0x3168, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE4_ALG_BITMAP_LOW, 0x316c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
};

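/*
 * Capability table used to pre-store device capability values at probe time
 * (see sec_pre_store_cap_reg()) and to label the "cap_regs" debugfs dump.
 * Each entry is, roughly, {capability type, name, register offset, default
 * for HW v1, v2, v3}; on hardware that exposes the capability registers the
 * value is read back from the device instead of using the per-version
 * default.
 */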
static const struct hisi_qm_cap_query_info sec_cap_query_info[] = {
	{QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C77, 0x7C77},
	{QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC77, 0x6C77},
	{QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
	{SEC_RAS_NFE_TYPE, "SEC_RAS_NFE_TYPE ", 0x3130, 0x0, 0x177, 0x60177},
	{SEC_RAS_NFE_RESET, "SEC_RAS_NFE_RESET ", 0x3134, 0x0, 0x177, 0x177},
	{SEC_RAS_CE_TYPE, "SEC_RAS_CE_TYPE ", 0x3138, 0x0, 0x88, 0xC088},
	{SEC_CORE_INFO, "SEC_CORE_INFO ", 0x313c, 0x110404, 0x110404, 0x110404},
	{SEC_CORE_EN, "SEC_CORE_EN ", 0x3140, 0x17F, 0x17F, 0xF},
	{SEC_DRV_ALG_BITMAP_LOW_TB, "SEC_DRV_ALG_BITMAP_LOW ",
		0x3144, 0x18050CB, 0x18050CB, 0x18670CF},
	{SEC_DRV_ALG_BITMAP_HIGH_TB, "SEC_DRV_ALG_BITMAP_HIGH ",
		0x3148, 0x395C, 0x395C, 0x395C},
	{SEC_ALG_BITMAP_LOW, "SEC_ALG_BITMAP_LOW ",
		0x314c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_ALG_BITMAP_HIGH, "SEC_ALG_BITMAP_HIGH ", 0x3150, 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE1_BITMAP_LOW, "SEC_CORE1_BITMAP_LOW ",
		0x3154, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE1_BITMAP_HIGH, "SEC_CORE1_BITMAP_HIGH ", 0x3158, 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE2_BITMAP_LOW, "SEC_CORE2_BITMAP_LOW ",
		0x315c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE2_BITMAP_HIGH, "SEC_CORE2_BITMAP_HIGH ", 0x3160, 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE3_BITMAP_LOW, "SEC_CORE3_BITMAP_LOW ",
		0x3164, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE3_BITMAP_HIGH, "SEC_CORE3_BITMAP_HIGH ", 0x3168, 0x3FFF, 0x3FFF, 0x3FFF},
	{SEC_CORE4_BITMAP_LOW, "SEC_CORE4_BITMAP_LOW ",
		0x316c, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
	{SEC_CORE4_BITMAP_HIGH, "SEC_CORE4_BITMAP_HIGH ", 0x3170, 0x3FFF, 0x3FFF, 0x3FFF},
};

static const struct qm_dev_alg sec_dev_algs[] = { {
		.alg_msk = SEC_CIPHER_BITMAP,
		.alg = "cipher\n",
	}, {
		.alg_msk = SEC_DIGEST_BITMAP,
		.alg = "digest\n",
	}, {
		.alg_msk = SEC_AEAD_BITMAP,
		.alg = "aead\n",
	},
};

static const struct sec_hw_error sec_hw_errors[] = {
	{
		.int_msk = BIT(0),
		.msg = "sec_axi_rresp_err_rint"
	},
	{
		.int_msk = BIT(1),
		.msg = "sec_axi_bresp_err_rint"
	},
	{
		.int_msk = BIT(2),
		.msg = "sec_ecc_2bit_err_rint"
	},
	{
		.int_msk = BIT(3),
		.msg = "sec_ecc_1bit_err_rint"
	},
	{
		.int_msk = BIT(4),
		.msg = "sec_req_trng_timeout_rint"
	},
	{
		.int_msk = BIT(5),
		.msg = "sec_fsm_hbeat_rint"
	},
	{
		.int_msk = BIT(6),
		.msg = "sec_channel_req_rng_timeout_rint"
	},
	{
		.int_msk = BIT(7),
		.msg = "sec_bd_err_rint"
	},
	{
		.int_msk = BIT(8),
		.msg = "sec_chain_buff_err_rint"
	},
	{
		.int_msk = BIT(14),
		.msg = "sec_no_secure_access"
	},
	{
		.int_msk = BIT(15),
		.msg = "sec_wrapping_key_auth_err"
	},
	{
		.int_msk = BIT(16),
		.msg = "sec_km_key_crc_fail"
	},
	{
		.int_msk = BIT(17),
		.msg = "sec_axi_poison_err"
	},
	{
		.int_msk = BIT(18),
		.msg = "sec_sva_err"
	},
	{}
};

static const char * const sec_dbg_file_name[] = {
	[SEC_CLEAR_ENABLE] = "clear_enable",
};

static struct sec_dfx_item sec_dfx_labels[] = {
	{"send_cnt", offsetof(struct sec_dfx, send_cnt)},
	{"recv_cnt", offsetof(struct sec_dfx, recv_cnt)},
	{"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)},
	{"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)},
	{"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)},
	{"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)},
	{"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)},
};

static const struct debugfs_reg32 sec_dfx_regs[] = {
	{"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010},
	{"SEC_SAA_EN ", 0x301270},
	{"SEC_BD_LATENCY_MIN ", 0x301600},
	{"SEC_BD_LATENCY_MAX ", 0x301608},
	{"SEC_BD_LATENCY_AVG ", 0x30160C},
	{"SEC_BD_NUM_IN_SAA0 ", 0x301670},
	{"SEC_BD_NUM_IN_SAA1 ", 0x301674},
	{"SEC_BD_NUM_IN_SEC ", 0x301680},
	{"SEC_ECC_1BIT_CNT ", 0x301C00},
	{"SEC_ECC_1BIT_INFO ", 0x301C04},
	{"SEC_ECC_2BIT_CNT ", 0x301C10},
	{"SEC_ECC_2BIT_INFO ", 0x301C14},
	{"SEC_BD_SAA0 ", 0x301C20},
	{"SEC_BD_SAA1 ", 0x301C24},
	{"SEC_BD_SAA2 ", 0x301C28},
	{"SEC_BD_SAA3 ", 0x301C2C},
	{"SEC_BD_SAA4 ", 0x301C30},
	{"SEC_BD_SAA5 ", 0x301C34},
	{"SEC_BD_SAA6 ", 0x301C38},
	{"SEC_BD_SAA7 ", 0x301C3C},
	{"SEC_BD_SAA8 ", 0x301C40},
	{"SEC_RAS_CE_ENABLE ", 0x301050},
	{"SEC_RAS_FE_ENABLE ", 0x301054},
	{"SEC_RAS_NFE_ENABLE ", 0x301058},
	{"SEC_REQ_TRNG_TIME_TH ", 0x30112C},
	{"SEC_CHANNEL_RNG_REQ_THLD ", 0x302110},
};

/* define the SEC's dfx regs region and region length */
static struct dfx_diff_registers sec_diff_regs[] = {
	{
		.reg_offset = SEC_DFX_BASE,
		.reg_len = SEC_DFX_BASE_LEN,
	}, {
		.reg_offset = SEC_DFX_COMMON1,
		.reg_len = SEC_DFX_COMMON1_LEN,
	}, {
		.reg_offset = SEC_DFX_COMMON2,
		.reg_len = SEC_DFX_COMMON2_LEN,
	}, {
		.reg_offset = SEC_DFX_CORE,
		.reg_len = SEC_DFX_CORE_LEN,
	},
};

static int sec_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
				   ARRAY_SIZE(sec_diff_regs));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(sec_diff_regs);

static bool pf_q_num_flag;
static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	pf_q_num_flag = true;

	return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF);
}

static const struct kernel_param_ops sec_pf_q_num_ops = {
	.set = sec_pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = SEC_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 2-4096, v2 2-1024)");

static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp)
{
	u32 ctx_q_num;
	int ret;

	if (!val)
		return -EINVAL;

	ret = kstrtou32(val, 10, &ctx_q_num);
	if (ret)
		return -EINVAL;

	if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) {
		pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num);
		return -EINVAL;
	}

	return param_set_int(val, kp);
}

static const struct kernel_param_ops sec_ctx_q_num_ops = {
	.set = sec_ctx_q_num_set,
	.get = param_get_int,
};
static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF;
module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444);
MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");

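/*
 * Illustrative usage of the module parameters above (the values shown are
 * only an example, not a recommendation):
 *
 *   modprobe hisi_sec2 pf_q_num=256 ctx_q_num=2 vfs_num=16
 *
 * pf_q_num selects the number of queues owned by the PF, ctx_q_num the
 * number of queues used per crypto context (even values from 2 to 32), and
 * vfs_num the number of VFs to enable via SR-IOV (1-63; 0, the default,
 * leaves SR-IOV disabled).
 */
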
void sec_destroy_qps(struct hisi_qp **qps, int qp_num)
{
	hisi_qm_free_qps(qps, qp_num);
	kfree(qps);
}

struct hisi_qp **sec_create_qps(void)
{
	int node = cpu_to_node(raw_smp_processor_id());
	u32 ctx_num = ctx_q_num;
	struct hisi_qp **qps;
	int ret;

	qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
	if (!qps)
		return NULL;

	ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);
	if (!ret)
		return qps;

	kfree(qps);
	return NULL;
}

u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low)
{
	u32 cap_val_h, cap_val_l;

	cap_val_h = qm->cap_tables.dev_cap_table[high].cap_val;
	cap_val_l = qm->cap_tables.dev_cap_table[low].cap_val;

	return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l;
}

static const struct kernel_param_ops sec_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means sec only registers to crypto,
 * uacce_mode = 1 means sec registers to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);

static const struct pci_device_id sec_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);

static void sec_set_endian(struct hisi_qm *qm)
{
	u32 reg;

	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg &= ~(BIT(1) | BIT(0));
	if (!IS_ENABLED(CONFIG_64BIT))
		reg |= BIT(1);

	if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
		reg |= BIT(0);

	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}

static int sec_wait_sva_ready(struct hisi_qm *qm, __u32 offset, __u32 mask)
{
	u32 val, try_times = 0;
	u8 count = 0;

	/*
	 * Read the register value every 10-20us. If the value is 0 for three
	 * consecutive times, the SVA module is ready.
	 */
	do {
		val = readl(qm->io_base + offset);
		if (val & mask)
			count = 0;
		else if (++count == SEC_READ_SVA_STATUS_TIMES)
			break;

		usleep_range(SEC_WAIT_US_MIN, SEC_WAIT_US_MAX);
	} while (++try_times < SEC_WAIT_SVA_READY);

	if (try_times == SEC_WAIT_SVA_READY) {
		pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void sec_close_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
	val |= SEC_PREFETCH_DISABLE;
	writel(val, qm->io_base + SEC_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
					 val, !(val & SEC_SVA_DISABLE_READY),
					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to close sva prefetch\n");

	(void)sec_wait_sva_ready(qm, SEC_SVA_PREFETCH_INFO, SEC_SVA_STALL_NUM);
}

static void sec_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	/* Enable prefetch */
	val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
	val &= SEC_PREFETCH_ENABLE;
	writel(val, qm->io_base + SEC_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
					 val, !(val & SEC_PREFETCH_DISABLE),
					 SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
	if (ret) {
		pci_err(qm->pdev, "failed to open sva prefetch\n");
		sec_close_sva_prefetch(qm);
		return;
	}

	ret = sec_wait_sva_ready(qm, SEC_SVA_TRANS, SEC_SVA_PREFETCH_NUM);
	if (ret)
		sec_close_sva_prefetch(qm);
}

static void sec_engine_sva_config(struct hisi_qm *qm)
{
	u32 reg;

	if (qm->ver > QM_HW_V2) {
		reg = readl_relaxed(qm->io_base +
				SEC_INTERFACE_USER_CTRL0_REG_V3);
		reg |= SEC_USER0_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
				SEC_INTERFACE_USER_CTRL0_REG_V3);

		reg = readl_relaxed(qm->io_base +
				SEC_INTERFACE_USER_CTRL1_REG_V3);
		reg &= SEC_USER1_SMMU_MASK_V3;
		reg |= SEC_USER1_SMMU_NORMAL_V3;
		writel_relaxed(reg, qm->io_base +
				SEC_INTERFACE_USER_CTRL1_REG_V3);
	} else {
		reg = readl_relaxed(qm->io_base +
				SEC_INTERFACE_USER_CTRL0_REG);
		reg |= SEC_USER0_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
				SEC_INTERFACE_USER_CTRL0_REG);
		reg = readl_relaxed(qm->io_base +
				SEC_INTERFACE_USER_CTRL1_REG);
		reg &= SEC_USER1_SMMU_MASK;
		if (qm->use_sva)
			reg |= SEC_USER1_SMMU_SVA;
		else
			reg |= SEC_USER1_SMMU_NORMAL;
		writel_relaxed(reg, qm->io_base +
				SEC_INTERFACE_USER_CTRL1_REG);
	}
	sec_open_sva_prefetch(qm);
}

static void sec_enable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	val |= SEC_CLK_GATE_ENABLE;
	writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);

	val = readl(qm->io_base + SEC_DYNAMIC_GATE_REG);
	val |= SEC_DYNAMIC_GATE_EN;
	writel(val, qm->io_base + SEC_DYNAMIC_GATE_REG);

	val = readl(qm->io_base + SEC_CORE_AUTO_GATE);
	val |= SEC_CORE_AUTO_GATE_EN;
	writel(val, qm->io_base + SEC_CORE_AUTO_GATE);
}

static void sec_disable_clock_gate(struct hisi_qm *qm)
{
	u32 val;

	/* Kunpeng920 needs to close clock gating */
	val = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	val &= SEC_CLK_GATE_DISABLE;
	writel_relaxed(val, qm->io_base + SEC_CONTROL_REG);
}

static int sec_engine_init(struct hisi_qm *qm)
{
	int ret;
	u32 reg;

	/* disable clock gate control before mem init */
	sec_disable_clock_gate(qm);

	writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG);

	ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG,
					 reg, reg & 0x1, SEC_DELAY_10_US,
					 SEC_POLL_TIMEOUT_US);
	if (ret) {
		pci_err(qm->pdev, "fail to init sec mem\n");
		return ret;
	}

	reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
	reg |= (0x1 << SEC_TRNG_EN_SHIFT);
	writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);

	sec_engine_sva_config(qm);

	writel(SEC_SINGLE_PORT_MAX_TRANS,
	       qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS);

	reg = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CORE_ENABLE_BITMAP, qm->cap_ver);
	writel(reg, qm->io_base + SEC_SAA_EN_REG);

	if (qm->ver < QM_HW_V3) {
		/* HW V2 enable sm4 extra mode, as ctr/ecb */
		writel_relaxed(SEC_BD_ERR_CHK_EN0,
			       qm->io_base + SEC_BD_ERR_CHK_EN_REG0);

		/* HW V2 enable sm4 xts mode multiple iv */
		writel_relaxed(SEC_BD_ERR_CHK_EN1,
			       qm->io_base + SEC_BD_ERR_CHK_EN_REG1);
		writel_relaxed(SEC_BD_ERR_CHK_EN3,
			       qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
	}

	/* config endian */
	sec_set_endian(qm);

	sec_enable_clock_gate(qm);

	return 0;
}

static int sec_set_user_domain_and_cache(struct hisi_qm *qm)
{
	/* qm user domain */
	writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1);
	writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1);
	writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE);

	/* qm cache */
	writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG);
	writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE);

	/* disable FLR triggered by BME(bus master enable) */
	writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);

	/* enable sqc,cqc writeback */
	writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE |
	       CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
	       FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL);

	return sec_engine_init(qm);
}

/* sec_debug_regs_clear() - clear the sec debug regs */
static void sec_debug_regs_clear(struct hisi_qm *qm)
{
	int i;

	/* clear sec dfx regs */
	writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE);
	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
		readl(qm->io_base + sec_dfx_regs[i].offset);

	/* clear rdclr_en */
	writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 val1, val2;

	val1 = readl(qm->io_base + SEC_CONTROL_REG);
	if (enable) {
		val1 |= SEC_AXI_SHUTDOWN_ENABLE;
		val2 = qm->err_info.dev_err.shutdown_mask;
	} else {
		val1 &= SEC_AXI_SHUTDOWN_DISABLE;
		val2 = 0x0;
	}

	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + SEC_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + SEC_CONTROL_REG);
}

static void sec_hw_error_enable(struct hisi_qm *qm)
{
	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;

	if (qm->ver == QM_HW_V1) {
		writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
		pci_info(qm->pdev, "V1 not support hw error handle\n");
		return;
	}

	/* clear SEC hw error source if any */
	writel(err_mask, qm->io_base + SEC_CORE_INT_SOURCE);

	/* enable RAS int */
	writel(dev_err->ce, qm->io_base + SEC_RAS_CE_REG);
	writel(dev_err->fe, qm->io_base + SEC_RAS_FE_REG);
	writel(dev_err->nfe, qm->io_base + SEC_RAS_NFE_REG);

	/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
	sec_master_ooo_ctrl(qm, true);

	/* enable SEC hw error interrupts */
	writel(err_mask, qm->io_base + SEC_CORE_INT_MASK);
}

static void sec_hw_error_disable(struct hisi_qm *qm)
{
	/* disable SEC hw error interrupts */
	writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);

	/* disable SEC block master OOO when nfe occurs on Kunpeng930 */
	sec_master_ooo_ctrl(qm, false);

	/* disable RAS int */
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG);
	writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG);
}

static u32 sec_clear_enable_read(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
		     SEC_CTRL_CNT_CLR_CE_BIT;
}

static int sec_clear_enable_write(struct hisi_qm *qm, u32 val)
{
	u32 tmp;

	if (val != 1 && val)
		return -EINVAL;

	tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) &
	       ~SEC_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE);

	return 0;
}

static ssize_t sec_debug_read(struct file *filp, char __user *buf,
			      size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	struct hisi_qm *qm = file->qm;
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		val = sec_clear_enable_read(qm);
		break;
	default:
		goto err_input;
	}

	spin_unlock_irq(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}

static ssize_t sec_debug_write(struct file *filp, const char __user *buf,
			       size_t count, loff_t *pos)
{
	struct sec_debug_file *file = filp->private_data;
	char tbuf[SEC_DBGFS_VAL_MAX_LEN];
	struct hisi_qm *qm = file->qm;
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= SEC_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);

	switch (file->index) {
	case SEC_CLEAR_ENABLE:
		ret = sec_clear_enable_write(qm, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	ret = count;

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return ret;
}

static const struct file_operations sec_dbg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = sec_debug_read,
	.write = sec_debug_write,
};

static int sec_debugfs_atomic64_get(void *data, u64 *val)
{
	*val = atomic64_read((atomic64_t *)data);

	return 0;
}

static int sec_debugfs_atomic64_set(void *data, u64 val)
{
	if (val)
		return -EINVAL;

	atomic64_set((atomic64_t *)data, 0);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get,
			 sec_debugfs_atomic64_set, "%lld\n");

static int sec_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(sec_regs);

static int sec_cap_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;
	u32 i, size;

	size = qm->cap_tables.qm_cap_size;
	for (i = 0; i < size; i++)
		seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
			   qm->cap_tables.qm_cap_table[i].cap_val);

	size = qm->cap_tables.dev_cap_size;
	for (i = 0; i < size; i++)
		seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
			   qm->cap_tables.dev_cap_table[i].cap_val);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(sec_cap_regs);

static int sec_core_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *sec_regs = qm->debug.acc_diff_regs;
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	struct device *dev = &qm->pdev->dev;
	struct sec_dfx *dfx = &sec->debug.dfx;
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	int i;

	tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root);

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = sec_dfx_regs;
	regset->nregs = ARRAY_SIZE(sec_dfx_regs);
	regset->base = qm->io_base;
	regset->dev = dev;

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF)
		debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops);
	if (qm->fun_type == QM_HW_PF && sec_regs)
		debugfs_create_file("diff_regs", 0444, tmp_d,
				    qm, &sec_diff_regs_fops);

	for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) {
		atomic64_t *data = (atomic64_t *)((uintptr_t)dfx +
						  sec_dfx_labels[i].offset);
		debugfs_create_file(sec_dfx_labels[i].name, 0644,
				    tmp_d, data, &sec_atomic64_ops);
	}

	debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
			    qm->debug.debug_root, qm, &sec_cap_regs_fops);

	return 0;
}

static int sec_debug_init(struct hisi_qm *qm)
{
	struct sec_dev *sec = container_of(qm, struct sec_dev, qm);
	int i;

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) {
		for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) {
			spin_lock_init(&sec->debug.files[i].lock);
			sec->debug.files[i].index = i;
			sec->debug.files[i].qm = qm;

			debugfs_create_file(sec_dbg_file_name[i], 0600,
					    qm->debug.debug_root,
					    sec->debug.files + i,
					    &sec_dbg_fops);
		}
	}

	return sec_core_debug_init(qm);
}

static int sec_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	ret = hisi_qm_regs_debugfs_init(qm, sec_diff_regs, ARRAY_SIZE(sec_diff_regs));
	if (ret) {
		dev_warn(dev, "Failed to init SEC diff regs!\n");
		return ret;
	}

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  sec_debugfs_root);
	qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN;

	hisi_qm_debug_init(qm);

	ret = sec_debug_init(qm);
	if (ret)
		goto debugfs_remove;

	return 0;

debugfs_remove:
	debugfs_remove_recursive(qm->debug.debug_root);
	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
	return ret;
}

static void sec_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);

	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs));
}

static int sec_show_last_regs_init(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	int i;

	debug->last_words = kcalloc(ARRAY_SIZE(sec_dfx_regs),
				    sizeof(unsigned int), GFP_KERNEL);
	if (!debug->last_words)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++)
		debug->last_words[i] = readl_relaxed(qm->io_base +
						     sec_dfx_regs[i].offset);

	return 0;
}

static void sec_show_last_regs_uninit(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	kfree(debug->last_words);
	debug->last_words = NULL;
}

static void sec_show_last_dfx_regs(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	struct pci_dev *pdev = qm->pdev;
	u32 val;
	int i;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	/* dump the last words of the debugging registers during controller reset */
	for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) {
		val = readl_relaxed(qm->io_base + sec_dfx_regs[i].offset);
		if (val != debug->last_words[i])
			pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
				 sec_dfx_regs[i].name, debug->last_words[i], val);
	}
}

static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct sec_hw_error *errs = sec_hw_errors;
	struct device *dev = &qm->pdev->dev;
	u32 err_val;

	while (errs->msg) {
		if (errs->int_msk & err_sts) {
			dev_err(dev, "%s [error status=0x%x] found\n",
				errs->msg, errs->int_msk);

			if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) {
				err_val = readl(qm->io_base +
						SEC_CORE_SRAM_ECC_ERR_INFO);
				dev_err(dev, "multi ecc sram num=0x%x\n",
					((err_val) >> SEC_ECC_NUM) &
					SEC_ECC_MASH);
			}
		}
		errs++;
	}
}

static u32 sec_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + SEC_CORE_INT_STATUS);
}

static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE);
}

static void sec_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
	u32 nfe_mask = qm->err_info.dev_err.nfe;

	writel(nfe_mask & (~err_type), qm->io_base + SEC_RAS_NFE_REG);
}

static void sec_enable_error_report(struct hisi_qm *qm)
{
	u32 nfe_mask = qm->err_info.dev_err.nfe;
	u32 ce_mask = qm->err_info.dev_err.ce;

	writel(nfe_mask, qm->io_base + SEC_RAS_NFE_REG);
	writel(ce_mask, qm->io_base + SEC_RAS_CE_REG);
}

static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + SEC_CONTROL_REG);
	writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG);
	writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG);
}

static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
{
	u32 err_status;

	err_status = sec_get_hw_err_status(qm);
	if (err_status) {
		if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
			qm->err_status.is_dev_ecc_mbit = true;
		sec_log_hw_error(qm, err_status);

		if (err_status & qm->err_info.dev_err.reset_mask) {
			/* Disable the same error reporting until device is recovered. */
			sec_disable_error_report(qm, err_status);
			return ACC_ERR_NEED_RESET;
		}
		sec_clear_hw_err_status(qm, err_status);
		/* Re-enable in case firmware has disabled error reporting. */
		sec_enable_error_report(qm);
	}

	return ACC_ERR_RECOVERED;
}

static bool sec_dev_is_abnormal(struct hisi_qm *qm)
{
	u32 err_status;

	err_status = sec_get_hw_err_status(qm);
	if (err_status & qm->err_info.dev_err.shutdown_mask)
		return true;

	return false;
}

static void sec_disable_axi_error(struct hisi_qm *qm)
{
	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;

	writel(err_mask & ~SEC_AXI_ERROR_MASK, qm->io_base + SEC_CORE_INT_MASK);

	if (qm->ver > QM_HW_V2)
		writel(dev_err->shutdown_mask & (~SEC_AXI_ERROR_MASK),
		       qm->io_base + SEC_OOO_SHUTDOWN_SEL);
}

static void sec_enable_axi_error(struct hisi_qm *qm)
{
	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;

	/* clear axi error source */
	writel(SEC_AXI_ERROR_MASK, qm->io_base + SEC_CORE_INT_SOURCE);

	writel(err_mask, qm->io_base + SEC_CORE_INT_MASK);

	if (qm->ver > QM_HW_V2)
		writel(dev_err->shutdown_mask, qm->io_base + SEC_OOO_SHUTDOWN_SEL);
}

static void sec_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;
	struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
	struct hisi_qm_err_mask *dev_err = &err_info->dev_err;

	qm_err->fe = SEC_RAS_FE_ENB_MSK;
	qm_err->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
	qm_err->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
	qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
						    SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	qm_err->reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
						 SEC_QM_RESET_MASK_CAP, qm->cap_ver);
	qm_err->ecc_2bits_mask = QM_ECC_MBIT;

	dev_err->fe = SEC_RAS_FE_ENB_MSK;
	dev_err->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
	dev_err->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
	dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
						     SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	dev_err->reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
						  SEC_RESET_MASK_CAP, qm->cap_ver);
	dev_err->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;

	err_info->msi_wr_port = BIT(0);
	err_info->acpi_rst = "SRST";
}

static const struct hisi_qm_err_ini sec_err_ini = {
	.hw_init = sec_set_user_domain_and_cache,
	.hw_err_enable = sec_hw_error_enable,
	.hw_err_disable = sec_hw_error_disable,
	.get_dev_hw_err_status = sec_get_hw_err_status,
	.clear_dev_hw_err_status = sec_clear_hw_err_status,
	.open_axi_master_ooo = sec_open_axi_master_ooo,
	.open_sva_prefetch = sec_open_sva_prefetch,
	.close_sva_prefetch = sec_close_sva_prefetch,
	.show_last_dfx_regs = sec_show_last_dfx_regs,
	.err_info_init = sec_err_info_init,
	.get_err_result = sec_get_err_result,
	.dev_is_abnormal = sec_dev_is_abnormal,
	.disable_axi_error = sec_disable_axi_error,
	.enable_axi_error = sec_enable_axi_error,
};

static int sec_pf_probe_init(struct sec_dev *sec)
{
	struct hisi_qm *qm = &sec->qm;
	int ret;

	ret = sec_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	hisi_qm_dev_err_init(qm);
	sec_debug_regs_clear(qm);
	ret = sec_show_last_regs_init(qm);
	if (ret)
		pci_err(qm->pdev, "Failed to init last word regs!\n");

	return ret;
}

static int sec_pre_store_cap_reg(struct hisi_qm *qm)
{
	struct hisi_qm_cap_record *sec_cap;
	struct pci_dev *pdev = qm->pdev;
	size_t i, size;

	size = ARRAY_SIZE(sec_cap_query_info);
	sec_cap = devm_kcalloc(&pdev->dev, size, sizeof(*sec_cap), GFP_KERNEL);
	if (!sec_cap)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		sec_cap[i].type = sec_cap_query_info[i].type;
		sec_cap[i].name = sec_cap_query_info[i].name;
		sec_cap[i].cap_val = hisi_qm_get_cap_value(qm, sec_cap_query_info,
							   i, qm->cap_ver);
	}

	qm->cap_tables.dev_cap_table = sec_cap;
	qm->cap_tables.dev_cap_size = size;

	return 0;
}

static int sec_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	u64 alg_msk;
	int ret;

	qm->pdev = pdev;
	qm->mode = uacce_mode;
	qm->sqe_size = SEC_SQE_SIZE;
	qm->dev_name = sec_name;

	qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = SEC_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &sec_devices;
		qm->err_ini = &sec_err_ini;
		if (pf_q_num_flag)
			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
	} else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) {
		/*
		 * There is no way to get the qm configuration from within a
		 * VM on v1 hardware, so the PF is forced to use
		 * SEC_PF_DEF_Q_NUM and only one VF is triggered on v1
		 * hardware. v2 hardware has no such problem.
		 */
		qm->qp_base = SEC_PF_DEF_Q_NUM;
		qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM;
	}

	ret = hisi_qm_init(qm);
	if (ret) {
		pci_err(qm->pdev, "Failed to init sec qm configures!\n");
		return ret;
	}

	/* Fetch and save the value of capability registers */
	ret = sec_pre_store_cap_reg(qm);
	if (ret) {
		pci_err(qm->pdev, "Failed to pre-store capability registers!\n");
		hisi_qm_uninit(qm);
		return ret;
	}

	alg_msk = sec_get_alg_bitmap(qm, SEC_ALG_BITMAP_HIGH, SEC_ALG_BITMAP_LOW);
	ret = hisi_qm_set_algs(qm, alg_msk, sec_dev_algs, ARRAY_SIZE(sec_dev_algs));
	if (ret) {
		pci_err(qm->pdev, "Failed to set sec algs!\n");
		hisi_qm_uninit(qm);
	}

	return ret;
}

static void sec_qm_uninit(struct hisi_qm *qm)
{
	hisi_qm_uninit(qm);
}

static int sec_probe_init(struct sec_dev *sec)
{
	u32 type_rate = SEC_SHAPER_TYPE_RATE;
	struct hisi_qm *qm = &sec->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = sec_pf_probe_init(sec);
		if (ret)
			return ret;
		/* enable shaper type 0 */
		if (qm->ver >= QM_HW_V3) {
			type_rate |= QM_SHAPER_ENABLE;
			qm->type_rate = type_rate;
		}
	}

	return 0;
}

static void sec_probe_uninit(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_VF)
		return;

	sec_debug_regs_clear(qm);
	sec_show_last_regs_uninit(qm);
	sec_close_sva_prefetch(qm);
	hisi_qm_dev_err_uninit(qm);
}

static void sec_iommu_used_check(struct sec_dev *sec)
{
	struct iommu_domain *domain;
	struct device *dev = &sec->qm.pdev->dev;

	domain = iommu_get_domain_for_dev(dev);

	/* Check if iommu is used */
	sec->iommu_used = false;
	if (domain) {
		if (domain->type & __IOMMU_DOMAIN_PAGING)
			sec->iommu_used = true;
		dev_info(dev, "SMMU Opened, the iommu type = %u\n",
			 domain->type);
	}
}

static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct sec_dev *sec;
	struct hisi_qm *qm;
	int ret;

	sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL);
	if (!sec)
		return -ENOMEM;

	qm = &sec->qm;
	ret = sec_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret);
		return ret;
	}

	sec->ctx_q_num = ctx_q_num;
	sec_iommu_used_check(sec);

	ret = sec_probe_init(sec);
	if (ret) {
		pci_err(pdev, "Failed to probe!\n");
		goto err_qm_uninit;
	}

	ret = hisi_qm_start(qm);
	if (ret) {
		pci_err(pdev, "Failed to start sec qm!\n");
		goto err_probe_uninit;
	}

	ret = sec_debugfs_init(qm);
	if (ret)
		pci_warn(pdev, "Failed to init debugfs!\n");

	hisi_qm_add_list(qm, &sec_devices);
	ret = hisi_qm_alg_register(qm, &sec_devices, ctx_q_num);
	if (ret < 0) {
		pr_err("Failed to register driver to crypto.\n");
		goto err_qm_del_list;
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_alg_unregister;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_alg_unregister;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_alg_unregister:
	hisi_qm_alg_unregister(qm, &sec_devices, ctx_q_num);
err_qm_del_list:
	hisi_qm_del_list(qm, &sec_devices);
	sec_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
	sec_probe_uninit(qm);
err_qm_uninit:
	sec_qm_uninit(qm);
	return ret;
}

static void sec_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &sec_devices);
	hisi_qm_alg_unregister(qm, &sec_devices, ctx_q_num);
	hisi_qm_del_list(qm, &sec_devices);

	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	sec_debugfs_exit(qm);

	(void)hisi_qm_stop(qm, QM_NORMAL);
	sec_probe_uninit(qm);

	sec_qm_uninit(qm);
}

static const struct dev_pm_ops sec_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};

static const struct pci_error_handlers sec_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
	.slot_reset = hisi_qm_dev_slot_reset,
	.reset_prepare = hisi_qm_reset_prepare,
	.reset_done = hisi_qm_reset_done,
};

static struct pci_driver sec_pci_driver = {
	.name = "hisi_sec2",
	.id_table = sec_dev_ids,
	.probe = sec_probe,
	.remove = sec_remove,
	.err_handler = &sec_err_handler,
	.sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ?
		hisi_qm_sriov_configure : NULL,
	.shutdown = hisi_qm_dev_shutdown,
	.driver.pm = &sec_pm_ops,
};

struct pci_driver *hisi_sec_get_pf_driver(void)
{
	return &sec_pci_driver;
}
EXPORT_SYMBOL_GPL(hisi_sec_get_pf_driver);

static void sec_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL);
}

static void sec_unregister_debugfs(void)
{
	debugfs_remove_recursive(sec_debugfs_root);
}

static int __init sec_init(void)
{
	int ret;

	hisi_qm_init_list(&sec_devices);
	sec_register_debugfs();

	ret = pci_register_driver(&sec_pci_driver);
	if (ret < 0) {
		sec_unregister_debugfs();
		pr_err("Failed to register pci driver.\n");
		return ret;
	}

	return 0;
}

static void __exit sec_exit(void)
{
	pci_unregister_driver(&sec_pci_driver);
	sec_unregister_debugfs();
}

module_init(sec_init);
module_exit(sec_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Longfang Liu <liulongfang@huawei.com>");
MODULE_AUTHOR("Kai Ye <yekai13@huawei.com>");
MODULE_AUTHOR("Wei Zhang <zhangwei375@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");