Lines matching "inside-secure" (drivers/crypto/inside-secure/safexcel.c)

1 // SPDX-License-Identifier: GPL-2.0
5 * Antoine Tenart <antoine.tenart@free-electrons.com>
10 #include <linux/dma-mapping.h>
/* in eip197_trc_cache_setupvirt() */
45 writel(0, priv->base + EIP197_FLUE_IFC_LUT(i));
51 for (i = 0; i < priv->config.rings; i++) {
52 writel(0, priv->base + EIP197_FLUE_CACHEBASE_LO(i));
53 writel(0, priv->base + EIP197_FLUE_CACHEBASE_HI(i));
55 priv->base + EIP197_FLUE_CONFIG(i));
57 writel(0, priv->base + EIP197_FLUE_OFFSETS);
58 writel(0, priv->base + EIP197_FLUE_ARC4_OFFSET);
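/*
 * Editor's note (inferred from the register names, not from the matched
 * lines): this routine resets the flow-lookup-engine (FLUE) state before
 * the record cache is brought up; the interface lookup table entries are
 * zeroed so all rings share one memory domain, and each ring's cache
 * base/config registers are cleared.
 */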
/* in eip197_trc_cache_banksel() */
69 val = readl(priv->base + EIP197_CS_RAM_CTRL);
72 writel(val, priv->base + EIP197_CS_RAM_CTRL);
/* in eip197_trc_cache_probe() */
89 actbank = min(maxbanks - 1, 0);
90 while ((addrhi - addrlo) > stride) {
96 priv->base + EIP197_CLASSIFICATION_RAMS +
102 addralias = addrmid - delta;
105 priv->base + EIP197_CLASSIFICATION_RAMS +
112 val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
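/*
 * Editor's sketch (not driver code): the loop above sizes the attached
 * cache RAM by binary subdivision: write a marker at the midpoint of the
 * remaining address interval, overwrite every power-of-two alias below
 * it, then read the marker back. If it survived, the RAM really is that
 * large; if an alias clobbered it, the RAM is smaller. Stand-alone
 * analog with a simulated wrapping RAM (all names below are
 * hypothetical):
 */
#include <stdint.h>
#include <stdio.h>

#define REAL_WORDS 4096u	/* the size the probe should discover */
static uint32_t ram[REAL_WORDS];
static void ram_write(uint32_t a, uint32_t v) { ram[a % REAL_WORDS] = v; }
static uint32_t ram_read(uint32_t a) { return ram[a % REAL_WORDS]; }

static uint32_t probe_size(uint32_t addrhi, uint32_t stride)
{
	uint32_t addrlo = 0, addrmid, delta, marker = 0xabadbabe;

	while ((addrhi - addrlo) > stride) {
		addrmid = (addrhi + addrlo) >> 1;
		ram_write(addrmid, marker);
		/* clobber each possible power-of-two alias below mid */
		for (delta = 1u << (31 - __builtin_clz(addrmid));
		     delta >= stride; delta >>= 1)
			ram_write(addrmid - delta, ~marker);
		if (ram_read(addrmid) == marker)
			addrlo = addrmid;	/* intact: RAM extends here */
		else
			addrhi = addrmid;	/* aliased: RAM is smaller */
	}
	return addrhi;
}

int main(void)
{
	/* prints 4096: the first address that aliases back onto the RAM */
	printf("%u\n", probe_size(1u << 20, 32));
	return 0;
}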
/* in eip197_trc_cache_clear() */
137 priv->base + offset);
139 val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
142 else if (i == cs_rc_max - 1)
144 writel(val, priv->base + offset + 4);
146 writel(0, priv->base + offset + 8);
147 writel(0, priv->base + offset + 12);
154 priv->base + EIP197_CLASSIFICATION_RAMS +
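/*
 * Editor's note: the loop above strings all cache records into one
 * doubly linked free chain: record i points at i + 1 and i - 1, with
 * the first (i == 0) and last (i == cs_rc_max - 1) records special-cased
 * to terminate the list; with cs_rc_max = 4 the chain is 0<->1<->2<->3.
 * The remaining zero writes appear to clear the rest of the record
 * administration words and the hash table area.
 */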
/* in eip197_trc_cache_init() */
172 val = readl(priv->base + EIP197_CS_RAM_CTRL);
175 writel(val, priv->base + EIP197_CS_RAM_CTRL);
176 val = readl(priv->base + EIP197_CS_RAM_CTRL);
180 writel(0, priv->base + EIP197_TRC_ECCCTRL);
186 val = readl(priv->base + EIP197_TRC_PARAMS);
188 writel(val, priv->base + EIP197_TRC_PARAMS);
198 val = readl(priv->base + EIP197_TRC_PARAMS);
201 writel(val, priv->base + EIP197_TRC_PARAMS);
207 writel(0, priv->base + EIP197_TRC_ECCCTRL);
211 dev_err(priv->dev, "Record cache probing failed (%d,%d).",
213 return -ENODEV;
234 cs_ht_sz = __fls(asize - cs_rc_max) - 2;
238 cs_rc_max = min_t(uint, cs_rc_abs_max, asize - (cs_ht_wc >> 2));
244 val = readl(priv->base + EIP197_CS_RAM_CTRL);
246 writel(val, priv->base + EIP197_CS_RAM_CTRL);
250 EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
251 writel(val, priv->base + EIP197_TRC_FREECHAIN);
256 writel(val, priv->base + EIP197_TRC_PARAMS2);
262 writel(val, priv->base + EIP197_TRC_PARAMS);
264 dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
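/*
 * Editor's worked example (illustrative numbers; the intermediate step
 * cs_ht_wc = 16 << cs_ht_sz is not among the matched lines): with
 * asize = 2048 admin words and an initial cs_rc_max = 256,
 * __fls(2048 - 256) = __fls(1792) = 10, so cs_ht_sz = 8 and the hash
 * table takes cs_ht_wc = 16 << 8 = 4096 dwords. The record count is
 * then re-limited to min(cs_rc_abs_max, 2048 - (4096 >> 2)) =
 * min(cs_rc_abs_max, 1024).
 */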
/* in eip197_init_firmware() */
274 for (pe = 0; pe < priv->config.pes; pe++) {
309 if (priv->flags & EIP197_OCE)
/* in eip197_write_firmware() */
323 for (i = 0; i < fw->size / sizeof(u32); i++) {
324 if (priv->data->fw_little_endian)
325 val = le32_to_cpu(((const __le32 *)fw->data)[i]);
327 val = be32_to_cpu(((const __be32 *)fw->data)[i]);
330 priv->base + EIP197_CLASSIFICATION_RAMS +
335 return i - EIP197_FW_TERMINAL_NOPS;
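/*
 * Editor's sketch: the loop above streams the firmware image into the
 * classification RAM one 32-bit word at a time, byte-swapping depending
 * on whether the engine variant stores its image little- or big-endian
 * (the driver's le32_to_cpu()/be32_to_cpu() pair). Minimal user-space
 * analog with hypothetical names:
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t load_word(const uint8_t *p, int little_endian)
{
	if (little_endian)
		return p[0] | p[1] << 8 | p[2] << 16 | (uint32_t)p[3] << 24;
	return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

/* Convert fw[0..len) into host-order words, whatever the source order. */
static size_t fw_to_words(const uint8_t *fw, size_t len,
			  uint32_t *out, int little_endian)
{
	size_t i, n = len / 4;

	for (i = 0; i < n; i++)
		out[i] = load_word(fw + 4 * i, little_endian);
	return n;
}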
/* in poll_fw_ready() */
352 for (pe = 0; pe < priv->config.pes; pe++) {
358 pollcnt--;
361 dev_err(priv->dev, "FW(%d) for PE %d failed to start\n",
/* in eip197_start_firmware() */
375 for (pe = 0; pe < priv->config.pes; pe++) {
383 val = EIP197_PE_ICE_UENG_START_OFFSET((ifppsz - 1) &
392 val = EIP197_PE_ICE_UENG_START_OFFSET((ipuesz - 1) &
/* in eip197_load_firmwares() */
419 if (priv->data->version == EIP197D_MRVL)
421 else if (priv->data->version == EIP197B_MRVL ||
422 priv->data->version == EIP197_DEVBRD)
424 else if (priv->data->version == EIP197C_MXL)
427 return -ENODEV;
431 snprintf(fw_path, 37, "inside-secure/%s/%s", dir, fw_name[i]);
432 ret = firmware_request_nowarn(&fw[i], fw_path, priv->dev);
434 if (minifw || priv->data->version != EIP197B_MRVL)
441 priv->dev);
452 for (pe = 0; pe < priv->config.pes; pe++)
459 dev_dbg(priv->dev, "Firmware loaded successfully\n");
463 ret = -ENODEV;
471 dev_dbg(priv->dev, "Firmware set not (fully) present or init failed, falling back to BCLA mode\n");
477 dev_err(priv->dev, "Firmware load failed.\n");
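/*
 * Editor's note: firmware images are requested as
 * "inside-secure/<variant>/<image>", where <variant> is chosen from the
 * device version (e.g. eip197b, eip197d) and <image> is ifpp.bin or
 * ipue.bin, matching the MODULE_FIRMWARE() lines at the end of the
 * file. If the variant-specific set is missing or fails to start, the
 * code appears to retry (a legacy path for EIP197B, then a minimal
 * "eip197_minifw" set) before falling back to BCLA mode.
 */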
/* in safexcel_hw_setup_cdesc_rings() */
487 cd_size_rnd = (priv->config.cd_size +
488 (BIT(priv->hwconfig.hwdataw) - 1)) >>
489 priv->hwconfig.hwdataw;
491 if (priv->flags & SAFEXCEL_HW_EIP197) {
493 cd_fetch_cnt = (1 << priv->hwconfig.hwcfsize) / cd_size_rnd;
495 (priv->config.pes * EIP197_FETCH_DEPTH));
498 cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
499 cd_size_rnd) - 1;
503 * we need to check whether we can fit even 1 for low-end EIP196's!
506 dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
507 return -ENODEV;
510 for (i = 0; i < priv->config.rings; i++) {
512 writel(lower_32_bits(priv->ring[i].cdr.base_dma),
514 writel(upper_32_bits(priv->ring[i].cdr.base_dma),
518 (priv->config.cd_offset << 14) | priv->config.cd_size,
521 (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
522 (cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
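/*
 * Editor's worked example (illustrative numbers): cd_size_rnd is
 * ceil(cd_size / 2^hwdataw), i.e. the descriptor size in data-bus
 * beats. With cd_size = 10 words on a 4-word bus (hwdataw = 2):
 * (10 + 3) >> 2 = 3 beats. cd_fetch_cnt is then how many such rounded
 * descriptors fit in the 2^hwcfsize-deep command FIFO, capped on EIP197
 * by the per-PE fetch depth.
 */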
/* in safexcel_hw_setup_rdesc_rings() */
545 (BIT(priv->hwconfig.hwdataw) - 1)) >>
546 priv->hwconfig.hwdataw;
547 if (priv->flags & SAFEXCEL_HW_EIP197) {
549 rd_fetch_cnt = (1 << priv->hwconfig.hwrfsize) / rd_size_rnd;
551 (priv->config.pes * EIP197_FETCH_DEPTH));
554 rd_fetch_cnt = ((1 << priv->hwconfig.hwrfsize) /
555 rd_size_rnd) - 1;
558 for (i = 0; i < priv->config.rings; i++) {
560 writel(lower_32_bits(priv->ring[i].rdr.base_dma),
562 writel(upper_32_bits(priv->ring[i].rdr.base_dma),
565 writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
566 priv->config.rd_size,
570 (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
571 (rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
/* in safexcel_hw_init() */
599 dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
600 priv->config.pes, priv->config.rings);
606 if (priv->flags & SAFEXCEL_HW_EIP197) {
626 for (pe = 0; pe < priv->config.pes; pe++) {
633 if (priv->flags & EIP197_PE_ARB)
659 if (priv->flags & SAFEXCEL_HW_EIP197)
662 GENMASK(priv->config.rings - 1, 0),
677 if (priv->hwconfig.hwnumpes > 4) {
692 if (priv->flags & SAFEXCEL_HW_EIP197)
720 for (i = 0; i < priv->config.rings; i++) {
741 writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
746 for (i = 0; i < priv->config.rings; i++) {
764 writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
768 for (pe = 0; pe < priv->config.pes; pe++) {
770 writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
774 writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
781 if (priv->flags & EIP197_SIMPLE_TRC) {
785 priv->base + EIP197_STRC_CONFIG);
788 } else if (priv->flags & SAFEXCEL_HW_EIP197) {
794 if (priv->flags & EIP197_ICE) {
/* in safexcel_try_push_requests() */
809 int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);
/* in safexcel_dequeue() */
829 req = priv->ring[ring].req;
830 backlog = priv->ring[ring].backlog;
835 spin_lock_bh(&priv->ring[ring].queue_lock);
836 backlog = crypto_get_backlog(&priv->ring[ring].queue);
837 req = crypto_dequeue_request(&priv->ring[ring].queue);
838 spin_unlock_bh(&priv->ring[ring].queue_lock);
841 priv->ring[ring].req = NULL;
842 priv->ring[ring].backlog = NULL;
847 ctx = crypto_tfm_ctx(req->tfm);
848 ret = ctx->send(req, ring, &commands, &results);
853 crypto_request_complete(backlog, -EINPROGRESS);
869 * the request and the backlog for the next dequeue call (per-ring).
871 priv->ring[ring].req = req;
872 priv->ring[ring].backlog = backlog;
878 spin_lock_bh(&priv->ring[ring].lock);
880 priv->ring[ring].requests += nreq;
882 if (!priv->ring[ring].busy) {
884 priv->ring[ring].busy = true;
887 spin_unlock_bh(&priv->ring[ring].lock);
890 writel((rdesc * priv->config.rd_offset),
894 writel((cdesc * priv->config.cd_offset),
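/*
 * Editor's note on the dequeue pattern above: a request is pulled from
 * the per-ring crypto_queue under queue_lock; if ->send() cannot build
 * descriptors for it (e.g. the ring is full), the request and its
 * backlog pointer are stashed in priv->ring[ring] so the next dequeue
 * call retries them first. Once a request has really left the queue,
 * its backlogged predecessor is notified with -EINPROGRESS, and the
 * final writel()s hand the new result/command descriptor counts to the
 * hardware.
 */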
/* in safexcel_rdesc_check_errors() */
902 struct result_data_desc *result_data = rdp + priv->config.res_offset;
904 if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
905 ((!rdesc->descriptor_overflow) &&
906 (!rdesc->buffer_overflow) &&
907 (!result_data->error_code))))
910 if (rdesc->descriptor_overflow)
911 dev_err(priv->dev, "Descriptor overflow detected");
913 if (rdesc->buffer_overflow)
914 dev_err(priv->dev, "Buffer overflow detected");
916 if (result_data->error_code & 0x4066) {
918 dev_err(priv->dev,
920 result_data->error_code);
922 return -EIO;
923 } else if (result_data->error_code &
930 return -EINVAL;
931 } else if (result_data->error_code & BIT(9)) {
933 return -EBADMSG;
936 /* All other non-fatal errors */
937 return -EINVAL;
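/*
 * Editor's note: 0x4066 selects error_code bits 1, 2, 5, 6 and 14,
 * which the driver treats as fatal (-EIO). Bit 9 is the
 * authentication-failed flag (-EBADMSG); everything else, such as
 * block-size or length problems with the input, maps to -EINVAL.
 */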
/* in safexcel_rdr_req_set() */
947 priv->ring[ring].rdr_req[i] = req;
/* in safexcel_rdr_req_get() */
955 return priv->ring[ring].rdr_req[i];
/* in safexcel_complete() */
964 cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
966 dev_err(priv->dev,
970 } while (!cdesc->last_seg);
/* in safexcel_invalidate_cache() */
988 cdesc->control_data.type = EIP197_TYPE_EXTENDED;
989 cdesc->control_data.options = 0;
990 cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
991 cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
1006 safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
/* in safexcel_handle_result_descriptor() */
1031 ctx = crypto_tfm_ctx(req->tfm);
1032 ndesc = ctx->handle_result(priv, ring, req,
1035 dev_err(priv->dev, "failed to handle result (%d)\n",
1053 (tot_descs * priv->config.rd_offset),
1063 spin_lock_bh(&priv->ring[ring].lock);
1065 priv->ring[ring].requests -= handled;
1068 if (!priv->ring[ring].requests)
1069 priv->ring[ring].busy = false;
1071 spin_unlock_bh(&priv->ring[ring].lock);
/* in safexcel_dequeue_work() */
1079 safexcel_dequeue(data->priv, data->ring);
/* in safexcel_irq_ring() */
1090 struct safexcel_crypto_priv *priv = irq_data->priv;
1091 int ring = irq_data->ring, rc = IRQ_NONE;
1108 dev_err(priv->dev, "RDR: fatal error.\n");
/* in safexcel_irq_ring_thread() */
1127 struct safexcel_crypto_priv *priv = irq_data->priv;
1128 int ring = irq_data->ring;
1132 queue_work(priv->ring[ring].workqueue,
1133 &priv->ring[ring].work_data.work);
/* in safexcel_request_ring_irq() */
1151 dev = &pci_pdev->dev;
1163 dev = &plf_pdev->dev;
1169 return -ENXIO;
/* in safexcel_register_algorithms() */
1264 safexcel_algs[i]->priv = priv;
1267 if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
1268 safexcel_algs[i]->algo_mask)
1272 if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
1273 ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
1274 else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
1275 ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
1277 ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);
1288 if ((safexcel_algs[j]->algo_mask & priv->hwconfig.algo_flags) !=
1289 safexcel_algs[j]->algo_mask)
1293 if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
1294 crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
1295 else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
1296 crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
1298 crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
/* in safexcel_unregister_algorithms() */
1310 if ((safexcel_algs[i]->algo_mask & priv->hwconfig.algo_flags) !=
1311 safexcel_algs[i]->algo_mask)
1315 if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
1316 crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
1317 else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
1318 crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
1320 crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
/* in safexcel_configure() */
1326 u32 mask = BIT(priv->hwconfig.hwdataw) - 1;
1328 priv->config.pes = priv->hwconfig.hwnumpes;
1329 priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);
1331 priv->config.rings = min_t(u32, priv->config.rings,
1332 priv->hwconfig.hwnumraic);
1334 priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
1335 priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
1336 priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask;
1339 priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
1341 priv->config.rd_size = priv->config.res_offset +
1343 priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
1346 priv->config.cd_offset *= sizeof(u32);
1347 priv->config.cdsh_offset *= sizeof(u32);
1348 priv->config.rd_offset *= sizeof(u32);
1349 priv->config.res_offset *= sizeof(u32);
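/*
 * Editor's sketch: (x + mask) & ~mask with mask = BIT(w) - 1 rounds x
 * up to the next multiple of 2^w, so the descriptor offsets above stay
 * aligned to the hardware data-bus width. Self-contained illustration
 * (hypothetical helper name):
 */
#include <assert.h>
#include <stdint.h>

static uint32_t round_up_pow2(uint32_t x, unsigned int w)
{
	uint32_t mask = (1u << w) - 1;

	return (x + mask) & ~mask;
}

/* e.g. a 10-word descriptor on a 4-word (w = 2) bus occupies 12 words */
static void alignment_example(void)
{
	assert(round_up_pow2(10, 2) == 12);
	assert(round_up_pow2(12, 2) == 12);
}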
/* in safexcel_init_register_offsets() */
1354 struct safexcel_register_offsets *offsets = &priv->offsets;
1356 if (priv->flags & SAFEXCEL_HW_EIP197) {
1357 offsets->hia_aic = EIP197_HIA_AIC_BASE;
1358 offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
1359 offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
1360 offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
1361 offsets->hia_dfe = EIP197_HIA_DFE_BASE;
1362 offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
1363 offsets->hia_dse = EIP197_HIA_DSE_BASE;
1364 offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
1365 offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
1366 offsets->pe = EIP197_PE_BASE;
1367 offsets->global = EIP197_GLOBAL_BASE;
1369 offsets->hia_aic = EIP97_HIA_AIC_BASE;
1370 offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
1371 offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
1372 offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
1373 offsets->hia_dfe = EIP97_HIA_DFE_BASE;
1374 offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
1375 offsets->hia_dse = EIP97_HIA_DSE_BASE;
1376 offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
1377 offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
1378 offsets->pe = EIP97_PE_BASE;
1379 offsets->global = EIP97_GLOBAL_BASE;
/* in safexcel_probe_generic() */
1394 struct device *dev = priv->dev;
1398 priv->context_pool = dmam_pool_create("safexcel-context", dev,
1401 if (!priv->context_pool)
1402 return -ENOMEM;
1409 version = readl(priv->base + EIP97_HIA_AIC_BASE + EIP197_HIA_VERSION);
1413 priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
1415 /* read back byte-swapped, so complement byte swap bits */
1417 priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
1420 version = readl(priv->base + EIP197_HIA_AIC_BASE +
1423 priv->hwconfig.hiaver = EIP197_VERSION_MASK(version);
1424 priv->flags |= SAFEXCEL_HW_EIP197;
1427 /* read back byte-swapped, so complement swap bits */
1429 priv->hwconfig.hiaver = EIP197_VERSION_SWAP(version);
1430 priv->flags |= SAFEXCEL_HW_EIP197;
1432 return -ENODEV;
1440 * If the version was read byte-swapped, we need to flip the device
1442 * byte-swapped ...
1456 if (((priv->flags & SAFEXCEL_HW_EIP197) &&
1459 ((!(priv->flags & SAFEXCEL_HW_EIP197) &&
1465 dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n",
1467 return -ENODEV;
1470 priv->hwconfig.hwver = EIP197_VERSION_MASK(version);
1477 dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid);
1478 return -ENODEV;
1480 priv->hwconfig.ppver = EIP197_VERSION_MASK(version);
1486 return -ENODEV;
1488 priv->hwconfig.pever = EIP197_VERSION_MASK(version);
1493 priv->hwconfig.icever = 0;
1494 priv->hwconfig.ocever = 0;
1495 priv->hwconfig.psever = 0;
1496 if (priv->flags & SAFEXCEL_HW_EIP197) {
1500 priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
1502 priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
1505 priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
1508 priv->hwconfig.hwnumpes = (hiaopt >> EIP197_N_PES_OFFSET) &
1510 priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
1513 priv->flags |= EIP197_PE_ARB;
1515 priv->flags |= EIP197_ICE;
1522 return -ENODEV;
1524 priv->hwconfig.icever = EIP197_VERSION_MASK(version);
1527 priv->flags |= EIP197_OCE;
1532 return -ENODEV;
1534 priv->hwconfig.psever = EIP197_VERSION_MASK(version);
1541 return -ENODEV;
1543 priv->hwconfig.ocever = EIP197_VERSION_MASK(version);
1547 priv->flags |= EIP197_SIMPLE_TRC;
1549 priv->flags |= EIP197_TRC_CACHE;
1552 priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
1554 priv->hwconfig.hwcfsize = (hiaopt >> EIP97_CFSIZE_OFFSET) &
1556 priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
1558 priv->hwconfig.hwnumpes = 1; /* by definition */
1559 priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
1570 priv->hwconfig.hwnumraic = i;
1571 /* Low-end EIP196 may not have any ring AIC's ... */
1572 if (!priv->hwconfig.hwnumraic) {
1573 dev_err(priv->dev, "No ring interrupt controller present!\n");
1574 return -ENODEV;
1578 priv->hwconfig.algo_flags = readl(EIP197_PE(priv) +
1582 dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x(alg:%08x)/%x/%x/%x\n",
1583 peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
1584 priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
1585 priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
1586 priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
1587 priv->hwconfig.ppver, priv->hwconfig.pever,
1588 priv->hwconfig.algo_flags, priv->hwconfig.icever,
1589 priv->hwconfig.ocever, priv->hwconfig.psever);
1593 if (IS_ENABLED(CONFIG_PCI) && priv->data->version == EIP197_DEVBRD) {
1595 * Request MSI vectors for global + 1 per ring -
1601 priv->config.rings + 1,
1602 priv->config.rings + 1,
1611 priv->ring = devm_kcalloc(dev, priv->config.rings,
1612 sizeof(*priv->ring),
1614 if (!priv->ring)
1615 return -ENOMEM;
1617 for (i = 0; i < priv->config.rings; i++) {
1623 &priv->ring[i].cdr,
1624 &priv->ring[i].rdr);
1630 priv->ring[i].rdr_req = devm_kcalloc(dev,
1632 sizeof(*priv->ring[i].rdr_req),
1634 if (!priv->ring[i].rdr_req) {
1635 ret = -ENOMEM;
1641 ret = -ENOMEM;
1645 ring_irq->priv = priv;
1646 ring_irq->ring = i;
1661 priv->ring[i].irq = irq;
1662 priv->ring[i].work_data.priv = priv;
1663 priv->ring[i].work_data.ring = i;
1664 INIT_WORK(&priv->ring[i].work_data.work,
1668 priv->ring[i].workqueue =
1670 if (!priv->ring[i].workqueue) {
1671 ret = -ENOMEM;
1675 priv->ring[i].requests = 0;
1676 priv->ring[i].busy = false;
1678 crypto_init_queue(&priv->ring[i].queue,
1681 spin_lock_init(&priv->ring[i].lock);
1682 spin_lock_init(&priv->ring[i].queue_lock);
1685 atomic_set(&priv->ring_used, 0);
1702 for (i = 0; i < priv->config.rings; i++) {
1703 if (priv->ring[i].irq)
1704 irq_set_affinity_hint(priv->ring[i].irq, NULL);
1705 if (priv->ring[i].workqueue)
1706 destroy_workqueue(priv->ring[i].workqueue);
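/*
 * Editor's note: the error path above unwinds only what was actually
 * set up: rings whose IRQ was requested get their affinity hint
 * cleared, and rings whose workqueue was created get it destroyed,
 * mirroring the allocation order of the per-ring loop.
 */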
/* in safexcel_hw_reset_rings() */
1716 for (i = 0; i < priv->config.rings; i++) {
/* in safexcel_probe() */
1735 struct device *dev = &pdev->dev;
1741 return -ENOMEM;
1743 priv->dev = dev;
1744 priv->data = (struct safexcel_priv_data *)of_device_get_match_data(dev);
1748 priv->base = devm_platform_ioremap_resource(pdev, 0);
1749 if (IS_ERR(priv->base)) {
1751 return PTR_ERR(priv->base);
1754 priv->clk = devm_clk_get(&pdev->dev, NULL);
1755 ret = PTR_ERR_OR_ZERO(priv->clk);
1757 if (ret != -ENOENT) {
1761 ret = clk_prepare_enable(priv->clk);
1768 priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
1769 ret = PTR_ERR_OR_ZERO(priv->reg_clk);
1771 if (ret != -ENOENT) {
1775 ret = clk_prepare_enable(priv->reg_clk);
1794 clk_disable_unprepare(priv->reg_clk);
1796 clk_disable_unprepare(priv->clk);
/* in safexcel_remove() */
1808 clk_disable_unprepare(priv->reg_clk);
1809 clk_disable_unprepare(priv->clk);
1811 for (i = 0; i < priv->config.rings; i++) {
1812 irq_set_affinity_hint(priv->ring[i].irq, NULL);
1813 destroy_workqueue(priv->ring[i].workqueue);
1840 .compatible = "inside-secure,safexcel-eip97ies",
1844 .compatible = "inside-secure,safexcel-eip197b",
1848 .compatible = "inside-secure,safexcel-eip197d",
1852 .compatible = "inside-secure,safexcel-eip197c-mxl",
1857 .compatible = "inside-secure,safexcel-eip97",
1861 .compatible = "inside-secure,safexcel-eip197",
1873 .name = "crypto-safexcel",
1878 /* PCIE devices - i.e. Inside Secure development boards */
/* in safexcel_pci_probe() */
1883 struct device *dev = &pdev->dev;
1890 ent->vendor, ent->device, ent->subvendor,
1891 ent->subdevice, ent->driver_data);
1895 return -ENOMEM;
1897 priv->dev = dev;
1898 priv->data = (struct safexcel_priv_data *)ent->driver_data;
1915 priv->base = pcim_iomap_table(pdev)[0];
1917 if (priv->data->version == EIP197_DEVBRD) {
1918 dev_dbg(dev, "Device identified as FPGA based development board - applying HW reset\n");
1948 return -ENODEV;
1953 writel(1, priv->base + EIP197_XLX_GPIO_BASE);
1956 writel(0, priv->base + EIP197_XLX_GPIO_BASE);
/* in safexcel_pci_remove() */
1975 for (i = 0; i < priv->config.rings; i++)
1976 destroy_workqueue(priv->ring[i].workqueue);
1993 .name = "crypto-safexcel",
2029 MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
2038 MODULE_FIRMWARE("inside-secure/eip197b/ifpp.bin");
2039 MODULE_FIRMWARE("inside-secure/eip197b/ipue.bin");
2040 MODULE_FIRMWARE("inside-secure/eip197d/ifpp.bin");
2041 MODULE_FIRMWARE("inside-secure/eip197d/ipue.bin");
2042 MODULE_FIRMWARE("inside-secure/eip197_minifw/ifpp.bin");
2043 MODULE_FIRMWARE("inside-secure/eip197_minifw/ipue.bin");