// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 Marvell. */

#include <linux/soc/marvell/octeontx2/asm.h>
#include "otx2_cptpf.h"
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "cn10k_cpt.h"

static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
			       struct otx2_cptlf_info *lf);

static struct cpt_hw_ops otx2_hw_ops = {
	.send_cmd = otx2_cpt_send_cmd,
	.cpt_get_compcode = otx2_cpt_get_compcode,
	.cpt_get_uc_compcode = otx2_cpt_get_uc_compcode,
	.cpt_sg_info_create = otx2_sg_info_create,
};

static struct cpt_hw_ops cn10k_hw_ops = {
	.send_cmd = cn10k_cpt_send_cmd,
	.cpt_get_compcode = cn10k_cpt_get_compcode,
	.cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode,
	.cpt_sg_info_create = otx2_sg_info_create,
};

static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
			       struct otx2_cptlf_info *lf)
{
	void __iomem *lmtline = lf->lmtline;
	u64 val = (lf->slot & 0x7FF);
	u64 tar_addr = 0;

	/* tar_addr<6:4> = Size of first LMTST - 1 in units of 128b. */
	tar_addr |= (__force u64)lf->ioreg |
		    (((OTX2_CPT_INST_SIZE / 16) - 1) & 0x7) << 4;
	/*
	 * Make sure memory areas pointed in CPT_INST_S
	 * are flushed before the instruction is sent to CPT
	 */
	dma_wmb();

	/* Copy CPT command to LMTLINE */
	memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);
	cn10k_lmt_flush(val, tar_addr);
}

int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
{
	struct pci_dev *pdev = cptpf->pdev;
	resource_size_t size;
	u64 lmt_base;

	if (!test_bit(CN10K_LMTST, &cptpf->cap_flag)) {
		cptpf->lfs.ops = &otx2_hw_ops;
		return 0;
	}

	cptpf->lfs.ops = &cn10k_hw_ops;
	lmt_base = readq(cptpf->reg_base + RVU_PF_LMTLINE_ADDR);
	if (!lmt_base) {
		dev_err(&pdev->dev, "PF LMTLINE address not configured\n");
		return -ENOMEM;
	}
	size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
	size -= ((1 + cptpf->max_vfs) * MBOX_SIZE);
	cptpf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, lmt_base, size);
	if (!cptpf->lfs.lmt_base) {
		dev_err(&pdev->dev,
			"Mapping of PF LMTLINE address failed\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);

int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	resource_size_t offset, size;

	if (!test_bit(CN10K_LMTST, &cptvf->cap_flag))
		return 0;

	offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
	size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
	/* Map VF LMTLINE region */
	cptvf->lfs.lmt_base = devm_ioremap_wc(&pdev->dev, offset, size);
	if (!cptvf->lfs.lmt_base) {
		dev_err(&pdev->dev, "Unable to map BAR4\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);

/*
 * Flush, unmap and free the HW context allocated by cn10k_cpt_hw_ctx_init().
 * Only CN10KA A-step parts carry such a context (errata workaround), so this
 * is a no-op everywhere else.
 */
void cn10k_cpt_hw_ctx_clear(struct pci_dev *pdev,
			    struct cn10k_cpt_errata_ctx *er_ctx)
{
	u64 cptr_dma;

	if (!is_dev_cn10ka_ax(pdev))
		return;

	cptr_dma = er_ctx->cptr_dma & ~(BIT_ULL(60));
	cn10k_cpt_ctx_flush(pdev, cptr_dma, true);
	dma_unmap_single(&pdev->dev, cptr_dma, CN10K_CPT_HW_CTX_SIZE,
			 DMA_BIDIRECTIONAL);
	kfree(er_ctx->hw_ctx);
}
EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_clear, CRYPTO_DEV_OCTEONTX2_CPT);

void cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx *hctx, u16 ctx_sz)
{
	hctx->w0.aop_valid = 1;
	hctx->w0.ctx_hdr_sz = 0;
	hctx->w0.ctx_sz = ctx_sz;
	hctx->w0.ctx_push_sz = 1;
}
EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_set, CRYPTO_DEV_OCTEONTX2_CPT);

/*
 * Allocate and DMA-map a HW context for CN10KA A-step parts (errata
 * workaround). The stored cptr_dma carries BIT(60), which
 * cn10k_cpt_hw_ctx_clear() masks off again before flushing and unmapping.
 */
int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
			  struct cn10k_cpt_errata_ctx *er_ctx)
{
	union cn10k_cpt_hw_ctx *hctx;
	u64 cptr_dma;

	er_ctx->cptr_dma = 0;
	er_ctx->hw_ctx = NULL;

	if (!is_dev_cn10ka_ax(pdev))
		return 0;

	hctx = kmalloc(CN10K_CPT_HW_CTX_SIZE, GFP_KERNEL);
	if (unlikely(!hctx))
		return -ENOMEM;
	cptr_dma = dma_map_single(&pdev->dev, hctx, CN10K_CPT_HW_CTX_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, cptr_dma)) {
		kfree(hctx);
		return -ENOMEM;
	}

	cn10k_cpt_hw_ctx_set(hctx, 1);
	er_ctx->hw_ctx = hctx;
	er_ctx->cptr_dma = cptr_dma | BIT_ULL(60);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_init, CRYPTO_DEV_OCTEONTX2_CPT);

/* Flush (and optionally invalidate) the HW context cached for @cptr. */
void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval)
{
	struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
	struct otx2_cptlfs_info *lfs = &cptvf->lfs;
	u64 reg;

	reg = (uintptr_t)cptr >> 7;
	if (inval)
		reg = reg | BIT_ULL(46);

	otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, lfs->lf[0].slot,
			 OTX2_CPT_LF_CTX_FLUSH, reg);
	/* Make sure that the FLUSH operation is complete */
	wmb();
	otx2_cpt_read64(lfs->reg_base, lfs->blkaddr, lfs->lf[0].slot,
			OTX2_CPT_LF_CTX_ERR);
}
EXPORT_SYMBOL_NS_GPL(cn10k_cpt_ctx_flush, CRYPTO_DEV_OCTEONTX2_CPT);

void cptvf_hw_ops_get(struct otx2_cptvf_dev *cptvf)
{
	if (test_bit(CN10K_LMTST, &cptvf->cap_flag))
		cptvf->lfs.ops = &cn10k_hw_ops;
	else
		cptvf->lfs.ops = &otx2_hw_ops;
}
EXPORT_SYMBOL_NS_GPL(cptvf_hw_ops_get, CRYPTO_DEV_OCTEONTX2_CPT);
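/*
 * Usage sketch (illustrative only, not part of the driver): once
 * cn10k_cptpf_lmtst_init() or cptvf_hw_ops_get() has installed the
 * appropriate cpt_hw_ops, instruction submission goes through the ops
 * indirection rather than calling either backend directly, e.g.:
 *
 *	struct otx2_cptlf_info *lf = &lfs->lf[0];
 *	union otx2_cpt_inst_s inst = { 0 };
 *
 *	... fill in the CPT_INST_S words for the request ...
 *	lfs->ops->send_cmd(&inst, 1, lf);
 *
 * On CN10K this resolves to cn10k_cpt_send_cmd() above (LMTLINE copy plus
 * cn10k_lmt_flush()); on OcteonTX2 it resolves to otx2_cpt_send_cmd().
 */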