/* SPDX-License-Identifier: GPL-2.0-only
 * Copyright (C) 2020 Marvell.
 */
#ifndef __OTX2_CPTLF_H
#define __OTX2_CPTLF_H

#include <linux/soc/marvell/octeontx2/asm.h>
#include <linux/bitfield.h>
#include <mbox.h>
#include <rvu.h>
#include "otx2_cpt_common.h"
#include "otx2_cpt_reqmgr.h"

/*
 * User-requested length of the CPT instruction and pending queues,
 * in CPT_INST_S messages
 */
#define OTX2_CPT_USER_REQUESTED_QLEN_MSGS 8200

/*
 * CPT instruction queue size passed to HW is in units of 40*CPT_INST_S
 * messages.
 */
#define OTX2_CPT_SIZE_DIV40 (OTX2_CPT_USER_REQUESTED_QLEN_MSGS / 40)

/*
 * CPT instruction and pending queues length in CPT_INST_S messages
 */
#define OTX2_CPT_INST_QLEN_MSGS	((OTX2_CPT_SIZE_DIV40 - 1) * 40)

/*
 * LDWB is incorrectly used when IQB_LDWB = 1 and the CPT instruction queue
 * has fewer than 320 free entries. As a workaround, increase the HW
 * instruction queue size by 320 and expose 320 fewer entries to SW/NIX RX.
 */
#define OTX2_CPT_INST_QLEN_EXTRA_BYTES	(320 * OTX2_CPT_INST_SIZE)
#define OTX2_CPT_EXTRA_SIZE_DIV40	(320 / 40)

/* CPT instruction queue length in bytes */
#define OTX2_CPT_INST_QLEN_BYTES					\
		((OTX2_CPT_SIZE_DIV40 * 40 * OTX2_CPT_INST_SIZE) +	\
		 OTX2_CPT_INST_QLEN_EXTRA_BYTES)

/* CPT instruction group queue length in bytes */
#define OTX2_CPT_INST_GRP_QLEN_BYTES					\
		((OTX2_CPT_SIZE_DIV40 + OTX2_CPT_EXTRA_SIZE_DIV40) * 16)

/* CPT FC length in bytes */
#define OTX2_CPT_Q_FC_LEN 128

/* CPT instruction queue alignment */
#define OTX2_CPT_INST_Q_ALIGNMENT 128

/* Mask which selects all engine groups */
#define OTX2_CPT_ALL_ENG_GRPS_MASK 0xFF

/* Maximum LFs supported in OcteonTX2 for CPT */
#define OTX2_CPT_MAX_LFS_NUM 64

/* Queue priority */
#define OTX2_CPT_QUEUE_HI_PRIO  0x1
#define OTX2_CPT_QUEUE_LOW_PRIO 0x0
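
/*
 * Illustrative compile-time checks (an editorial sketch, not part of the
 * original header; assumes static_assert() from <linux/build_bug.h> is
 * reachable through the includes above): with the default user-requested
 * length of 8200 messages, the HW size field is 8200 / 40 = 205 units and
 * the usable queue length is (205 - 1) * 40 = 8160 CPT_INST_S messages.
 */
static_assert(OTX2_CPT_SIZE_DIV40 == 205, "unexpected HW queue size units");
static_assert(OTX2_CPT_INST_QLEN_MSGS == 8160, "unexpected usable queue length");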

enum otx2_cptlf_state {
	OTX2_CPTLF_IN_RESET,
	OTX2_CPTLF_STARTED,
};

struct otx2_cpt_inst_queue {
	u8 *vaddr;
	u8 *real_vaddr;
	dma_addr_t dma_addr;
	dma_addr_t real_dma_addr;
	u32 size;
};

struct otx2_cptlfs_info;
struct otx2_cptlf_wqe {
	struct tasklet_struct work;
	struct otx2_cptlfs_info *lfs;
	u8 lf_num;
};

struct otx2_cptlf_info {
	struct otx2_cptlfs_info *lfs;		/* Ptr to cptlfs_info struct */
	void __iomem *lmtline;			/* Address of LMTLINE */
	void __iomem *ioreg;			/* LMTLINE send register */
	int msix_offset;			/* MSI-X interrupts offset */
	cpumask_var_t affinity_mask;		/* IRQs affinity mask */
	u8 irq_name[OTX2_CPT_LF_MSIX_VECTORS][32]; /* Interrupt names */
	u8 is_irq_reg[OTX2_CPT_LF_MSIX_VECTORS];   /* Is interrupt registered */
	u8 slot;				/* Slot number of this LF */

	struct otx2_cpt_inst_queue iqueue;	/* Instruction queue */
	struct otx2_cpt_pending_queue pqueue;	/* Pending queue */
	struct otx2_cptlf_wqe *wqe;		/* Tasklet work info */
};

struct cpt_hw_ops {
	void (*send_cmd)(union otx2_cpt_inst_s *cptinst, u32 insts_num,
			 struct otx2_cptlf_info *lf);
	u8 (*cpt_get_compcode)(union otx2_cpt_res_s *result);
	u8 (*cpt_get_uc_compcode)(union otx2_cpt_res_s *result);
	struct otx2_cpt_inst_info *
	(*cpt_sg_info_create)(struct pci_dev *pdev,
			      struct otx2_cpt_req_info *req, gfp_t gfp);
};

#define LMTLINE_SIZE 128
#define LMTLINE_ALIGN 128
struct otx2_lmt_info {
	void *base;
	dma_addr_t iova;
	u32 size;
	u8 align;
};

struct otx2_cptlfs_info {
	/* Start address of the registers of the VF/PF the LFs are attached to */
	void __iomem *reg_base;
	struct otx2_lmt_info lmt_info;
	struct pci_dev *pdev;	/* Device LFs are attached to */
	struct otx2_cptlf_info lf[OTX2_CPT_MAX_LFS_NUM];
	struct otx2_mbox *mbox;
	struct cpt_hw_ops *ops;
	u8 are_lfs_attached;	/* Whether CPT LFs are attached */
	u8 lfs_num;		/* Number of CPT LFs */
	u8 kcrypto_eng_grp_num;	/* Kernel crypto engine group number */
	u8 kvf_limits;		/* Kernel crypto limits */
	atomic_t state;		/* LF's state: started/reset */
	int blkaddr;		/* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */
	int global_slot;	/* Global slot across the blocks */
	u8 ctx_ilen;
	u8 ctx_ilen_ovrd;
};

static inline void otx2_cpt_free_instruction_queues(
					struct otx2_cptlfs_info *lfs)
{
	struct otx2_cpt_inst_queue *iq;
	int i;

	for (i = 0; i < lfs->lfs_num; i++) {
		iq = &lfs->lf[i].iqueue;
		if (iq->real_vaddr)
			dma_free_coherent(&lfs->pdev->dev,
					  iq->size,
					  iq->real_vaddr,
					  iq->real_dma_addr);
		iq->real_vaddr = NULL;
		iq->vaddr = NULL;
	}
}

static inline int otx2_cpt_alloc_instruction_queues(
					struct otx2_cptlfs_info *lfs)
{
	struct otx2_cpt_inst_queue *iq;
	int ret = 0, i;

	if (!lfs->lfs_num)
		return -EINVAL;

	for (i = 0; i < lfs->lfs_num; i++) {
		iq = &lfs->lf[i].iqueue;
		iq->size = OTX2_CPT_INST_QLEN_BYTES +
			   OTX2_CPT_Q_FC_LEN +
			   OTX2_CPT_INST_GRP_QLEN_BYTES +
			   OTX2_CPT_INST_Q_ALIGNMENT;
		iq->real_vaddr = dma_alloc_coherent(&lfs->pdev->dev, iq->size,
						    &iq->real_dma_addr,
						    GFP_KERNEL);
		if (!iq->real_vaddr) {
			ret = -ENOMEM;
			goto error;
		}
		iq->vaddr = iq->real_vaddr + OTX2_CPT_INST_GRP_QLEN_BYTES;
		iq->dma_addr = iq->real_dma_addr + OTX2_CPT_INST_GRP_QLEN_BYTES;

		/* Align pointers */
		iq->vaddr = PTR_ALIGN(iq->vaddr, OTX2_CPT_INST_Q_ALIGNMENT);
		iq->dma_addr = PTR_ALIGN(iq->dma_addr,
					 OTX2_CPT_INST_Q_ALIGNMENT);
	}
	return 0;

error:
	otx2_cpt_free_instruction_queues(lfs);
	return ret;
}
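
/*
 * Sketch of the buffer carved out above (editorial illustration; the exact
 * placement of the FC region is implied only by the size arithmetic): each
 * LF gets one coherent allocation holding the instruction group queue,
 * padding up to the 128-byte alignment, the instruction queue itself, and
 * the flow-control (FC) area.
 *
 *  real_vaddr/real_dma_addr
 *  |
 *  v
 *  +--------------------+-----+-------------------------+------------+
 *  | group queue        | pad | instruction queue       | FC region  |
 *  | GRP_QLEN_BYTES     |     | INST_QLEN_BYTES         | Q_FC_LEN   |
 *  +--------------------+-----+-------------------------+------------+
 *                             ^
 *                             vaddr/dma_addr (128-byte aligned)
 */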

static inline void otx2_cptlf_set_iqueues_base_addr(
					struct otx2_cptlfs_info *lfs)
{
	union otx2_cptx_lf_q_base lf_q_base;
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		lf_q_base.u = lfs->lf[slot].iqueue.dma_addr;
		otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
				 OTX2_CPT_LF_Q_BASE, lf_q_base.u);
	}
}

static inline void otx2_cptlf_do_set_iqueue_size(struct otx2_cptlf_info *lf)
{
	union otx2_cptx_lf_q_size lf_q_size = { .u = 0x0 };

	lf_q_size.s.size_div40 = OTX2_CPT_SIZE_DIV40 +
				 OTX2_CPT_EXTRA_SIZE_DIV40;
	otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
			 OTX2_CPT_LF_Q_SIZE, lf_q_size.u);
}

static inline void otx2_cptlf_set_iqueues_size(struct otx2_cptlfs_info *lfs)
{
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++)
		otx2_cptlf_do_set_iqueue_size(&lfs->lf[slot]);
}

#define INFLIGHT	GENMASK_ULL(8, 0)
#define GRB_CNT		GENMASK_ULL(39, 32)
#define GWB_CNT		GENMASK_ULL(47, 40)
#define XQ_XOR		GENMASK_ULL(63, 63)
#define DQPTR		GENMASK_ULL(19, 0)
#define NQPTR		GENMASK_ULL(51, 32)

static inline void otx2_cptlf_do_disable_iqueue(struct otx2_cptlf_info *lf)
{
	void __iomem *reg_base = lf->lfs->reg_base;
	struct pci_dev *pdev = lf->lfs->pdev;
	u8 blkaddr = lf->lfs->blkaddr;
	int timeout = 1000000;
	u64 inprog, inst_ptr;
	u64 slot = lf->slot;
	u64 qsize, pending;
	int i = 0;

	/* Disable instructions enqueuing */
	otx2_cpt_write64(reg_base, blkaddr, slot, OTX2_CPT_LF_CTL, 0x0);

	/* Ensure execution is enabled (EENA) so pending instructions drain */
	inprog = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_INPROG);
	inprog |= BIT_ULL(16);
	otx2_cpt_write64(reg_base, blkaddr, slot, OTX2_CPT_LF_INPROG, inprog);

	qsize = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_Q_SIZE) &
		0x7FFF;
	do {
		inst_ptr = otx2_cpt_read64(reg_base, blkaddr, slot,
					   OTX2_CPT_LF_Q_INST_PTR);
		pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) +
			  FIELD_GET(NQPTR, inst_ptr) -
			  FIELD_GET(DQPTR, inst_ptr);
		udelay(1);
		timeout--;
	} while ((pending != 0) && (timeout != 0));

	if (timeout == 0)
		dev_warn(&pdev->dev, "TIMEOUT: CPT poll on pending instructions\n");

	timeout = 1000000;
	/* Wait for CPT queue to become execution-quiescent */
	do {
		inprog = otx2_cpt_read64(reg_base, blkaddr, slot,
					 OTX2_CPT_LF_INPROG);

		if ((FIELD_GET(INFLIGHT, inprog) == 0) &&
		    (FIELD_GET(GRB_CNT, inprog) == 0)) {
			i++;
		} else {
			i = 0;
			timeout--;
		}
	} while ((timeout != 0) && (i < 10));

	if (timeout == 0)
		dev_warn(&pdev->dev, "TIMEOUT: CPT poll on inflight count\n");
	/* Wait for 2 us to flush all queue writes to memory */
	udelay(2);
}
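
/*
 * Editorial restatement of the occupancy math used above (a hypothetical
 * helper, not part of the driver): NQPTR and DQPTR are the enqueue and
 * dequeue offsets within the queue, and XQ_XOR is the XOR of their wrap
 * bits, so a set XQ_XOR indicates the enqueue side has wrapped one more
 * time than the dequeue side and one full queue of qsize * 40 entries
 * must be added to the difference.
 */
static inline u64 otx2_cpt_iq_pending(u64 inst_ptr, u64 qsize)
{
	return (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) +
	       FIELD_GET(NQPTR, inst_ptr) - FIELD_GET(DQPTR, inst_ptr);
}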

static inline void otx2_cptlf_disable_iqueues(struct otx2_cptlfs_info *lfs)
{
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		otx2_cptlf_do_disable_iqueue(&lfs->lf[slot]);
		otx2_cpt_lf_reset_msg(lfs, lfs->global_slot + slot);
	}
}

static inline void otx2_cptlf_set_iqueue_enq(struct otx2_cptlf_info *lf,
					     bool enable)
{
	u8 blkaddr = lf->lfs->blkaddr;
	union otx2_cptx_lf_ctl lf_ctl;

	lf_ctl.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
				   OTX2_CPT_LF_CTL);

	/* Set iqueue's enqueuing */
	lf_ctl.s.ena = enable ? 0x1 : 0x0;
	otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
			 OTX2_CPT_LF_CTL, lf_ctl.u);
}

static inline void otx2_cptlf_enable_iqueue_enq(struct otx2_cptlf_info *lf)
{
	otx2_cptlf_set_iqueue_enq(lf, true);
}

static inline void otx2_cptlf_set_iqueue_exec(struct otx2_cptlf_info *lf,
					      bool enable)
{
	union otx2_cptx_lf_inprog lf_inprog;
	u8 blkaddr = lf->lfs->blkaddr;

	lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
				      OTX2_CPT_LF_INPROG);

	/* Set iqueue's execution */
	lf_inprog.s.eena = enable ? 0x1 : 0x0;
	otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
			 OTX2_CPT_LF_INPROG, lf_inprog.u);
}

static inline void otx2_cptlf_set_ctx_flr_flush(struct otx2_cptlf_info *lf)
{
	u8 blkaddr = lf->lfs->blkaddr;
	u64 val;

	val = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
			      OTX2_CPT_LF_CTX_CTL);
	val |= BIT_ULL(0);

	otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
			 OTX2_CPT_LF_CTX_CTL, val);
}

static inline void otx2_cptlf_enable_iqueue_exec(struct otx2_cptlf_info *lf)
{
	otx2_cptlf_set_iqueue_exec(lf, true);
}

static inline void otx2_cptlf_disable_iqueue_exec(struct otx2_cptlf_info *lf)
{
	otx2_cptlf_set_iqueue_exec(lf, false);
}

static inline void otx2_cptlf_enable_iqueues(struct otx2_cptlfs_info *lfs)
{
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		/* Enable flush on FLR as an errata workaround */
		if (is_dev_cn10kb(lfs->pdev))
			otx2_cptlf_set_ctx_flr_flush(&lfs->lf[slot]);

		otx2_cptlf_enable_iqueue_exec(&lfs->lf[slot]);
		otx2_cptlf_enable_iqueue_enq(&lfs->lf[slot]);
	}
}

static inline void otx2_cpt_fill_inst(union otx2_cpt_inst_s *cptinst,
				      struct otx2_cpt_iq_command *iq_cmd,
				      u64 comp_baddr)
{
	cptinst->u[0] = 0x0;
	cptinst->s.doneint = true;
	cptinst->s.res_addr = comp_baddr;
	cptinst->u[2] = 0x0;
	cptinst->u[3] = 0x0;
	cptinst->s.ei0 = iq_cmd->cmd.u;
	cptinst->s.ei1 = iq_cmd->dptr;
	cptinst->s.ei2 = iq_cmd->rptr;
	cptinst->s.ei3 = iq_cmd->cptr.u;
}

/*
 * On the OcteonTX2 platform the insts_num parameter is the count of
 * instructions to be enqueued. The valid values for insts_num are:
 * 1 - one CPT instruction will be enqueued during LMTST operation
 * 2 - two CPT instructions will be enqueued during LMTST operation
 */
static inline void otx2_cpt_send_cmd(union otx2_cpt_inst_s *cptinst,
				     u32 insts_num, struct otx2_cptlf_info *lf)
{
	void __iomem *lmtline = lf->lmtline;
	long ret;

	/*
	 * Make sure the memory areas pointed to in CPT_INST_S
	 * are flushed before the instruction is sent to CPT
	 */
	dma_wmb();

	do {
		/* Copy CPT command to LMTLINE */
		memcpy_toio(lmtline, cptinst, insts_num * OTX2_CPT_INST_SIZE);

		/*
		 * LDEOR initiates an atomic transfer to the I/O device.
		 * The following will cause the LMTST to fail (the LDEOR
		 * returns zero):
		 * - No stores have been performed to the LMTLINE since it was
		 * last invalidated.
		 * - The bytes which have been stored to the LMTLINE since it
		 * was last invalidated form a pattern that is non-contiguous,
		 * does not start at byte 0, or does not end on an 8-byte
		 * boundary (i.e. comprises a formation of other than 1-16
		 * 8-byte words).
		 *
		 * These rules are designed such that an operating system
		 * context switch or hypervisor guest switch need have no
		 * knowledge of the LMTST operations; the switch code does not
		 * need to store to LMTCANCEL. Also note that since LMTLINE
		 * data cannot be read, there is no information leakage
		 * between processes.
		 */
		ret = otx2_lmt_flush(lf->ioreg);

	} while (!ret);
}
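
/*
 * Illustrative single-instruction enqueue flow (a hypothetical helper, not
 * part of the driver API): build a CPT_INST_S from a prepared IQ command
 * and the DMA address of a result buffer, then push it through the
 * platform's send_cmd op, which performs the LMTST described above.
 */
static inline void otx2_cpt_send_one_inst(struct otx2_cptlf_info *lf,
					  struct otx2_cpt_iq_command *iq_cmd,
					  u64 comp_baddr)
{
	union otx2_cpt_inst_s inst;

	otx2_cpt_fill_inst(&inst, iq_cmd, comp_baddr);
	lf->lfs->ops->send_cmd(&inst, 1, lf);	/* one CPT_INST_S per LMTST */
}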

static inline bool otx2_cptlf_started(struct otx2_cptlfs_info *lfs)
{
	return atomic_read(&lfs->state) == OTX2_CPTLF_STARTED;
}

static inline void otx2_cptlf_set_dev_info(struct otx2_cptlfs_info *lfs,
					   struct pci_dev *pdev,
					   void __iomem *reg_base,
					   struct otx2_mbox *mbox,
					   int blkaddr)
{
	lfs->pdev = pdev;
	lfs->reg_base = reg_base;
	lfs->mbox = mbox;
	lfs->blkaddr = blkaddr;
}

int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_msk, int pri,
		    int lfs_num);
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs);
int otx2_cptlf_register_misc_interrupts(struct otx2_cptlfs_info *lfs);
int otx2_cptlf_register_done_interrupts(struct otx2_cptlfs_info *lfs);
void otx2_cptlf_unregister_misc_interrupts(struct otx2_cptlfs_info *lfs);
void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs);
void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs);
int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs);

#endif /* __OTX2_CPTLF_H */