// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include "otx2_cpt_common.h"
#include "otx2_cptlf.h"
#include "rvu_reg.h"

#define CPT_TIMER_HOLD 0x03F
#define CPT_COUNT_HOLD 32

static void cptlf_do_set_done_time_wait(struct otx2_cptlf_info *lf,
                                        int time_wait)
{
        union otx2_cptx_lf_done_wait done_wait;

        done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
                                      lf->slot, OTX2_CPT_LF_DONE_WAIT);
        done_wait.s.time_wait = time_wait;
        otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
                         OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}

static void cptlf_do_set_done_num_wait(struct otx2_cptlf_info *lf, int num_wait)
{
        union otx2_cptx_lf_done_wait done_wait;

        done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
                                      lf->slot, OTX2_CPT_LF_DONE_WAIT);
        done_wait.s.num_wait = num_wait;
        otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
                         OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}

static void cptlf_set_done_time_wait(struct otx2_cptlfs_info *lfs,
                                     int time_wait)
{
        int slot;

        for (slot = 0; slot < lfs->lfs_num; slot++)
                cptlf_do_set_done_time_wait(&lfs->lf[slot], time_wait);
}

static void cptlf_set_done_num_wait(struct otx2_cptlfs_info *lfs, int num_wait)
{
        int slot;

        for (slot = 0; slot < lfs->lfs_num; slot++)
                cptlf_do_set_done_num_wait(&lfs->lf[slot], num_wait);
}

static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
{
        struct otx2_cptlfs_info *lfs = lf->lfs;
        union otx2_cptx_af_lf_ctrl lf_ctrl;
        int ret;

        ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
                                   CPT_AF_LFX_CTL(lf->slot),
                                   &lf_ctrl.u, lfs->blkaddr);
        if (ret)
                return ret;

        lf_ctrl.s.pri = pri ? 1 : 0;

        ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
                                    CPT_AF_LFX_CTL(lf->slot),
                                    lf_ctrl.u, lfs->blkaddr);
        return ret;
}

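/*
 * Program the engine group mask for one LF in its CPT_AF_LFX_CTL register
 * via the AF mailbox, selecting which engine groups may serve the
 * instructions submitted through this LF.
 */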
static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
                                   int eng_grps_mask)
{
        struct otx2_cptlfs_info *lfs = lf->lfs;
        union otx2_cptx_af_lf_ctrl lf_ctrl;
        int ret;

        ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
                                   CPT_AF_LFX_CTL(lf->slot),
                                   &lf_ctrl.u, lfs->blkaddr);
        if (ret)
                return ret;

        lf_ctrl.s.grp = eng_grps_mask;

        ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
                                    CPT_AF_LFX_CTL(lf->slot),
                                    lf_ctrl.u, lfs->blkaddr);
        return ret;
}

static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs,
                                 int eng_grp_mask, int pri)
{
        int slot, ret = 0;

        for (slot = 0; slot < lfs->lfs_num; slot++) {
                ret = cptlf_set_pri(&lfs->lf[slot], pri);
                if (ret)
                        return ret;

                ret = cptlf_set_eng_grps_mask(&lfs->lf[slot], eng_grp_mask);
                if (ret)
                        return ret;
        }
        return ret;
}

static int cptlf_set_ctx_ilen(struct otx2_cptlfs_info *lfs, int ctx_ilen)
{
        union otx2_cptx_af_lf_ctrl lf_ctrl;
        struct otx2_cptlf_info *lf;
        int slot, ret = 0;

        for (slot = 0; slot < lfs->lfs_num; slot++) {
                lf = &lfs->lf[slot];

                ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
                                           CPT_AF_LFX_CTL(lf->slot),
                                           &lf_ctrl.u, lfs->blkaddr);
                if (ret)
                        return ret;

                lf_ctrl.s.ctx_ilen = ctx_ilen;

                ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
                                            CPT_AF_LFX_CTL(lf->slot),
                                            lf_ctrl.u, lfs->blkaddr);
                if (ret)
                        return ret;
        }
        return ret;
}

static void cptlf_hw_init(struct otx2_cptlfs_info *lfs)
{
        /* Disable instruction queues */
        otx2_cptlf_disable_iqueues(lfs);

        /* Set instruction queues base addresses */
        otx2_cptlf_set_iqueues_base_addr(lfs);

        /* Set instruction queues sizes */
        otx2_cptlf_set_iqueues_size(lfs);

        /* Set done interrupts time wait */
        cptlf_set_done_time_wait(lfs, CPT_TIMER_HOLD);

        /* Set done interrupts num wait */
        cptlf_set_done_num_wait(lfs, CPT_COUNT_HOLD);

        /* Enable instruction queues */
        otx2_cptlf_enable_iqueues(lfs);
}

static void cptlf_hw_cleanup(struct otx2_cptlfs_info *lfs)
{
        /* Disable instruction queues */
        otx2_cptlf_disable_iqueues(lfs);
}

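/*
 * Enable or disable per-LF interrupts for every LF slot. The MISC sources
 * enabled here (fault, hwerr, irde, nqerr, nwrp) match the conditions
 * decoded in cptlf_misc_intr_handler() below; DONE interrupts signal
 * completed instructions. Writing the _ENA_W1S register enables a source,
 * writing the matching _ENA_W1C register disables it.
 */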
static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
{
        union otx2_cptx_lf_misc_int_ena_w1s irq_misc = { .u = 0x0 };
        u64 reg = enable ? OTX2_CPT_LF_MISC_INT_ENA_W1S :
                           OTX2_CPT_LF_MISC_INT_ENA_W1C;
        int slot;

        irq_misc.s.fault = 0x1;
        irq_misc.s.hwerr = 0x1;
        irq_misc.s.irde = 0x1;
        irq_misc.s.nqerr = 0x1;
        irq_misc.s.nwrp = 0x1;

        for (slot = 0; slot < lfs->lfs_num; slot++)
                otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, reg,
                                 irq_misc.u);
}

static void cptlf_set_done_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
{
        u64 reg = enable ? OTX2_CPT_LF_DONE_INT_ENA_W1S :
                           OTX2_CPT_LF_DONE_INT_ENA_W1C;
        int slot;

        for (slot = 0; slot < lfs->lfs_num; slot++)
                otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, reg, 0x1);
}

static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
{
        union otx2_cptx_lf_done irq_cnt;

        irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
                                    OTX2_CPT_LF_DONE);
        return irq_cnt.s.done;
}

static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
{
        union otx2_cptx_lf_misc_int irq_misc, irq_misc_ack;
        struct otx2_cptlf_info *lf = arg;
        struct device *dev;

        dev = &lf->lfs->pdev->dev;
        irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
                                     lf->slot, OTX2_CPT_LF_MISC_INT);
        irq_misc_ack.u = 0x0;

        if (irq_misc.s.fault) {
                dev_err(dev, "Memory error detected while executing CPT_INST_S, LF %d.\n",
                        lf->slot);
                irq_misc_ack.s.fault = 0x1;

        } else if (irq_misc.s.hwerr) {
                dev_err(dev, "HW error from an engine executing CPT_INST_S, LF %d.",
                        lf->slot);
                irq_misc_ack.s.hwerr = 0x1;

        } else if (irq_misc.s.nwrp) {
                dev_err(dev, "SMMU fault while writing CPT_RES_S to CPT_INST_S[RES_ADDR], LF %d.\n",
                        lf->slot);
                irq_misc_ack.s.nwrp = 0x1;

        } else if (irq_misc.s.irde) {
                dev_err(dev, "Memory error when accessing instruction memory queue CPT_LF_Q_BASE[ADDR].\n");
                irq_misc_ack.s.irde = 0x1;

        } else if (irq_misc.s.nqerr) {
                dev_err(dev, "Error enqueuing an instruction received at CPT_LF_NQ.\n");
                irq_misc_ack.s.nqerr = 0x1;

        } else {
                dev_err(dev, "Unhandled interrupt in CPT LF %d\n", lf->slot);
                return IRQ_NONE;
        }

        /* Acknowledge interrupts */
        otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
                         OTX2_CPT_LF_MISC_INT, irq_misc_ack.u);

        return IRQ_HANDLED;
}

static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
{
        union otx2_cptx_lf_done_wait done_wait;
        struct otx2_cptlf_info *lf = arg;
        int irq_cnt;

        /* Read the number of completed requests */
        irq_cnt = cptlf_read_done_cnt(lf);
        if (irq_cnt) {
                done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
                                              lf->slot, OTX2_CPT_LF_DONE_WAIT);
                /* Acknowledge the number of completed requests */
                otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
                                 OTX2_CPT_LF_DONE_ACK, irq_cnt);

                otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
                                 OTX2_CPT_LF_DONE_WAIT, done_wait.u);
                if (unlikely(!lf->wqe)) {
                        dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
                                lf->slot);
                        return IRQ_NONE;
                }

                /* Schedule processing of completed requests */
                tasklet_hi_schedule(&lf->wqe->work);
        }
        return IRQ_HANDLED;
}

void otx2_cptlf_unregister_misc_interrupts(struct otx2_cptlfs_info *lfs)
{
        int i, irq_offs, vector;

        irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
        for (i = 0; i < lfs->lfs_num; i++) {
                if (!lfs->lf[i].is_irq_reg[irq_offs])
                        continue;

                vector = pci_irq_vector(lfs->pdev,
                                        lfs->lf[i].msix_offset + irq_offs);
                free_irq(vector, &lfs->lf[i]);
                lfs->lf[i].is_irq_reg[irq_offs] = false;
        }

        cptlf_set_misc_intrs(lfs, false);
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_misc_interrupts, "CRYPTO_DEV_OCTEONTX2_CPT");

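/*
 * Free the DONE interrupt vector of every LF that registered one and then
 * turn DONE interrupt generation off in hardware.
 */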
void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs)
{
        int i, irq_offs, vector;

        irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
        for (i = 0; i < lfs->lfs_num; i++) {
                if (!lfs->lf[i].is_irq_reg[irq_offs])
                        continue;

                vector = pci_irq_vector(lfs->pdev,
                                        lfs->lf[i].msix_offset + irq_offs);
                free_irq(vector, &lfs->lf[i]);
                lfs->lf[i].is_irq_reg[irq_offs] = false;
        }

        cptlf_set_done_intrs(lfs, false);
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_done_interrupts, "CRYPTO_DEV_OCTEONTX2_CPT");

static int cptlf_do_register_interrupts(struct otx2_cptlfs_info *lfs,
                                        int lf_num, int irq_offset,
                                        irq_handler_t handler)
{
        int ret, vector;

        vector = pci_irq_vector(lfs->pdev, lfs->lf[lf_num].msix_offset +
                                irq_offset);
        ret = request_irq(vector, handler, 0,
                          lfs->lf[lf_num].irq_name[irq_offset],
                          &lfs->lf[lf_num]);
        if (ret)
                return ret;

        lfs->lf[lf_num].is_irq_reg[irq_offset] = true;

        return ret;
}

int otx2_cptlf_register_misc_interrupts(struct otx2_cptlfs_info *lfs)
{
        bool is_cpt1 = (lfs->blkaddr == BLKADDR_CPT1);
        int irq_offs, ret, i;

        irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
        for (i = 0; i < lfs->lfs_num; i++) {
                snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPT%dLF Misc%d",
                         is_cpt1, i);
                ret = cptlf_do_register_interrupts(lfs, i, irq_offs,
                                                   cptlf_misc_intr_handler);
                if (ret)
                        goto free_irq;
        }
        cptlf_set_misc_intrs(lfs, true);
        return 0;

free_irq:
        otx2_cptlf_unregister_misc_interrupts(lfs);
        return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_misc_interrupts, "CRYPTO_DEV_OCTEONTX2_CPT");

int otx2_cptlf_register_done_interrupts(struct otx2_cptlfs_info *lfs)
{
        bool is_cpt1 = (lfs->blkaddr == BLKADDR_CPT1);
        int irq_offs, ret, i;

        irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
        for (i = 0; i < lfs->lfs_num; i++) {
                snprintf(lfs->lf[i].irq_name[irq_offs], 32,
                         "OTX2_CPT%dLF Done%d", is_cpt1, i);
                ret = cptlf_do_register_interrupts(lfs, i, irq_offs,
                                                   cptlf_done_intr_handler);
                if (ret)
                        goto free_irq;
        }
        cptlf_set_done_intrs(lfs, true);
        return 0;

free_irq:
        otx2_cptlf_unregister_done_interrupts(lfs);
        return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_done_interrupts, "CRYPTO_DEV_OCTEONTX2_CPT");

void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
        int slot, offs;

        for (slot = 0; slot < lfs->lfs_num; slot++) {
                for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++)
                        irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
                                              lfs->lf[slot].msix_offset +
                                              offs), NULL);
                free_cpumask_var(lfs->lf[slot].affinity_mask);
        }
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_free_irqs_affinity, "CRYPTO_DEV_OCTEONTX2_CPT");

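/*
 * Set the IRQ affinity hint for every MSI-X vector of each LF to a CPU
 * spread across the device's local NUMA node, so interrupt handling for
 * different LFs lands on different nearby CPUs.
 */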
int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
        struct otx2_cptlf_info *lf = lfs->lf;
        int slot, offs, ret;

        for (slot = 0; slot < lfs->lfs_num; slot++) {
                if (!zalloc_cpumask_var(&lf[slot].affinity_mask, GFP_KERNEL)) {
                        dev_err(&lfs->pdev->dev,
                                "cpumask allocation failed for LF %d", slot);
                        ret = -ENOMEM;
                        goto free_affinity_mask;
                }

                cpumask_set_cpu(cpumask_local_spread(slot,
                                dev_to_node(&lfs->pdev->dev)),
                                lf[slot].affinity_mask);

                for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
                        ret = irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
                                                lf[slot].msix_offset + offs),
                                                lf[slot].affinity_mask);
                        if (ret)
                                goto free_affinity_mask;
                }
        }
        return 0;

free_affinity_mask:
        otx2_cptlf_free_irqs_affinity(lfs);
        return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_set_irqs_affinity, "CRYPTO_DEV_OCTEONTX2_CPT");

/*
 * Attach and initialise 'lfs_num' CPT LFs: set up the LMT line and NQ
 * doorbell addresses for each slot, attach the LFs through the AF mailbox,
 * allocate instruction queues, initialise the hardware and program the
 * engine group mask, queue priority and optional CTX_ILEN override.
 */
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
                    int lfs_num)
{
        int slot, ret;

        if (!lfs->pdev || !lfs->reg_base)
                return -EINVAL;

        lfs->lfs_num = lfs_num;
        for (slot = 0; slot < lfs->lfs_num; slot++) {
                lfs->lf[slot].lfs = lfs;
                lfs->lf[slot].slot = slot;
                if (lfs->lmt_base)
                        lfs->lf[slot].lmtline = lfs->lmt_base +
                                                (slot * LMTLINE_SIZE);
                else
                        lfs->lf[slot].lmtline = lfs->reg_base +
                                OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
                                                 OTX2_CPT_LMT_LF_LMTLINEX(0));

                lfs->lf[slot].ioreg = lfs->reg_base +
                        OTX2_CPT_RVU_FUNC_ADDR_S(lfs->blkaddr, slot,
                                                 OTX2_CPT_LF_NQX(0));
        }
        /* Send request to attach LFs */
        ret = otx2_cpt_attach_rscrs_msg(lfs);
        if (ret)
                goto clear_lfs_num;

        ret = otx2_cpt_alloc_instruction_queues(lfs);
        if (ret) {
                dev_err(&lfs->pdev->dev,
                        "Allocating instruction queues failed\n");
                goto detach_rsrcs;
        }
        cptlf_hw_init(lfs);
        /*
         * Allow each LF to execute requests destined to any of 8 engine
         * groups and set queue priority of each LF to high
         */
        ret = cptlf_set_grp_and_pri(lfs, eng_grp_mask, pri);
        if (ret)
                goto free_iq;

        if (lfs->ctx_ilen_ovrd) {
                ret = cptlf_set_ctx_ilen(lfs, lfs->ctx_ilen);
                if (ret)
                        goto free_iq;
        }

        return 0;

free_iq:
        cptlf_hw_cleanup(lfs);
        otx2_cpt_free_instruction_queues(lfs);
detach_rsrcs:
        otx2_cpt_detach_rsrcs_msg(lfs);
clear_lfs_num:
        lfs->lfs_num = 0;
        return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, "CRYPTO_DEV_OCTEONTX2_CPT");

void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
{
        /* Cleanup LFs hardware side */
        cptlf_hw_cleanup(lfs);
        /* Free instruction queues */
        otx2_cpt_free_instruction_queues(lfs);
        /* Send request to detach LFs */
        otx2_cpt_detach_rsrcs_msg(lfs);
        lfs->lfs_num = 0;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, "CRYPTO_DEV_OCTEONTX2_CPT");

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Common module");
MODULE_LICENSE("GPL");