// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include "otx2_cpt_common.h"
#include "otx2_cptlf.h"
#include "rvu_reg.h"

#define CPT_TIMER_HOLD 0x03F
#define CPT_COUNT_HOLD 32

static void cptlf_do_set_done_time_wait(struct otx2_cptlf_info *lf,
					int time_wait)
{
	union otx2_cptx_lf_done_wait done_wait;

	done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
				      lf->slot, OTX2_CPT_LF_DONE_WAIT);
	done_wait.s.time_wait = time_wait;
	otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
			 OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}

static void cptlf_do_set_done_num_wait(struct otx2_cptlf_info *lf, int num_wait)
{
	union otx2_cptx_lf_done_wait done_wait;

	done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
				      lf->slot, OTX2_CPT_LF_DONE_WAIT);
	done_wait.s.num_wait = num_wait;
	otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
			 OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}

static void cptlf_set_done_time_wait(struct otx2_cptlfs_info *lfs,
				     int time_wait)
{
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++)
		cptlf_do_set_done_time_wait(&lfs->lf[slot], time_wait);
}

static void cptlf_set_done_num_wait(struct otx2_cptlfs_info *lfs, int num_wait)
{
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++)
		cptlf_do_set_done_num_wait(&lfs->lf[slot], num_wait);
}
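
/*
 * Illustrative sketch, not part of the original file: CPT_LF_DONE_WAIT
 * packs both interrupt-coalescing knobs into one register, so a
 * hypothetical helper could program TIME_WAIT and NUM_WAIT in a single
 * read-modify-write instead of the two separate helpers above.
 */
static void __maybe_unused
cptlf_do_set_done_wait(struct otx2_cptlf_info *lf, int time_wait, int num_wait)
{
	union otx2_cptx_lf_done_wait done_wait;

	done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
				      lf->slot, OTX2_CPT_LF_DONE_WAIT);
	done_wait.s.time_wait = time_wait; /* fire after this idle interval */
	done_wait.s.num_wait = num_wait;   /* or after this many completions */
	otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
			 OTX2_CPT_LF_DONE_WAIT, done_wait.u);
}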

static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
{
	struct otx2_cptlfs_info *lfs = lf->lfs;
	union otx2_cptx_af_lf_ctrl lf_ctrl;
	int ret;

	ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
				   CPT_AF_LFX_CTL(lf->slot),
				   &lf_ctrl.u, lfs->blkaddr);
	if (ret)
		return ret;

	lf_ctrl.s.pri = pri ? 1 : 0;

	ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
				    CPT_AF_LFX_CTL(lf->slot),
				    lf_ctrl.u, lfs->blkaddr);
	return ret;
}

static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
				   int eng_grps_mask)
{
	struct otx2_cptlfs_info *lfs = lf->lfs;
	union otx2_cptx_af_lf_ctrl lf_ctrl;
	int ret;

	ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
				   CPT_AF_LFX_CTL(lf->slot),
				   &lf_ctrl.u, lfs->blkaddr);
	if (ret)
		return ret;

	lf_ctrl.s.grp = eng_grps_mask;

	ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
				    CPT_AF_LFX_CTL(lf->slot),
				    lf_ctrl.u, lfs->blkaddr);
	return ret;
}

static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs,
				 int eng_grp_mask, int pri)
{
	int slot, ret = 0;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		ret = cptlf_set_pri(&lfs->lf[slot], pri);
		if (ret)
			return ret;

		ret = cptlf_set_eng_grps_mask(&lfs->lf[slot], eng_grp_mask);
		if (ret)
			return ret;
	}
	return ret;
}

static int cptlf_set_ctx_ilen(struct otx2_cptlfs_info *lfs, int ctx_ilen)
{
	union otx2_cptx_af_lf_ctrl lf_ctrl;
	struct otx2_cptlf_info *lf;
	int slot, ret = 0;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		lf = &lfs->lf[slot];

		ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
					   CPT_AF_LFX_CTL(lf->slot),
					   &lf_ctrl.u, lfs->blkaddr);
		if (ret)
			return ret;

		lf_ctrl.s.ctx_ilen = ctx_ilen;

		ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
					    CPT_AF_LFX_CTL(lf->slot),
					    lf_ctrl.u, lfs->blkaddr);
		if (ret)
			return ret;
	}
	return ret;
}
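
/*
 * Hypothetical refactor, not in the original file: cptlf_set_pri(),
 * cptlf_set_eng_grps_mask() and cptlf_set_ctx_ilen() all share the same
 * CPT_AF_LFX_CTL read-modify-write shape, which a callback-based updater
 * could factor out. Sketch only; the names below are invented.
 */
static int __maybe_unused
cptlf_af_lf_ctl_update(struct otx2_cptlf_info *lf,
		       void (*update)(union otx2_cptx_af_lf_ctrl *ctrl, int val),
		       int val)
{
	struct otx2_cptlfs_info *lfs = lf->lfs;
	union otx2_cptx_af_lf_ctrl lf_ctrl;
	int ret;

	/* Read the current LF control word through the AF mailbox */
	ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
				   CPT_AF_LFX_CTL(lf->slot),
				   &lf_ctrl.u, lfs->blkaddr);
	if (ret)
		return ret;

	update(&lf_ctrl, val);	/* caller-supplied field update */

	return otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
				     CPT_AF_LFX_CTL(lf->slot),
				     lf_ctrl.u, lfs->blkaddr);
}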

static void cptlf_hw_init(struct otx2_cptlfs_info *lfs)
{
	/* Disable instruction queues */
	otx2_cptlf_disable_iqueues(lfs);

	/* Set instruction queues base addresses */
	otx2_cptlf_set_iqueues_base_addr(lfs);

	/* Set instruction queues sizes */
	otx2_cptlf_set_iqueues_size(lfs);

	/* Set done interrupts time wait */
	cptlf_set_done_time_wait(lfs, CPT_TIMER_HOLD);

	/* Set done interrupts num wait */
	cptlf_set_done_num_wait(lfs, CPT_COUNT_HOLD);

	/* Enable instruction queues */
	otx2_cptlf_enable_iqueues(lfs);
}

static void cptlf_hw_cleanup(struct otx2_cptlfs_info *lfs)
{
	/* Disable instruction queues */
	otx2_cptlf_disable_iqueues(lfs);
}

static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
{
	union otx2_cptx_lf_misc_int_ena_w1s irq_misc = { .u = 0x0 };
	u64 reg = enable ? OTX2_CPT_LF_MISC_INT_ENA_W1S :
			   OTX2_CPT_LF_MISC_INT_ENA_W1C;
	int slot;

	irq_misc.s.fault = 0x1;
	irq_misc.s.hwerr = 0x1;
	irq_misc.s.irde = 0x1;
	irq_misc.s.nqerr = 0x1;
	irq_misc.s.nwrp = 0x1;

	for (slot = 0; slot < lfs->lfs_num; slot++)
		otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, reg,
				 irq_misc.u);
}

static void cptlf_set_done_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
{
	u64 reg = enable ? OTX2_CPT_LF_DONE_INT_ENA_W1S :
			   OTX2_CPT_LF_DONE_INT_ENA_W1C;
	int slot;

	for (slot = 0; slot < lfs->lfs_num; slot++)
		otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, reg, 0x1);
}

static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
{
	union otx2_cptx_lf_done irq_cnt;

	irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
				    lf->slot, OTX2_CPT_LF_DONE);
	return irq_cnt.s.done;
}

static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
{
	union otx2_cptx_lf_misc_int irq_misc, irq_misc_ack;
	struct otx2_cptlf_info *lf = arg;
	struct device *dev;

	dev = &lf->lfs->pdev->dev;
	irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, lf->lfs->blkaddr,
				     lf->slot, OTX2_CPT_LF_MISC_INT);
	irq_misc_ack.u = 0x0;

	if (irq_misc.s.fault) {
		dev_err(dev, "Memory error detected while executing CPT_INST_S, LF %d.\n",
			lf->slot);
		irq_misc_ack.s.fault = 0x1;

	} else if (irq_misc.s.hwerr) {
		dev_err(dev, "HW error from an engine executing CPT_INST_S, LF %d.\n",
			lf->slot);
		irq_misc_ack.s.hwerr = 0x1;

	} else if (irq_misc.s.nwrp) {
		dev_err(dev, "SMMU fault while writing CPT_RES_S to CPT_INST_S[RES_ADDR], LF %d.\n",
			lf->slot);
		irq_misc_ack.s.nwrp = 0x1;

	} else if (irq_misc.s.irde) {
		dev_err(dev, "Memory error when accessing instruction memory queue CPT_LF_Q_BASE[ADDR].\n");
		irq_misc_ack.s.irde = 0x1;

	} else if (irq_misc.s.nqerr) {
		dev_err(dev, "Error enqueuing an instruction received at CPT_LF_NQ.\n");
		irq_misc_ack.s.nqerr = 0x1;

	} else {
		dev_err(dev, "Unhandled interrupt in CPT LF %d\n", lf->slot);
		return IRQ_NONE;
	}

	/* Acknowledge interrupts */
	otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
			 OTX2_CPT_LF_MISC_INT, irq_misc_ack.u);

	return IRQ_HANDLED;
}

static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
{
	union otx2_cptx_lf_done_wait done_wait;
	struct otx2_cptlf_info *lf = arg;
	int irq_cnt;

	/* Read the number of completed requests */
	irq_cnt = cptlf_read_done_cnt(lf);
	if (irq_cnt) {
		done_wait.u = otx2_cpt_read64(lf->lfs->reg_base,
					      lf->lfs->blkaddr, lf->slot,
					      OTX2_CPT_LF_DONE_WAIT);
		/* Acknowledge the number of completed requests */
		otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
				 OTX2_CPT_LF_DONE_ACK, irq_cnt);

		/* Re-arm the done interrupt coalescing parameters */
		otx2_cpt_write64(lf->lfs->reg_base, lf->lfs->blkaddr, lf->slot,
				 OTX2_CPT_LF_DONE_WAIT, done_wait.u);
		if (unlikely(!lf->wqe)) {
			dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
				lf->slot);
			return IRQ_NONE;
		}

		/* Schedule processing of completed requests */
		tasklet_hi_schedule(&lf->wqe->work);
	}
	return IRQ_HANDLED;
}
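
/*
 * Context sketch, not part of the original file: the done handler above
 * assumes lf->wqe carries a tasklet ("work") that the requests-manager
 * code set up to drain completed requests. A hypothetical wiring, with
 * an invented callback name and assuming the otx2_cptlf_wqe type from
 * otx2_cptlf.h, might look like this:
 */
static void __maybe_unused cptlf_done_task_example(unsigned long data)
{
	struct otx2_cptlf_wqe *wqe = (struct otx2_cptlf_wqe *)data;

	/* Walk this LF's pending queue and complete finished requests */
	(void)wqe;
}
/*
 * During queue setup (hypothetical):
 *	tasklet_init(&lf->wqe->work, cptlf_done_task_example,
 *		     (unsigned long)lf->wqe);
 */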

void otx2_cptlf_unregister_misc_interrupts(struct otx2_cptlfs_info *lfs)
{
	int i, irq_offs, vector;

	irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
	for (i = 0; i < lfs->lfs_num; i++) {
		if (!lfs->lf[i].is_irq_reg[irq_offs])
			continue;

		vector = pci_irq_vector(lfs->pdev,
					lfs->lf[i].msix_offset + irq_offs);
		free_irq(vector, &lfs->lf[i]);
		lfs->lf[i].is_irq_reg[irq_offs] = false;
	}

	cptlf_set_misc_intrs(lfs, false);
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_misc_interrupts,
		     CRYPTO_DEV_OCTEONTX2_CPT);

void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs)
{
	int i, irq_offs, vector;

	irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
	for (i = 0; i < lfs->lfs_num; i++) {
		if (!lfs->lf[i].is_irq_reg[irq_offs])
			continue;

		vector = pci_irq_vector(lfs->pdev,
					lfs->lf[i].msix_offset + irq_offs);
		free_irq(vector, &lfs->lf[i]);
		lfs->lf[i].is_irq_reg[irq_offs] = false;
	}

	cptlf_set_done_intrs(lfs, false);
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_done_interrupts,
		     CRYPTO_DEV_OCTEONTX2_CPT);

static int cptlf_do_register_interrupts(struct otx2_cptlfs_info *lfs,
					int lf_num, int irq_offset,
					irq_handler_t handler)
{
	int ret, vector;

	vector = pci_irq_vector(lfs->pdev, lfs->lf[lf_num].msix_offset +
				irq_offset);
	ret = request_irq(vector, handler, 0,
			  lfs->lf[lf_num].irq_name[irq_offset],
			  &lfs->lf[lf_num]);
	if (ret)
		return ret;

	lfs->lf[lf_num].is_irq_reg[irq_offset] = true;

	return ret;
}

int otx2_cptlf_register_misc_interrupts(struct otx2_cptlfs_info *lfs)
{
	bool is_cpt1 = (lfs->blkaddr == BLKADDR_CPT1);
	int irq_offs, ret, i;

	irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
	for (i = 0; i < lfs->lfs_num; i++) {
		snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPT%dLF Misc%d",
			 is_cpt1, i);
		ret = cptlf_do_register_interrupts(lfs, i, irq_offs,
						   cptlf_misc_intr_handler);
		if (ret)
			goto free_irq;
	}
	cptlf_set_misc_intrs(lfs, true);
	return 0;

free_irq:
	otx2_cptlf_unregister_misc_interrupts(lfs);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_misc_interrupts,
		     CRYPTO_DEV_OCTEONTX2_CPT);

int otx2_cptlf_register_done_interrupts(struct otx2_cptlfs_info *lfs)
{
	bool is_cpt1 = (lfs->blkaddr == BLKADDR_CPT1);
	int irq_offs, ret, i;

	irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
	for (i = 0; i < lfs->lfs_num; i++) {
		snprintf(lfs->lf[i].irq_name[irq_offs], 32,
			 "OTX2_CPT%dLF Done%d", is_cpt1, i);
		ret = cptlf_do_register_interrupts(lfs, i, irq_offs,
						   cptlf_done_intr_handler);
		if (ret)
			goto free_irq;
	}
	cptlf_set_done_intrs(lfs, true);
	return 0;

free_irq:
	otx2_cptlf_unregister_done_interrupts(lfs);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_done_interrupts,
		     CRYPTO_DEV_OCTEONTX2_CPT);
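
/*
 * Ordering sketch (hypothetical helper, not part of the original file):
 * a caller would typically register the misc (error) interrupts before
 * the done interrupts, and roll back in reverse order on failure.
 */
static int __maybe_unused
cptlf_register_irqs_example(struct otx2_cptlfs_info *lfs)
{
	int ret;

	ret = otx2_cptlf_register_misc_interrupts(lfs);
	if (ret)
		return ret;

	ret = otx2_cptlf_register_done_interrupts(lfs);
	if (ret)
		otx2_cptlf_unregister_misc_interrupts(lfs);

	return ret;
}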

void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
	int slot, offs;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++)
			irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
					      lfs->lf[slot].msix_offset +
					      offs), NULL);
		free_cpumask_var(lfs->lf[slot].affinity_mask);
	}
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_free_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);

int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
	struct otx2_cptlf_info *lf = lfs->lf;
	int slot, offs, ret;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		if (!zalloc_cpumask_var(&lf[slot].affinity_mask, GFP_KERNEL)) {
			dev_err(&lfs->pdev->dev,
				"cpumask allocation failed for LF %d\n", slot);
			ret = -ENOMEM;
			goto free_affinity_mask;
		}

		cpumask_set_cpu(cpumask_local_spread(slot,
				dev_to_node(&lfs->pdev->dev)),
				lf[slot].affinity_mask);

		for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
			ret = irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
						lf[slot].msix_offset + offs),
						lf[slot].affinity_mask);
			if (ret)
				goto free_affinity_mask;
		}
	}
	return 0;

free_affinity_mask:
	otx2_cptlf_free_irqs_affinity(lfs);
	return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_set_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);

int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
		    int lfs_num)
{
	int slot, ret;

	if (!lfs->pdev || !lfs->reg_base)
		return -EINVAL;

	lfs->lfs_num = lfs_num;
	for (slot = 0; slot < lfs->lfs_num; slot++) {
		lfs->lf[slot].lfs = lfs;
		lfs->lf[slot].slot = slot;
		if (lfs->lmt_base)
			lfs->lf[slot].lmtline = lfs->lmt_base +
						(slot * LMTLINE_SIZE);
		else
			lfs->lf[slot].lmtline = lfs->reg_base +
				OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
						 OTX2_CPT_LMT_LF_LMTLINEX(0));

		lfs->lf[slot].ioreg = lfs->reg_base +
			OTX2_CPT_RVU_FUNC_ADDR_S(lfs->blkaddr, slot,
						 OTX2_CPT_LF_NQX(0));
	}
	/* Send request to attach LFs */
	ret = otx2_cpt_attach_rscrs_msg(lfs);
	if (ret)
		goto clear_lfs_num;

	ret = otx2_cpt_alloc_instruction_queues(lfs);
	if (ret) {
		dev_err(&lfs->pdev->dev,
			"Allocating instruction queues failed\n");
		goto detach_rsrcs;
	}
	cptlf_hw_init(lfs);
	/*
	 * Allow each LF to execute requests destined to any of 8 engine
	 * groups and set queue priority of each LF to high
	 */
	ret = cptlf_set_grp_and_pri(lfs, eng_grp_mask, pri);
	if (ret)
		goto free_iq;

	if (lfs->ctx_ilen_ovrd) {
		ret = cptlf_set_ctx_ilen(lfs, lfs->ctx_ilen);
		if (ret)
			goto free_iq;
	}

	return 0;

free_iq:
	cptlf_hw_cleanup(lfs);
	otx2_cpt_free_instruction_queues(lfs);
detach_rsrcs:
	otx2_cpt_detach_rsrcs_msg(lfs);
clear_lfs_num:
	lfs->lfs_num = 0;
	return ret;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, CRYPTO_DEV_OCTEONTX2_CPT);

void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
{
	/* Cleanup LFs hardware side */
	cptlf_hw_cleanup(lfs);
	/* Free instruction queues */
	otx2_cpt_free_instruction_queues(lfs);
	/* Send request to detach LFs */
	otx2_cpt_detach_rsrcs_msg(lfs);
	lfs->lfs_num = 0;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, CRYPTO_DEV_OCTEONTX2_CPT);

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Common module");
MODULE_LICENSE("GPL");
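
/*
 * End-to-end lifecycle sketch (hypothetical caller, not part of the
 * original file): attach and initialize the LFs, bring up interrupts
 * and IRQ affinity, then tear everything down in reverse. The 0xFF
 * group mask and priority 1 mirror the "any of 8 engine groups, high
 * priority" comment in otx2_cptlf_init(); real callers may use driver
 * constants instead of these literals.
 */
static int __maybe_unused cptlf_lifecycle_example(struct otx2_cptlfs_info *lfs,
						  int lfs_num)
{
	int ret;

	ret = otx2_cptlf_init(lfs, 0xFF /* all 8 engine groups */,
			      1 /* high priority */, lfs_num);
	if (ret)
		return ret;

	ret = otx2_cptlf_register_misc_interrupts(lfs);
	if (ret)
		goto shutdown;

	ret = otx2_cptlf_register_done_interrupts(lfs);
	if (ret)
		goto unregister_misc;

	/* On failure this frees its own cpumasks, so skip the free below */
	ret = otx2_cptlf_set_irqs_affinity(lfs);
	if (ret)
		goto unregister_done;

	/* ... enqueue CPT_INST_S instructions via lf->ioreg / lmtline ... */

	otx2_cptlf_free_irqs_affinity(lfs);
unregister_done:
	otx2_cptlf_unregister_done_interrupts(lfs);
unregister_misc:
	otx2_cptlf_unregister_misc_interrupts(lfs);
shutdown:
	otx2_cptlf_shutdown(lfs);
	return ret;
}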