// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

/*
 * Marvell RVU CPT Virtual Function driver: binds to CPT VF PCI devices,
 * brings up the PF<->VF mailbox, attaches CPT local functions (LFs) and
 * registers the crypto algorithms implemented in otx2_cptvf_algs.
 */

#include "otx2_cpt_common.h"
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "otx2_cptvf_algs.h"
#include "cn10k_cpt.h"
#include <rvu_reg.h>

#define OTX2_CPTVF_DRV_NAME "rvu_cptvf"

/* Ack any pending PF->VF mailbox interrupt, then enable it. */
static void cptvf_enable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
	/* Clear interrupt if any */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
			 0x1ULL);

	/* Enable PF-VF interrupt */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
			 OTX2_RVU_VF_INT_ENA_W1S, 0x1ULL);
}

/* Disable the PF->VF mailbox interrupt and ack anything still pending. */
static void cptvf_disable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
	/* Disable PF-VF interrupt */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
			 OTX2_RVU_VF_INT_ENA_W1C, 0x1ULL);

	/* Clear interrupt if any */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
			 0x1ULL);
}

/*
 * Allocate all of the VF's MSI-X vectors, hook the PF<->VF mailbox
 * interrupt handler, enable the mailbox interrupt, and confirm the PF
 * is responsive by sending a READY message.
 *
 * Returns 0 on success, -EINVAL if no MSI-X vectors are available,
 * -EPROBE_DEFER when the PF does not answer the mailbox, or a negative
 * errno from vector allocation / IRQ registration.  The IRQ is
 * devm-managed; the vectors allocated here are released with the device.
 */
static int cptvf_register_interrupts(struct otx2_cptvf_dev *cptvf)
{
	int ret, irq;
	int num_vec;

	num_vec = pci_msix_vec_count(cptvf->pdev);
	if (num_vec <= 0)
		return -EINVAL;

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(cptvf->pdev, num_vec, num_vec,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&cptvf->pdev->dev,
			"Request for %d msix vectors failed\n", num_vec);
		return ret;
	}
	irq = pci_irq_vector(cptvf->pdev, OTX2_CPT_VF_INT_VEC_E_MBOX);
	/* Register VF<=>PF mailbox interrupt handler */
	ret = devm_request_irq(&cptvf->pdev->dev, irq,
			       otx2_cptvf_pfvf_mbox_intr, 0,
			       "CPTPFVF Mbox", cptvf);
	if (ret)
		return ret;
	/* Enable PF-VF mailbox interrupts */
	cptvf_enable_pfvf_mbox_intrs(cptvf);

	ret = otx2_cpt_send_ready_msg(&cptvf->pfvf_mbox, cptvf->pdev);
	if (ret) {
		dev_warn(&cptvf->pdev->dev,
			 "PF not responding to mailbox, deferring probe\n");
		cptvf_disable_pfvf_mbox_intrs(cptvf);
		return -EPROBE_DEFER;
	}
	return 0;
}

/*
 * Set up the PF<->VF mailbox: an ordered workqueue for deferred message
 * handling, the mailbox memory region (in BAR2 register space on cn10k,
 * a devm-mapped BAR4 region otherwise), the mbox state itself and its
 * bounce buffer.  Undone by cptvf_pfvf_mbox_destroy().
 */
static int
cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	resource_size_t offset, size;
	int ret;

	cptvf->pfvf_mbox_wq =
		alloc_ordered_workqueue("cpt_pfvf_mailbox",
					WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!cptvf->pfvf_mbox_wq)
		return -ENOMEM;

	if (test_bit(CN10K_MBOX, &cptvf->cap_flag)) {
		/* For cn10k platform, VF mailbox region is in its BAR2
		 * register space
		 */
		cptvf->pfvf_mbox_base = cptvf->reg_base +
					CN10K_CPT_VF_MBOX_REGION;
	} else {
		offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
		size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
		/* Map PF-VF mailbox memory */
		cptvf->pfvf_mbox_base = devm_ioremap_wc(&pdev->dev, offset,
							size);
		if (!cptvf->pfvf_mbox_base) {
			dev_err(&pdev->dev, "Unable to map BAR4\n");
			ret = -ENOMEM;
			goto free_wqe;
		}
	}

	ret = otx2_mbox_init(&cptvf->pfvf_mbox, cptvf->pfvf_mbox_base,
			     pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
	if (ret)
		goto free_wqe;

	ret = otx2_cpt_mbox_bbuf_init(cptvf, pdev);
	if (ret)
		goto destroy_mbox;

	INIT_WORK(&cptvf->pfvf_mbox_work, otx2_cptvf_pfvf_mbox_handler);
	return 0;

destroy_mbox:
	otx2_mbox_destroy(&cptvf->pfvf_mbox);
free_wqe:
	destroy_workqueue(cptvf->pfvf_mbox_wq);
	return ret;
}

/* Reverse of cptvf_pfvf_mbox_init(): flush/destroy the workqueue first so
 * no mailbox work can run against a destroyed mbox.
 */
static void cptvf_pfvf_mbox_destroy(struct otx2_cptvf_dev *cptvf)
{
	destroy_workqueue(cptvf->pfvf_mbox_wq);
	otx2_mbox_destroy(&cptvf->pfvf_mbox);
}

/* Tasklet body: hand this LF's wqe to otx2_cpt_post_process(). */
static void cptlf_work_handler(unsigned long data)
{
	otx2_cpt_post_process((struct otx2_cptlf_wqe *) data);
}

/* Kill and free the per-LF tasklet wqe for every attached LF. */
static void cleanup_tasklet_work(struct otx2_cptlfs_info *lfs)
{
	int i;

	for (i = 0; i < lfs->lfs_num; i++) {
		if (!lfs->lf[i].wqe)
			continue;

		tasklet_kill(&lfs->lf[i].wqe->work);
		kfree(lfs->lf[i].wqe);
		lfs->lf[i].wqe = NULL;
	}
}

/*
 * Allocate and initialize one tasklet wqe per LF.  On failure every
 * wqe allocated so far is released via cleanup_tasklet_work().
 */
static int init_tasklet_work(struct otx2_cptlfs_info *lfs)
{
	struct otx2_cptlf_wqe *wqe;
	int i, ret = 0;

	for (i = 0; i < lfs->lfs_num; i++) {
		wqe = kzalloc(sizeof(struct otx2_cptlf_wqe), GFP_KERNEL);
		if (!wqe) {
			ret = -ENOMEM;
			goto cleanup_tasklet;
		}

		tasklet_init(&wqe->work, cptlf_work_handler, (u64) wqe);
		wqe->lfs = lfs;
		wqe->lf_num = i;
		lfs->lf[i].wqe = wqe;
	}
	return 0;

cleanup_tasklet:
	cleanup_tasklet_work(lfs);
	return ret;
}

/* Free every LF's pending-queue backing storage. */
static void free_pending_queues(struct otx2_cptlfs_info *lfs)
{
	int i;

	for (i = 0; i < lfs->lfs_num; i++) {
		kfree(lfs->lf[i].pqueue.head);
		lfs->lf[i].pqueue.head = NULL;
	}
}

/*
 * Allocate a pending queue of OTX2_CPT_INST_QLEN_MSGS entries for each
 * LF and initialize its lock.  Partial allocations are rolled back via
 * free_pending_queues() on failure.
 */
static int alloc_pending_queues(struct otx2_cptlfs_info *lfs)
{
	int size, ret, i;

	if (!lfs->lfs_num)
		return -EINVAL;

	for (i = 0; i < lfs->lfs_num; i++) {
		lfs->lf[i].pqueue.qlen = OTX2_CPT_INST_QLEN_MSGS;
		size = lfs->lf[i].pqueue.qlen *
		       sizeof(struct otx2_cpt_pending_entry);

		lfs->lf[i].pqueue.head = kzalloc(size, GFP_KERNEL);
		if (!lfs->lf[i].pqueue.head) {
			ret = -ENOMEM;
			goto error;
		}

		/* Initialize spin lock */
		spin_lock_init(&lfs->lf[i].pqueue.lock);
	}
	return 0;

error:
	free_pending_queues(lfs);
	return ret;
}

/* Software-side LF teardown: tasklets first, then pending queues. */
static void lf_sw_cleanup(struct otx2_cptlfs_info *lfs)
{
	cleanup_tasklet_work(lfs);
	free_pending_queues(lfs);
}

/* Software-side LF setup: pending queues, then per-LF tasklets. */
static int lf_sw_init(struct otx2_cptlfs_info *lfs)
{
	int ret;

	ret = alloc_pending_queues(lfs);
	if (ret) {
		dev_err(&lfs->pdev->dev,
			"Allocating pending queues failed\n");
		return ret;
	}
	ret = init_tasklet_work(lfs);
	if (ret) {
		dev_err(&lfs->pdev->dev,
			"Tasklet work init failed\n");
		goto pending_queues_free;
	}
	return 0;

pending_queues_free:
	free_pending_queues(lfs);
	return ret;
}

/*
 * Full LF teardown, mirroring cptvf_lf_init() in reverse: mark the LFs
 * as resetting, quiesce interrupts and queues, unregister the crypto
 * algorithms, release software state and finally ask the PF to detach
 * the LF resources.  The exact ordering here matters.
 */
static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
{
	atomic_set(&lfs->state, OTX2_CPTLF_IN_RESET);

	/* Remove interrupts affinity */
	otx2_cptlf_free_irqs_affinity(lfs);
	/* Disable instruction queue */
	otx2_cptlf_disable_iqueues(lfs);
	/* Unregister crypto algorithms */
	otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE);
	/* Unregister LFs interrupts */
	otx2_cptlf_unregister_misc_interrupts(lfs);
	otx2_cptlf_unregister_done_interrupts(lfs);
	/* Cleanup LFs software side */
	lf_sw_cleanup(lfs);
	/* Free instruction queues */
	otx2_cpt_free_instruction_queues(lfs);
	/* Send request to detach LFs */
	otx2_cpt_detach_rsrcs_msg(lfs);
	lfs->lfs_num = 0;
}

/*
 * Attach and bring up the CPT LFs for this VF: query the PF for the
 * kernel-crypto engine group and the per-VF LF limit, initialize the
 * LFs, hook their interrupts, and register the crypto algorithms.
 * Each failure unwinds everything done so far via the goto chain.
 */
static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
{
	struct otx2_cptlfs_info *lfs = &cptvf->lfs;
	struct device *dev = &cptvf->pdev->dev;
	int ret, lfs_num;
	u8 eng_grp_msk;

	/* Get engine group number for symmetric crypto */
	cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
	ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
	if (ret)
		return ret;

	/* The mailbox handler fills in kcrypto_eng_grp_num; if it is still
	 * the sentinel, the PF has no group configured for kernel crypto.
	 */
	if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
		dev_err(dev, "Engine group for kernel crypto not available\n");
		ret = -ENOENT;
		return ret;
	}
	/* One-hot mask selecting the kernel-crypto engine group */
	eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;

	ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
	if (ret)
		return ret;

	lfs_num = cptvf->lfs.kvf_limits;

	ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
			      lfs_num);
	if (ret)
		return ret;

	/* Get msix offsets for attached LFs */
	ret = otx2_cpt_msix_offset_msg(lfs);
	if (ret)
		goto cleanup_lf;

	/* Initialize LFs software side */
	ret = lf_sw_init(lfs);
	if (ret)
		goto cleanup_lf;

	/* Register LFs interrupts */
	ret = otx2_cptlf_register_misc_interrupts(lfs);
	if (ret)
		goto cleanup_lf_sw;

	ret = otx2_cptlf_register_done_interrupts(lfs);
	if (ret)
		goto cleanup_lf_sw;

	/* Set interrupts affinity */
	ret = otx2_cptlf_set_irqs_affinity(lfs);
	if (ret)
		goto unregister_intr;

	atomic_set(&lfs->state, OTX2_CPTLF_STARTED);
	/* Register crypto algorithms */
	ret = otx2_cpt_crypto_init(lfs->pdev, THIS_MODULE, lfs_num, 1);
	if (ret) {
		dev_err(&lfs->pdev->dev, "algorithms registration failed\n");
		goto disable_irqs;
	}
	return 0;

disable_irqs:
	otx2_cptlf_free_irqs_affinity(lfs);
unregister_intr:
	otx2_cptlf_unregister_misc_interrupts(lfs);
	otx2_cptlf_unregister_done_interrupts(lfs);
cleanup_lf_sw:
	lf_sw_cleanup(lfs);
cleanup_lf:
	otx2_cptlf_shutdown(lfs);

	return ret;
}

/*
 * Probe a CPT VF PCI device: enable the device, map its register BAR,
 * bring up the PF<->VF mailbox and interrupts, query capabilities,
 * then initialize LMTST (cn10k) and the CPT LFs.  Device-managed
 * (devm_/pcim_) resources are released automatically on failure; the
 * goto chain unwinds only the manually acquired ones.
 */
static int otx2_cptvf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx2_cptvf_dev *cptvf;
	int ret;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto clear_drvdata;
	}

	ret = pcim_request_all_regions(pdev, OTX2_CPTVF_DRV_NAME);
	if (ret) {
		dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret);
		goto clear_drvdata;
	}
	pci_set_master(pdev);
	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;

	/* Map VF's configuration registers */
	cptvf->reg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
	if (!cptvf->reg_base) {
		ret = -ENOMEM;
		dev_err(dev, "Couldn't ioremap PCI resource 0x%x\n", ret);
		goto clear_drvdata;
	}

	otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);

	/* Initialize PF<=>VF mailbox */
	ret = cptvf_pfvf_mbox_init(cptvf);
	if (ret)
		goto clear_drvdata;

	/* Register interrupts */
	ret = cptvf_register_interrupts(cptvf);
	if (ret)
		goto destroy_pfvf_mbox;

	cptvf->blkaddr = BLKADDR_CPT0;

	cptvf_hw_ops_get(cptvf);

	otx2_cptlf_set_dev_info(&cptvf->lfs, cptvf->pdev, cptvf->reg_base,
				&cptvf->pfvf_mbox, cptvf->blkaddr);

	ret = otx2_cptvf_send_caps_msg(cptvf);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't get CPT engine capabilities.\n");
		goto unregister_interrupts;
	}
	/* SE engine caps bit 35 selects the SGv2 descriptor format —
	 * NOTE(review): confirm the bit meaning against the CPT HW spec.
	 */
	if (cptvf->eng_caps[OTX2_CPT_SE_TYPES] & BIT_ULL(35))
		cptvf->lfs.ops->cpt_sg_info_create = cn10k_sgv2_info_create;

	ret = cn10k_cptvf_lmtst_init(cptvf);
	if (ret)
		goto unregister_interrupts;

	/* Initialize CPT LFs */
	ret = cptvf_lf_init(cptvf);
	if (ret)
		goto free_lmtst;

	return 0;

free_lmtst:
	cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
unregister_interrupts:
	cptvf_disable_pfvf_mbox_intrs(cptvf);
destroy_pfvf_mbox:
	cptvf_pfvf_mbox_destroy(cptvf);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);

	return ret;
}

/*
 * Remove path: shut down the LFs, silence and destroy the PF<->VF
 * mailbox, and free the LMTST memory — the reverse of probe order.
 */
static void otx2_cptvf_remove(struct pci_dev *pdev)
{
	struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT VF device.\n");
		return;
	}
	cptvf_lf_shutdown(&cptvf->lfs);
	/* Disable PF-VF mailbox interrupt */
	cptvf_disable_pfvf_mbox_intrs(cptvf);
	/* Destroy PF-VF mbox */
	cptvf_pfvf_mbox_destroy(cptvf);
	/* Free LMTST memory */
	cn10k_cpt_lmtst_free(pdev, &cptvf->lfs);
	pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, OTX2_CPT_PCI_VF_DEVICE_ID), 0},
	{PCI_VDEVICE(CAVIUM, CN10K_CPT_PCI_VF_DEVICE_ID), 0},
	{ 0, } /* end of table */
};

static struct pci_driver otx2_cptvf_pci_driver = {
	.name = OTX2_CPTVF_DRV_NAME,
	.id_table = otx2_cptvf_id_table,
	.probe = otx2_cptvf_probe,
	.remove = otx2_cptvf_remove,
};

/* Standard PCI driver registration plus module metadata. */
module_pci_driver(otx2_cptvf_pci_driver);

MODULE_IMPORT_NS("CRYPTO_DEV_OCTEONTX2_CPT");

MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cptvf_id_table);