// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include "otx2_cpt_common.h"
#include "otx2_cptvf.h"
#include "otx2_cptlf.h"
#include "otx2_cptvf_algs.h"
#include "cn10k_cpt.h"
#include <rvu_reg.h>

#define OTX2_CPTVF_DRV_NAME "rvu_cptvf"

/*
 * Enable the PF->VF mailbox interrupt: acknowledge any stale pending
 * bit in OTX2_RVU_VF_INT first, then set the write-1-to-set enable bit.
 */
static void cptvf_enable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
	/* Clear interrupt if any */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
			 0x1ULL);

	/* Enable PF-VF interrupt */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
			 OTX2_RVU_VF_INT_ENA_W1S, 0x1ULL);
}

/*
 * Disable the PF->VF mailbox interrupt (write-1-to-clear the enable
 * bit) and then acknowledge anything still pending so no stale bit
 * survives a later re-enable.
 */
static void cptvf_disable_pfvf_mbox_intrs(struct otx2_cptvf_dev *cptvf)
{
	/* Disable PF-VF interrupt */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0,
			 OTX2_RVU_VF_INT_ENA_W1C, 0x1ULL);

	/* Clear interrupt if any */
	otx2_cpt_write64(cptvf->reg_base, BLKADDR_RVUM, 0, OTX2_RVU_VF_INT,
			 0x1ULL);
}

/*
 * Allocate all of the VF's MSI-X vectors, register the PF<->VF mailbox
 * IRQ handler and send the initial READY message to the PF.
 *
 * Returns 0 on success, -EPROBE_DEFER when the PF does not answer the
 * READY message (the PF driver may simply not be loaded yet), or a
 * negative error code otherwise.
 */
static int cptvf_register_interrupts(struct otx2_cptvf_dev *cptvf)
{
	int ret, irq;
	int num_vec;

	num_vec = pci_msix_vec_count(cptvf->pdev);
	if (num_vec <= 0)
		return -EINVAL;

	/*
	 * Enable MSI-X. min == max == num_vec, so this either grants the
	 * full vector set or fails.
	 * NOTE(review): no explicit pci_free_irq_vectors() exists in the
	 * error paths here — presumably the vectors are devres-managed
	 * because probe uses pcim_enable_device(); confirm.
	 */
	ret = pci_alloc_irq_vectors(cptvf->pdev, num_vec, num_vec,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&cptvf->pdev->dev,
			"Request for %d msix vectors failed\n", num_vec);
		return ret;
	}
	irq = pci_irq_vector(cptvf->pdev, OTX2_CPT_VF_INT_VEC_E_MBOX);
	/* Register VF<=>PF mailbox interrupt handler */
	ret = devm_request_irq(&cptvf->pdev->dev, irq,
			       otx2_cptvf_pfvf_mbox_intr, 0,
			       "CPTPFVF Mbox", cptvf);
	if (ret)
		return ret;
	/* Enable PF-VF mailbox interrupts */
	cptvf_enable_pfvf_mbox_intrs(cptvf);

	ret = otx2_cpt_send_ready_msg(&cptvf->pfvf_mbox, cptvf->pdev);
	if (ret) {
		/* PF may not be up yet; let the PCI core retry probe later */
		dev_warn(&cptvf->pdev->dev,
			 "PF not responding to mailbox, deferring probe\n");
		cptvf_disable_pfvf_mbox_intrs(cptvf);
		return -EPROBE_DEFER;
	}
	return 0;
}

static int
cptvf_pfvf_mbox_init(struct otx2_cptvf_dev *cptvf)
{
	struct pci_dev *pdev = cptvf->pdev;
	resource_size_t offset, size;
	int ret;

	/* Ordered workqueue: mailbox messages are handled one at a time */
	cptvf->pfvf_mbox_wq =
		alloc_ordered_workqueue("cpt_pfvf_mailbox",
					WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!cptvf->pfvf_mbox_wq)
		return -ENOMEM;

	if (test_bit(CN10K_MBOX, &cptvf->cap_flag)) {
		/* For cn10k platform, VF mailbox region is in its BAR2
		 * register space
		 */
		cptvf->pfvf_mbox_base = cptvf->reg_base +
					CN10K_CPT_VF_MBOX_REGION;
	} else {
		offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
		size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
		/* Map PF-VF mailbox memory (write-combining mapping) */
		cptvf->pfvf_mbox_base = devm_ioremap_wc(&pdev->dev, offset,
							size);
		if (!cptvf->pfvf_mbox_base) {
			/* NOTE(review): message says BAR4 — presumably
			 * PCI_MBOX_BAR_NUM == 4; verify against header.
			 */
			dev_err(&pdev->dev, "Unable to map BAR4\n");
			ret = -ENOMEM;
			goto free_wqe;
		}
	}

	ret = otx2_mbox_init(&cptvf->pfvf_mbox, cptvf->pfvf_mbox_base,
			     pdev, cptvf->reg_base, MBOX_DIR_VFPF, 1);
	if (ret)
		goto free_wqe;

	/* Bounce buffer used by the common CPT mailbox code */
	ret = otx2_cpt_mbox_bbuf_init(cptvf, pdev);
	if (ret)
		goto destroy_mbox;

	INIT_WORK(&cptvf->pfvf_mbox_work, otx2_cptvf_pfvf_mbox_handler);
	return 0;

destroy_mbox:
	otx2_mbox_destroy(&cptvf->pfvf_mbox);
free_wqe:
	destroy_workqueue(cptvf->pfvf_mbox_wq);
	return ret;
}

/* Tear down what cptvf_pfvf_mbox_init() created (reverse order) */
static void cptvf_pfvf_mbox_destroy(struct otx2_cptvf_dev *cptvf)
{
	destroy_workqueue(cptvf->pfvf_mbox_wq);
	otx2_mbox_destroy(&cptvf->pfvf_mbox);
}

/* Tasklet callback: "data" is the struct otx2_cptlf_wqe installed by
 * init_tasklet_work().
 */
static void cptlf_work_handler(unsigned long data)
{
	otx2_cpt_post_process((struct otx2_cptlf_wqe *) data);
}

/* Kill and free the per-LF tasklets. NULL wqe entries are skipped, so
 * this is safe on a partially initialized LF array and doubles as the
 * error-path undo for init_tasklet_work().
 */
static void cleanup_tasklet_work(struct otx2_cptlfs_info *lfs)
{
	int i;

	for (i = 0; i < lfs->lfs_num; i++) {
		if (!lfs->lf[i].wqe)
			continue;

		tasklet_kill(&lfs->lf[i].wqe->work);
		kfree(lfs->lf[i].wqe);
		lfs->lf[i].wqe = NULL;
	}
}

/* Allocate one wqe + tasklet per LF for completion post-processing */
static int init_tasklet_work(struct otx2_cptlfs_info *lfs)
{
	struct
otx2_cptlf_wqe *wqe; 150 int i, ret = 0; 151 152 for (i = 0; i < lfs->lfs_num; i++) { 153 wqe = kzalloc(sizeof(struct otx2_cptlf_wqe), GFP_KERNEL); 154 if (!wqe) { 155 ret = -ENOMEM; 156 goto cleanup_tasklet; 157 } 158 159 tasklet_init(&wqe->work, cptlf_work_handler, (u64) wqe); 160 wqe->lfs = lfs; 161 wqe->lf_num = i; 162 lfs->lf[i].wqe = wqe; 163 } 164 return 0; 165 166 cleanup_tasklet: 167 cleanup_tasklet_work(lfs); 168 return ret; 169 } 170 171 static void free_pending_queues(struct otx2_cptlfs_info *lfs) 172 { 173 int i; 174 175 for (i = 0; i < lfs->lfs_num; i++) { 176 kfree(lfs->lf[i].pqueue.head); 177 lfs->lf[i].pqueue.head = NULL; 178 } 179 } 180 181 static int alloc_pending_queues(struct otx2_cptlfs_info *lfs) 182 { 183 int size, ret, i; 184 185 if (!lfs->lfs_num) 186 return -EINVAL; 187 188 for (i = 0; i < lfs->lfs_num; i++) { 189 lfs->lf[i].pqueue.qlen = OTX2_CPT_INST_QLEN_MSGS; 190 size = lfs->lf[i].pqueue.qlen * 191 sizeof(struct otx2_cpt_pending_entry); 192 193 lfs->lf[i].pqueue.head = kzalloc(size, GFP_KERNEL); 194 if (!lfs->lf[i].pqueue.head) { 195 ret = -ENOMEM; 196 goto error; 197 } 198 199 /* Initialize spin lock */ 200 spin_lock_init(&lfs->lf[i].pqueue.lock); 201 } 202 return 0; 203 204 error: 205 free_pending_queues(lfs); 206 return ret; 207 } 208 209 static void lf_sw_cleanup(struct otx2_cptlfs_info *lfs) 210 { 211 cleanup_tasklet_work(lfs); 212 free_pending_queues(lfs); 213 } 214 215 static int lf_sw_init(struct otx2_cptlfs_info *lfs) 216 { 217 int ret; 218 219 ret = alloc_pending_queues(lfs); 220 if (ret) { 221 dev_err(&lfs->pdev->dev, 222 "Allocating pending queues failed\n"); 223 return ret; 224 } 225 ret = init_tasklet_work(lfs); 226 if (ret) { 227 dev_err(&lfs->pdev->dev, 228 "Tasklet work init failed\n"); 229 goto pending_queues_free; 230 } 231 return 0; 232 233 pending_queues_free: 234 free_pending_queues(lfs); 235 return ret; 236 } 237 238 static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs) 239 { 240 atomic_set(&lfs->state, 
OTX2_CPTLF_IN_RESET);

	/* Remove interrupts affinity */
	otx2_cptlf_free_irqs_affinity(lfs);
	/* Disable instruction queue */
	otx2_cptlf_disable_iqueues(lfs);
	/* Unregister crypto algorithms */
	otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE);
	/* Unregister LFs interrupts */
	otx2_cptlf_unregister_interrupts(lfs);
	/* Cleanup LFs software side */
	lf_sw_cleanup(lfs);
	/* Send request to detach LFs */
	otx2_cpt_detach_rsrcs_msg(lfs);
}

/*
 * Attach and bring up the CPT LFs for this VF:
 *  - ask the PF for the symmetric-crypto engine group,
 *  - ask for the per-VF LF limit and size the LF count accordingly,
 *  - initialize LFs, MSI-X offsets, software queues and interrupts,
 *  - finally register the kernel crypto algorithms.
 * On failure, everything set up so far is unwound via the goto chain.
 */
static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
{
	struct otx2_cptlfs_info *lfs = &cptvf->lfs;
	struct device *dev = &cptvf->pdev->dev;
	int ret, lfs_num;
	u8 eng_grp_msk;

	/* Get engine group number for symmetric crypto */
	cptvf->lfs.kcrypto_eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
	ret = otx2_cptvf_send_eng_grp_num_msg(cptvf, OTX2_CPT_SE_TYPES);
	if (ret)
		return ret;

	/* Still the sentinel => the PF reply did not provide a group
	 * (presumably filled in by the mbox reply handler — verify).
	 */
	if (cptvf->lfs.kcrypto_eng_grp_num == OTX2_CPT_INVALID_CRYPTO_ENG_GRP) {
		dev_err(dev, "Engine group for kernel crypto not available\n");
		ret = -ENOENT;
		return ret;
	}
	eng_grp_msk = 1 << cptvf->lfs.kcrypto_eng_grp_num;

	ret = otx2_cptvf_send_kvf_limits_msg(cptvf);
	if (ret)
		return ret;

	/* No kvf limit configured => one LF per online CPU */
	lfs_num = cptvf->lfs.kvf_limits ?
cptvf->lfs.kvf_limits :
		  num_online_cpus();

	otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base,
				&cptvf->pfvf_mbox, cptvf->blkaddr);
	ret = otx2_cptlf_init(lfs, eng_grp_msk, OTX2_CPT_QUEUE_HI_PRIO,
			      lfs_num);
	if (ret)
		return ret;

	/* Get msix offsets for attached LFs */
	ret = otx2_cpt_msix_offset_msg(lfs);
	if (ret)
		goto cleanup_lf;

	/* Initialize LFs software side */
	ret = lf_sw_init(lfs);
	if (ret)
		goto cleanup_lf;

	/* Register LFs interrupts */
	ret = otx2_cptlf_register_interrupts(lfs);
	if (ret)
		goto cleanup_lf_sw;

	/* Set interrupts affinity */
	ret = otx2_cptlf_set_irqs_affinity(lfs);
	if (ret)
		goto unregister_intr;

	/* Mark started before exposing algorithms to the crypto API */
	atomic_set(&lfs->state, OTX2_CPTLF_STARTED);
	/* Register crypto algorithms */
	ret = otx2_cpt_crypto_init(lfs->pdev, THIS_MODULE, lfs_num, 1);
	if (ret) {
		dev_err(&lfs->pdev->dev, "algorithms registration failed\n");
		goto disable_irqs;
	}
	return 0;

disable_irqs:
	otx2_cptlf_free_irqs_affinity(lfs);
unregister_intr:
	otx2_cptlf_unregister_interrupts(lfs);
cleanup_lf_sw:
	lf_sw_cleanup(lfs);
cleanup_lf:
	otx2_cptlf_shutdown(lfs);

	return ret;
}

/*
 * PCI probe: enable the device, map the CSR BAR, then bring up the
 * cn10k LMTST lines, the PF<->VF mailbox, interrupts, and the CPT LFs.
 * Managed (pcim_*/devm_*) APIs unwind most resources automatically;
 * only the mailbox and its interrupt need explicit error-path cleanup.
 */
static int otx2_cptvf_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct otx2_cptvf_dev *cptvf;
	int ret;

	cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL);
	if (!cptvf)
		return -ENOMEM;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto clear_drvdata;
	}

	/* 48-bit DMA mask — NOTE(review): matches RVU addressing; confirm */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto clear_drvdata;
	}
	/* Map VF's configuration registers */
	ret = pcim_iomap_regions_request_all(pdev, 1 << PCI_PF_REG_BAR_NUM,
					     OTX2_CPTVF_DRV_NAME);
	if
(ret) {
		dev_err(dev, "Couldn't get PCI resources 0x%x\n", ret);
		goto clear_drvdata;
	}
	pci_set_master(pdev);
	pci_set_drvdata(pdev, cptvf);
	cptvf->pdev = pdev;

	cptvf->reg_base = pcim_iomap_table(pdev)[PCI_PF_REG_BAR_NUM];

	/* Detect platform (otx2 vs cn10k) capabilities from the device */
	otx2_cpt_set_hw_caps(pdev, &cptvf->cap_flag);

	ret = cn10k_cptvf_lmtst_init(cptvf);
	if (ret)
		goto clear_drvdata;

	/* Initialize PF<=>VF mailbox */
	ret = cptvf_pfvf_mbox_init(cptvf);
	if (ret)
		goto clear_drvdata;

	/* Register interrupts */
	ret = cptvf_register_interrupts(cptvf);
	if (ret)
		goto destroy_pfvf_mbox;

	cptvf->blkaddr = BLKADDR_CPT0;
	/* Initialize CPT LFs */
	ret = cptvf_lf_init(cptvf);
	if (ret)
		goto unregister_interrupts;

	return 0;

unregister_interrupts:
	cptvf_disable_pfvf_mbox_intrs(cptvf);
destroy_pfvf_mbox:
	cptvf_pfvf_mbox_destroy(cptvf);
clear_drvdata:
	pci_set_drvdata(pdev, NULL);

	return ret;
}

/* PCI remove: tear down in reverse order of probe */
static void otx2_cptvf_remove(struct pci_dev *pdev)
{
	struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);

	if (!cptvf) {
		dev_err(&pdev->dev, "Invalid CPT VF device.\n");
		return;
	}
	cptvf_lf_shutdown(&cptvf->lfs);
	/* Disable PF-VF mailbox interrupt */
	cptvf_disable_pfvf_mbox_intrs(cptvf);
	/* Destroy PF-VF mbox */
	cptvf_pfvf_mbox_destroy(cptvf);
	pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx2_cptvf_id_table[] = {
	{PCI_VDEVICE(CAVIUM, OTX2_CPT_PCI_VF_DEVICE_ID), 0},
	{PCI_VDEVICE(CAVIUM, CN10K_CPT_PCI_VF_DEVICE_ID), 0},
	{ 0, }	/* end of table */
};

static struct pci_driver otx2_cptvf_pci_driver = {
	.name = OTX2_CPTVF_DRV_NAME,
	.id_table = otx2_cptvf_id_table,
	.probe = otx2_cptvf_probe,
	.remove = otx2_cptvf_remove,
};

module_pci_driver(otx2_cptvf_pci_driver);

MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);

/* Module metadata */
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_cptvf_id_table);