// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "otx_cpt_common.h"
#include "otx_cptpf.h"

#define DRV_NAME	"octeontx-cpt"
#define DRV_VERSION	"1.0"

static void otx_cpt_disable_mbox_interrupts(struct otx_cpt_device *cpt)
{
	/* Disable mbox(0) interrupts for all VFs */
	writeq(~0ull, cpt->reg_base + OTX_CPT_PF_MBOX_ENA_W1CX(0));
}

static void otx_cpt_enable_mbox_interrupts(struct otx_cpt_device *cpt)
{
	/* Enable mbox(0) interrupts for all VFs */
	writeq(~0ull, cpt->reg_base + OTX_CPT_PF_MBOX_ENA_W1SX(0));
}

static irqreturn_t otx_cpt_mbx0_intr_handler(int __always_unused irq,
					     void *cpt)
{
	otx_cpt_mbox_intr_handler(cpt, 0);

	return IRQ_HANDLED;
}

static void otx_cpt_reset(struct otx_cpt_device *cpt)
{
	writeq(1, cpt->reg_base + OTX_CPT_PF_RESET);
}

static void otx_cpt_find_max_enabled_cores(struct otx_cpt_device *cpt)
{
	union otx_cptx_pf_constants pf_cnsts = {0};

	pf_cnsts.u = readq(cpt->reg_base + OTX_CPT_PF_CONSTANTS);
	cpt->eng_grps.avail.max_se_cnt = pf_cnsts.s.se;
	cpt->eng_grps.avail.max_ae_cnt = pf_cnsts.s.ae;
}

static u32 otx_cpt_check_bist_status(struct otx_cpt_device *cpt)
{
	union otx_cptx_pf_bist_status bist_sts = {0};

	bist_sts.u = readq(cpt->reg_base + OTX_CPT_PF_BIST_STATUS);
	return bist_sts.u;
}

static u64 otx_cpt_check_exe_bist_status(struct otx_cpt_device *cpt)
{
	union otx_cptx_pf_exe_bist_status bist_sts = {0};

	bist_sts.u = readq(cpt->reg_base + OTX_CPT_PF_EXE_BIST_STATUS);
	return bist_sts.u;
}
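/*
 * One-time PF hardware initialization: reset the CPT block, verify the
 * RAM and engine BIST results, read how many SE/AE cores and VFs the
 * device exposes, and leave all cores disabled until engine groups are
 * configured.
 */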
static int otx_cpt_device_init(struct otx_cpt_device *cpt)
{
	struct device *dev = &cpt->pdev->dev;
	u16 sdevid;
	u64 bist;

	/* Reset the PF when probed first */
	otx_cpt_reset(cpt);
	mdelay(100);

	pci_read_config_word(cpt->pdev, PCI_SUBSYSTEM_ID, &sdevid);

	/* Check BIST status */
	bist = (u64)otx_cpt_check_bist_status(cpt);
	if (bist) {
		dev_err(dev, "RAM BIST failed with code 0x%llx", bist);
		return -ENODEV;
	}

	bist = otx_cpt_check_exe_bist_status(cpt);
	if (bist) {
		dev_err(dev, "Engine BIST failed with code 0x%llx", bist);
		return -ENODEV;
	}

	/* Get max enabled cores */
	otx_cpt_find_max_enabled_cores(cpt);

	if ((sdevid == OTX_CPT_PCI_PF_SUBSYS_ID) &&
	    (cpt->eng_grps.avail.max_se_cnt == 0)) {
		cpt->pf_type = OTX_CPT_AE;
	} else if ((sdevid == OTX_CPT_PCI_PF_SUBSYS_ID) &&
		   (cpt->eng_grps.avail.max_ae_cnt == 0)) {
		cpt->pf_type = OTX_CPT_SE;
	}

	/* Get max VQs/VFs supported by the device */
	cpt->max_vfs = pci_sriov_get_totalvfs(cpt->pdev);

	/* Disable all cores */
	otx_cpt_disable_all_cores(cpt);

	return 0;
}

static int otx_cpt_register_interrupts(struct otx_cpt_device *cpt)
{
	struct device *dev = &cpt->pdev->dev;
	u32 mbox_int_idx = OTX_CPT_PF_MBOX_INT;
	u32 num_vec = OTX_CPT_PF_MSIX_VECTORS;
	int ret;

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(cpt->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&cpt->pdev->dev,
			"Request for #%d msix vectors failed\n",
			num_vec);
		return ret;
	}

	/* Register mailbox interrupt handlers */
	ret = request_irq(pci_irq_vector(cpt->pdev,
			  OTX_CPT_PF_INT_VEC_E_MBOXX(mbox_int_idx, 0)),
			  otx_cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
	if (ret) {
		dev_err(dev, "Request irq failed\n");
		pci_free_irq_vectors(cpt->pdev);
		return ret;
	}
	/* Enable mailbox interrupt */
	otx_cpt_enable_mbox_interrupts(cpt);
	return 0;
}

static void otx_cpt_unregister_interrupts(struct otx_cpt_device *cpt)
{
	u32 mbox_int_idx = OTX_CPT_PF_MBOX_INT;

	otx_cpt_disable_mbox_interrupts(cpt);
	free_irq(pci_irq_vector(cpt->pdev,
		 OTX_CPT_PF_INT_VEC_E_MBOXX(mbox_int_idx, 0)),
		 cpt);
	pci_free_irq_vectors(cpt->pdev);
}
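/*
 * PCI sriov_configure callback: on enable, make sure default engine
 * groups exist before the VFs come up, enable SR-IOV and pin the module
 * while VFs are active; on disable (numvfs == 0), tear down SR-IOV and
 * make the engine groups writable again.
 */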
static int otx_cpt_sriov_configure(struct pci_dev *pdev, int numvfs)
{
	struct otx_cpt_device *cpt = pci_get_drvdata(pdev);
	int ret = 0;

	if (numvfs > cpt->max_vfs)
		numvfs = cpt->max_vfs;

	if (numvfs > 0) {
		ret = otx_cpt_try_create_default_eng_grps(cpt->pdev,
							  &cpt->eng_grps,
							  cpt->pf_type);
		if (ret)
			return ret;

		cpt->vfs_enabled = numvfs;
		ret = pci_enable_sriov(pdev, numvfs);
		if (ret) {
			cpt->vfs_enabled = 0;
			return ret;
		}
		otx_cpt_set_eng_grps_is_rdonly(&cpt->eng_grps, true);
		try_module_get(THIS_MODULE);
		ret = numvfs;
	} else {
		pci_disable_sriov(pdev);
		otx_cpt_set_eng_grps_is_rdonly(&cpt->eng_grps, false);
		module_put(THIS_MODULE);
		cpt->vfs_enabled = 0;
	}
	dev_notice(&cpt->pdev->dev, "VFs enabled: %d\n", ret);

	return ret;
}
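/*
 * Probe sequence: enable the PCI device, set the 48-bit DMA masks, map
 * the PF configuration BAR, initialize the hardware, hook up the
 * mailbox interrupt and set up engine group management. Errors unwind
 * in reverse order through the goto labels below.
 */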
static int otx_cpt_probe(struct pci_dev *pdev,
			 const struct pci_device_id __always_unused *ent)
{
	struct device *dev = &pdev->dev;
	struct otx_cpt_device *cpt;
	int err;

	cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
	if (!cpt)
		return -ENOMEM;

	pci_set_drvdata(pdev, cpt);
	cpt->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_clear_drvdata;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	/* Map PF's configuration registers */
	cpt->reg_base = pci_iomap(pdev, OTX_CPT_PF_PCI_CFG_BAR, 0);
	if (!cpt->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	/* CPT device HW initialization */
	err = otx_cpt_device_init(cpt);
	if (err)
		goto err_unmap_region;

	/* Register interrupts */
	err = otx_cpt_register_interrupts(cpt);
	if (err)
		goto err_unmap_region;

	/* Initialize engine groups */
	err = otx_cpt_init_eng_grps(pdev, &cpt->eng_grps, cpt->pf_type);
	if (err)
		goto err_unregister_interrupts;

	return 0;

err_unregister_interrupts:
	otx_cpt_unregister_interrupts(cpt);
err_unmap_region:
	pci_iounmap(pdev, cpt->reg_base);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_clear_drvdata:
	pci_set_drvdata(pdev, NULL);

	return err;
}
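/*
 * Teardown mirrors probe: disable any VFs first so the mailbox goes
 * quiet, clean up engine groups, drop interrupts, detach all cores from
 * their groups and release the PCI resources.
 */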
static void otx_cpt_remove(struct pci_dev *pdev)
{
	struct otx_cpt_device *cpt = pci_get_drvdata(pdev);

	if (!cpt)
		return;

	/* Disable VFs */
	pci_disable_sriov(pdev);
	/* Cleanup engine groups */
	otx_cpt_cleanup_eng_grps(pdev, &cpt->eng_grps);
	/* Disable CPT PF interrupts */
	otx_cpt_unregister_interrupts(cpt);
	/* Disengage SE and AE cores from all groups */
	otx_cpt_disable_all_cores(cpt);
	pci_iounmap(pdev, cpt->reg_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

/* Supported devices */
static const struct pci_device_id otx_cpt_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OTX_CPT_PCI_PF_DEVICE_ID) },
	{ 0, }  /* end of table */
};

static struct pci_driver otx_cpt_pci_driver = {
	.name = DRV_NAME,
	.id_table = otx_cpt_id_table,
	.probe = otx_cpt_probe,
	.remove = otx_cpt_remove,
	.sriov_configure = otx_cpt_sriov_configure
};

module_pci_driver(otx_cpt_pci_driver);

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell OcteonTX CPT Physical Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, otx_cpt_id_table);