// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf.h"
#include "idpf_devids.h"
#include "idpf_virtchnl.h"

#define DRV_SUMMARY	"Intel(R) Infrastructure Data Path Function Linux Driver"

MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS("LIBETH");
MODULE_LICENSE("GPL");

/**
 * idpf_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void idpf_remove(struct pci_dev *pdev)
{
	struct idpf_adapter *adapter = pci_get_drvdata(pdev);
	int i;

	set_bit(IDPF_REMOVE_IN_PROG, adapter->flags);

	/* Wait until vc_event_task is done before deciding whether a hard
	 * reset is in progress; otherwise we may release resources while the
	 * thread doing the hard reset is still running the init path and end
	 * up in a bad state.
	 */
	cancel_delayed_work_sync(&adapter->vc_event_task);
	if (adapter->num_vfs)
		idpf_sriov_configure(pdev, 0);

	idpf_vc_core_deinit(adapter);

	/* Be a good citizen and leave the device clean on exit */
	adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET);
	idpf_deinit_dflt_mbx(adapter);

	if (!adapter->netdevs)
		goto destroy_wqs;

	/* There are some cases where it's possible to still have netdevs
	 * registered with the stack at this point, e.g. if the driver detected
	 * a HW reset and rmmod is called before it fully recovers. Unregister
	 * any stale netdevs here.
	 */
	for (i = 0; i < adapter->max_vports; i++) {
		if (!adapter->netdevs[i])
			continue;
		if (adapter->netdevs[i]->reg_state != NETREG_UNINITIALIZED)
			unregister_netdev(adapter->netdevs[i]);
		free_netdev(adapter->netdevs[i]);
		adapter->netdevs[i] = NULL;
	}

destroy_wqs:
	destroy_workqueue(adapter->init_wq);
	destroy_workqueue(adapter->serv_wq);
	destroy_workqueue(adapter->mbx_wq);
	destroy_workqueue(adapter->stats_wq);
	destroy_workqueue(adapter->vc_event_wq);

	for (i = 0; i < adapter->max_vports; i++) {
		kfree(adapter->vport_config[i]);
		adapter->vport_config[i] = NULL;
	}
	kfree(adapter->vport_config);
	adapter->vport_config = NULL;
	kfree(adapter->netdevs);
	adapter->netdevs = NULL;
	kfree(adapter->vcxn_mngr);
	adapter->vcxn_mngr = NULL;

	mutex_destroy(&adapter->vport_ctrl_lock);
	mutex_destroy(&adapter->vector_lock);
	mutex_destroy(&adapter->queue_lock);
	mutex_destroy(&adapter->vc_buf_lock);

	pci_set_drvdata(pdev, NULL);
	kfree(adapter);
}

/**
 * idpf_shutdown - PCI callback for shutting down device
 * @pdev: PCI device information struct
 */
static void idpf_shutdown(struct pci_dev *pdev)
{
	idpf_remove(pdev);

	if (system_state == SYSTEM_POWER_OFF)
		pci_set_power_state(pdev, PCI_D3hot);
}

/**
 * idpf_cfg_hw - Initialize HW struct
 * @adapter: adapter to set up hw struct for
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_cfg_hw(struct idpf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct idpf_hw *hw = &adapter->hw;

	hw->hw_addr = pcim_iomap_table(pdev)[0];
	if (!hw->hw_addr) {
		pci_err(pdev, "failed to allocate PCI iomap table\n");

		return -ENOMEM;
	}

	hw->back = adapter;

	return 0;
}

/**
 * idpf_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in idpf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct idpf_adapter *adapter;
	int err;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	adapter->req_tx_splitq = true;
	adapter->req_rx_splitq = true;

	switch (ent->device) {
	case IDPF_DEV_ID_PF:
		idpf_dev_ops_init(adapter);
		break;
	case IDPF_DEV_ID_VF:
		idpf_vf_dev_ops_init(adapter);
		adapter->crc_enable = true;
		break;
	default:
		err = -ENODEV;
		dev_err(&pdev->dev, "Unexpected dev ID 0x%x in idpf probe\n",
			ent->device);
		goto err_free;
	}

	adapter->pdev = pdev;
	err = pcim_enable_device(pdev);
	if (err)
		goto err_free;

	err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (err) {
		pci_err(pdev, "pcim_iomap_regions failed %pe\n", ERR_PTR(err));

		goto err_free;
	}

	/* set up for high or low DMA */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err) {
		pci_err(pdev, "DMA configuration failed: %pe\n", ERR_PTR(err));

		goto err_free;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, adapter);

	adapter->init_wq = alloc_workqueue("%s-%s-init",
					   WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
					   dev_driver_string(dev),
					   dev_name(dev));
	if (!adapter->init_wq) {
		dev_err(dev, "Failed to allocate init workqueue\n");
		err = -ENOMEM;
		goto err_free;
	}

	adapter->serv_wq = alloc_workqueue("%s-%s-service",
					   WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
					   dev_driver_string(dev),
					   dev_name(dev));
	if (!adapter->serv_wq) {
		dev_err(dev, "Failed to allocate service workqueue\n");
		err = -ENOMEM;
		goto err_serv_wq_alloc;
	}

	adapter->mbx_wq = alloc_workqueue("%s-%s-mbx",
					  WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
					  dev_driver_string(dev),
					  dev_name(dev));
	if (!adapter->mbx_wq) {
		dev_err(dev, "Failed to allocate mailbox workqueue\n");
		err = -ENOMEM;
		goto err_mbx_wq_alloc;
	}

	adapter->stats_wq = alloc_workqueue("%s-%s-stats",
					    WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
					    dev_driver_string(dev),
					    dev_name(dev));
	if (!adapter->stats_wq) {
		dev_err(dev, "Failed to allocate statistics workqueue\n");
		err = -ENOMEM;
		goto err_stats_wq_alloc;
	}

	adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event",
					       WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
					       dev_driver_string(dev),
					       dev_name(dev));
	if (!adapter->vc_event_wq) {
		dev_err(dev, "Failed to allocate virtchnl event workqueue\n");
		err = -ENOMEM;
		goto err_vc_event_wq_alloc;
	}

	/* setup msglvl */
	adapter->msg_enable = netif_msg_init(-1, IDPF_AVAIL_NETIF_M);

	err = idpf_cfg_hw(adapter);
	if (err) {
		dev_err(dev, "Failed to configure HW structure for adapter: %d\n",
			err);
		goto err_cfg_hw;
	}

	mutex_init(&adapter->vport_ctrl_lock);
	mutex_init(&adapter->vector_lock);
	mutex_init(&adapter->queue_lock);
	mutex_init(&adapter->vc_buf_lock);

	INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task);
	INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task);
	INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task);
	INIT_DELAYED_WORK(&adapter->stats_task, idpf_statistics_task);
	INIT_DELAYED_WORK(&adapter->vc_event_task, idpf_vc_event_task);

	adapter->dev_ops.reg_ops.reset_reg_init(adapter);
	set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
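	/* Kick off the initial hard-reset/init flow on the virtchnl event
	 * workqueue. The delay below is 10 ms times the PCI function number
	 * (pdev->devfn & 0x07), which staggers start-up across functions of
	 * the same device, presumably so they do not all begin their
	 * mailbox/init sequences at the same instant (the rationale is an
	 * assumption; only the arithmetic is taken from the code).
	 */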
	queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
			   msecs_to_jiffies(10 * (pdev->devfn & 0x07)));

	return 0;

err_cfg_hw:
	destroy_workqueue(adapter->vc_event_wq);
err_vc_event_wq_alloc:
	destroy_workqueue(adapter->stats_wq);
err_stats_wq_alloc:
	destroy_workqueue(adapter->mbx_wq);
err_mbx_wq_alloc:
	destroy_workqueue(adapter->serv_wq);
err_serv_wq_alloc:
	destroy_workqueue(adapter->init_wq);
err_free:
	kfree(adapter);
	return err;
}

/* idpf_pci_tbl - PCI Dev idpf ID Table */
static const struct pci_device_id idpf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IDPF_DEV_ID_PF)},
	{ PCI_VDEVICE(INTEL, IDPF_DEV_ID_VF)},
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(pci, idpf_pci_tbl);

static struct pci_driver idpf_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= idpf_pci_tbl,
	.probe			= idpf_probe,
	.sriov_configure	= idpf_sriov_configure,
	.remove			= idpf_remove,
	.shutdown		= idpf_shutdown,
};
module_pci_driver(idpf_driver);