// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf.h"
#include "idpf_devids.h"
#include "idpf_lan_vf_regs.h"
#include "idpf_virtchnl.h"

#define DRV_SUMMARY	"Intel(R) Infrastructure Data Path Function Linux Driver"

#define IDPF_NETWORK_ETHERNET_PROGIF	0x01
#define IDPF_CLASS_NETWORK_ETHERNET_PROGIF \
	(PCI_CLASS_NETWORK_ETHERNET << 8 | IDPF_NETWORK_ETHERNET_PROGIF)
#define IDPF_VF_TEST_VAL	0xfeed0000u
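
/*
 * For reference: PCI_CLASS_NETWORK_ETHERNET is 0x0200, so the class value
 * above evaluates to (0x0200 << 8) | 0x01 = 0x020001, the full 24-bit
 * class/subclass/prog-if value matched by the class-based entry in
 * idpf_pci_tbl at the bottom of this file.
 */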

MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS("LIBETH");
MODULE_IMPORT_NS("LIBETH_XDP");
MODULE_LICENSE("GPL");

/**
 * idpf_get_device_type - Determine whether the device is a PF or a VF
 * @pdev: PCI device information struct
 *
 * Return: PF/VF device ID or -%errno on failure.
 */
static int idpf_get_device_type(struct pci_dev *pdev)
{
	void __iomem *addr;
	int ret;

	addr = ioremap(pci_resource_start(pdev, 0) + VF_ARQBAL, 4);
	if (!addr) {
		pci_err(pdev, "Failed to map BAR0 mbx region\n");
		return -EIO;
	}

	writel(IDPF_VF_TEST_VAL, addr);
	if (readl(addr) == IDPF_VF_TEST_VAL)
		ret = IDPF_DEV_ID_VF;
	else
		ret = IDPF_DEV_ID_PF;

	iounmap(addr);

	return ret;
}
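
/*
 * Note on the test above: VF_ARQBAL is the VF mailbox (admin) receive queue
 * base address low register. The write/readback relies on the test pattern
 * sticking only when the function actually implements the VF mailbox
 * registers, which lets the driver tell a VF from a PF at run time for
 * devices matched purely by PCI class code, where ent->device cannot be
 * used to tell them apart.
 */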

/**
 * idpf_dev_init - Initialize device specific parameters
 * @adapter: adapter to initialize
 * @ent: entry in idpf_pci_tbl
 *
 * Return: %0 on success, -%errno on failure.
 */
static int idpf_dev_init(struct idpf_adapter *adapter,
			 const struct pci_device_id *ent)
{
	int ret;

	if (ent->class == IDPF_CLASS_NETWORK_ETHERNET_PROGIF) {
		ret = idpf_get_device_type(adapter->pdev);
		switch (ret) {
		case IDPF_DEV_ID_VF:
			idpf_vf_dev_ops_init(adapter);
			adapter->crc_enable = true;
			break;
		case IDPF_DEV_ID_PF:
			idpf_dev_ops_init(adapter);
			break;
		default:
			return ret;
		}

		return 0;
	}

	switch (ent->device) {
	case IDPF_DEV_ID_PF:
		idpf_dev_ops_init(adapter);
		break;
	case IDPF_DEV_ID_VF:
		idpf_vf_dev_ops_init(adapter);
		adapter->crc_enable = true;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}

/**
 * idpf_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void idpf_remove(struct pci_dev *pdev)
{
	struct idpf_adapter *adapter = pci_get_drvdata(pdev);
	int i;

	set_bit(IDPF_REMOVE_IN_PROG, adapter->flags);

	/* Wait until vc_event_task is done to consider if any hard reset is
	 * in progress; otherwise we may go ahead and release the resources
	 * while the thread doing the hard reset continues the init path and
	 * ends up in a bad state.
	 */
	cancel_delayed_work_sync(&adapter->vc_event_task);
	if (adapter->num_vfs)
		idpf_sriov_configure(pdev, 0);

	idpf_vc_core_deinit(adapter);

	/* Be a good citizen and leave the device clean on exit */
	adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET);
	idpf_deinit_dflt_mbx(adapter);

	if (!adapter->netdevs)
		goto destroy_wqs;

	/* There are some cases where it's possible to still have netdevs
	 * registered with the stack at this point, e.g. if the driver detected
	 * a HW reset and rmmod is called before it fully recovers. Unregister
	 * any stale netdevs here.
	 */
	for (i = 0; i < adapter->max_vports; i++) {
		if (!adapter->netdevs[i])
			continue;
		if (adapter->netdevs[i]->reg_state != NETREG_UNINITIALIZED)
			unregister_netdev(adapter->netdevs[i]);
		free_netdev(adapter->netdevs[i]);
		adapter->netdevs[i] = NULL;
	}

destroy_wqs:
	destroy_workqueue(adapter->init_wq);
	destroy_workqueue(adapter->serv_wq);
	destroy_workqueue(adapter->mbx_wq);
	destroy_workqueue(adapter->stats_wq);
	destroy_workqueue(adapter->vc_event_wq);

	for (i = 0; i < adapter->max_vports; i++) {
		if (!adapter->vport_config[i])
			continue;
		kfree(adapter->vport_config[i]->user_config.q_coalesce);
		kfree(adapter->vport_config[i]);
		adapter->vport_config[i] = NULL;
	}
	kfree(adapter->vport_config);
	adapter->vport_config = NULL;
	kfree(adapter->netdevs);
	adapter->netdevs = NULL;
	kfree(adapter->vcxn_mngr);
	adapter->vcxn_mngr = NULL;

	mutex_destroy(&adapter->vport_ctrl_lock);
	mutex_destroy(&adapter->vector_lock);
	mutex_destroy(&adapter->queue_lock);
	mutex_destroy(&adapter->vc_buf_lock);

	pci_set_drvdata(pdev, NULL);
	kfree(adapter);
}

/**
 * idpf_shutdown - PCI callback for shutting down device
 * @pdev: PCI device information struct
 */
static void idpf_shutdown(struct pci_dev *pdev)
{
	struct idpf_adapter *adapter = pci_get_drvdata(pdev);

	cancel_delayed_work_sync(&adapter->serv_task);
	cancel_delayed_work_sync(&adapter->vc_event_task);
	idpf_vc_core_deinit(adapter);
	idpf_deinit_dflt_mbx(adapter);

	if (system_state == SYSTEM_POWER_OFF)
		pci_set_power_state(pdev, PCI_D3hot);
}

/**
 * idpf_cfg_hw - Initialize HW struct
 * @adapter: adapter to set up the HW struct for
 *
 * Return: %0 on success, -%errno on failure.
 */
static int idpf_cfg_hw(struct idpf_adapter *adapter)
{
	resource_size_t res_start, mbx_start, rstat_start;
	struct pci_dev *pdev = adapter->pdev;
	struct idpf_hw *hw = &adapter->hw;
	struct device *dev = &pdev->dev;
	long len;

	res_start = pci_resource_start(pdev, 0);

	/* Map mailbox space for virtchnl communication */
	mbx_start = res_start + adapter->dev_ops.static_reg_info[0].start;
	len = resource_size(&adapter->dev_ops.static_reg_info[0]);
	hw->mbx.vaddr = devm_ioremap(dev, mbx_start, len);
	if (!hw->mbx.vaddr) {
		pci_err(pdev, "failed to map BAR0 mbx region\n");

		return -ENOMEM;
	}
	hw->mbx.addr_start = adapter->dev_ops.static_reg_info[0].start;
	hw->mbx.addr_len = len;

	/* Map rstat space for resets */
	rstat_start = res_start + adapter->dev_ops.static_reg_info[1].start;
	len = resource_size(&adapter->dev_ops.static_reg_info[1]);
	hw->rstat.vaddr = devm_ioremap(dev, rstat_start, len);
	if (!hw->rstat.vaddr) {
		pci_err(pdev, "failed to map BAR0 rstat region\n");

		return -ENOMEM;
	}
	hw->rstat.addr_start = adapter->dev_ops.static_reg_info[1].start;
	hw->rstat.addr_len = len;

	hw->back = adapter;

	return 0;
}

/**
 * idpf_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in idpf_pci_tbl
 *
 * Return: %0 on success, -%errno on failure.
 */
static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct idpf_adapter *adapter;
	int err;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	adapter->req_tx_splitq = true;
	adapter->req_rx_splitq = true;

	adapter->pdev = pdev;
	err = pcim_enable_device(pdev);
	if (err)
		goto err_free;

	err = pcim_request_region(pdev, 0, pci_name(pdev));
	if (err) {
		pci_err(pdev, "pcim_request_region failed %pe\n", ERR_PTR(err));

		goto err_free;
	}

	err = pci_enable_ptm(pdev, NULL);
	if (err)
		pci_dbg(pdev, "PCIe PTM is not supported by PCIe bus/controller\n");

	/* set up for high or low DMA */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err) {
		pci_err(pdev, "DMA configuration failed: %pe\n", ERR_PTR(err));

		goto err_free;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, adapter);

	adapter->init_wq = alloc_workqueue("%s-%s-init",
					   WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
					   dev_driver_string(dev),
					   dev_name(dev));
	if (!adapter->init_wq) {
		dev_err(dev, "Failed to allocate init workqueue\n");
		err = -ENOMEM;
		goto err_free;
	}

	adapter->serv_wq = alloc_workqueue("%s-%s-service",
					   WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
					   dev_driver_string(dev),
					   dev_name(dev));
	if (!adapter->serv_wq) {
		dev_err(dev, "Failed to allocate service workqueue\n");
		err = -ENOMEM;
		goto err_serv_wq_alloc;
	}

	adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", WQ_UNBOUND | WQ_HIGHPRI,
					  0, dev_driver_string(dev),
					  dev_name(dev));
	if (!adapter->mbx_wq) {
		dev_err(dev, "Failed to allocate mailbox workqueue\n");
		err = -ENOMEM;
		goto err_mbx_wq_alloc;
	}

	adapter->stats_wq = alloc_workqueue("%s-%s-stats",
					    WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
					    dev_driver_string(dev),
					    dev_name(dev));
	if (!adapter->stats_wq) {
		dev_err(dev, "Failed to allocate statistics workqueue\n");
		err = -ENOMEM;
		goto err_stats_wq_alloc;
	}

	adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event",
					       WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
					       dev_driver_string(dev),
					       dev_name(dev));
	if (!adapter->vc_event_wq) {
		dev_err(dev, "Failed to allocate virtchnl event workqueue\n");
		err = -ENOMEM;
		goto err_vc_event_wq_alloc;
	}

	/* setup msglvl */
	adapter->msg_enable = netif_msg_init(-1, IDPF_AVAIL_NETIF_M);

	err = idpf_dev_init(adapter, ent);
	if (err) {
		dev_err(dev, "Unexpected dev ID 0x%x in idpf probe\n",
			ent->device);
		goto destroy_vc_event_wq;
	}

	err = idpf_cfg_hw(adapter);
	if (err) {
		dev_err(dev, "Failed to configure HW structure for adapter: %d\n",
			err);
		goto destroy_vc_event_wq;
	}

	mutex_init(&adapter->vport_ctrl_lock);
	mutex_init(&adapter->vector_lock);
	mutex_init(&adapter->queue_lock);
	mutex_init(&adapter->vc_buf_lock);

	INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task);
	INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task);
	INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task);
	INIT_DELAYED_WORK(&adapter->stats_task, idpf_statistics_task);
	INIT_DELAYED_WORK(&adapter->vc_event_task, idpf_vc_event_task);

	adapter->dev_ops.reg_ops.reset_reg_init(adapter);
	set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
	/* Delay the initial hard reset by 10 ms per PCI function number
	 * (devfn & 0x07), staggering bring-up across the functions of a
	 * multi-function device.
	 */
	queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
			   msecs_to_jiffies(10 * (pdev->devfn & 0x07)));

	return 0;

destroy_vc_event_wq:
	destroy_workqueue(adapter->vc_event_wq);
err_vc_event_wq_alloc:
	destroy_workqueue(adapter->stats_wq);
err_stats_wq_alloc:
	destroy_workqueue(adapter->mbx_wq);
err_mbx_wq_alloc:
	destroy_workqueue(adapter->serv_wq);
err_serv_wq_alloc:
	destroy_workqueue(adapter->init_wq);
err_free:
	kfree(adapter);
	return err;
}

/* idpf_pci_tbl - PCI Dev idpf ID Table */
static const struct pci_device_id idpf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IDPF_DEV_ID_PF)},
	{ PCI_VDEVICE(INTEL, IDPF_DEV_ID_VF)},
	{ PCI_DEVICE_CLASS(IDPF_CLASS_NETWORK_ETHERNET_PROGIF, ~0)},
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(pci, idpf_pci_tbl);
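
/*
 * The class-based entry above matches any function that advertises the
 * Ethernet class code with programming interface 0x01, regardless of
 * vendor/device ID; for those devices idpf_dev_init() determines PF vs. VF
 * at run time via idpf_get_device_type().
 */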

static struct pci_driver idpf_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= idpf_pci_tbl,
	.probe			= idpf_probe,
	.sriov_configure	= idpf_sriov_configure,
	.remove			= idpf_remove,
	.shutdown		= idpf_shutdown,
};
module_pci_driver(idpf_driver);