// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */

#include "idpf.h"
#include "idpf_devids.h"
#include "idpf_virtchnl.h"

#define DRV_SUMMARY	"Intel(R) Infrastructure Data Path Function Linux Driver"

MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_IMPORT_NS(LIBETH);
MODULE_LICENSE("GPL");

/**
 * idpf_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void idpf_remove(struct pci_dev *pdev)
{
	struct idpf_adapter *adapter = pci_get_drvdata(pdev);
	int i;

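	/* Mark removal in progress so deferred work that checks this flag
	 * bails out instead of continuing to touch resources that are about
	 * to be released.
	 */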
	set_bit(IDPF_REMOVE_IN_PROG, adapter->flags);

	/* Wait until vc_event_task is done to consider if any hard reset is
	 * in progress else we may go ahead and release the resources but the
	 * thread doing the hard reset might continue the init path and
	 * end up in bad state.
	 */
	cancel_delayed_work_sync(&adapter->vc_event_task);
	if (adapter->num_vfs)
		idpf_sriov_configure(pdev, 0);

	idpf_vc_core_deinit(adapter);

	/* Be a good citizen and leave the device clean on exit */
	adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET);
	idpf_deinit_dflt_mbx(adapter);

	if (!adapter->netdevs)
		goto destroy_wqs;

	/* There are some cases where it's possible to still have netdevs
	 * registered with the stack at this point, e.g. if the driver detected
	 * a HW reset and rmmod is called before it fully recovers. Unregister
	 * any stale netdevs here.
	 */
	for (i = 0; i < adapter->max_vports; i++) {
		if (!adapter->netdevs[i])
			continue;
		if (adapter->netdevs[i]->reg_state != NETREG_UNINITIALIZED)
			unregister_netdev(adapter->netdevs[i]);
		free_netdev(adapter->netdevs[i]);
		adapter->netdevs[i] = NULL;
	}

destroy_wqs:
	destroy_workqueue(adapter->init_wq);
	destroy_workqueue(adapter->serv_wq);
	destroy_workqueue(adapter->mbx_wq);
	destroy_workqueue(adapter->stats_wq);
	destroy_workqueue(adapter->vc_event_wq);

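	/* Release the per-vport config structures, the netdev pointer array
	 * and the virtchnl transaction manager allocated while the adapter
	 * was being brought up.
	 */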
	for (i = 0; i < adapter->max_vports; i++) {
		kfree(adapter->vport_config[i]);
		adapter->vport_config[i] = NULL;
	}
	kfree(adapter->vport_config);
	adapter->vport_config = NULL;
	kfree(adapter->netdevs);
	adapter->netdevs = NULL;
	kfree(adapter->vcxn_mngr);
	adapter->vcxn_mngr = NULL;

	mutex_destroy(&adapter->vport_ctrl_lock);
	mutex_destroy(&adapter->vector_lock);
	mutex_destroy(&adapter->queue_lock);
	mutex_destroy(&adapter->vc_buf_lock);

	pci_set_drvdata(pdev, NULL);
	kfree(adapter);
}

/**
 * idpf_shutdown - PCI callback for shutting down device
 * @pdev: PCI device information struct
 */
static void idpf_shutdown(struct pci_dev *pdev)
{
	idpf_remove(pdev);

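	/* Put the device into a low-power state only if the system is
	 * actually powering off; on a reboot it is left as-is so it can be
	 * re-initialized cleanly on the next boot.
	 */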
	if (system_state == SYSTEM_POWER_OFF)
		pci_set_power_state(pdev, PCI_D3hot);
}

/**
 * idpf_cfg_hw - Initialize HW struct
 * @adapter: adapter to setup hw struct for
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_cfg_hw(struct idpf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct idpf_hw *hw = &adapter->hw;

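	/* BAR 0 was mapped with pcim_iomap_regions() in probe; grab the
	 * managed mapping here so register accesses can go through hw_addr.
	 */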
	hw->hw_addr = pcim_iomap_table(pdev)[0];
	if (!hw->hw_addr) {
		pci_err(pdev, "failed to allocate PCI iomap table\n");

		return -ENOMEM;
	}

	hw->back = adapter;

	return 0;
}

/**
 * idpf_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in idpf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct idpf_adapter *adapter;
	int err;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

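	/* Request the split queue model for both Tx and Rx by default; the
	 * queue model actually used also depends on the capabilities the
	 * device reports over virtchnl later in initialization.
	 */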
	adapter->req_tx_splitq = true;
	adapter->req_rx_splitq = true;

	switch (ent->device) {
	case IDPF_DEV_ID_PF:
		idpf_dev_ops_init(adapter);
		break;
	case IDPF_DEV_ID_VF:
		idpf_vf_dev_ops_init(adapter);
		adapter->crc_enable = true;
		break;
	default:
		err = -ENODEV;
		dev_err(&pdev->dev, "Unexpected dev ID 0x%x in idpf probe\n",
			ent->device);
		goto err_free;
	}

	adapter->pdev = pdev;
	err = pcim_enable_device(pdev);
	if (err)
		goto err_free;

	err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (err) {
		pci_err(pdev, "pcim_iomap_regions failed %pe\n", ERR_PTR(err));

		goto err_free;
	}

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err) {
		pci_err(pdev, "DMA configuration failed: %pe\n", ERR_PTR(err));

		goto err_free;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, adapter);

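	/* Allocate a dedicated workqueue per deferred task (init, service,
	 * mailbox, statistics and virtchnl events); each name carries the
	 * driver and device name so the kworker threads are easy to identify.
	 */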
	adapter->init_wq = alloc_workqueue("%s-%s-init", 0, 0,
					   dev_driver_string(dev),
					   dev_name(dev));
	if (!adapter->init_wq) {
		dev_err(dev, "Failed to allocate init workqueue\n");
		err = -ENOMEM;
		goto err_free;
	}

	adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0,
					   dev_driver_string(dev),
					   dev_name(dev));
	if (!adapter->serv_wq) {
		dev_err(dev, "Failed to allocate service workqueue\n");
		err = -ENOMEM;
		goto err_serv_wq_alloc;
	}

	adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0,
					  dev_driver_string(dev),
					  dev_name(dev));
	if (!adapter->mbx_wq) {
		dev_err(dev, "Failed to allocate mailbox workqueue\n");
		err = -ENOMEM;
		goto err_mbx_wq_alloc;
	}

	adapter->stats_wq = alloc_workqueue("%s-%s-stats", 0, 0,
					    dev_driver_string(dev),
					    dev_name(dev));
	if (!adapter->stats_wq) {
		dev_err(dev, "Failed to allocate statistics workqueue\n");
		err = -ENOMEM;
		goto err_stats_wq_alloc;
	}

	adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 0, 0,
					       dev_driver_string(dev),
					       dev_name(dev));
	if (!adapter->vc_event_wq) {
		dev_err(dev, "Failed to allocate virtchnl event workqueue\n");
		err = -ENOMEM;
		goto err_vc_event_wq_alloc;
	}

	/* setup msglvl */
	adapter->msg_enable = netif_msg_init(-1, IDPF_AVAIL_NETIF_M);

	err = idpf_cfg_hw(adapter);
	if (err) {
		dev_err(dev, "Failed to configure HW structure for adapter: %d\n",
			err);
		goto err_cfg_hw;
	}

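	/* Initialize the adapter-level locks; see the field documentation of
	 * struct idpf_adapter for exactly which resources each one protects.
	 */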
	mutex_init(&adapter->vport_ctrl_lock);
	mutex_init(&adapter->vector_lock);
	mutex_init(&adapter->queue_lock);
	mutex_init(&adapter->vc_buf_lock);

	INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task);
	INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task);
	INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task);
	INIT_DELAYED_WORK(&adapter->stats_task, idpf_statistics_task);
	INIT_DELAYED_WORK(&adapter->vc_event_task, idpf_vc_event_task);

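	/* Kick off initialization by scheduling the driver-load hard reset.
	 * The delay scales with the PCI function number, which staggers
	 * bring-up when several functions of the same device probe at once.
	 */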
	adapter->dev_ops.reg_ops.reset_reg_init(adapter);
	set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
	queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
			   msecs_to_jiffies(10 * (pdev->devfn & 0x07)));

	return 0;

err_cfg_hw:
	destroy_workqueue(adapter->vc_event_wq);
err_vc_event_wq_alloc:
	destroy_workqueue(adapter->stats_wq);
err_stats_wq_alloc:
	destroy_workqueue(adapter->mbx_wq);
err_mbx_wq_alloc:
	destroy_workqueue(adapter->serv_wq);
err_serv_wq_alloc:
	destroy_workqueue(adapter->init_wq);
err_free:
	kfree(adapter);
	return err;
}

/* idpf_pci_tbl - PCI device ID table for devices handled by this driver */
static const struct pci_device_id idpf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IDPF_DEV_ID_PF)},
	{ PCI_VDEVICE(INTEL, IDPF_DEV_ID_VF)},
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(pci, idpf_pci_tbl);

static struct pci_driver idpf_driver = {
	.name = KBUILD_MODNAME,
	.id_table = idpf_pci_tbl,
	.probe = idpf_probe,
	.sriov_configure = idpf_sriov_configure,
	.remove = idpf_remove,
	.shutdown = idpf_shutdown,
};
module_pci_driver(idpf_driver);