xref: /linux/drivers/net/ethernet/intel/idpf/idpf_main.c (revision 8e621c9a337555c914cf1664605edfaa6f839774)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2023 Intel Corporation */
3 
4 #include "idpf.h"
5 #include "idpf_devids.h"
6 #include "idpf_virtchnl.h"
7 
8 #define DRV_SUMMARY	"Intel(R) Infrastructure Data Path Function Linux Driver"
9 
10 MODULE_DESCRIPTION(DRV_SUMMARY);
11 MODULE_IMPORT_NS("LIBETH");
12 MODULE_IMPORT_NS("LIBETH_XDP");
13 MODULE_LICENSE("GPL");
14 
/**
 * idpf_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * Tears down the adapter in the reverse order of probe: stops deferred
 * work, disables SR-IOV, releases virtchnl/mailbox resources, unregisters
 * any remaining netdevs, then destroys workqueues and frees all adapter
 * memory.
 */
static void idpf_remove(struct pci_dev *pdev)
{
	struct idpf_adapter *adapter = pci_get_drvdata(pdev);
	int i;

	/* Tell any in-flight reset/init paths that removal has started so
	 * they bail out instead of re-initializing released resources.
	 */
	set_bit(IDPF_REMOVE_IN_PROG, adapter->flags);

	/* Wait until vc_event_task is done to consider if any hard reset is
	 * in progress else we may go ahead and release the resources but the
	 * thread doing the hard reset might continue the init path and
	 * end up in bad state.
	 */
	cancel_delayed_work_sync(&adapter->vc_event_task);
	/* Disable any active VFs before tearing down core resources */
	if (adapter->num_vfs)
		idpf_sriov_configure(pdev, 0);

	idpf_vc_core_deinit(adapter);

	/* Be a good citizen and leave the device clean on exit */
	adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET);
	idpf_deinit_dflt_mbx(adapter);

	if (!adapter->netdevs)
		goto destroy_wqs;

	/* There are some cases where it's possible to still have netdevs
	 * registered with the stack at this point, e.g. if the driver detected
	 * a HW reset and rmmod is called before it fully recovers. Unregister
	 * any stale netdevs here.
	 */
	for (i = 0; i < adapter->max_vports; i++) {
		if (!adapter->netdevs[i])
			continue;
		if (adapter->netdevs[i]->reg_state != NETREG_UNINITIALIZED)
			unregister_netdev(adapter->netdevs[i]);
		free_netdev(adapter->netdevs[i]);
		adapter->netdevs[i] = NULL;
	}

destroy_wqs:
	destroy_workqueue(adapter->init_wq);
	destroy_workqueue(adapter->serv_wq);
	destroy_workqueue(adapter->mbx_wq);
	destroy_workqueue(adapter->stats_wq);
	destroy_workqueue(adapter->vc_event_wq);

	/* Free per-vport config, NULLing each slot to guard against any
	 * late access through the adapter before it is freed below.
	 */
	for (i = 0; i < adapter->max_vports; i++) {
		if (!adapter->vport_config[i])
			continue;
		kfree(adapter->vport_config[i]->user_config.q_coalesce);
		kfree(adapter->vport_config[i]);
		adapter->vport_config[i] = NULL;
	}
	kfree(adapter->vport_config);
	adapter->vport_config = NULL;
	kfree(adapter->netdevs);
	adapter->netdevs = NULL;
	kfree(adapter->vcxn_mngr);
	adapter->vcxn_mngr = NULL;

	mutex_destroy(&adapter->vport_ctrl_lock);
	mutex_destroy(&adapter->vector_lock);
	mutex_destroy(&adapter->queue_lock);
	mutex_destroy(&adapter->vc_buf_lock);

	pci_set_drvdata(pdev, NULL);
	kfree(adapter);
}
87 
/**
 * idpf_shutdown - PCI callback for shutting down device
 * @pdev: PCI device information struct
 *
 * Quiesces the device for shutdown: stops deferred work, releases
 * virtchnl core and default mailbox resources, and, when the system is
 * powering off, drops the device into a low-power state.
 */
static void idpf_shutdown(struct pci_dev *pdev)
{
	struct idpf_adapter *adapter = pci_get_drvdata(pdev);

	/* Make sure no deferred work can run against the device after the
	 * resources below are released.
	 */
	cancel_delayed_work_sync(&adapter->serv_task);
	cancel_delayed_work_sync(&adapter->vc_event_task);
	idpf_vc_core_deinit(adapter);
	idpf_deinit_dflt_mbx(adapter);

	/* Only enter D3hot on an actual power-off, not on reboot/kexec */
	if (system_state == SYSTEM_POWER_OFF)
		pci_set_power_state(pdev, PCI_D3hot);
}
104 
105 /**
106  * idpf_cfg_hw - Initialize HW struct
107  * @adapter: adapter to setup hw struct for
108  *
109  * Returns 0 on success, negative on failure
110  */
idpf_cfg_hw(struct idpf_adapter * adapter)111 static int idpf_cfg_hw(struct idpf_adapter *adapter)
112 {
113 	resource_size_t res_start, mbx_start, rstat_start;
114 	struct pci_dev *pdev = adapter->pdev;
115 	struct idpf_hw *hw = &adapter->hw;
116 	struct device *dev = &pdev->dev;
117 	long len;
118 
119 	res_start = pci_resource_start(pdev, 0);
120 
121 	/* Map mailbox space for virtchnl communication */
122 	mbx_start = res_start + adapter->dev_ops.static_reg_info[0].start;
123 	len = resource_size(&adapter->dev_ops.static_reg_info[0]);
124 	hw->mbx.vaddr = devm_ioremap(dev, mbx_start, len);
125 	if (!hw->mbx.vaddr) {
126 		pci_err(pdev, "failed to allocate BAR0 mbx region\n");
127 
128 		return -ENOMEM;
129 	}
130 	hw->mbx.addr_start = adapter->dev_ops.static_reg_info[0].start;
131 	hw->mbx.addr_len = len;
132 
133 	/* Map rstat space for resets */
134 	rstat_start = res_start + adapter->dev_ops.static_reg_info[1].start;
135 	len = resource_size(&adapter->dev_ops.static_reg_info[1]);
136 	hw->rstat.vaddr = devm_ioremap(dev, rstat_start, len);
137 	if (!hw->rstat.vaddr) {
138 		pci_err(pdev, "failed to allocate BAR0 rstat region\n");
139 
140 		return -ENOMEM;
141 	}
142 	hw->rstat.addr_start = adapter->dev_ops.static_reg_info[1].start;
143 	hw->rstat.addr_len = len;
144 
145 	hw->back = adapter;
146 
147 	return 0;
148 }
149 
/**
 * idpf_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in idpf_pci_tbl
 *
 * Allocates the adapter, selects PF/VF device ops, enables the PCI
 * device, creates the driver workqueues, maps HW registers, and kicks
 * off initialization by scheduling the virtchnl event task. On any
 * failure, previously acquired resources are unwound in reverse order
 * via the goto chain at the bottom.
 *
 * Returns 0 on success, negative on failure
 */
static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct idpf_adapter *adapter;
	int err;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	/* Request split-queue model for both Tx and Rx by default */
	adapter->req_tx_splitq = true;
	adapter->req_rx_splitq = true;

	/* Bind PF- or VF-specific register/virtchnl ops based on device ID */
	switch (ent->device) {
	case IDPF_DEV_ID_PF:
		idpf_dev_ops_init(adapter);
		break;
	case IDPF_DEV_ID_VF:
		idpf_vf_dev_ops_init(adapter);
		adapter->crc_enable = true;
		break;
	default:
		err = -ENODEV;
		dev_err(&pdev->dev, "Unexpected dev ID 0x%x in idpf probe\n",
			ent->device);
		goto err_free;
	}

	adapter->pdev = pdev;
	err = pcim_enable_device(pdev);
	if (err)
		goto err_free;

	err = pcim_request_region(pdev, 0, pci_name(pdev));
	if (err) {
		pci_err(pdev, "pcim_request_region failed %pe\n", ERR_PTR(err));

		goto err_free;
	}

	/* PTM is optional; failure only limits time-sync features */
	err = pci_enable_ptm(pdev, NULL);
	if (err)
		pci_dbg(pdev, "PCIe PTM is not supported by PCIe bus/controller\n");

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err) {
		pci_err(pdev, "DMA configuration failed: %pe\n", ERR_PTR(err));

		goto err_free;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, adapter);

	adapter->init_wq = alloc_workqueue("%s-%s-init",
					   WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
					   dev_driver_string(dev),
					   dev_name(dev));
	if (!adapter->init_wq) {
		dev_err(dev, "Failed to allocate init workqueue\n");
		err = -ENOMEM;
		goto err_free;
	}

	adapter->serv_wq = alloc_workqueue("%s-%s-service",
					   WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
					   dev_driver_string(dev),
					   dev_name(dev));
	if (!adapter->serv_wq) {
		dev_err(dev, "Failed to allocate service workqueue\n");
		err = -ENOMEM;
		goto err_serv_wq_alloc;
	}

	/* Mailbox work is latency-sensitive, hence WQ_HIGHPRI */
	adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", WQ_UNBOUND | WQ_HIGHPRI,
					  0, dev_driver_string(dev),
					  dev_name(dev));
	if (!adapter->mbx_wq) {
		dev_err(dev, "Failed to allocate mailbox workqueue\n");
		err = -ENOMEM;
		goto err_mbx_wq_alloc;
	}

	adapter->stats_wq = alloc_workqueue("%s-%s-stats",
					    WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
					    dev_driver_string(dev),
					    dev_name(dev));
	if (!adapter->stats_wq) {
		dev_err(dev, "Failed to allocate workqueue\n");
		err = -ENOMEM;
		goto err_stats_wq_alloc;
	}

	adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event",
					       WQ_UNBOUND | WQ_MEM_RECLAIM, 0,
					       dev_driver_string(dev),
					       dev_name(dev));
	if (!adapter->vc_event_wq) {
		dev_err(dev, "Failed to allocate virtchnl event workqueue\n");
		err = -ENOMEM;
		goto err_vc_event_wq_alloc;
	}

	/* setup msglvl */
	adapter->msg_enable = netif_msg_init(-1, IDPF_AVAIL_NETIF_M);

	err = idpf_cfg_hw(adapter);
	if (err) {
		dev_err(dev, "Failed to configure HW structure for adapter: %d\n",
			err);
		goto err_cfg_hw;
	}

	mutex_init(&adapter->vport_ctrl_lock);
	mutex_init(&adapter->vector_lock);
	mutex_init(&adapter->queue_lock);
	mutex_init(&adapter->vc_buf_lock);

	INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task);
	INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task);
	INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task);
	INIT_DELAYED_WORK(&adapter->stats_task, idpf_statistics_task);
	INIT_DELAYED_WORK(&adapter->vc_event_task, idpf_vc_event_task);

	/* Kick off init via the driver-load reset path; the delay scales
	 * with the low bits of devfn to stagger functions of one device.
	 */
	adapter->dev_ops.reg_ops.reset_reg_init(adapter);
	set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
	queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
			   msecs_to_jiffies(10 * (pdev->devfn & 0x07)));

	return 0;

err_cfg_hw:
	destroy_workqueue(adapter->vc_event_wq);
err_vc_event_wq_alloc:
	destroy_workqueue(adapter->stats_wq);
err_stats_wq_alloc:
	destroy_workqueue(adapter->mbx_wq);
err_mbx_wq_alloc:
	destroy_workqueue(adapter->serv_wq);
err_serv_wq_alloc:
	destroy_workqueue(adapter->init_wq);
err_free:
	kfree(adapter);
	return err;
}
303 
/* idpf_pci_tbl - PCI Dev idpf ID Table
 *
 * Matches both the PF and VF flavors of the device; probe() selects the
 * corresponding dev_ops based on the matched device ID.
 */
static const struct pci_device_id idpf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IDPF_DEV_ID_PF)},
	{ PCI_VDEVICE(INTEL, IDPF_DEV_ID_VF)},
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(pci, idpf_pci_tbl);

/* PCI driver entry points; SR-IOV is toggled via the sysfs
 * sriov_numvfs attribute, which invokes idpf_sriov_configure().
 */
static struct pci_driver idpf_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= idpf_pci_tbl,
	.probe			= idpf_probe,
	.sriov_configure	= idpf_sriov_configure,
	.remove			= idpf_remove,
	.shutdown		= idpf_shutdown,
};
module_pci_driver(idpf_driver);
322