xref: /linux/drivers/net/ethernet/netronome/nfp/nfp_net_main.c (revision 32a92f8c89326985e05dce8b22d3f0aa07a3e1bd)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2015-2018 Netronome Systems, Inc. */
3 
4 /*
5  * nfp_net_main.c
6  * Netronome network device driver: Main entry point
7  * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
8  *          Alejandro Lucero <alejandro.lucero@netronome.com>
9  *          Jason McMullan <jason.mcmullan@netronome.com>
10  *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
11  */
12 
13 #include <linux/etherdevice.h>
14 #include <linux/kernel.h>
15 #include <linux/init.h>
16 #include <linux/lockdep.h>
17 #include <linux/pci.h>
18 #include <linux/pci_regs.h>
19 #include <linux/random.h>
20 #include <linux/rtnetlink.h>
21 
22 #include "nfpcore/nfp.h"
23 #include "nfpcore/nfp_cpp.h"
24 #include "nfpcore/nfp_dev.h"
25 #include "nfpcore/nfp_nffw.h"
26 #include "nfpcore/nfp_nsp.h"
27 #include "nfpcore/nfp6000_pcie.h"
28 #include "nfp_app.h"
29 #include "nfp_net_ctrl.h"
30 #include "nfp_net_sriov.h"
31 #include "nfp_net.h"
32 #include "nfp_main.h"
33 #include "nfp_port.h"
34 
35 #define NFP_PF_CSR_SLICE_SIZE	(32 * 1024)
36 
37 /**
38  * nfp_net_get_mac_addr() - Get the MAC address.
39  * @pf:       NFP PF handle
40  * @netdev:   net_device to set MAC address on
41  * @port:     NFP port structure
42  *
43  * First try to get the MAC address from NSP ETH table. If that
44  * fails generate a random address.
45  */
46 void
nfp_net_get_mac_addr(struct nfp_pf * pf,struct net_device * netdev,struct nfp_port * port)47 nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
48 		     struct nfp_port *port)
49 {
50 	struct nfp_eth_table_port *eth_port;
51 
52 	eth_port = __nfp_port_get_eth_port(port);
53 	if (!eth_port) {
54 		eth_hw_addr_random(netdev);
55 		return;
56 	}
57 
58 	eth_hw_addr_set(netdev, eth_port->mac_addr);
59 	ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
60 }
61 
62 static struct nfp_eth_table_port *
nfp_net_find_port(struct nfp_eth_table * eth_tbl,unsigned int index)63 nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int index)
64 {
65 	int i;
66 
67 	for (i = 0; eth_tbl && i < eth_tbl->count; i++)
68 		if (eth_tbl->ports[i].index == index)
69 			return &eth_tbl->ports[i];
70 
71 	return NULL;
72 }
73 
/* Number of data vNICs the firmware exposes for this PF; falls back to 1
 * when the optional "nfd_cfg_pf%u_num_ports" run-time symbol is absent.
 */
static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}
78 
/* Free one vNIC: release per-app state first (data vNICs only), then the
 * port, unlink it from pf->vnics and free the nfp_net itself.  Order
 * matters - the app callback may still reference nn->port.
 */
static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_free(pf->app, nn);
	nfp_port_free(nn->port);
	list_del(&nn->vnic_list);
	pf->num_vnics--;
	nfp_net_free(nn);
}
88 
nfp_net_pf_free_vnics(struct nfp_pf * pf)89 static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
90 {
91 	struct nfp_net *nn, *next;
92 
93 	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list)
94 		if (nfp_net_is_data_vnic(nn))
95 			nfp_net_pf_free_vnic(pf, nn);
96 }
97 
/* Allocate one vNIC backed by a slice of the control BAR.
 * @needs_netdev selects a data vNIC (gets a netdev and per-app state) vs
 * the ctrl vNIC.  Queue bases and ring limits are read from the vNIC's own
 * config space; on success the vNIC is appended to pf->vnics.
 *
 * Return: new vNIC on success, ERR_PTR() on failure.
 */
static struct nfp_net *
nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
		      void __iomem *ctrl_bar, void __iomem *qc_bar,
		      int stride, unsigned int id)
{
	u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
	struct nfp_net *nn;
	int err;

	/* Queue base indices and maximum ring counts come from the vNIC's
	 * config BAR.
	 */
	tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	/* Allocate and initialise the vNIC */
	nn = nfp_net_alloc(pf->pdev, pf->dev_info, ctrl_bar, needs_netdev,
			   n_tx_rings, n_rx_rings);
	if (IS_ERR(nn))
		return nn;

	nn->app = pf->app;
	/* Per-queue doorbells are slices of the queue controller BAR */
	nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->dp.is_vf = 0;
	nn->stride_rx = stride;
	nn->stride_tx = stride;

	/* Only data vNICs carry per-app state */
	if (needs_netdev) {
		err = nfp_app_vnic_alloc(pf->app, nn, id);
		if (err) {
			nfp_net_free(nn);
			return ERR_PTR(err);
		}
	}

	pf->num_vnics++;
	list_add_tail(&nn->vnic_list, &pf->vnics);

	return nn;
}
138 
/* Second-stage vNIC bring-up: register the devlink port (if the vNIC has
 * one), run core init, add debugfs entries and let the app finish
 * data-vNIC init.  Unwinds the completed steps in reverse on failure.
 */
static int
nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
	int err;

	nn->id = id;

	if (nn->port) {
		err = nfp_devlink_port_register(pf->app, nn->port);
		if (err)
			return err;
	}

	err = nfp_net_init(nn);
	if (err)
		goto err_devlink_port_clean;

	nfp_net_debugfs_vnic_add(nn, pf->ddir);

	nfp_net_info(nn);

	/* ctrl vNICs skip the app init callback */
	if (nfp_net_is_data_vnic(nn)) {
		err = nfp_app_vnic_init(pf->app, nn);
		if (err)
			goto err_debugfs_vnic_clean;
	}

	return 0;

err_debugfs_vnic_clean:
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
err_devlink_port_clean:
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
	return err;
}
176 
/* Allocate one data vNIC per port, each owning a consecutive
 * NFP_PF_CSR_SLICE_SIZE slice of the control BAR.  vNICs whose port the
 * app marked NFP_PORT_INVALID are freed right away; -ENODEV if none of
 * the vNICs survive.
 */
static int
nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
		       void __iomem *qc_bar, int stride)
{
	struct nfp_net *nn;
	unsigned int i;
	int err;

	for (i = 0; i < pf->max_data_vnics; i++) {
		nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
					   stride, i);
		if (IS_ERR(nn)) {
			err = PTR_ERR(nn);
			goto err_free_prev;
		}

		/* Port state changes re-trigger the async table refresh */
		if (nn->port)
			nn->port->link_cb = nfp_net_refresh_port_table;

		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;

		/* Kill the vNIC if app init marked it as invalid */
		if (nn->port && nn->port->type == NFP_PORT_INVALID)
			nfp_net_pf_free_vnic(pf, nn);
	}

	if (list_empty(&pf->vnics))
		return -ENODEV;

	return 0;

err_free_prev:
	nfp_net_pf_free_vnics(pf);
	return err;
}
212 
/* Undo nfp_net_pf_init_vnic() - app clean (data vNICs only), debugfs,
 * core clean, then devlink port unregister, in strict reverse order.
 */
static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_clean(pf->app, nn);
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
}
222 
nfp_net_pf_alloc_irqs(struct nfp_pf * pf)223 static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
224 {
225 	unsigned int wanted_irqs, num_irqs, vnics_left, irqs_left;
226 	struct nfp_net *nn;
227 
228 	/* Get MSI-X vectors */
229 	wanted_irqs = 0;
230 	list_for_each_entry(nn, &pf->vnics, vnic_list)
231 		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
232 	pf->irq_entries = kzalloc_objs(*pf->irq_entries, wanted_irqs);
233 	if (!pf->irq_entries)
234 		return -ENOMEM;
235 
236 	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
237 				      NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
238 				      wanted_irqs);
239 	if (!num_irqs) {
240 		nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
241 		kfree(pf->irq_entries);
242 		return -ENOMEM;
243 	}
244 
245 	/* Distribute IRQs to vNICs */
246 	irqs_left = num_irqs;
247 	vnics_left = pf->num_vnics;
248 	list_for_each_entry(nn, &pf->vnics, vnic_list) {
249 		unsigned int n;
250 
251 		n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs,
252 			DIV_ROUND_UP(irqs_left, vnics_left));
253 		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
254 				    n);
255 		irqs_left -= n;
256 		vnics_left--;
257 	}
258 
259 	return 0;
260 }
261 
/* Disable MSI-X on the PCI device and free the vector table allocated by
 * nfp_net_pf_alloc_irqs().
 */
static void nfp_net_pf_free_irqs(struct nfp_pf *pf)
{
	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);
}
267 
/* Run second-stage init on every data vNIC, numbering them from 0.
 * On failure the already-initialised vNICs are cleaned in reverse order.
 */
static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
{
	struct nfp_net *nn;
	unsigned int id;
	int err;

	/* Finish vNIC init and register */
	id = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		err = nfp_net_pf_init_vnic(pf, nn, id);
		if (err)
			goto err_prev_deinit;

		id++;
	}

	return 0;

err_prev_deinit:
	/* Walk back over the entries before the failing one */
	list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
		if (nfp_net_is_data_vnic(nn))
			nfp_net_pf_clean_vnic(pf, nn);
	return err;
}
294 
/* Allocate the app for this PF, run its ->init callback under the devlink
 * instance lock and, if the app requires one, map the ctrl vNIC BAR and
 * allocate the ctrl vNIC (no netdev).  Fully unwinds on any failure and
 * leaves pf->app NULL.
 */
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
	struct devlink *devlink = priv_to_devlink(pf);
	u8 __iomem *ctrl_bar;
	int err;

	pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
	if (IS_ERR(pf->app))
		return PTR_ERR(pf->app);

	/* App init may register devlink objects - hold the instance lock */
	devl_lock(devlink);
	err = nfp_app_init(pf->app);
	devl_unlock(devlink);
	if (err)
		goto err_free;

	if (!nfp_app_needs_ctrl_vnic(pf->app))
		return 0;

	ctrl_bar = nfp_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
				    NFP_PF_CSR_SLICE_SIZE, &pf->ctrl_vnic_bar);
	if (IS_ERR(ctrl_bar)) {
		nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n");
		err = PTR_ERR(ctrl_bar);
		goto err_app_clean;
	}

	pf->ctrl_vnic = nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
					      stride, 0);
	if (IS_ERR(pf->ctrl_vnic)) {
		err = PTR_ERR(pf->ctrl_vnic);
		goto err_unmap;
	}

	return 0;

err_unmap:
	nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
	devl_lock(devlink);
	nfp_app_clean(pf->app);
	devl_unlock(devlink);
err_free:
	nfp_app_free(pf->app);
	pf->app = NULL;
	return err;
}
343 
/* Undo nfp_net_pf_app_init(): free the ctrl vNIC and its BAR mapping (if
 * one was created), run the app ->clean callback under the devlink lock
 * and free the app, leaving pf->app NULL.
 */
static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);

	if (pf->ctrl_vnic) {
		nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
		nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
	}

	devl_lock(devlink);
	nfp_app_clean(pf->app);
	devl_unlock(devlink);

	nfp_app_free(pf->app);
	pf->app = NULL;
}
360 
nfp_net_pf_app_start_ctrl(struct nfp_pf * pf)361 static int nfp_net_pf_app_start_ctrl(struct nfp_pf *pf)
362 {
363 	int err;
364 
365 	if (!pf->ctrl_vnic)
366 		return 0;
367 	err = nfp_net_pf_init_vnic(pf, pf->ctrl_vnic, 0);
368 	if (err)
369 		return err;
370 
371 	err = nfp_ctrl_open(pf->ctrl_vnic);
372 	if (err)
373 		goto err_clean_ctrl;
374 
375 	return 0;
376 
377 err_clean_ctrl:
378 	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
379 	return err;
380 }
381 
nfp_net_pf_app_stop_ctrl(struct nfp_pf * pf)382 static void nfp_net_pf_app_stop_ctrl(struct nfp_pf *pf)
383 {
384 	if (!pf->ctrl_vnic)
385 		return;
386 	nfp_ctrl_close(pf->ctrl_vnic);
387 	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
388 }
389 
/* Start the app: ctrl vNIC first, then the app's ->start callback, then
 * re-enable SR-IOV if VFs were already configured.  Unwinds in reverse
 * order on failure.
 */
static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
	int err;

	err = nfp_net_pf_app_start_ctrl(pf);
	if (err)
		return err;

	err = nfp_app_start(pf->app, pf->ctrl_vnic);
	if (err)
		goto err_ctrl_stop;

	/* Re-apply SR-IOV state that survived a driver reload */
	if (pf->num_vfs) {
		err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
		if (err)
			goto err_app_stop;
	}

	return 0;

err_app_stop:
	nfp_app_stop(pf->app);
err_ctrl_stop:
	nfp_net_pf_app_stop_ctrl(pf);
	return err;
}
416 
/* Stop the app in the reverse order of nfp_net_pf_app_start():
 * SR-IOV off first, then the app, then the ctrl vNIC.
 */
static void nfp_net_pf_app_stop(struct nfp_pf *pf)
{
	if (pf->num_vfs)
		nfp_app_sriov_disable(pf->app);
	nfp_app_stop(pf->app);
	nfp_net_pf_app_stop_ctrl(pf);
}
424 
/* Release all CPP area mappings created by nfp_net_pci_map_mem().  The
 * VF-config-table-2, VF-config and MAC-stats areas are optional and are
 * only released when they were actually mapped.
 */
static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
{
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
	nfp_cpp_area_release_free(pf->qc_area);
	nfp_cpp_area_release_free(pf->data_vnic_bar);
}
436 
/* Map the firmware memory areas the PF driver needs:
 *  - data vNIC control BAR (mandatory),
 *  - MAC stats (optional, only when an ETH table exists),
 *  - VF config BAR and VF config table 2 (both optional - missing
 *    run-time symbols (-ENOENT) are tolerated, other errors are not),
 *  - the queue controller area (mandatory).
 * Unwinds every prior mapping on failure.
 */
static int nfp_net_pci_map_mem(struct nfp_pf *pf)
{
	u32 min_size, cpp_id;
	u8 __iomem *mem;
	int err;

	/* One control-BAR slice per data vNIC */
	min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
	mem = nfp_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0",
			       min_size, &pf->data_vnic_bar);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
		return PTR_ERR(mem);
	}

	if (pf->eth_tbl) {
		min_size =  NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
		pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
						  "net.macstats", min_size,
						  &pf->mac_stats_bar);
		if (IS_ERR(pf->mac_stats_mem)) {
			/* Symbol is optional - only -ENOENT is tolerated */
			if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
				err = PTR_ERR(pf->mac_stats_mem);
				goto err_unmap_ctrl;
			}
			pf->mac_stats_mem = NULL;
		}
	}

	pf->vf_cfg_mem = nfp_pf_map_rtsym(pf, "net.vfcfg", "_pf%d_net_vf_bar",
					  NFP_NET_CFG_BAR_SZ * pf->limit_vfs,
					  &pf->vf_cfg_bar);
	if (IS_ERR(pf->vf_cfg_mem)) {
		if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
			err = PTR_ERR(pf->vf_cfg_mem);
			goto err_unmap_mac_stats;
		}
		pf->vf_cfg_mem = NULL;
	}

	/* Per-VF config entries plus the shared mailbox */
	min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ;
	pf->vfcfg_tbl2 = nfp_pf_map_rtsym(pf, "net.vfcfg_tbl2",
					  "_pf%d_net_vf_cfg2",
					  min_size, &pf->vfcfg_tbl2_area);
	if (IS_ERR(pf->vfcfg_tbl2)) {
		if (PTR_ERR(pf->vfcfg_tbl2) != -ENOENT) {
			err = PTR_ERR(pf->vfcfg_tbl2);
			goto err_unmap_vf_cfg;
		}
		pf->vfcfg_tbl2 = NULL;
	}

	cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
	mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id,
			       nfp_qcp_queue_offset(pf->dev_info, 0),
			       pf->dev_info->qc_area_sz, &pf->qc_area);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
		err = PTR_ERR(mem);
		goto err_unmap_vfcfg_tbl2;
	}

	return 0;

err_unmap_vfcfg_tbl2:
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
err_unmap_vf_cfg:
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
err_unmap_mac_stats:
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
err_unmap_ctrl:
	nfp_cpp_area_release_free(pf->data_vnic_bar);
	return err;
}
513 
/* Map NFP_NET_CFG_STS link-rate codes to ethtool SPEED_* values; indexed
 * by the firmware's link-rate field.
 */
static const unsigned int lr_to_speed[] = {
	[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED]	= 0,
	[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]	= SPEED_UNKNOWN,
	[NFP_NET_CFG_STS_LINK_RATE_1G]		= SPEED_1000,
	[NFP_NET_CFG_STS_LINK_RATE_10G]		= SPEED_10000,
	[NFP_NET_CFG_STS_LINK_RATE_25G]		= SPEED_25000,
	[NFP_NET_CFG_STS_LINK_RATE_40G]		= SPEED_40000,
	[NFP_NET_CFG_STS_LINK_RATE_50G]		= SPEED_50000,
	[NFP_NET_CFG_STS_LINK_RATE_100G]	= SPEED_100000,
};
524 
nfp_net_lr2speed(unsigned int linkrate)525 unsigned int nfp_net_lr2speed(unsigned int linkrate)
526 {
527 	if (linkrate < ARRAY_SIZE(lr_to_speed))
528 		return lr_to_speed[linkrate];
529 
530 	return SPEED_UNKNOWN;
531 }
532 
nfp_net_speed2lr(unsigned int speed)533 unsigned int nfp_net_speed2lr(unsigned int speed)
534 {
535 	int i;
536 
537 	for (i = 0; i < ARRAY_SIZE(lr_to_speed); i++) {
538 		if (speed == lr_to_speed[i])
539 			return i;
540 	}
541 
542 	return NFP_NET_CFG_STS_LINK_RATE_UNKNOWN;
543 }
544 
/* Mirror the NSP-reported port speed into the vNIC's config BAR
 * (NFP_NET_CFG_STS_NSP_LINK_RATE).  While the link is down the rate is
 * written as LINK_RATE_UNKNOWN.  Ports whose netdev is not an nfp_net
 * (e.g. representors) are skipped.
 */
static void nfp_net_notify_port_speed(struct nfp_port *port)
{
	struct net_device *netdev = port->netdev;
	struct nfp_net *nn;
	u16 sts;

	if (!nfp_netdev_is_nfp_net(netdev))
		return;

	nn = netdev_priv(netdev);
	sts = nn_readw(nn, NFP_NET_CFG_STS);

	if (!(sts & NFP_NET_CFG_STS_LINK)) {
		nn_writew(nn, NFP_NET_CFG_STS_NSP_LINK_RATE, NFP_NET_CFG_STS_LINK_RATE_UNKNOWN);
		return;
	}

	nn_writew(nn, NFP_NET_CFG_STS_NSP_LINK_RATE, nfp_net_speed2lr(port->eth_port->speed));
}
564 
/* Refresh one port's cached ETH table entry from a freshly read table.
 * Must be called under RTNL.  A port missing from the new table is
 * flagged CHANGED and -EIO is returned; a port whose override config
 * changed is marked NFP_PORT_INVALID so it gets torn down later.
 */
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
			struct nfp_eth_table *eth_table)
{
	struct nfp_eth_table_port *eth_port;

	ASSERT_RTNL();

	eth_port = nfp_net_find_port(eth_table, port->eth_id);
	if (!eth_port) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_warn(cpp, "Warning: port #%d not present after reconfig\n",
			 port->eth_id);
		return -EIO;
	}
	if (eth_port->override_changed) {
		nfp_warn(cpp, "Port #%d config changed, unregistering. Driver reload required before port will be operational again.\n", port->eth_id);
		port->type = NFP_PORT_INVALID;
	}

	/* Copy into the port's own storage and propagate the new speed */
	memcpy(port->eth_port, eth_port, sizeof(*eth_port));
	nfp_net_notify_port_speed(port);

	return 0;
}
590 
/* Synchronously re-read the NSP ETH table and update all ports; must be
 * called with the devlink instance lock held.  Representor state is then
 * resynced and any vNIC whose port became invalid is torn down and freed.
 *
 * Return: 0 on success, -EIO if the table could not be read, or the
 * repr-resync error.
 */
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct nfp_eth_table *eth_table;
	struct nfp_net *nn, *next;
	struct nfp_port *port;
	int err;

	devl_assert_locked(devlink);

	/* Check for nfp_net_pci_remove() racing against us */
	if (list_empty(&pf->vnics))
		return 0;

	/* Update state of all ports */
	rtnl_lock();
	list_for_each_entry(port, &pf->ports, port_list)
		clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(pf->cpp);
	if (!eth_table) {
		/* Re-flag every ETH port so a later refresh retries */
		list_for_each_entry(port, &pf->ports, port_list)
			if (__nfp_port_get_eth_port(port))
				set_bit(NFP_PORT_CHANGED, &port->flags);
		rtnl_unlock();
		nfp_err(pf->cpp, "Error refreshing port config!\n");
		return -EIO;
	}

	list_for_each_entry(port, &pf->ports, port_list)
		if (__nfp_port_get_eth_port(port))
			nfp_net_eth_port_update(pf->cpp, port, eth_table);
	rtnl_unlock();

	kfree(eth_table);

	/* Resync repr state. This may cause reprs to be removed. */
	err = nfp_reprs_resync_phys_ports(pf->app);
	if (err)
		return err;

	/* Shoot off the ports which became invalid */
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nn->port || nn->port->type != NFP_PORT_INVALID)
			continue;

		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	return 0;
}
643 
/* Work handler for pf->port_refresh_work: take the devlink lock and run
 * the synchronous port table refresh.
 */
static void nfp_net_refresh_vnics(struct work_struct *work)
{
	struct nfp_pf *pf = container_of(work, struct nfp_pf,
					 port_refresh_work);
	struct devlink *devlink = priv_to_devlink(pf);

	devl_lock(devlink);
	nfp_net_refresh_port_table_sync(pf);
	devl_unlock(devlink);
}
654 
/* Mark a port as changed and schedule the asynchronous port table
 * refresh; safe to call from contexts that cannot take the locks the
 * synchronous refresh needs.
 */
void nfp_net_refresh_port_table(struct nfp_port *port)
{
	struct nfp_pf *pf = port->app->pf;

	set_bit(NFP_PORT_CHANGED, &port->flags);

	queue_work(pf->wq, &pf->port_refresh_work);
}
663 
nfp_net_refresh_eth_port(struct nfp_port * port)664 int nfp_net_refresh_eth_port(struct nfp_port *port)
665 {
666 	struct nfp_cpp *cpp = port->app->cpp;
667 	struct nfp_eth_table *eth_table;
668 	int ret;
669 
670 	clear_bit(NFP_PORT_CHANGED, &port->flags);
671 
672 	eth_table = nfp_eth_read_ports(cpp);
673 	if (!eth_table) {
674 		set_bit(NFP_PORT_CHANGED, &port->flags);
675 		nfp_err(cpp, "Error refreshing port state table!\n");
676 		return -EIO;
677 	}
678 
679 	ret = nfp_net_eth_port_update(cpp, port, eth_table);
680 
681 	kfree(eth_table);
682 
683 	return ret;
684 }
685 
/*
 * PCI device functions
 */

/* Main PF probe path: validate firmware, map memory, determine the queue
 * stride from the firmware ABI, then bring up the app, vNICs and IRQs.
 * Every step has a matching unwind label executed in reverse on failure.
 */
int nfp_net_pci_probe(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct nfp_net_fw_version fw_ver;
	u8 __iomem *ctrl_bar, *qc_bar;
	int stride;
	int err;

	INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);

	/* No symbol table means no usable firmware */
	if (!pf->rtbl) {
		nfp_err(pf->cpp, "No %s, giving up.\n",
			pf->fw_loaded ? "symbol table" : "firmware found");
		return -EINVAL;
	}

	pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
	/* The rtsym read returns a negative errno through the unsigned field */
	if ((int)pf->max_data_vnics < 0)
		return pf->max_data_vnics;

	err = nfp_net_pci_map_mem(pf);
	if (err)
		return err;

	ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
	qc_bar = nfp_cpp_area_iomem(pf->qc_area);
	if (!ctrl_bar || !qc_bar) {
		err = -EIO;
		goto err_unmap;
	}

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
	    fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.extend, fw_ver.class,
			fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 5:
			stride = 4;
			break;
		default:
			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.extend, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_unmap;
		}
	}

	err = nfp_net_pf_app_init(pf, qc_bar, stride);
	if (err)
		goto err_unmap;

	err = nfp_shared_buf_register(pf);
	if (err)
		goto err_devlink_unreg;

	devl_lock(devlink);
	err = nfp_devlink_params_register(pf);
	if (err)
		goto err_shared_buf_unreg;

	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

	/* Allocate the vnics and do basic init */
	err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride);
	if (err)
		goto err_clean_ddir;

	err = nfp_net_pf_alloc_irqs(pf);
	if (err)
		goto err_free_vnics;

	err = nfp_net_pf_app_start(pf);
	if (err)
		goto err_free_irqs;

	err = nfp_net_pf_init_vnics(pf);
	if (err)
		goto err_stop_app;

	/* Register with devlink only once fully initialised */
	devl_unlock(devlink);
	devlink_register(devlink);

	return 0;

err_stop_app:
	nfp_net_pf_app_stop(pf);
err_free_irqs:
	nfp_net_pf_free_irqs(pf);
err_free_vnics:
	nfp_net_pf_free_vnics(pf);
err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	nfp_devlink_params_unregister(pf);
err_shared_buf_unreg:
	devl_unlock(devlink);
	nfp_shared_buf_unregister(pf);
err_devlink_unreg:
	cancel_work_sync(&pf->port_refresh_work);
	nfp_net_pf_app_clean(pf);
err_unmap:
	nfp_net_pci_unmap_mem(pf);
	return err;
}
804 
/* PF remove path: unregister from devlink, tear down all data vNICs,
 * stop the app, then release debugfs, devlink params, shared buffers,
 * IRQs, app state and memory mappings - the reverse of probe.
 */
void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct nfp_net *nn, *next;

	devlink_unregister(priv_to_devlink(pf));
	devl_lock(devlink);
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	nfp_net_pf_app_stop(pf);
	/* stop app first, to avoid double free of ctrl vNIC's ddir */
	nfp_net_debugfs_dir_clean(&pf->ddir);

	nfp_devlink_params_unregister(pf);

	devl_unlock(devlink);

	nfp_shared_buf_unregister(pf);

	nfp_net_pf_free_irqs(pf);
	nfp_net_pf_app_clean(pf);
	nfp_net_pci_unmap_mem(pf);

	/* Must come after the vNIC list is emptied so a racing refresh
	 * sees an empty list and bails (see refresh_port_table_sync)
	 */
	cancel_work_sync(&pf->port_refresh_work);
}
835