xref: /linux/drivers/net/ethernet/netronome/nfp/nfp_net_main.c (revision 0408c58be5a475c99b271f08d85859f7b59ec767)
1 /*
2  * Copyright (C) 2015-2017 Netronome Systems, Inc.
3  *
4  * This software is dual licensed under the GNU General License Version 2,
5  * June 1991 as shown in the file COPYING in the top-level directory of this
6  * source tree or the BSD 2-Clause License provided below.  You have the
7  * option to license this software under the complete terms of either license.
8  *
9  * The BSD 2-Clause License:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      1. Redistributions of source code must retain the above
16  *         copyright notice, this list of conditions and the following
17  *         disclaimer.
18  *
19  *      2. Redistributions in binary form must reproduce the above
20  *         copyright notice, this list of conditions and the following
21  *         disclaimer in the documentation and/or other materials
22  *         provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 /*
35  * nfp_net_main.c
36  * Netronome network device driver: Main entry point
37  * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
38  *          Alejandro Lucero <alejandro.lucero@netronome.com>
39  *          Jason McMullan <jason.mcmullan@netronome.com>
40  *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
41  */
42 
43 #include <linux/etherdevice.h>
44 #include <linux/kernel.h>
45 #include <linux/init.h>
46 #include <linux/lockdep.h>
47 #include <linux/pci.h>
48 #include <linux/pci_regs.h>
49 #include <linux/msi.h>
50 #include <linux/random.h>
51 #include <linux/rtnetlink.h>
52 
53 #include "nfpcore/nfp.h"
54 #include "nfpcore/nfp_cpp.h"
55 #include "nfpcore/nfp_nffw.h"
56 #include "nfpcore/nfp_nsp.h"
57 #include "nfpcore/nfp6000_pcie.h"
58 #include "nfp_app.h"
59 #include "nfp_net_ctrl.h"
60 #include "nfp_net.h"
61 #include "nfp_main.h"
62 #include "nfp_port.h"
63 
64 #define NFP_PF_CSR_SLICE_SIZE	(32 * 1024)
65 
66 static int nfp_is_ready(struct nfp_pf *pf)
67 {
68 	const char *cp;
69 	long state;
70 	int err;
71 
72 	cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state");
73 	if (!cp)
74 		return 0;
75 
76 	err = kstrtol(cp, 0, &state);
77 	if (err < 0)
78 		return 0;
79 
80 	return state == 15;
81 }
82 
83 /**
84  * nfp_net_get_mac_addr() - Get the MAC address.
85  * @pf:       NFP PF handle
86  * @port:     NFP port structure
87  * @id:	      NFP port id
88  *
89  * First try to get the MAC address from NSP ETH table. If that
90  * fails try HWInfo.  As a last resort generate a random address.
91  */
92 void
93 nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port, unsigned int id)
94 {
95 	struct nfp_eth_table_port *eth_port;
96 	u8 mac_addr[ETH_ALEN];
97 	const char *mac_str;
98 	char name[32];
99 
100 	eth_port = __nfp_port_get_eth_port(port);
101 	if (eth_port) {
102 		ether_addr_copy(port->netdev->dev_addr, eth_port->mac_addr);
103 		ether_addr_copy(port->netdev->perm_addr, eth_port->mac_addr);
104 		return;
105 	}
106 
107 	snprintf(name, sizeof(name), "eth%d.mac", id);
108 
109 	mac_str = nfp_hwinfo_lookup(pf->hwinfo, name);
110 	if (!mac_str) {
111 		nfp_warn(pf->cpp, "Can't lookup MAC address. Generate\n");
112 		eth_hw_addr_random(port->netdev);
113 		return;
114 	}
115 
116 	if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
117 		   &mac_addr[0], &mac_addr[1], &mac_addr[2],
118 		   &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
119 		nfp_warn(pf->cpp, "Can't parse MAC address (%s). Generate.\n",
120 			 mac_str);
121 		eth_hw_addr_random(port->netdev);
122 		return;
123 	}
124 
125 	ether_addr_copy(port->netdev->dev_addr, mac_addr);
126 	ether_addr_copy(port->netdev->perm_addr, mac_addr);
127 }
128 
129 struct nfp_eth_table_port *
130 nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id)
131 {
132 	int i;
133 
134 	for (i = 0; eth_tbl && i < eth_tbl->count; i++)
135 		if (eth_tbl->ports[i].eth_index == id)
136 			return &eth_tbl->ports[i];
137 
138 	return NULL;
139 }
140 
/* Read an optional per-PF firmware run-time symbol.  @format is expanded
 * with the PCIe unit number.  Returns @default_val if the symbol does not
 * exist (-ENOENT), a negative errno on other read errors, otherwise the
 * symbol's value.
 */
static int
nfp_net_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
			       unsigned int default_val)
{
	char name[256];
	int err = 0;
	u64 val;

	snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp));

	val = nfp_rtsym_read_le(pf->rtbl, name, &err);
	if (err) {
		if (err == -ENOENT)
			return default_val;
		nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
		return err;
	}

	/* NOTE(review): u64 narrowed to int here - assumes FW-provided
	 * values fit in a non-negative int; confirm against FW ABI.
	 */
	return val;
}
161 
/* Number of data vNICs the firmware exposes for this PF; defaults to 1
 * when the "nfd_cfg_pf%u_num_ports" symbol is absent.
 */
static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	const char *sym_fmt = "nfd_cfg_pf%u_num_ports";

	return nfp_net_pf_rtsym_read_optional(pf, sym_fmt, 1);
}
166 
167 static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
168 {
169 	return nfp_net_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
170 					      NFP_APP_CORE_NIC);
171 }
172 
173 static u8 __iomem *
174 nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
175 		     unsigned int min_size, struct nfp_cpp_area **area)
176 {
177 	char pf_symbol[256];
178 
179 	snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
180 		 nfp_cppcore_pcie_unit(pf->cpp));
181 
182 	return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area);
183 }
184 
/* Tear down a single vNIC: release its port, unlink it from pf->vnics,
 * drop the vNIC count and free the nfp_net structure.  Inverse of
 * nfp_net_pf_alloc_vnic().
 */
static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	nfp_port_free(nn->port);
	list_del(&nn->vnic_list);
	pf->num_vnics--;
	nfp_net_free(nn);
}
192 
193 static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
194 {
195 	struct nfp_net *nn, *next;
196 
197 	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list)
198 		if (nfp_net_is_data_vnic(nn))
199 			nfp_net_pf_free_vnic(pf, nn);
200 }
201 
/* Allocate one vNIC and populate its basic state from the control BAR:
 * queue bases/counts, FW version, queue BAR pointers and strides.
 * For netdev-backed vNICs the app layer is given a chance to set up the
 * port (nfp_app_vnic_init()).  On success the vNIC is appended to
 * pf->vnics.  Returns the vNIC or an ERR_PTR().
 */
static struct nfp_net *
nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
		      void __iomem *ctrl_bar, void __iomem *qc_bar,
		      int stride, unsigned int eth_id)
{
	u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
	struct nfp_net *nn;
	int err;

	/* Queue layout advertised by firmware in the control BAR */
	tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	/* Allocate and initialise the vNIC */
	nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings);
	if (IS_ERR(nn))
		return nn;

	nn->app = pf->app;
	nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
	nn->dp.ctrl_bar = ctrl_bar;
	/* Queue pointers are offsets into the QC BAR */
	nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->dp.is_vf = 0;
	nn->stride_rx = stride;
	nn->stride_tx = stride;

	if (needs_netdev) {
		err = nfp_app_vnic_init(pf->app, nn, eth_id);
		if (err) {
			nfp_net_free(nn);
			return ERR_PTR(err);
		}
	}

	pf->num_vnics++;
	list_add_tail(&nn->vnic_list, &pf->vnics);

	return nn;
}
243 
/* Finish initialization of an allocated vNIC: run nfp_net_init(),
 * create its debugfs entries and register its devlink port (if any).
 * On devlink failure both debugfs state and device state are unwound.
 */
static int
nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
	int err;

	/* Get ME clock frequency from ctrl BAR
	 * XXX for now frequency is hardcoded until we figure out how
	 * to get the value from nfp-hwinfo into ctrl bar
	 */
	nn->me_freq_mhz = 1200;

	err = nfp_net_init(nn);
	if (err)
		return err;

	nfp_net_debugfs_vnic_add(nn, pf->ddir, id);

	/* Control vNICs have no port, so this only runs for data vNICs */
	if (nn->port) {
		err = nfp_devlink_port_register(pf->app, nn->port);
		if (err)
			goto err_dfs_clean;
	}

	nfp_net_info(nn);

	return 0;

err_dfs_clean:
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
	return err;
}
276 
/* Allocate all data vNICs, one per NFP_PF_CSR_SLICE_SIZE slice of the
 * control BAR.  vNICs whose port the app marked NFP_PORT_INVALID during
 * init are freed immediately.  Returns -ENODEV if no vNIC survived;
 * on allocation failure all previously allocated vNICs are freed.
 */
static int
nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
		       void __iomem *qc_bar, int stride)
{
	struct nfp_net *nn;
	unsigned int i;
	int err;

	for (i = 0; i < pf->max_data_vnics; i++) {
		nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
					   stride, i);
		if (IS_ERR(nn)) {
			err = PTR_ERR(nn);
			goto err_free_prev;
		}

		/* Advance to the next vNIC's control BAR slice */
		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;

		/* Kill the vNIC if app init marked it as invalid */
		if (nn->port && nn->port->type == NFP_PORT_INVALID) {
			nfp_net_pf_free_vnic(pf, nn);
			continue;
		}
	}

	if (list_empty(&pf->vnics))
		return -ENODEV;

	return 0;

err_free_prev:
	nfp_net_pf_free_vnics(pf);
	return err;
}
311 
/* Undo nfp_net_pf_init_vnic(): unregister the devlink port (if any),
 * remove debugfs entries, clean the device state and let the app layer
 * release whatever it attached to the vNIC.
 */
static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
	nfp_app_vnic_clean(pf->app, nn);
}
320 
/* Allocate MSI-X vectors for all vNICs and distribute them.  Requests
 * the sum of every vNIC's wanted vectors but accepts as few as
 * NFP_NET_MIN_VNIC_IRQS per vNIC.  Vectors are then handed out in list
 * order: each vNIC gets an even share of what is left, capped at what
 * it asked for.
 */
static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
{
	unsigned int wanted_irqs, num_irqs, vnics_left, irqs_left;
	struct nfp_net *nn;

	/* Get MSI-X vectors */
	wanted_irqs = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list)
		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
				  GFP_KERNEL);
	if (!pf->irq_entries)
		return -ENOMEM;

	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
				      NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
				      wanted_irqs);
	if (!num_irqs) {
		nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
		kfree(pf->irq_entries);
		return -ENOMEM;
	}

	/* Distribute IRQs to vNICs */
	irqs_left = num_irqs;
	vnics_left = pf->num_vnics;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		unsigned int n;

		/* Even split of the remainder, capped at this vNIC's want */
		n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs,
			DIV_ROUND_UP(irqs_left, vnics_left));
		/* num_irqs - irqs_left == index of first unassigned entry */
		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
				    n);
		irqs_left -= n;
		vnics_left--;
	}

	return 0;
}
360 
/* Disable the device's MSI-X vectors, then free the entry array
 * allocated by nfp_net_pf_alloc_irqs().
 */
static void nfp_net_pf_free_irqs(struct nfp_pf *pf)
{
	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);
}
366 
/* Run second-stage init on every data vNIC.  @id numbers data vNICs
 * only (control vNICs are skipped and initialized separately).  On
 * failure, walks the list back and cleans every vNIC initialized so
 * far.
 */
static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
{
	struct nfp_net *nn;
	unsigned int id;
	int err;

	/* Finish vNIC init and register */
	id = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		err = nfp_net_pf_init_vnic(pf, nn, id);
		if (err)
			goto err_prev_deinit;

		id++;
	}

	return 0;

err_prev_deinit:
	/* Unwind only the vNICs before the one that failed */
	list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
		if (nfp_net_is_data_vnic(nn))
			nfp_net_pf_clean_vnic(pf, nn);
	return err;
}
393 
394 static int
395 nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
396 {
397 	u8 __iomem *ctrl_bar;
398 	int err;
399 
400 	pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
401 	if (IS_ERR(pf->app))
402 		return PTR_ERR(pf->app);
403 
404 	err = nfp_app_init(pf->app);
405 	if (err)
406 		goto err_free;
407 
408 	if (!nfp_app_needs_ctrl_vnic(pf->app))
409 		return 0;
410 
411 	ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
412 					NFP_PF_CSR_SLICE_SIZE,
413 					&pf->ctrl_vnic_bar);
414 	if (IS_ERR(ctrl_bar)) {
415 		nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
416 		err = PTR_ERR(ctrl_bar);
417 		goto err_app_clean;
418 	}
419 
420 	pf->ctrl_vnic =	nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
421 					      stride, 0);
422 	if (IS_ERR(pf->ctrl_vnic)) {
423 		err = PTR_ERR(pf->ctrl_vnic);
424 		goto err_unmap;
425 	}
426 
427 	return 0;
428 
429 err_unmap:
430 	nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
431 err_app_clean:
432 	nfp_app_clean(pf->app);
433 err_free:
434 	nfp_app_free(pf->app);
435 	pf->app = NULL;
436 	return err;
437 }
438 
/* Inverse of nfp_net_pf_app_init(): free the control vNIC and its BAR
 * mapping (if one was allocated), then clean and free the app.
 */
static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
	if (pf->ctrl_vnic) {
		nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
		nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
	}
	nfp_app_clean(pf->app);
	nfp_app_free(pf->app);
	pf->app = NULL;
}
449 
/* Initialize and open the control vNIC, if the app has one.  No-op
 * (returns 0) otherwise.  On open failure the vNIC init is unwound.
 */
static int nfp_net_pf_app_start_ctrl(struct nfp_pf *pf)
{
	int err;

	if (!pf->ctrl_vnic)
		return 0;
	err = nfp_net_pf_init_vnic(pf, pf->ctrl_vnic, 0);
	if (err)
		return err;

	err = nfp_ctrl_open(pf->ctrl_vnic);
	if (err)
		goto err_clean_ctrl;

	return 0;

err_clean_ctrl:
	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
	return err;
}
470 
/* Close and clean the control vNIC, if the app has one.  Inverse of
 * nfp_net_pf_app_start_ctrl().
 */
static void nfp_net_pf_app_stop_ctrl(struct nfp_pf *pf)
{
	if (!pf->ctrl_vnic)
		return;
	nfp_ctrl_close(pf->ctrl_vnic);
	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
}
478 
/* Start the app: bring up the control vNIC first, then start the app
 * proper, then (re-)enable SR-IOV if VFs were already configured.
 * Each step is unwound in reverse on failure.
 */
static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
	int err;

	err = nfp_net_pf_app_start_ctrl(pf);
	if (err)
		return err;

	err = nfp_app_start(pf->app, pf->ctrl_vnic);
	if (err)
		goto err_ctrl_stop;

	/* Re-enable VFs that were active before (e.g. across FW reload) */
	if (pf->num_vfs) {
		err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
		if (err)
			goto err_app_stop;
	}

	return 0;

err_app_stop:
	nfp_app_stop(pf->app);
err_ctrl_stop:
	nfp_net_pf_app_stop_ctrl(pf);
	return err;
}
505 
/* Stop the app in reverse order of nfp_net_pf_app_start():
 * SR-IOV first, then the app, then the control vNIC.
 */
static void nfp_net_pf_app_stop(struct nfp_pf *pf)
{
	if (pf->num_vfs)
		nfp_app_sriov_disable(pf->app);
	nfp_app_stop(pf->app);
	nfp_net_pf_app_stop_ctrl(pf);
}
513 
/* Release all CPP areas mapped by nfp_net_pci_map_mem().  The VF config
 * and MAC stats BARs are optional and may be NULL.
 */
static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
{
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
	nfp_cpp_area_release_free(pf->qc_area);
	nfp_cpp_area_release_free(pf->data_vnic_bar);
}
523 
524 static int nfp_net_pci_map_mem(struct nfp_pf *pf)
525 {
526 	u8 __iomem *mem;
527 	u32 min_size;
528 	int err;
529 
530 	min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
531 	mem = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%d_net_bar0",
532 				   min_size, &pf->data_vnic_bar);
533 	if (IS_ERR(mem)) {
534 		nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
535 		return PTR_ERR(mem);
536 	}
537 
538 	min_size =  NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
539 	pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
540 					  "net.macstats", min_size,
541 					  &pf->mac_stats_bar);
542 	if (IS_ERR(pf->mac_stats_mem)) {
543 		if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
544 			err = PTR_ERR(pf->mac_stats_mem);
545 			goto err_unmap_ctrl;
546 		}
547 		pf->mac_stats_mem = NULL;
548 	}
549 
550 	pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg",
551 					      "_pf%d_net_vf_bar",
552 					      NFP_NET_CFG_BAR_SZ *
553 					      pf->limit_vfs, &pf->vf_cfg_bar);
554 	if (IS_ERR(pf->vf_cfg_mem)) {
555 		if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
556 			err = PTR_ERR(pf->vf_cfg_mem);
557 			goto err_unmap_mac_stats;
558 		}
559 		pf->vf_cfg_mem = NULL;
560 	}
561 
562 	mem = nfp_cpp_map_area(pf->cpp, "net.qc", 0, 0,
563 			       NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
564 			       &pf->qc_area);
565 	if (IS_ERR(mem)) {
566 		nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
567 		err = PTR_ERR(mem);
568 		goto err_unmap_vf_cfg;
569 	}
570 
571 	return 0;
572 
573 err_unmap_vf_cfg:
574 	if (pf->vf_cfg_bar)
575 		nfp_cpp_area_release_free(pf->vf_cfg_bar);
576 err_unmap_mac_stats:
577 	if (pf->mac_stats_bar)
578 		nfp_cpp_area_release_free(pf->mac_stats_bar);
579 err_unmap_ctrl:
580 	nfp_cpp_area_release_free(pf->data_vnic_bar);
581 	return err;
582 }
583 
/* Final teardown once all data vNICs are gone: stop the app (and its
 * control vNIC), remove debugfs, free IRQs, clean the app and unmap
 * the control memory areas.
 */
static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{
	nfp_net_pf_app_stop(pf);
	/* stop app first, to avoid double free of ctrl vNIC's ddir */
	nfp_net_debugfs_dir_clean(&pf->ddir);

	nfp_net_pf_free_irqs(pf);
	nfp_net_pf_app_clean(pf);
	nfp_net_pci_unmap_mem(pf);
}
594 
/* Refresh @port's cached ETH table entry from a freshly read table.
 * Caller must hold the RTNL lock.  A port that vanished from the table
 * is flagged NFP_PORT_CHANGED and -EIO is returned; a port whose config
 * override changed is marked NFP_PORT_INVALID (reboot required).
 */
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
			struct nfp_eth_table *eth_table)
{
	struct nfp_eth_table_port *eth_port;

	ASSERT_RTNL();

	eth_port = nfp_net_find_port(eth_table, port->eth_id);
	if (!eth_port) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_warn(cpp, "Warning: port #%d not present after reconfig\n",
			 port->eth_id);
		return -EIO;
	}
	if (eth_port->override_changed) {
		nfp_warn(cpp, "Port #%d config changed, unregistering. Reboot required before port will be operational again.\n", port->eth_id);
		port->type = NFP_PORT_INVALID;
	}

	/* Cache the fresh entry inside the port structure */
	memcpy(port->eth_port, eth_port, sizeof(*eth_port));

	return 0;
}
619 
/* Re-read the NSP ETH table and refresh the state of all ports, then
 * destroy any vNIC whose port became invalid.  Must be called with
 * pf->lock held.  If the last vNIC disappears the whole device state is
 * torn down (nfp_net_pci_remove_finish()).
 */
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
	struct nfp_eth_table *eth_table;
	struct nfp_net *nn, *next;
	struct nfp_port *port;

	lockdep_assert_held(&pf->lock);

	/* Check for nfp_net_pci_remove() racing against us */
	if (list_empty(&pf->vnics))
		return 0;

	/* Update state of all ports */
	rtnl_lock();
	list_for_each_entry(port, &pf->ports, port_list)
		clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(pf->cpp);
	if (!eth_table) {
		/* Read failed - re-mark every ETH port as changed so a
		 * later refresh will retry
		 */
		list_for_each_entry(port, &pf->ports, port_list)
			if (__nfp_port_get_eth_port(port))
				set_bit(NFP_PORT_CHANGED, &port->flags);
		rtnl_unlock();
		nfp_err(pf->cpp, "Error refreshing port config!\n");
		return -EIO;
	}

	list_for_each_entry(port, &pf->ports, port_list)
		if (__nfp_port_get_eth_port(port))
			nfp_net_eth_port_update(pf->cpp, port, eth_table);
	rtnl_unlock();

	kfree(eth_table);

	/* Shoot off the ports which became invalid */
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nn->port || nn->port->type != NFP_PORT_INVALID)
			continue;

		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	if (list_empty(&pf->vnics))
		nfp_net_pci_remove_finish(pf);

	return 0;
}
668 
/* Deferred-work wrapper: run the synchronous port table refresh under
 * pf->lock.  Queued by nfp_net_refresh_port_table().
 */
static void nfp_net_refresh_vnics(struct work_struct *work)
{
	struct nfp_pf *pf = container_of(work, struct nfp_pf,
					 port_refresh_work);

	mutex_lock(&pf->lock);
	nfp_net_refresh_port_table_sync(pf);
	mutex_unlock(&pf->lock);
}
678 
/* Mark @port as changed and schedule an asynchronous refresh of the
 * whole port table on the PF's workqueue.
 */
void nfp_net_refresh_port_table(struct nfp_port *port)
{
	struct nfp_pf *pf = port->app->pf;

	set_bit(NFP_PORT_CHANGED, &port->flags);

	queue_work(pf->wq, &pf->port_refresh_work);
}
687 
/* Synchronously refresh a single port's ETH table entry.  Clears the
 * port's CHANGED bit up front; if the table read fails the bit is set
 * again so the state is retried later.  Returns 0 or a negative errno.
 */
int nfp_net_refresh_eth_port(struct nfp_port *port)
{
	struct nfp_cpp *cpp = port->app->cpp;
	struct nfp_eth_table *eth_table;
	int ret;

	clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(cpp);
	if (!eth_table) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_err(cpp, "Error refreshing port state table!\n");
		return -EIO;
	}

	ret = nfp_net_eth_port_update(cpp, port, eth_table);

	kfree(eth_table);

	return ret;
}
709 
710 /*
711  * PCI device functions
712  */
/* Main PCI probe path for the PF netdev side: check board readiness and
 * firmware symbols, map control memory, validate the firmware ABI,
 * initialize the app, allocate and bring up all vNICs and their IRQs.
 * Returns 0 on success or a negative errno (everything unwound).
 */
int nfp_net_pci_probe(struct nfp_pf *pf)
{
	struct nfp_net_fw_version fw_ver;
	u8 __iomem *ctrl_bar, *qc_bar;
	int stride;
	int err;

	INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);

	/* Verify that the board has completed initialization */
	if (!nfp_is_ready(pf)) {
		nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
		return -EINVAL;
	}

	/* Defer probe if the FW (and hence its symbol table) may still
	 * be loaded by another party
	 */
	if (!pf->rtbl) {
		nfp_err(pf->cpp, "No %s, giving up.\n",
			pf->fw_loaded ? "symbol table" : "firmware found");
		return -EPROBE_DEFER;
	}

	mutex_lock(&pf->lock);
	pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
	/* max_data_vnics is unsigned - cast to detect a negative errno */
	if ((int)pf->max_data_vnics < 0) {
		err = pf->max_data_vnics;
		goto err_unlock;
	}

	err = nfp_net_pci_map_mem(pf);
	if (err)
		goto err_unlock;

	ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
	qc_bar = nfp_cpp_area_iomem(pf->qc_area);
	if (!ctrl_bar || !qc_bar) {
		err = -EIO;
		goto err_unmap;
	}

	/* Only the generic FW ABI class is supported by this driver */
	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 5:
			stride = 4;
			break;
		default:
			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.resv, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_unmap;
		}
	}

	err = nfp_net_pf_app_init(pf, qc_bar, stride);
	if (err)
		goto err_unmap;

	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

	/* Allocate the vnics and do basic init */
	err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride);
	if (err)
		goto err_clean_ddir;

	err = nfp_net_pf_alloc_irqs(pf);
	if (err)
		goto err_free_vnics;

	err = nfp_net_pf_app_start(pf);
	if (err)
		goto err_free_irqs;

	err = nfp_net_pf_init_vnics(pf);
	if (err)
		goto err_stop_app;

	mutex_unlock(&pf->lock);

	return 0;

err_stop_app:
	nfp_net_pf_app_stop(pf);
err_free_irqs:
	nfp_net_pf_free_irqs(pf);
err_free_vnics:
	nfp_net_pf_free_vnics(pf);
err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	nfp_net_pf_app_clean(pf);
err_unmap:
	nfp_net_pci_unmap_mem(pf);
err_unlock:
	mutex_unlock(&pf->lock);
	/* Work may have been queued before the failure - flush it */
	cancel_work_sync(&pf->port_refresh_work);
	return err;
}
821 
/* PCI remove path: clean and free all data vNICs, then tear down the
 * remaining device state.  An empty vNIC list means the refresh worker
 * already tore everything down - only the worker needs flushing then.
 */
void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct nfp_net *nn;

	mutex_lock(&pf->lock);
	if (list_empty(&pf->vnics))
		goto out;

	list_for_each_entry(nn, &pf->vnics, vnic_list)
		if (nfp_net_is_data_vnic(nn))
			nfp_net_pf_clean_vnic(pf, nn);

	nfp_net_pf_free_vnics(pf);

	nfp_net_pci_remove_finish(pf);
out:
	mutex_unlock(&pf->lock);

	/* Flush any refresh work queued before we emptied the list */
	cancel_work_sync(&pf->port_refresh_work);
}
842