/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_main.c
 * Netronome network device driver: Main entry point
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Alejandro Lucero <alejandro.lucero@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/random.h>
#include <linux/rtnetlink.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"

#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_main.h"

#define NFP_PF_CSR_SLICE_SIZE	(32 * 1024)

static int nfp_is_ready(struct nfp_cpp *cpp)
{
	const char *cp;
	long state;
	int err;

	cp = nfp_hwinfo_lookup(cpp, "board.state");
	if (!cp)
		return 0;

	/* Base 0 lets kstrtol() accept the value in decimal or hex form */
	err = kstrtol(cp, 0, &state);
	if (err < 0)
		return 0;

	/* board.state == 15 indicates a fully initialized board */
	return state == 15;
}
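/*
 * Illustrative hwinfo entries this file relies on; the values shown are
 * made-up examples, not taken from real hardware:
 *
 *	board.state	"15"			- board fully initialized
 *	eth0.mac	"00:15:4d:00:00:01"	- fallback MAC for port 0
 */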
/**
 * nfp_net_map_area() - Helper function to map an area
 * @cpp:    NFP CPP handler
 * @name:   Name for the area
 * @isl:    Island for the area (CPP island ID)
 * @target: CPP target
 * @addr:   CPP address
 * @size:   Size of the area
 * @area:   Area handle (returned).
 *
 * This function exists primarily to simplify the code in the main
 * probe function. To undo the effect of this function, call
 * nfp_cpp_area_release_free(*area).
 *
 * Return: Pointer to memory mapped area or ERR_PTR
 */
static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp,
				    const char *name, int isl, int target,
				    unsigned long long addr, unsigned long size,
				    struct nfp_cpp_area **area)
{
	u8 __iomem *res;
	u32 dest;
	int err;

	dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, isl);

	*area = nfp_cpp_area_alloc_with_name(cpp, dest, name, addr, size);
	if (!*area) {
		err = -EIO;
		goto err_area;
	}

	err = nfp_cpp_area_acquire(*area);
	if (err < 0)
		goto err_acquire;

	res = nfp_cpp_area_iomem(*area);
	if (!res) {
		err = -EIO;
		goto err_map;
	}

	return res;

err_map:
	nfp_cpp_area_release(*area);
err_acquire:
	nfp_cpp_area_free(*area);
err_area:
	return (u8 __iomem *)ERR_PTR(err);
}
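/*
 * Usage sketch for nfp_net_map_area(); the island, target, address and
 * size below are made-up placeholders, not real hardware values:
 *
 *	struct nfp_cpp_area *area;
 *	u8 __iomem *bar;
 *
 *	bar = nfp_net_map_area(cpp, "example.bar", 0, 0, 0x8000, 4096, &area);
 *	if (IS_ERR(bar))
 *		return PTR_ERR(bar);
 *	writel(0, bar);				// use the mapping
 *	nfp_cpp_area_release_free(area);	// undo the mapping
 */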
/**
 * nfp_net_get_mac_addr() - Get the MAC address.
 * @nn:  NFP Network structure
 * @cpp: NFP CPP handle
 * @id:  NFP port id
 *
 * First try to get the MAC address from the NSP ETH table. If that
 * fails, try HWInfo. As a last resort generate a random address.
 */
static void
nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_cpp *cpp, unsigned int id)
{
	struct nfp_net_dp *dp = &nn->dp;
	u8 mac_addr[ETH_ALEN];
	const char *mac_str;
	char name[32];

	if (nn->eth_port) {
		ether_addr_copy(dp->netdev->dev_addr, nn->eth_port->mac_addr);
		ether_addr_copy(dp->netdev->perm_addr, nn->eth_port->mac_addr);
		return;
	}

	snprintf(name, sizeof(name), "eth%d.mac", id);

	mac_str = nfp_hwinfo_lookup(cpp, name);
	if (!mac_str) {
		dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
		eth_hw_addr_random(dp->netdev);
		return;
	}

	if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
		   &mac_addr[0], &mac_addr[1], &mac_addr[2],
		   &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
		dev_warn(dp->dev,
			 "Can't parse MAC address (%s). Generate.\n", mac_str);
		eth_hw_addr_random(dp->netdev);
		return;
	}

	ether_addr_copy(dp->netdev->dev_addr, mac_addr);
	ether_addr_copy(dp->netdev->perm_addr, mac_addr);
}

static struct nfp_eth_table_port *
nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id)
{
	int i;

	for (i = 0; eth_tbl && i < eth_tbl->count; i++)
		if (eth_tbl->ports[i].eth_index == id)
			return &eth_tbl->ports[i];

	return NULL;
}

static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	char name[256];
	u16 interface;
	int pcie_pf;
	int err = 0;
	u64 val;

	interface = nfp_cpp_interface(pf->cpp);
	pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);

	snprintf(name, sizeof(name), "nfd_cfg_pf%d_num_ports", pcie_pf);

	val = nfp_rtsym_read_le(pf->cpp, name, &err);
	/* Default to one port */
	if (err) {
		if (err != -ENOENT)
			nfp_err(pf->cpp, "Unable to read adapter port count\n");
		val = 1;
	}

	return val;
}

static unsigned int
nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar,
		     unsigned int stride, u32 start_off, u32 num_off)
{
	unsigned int i, min_qc, max_qc;

	min_qc = readl(ctrl_bar + start_off);
	max_qc = min_qc;

	for (i = 0; i < pf->num_ports; i++) {
		/* To make our lives simpler, only accept configurations
		 * where queues are allocated to PFs in order (queues of
		 * PFn all have indexes lower than those of PFn+1).
		 */
		if (max_qc > readl(ctrl_bar + start_off))
			return 0;

		max_qc = readl(ctrl_bar + start_off);
		max_qc += readl(ctrl_bar + num_off) * stride;
		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
	}

	return max_qc - min_qc;
}

static u8 __iomem *nfp_net_pf_map_ctrl_bar(struct nfp_pf *pf)
{
	const struct nfp_rtsym *ctrl_sym;
	u8 __iomem *ctrl_bar;
	char pf_symbol[256];
	u16 interface;
	int pcie_pf;

	interface = nfp_cpp_interface(pf->cpp);
	pcie_pf = NFP_CPP_INTERFACE_UNIT_of(interface);

	snprintf(pf_symbol, sizeof(pf_symbol), "_pf%d_net_bar0", pcie_pf);

	ctrl_sym = nfp_rtsym_lookup(pf->cpp, pf_symbol);
	if (!ctrl_sym) {
		dev_err(&pf->pdev->dev,
			"Failed to find PF BAR0 symbol %s\n", pf_symbol);
		return NULL;
	}

	if (ctrl_sym->size < pf->num_ports * NFP_PF_CSR_SLICE_SIZE) {
		dev_err(&pf->pdev->dev,
			"PF BAR0 too small to contain %d ports\n",
			pf->num_ports);
		return NULL;
	}

	ctrl_bar = nfp_net_map_area(pf->cpp, "net.ctrl",
				    ctrl_sym->domain, ctrl_sym->target,
				    ctrl_sym->addr, ctrl_sym->size,
				    &pf->ctrl_area);
	if (IS_ERR(ctrl_bar)) {
		dev_err(&pf->pdev->dev, "Failed to map PF BAR0: %ld\n",
			PTR_ERR(ctrl_bar));
		return NULL;
	}

	return ctrl_bar;
}

static void nfp_net_pf_free_netdevs(struct nfp_pf *pf)
{
	struct nfp_net *nn;

	while (!list_empty(&pf->ports)) {
		nn = list_first_entry(&pf->ports, struct nfp_net, port_list);
		list_del(&nn->port_list);
		pf->num_netdevs--;

		nfp_net_netdev_free(nn);
	}
}
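/*
 * Worked example for nfp_net_pf_total_qcs(); the numbers are
 * illustrative, not from real hardware. With two ports, stride 4,
 * port 0 starting at queue 0 with 8 rings and port 1 starting at
 * queue 32 with 8 rings: min_qc = 0 and max_qc = 32 + 8 * 4 = 64, so
 * 64 queue controllers (64 * NFP_QCP_QUEUE_ADDR_SZ bytes) need to be
 * mapped. A port whose start queue lies below the previous maximum
 * makes the function return 0, which probe treats as an invalid
 * configuration.
 */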
static struct nfp_net *
nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
			     void __iomem *tx_bar, void __iomem *rx_bar,
			     int stride, struct nfp_net_fw_version *fw_ver,
			     struct nfp_eth_table_port *eth_port)
{
	u32 n_tx_rings, n_rx_rings;
	struct nfp_net *nn;

	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	/* Allocate and initialise the netdev */
	nn = nfp_net_netdev_alloc(pf->pdev, n_tx_rings, n_rx_rings);
	if (IS_ERR(nn))
		return nn;

	nn->cpp = pf->cpp;
	nn->fw_ver = *fw_ver;
	nn->dp.ctrl_bar = ctrl_bar;
	nn->tx_bar = tx_bar;
	nn->rx_bar = rx_bar;
	nn->dp.is_vf = 0;
	nn->stride_rx = stride;
	nn->stride_tx = stride;
	nn->eth_port = eth_port;

	return nn;
}

static int
nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
			    unsigned int id)
{
	int err;

	/* Get MAC address */
	nfp_net_get_mac_addr(nn, pf->cpp, id);

	/* Get ME clock frequency from ctrl BAR
	 * XXX for now frequency is hardcoded until we figure out how
	 * to get the value from nfp-hwinfo into ctrl bar
	 */
	nn->me_freq_mhz = 1200;

	err = nfp_net_netdev_init(nn->dp.netdev);
	if (err)
		return err;

	nfp_net_debugfs_port_add(nn, pf->ddir, id);

	nfp_net_info(nn);

	return 0;
}

static int
nfp_net_pf_alloc_netdevs(struct nfp_pf *pf, void __iomem *ctrl_bar,
			 void __iomem *tx_bar, void __iomem *rx_bar,
			 int stride, struct nfp_net_fw_version *fw_ver)
{
	u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
	struct nfp_eth_table_port *eth_port;
	struct nfp_net *nn;
	unsigned int i;
	int err;

	prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);

	for (i = 0; i < pf->num_ports; i++) {
		tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
		tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
		tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
		rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ;
		prev_tx_base = tgt_tx_base;
		prev_rx_base = tgt_rx_base;

		eth_port = nfp_net_find_port(pf->eth_tbl, i);
		if (eth_port && eth_port->override_changed) {
			nfp_warn(pf->cpp, "Config changed for port #%d, reboot required before port will be operational\n", i);
		} else {
			nn = nfp_net_pf_alloc_port_netdev(pf, ctrl_bar, tx_bar,
							  rx_bar, stride,
							  fw_ver, eth_port);
			if (IS_ERR(nn)) {
				err = PTR_ERR(nn);
				goto err_free_prev;
			}
			list_add_tail(&nn->port_list, &pf->ports);
			pf->num_netdevs++;
		}

		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
	}

	if (list_empty(&pf->ports))
		return -ENODEV;

	return 0;

err_free_prev:
	nfp_net_pf_free_netdevs(pf);
	return err;
}
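/*
 * Illustrative walk of the BAR pointer arithmetic above (example
 * numbers only): if port 0 starts at TX queue 0 and port 1 at TX
 * queue 32, then on the second iteration tx_bar advances by
 * (32 - 0) * NFP_QCP_QUEUE_ADDR_SZ, so each netdev sees its own first
 * queue at offset 0 of the tx_bar it is handed. ctrl_bar likewise
 * advances by one NFP_PF_CSR_SLICE_SIZE (32KB) slice per port.
 */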
static int
nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
			 void __iomem *ctrl_bar, void __iomem *tx_bar,
			 void __iomem *rx_bar, int stride,
			 struct nfp_net_fw_version *fw_ver)
{
	unsigned int id, wanted_irqs, num_irqs, ports_left, irqs_left;
	struct nfp_net *nn;
	int err;

	/* Allocate the netdevs and do basic init */
	err = nfp_net_pf_alloc_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
				       stride, fw_ver);
	if (err)
		return err;

	/* Get MSI-X vectors */
	wanted_irqs = 0;
	list_for_each_entry(nn, &pf->ports, port_list)
		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
				  GFP_KERNEL);
	if (!pf->irq_entries) {
		err = -ENOMEM;
		goto err_nn_free;
	}

	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
				      NFP_NET_MIN_PORT_IRQS * pf->num_netdevs,
				      wanted_irqs);
	if (!num_irqs) {
		/* nn points past the end of the list here, so warn via
		 * the PF-level handle instead
		 */
		nfp_warn(pf->cpp, "Unable to allocate MSI-X Vectors. Exiting\n");
		err = -ENOMEM;
		goto err_vec_free;
	}

	/* Distribute IRQs to ports */
	irqs_left = num_irqs;
	ports_left = pf->num_netdevs;
	list_for_each_entry(nn, &pf->ports, port_list) {
		unsigned int n;

		n = DIV_ROUND_UP(irqs_left, ports_left);
		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
				    n);
		irqs_left -= n;
		ports_left--;
	}

	/* Finish netdev init and register */
	id = 0;
	list_for_each_entry(nn, &pf->ports, port_list) {
		err = nfp_net_pf_init_port_netdev(pf, nn, id);
		if (err)
			goto err_prev_deinit;

		id++;
	}

	return 0;

err_prev_deinit:
	list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
		nfp_net_netdev_clean(nn->dp.netdev);
	}
	nfp_net_irqs_disable(pf->pdev);
err_vec_free:
	kfree(pf->irq_entries);
err_nn_free:
	nfp_net_pf_free_netdevs(pf);
	return err;
}

static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{
	nfp_net_debugfs_dir_clean(&pf->ddir);

	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);

	nfp_cpp_area_release_free(pf->rx_area);
	nfp_cpp_area_release_free(pf->tx_area);
	nfp_cpp_area_release_free(pf->ctrl_area);
}
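/*
 * Worked example for the IRQ distribution loop above (counts are
 * illustrative): with num_irqs = 10 and 4 netdevs, DIV_ROUND_UP hands
 * out 3, 3, 2, 2 vectors, always giving the ceiling of what remains
 * divided by the ports still waiting, so the split is as even as
 * possible without leaving any vector unused.
 */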
static void nfp_net_refresh_netdevs(struct work_struct *work)
{
	struct nfp_pf *pf = container_of(work, struct nfp_pf,
					 port_refresh_work);
	struct nfp_eth_table *eth_table;
	struct nfp_net *nn, *next;

	mutex_lock(&pf->port_lock);

	/* Check for nfp_net_pci_remove() racing against us */
	if (list_empty(&pf->ports))
		goto out;

	list_for_each_entry(nn, &pf->ports, port_list)
		nfp_net_link_changed_read_clear(nn);

	eth_table = nfp_eth_read_ports(pf->cpp);
	if (!eth_table) {
		nfp_err(pf->cpp, "Error refreshing port config!\n");
		goto out;
	}

	rtnl_lock();
	list_for_each_entry(nn, &pf->ports, port_list) {
		if (!nn->eth_port)
			continue;
		nn->eth_port = nfp_net_find_port(eth_table,
						 nn->eth_port->eth_index);
	}
	rtnl_unlock();

	kfree(pf->eth_tbl);
	pf->eth_tbl = eth_table;

	list_for_each_entry_safe(nn, next, &pf->ports, port_list) {
		if (!nn->eth_port) {
			nfp_warn(pf->cpp, "Warning: port not present after reconfig\n");
			continue;
		}
		if (!nn->eth_port->override_changed)
			continue;

		nn_warn(nn, "Port config changed, unregistering. Reboot required before port will be operational again.\n");

		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
		nfp_net_netdev_clean(nn->dp.netdev);

		list_del(&nn->port_list);
		pf->num_netdevs--;
		nfp_net_netdev_free(nn);
	}

	if (list_empty(&pf->ports))
		nfp_net_pci_remove_finish(pf);
out:
	mutex_unlock(&pf->port_lock);
}

void nfp_net_refresh_port_table(struct nfp_net *nn)
{
	struct nfp_pf *pf = pci_get_drvdata(nn->pdev);

	schedule_work(&pf->port_refresh_work);
}

int nfp_net_refresh_eth_port(struct nfp_net *nn)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_eth_table *eth_table;

	eth_table = nfp_eth_read_ports(nn->cpp);
	if (!eth_table) {
		nn_err(nn, "Error refreshing port state table!\n");
		return -EIO;
	}

	eth_port = nfp_net_find_port(eth_table, nn->eth_port->eth_index);
	if (!eth_port) {
		nn_err(nn, "Error finding state of the port!\n");
		kfree(eth_table);
		return -EIO;
	}

	memcpy(nn->eth_port, eth_port, sizeof(*eth_port));

	kfree(eth_table);

	return 0;
}
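/*
 * Sketch of how the two refresh entry points divide the work (the
 * caller named here is illustrative): a context that needs current
 * state immediately, e.g. an ethtool get handler, calls
 * nfp_net_refresh_eth_port() to update just its own nn->eth_port in
 * place, while anything that may have changed the port layout calls
 * nfp_net_refresh_port_table(), which defers to
 * nfp_net_refresh_netdevs() on the system workqueue so netdevs can be
 * unregistered under pf->port_lock.
 */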
/*
 * PCI device functions
 */
int nfp_net_pci_probe(struct nfp_pf *pf)
{
	u8 __iomem *ctrl_bar, *tx_bar, *rx_bar;
	u32 total_tx_qcs, total_rx_qcs;
	struct nfp_net_fw_version fw_ver;
	u32 tx_area_sz, rx_area_sz;
	u32 start_q;
	int stride;
	int err;

	INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_netdevs);
	mutex_init(&pf->port_lock);

	/* Verify that the board has completed initialization */
	if (!nfp_is_ready(pf->cpp)) {
		nfp_err(pf->cpp, "NFP is not ready for NIC operation.\n");
		return -EINVAL;
	}

	mutex_lock(&pf->port_lock);
	pf->num_ports = nfp_net_pf_get_num_ports(pf);

	ctrl_bar = nfp_net_pf_map_ctrl_bar(pf);
	if (!ctrl_bar) {
		err = pf->fw_loaded ? -EINVAL : -EPROBE_DEFER;
		goto err_unlock;
	}

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 4:
			stride = 4;
			break;
		default:
			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.resv, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_ctrl_unmap;
		}
	}

	/* Find how many QC structs need to be mapped */
	total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
					    NFP_NET_CFG_START_TXQ,
					    NFP_NET_CFG_MAX_TXRINGS);
	total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
					    NFP_NET_CFG_START_RXQ,
					    NFP_NET_CFG_MAX_RXRINGS);
	if (!total_tx_qcs || !total_rx_qcs) {
		nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n",
			total_tx_qcs, total_rx_qcs);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs;
	rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs;

	/* Map TX queues */
	start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0,
				  NFP_PCIE_QUEUE(start_q),
				  tx_area_sz, &pf->tx_area);
	if (IS_ERR(tx_bar)) {
		nfp_err(pf->cpp, "Failed to map TX area.\n");
		err = PTR_ERR(tx_bar);
		goto err_ctrl_unmap;
	}

	/* Map RX queues */
	start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0,
				  NFP_PCIE_QUEUE(start_q),
				  rx_area_sz, &pf->rx_area);
	if (IS_ERR(rx_bar)) {
		nfp_err(pf->cpp, "Failed to map RX area.\n");
		err = PTR_ERR(rx_bar);
		goto err_unmap_tx;
	}

	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

	err = nfp_net_pf_spawn_netdevs(pf, ctrl_bar, tx_bar, rx_bar,
				       stride, &fw_ver);
	if (err)
		goto err_clean_ddir;

	mutex_unlock(&pf->port_lock);

	return 0;

err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	nfp_cpp_area_release_free(pf->rx_area);
err_unmap_tx:
	nfp_cpp_area_release_free(pf->tx_area);
err_ctrl_unmap:
	nfp_cpp_area_release_free(pf->ctrl_area);
err_unlock:
	mutex_unlock(&pf->port_lock);
	return err;
}

void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct nfp_net *nn;

	mutex_lock(&pf->port_lock);
	if (list_empty(&pf->ports))
		goto out;

	list_for_each_entry(nn, &pf->ports, port_list) {
		nfp_net_debugfs_dir_clean(&nn->debugfs_dir);

		nfp_net_netdev_clean(nn->dp.netdev);
	}

	nfp_net_pf_free_netdevs(pf);

	nfp_net_pci_remove_finish(pf);
out:
	mutex_unlock(&pf->port_lock);

	cancel_work_sync(&pf->port_refresh_work);
}