/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below. You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>

#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "../nfp_port.h"
#include "./cmsg.h"

#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL

static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return "FLOWER";
}

static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
{
	return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

static enum nfp_repr_type
nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
{
	switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
	case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
				  port_id);
		return NFP_REPR_TYPE_PHYS_PORT;

	case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
		if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
		    NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
			return NFP_REPR_TYPE_PF;
		else
			return NFP_REPR_TYPE_VF;
	}

	return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC;
}

static struct net_device *
nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
{
	enum nfp_repr_type repr_type;
	struct nfp_reprs *reprs;
	u8 port = 0;

	repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);

	reprs = rcu_dereference(app->reprs[repr_type]);
	if (!reprs)
		return NULL;

	if (port >= reprs->num_reprs)
		return NULL;

	return rcu_dereference(reprs->reprs[port]);
}

static int
nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
		       bool exists)
{
	struct nfp_reprs *reprs;
	int i, err, count = 0;

	reprs = rcu_dereference_protected(app->reprs[type],
					  lockdep_is_held(&app->pf->lock));
	if (!reprs)
		return 0;

	for (i = 0; i < reprs->num_reprs; i++) {
		struct net_device *netdev;

		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev) {
			struct nfp_repr *repr = netdev_priv(netdev);

			err = nfp_flower_cmsg_portreify(repr, exists);
			if (err)
				return err;
			count++;
		}
	}

	return count;
}

static int
nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	if (!tot_repl)
		return 0;

	lockdep_assert_held(&app->pf->lock);
	err = wait_event_interruptible_timeout(priv->reify_wait_queue,
					       atomic_read(replies) >= tot_repl,
					       msecs_to_jiffies(10));
	if (err <= 0) {
		nfp_warn(app->cpp, "Not all reprs responded to reify\n");
		return -EIO;
	}

	return 0;
}

static int
nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
{
	int err;

	err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
	if (err)
		return err;

	netif_tx_wake_all_queues(repr->netdev);

	return 0;
}

static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
	netif_tx_disable(repr->netdev);

	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}

static int
nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
{
	return tc_setup_cb_egdev_register(netdev,
					  nfp_flower_setup_tc_egress_cb,
					  netdev_priv(netdev));
}

static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	kfree(repr->app_priv);

	tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
				     netdev_priv(netdev));
}

static void
nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	int err;

	atomic_set(replies, 0);
	err = nfp_flower_cmsg_portreify(repr, false);
	if (err) {
		nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
		return;
	}

	nfp_flower_wait_repr_reify(app, replies, 1);
}

static void nfp_flower_sriov_disable(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return;

	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
}

static int
nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
			    enum nfp_flower_cmsg_port_vnic_type vnic_type,
			    enum nfp_repr_type repr_type, unsigned int cnt)
{
	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	enum nfp_port_type port_type;
	struct nfp_repr *nfp_repr;
	struct nfp_reprs *reprs;
	int i, err, reify_cnt;
	const u8 queue = 0;

	port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
						    NFP_PORT_VF_PORT;

	reprs = nfp_reprs_alloc(cnt);
	if (!reprs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct net_device *repr;
		struct nfp_port *port;
		u32 port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;

		/* For now we only support 1 PF */
		WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);

		port = nfp_port_alloc(app, port_type, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		if (repr_type == NFP_REPR_TYPE_PF) {
			port->pf_id = i;
			port->vnic = priv->nn->dp.ctrl_bar;
		} else {
			port->pf_id = 0;
			port->vf_id = i;
			port->vnic =
				app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
		}

		eth_hw_addr_random(repr);

		port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
						    i, queue);
		err = nfp_repr_init(app, repr,
				    port_id, port, priv->nn->dp.netdev);
		if (err) {
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		RCU_INIT_POINTER(reprs->reprs[i], repr);
		nfp_info(app->cpp, "%s%d Representor(%s) created\n",
			 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
			 repr->name);
	}

	nfp_app_reprs_set(app, repr_type, reprs);

	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
	return err;
}

static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return 0;

	return nfp_flower_spawn_vnic_reprs(app,
					   NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
					   NFP_REPR_TYPE_VF, num_vfs);
}

static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_repr *nfp_repr;
	struct sk_buff *ctrl_skb;
	struct nfp_reprs *reprs;
	int err, reify_cnt;
	unsigned int i;

	ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
	if (!ctrl_skb)
		return -ENOMEM;

	reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
	if (!reprs) {
		err = -ENOMEM;
		goto err_free_ctrl_skb;
	}

	for (i = 0; i < eth_tbl->count; i++) {
		unsigned int phys_port = eth_tbl->ports[i].index;
		struct net_device *repr;
		struct nfp_port *port;
		u32 cmsg_port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;

		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		err = nfp_port_init_phy_port(app->pf, app, port, i);
		if (err) {
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
		nfp_net_get_mac_addr(app->pf, repr, port);

		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
		err = nfp_repr_init(app, repr,
				    cmsg_port_id, port, priv->nn->dp.netdev);
		if (err) {
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
					     eth_tbl->ports[i].nbi,
					     eth_tbl->ports[i].base,
					     phys_port);

		RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
		nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
			 phys_port, repr->name);
	}

	nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);

	/* The REIFY/MAC_REPR control messages should be sent after the MAC
	 * representors are registered using nfp_app_reprs_set(). This is
	 * because the firmware may respond with control messages for the
	 * MAC representors, f.e. to provide the driver with information
	 * about their state, and without registration the driver will drop
	 * any such messages.
	 */
	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	nfp_ctrl_tx(app->ctrl, ctrl_skb);

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
	kfree_skb(ctrl_skb);
	return err;
}

static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
				 unsigned int id)
{
	if (id > 0) {
		nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
		goto err_invalid_port;
	}

	eth_hw_addr_random(nn->dp.netdev);
	netif_keep_dst(nn->dp.netdev);
	nn->vnic_no_name = true;

	return 0;

err_invalid_port:
	nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
	return PTR_ERR_OR_ZERO(nn->port);
}

static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;

	if (app->pf->num_vfs)
		nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);

	priv->nn = NULL;
}

static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	priv->nn = nn;

	err = nfp_flower_spawn_phy_reprs(app, app->priv);
	if (err)
		goto err_clear_nn;

	err = nfp_flower_spawn_vnic_reprs(app,
					  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
					  NFP_REPR_TYPE_PF, 1);
	if (err)
		goto err_destroy_reprs_phy;

	if (app->pf->num_vfs) {
		err = nfp_flower_spawn_vnic_reprs(app,
						  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
						  NFP_REPR_TYPE_VF,
						  app->pf->num_vfs);
		if (err)
			goto err_destroy_reprs_pf;
	}

	return 0;

err_destroy_reprs_pf:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
err_destroy_reprs_phy:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_clear_nn:
	priv->nn = NULL;
	return err;
}

static int nfp_flower_init(struct nfp_app *app)
{
	const struct nfp_pf *pf = app->pf;
	struct nfp_flower_priv *app_priv;
	u64 version, features;
	int err;

	if (!pf->eth_tbl) {
		nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
		return -EINVAL;
	}

	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
		return -EINVAL;
	}

	if (!pf->vf_cfg_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
		return -EINVAL;
	}

	version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
	if (err) {
		nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
		return err;
	}

	/* We need to ensure hardware has enough flower capabilities. */
	if (version != NFP_FLOWER_ALLOWED_VER) {
		nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
		return -EINVAL;
	}

	app_priv = vzalloc(sizeof(struct nfp_flower_priv));
	if (!app_priv)
		return -ENOMEM;

	app->priv = app_priv;
	app_priv->app = app;
	skb_queue_head_init(&app_priv->cmsg_skbs_high);
	skb_queue_head_init(&app_priv->cmsg_skbs_low);
	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
	init_waitqueue_head(&app_priv->reify_wait_queue);

	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
	spin_lock_init(&app_priv->mtu_conf.lock);

	err = nfp_flower_metadata_init(app);
	if (err)
		goto err_free_app_priv;

	/* Extract the extra features supported by the firmware. */
	features = nfp_rtsym_read_le(app->pf->rtbl,
				     "_abi_flower_extra_features", &err);
	if (err)
		app_priv->flower_ext_feats = 0;
	else
		app_priv->flower_ext_feats = features;

	/* Tell the firmware that the driver supports lag. */
	err = nfp_rtsym_write_le(app->pf->rtbl,
				 "_abi_flower_balance_sync_enable", 1);
	if (!err) {
		app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG;
		nfp_flower_lag_init(&app_priv->nfp_lag);
	} else if (err == -ENOENT) {
		nfp_warn(app->cpp, "LAG not supported by FW.\n");
	} else {
		goto err_cleanup_metadata;
	}

	return 0;

err_cleanup_metadata:
	nfp_flower_metadata_cleanup(app);
err_free_app_priv:
	vfree(app->priv);
	return err;
}

static void nfp_flower_clean(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;

	skb_queue_purge(&app_priv->cmsg_skbs_high);
	skb_queue_purge(&app_priv->cmsg_skbs_low);
	flush_work(&app_priv->cmsg_work);

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);

	nfp_flower_metadata_cleanup(app);
	vfree(app->priv);
	app->priv = NULL;
}

static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
{
	bool ret;

	spin_lock_bh(&app_priv->mtu_conf.lock);
	ret = app_priv->mtu_conf.ack;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	return ret;
}

static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
			   int new_mtu)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_repr *repr = netdev_priv(netdev);
	int err, ack;

	/* Only need to config FW for physical port MTU change. */
	if (repr->port->type != NFP_PORT_PHYS_PORT)
		return 0;

	if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
		nfp_err(app->cpp, "Physical port MTU setting not supported\n");
		return -EINVAL;
	}

	spin_lock_bh(&app_priv->mtu_conf.lock);
	app_priv->mtu_conf.ack = false;
	app_priv->mtu_conf.requested_val = new_mtu;
	app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
				      true);
	if (err) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return err;
	}

	/* Wait for fw to ack the change. */
	ack = wait_event_timeout(app_priv->mtu_conf.wait_q,
				 nfp_flower_check_ack(app_priv),
				 msecs_to_jiffies(10));

	if (!ack) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		nfp_warn(app->cpp, "MTU change not verified with fw\n");
		return -EIO;
	}

	return 0;
}

static int nfp_flower_start(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int err;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		err = nfp_flower_lag_reset(&app_priv->nfp_lag);
		if (err)
			return err;

		err = register_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
		if (err)
			return err;
	}

	return nfp_tunnel_config_start(app);
}

static void nfp_flower_stop(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
		unregister_netdevice_notifier(&app_priv->nfp_lag.lag_nb);

	nfp_tunnel_config_stop(app);
}

const struct nfp_app_type app_flower = {
	.id		= NFP_APP_FLOWER_NIC,
	.name		= "flower",

	.ctrl_cap_mask	= ~0U,
	.ctrl_has_meta	= true,

	.extra_cap	= nfp_flower_extra_cap,

	.init		= nfp_flower_init,
	.clean		= nfp_flower_clean,

	.repr_change_mtu  = nfp_flower_repr_change_mtu,

	.vnic_alloc	= nfp_flower_vnic_alloc,
	.vnic_init	= nfp_flower_vnic_init,
	.vnic_clean	= nfp_flower_vnic_clean,

	.repr_init	= nfp_flower_repr_netdev_init,
	.repr_preclean	= nfp_flower_repr_netdev_preclean,
	.repr_clean	= nfp_flower_repr_netdev_clean,

	.repr_open	= nfp_flower_repr_netdev_open,
	.repr_stop	= nfp_flower_repr_netdev_stop,

	.start		= nfp_flower_start,
	.stop		= nfp_flower_stop,

	.ctrl_msg_rx	= nfp_flower_cmsg_rx,

	.sriov_enable	= nfp_flower_sriov_enable,
	.sriov_disable	= nfp_flower_sriov_disable,

	.eswitch_mode_get  = eswitch_mode_get,
	.repr_get	= nfp_flower_repr_get,

	.setup_tc	= nfp_flower_setup_tc,
};