Lines matching full:dp (each entry shows the source line number, the matching code, and the enclosing function; lines that declare dp are tagged "argument" or "local")

407 struct nfp_net_dp *dp = &nn->dp; in nfp_net_irqs_assign() local
410 dp->num_r_vecs = nn->max_r_vecs; in nfp_net_irqs_assign()
414 if (dp->num_rx_rings > dp->num_r_vecs || in nfp_net_irqs_assign()
415 dp->num_tx_rings > dp->num_r_vecs) in nfp_net_irqs_assign()
416 dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n", in nfp_net_irqs_assign()
417 dp->num_rx_rings, dp->num_tx_rings, in nfp_net_irqs_assign()
418 dp->num_r_vecs); in nfp_net_irqs_assign()
420 dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings); in nfp_net_irqs_assign()
421 dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings); in nfp_net_irqs_assign()
422 dp->num_stack_tx_rings = dp->num_tx_rings; in nfp_net_irqs_assign()
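The nfp_net_irqs_assign() matches above clamp the requested RX/TX ring counts to the number of ring vectors actually available. A minimal, self-contained sketch of that clamping follows; the struct and helper names are stand-ins invented for illustration, and only the field names and the warn-then-min pattern come from the fragments.

#include <stdio.h>

struct dp_sketch {
	unsigned int num_rx_rings;
	unsigned int num_tx_rings;
	unsigned int num_stack_tx_rings;
	unsigned int num_r_vecs;
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Mirror of the clamping pattern in the fragments: warn when more rings
 * were requested than vectors exist, then cap both ring counts. */
static void irqs_assign_sketch(struct dp_sketch *dp, unsigned int max_r_vecs)
{
	dp->num_r_vecs = max_r_vecs;

	if (dp->num_rx_rings > dp->num_r_vecs ||
	    dp->num_tx_rings > dp->num_r_vecs)
		fprintf(stderr, "More rings (%u,%u) than vectors (%u).\n",
			dp->num_rx_rings, dp->num_tx_rings, dp->num_r_vecs);

	dp->num_rx_rings = min_u(dp->num_r_vecs, dp->num_rx_rings);
	dp->num_tx_rings = min_u(dp->num_r_vecs, dp->num_tx_rings);
	dp->num_stack_tx_rings = dp->num_tx_rings;
}

int main(void)
{
	struct dp_sketch dp = { .num_rx_rings = 16, .num_tx_rings = 16 };

	irqs_assign_sketch(&dp, 8);
	printf("rx=%u tx=%u vecs=%u\n",
	       dp.num_rx_rings, dp.num_tx_rings, dp.num_r_vecs);
	return 0;
}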
497 netif_carrier_on(nn->dp.netdev); in nfp_net_read_link_status()
498 netdev_info(nn->dp.netdev, "NIC Link is Up\n"); in nfp_net_read_link_status()
500 netif_carrier_off(nn->dp.netdev); in nfp_net_read_link_status()
501 netdev_info(nn->dp.netdev, "NIC Link is Down\n"); in nfp_net_read_link_status()
592 nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, in nfp_net_tls_tx() argument
601 if (likely(!dp->ktls_tx)) in nfp_net_tls_tx()
631 nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n"); in nfp_net_tls_tx()
694 nfp_net_calc_fl_bufsz_data(struct nfp_net_dp *dp) in nfp_net_calc_fl_bufsz_data() argument
698 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) in nfp_net_calc_fl_bufsz_data()
701 fl_bufsz += dp->rx_offset; in nfp_net_calc_fl_bufsz_data()
702 fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu; in nfp_net_calc_fl_bufsz_data()
707 static unsigned int nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp) in nfp_net_calc_fl_bufsz() argument
712 fl_bufsz += dp->rx_dma_off; in nfp_net_calc_fl_bufsz()
713 fl_bufsz += nfp_net_calc_fl_bufsz_data(dp); in nfp_net_calc_fl_bufsz()
721 static unsigned int nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp *dp) in nfp_net_calc_fl_bufsz_xsk() argument
726 fl_bufsz += nfp_net_calc_fl_bufsz_data(dp); in nfp_net_calc_fl_bufsz_xsk()
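The nfp_net_calc_fl_bufsz*() matches show the free-list buffer size being built from the RX offset/prepend, the DMA offset, the L2 headers and the MTU. Below is a simplified sketch of just those visible terms; the MAX_PREPEND_SK constant and the "offset of 0 means dynamic" convention are assumptions, and the real driver's extra headroom and skb_shared_info accounting are deliberately omitted.

#define ETH_HLEN_SK     14   /* Ethernet header length */
#define VLAN_HLEN_SK     4   /* one VLAN tag */
#define MAX_PREPEND_SK  64   /* assumed worst case when the RX offset is dynamic */

struct bufsz_dp_sketch {
	unsigned int rx_offset;   /* 0 stands in for NFP_NET_CFG_RX_OFFSET_DYNAMIC */
	unsigned int rx_dma_off;
	unsigned int mtu;
};

/* Data part of a free-list buffer: prepend (fixed offset or worst case),
 * Ethernet header, two VLAN tags, MTU -- the terms visible in the
 * nfp_net_calc_fl_bufsz_data() fragments. */
static unsigned int calc_fl_bufsz_data_sketch(const struct bufsz_dp_sketch *dp)
{
	unsigned int fl_bufsz = 0;

	if (dp->rx_offset == 0)
		fl_bufsz += MAX_PREPEND_SK;
	else
		fl_bufsz += dp->rx_offset;
	fl_bufsz += ETH_HLEN_SK + VLAN_HLEN_SK * 2 + dp->mtu;

	return fl_bufsz;
}

/* The full buffer size adds the DMA offset in front of the data part. */
static unsigned int calc_fl_bufsz_sketch(const struct bufsz_dp_sketch *dp)
{
	return dp->rx_dma_off + calc_fl_bufsz_data_sketch(dp);
}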
757 if (nn->dp.netdev) { in nfp_net_vecs_init()
764 tasklet_setup(&r_vec->tasklet, nn->dp.ops->ctrl_poll); in nfp_net_vecs_init()
773 nfp_net_napi_add(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, int idx) in nfp_net_napi_add() argument
775 if (dp->netdev) in nfp_net_napi_add()
776 netif_napi_add(dp->netdev, &r_vec->napi, in nfp_net_napi_add()
777 nfp_net_has_xsk_pool_slow(dp, idx) ? dp->ops->xsk_poll : dp->ops->poll); in nfp_net_napi_add()
783 nfp_net_napi_del(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec) in nfp_net_napi_del() argument
785 if (dp->netdev) in nfp_net_napi_del()
792 nfp_net_vector_assign_rings(struct nfp_net_dp *dp, in nfp_net_vector_assign_rings() argument
795 r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL; in nfp_net_vector_assign_rings()
797 idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL; in nfp_net_vector_assign_rings()
799 r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ? in nfp_net_vector_assign_rings()
800 &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL; in nfp_net_vector_assign_rings()
802 if (nfp_net_has_xsk_pool_slow(dp, idx) || r_vec->xsk_pool) { in nfp_net_vector_assign_rings()
803 r_vec->xsk_pool = dp->xdp_prog ? dp->xsk_pools[idx] : NULL; in nfp_net_vector_assign_rings()
809 nfp_net_napi_del(dp, r_vec); in nfp_net_vector_assign_rings()
810 nfp_net_napi_add(dp, r_vec, idx); in nfp_net_vector_assign_rings()
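The nfp_net_vector_assign_rings() matches show how each ring vector is handed an RX ring, a stack TX ring and, when extra TX rings exist for XDP, an XDP TX ring taken from the tail of the TX ring array. A small sketch of just that indexing; the struct names are invented stand-ins, and the XSK-pool and NAPI handling seen in the fragments is left out.

#include <stddef.h>

struct ring_sketch { unsigned int idx; };

struct rvec_sketch {
	struct ring_sketch *rx_ring;
	struct ring_sketch *tx_ring;   /* stack TX */
	struct ring_sketch *xdp_ring;  /* XDP TX, stored after the stack TX rings */
};

struct rings_dp_sketch {
	unsigned int num_rx_rings;
	unsigned int num_tx_rings;        /* stack TX + XDP TX */
	unsigned int num_stack_tx_rings;
	struct ring_sketch *rx_rings;
	struct ring_sketch *tx_rings;
};

/* Give vector 'idx' its RX ring, its stack TX ring and, when XDP TX rings
 * exist, the XDP ring that lives after the stack TX rings -- the same three
 * ternaries as in the nfp_net_vector_assign_rings() fragments. */
static void vector_assign_rings_sketch(struct rings_dp_sketch *dp,
				       struct rvec_sketch *r_vec,
				       unsigned int idx)
{
	r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
	r_vec->tx_ring = idx < dp->num_stack_tx_rings ?
			 &dp->tx_rings[idx] : NULL;
	r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
			  &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
}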
820 nfp_net_napi_add(&nn->dp, r_vec, idx); in nfp_net_prepare_vector()
827 nfp_net_napi_del(&nn->dp, r_vec); in nfp_net_prepare_vector()
844 nfp_net_napi_del(&nn->dp, r_vec); in nfp_net_cleanup_vector()
893 for (i = 0; i < nn->dp.num_rx_rings; i++) in nfp_net_coalesce_write_cfg()
899 for (i = 0; i < nn->dp.num_tx_rings; i++) in nfp_net_coalesce_write_cfg()
930 new_ctrl = nn->dp.ctrl; in nfp_net_clear_config_and_disable()
950 new_ctrl_w1 = nn->dp.ctrl_w1; in nfp_net_clear_config_and_disable()
959 nn->dp.ctrl_w1 = new_ctrl_w1; in nfp_net_clear_config_and_disable()
962 for (r = 0; r < nn->dp.num_rx_rings; r++) { in nfp_net_clear_config_and_disable()
963 nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]); in nfp_net_clear_config_and_disable()
964 if (nfp_net_has_xsk_pool_slow(&nn->dp, nn->dp.rx_rings[r].idx)) in nfp_net_clear_config_and_disable()
965 nfp_net_xsk_rx_bufs_free(&nn->dp.rx_rings[r]); in nfp_net_clear_config_and_disable()
967 for (r = 0; r < nn->dp.num_tx_rings; r++) in nfp_net_clear_config_and_disable()
968 nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]); in nfp_net_clear_config_and_disable()
969 for (r = 0; r < nn->dp.num_r_vecs; r++) in nfp_net_clear_config_and_disable()
972 nn->dp.ctrl = new_ctrl; in nfp_net_clear_config_and_disable()
985 new_ctrl = nn->dp.ctrl; in nfp_net_set_config_and_enable()
986 new_ctrl_w1 = nn->dp.ctrl_w1; in nfp_net_set_config_and_enable()
988 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) { in nfp_net_set_config_and_enable()
995 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) { in nfp_net_set_config_and_enable()
1000 for (r = 0; r < nn->dp.num_tx_rings; r++) in nfp_net_set_config_and_enable()
1001 nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r); in nfp_net_set_config_and_enable()
1002 for (r = 0; r < nn->dp.num_rx_rings; r++) in nfp_net_set_config_and_enable()
1003 nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r); in nfp_net_set_config_and_enable()
1006 U64_MAX >> (64 - nn->dp.num_tx_rings)); in nfp_net_set_config_and_enable()
1009 U64_MAX >> (64 - nn->dp.num_rx_rings)); in nfp_net_set_config_and_enable()
1011 if (nn->dp.netdev) in nfp_net_set_config_and_enable()
1012 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr); in nfp_net_set_config_and_enable()
1014 nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu); in nfp_net_set_config_and_enable()
1016 bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA; in nfp_net_set_config_and_enable()
1044 nn->dp.ctrl = new_ctrl; in nfp_net_set_config_and_enable()
1045 nn->dp.ctrl_w1 = new_ctrl_w1; in nfp_net_set_config_and_enable()
1047 for (r = 0; r < nn->dp.num_rx_rings; r++) in nfp_net_set_config_and_enable()
1048 nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]); in nfp_net_set_config_and_enable()
1061 nn->dp.ctrl = new_ctrl; in nfp_net_set_config_and_enable()
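The nfp_net_set_config_and_enable() matches write ring-enable masks of the form U64_MAX >> (64 - n), i.e. a 64-bit word with the low n bits set (n = 4 gives 0xf). A small hedged helper showing that expression, with the n == 0 case handled explicitly since a 64-bit shift by 64 would be undefined behaviour (the driver only uses n >= 1):

#include <stdint.h>

/* Low-n-bits-set mask for enabling the first n rings, as in the
 * U64_MAX >> (64 - num_tx_rings) expression in the fragments above. */
static uint64_t ring_enable_mask(unsigned int n)
{
	return n ? UINT64_MAX >> (64 - n) : 0;
}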
1077 netif_carrier_off(nn->dp.netdev); in nfp_net_close_stack()
1080 for (r = 0; r < nn->dp.num_r_vecs; r++) { in nfp_net_close_stack()
1093 netif_tx_disable(nn->dp.netdev); in nfp_net_close_stack()
1104 nfp_net_tx_rings_free(&nn->dp); in nfp_net_close_free_all()
1105 nfp_net_rx_rings_free(&nn->dp); in nfp_net_close_free_all()
1107 for (r = 0; r < nn->dp.num_r_vecs; r++) in nfp_net_close_free_all()
1148 for (r = 0; r < nn->dp.num_r_vecs; r++) { in nfp_ctrl_close()
1229 for (r = 0; r < nn->dp.num_r_vecs; r++) { in nfp_net_open_stack()
1246 netif_tx_wake_all_queues(nn->dp.netdev); in nfp_net_open_stack()
1268 for (r = 0; r < nn->dp.num_r_vecs; r++) { in nfp_net_open_alloc_all()
1274 err = nfp_net_rx_rings_prepare(nn, &nn->dp); in nfp_net_open_alloc_all()
1278 err = nfp_net_tx_rings_prepare(nn, &nn->dp); in nfp_net_open_alloc_all()
1283 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); in nfp_net_open_alloc_all()
1288 nfp_net_rx_rings_free(&nn->dp); in nfp_net_open_alloc_all()
1290 r = nn->dp.num_r_vecs; in nfp_net_open_alloc_all()
1314 err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings); in nfp_net_netdev_open()
1318 err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings); in nfp_net_netdev_open()
1370 for (r = 0; r < nn->dp.num_r_vecs; r++) in nfp_ctrl_open()
1473 new_ctrl = nn->dp.ctrl; in nfp_net_set_rx_mode()
1474 new_ctrl_w1 = nn->dp.ctrl_w1; in nfp_net_set_rx_mode()
1499 if (new_ctrl == nn->dp.ctrl && new_ctrl_w1 == nn->dp.ctrl_w1) in nfp_net_set_rx_mode()
1502 if (new_ctrl != nn->dp.ctrl) in nfp_net_set_rx_mode()
1504 if (new_ctrl_w1 != nn->dp.ctrl_w1) in nfp_net_set_rx_mode()
1508 nn->dp.ctrl = new_ctrl; in nfp_net_set_rx_mode()
1509 nn->dp.ctrl_w1 = new_ctrl_w1; in nfp_net_set_rx_mode()
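Several groups in this listing (nfp_net_set_rx_mode(), nfp_net_set_features(), nfp_net_bridge_setlink()) follow the same pattern: build a candidate control word from the cached nn->dp.ctrl, bail out if nothing changed, and only then write the register and refresh the cache. A compact sketch of that pattern; the bit name, register writer and global cache below are invented for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTRL_PROMISC_SK (1u << 0)   /* hypothetical control bit */

static uint32_t cached_ctrl;        /* stand-in for the cached nn->dp.ctrl */

static void write_ctrl_sketch(uint32_t val)
{
	/* stand-in for the register write plus reconfig step */
	printf("ctrl <- 0x%x\n", val);
}

/* Only touch the device, and only refresh the cached copy, when the control
 * word actually changes -- the compare-before-write pattern in the fragments. */
static void set_promisc_sketch(bool on)
{
	uint32_t new_ctrl = cached_ctrl;

	if (on)
		new_ctrl |= CTRL_PROMISC_SK;
	else
		new_ctrl &= ~CTRL_PROMISC_SK;

	if (new_ctrl == cached_ctrl)
		return;

	write_ctrl_sketch(new_ctrl);
	cached_ctrl = new_ctrl;
}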
1518 ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings); in nfp_net_rss_init_itbl()
1521 static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp) in nfp_net_dp_swap() argument
1523 struct nfp_net_dp new_dp = *dp; in nfp_net_dp_swap()
1525 *dp = nn->dp; in nfp_net_dp_swap()
1526 nn->dp = new_dp; in nfp_net_dp_swap()
1528 WRITE_ONCE(nn->dp.netdev->mtu, new_dp.mtu); in nfp_net_dp_swap()
1530 if (!netif_is_rxfh_configured(nn->dp.netdev)) in nfp_net_dp_swap()
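nfp_net_dp_swap() exchanges the staged data-path configuration with the live one in three assignments; afterwards the live state carries the new parameters and the caller's pointer holds the old ones, so the old rings can still be torn down. A standalone sketch of that swap, with placeholder struct contents:

struct swap_dp_sketch {
	unsigned int mtu;
	unsigned int num_rx_rings;
	unsigned int num_tx_rings;
	/* ... remaining data-path parameters ... */
};

/* Same three-assignment exchange as in the nfp_net_dp_swap() fragments:
 * *staged receives the previous live values, *live receives the new ones. */
static void dp_swap_sketch(struct swap_dp_sketch *live,
			   struct swap_dp_sketch *staged)
{
	struct swap_dp_sketch new_dp = *staged;

	*staged = *live;
	*live = new_dp;
}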
1534 static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp) in nfp_net_dp_swap_enable() argument
1539 nfp_net_dp_swap(nn, dp); in nfp_net_dp_swap_enable()
1542 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); in nfp_net_dp_swap_enable()
1544 err = netif_set_real_num_queues(nn->dp.netdev, in nfp_net_dp_swap_enable()
1545 nn->dp.num_stack_tx_rings, in nfp_net_dp_swap_enable()
1546 nn->dp.num_rx_rings); in nfp_net_dp_swap_enable()
1561 *new = nn->dp; in nfp_net_clone_dp()
1584 static void nfp_net_free_dp(struct nfp_net_dp *dp) in nfp_net_free_dp() argument
1586 kfree(dp->xsk_pools); in nfp_net_free_dp()
1587 kfree(dp); in nfp_net_free_dp()
1591 nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp, in nfp_net_check_config() argument
1597 if (!dp->xdp_prog) in nfp_net_check_config()
1599 if (dp->fl_bufsz > PAGE_SIZE) { in nfp_net_check_config()
1603 if (dp->num_tx_rings > nn->max_tx_rings) { in nfp_net_check_config()
1608 xsk_min_fl_bufsz = nfp_net_calc_fl_bufsz_xsk(dp); in nfp_net_check_config()
1610 if (!dp->xsk_pools[r]) in nfp_net_check_config()
1613 if (xsk_pool_get_rx_frame_size(dp->xsk_pools[r]) < xsk_min_fl_bufsz) { in nfp_net_check_config()
1623 int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp, in nfp_net_ring_reconfig() argument
1628 dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp); in nfp_net_ring_reconfig()
1630 dp->num_stack_tx_rings = dp->num_tx_rings; in nfp_net_ring_reconfig()
1631 if (dp->xdp_prog) in nfp_net_ring_reconfig()
1632 dp->num_stack_tx_rings -= dp->num_rx_rings; in nfp_net_ring_reconfig()
1634 dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings); in nfp_net_ring_reconfig()
1636 err = nfp_net_check_config(nn, dp, extack); in nfp_net_ring_reconfig()
1640 if (!netif_running(dp->netdev)) { in nfp_net_ring_reconfig()
1641 nfp_net_dp_swap(nn, dp); in nfp_net_ring_reconfig()
1647 for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) { in nfp_net_ring_reconfig()
1650 dp->num_r_vecs = r; in nfp_net_ring_reconfig()
1655 err = nfp_net_rx_rings_prepare(nn, dp); in nfp_net_ring_reconfig()
1659 err = nfp_net_tx_rings_prepare(nn, dp); in nfp_net_ring_reconfig()
1667 err = nfp_net_dp_swap_enable(nn, dp); in nfp_net_ring_reconfig()
1674 err2 = nfp_net_dp_swap_enable(nn, dp); in nfp_net_ring_reconfig()
1679 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--) in nfp_net_ring_reconfig()
1682 nfp_net_rx_rings_free(dp); in nfp_net_ring_reconfig()
1683 nfp_net_tx_rings_free(dp); in nfp_net_ring_reconfig()
1687 nfp_net_free_dp(dp); in nfp_net_ring_reconfig()
1692 nfp_net_rx_rings_free(dp); in nfp_net_ring_reconfig()
1694 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--) in nfp_net_ring_reconfig()
1696 nfp_net_free_dp(dp); in nfp_net_ring_reconfig()
1703 struct nfp_net_dp *dp; in nfp_net_change_mtu() local
1710 dp = nfp_net_clone_dp(nn); in nfp_net_change_mtu()
1711 if (!dp) in nfp_net_change_mtu()
1714 dp->mtu = new_mtu; in nfp_net_change_mtu()
1716 return nfp_net_ring_reconfig(nn, dp, NULL); in nfp_net_change_mtu()
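nfp_net_change_mtu() shows the intended lifecycle of a struct nfp_net_dp: clone the live configuration, modify the copy, then hand it to nfp_net_ring_reconfig(), which applies it (or rolls back) and frees the staged copy on every path. A simplified userspace sketch of that clone/modify/reconfig flow; the MTU limit, error values and the collapsed "apply" step are assumptions for illustration only.

#include <stdlib.h>

struct mtu_dp_sketch {
	unsigned int mtu;
	unsigned int fl_bufsz;
};

static struct mtu_dp_sketch live_dp_sketch = { .mtu = 1500 };

/* Stand-in for nfp_net_clone_dp(): duplicate the live configuration so new
 * settings can be validated before they are applied. */
static struct mtu_dp_sketch *clone_dp_sketch(void)
{
	struct mtu_dp_sketch *new_dp = malloc(sizeof(*new_dp));

	if (new_dp)
		*new_dp = live_dp_sketch;
	return new_dp;
}

/* Stand-in for nfp_net_ring_reconfig(): validate, apply, and always free the
 * staged copy -- success or failure -- as the fragments' error paths suggest. */
static int ring_reconfig_sketch(struct mtu_dp_sketch *dp)
{
	int err = 0;

	if (dp->mtu > 9216)              /* assumed jumbo limit, illustration only */
		err = -22;               /* -EINVAL */
	else
		live_dp_sketch = *dp;    /* swap collapsed to a copy for brevity */
	free(dp);
	return err;
}

/* The clone -> modify -> reconfig flow visible in nfp_net_change_mtu(). */
static int change_mtu_sketch(unsigned int new_mtu)
{
	struct mtu_dp_sketch *dp = clone_dp_sketch();

	if (!dp)
		return -12;              /* -ENOMEM */
	dp->mtu = new_mtu;
	return ring_reconfig_sketch(dp);
}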
1999 new_ctrl = nn->dp.ctrl; in nfp_net_set_features()
2067 if (new_ctrl == nn->dp.ctrl) in nfp_net_set_features()
2070 nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl); in nfp_net_set_features()
2076 nn->dp.ctrl = new_ctrl; in nfp_net_set_features()
2168 if (nn->dp.is_vf || nn->vnic_no_name) in nfp_net_get_phys_port_name()
2181 struct nfp_net_dp *dp; in nfp_net_xdp_setup_drv() local
2184 if (!prog == !nn->dp.xdp_prog) { in nfp_net_xdp_setup_drv()
2185 WRITE_ONCE(nn->dp.xdp_prog, prog); in nfp_net_xdp_setup_drv()
2190 dp = nfp_net_clone_dp(nn); in nfp_net_xdp_setup_drv()
2191 if (!dp) in nfp_net_xdp_setup_drv()
2194 dp->xdp_prog = prog; in nfp_net_xdp_setup_drv()
2195 dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings; in nfp_net_xdp_setup_drv()
2196 dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; in nfp_net_xdp_setup_drv()
2197 dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0; in nfp_net_xdp_setup_drv()
2200 err = nfp_net_ring_reconfig(nn, dp, bpf->extack); in nfp_net_xdp_setup_drv()
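nfp_net_xdp_setup_drv() adjusts a cloned data path for attaching or detaching an XDP program: one extra TX ring per RX ring (for XDP_TX), bidirectional DMA mapping for RX buffers, and XDP_PACKET_HEADROOM of space in front of the packet data. A sketch of just those deltas; the enum, struct and 256-byte headroom constant are stand-ins, and the clone/reconfig plumbing is the same as in the MTU sketch above.

#include <stdbool.h>

enum rx_dma_dir_sketch { RX_DMA_FROM_DEVICE, RX_DMA_BIDIRECTIONAL };

struct xdp_dp_sketch {
	bool has_xdp_prog;
	unsigned int num_rx_rings;
	unsigned int num_tx_rings;     /* stack TX + XDP TX */
	unsigned int rx_offset;
	unsigned int rx_dma_off;
	enum rx_dma_dir_sketch rx_dma_dir;
};

#define XDP_HEADROOM_SK 256            /* XDP_PACKET_HEADROOM in current kernels */

/* Apply the XDP-related deltas from the nfp_net_xdp_setup_drv() fragments to
 * a staged configuration; detaching undoes them symmetrically. */
static void dp_apply_xdp_sketch(struct xdp_dp_sketch *dp, bool attach)
{
	if (attach == dp->has_xdp_prog)
		return;                          /* nothing structural changes */

	if (attach) {
		dp->num_tx_rings += dp->num_rx_rings;
		dp->rx_dma_dir = RX_DMA_BIDIRECTIONAL;
		dp->rx_dma_off = XDP_HEADROOM_SK - dp->rx_offset;
	} else {
		dp->num_tx_rings -= dp->num_rx_rings;
		dp->rx_dma_dir = RX_DMA_FROM_DEVICE;
		dp->rx_dma_off = 0;
	}
	dp->has_xdp_prog = attach;
}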
2268 mode = (nn->dp.ctrl & NFP_NET_CFG_CTRL_VEPA) ? in nfp_net_bridge_getlink()
2292 new_ctrl = nn->dp.ctrl; in nfp_net_bridge_setlink()
2301 if (new_ctrl == nn->dp.ctrl) in nfp_net_bridge_setlink()
2307 nn->dp.ctrl = new_ctrl; in nfp_net_bridge_setlink()
2414 nn->dp.is_vf ? "VF " : "", in nfp_net_info()
2415 nn->dp.num_tx_rings, nn->max_tx_rings, in nfp_net_info()
2416 nn->dp.num_rx_rings, nn->max_rx_rings); in nfp_net_info()
2488 nn->dp.netdev = netdev; in nfp_net_alloc()
2495 nn->dp.dev = &pdev->dev; in nfp_net_alloc()
2496 nn->dp.ctrl_bar = ctrl_bar; in nfp_net_alloc()
2503 nn->dp.ops = &nfp_nfd3_ops; in nfp_net_alloc()
2513 nn->dp.ops = &nfp_nfdk_ops; in nfp_net_alloc()
2520 if ((dma_mask & nn->dp.ops->dma_mask) != dma_mask) { in nfp_net_alloc()
2523 nn->dp.ops->dma_mask, dma_mask); in nfp_net_alloc()
2531 nn->dp.num_tx_rings = min_t(unsigned int, in nfp_net_alloc()
2533 nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings, in nfp_net_alloc()
2536 nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings); in nfp_net_alloc()
2537 nn->dp.num_r_vecs = min_t(unsigned int, in nfp_net_alloc()
2538 nn->dp.num_r_vecs, num_online_cpus()); in nfp_net_alloc()
2539 nn->max_r_vecs = nn->dp.num_r_vecs; in nfp_net_alloc()
2541 nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(nn->dp.xsk_pools), in nfp_net_alloc()
2543 if (!nn->dp.xsk_pools) { in nfp_net_alloc()
2548 nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT; in nfp_net_alloc()
2549 nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT; in nfp_net_alloc()
2558 err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar, in nfp_net_alloc()
2570 if (nn->dp.netdev) in nfp_net_alloc()
2571 free_netdev(nn->dp.netdev); in nfp_net_alloc()
2586 kfree(nn->dp.xsk_pools); in nfp_net_free()
2587 if (nn->dp.netdev) in nfp_net_free()
2588 free_netdev(nn->dp.netdev); in nfp_net_free()
2632 dev_warn(nn->dp.dev, in nfp_net_rss_init()
2668 struct net_device *netdev = nn->dp.netdev; in nfp_net_netdev_init()
2670 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr); in nfp_net_netdev_init()
2672 netdev->mtu = nn->dp.mtu; in nfp_net_netdev_init()
2686 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY; in nfp_net_netdev_init()
2690 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM; in nfp_net_netdev_init()
2694 nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER; in nfp_net_netdev_init()
2701 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?: in nfp_net_netdev_init()
2720 nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN; in nfp_net_netdev_init()
2725 nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE; in nfp_net_netdev_init()
2734 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?: in nfp_net_netdev_init()
2742 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?: in nfp_net_netdev_init()
2748 nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER; in nfp_net_netdev_init()
2752 nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXQINQ; in nfp_net_netdev_init()
2764 nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ; in nfp_net_netdev_init()
2771 switch (nn->dp.ops->version) { in nfp_net_netdev_init()
2806 nn->dp.chained_metadata_format = nn->fw_ver.major == 4 || in nfp_net_read_caps()
2807 !nn->dp.netdev || in nfp_net_read_caps()
2813 if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4) in nfp_net_read_caps()
2825 nn->dp.rx_offset = reg; in nfp_net_read_caps()
2827 nn->dp.rx_offset = NFP_NET_RX_OFFSET; in nfp_net_read_caps()
2831 nn->cap &= nn->dp.ops->cap_mask; in nfp_net_read_caps()
2834 if (!nn->dp.netdev) in nfp_net_read_caps()
2850 nn->dp.rx_dma_dir = DMA_FROM_DEVICE; in nfp_net_init()
2858 nn->dp.mtu = min(nn->app->ctrl_mtu, nn->max_mtu); in nfp_net_init()
2860 nn->dp.mtu = nn->max_mtu; in nfp_net_init()
2862 nn->dp.mtu = NFP_NET_DEFAULT_MTU; in nfp_net_init()
2864 nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp); in nfp_net_init()
2867 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_CMSG_DATA; in nfp_net_init()
2871 nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?: in nfp_net_init()
2877 nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC; in nfp_net_init()
2882 nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD; in nfp_net_init()
2887 nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXRWB; in nfp_net_init()
2890 nn->dp.ctrl_w1 |= NFP_NET_CFG_CTRL_MCAST_FILTER; in nfp_net_init()
2905 if (nn->dp.netdev) { in nfp_net_init()
2921 if (!nn->dp.netdev) in nfp_net_init()
2930 return register_netdev(nn->dp.netdev); in nfp_net_init()
2943 if (!nn->dp.netdev) in nfp_net_clean()
2946 unregister_netdev(nn->dp.netdev); in nfp_net_clean()