Lines Matching refs:nn

Each entry below is a line from the NFP driver's ethtool code that references the vNIC handle nn (a struct nfp_net *): the file line number, the matching source line, and the enclosing function. A trailing "local" marks functions where nn is a local variable (typically obtained via netdev_priv()), and "argument" marks functions that receive nn as a parameter.

220 	struct nfp_net *nn = netdev_priv(netdev);  in nfp_net_get_drvinfo()  local
223 nn->fw_ver.extend, nn->fw_ver.class, in nfp_net_get_drvinfo()
224 nn->fw_ver.major, nn->fw_ver.minor); in nfp_net_get_drvinfo()
225 strscpy(drvinfo->bus_info, pci_name(nn->pdev), in nfp_net_get_drvinfo()
228 nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo); in nfp_net_get_drvinfo()
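
The references in nfp_net_get_drvinfo() show the standard ethtool drvinfo pattern: recover the driver-private structure with netdev_priv(), format the firmware version into drvinfo->fw_version, and copy the PCI bus name into drvinfo->bus_info. A minimal sketch of that pattern; my_nic and its fw_ver layout are hypothetical stand-ins for the driver's private state.

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/pci.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Hypothetical driver-private structure; not the driver's actual layout. */
    struct my_nic {
        struct pci_dev *pdev;
        struct { u8 extend, class, major, minor; } fw_ver;
    };

    static void my_get_drvinfo(struct net_device *netdev,
                               struct ethtool_drvinfo *drvinfo)
    {
        /* The private area lives directly behind struct net_device. */
        struct my_nic *nn = netdev_priv(netdev);

        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                 "%d.%d.%d.%d", nn->fw_ver.extend, nn->fw_ver.class,
                 nn->fw_ver.major, nn->fw_ver.minor);
        strscpy(drvinfo->bus_info, pci_name(nn->pdev),
                sizeof(drvinfo->bus_info));
    }
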
489 struct nfp_net *nn; in nfp_net_get_link_ksettings() local
530 nn = netdev_priv(netdev); in nfp_net_get_link_ksettings()
532 sts = nn_readw(nn, NFP_NET_CFG_STS); in nfp_net_get_link_ksettings()
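
nfp_net_get_link_ksettings() reads the NFP_NET_CFG_STS word through nn_readw(), presumably a thin 16-bit read from the mapped control BAR, and translates the bits into ethtool link settings. A hedged sketch of that register-read style; the offset, bit layout, fixed speed and my_nic are illustrative assumptions.

    #include <linux/bits.h>
    #include <linux/ethtool.h>
    #include <linux/io.h>
    #include <linux/netdevice.h>
    #include <linux/types.h>

    #define MY_CFG_STS   0x0034        /* hypothetical status-word offset */
    #define MY_STS_LINK  BIT(0)        /* hypothetical link-up bit */

    struct my_nic {
        u8 __iomem *ctrl_bar;          /* ioremapped device config BAR */
    };

    static int my_get_link_ksettings(struct net_device *netdev,
                                     struct ethtool_link_ksettings *cmd)
    {
        struct my_nic *nn = netdev_priv(netdev);
        u16 sts = readw(nn->ctrl_bar + MY_CFG_STS);  /* like nn_readw(nn, ...) */

        if (sts & MY_STS_LINK) {
            cmd->base.speed = SPEED_25000;           /* placeholder fixed speed */
            cmd->base.duplex = DUPLEX_FULL;
        } else {
            cmd->base.speed = SPEED_UNKNOWN;
            cmd->base.duplex = DUPLEX_UNKNOWN;
        }
        cmd->base.autoneg = AUTONEG_DISABLE;
        return 0;
    }
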
627 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_get_ringparam() local
628 u32 qc_max = nn->dev_info->max_qc_size; in nfp_net_get_ringparam()
631 ring->tx_max_pending = qc_max / nn->dp.ops->tx_min_desc_per_pkt; in nfp_net_get_ringparam()
632 ring->rx_pending = nn->dp.rxd_cnt; in nfp_net_get_ringparam()
633 ring->tx_pending = nn->dp.txd_cnt; in nfp_net_get_ringparam()
636 static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt, in nfp_net_set_ring_size() argument
641 dp = nfp_net_clone_dp(nn); in nfp_net_set_ring_size()
648 return nfp_net_ring_reconfig(nn, dp, extack); in nfp_net_set_ring_size()
657 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_set_ringparam() local
663 qc_min = nn->dev_info->min_qc_size; in nfp_net_set_ringparam()
664 qc_max = nn->dev_info->max_qc_size; in nfp_net_set_ringparam()
665 tx_dpp = nn->dp.ops->tx_min_desc_per_pkt; in nfp_net_set_ringparam()
680 if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt) in nfp_net_set_ringparam()
683 nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n", in nfp_net_set_ringparam()
684 nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt); in nfp_net_set_ringparam()
686 return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt, extack); in nfp_net_set_ringparam()
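
nfp_net_get_ringparam() derives the ring limits from the device's queue-controller size (with the TX maximum divided by the minimum descriptors per packet), and nfp_net_set_ringparam() validates the request, skips the work if nothing changed, and applies the new sizes by cloning the data-path structure and handing it to nfp_net_ring_reconfig(). A rough sketch of that validate-then-clone-then-reconfig flow; every name, field and the power-of-two rounding below are assumptions, not the driver's actual code.

    #include <linux/errno.h>
    #include <linux/ethtool.h>
    #include <linux/log2.h>
    #include <linux/netdevice.h>
    #include <linux/netlink.h>
    #include <linux/types.h>

    struct my_dp { u32 rxd_cnt, txd_cnt; };

    struct my_nic {
        struct my_dp dp;
        u32 min_qc_size, max_qc_size, tx_min_desc_per_pkt;
    };

    /* Hypothetical stand-ins for the driver's clone/reconfig machinery;
     * the reconfig helper is assumed to take ownership of the clone. */
    struct my_dp *my_clone_dp(struct my_nic *nn);
    int my_ring_reconfig(struct my_nic *nn, struct my_dp *dp,
                         struct netlink_ext_ack *extack);

    static int my_set_ringparam(struct net_device *netdev,
                                struct ethtool_ringparam *ring,
                                struct kernel_ethtool_ringparam *kernel_ring,
                                struct netlink_ext_ack *extack)
    {
        struct my_nic *nn = netdev_priv(netdev);
        u32 rxd_cnt, txd_cnt;
        struct my_dp *dp;

        if (!ring->rx_pending || !ring->tx_pending)
            return -EINVAL;

        /* Assumed rounding: queue controllers take power-of-two sizes. */
        rxd_cnt = roundup_pow_of_two(ring->rx_pending);
        txd_cnt = roundup_pow_of_two(ring->tx_pending);

        /* Reject sizes the queue controller cannot express. */
        if (rxd_cnt < nn->min_qc_size || rxd_cnt > nn->max_qc_size ||
            txd_cnt < nn->min_qc_size / nn->tx_min_desc_per_pkt ||
            txd_cnt > nn->max_qc_size / nn->tx_min_desc_per_pkt)
            return -EINVAL;

        if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
            return 0;                          /* nothing to change */

        /* Build a modified copy of the data path and swap it in. */
        dp = my_clone_dp(nn);
        if (!dp)
            return -ENOMEM;
        dp->rxd_cnt = rxd_cnt;
        dp->txd_cnt = txd_cnt;
        return my_ring_reconfig(nn, dp, extack);
    }
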
735 struct nfp_net *nn = netdev_priv(netdev); in nfp_test_fw() local
738 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); in nfp_test_fw()
845 struct nfp_net *nn = netdev_priv(netdev); in nfp_vnic_get_sw_stats_count() local
847 return NN_RVEC_GATHER_STATS + nn->max_r_vecs * NN_RVEC_PER_Q_STATS + in nfp_vnic_get_sw_stats_count()
853 struct nfp_net *nn = netdev_priv(netdev); in nfp_vnic_get_sw_stats_strings() local
856 for (i = 0; i < nn->max_r_vecs; i++) { in nfp_vnic_get_sw_stats_strings()
887 struct nfp_net *nn = netdev_priv(netdev); in nfp_vnic_get_sw_stats() local
891 for (i = 0; i < nn->max_r_vecs; i++) { in nfp_vnic_get_sw_stats()
895 start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); in nfp_vnic_get_sw_stats()
896 data[0] = nn->r_vecs[i].rx_pkts; in nfp_vnic_get_sw_stats()
897 tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; in nfp_vnic_get_sw_stats()
898 tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; in nfp_vnic_get_sw_stats()
899 tmp[2] = nn->r_vecs[i].hw_csum_rx_complete; in nfp_vnic_get_sw_stats()
900 tmp[3] = nn->r_vecs[i].hw_csum_rx_error; in nfp_vnic_get_sw_stats()
901 tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail; in nfp_vnic_get_sw_stats()
902 tmp[5] = nn->r_vecs[i].hw_tls_rx; in nfp_vnic_get_sw_stats()
903 } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start)); in nfp_vnic_get_sw_stats()
906 start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); in nfp_vnic_get_sw_stats()
907 data[1] = nn->r_vecs[i].tx_pkts; in nfp_vnic_get_sw_stats()
908 data[2] = nn->r_vecs[i].tx_busy; in nfp_vnic_get_sw_stats()
909 tmp[6] = nn->r_vecs[i].hw_csum_tx; in nfp_vnic_get_sw_stats()
910 tmp[7] = nn->r_vecs[i].hw_csum_tx_inner; in nfp_vnic_get_sw_stats()
911 tmp[8] = nn->r_vecs[i].tx_gather; in nfp_vnic_get_sw_stats()
912 tmp[9] = nn->r_vecs[i].tx_lso; in nfp_vnic_get_sw_stats()
913 tmp[10] = nn->r_vecs[i].hw_tls_tx; in nfp_vnic_get_sw_stats()
914 tmp[11] = nn->r_vecs[i].tls_tx_fallback; in nfp_vnic_get_sw_stats()
915 tmp[12] = nn->r_vecs[i].tls_tx_no_fallback; in nfp_vnic_get_sw_stats()
916 } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); in nfp_vnic_get_sw_stats()
927 *data++ = atomic_read(&nn->ktls_no_space); in nfp_vnic_get_sw_stats()
928 *data++ = atomic_read(&nn->ktls_rx_resync_req); in nfp_vnic_get_sw_stats()
929 *data++ = atomic_read(&nn->ktls_rx_resync_ign); in nfp_vnic_get_sw_stats()
930 *data++ = atomic_read(&nn->ktls_rx_resync_sent); in nfp_vnic_get_sw_stats()
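
The per-ring software statistics in nfp_vnic_get_sw_stats() are snapshotted with the u64_stats seqcount pattern: the reader copies the counters between u64_stats_fetch_begin() and u64_stats_fetch_retry() and loops if a datapath writer updated them in the meantime, which keeps 64-bit counters consistent on 32-bit machines without taking a lock. A self-contained illustration of the writer/reader pairing; the ring-stats structure is hypothetical.

    #include <linux/types.h>
    #include <linux/u64_stats_sync.h>

    struct my_ring_stats {
        struct u64_stats_sync sync;
        u64 pkts;
        u64 bytes;
    };

    /* Writer side: called from the datapath, e.g. NAPI poll. */
    static void my_ring_stats_add(struct my_ring_stats *rs, unsigned int len)
    {
        u64_stats_update_begin(&rs->sync);
        rs->pkts++;
        rs->bytes += len;
        u64_stats_update_end(&rs->sync);
    }

    /* Reader side: the ethtool stats callback snapshots a consistent pair. */
    static void my_ring_stats_read(struct my_ring_stats *rs,
                                   u64 *pkts, u64 *bytes)
    {
        unsigned int start;

        do {
            start = u64_stats_fetch_begin(&rs->sync);
            *pkts = rs->pkts;
            *bytes = rs->bytes;
        } while (u64_stats_fetch_retry(&rs->sync, start));
    }
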
989 static unsigned int nfp_vnic_get_tlv_stats_count(struct nfp_net *nn) in nfp_vnic_get_tlv_stats_count() argument
991 return nn->tlv_caps.vnic_stats_cnt + nn->max_r_vecs * 4; in nfp_vnic_get_tlv_stats_count()
994 static u8 *nfp_vnic_get_tlv_stats_strings(struct nfp_net *nn, u8 *data) in nfp_vnic_get_tlv_stats_strings() argument
1000 mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off; in nfp_vnic_get_tlv_stats_strings()
1001 for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++) { in nfp_vnic_get_tlv_stats_strings()
1017 for (i = 0; i < nn->max_r_vecs; i++) { in nfp_vnic_get_tlv_stats_strings()
1027 static u64 *nfp_vnic_get_tlv_stats(struct nfp_net *nn, u64 *data) in nfp_vnic_get_tlv_stats() argument
1032 mem = nn->dp.ctrl_bar + nn->tlv_caps.vnic_stats_off; in nfp_vnic_get_tlv_stats()
1033 mem += roundup(2 * nn->tlv_caps.vnic_stats_cnt, 8); in nfp_vnic_get_tlv_stats()
1034 for (i = 0; i < nn->tlv_caps.vnic_stats_cnt; i++) in nfp_vnic_get_tlv_stats()
1037 mem = nn->dp.ctrl_bar; in nfp_vnic_get_tlv_stats()
1038 for (i = 0; i < nn->max_r_vecs; i++) { in nfp_vnic_get_tlv_stats()
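
The TLV statistics helpers read a firmware-described counter block out of the control BAR: the TLV capabilities supply its offset and the number of counters, a 2-byte-per-counter index area appears to come first, and the 64-bit values start at the next 8-byte boundary (the roundup(2 * cnt, 8) step above), with four additional per-ring counters read relative to the start of the BAR. A hedged sketch of walking the counter portion of such a block; the layout reading and all names are inferences from the listing, not the driver's definitions.

    #include <linux/io.h>
    #include <linux/io-64-nonatomic-lo-hi.h>   /* readq() fallback on 32-bit */
    #include <linux/kernel.h>                  /* roundup() */
    #include <linux/types.h>

    struct my_nic {
        u8 __iomem *ctrl_bar;
        unsigned int vnic_stats_off;   /* from the device's TLV capability area */
        unsigned int vnic_stats_cnt;
    };

    static u64 *my_read_tlv_stats(struct my_nic *nn, u64 *data)
    {
        u8 __iomem *mem = nn->ctrl_bar + nn->vnic_stats_off;
        unsigned int i;

        /* Skip the assumed 2-byte-per-counter index area, aligned to 8. */
        mem += roundup(2 * nn->vnic_stats_cnt, 8);
        for (i = 0; i < nn->vnic_stats_cnt; i++)
            *data++ = readq(mem + i * sizeof(u64));

        return data;
    }
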
1092 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_get_strings() local
1097 if (!nn->tlv_caps.vnic_stats_off) in nfp_net_get_strings()
1099 nn->max_r_vecs, in nfp_net_get_strings()
1102 data = nfp_vnic_get_tlv_stats_strings(nn, data); in nfp_net_get_strings()
1104 data = nfp_app_port_get_stats_strings(nn->port, data); in nfp_net_get_strings()
1116 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_get_stats() local
1119 if (!nn->tlv_caps.vnic_stats_off) in nfp_net_get_stats()
1120 data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, in nfp_net_get_stats()
1121 nn->max_r_vecs); in nfp_net_get_stats()
1123 data = nfp_vnic_get_tlv_stats(nn, data); in nfp_net_get_stats()
1125 data = nfp_app_port_get_stats(nn->port, data); in nfp_net_get_stats()
1130 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_get_sset_count() local
1136 if (!nn->tlv_caps.vnic_stats_off) in nfp_net_get_sset_count()
1137 cnt += nfp_vnic_get_hw_stats_count(nn->max_r_vecs); in nfp_net_get_sset_count()
1139 cnt += nfp_vnic_get_tlv_stats_count(nn); in nfp_net_get_sset_count()
1141 cnt += nfp_app_port_get_stats_count(nn->port); in nfp_net_get_sset_count()
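
nfp_net_get_strings(), nfp_net_get_stats() and nfp_net_get_sset_count() have to walk the same sources in the same order (the software ring stats, then either the fixed hardware stats or the TLV block depending on vnic_stats_off, then the app port stats); if the three ever disagree, ethtool's string table and value table go out of sync. A minimal trio showing that contract on a toy two-counter device; names and layout are illustrative only.

    #include <linux/errno.h>
    #include <linux/ethtool.h>
    #include <linux/kernel.h>
    #include <linux/netdevice.h>
    #include <linux/string.h>
    #include <linux/types.h>

    struct my_nic { u64 rx_pkts, tx_pkts; };

    static const char my_stat_names[][ETH_GSTRING_LEN] = {
        "rx_packets",
        "tx_packets",
    };

    static void my_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
    {
        unsigned int i;

        if (stringset != ETH_SS_STATS)
            return;
        for (i = 0; i < ARRAY_SIZE(my_stat_names); i++) {
            memcpy(data, my_stat_names[i], ETH_GSTRING_LEN);
            data += ETH_GSTRING_LEN;
        }
    }

    static void my_get_stats(struct net_device *netdev,
                             struct ethtool_stats *stats, u64 *data)
    {
        struct my_nic *nn = netdev_priv(netdev);

        *data++ = nn->rx_pkts;         /* must match my_stat_names[] order */
        *data++ = nn->tx_pkts;
    }

    static int my_get_sset_count(struct net_device *netdev, int sset)
    {
        return sset == ETH_SS_STATS ? ARRAY_SIZE(my_stat_names) : -EOPNOTSUPP;
    }
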
1306 static int nfp_net_get_rss_hash_opts(struct nfp_net *nn, in nfp_net_get_rss_hash_opts() argument
1313 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)) in nfp_net_get_rss_hash_opts()
1321 if (nn->rss_cfg & nfp_rss_flag) in nfp_net_get_rss_hash_opts()
1401 static int nfp_net_get_fs_rule(struct nfp_net *nn, struct ethtool_rxnfc *cmd) in nfp_net_get_fs_rule() argument
1405 if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER)) in nfp_net_get_fs_rule()
1411 list_for_each_entry(entry, &nn->fs.list, node) { in nfp_net_get_fs_rule()
1423 static int nfp_net_get_fs_loc(struct nfp_net *nn, u32 *rule_locs) in nfp_net_get_fs_loc() argument
1428 if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER)) in nfp_net_get_fs_loc()
1431 list_for_each_entry(entry, &nn->fs.list, node) in nfp_net_get_fs_loc()
1440 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_get_rxnfc() local
1444 cmd->data = nn->dp.num_rx_rings; in nfp_net_get_rxnfc()
1447 cmd->rule_cnt = nn->fs.count; in nfp_net_get_rxnfc()
1450 return nfp_net_get_fs_rule(nn, cmd); in nfp_net_get_rxnfc()
1453 return nfp_net_get_fs_loc(nn, rule_locs); in nfp_net_get_rxnfc()
1455 return nfp_net_get_rss_hash_opts(nn, cmd); in nfp_net_get_rxnfc()
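
nfp_net_get_rxnfc() is a single switch on cmd->cmd that reports the RX ring count, the flow-steering rule count and contents, or the RSS hash fields, with each branch gated on the relevant capability bit. A skeleton of that dispatcher shape; the branches left as -EOPNOTSUPP are stubs, and the fields of my_nic are stand-ins.

    #include <linux/errno.h>
    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/types.h>

    struct my_nic {
        unsigned int num_rx_rings;
        unsigned int fs_count;         /* number of installed steering rules */
    };

    static int my_get_rxnfc(struct net_device *netdev,
                            struct ethtool_rxnfc *cmd, u32 *rule_locs)
    {
        struct my_nic *nn = netdev_priv(netdev);

        switch (cmd->cmd) {
        case ETHTOOL_GRXRINGS:
            cmd->data = nn->num_rx_rings;
            return 0;
        case ETHTOOL_GRXCLSRLCNT:
            cmd->rule_cnt = nn->fs_count;
            return 0;
        case ETHTOOL_GRXCLSRULE:
            return -EOPNOTSUPP;    /* would look the rule up by cmd->fs.location */
        case ETHTOOL_GRXCLSRLALL:
            return -EOPNOTSUPP;    /* would fill rule_locs[] from the rule list */
        case ETHTOOL_GRXFH:
            return -EOPNOTSUPP;    /* would report RXH_* bits for cmd->flow_type */
        default:
            return -EOPNOTSUPP;
        }
    }
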
1461 static int nfp_net_set_rss_hash_opt(struct nfp_net *nn, in nfp_net_set_rss_hash_opt() argument
1464 u32 new_rss_cfg = nn->rss_cfg; in nfp_net_set_rss_hash_opt()
1468 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)) in nfp_net_set_rss_hash_opt()
1496 new_rss_cfg |= FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc); in nfp_net_set_rss_hash_opt()
1499 if (new_rss_cfg == nn->rss_cfg) in nfp_net_set_rss_hash_opt()
1502 writel(new_rss_cfg, nn->dp.ctrl_bar + NFP_NET_CFG_RSS_CTRL); in nfp_net_set_rss_hash_opt()
1503 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS); in nfp_net_set_rss_hash_opt()
1507 nn->rss_cfg = new_rss_cfg; in nfp_net_set_rss_hash_opt()
1509 nn_dbg(nn, "Changed RSS config to 0x%x\n", nn->rss_cfg); in nfp_net_set_rss_hash_opt()
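
nfp_net_set_rss_hash_opt() follows a careful write-then-commit order: build the new RSS control word (folding in the hash function with FIELD_PREP()), return early if it matches the cached value, write it to the control BAR, ask the firmware to apply it with a reconfig, and only then update the cached copy. A sketch of that ordering with a hypothetical register offset, field mask and reconfig helper.

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/types.h>

    #define MY_RSS_CTRL   0x0100                /* hypothetical register offset */
    #define MY_RSS_HFUNC  GENMASK(27, 24)       /* hypothetical hash-function field */

    struct my_nic {
        u8 __iomem *ctrl_bar;
        u32 rss_cfg;                            /* cached copy of the RSS control word */
        u32 rss_hfunc;
    };

    int my_reconfig_rss(struct my_nic *nn);     /* ask firmware to apply RSS changes */

    static int my_set_rss_cfg(struct my_nic *nn, u32 flag_bits)
    {
        u32 new_rss_cfg = flag_bits;
        int err;

        new_rss_cfg |= FIELD_PREP(MY_RSS_HFUNC, nn->rss_hfunc);
        if (new_rss_cfg == nn->rss_cfg)
            return 0;                           /* no change, no reconfig */

        writel(new_rss_cfg, nn->ctrl_bar + MY_RSS_CTRL);
        err = my_reconfig_rss(nn);
        if (err)
            return err;

        nn->rss_cfg = new_rss_cfg;              /* cache only after success */
        return 0;
    }
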
1596 static int nfp_net_fs_check_existing(struct nfp_net *nn, struct nfp_fs_entry *new) in nfp_net_fs_check_existing() argument
1600 list_for_each_entry(entry, &nn->fs.list, node) { in nfp_net_fs_check_existing()
1612 static int nfp_net_fs_add(struct nfp_net *nn, struct ethtool_rxnfc *cmd) in nfp_net_fs_add() argument
1619 if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER)) in nfp_net_fs_add()
1630 fs->ring_cookie >= nn->dp.num_rx_rings) in nfp_net_fs_add()
1659 nn_err(nn, "Please use ip4/ip6 flow type instead.\n"); in nfp_net_fs_add()
1680 id = nfp_net_fs_check_existing(nn, new); in nfp_net_fs_add()
1682 nn_err(nn, "Identical rule is existing in %d.\n", id); in nfp_net_fs_add()
1688 list_for_each_entry(entry, &nn->fs.list, node) { in nfp_net_fs_add()
1690 err = nfp_net_fs_del_hw(nn, entry); in nfp_net_fs_add()
1694 nn->fs.count--; in nfp_net_fs_add()
1695 err = nfp_net_fs_add_hw(nn, new); in nfp_net_fs_add()
1699 nn->fs.count++; in nfp_net_fs_add()
1710 if (nn->fs.count == NFP_FS_MAX_ENTRY) { in nfp_net_fs_add()
1715 err = nfp_net_fs_add_hw(nn, new); in nfp_net_fs_add()
1720 nn->fs.count++; in nfp_net_fs_add()
1729 static int nfp_net_fs_del(struct nfp_net *nn, struct ethtool_rxnfc *cmd) in nfp_net_fs_del() argument
1734 if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER)) in nfp_net_fs_del()
1737 if (!nn->fs.count || cmd->fs.location >= NFP_FS_MAX_ENTRY) in nfp_net_fs_del()
1740 list_for_each_entry(entry, &nn->fs.list, node) { in nfp_net_fs_del()
1742 err = nfp_net_fs_del_hw(nn, entry); in nfp_net_fs_del()
1748 nn->fs.count--; in nfp_net_fs_del()
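
The flow-steering paths keep rules on a list guarded by the FLOW_STEER capability and a fixed NFP_FS_MAX_ENTRY budget; an add at an occupied location first deletes the old rule from hardware, and fs.count is adjusted around the hardware calls so it tracks what the device actually holds. A pared-down sketch of the delete path only, with the hardware helper left as a prototype, locking omitted, and names and the budget value assumed.

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    #define MY_FS_MAX_ENTRY 1024                 /* hypothetical rule budget */

    struct my_fs_entry {
        struct list_head node;
        u32 loc;                                 /* ethtool rule location */
    };

    struct my_nic {
        struct list_head fs_list;                /* rules, caller holds the rule lock */
        unsigned int fs_count;
    };

    int my_fs_del_hw(struct my_nic *nn, struct my_fs_entry *entry);  /* remove from HW */

    static int my_fs_del(struct my_nic *nn, u32 location)
    {
        struct my_fs_entry *entry;
        int err;

        if (!nn->fs_count || location >= MY_FS_MAX_ENTRY)
            return -EINVAL;

        list_for_each_entry(entry, &nn->fs_list, node) {
            if (entry->loc != location)
                continue;

            err = my_fs_del_hw(nn, entry);       /* drop from hardware first */
            if (err)
                return err;

            list_del(&entry->node);              /* then from the software list */
            kfree(entry);
            nn->fs_count--;
            return 0;
        }

        return -ENOENT;
    }
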
1763 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_set_rxnfc() local
1767 return nfp_net_set_rss_hash_opt(nn, cmd); in nfp_net_set_rxnfc()
1769 return nfp_net_fs_add(nn, cmd); in nfp_net_set_rxnfc()
1771 return nfp_net_fs_del(nn, cmd); in nfp_net_set_rxnfc()
1779 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_get_rxfh_indir_size() local
1781 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)) in nfp_net_get_rxfh_indir_size()
1784 return ARRAY_SIZE(nn->rss_itbl); in nfp_net_get_rxfh_indir_size()
1789 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_get_rxfh_key_size() local
1791 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)) in nfp_net_get_rxfh_key_size()
1794 return nfp_net_rss_key_sz(nn); in nfp_net_get_rxfh_key_size()
1800 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_get_rxfh() local
1803 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)) in nfp_net_get_rxfh()
1807 for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++) in nfp_net_get_rxfh()
1808 rxfh->indir[i] = nn->rss_itbl[i]; in nfp_net_get_rxfh()
1810 memcpy(rxfh->key, nn->rss_key, nfp_net_rss_key_sz(nn)); in nfp_net_get_rxfh()
1812 rxfh->hfunc = nn->rss_hfunc; in nfp_net_get_rxfh()
1823 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_set_rxfh() local
1826 if (!(nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) || in nfp_net_set_rxfh()
1828 rxfh->hfunc == nn->rss_hfunc)) in nfp_net_set_rxfh()
1835 memcpy(nn->rss_key, rxfh->key, nfp_net_rss_key_sz(nn)); in nfp_net_set_rxfh()
1836 nfp_net_rss_write_key(nn); in nfp_net_set_rxfh()
1839 for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++) in nfp_net_set_rxfh()
1840 nn->rss_itbl[i] = rxfh->indir[i]; in nfp_net_set_rxfh()
1842 nfp_net_rss_write_itbl(nn); in nfp_net_set_rxfh()
1845 return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RSS); in nfp_net_set_rxfh()
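
The RXFH callbacks expose the RSS indirection table, hash key and hash function through struct ethtool_rxfh_param; the core may pass NULL for pieces userspace did not ask for, so drivers typically guard the indir and key copies, and the set path ends with an RSS reconfig as seen above. A hedged sketch of the get side; the table size, key size and my_nic are assumptions.

    #include <linux/ethtool.h>
    #include <linux/kernel.h>
    #include <linux/netdevice.h>
    #include <linux/string.h>
    #include <linux/types.h>

    #define MY_RSS_ITBL_SZ  128                  /* hypothetical indirection-table size */
    #define MY_RSS_KEY_SZ   40                   /* hypothetical hash-key size in bytes */

    struct my_nic {
        u8 rss_itbl[MY_RSS_ITBL_SZ];
        u8 rss_key[MY_RSS_KEY_SZ];
        u8 rss_hfunc;                            /* one of ETH_RSS_HASH_* */
    };

    static int my_get_rxfh(struct net_device *netdev,
                           struct ethtool_rxfh_param *rxfh)
    {
        struct my_nic *nn = netdev_priv(netdev);
        unsigned int i;

        if (rxfh->indir)
            for (i = 0; i < ARRAY_SIZE(nn->rss_itbl); i++)
                rxfh->indir[i] = nn->rss_itbl[i];
        if (rxfh->key)
            memcpy(rxfh->key, nn->rss_key, MY_RSS_KEY_SZ);
        rxfh->hfunc = nn->rss_hfunc;

        return 0;
    }
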
1858 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_get_regs() local
1862 regs->version = nn_readl(nn, NFP_NET_CFG_VERSION); in nfp_net_get_regs()
1865 regs_buf[i] = readl(nn->dp.ctrl_bar + (i * sizeof(u32))); in nfp_net_get_regs()
1873 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_get_coalesce() local
1875 if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD)) in nfp_net_get_coalesce()
1878 ec->use_adaptive_rx_coalesce = nn->rx_coalesce_adapt_on; in nfp_net_get_coalesce()
1879 ec->use_adaptive_tx_coalesce = nn->tx_coalesce_adapt_on; in nfp_net_get_coalesce()
1881 ec->rx_coalesce_usecs = nn->rx_coalesce_usecs; in nfp_net_get_coalesce()
1882 ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames; in nfp_net_get_coalesce()
1883 ec->tx_coalesce_usecs = nn->tx_coalesce_usecs; in nfp_net_get_coalesce()
1884 ec->tx_max_coalesced_frames = nn->tx_coalesce_max_frames; in nfp_net_get_coalesce()
2132 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_set_coalesce() local
2139 factor = nn->tlv_caps.me_freq_mhz / 16; in nfp_net_set_coalesce()
2154 if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD)) in nfp_net_set_coalesce()
2191 nn->rx_coalesce_adapt_on = !!ec->use_adaptive_rx_coalesce; in nfp_net_set_coalesce()
2192 nn->tx_coalesce_adapt_on = !!ec->use_adaptive_tx_coalesce; in nfp_net_set_coalesce()
2194 nn->rx_coalesce_usecs = ec->rx_coalesce_usecs; in nfp_net_set_coalesce()
2195 nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames; in nfp_net_set_coalesce()
2196 nn->tx_coalesce_usecs = ec->tx_coalesce_usecs; in nfp_net_set_coalesce()
2197 nn->tx_coalesce_max_frames = ec->tx_max_coalesced_frames; in nfp_net_set_coalesce()
2200 nfp_net_coalesce_write_cfg(nn); in nfp_net_set_coalesce()
2201 return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD); in nfp_net_set_coalesce()
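
nfp_net_set_coalesce() requires the IRQMOD capability, uses a factor derived from the ME clock (me_freq_mhz / 16) when checking the requested microsecond values against what the hardware can encode, then caches the accepted parameters, writes them out and issues an IRQMOD reconfig; nfp_net_get_coalesce() simply reads the same cached fields back. A compressed sketch of the store-write-reconfig tail, with the range checks omitted and all helpers and the capability bit hypothetical.

    #include <linux/bits.h>
    #include <linux/errno.h>
    #include <linux/ethtool.h>
    #include <linux/netdevice.h>
    #include <linux/netlink.h>
    #include <linux/types.h>

    #define MY_CAP_IRQMOD BIT(0)                 /* hypothetical capability bit */

    struct my_nic {
        u32 cap;
        u32 rx_usecs, rx_frames, tx_usecs, tx_frames;
        bool rx_adapt, tx_adapt;
    };

    void my_coalesce_write_cfg(struct my_nic *nn);   /* push values to the ctrl BAR */
    int my_reconfig_irqmod(struct my_nic *nn);       /* tell firmware to reload them */

    static int my_set_coalesce(struct net_device *netdev,
                               struct ethtool_coalesce *ec,
                               struct kernel_ethtool_coalesce *kernel_coal,
                               struct netlink_ext_ack *extack)
    {
        struct my_nic *nn = netdev_priv(netdev);

        if (!(nn->cap & MY_CAP_IRQMOD))
            return -EOPNOTSUPP;

        /* Cache the accepted settings, then commit them to the device. */
        nn->rx_adapt  = !!ec->use_adaptive_rx_coalesce;
        nn->tx_adapt  = !!ec->use_adaptive_tx_coalesce;
        nn->rx_usecs  = ec->rx_coalesce_usecs;
        nn->rx_frames = ec->rx_max_coalesced_frames;
        nn->tx_usecs  = ec->tx_coalesce_usecs;
        nn->tx_frames = ec->tx_max_coalesced_frames;

        my_coalesce_write_cfg(nn);
        return my_reconfig_irqmod(nn);
    }
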
2207 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_get_channels() local
2210 num_tx_rings = nn->dp.num_tx_rings; in nfp_net_get_channels()
2211 if (nn->dp.xdp_prog) in nfp_net_get_channels()
2212 num_tx_rings -= nn->dp.num_rx_rings; in nfp_net_get_channels()
2214 channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs); in nfp_net_get_channels()
2215 channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs); in nfp_net_get_channels()
2218 channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings); in nfp_net_get_channels()
2219 channel->rx_count = nn->dp.num_rx_rings - channel->combined_count; in nfp_net_get_channels()
2224 static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx, in nfp_net_set_num_rings() argument
2229 dp = nfp_net_clone_dp(nn); in nfp_net_set_num_rings()
2239 return nfp_net_ring_reconfig(nn, dp, NULL); in nfp_net_set_num_rings()
2245 struct nfp_net *nn = netdev_priv(netdev); in nfp_net_set_channels() local
2256 if (total_rx > min(nn->max_rx_rings, nn->max_r_vecs) || in nfp_net_set_channels()
2257 total_tx > min(nn->max_tx_rings, nn->max_r_vecs)) in nfp_net_set_channels()
2260 return nfp_net_set_num_rings(nn, total_rx, total_tx); in nfp_net_set_channels()
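
The channel callbacks translate the NFP ring model into ethtool terms: the usable RX/TX maxima are capped by the number of ring vectors (min(max_*_rings, max_r_vecs)), an attached XDP program claims one extra TX ring per RX ring which is hidden from ethtool, combined_count is the RX/TX overlap and the remainder shows up as dedicated rx_count/tx_count; applying a new layout again goes through the clone-the-data-path-and-reconfig helper. A small sketch of the get-side arithmetic, with field names assumed.

    #include <linux/ethtool.h>
    #include <linux/minmax.h>
    #include <linux/netdevice.h>

    struct my_nic {
        unsigned int max_rx_rings, max_tx_rings, max_r_vecs;
        unsigned int num_rx_rings, num_tx_rings;
        bool xdp_enabled;
    };

    static void my_get_channels(struct net_device *netdev,
                                struct ethtool_channels *channel)
    {
        struct my_nic *nn = netdev_priv(netdev);
        unsigned int num_tx_rings = nn->num_tx_rings;

        /* XDP uses a dedicated TX ring per RX ring; hide those from ethtool. */
        if (nn->xdp_enabled)
            num_tx_rings -= nn->num_rx_rings;

        channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
        channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
        channel->max_combined = min(channel->max_rx, channel->max_tx);

        channel->combined_count = min(nn->num_rx_rings, num_tx_rings);
        channel->rx_count = nn->num_rx_rings - channel->combined_count;
        channel->tx_count = num_tx_rings - channel->combined_count;
    }
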