1ca9c54d2SDexuan Cui // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2ca9c54d2SDexuan Cui /* Copyright (c) 2021, Microsoft Corporation. */
3ca9c54d2SDexuan Cui
4ca9c54d2SDexuan Cui #include <linux/inetdevice.h>
5ca9c54d2SDexuan Cui #include <linux/etherdevice.h>
6ca9c54d2SDexuan Cui #include <linux/ethtool.h>
7ca9c54d2SDexuan Cui
8fd325cd6SLong Li #include <net/mana/mana.h>
9ca9c54d2SDexuan Cui
/* Global (per-port) ethtool statistics: name shown to userspace plus the
 * byte offset of the corresponding u64 field inside struct
 * mana_ethtool_stats.  Entries prefixed "hc_" are hardware counters
 * refreshed from the GDMA device by mana_query_gf_stats(); the rest are
 * software counters maintained by the driver.
 *
 * The order of this table defines the wire order of the first
 * ARRAY_SIZE(mana_eth_stats) values emitted by mana_get_strings() and
 * mana_get_ethtool_stats() — keep both consistent when editing.
 */
static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} mana_eth_stats[] = {
	{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
	{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
	{"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
					   hc_rx_discards_no_wqe)},
	{"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
					      hc_rx_err_vport_disabled)},
	{"hc_rx_bytes", offsetof(struct mana_ethtool_stats, hc_rx_bytes)},
	{"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_rx_ucast_pkts)},
	{"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_rx_ucast_bytes)},
	{"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_rx_bcast_pkts)},
	{"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_rx_bcast_bytes)},
	{"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_rx_mcast_pkts)},
	{"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_stats,
				      hc_rx_mcast_bytes)},
	{"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_stats,
					   hc_tx_err_gf_disabled)},
	{"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
					     hc_tx_err_vport_disabled)},
	{"hc_tx_err_inval_vportoffset_pkt",
	 offsetof(struct mana_ethtool_stats,
		  hc_tx_err_inval_vportoffset_pkt)},
	{"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_stats,
						hc_tx_err_vlan_enforcement)},
	{"hc_tx_err_eth_type_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_eth_type_enforcement)},
	{"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_stats,
					      hc_tx_err_sa_enforcement)},
	{"hc_tx_err_sqpdid_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_sqpdid_enforcement)},
	{"hc_tx_err_cqpdid_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_cqpdid_enforcement)},
	{"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_stats,
					     hc_tx_err_mtu_violation)},
	{"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_stats,
					 hc_tx_err_inval_oob)},
	{"hc_tx_err_gdma", offsetof(struct mana_ethtool_stats,
				    hc_tx_err_gdma)},
	{"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
	{"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_tx_ucast_pkts)},
	{"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_tx_ucast_bytes)},
	{"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_tx_bcast_pkts)},
	{"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_tx_bcast_bytes)},
	{"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_tx_mcast_pkts)},
	{"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats,
				      hc_tx_mcast_bytes)},
	{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
	{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
					 tx_cqe_unknown_type)},
	{"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
				      rx_coalesced_err)},
	{"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
					 rx_cqe_unknown_type)},
};
77ca9c54d2SDexuan Cui
mana_get_sset_count(struct net_device * ndev,int stringset)78ca9c54d2SDexuan Cui static int mana_get_sset_count(struct net_device *ndev, int stringset)
79ca9c54d2SDexuan Cui {
80ca9c54d2SDexuan Cui struct mana_port_context *apc = netdev_priv(ndev);
81ca9c54d2SDexuan Cui unsigned int num_queues = apc->num_queues;
82ca9c54d2SDexuan Cui
83ca9c54d2SDexuan Cui if (stringset != ETH_SS_STATS)
84ca9c54d2SDexuan Cui return -EINVAL;
85ca9c54d2SDexuan Cui
86bd7fc6e1SShradha Gupta return ARRAY_SIZE(mana_eth_stats) + num_queues *
87bd7fc6e1SShradha Gupta (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
88ca9c54d2SDexuan Cui }
89ca9c54d2SDexuan Cui
/* Emit the stat names in the exact order mana_get_ethtool_stats() fills
 * the values: global stats first, then the per-RX-queue set, then the
 * per-TX-queue set.
 *
 * Use the ethtool_puts()/ethtool_sprintf() helpers instead of raw
 * sprintf() plus manual "p += ETH_GSTRING_LEN" bookkeeping: they advance
 * the cursor by ETH_GSTRING_LEN themselves, which removes the classic
 * copy/paste hazard of a forgotten pointer increment.
 */
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
		ethtool_puts(&data, mana_eth_stats[i].name);

	for (i = 0; i < num_queues; i++) {
		ethtool_sprintf(&data, "rx_%d_packets", i);
		ethtool_sprintf(&data, "rx_%d_bytes", i);
		ethtool_sprintf(&data, "rx_%d_xdp_drop", i);
		ethtool_sprintf(&data, "rx_%d_xdp_tx", i);
		ethtool_sprintf(&data, "rx_%d_xdp_redirect", i);
	}

	for (i = 0; i < num_queues; i++) {
		ethtool_sprintf(&data, "tx_%d_packets", i);
		ethtool_sprintf(&data, "tx_%d_bytes", i);
		ethtool_sprintf(&data, "tx_%d_xdp_xmit", i);
		ethtool_sprintf(&data, "tx_%d_tso_packets", i);
		ethtool_sprintf(&data, "tx_%d_tso_bytes", i);
		ethtool_sprintf(&data, "tx_%d_tso_inner_packets", i);
		ethtool_sprintf(&data, "tx_%d_tso_inner_bytes", i);
		ethtool_sprintf(&data, "tx_%d_long_pkt_fmt", i);
		ethtool_sprintf(&data, "tx_%d_short_pkt_fmt", i);
		ethtool_sprintf(&data, "tx_%d_csum_partial", i);
		ethtool_sprintf(&data, "tx_%d_mana_map_err", i);
	}
}
143ca9c54d2SDexuan Cui
/* Fill @data with one u64 per counter, in exactly the order that
 * mana_get_strings() emits names: the global mana_eth_stats table first,
 * then the per-RX-queue counters for every queue, then the per-TX-queue
 * counters for every queue.  No-op while the port is down.
 */
static void mana_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *e_stats, u64 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	void *eth_stats = &apc->eth_stats;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	u64 xdp_redirect;
	u64 xdp_xmit;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 long_pkt_fmt;
	u64 short_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	int q, i = 0;

	if (!apc->port_is_up)
		return;
	/* Refresh the hc_* hardware counters from the GDMA device so the
	 * snapshot below reflects current hardware state.
	 */
	mana_query_gf_stats(apc);

	/* Global stats: read each u64 via its recorded byte offset. */
	for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
		data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		/* u64_stats retry loop: take a consistent snapshot of the
		 * queue's counters without blocking the datapath writer;
		 * retry if the writer updated them mid-read.
		 */
		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
			xdp_drop = rx_stats->xdp_drop;
			xdp_tx = rx_stats->xdp_tx;
			xdp_redirect = rx_stats->xdp_redirect;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_drop;
		data[i++] = xdp_tx;
		data[i++] = xdp_redirect;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		/* Same lockless-snapshot pattern for the TX counters. */
		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
			xdp_xmit = tx_stats->xdp_xmit;
			tso_packets = tx_stats->tso_packets;
			tso_bytes = tx_stats->tso_bytes;
			tso_inner_packets = tx_stats->tso_inner_packets;
			tso_inner_bytes = tx_stats->tso_inner_bytes;
			long_pkt_fmt = tx_stats->long_pkt_fmt;
			short_pkt_fmt = tx_stats->short_pkt_fmt;
			csum_partial = tx_stats->csum_partial;
			mana_map_err = tx_stats->mana_map_err;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_xmit;
		data[i++] = tso_packets;
		data[i++] = tso_bytes;
		data[i++] = tso_inner_packets;
		data[i++] = tso_inner_bytes;
		data[i++] = long_pkt_fmt;
		data[i++] = short_pkt_fmt;
		data[i++] = csum_partial;
		data[i++] = mana_map_err;
	}
}
226ca9c54d2SDexuan Cui
/* Handle .get_rxnfc: only ETHTOOL_GRXRINGS (number of RX rings) is
 * supported; everything else returns -EOPNOTSUPP.
 */
static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd,
			  u32 *rules)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	if (cmd->cmd == ETHTOOL_GRXRINGS) {
		cmd->data = apc->num_queues;
		return 0;
	}

	return -EOPNOTSUPP;
}
240ca9c54d2SDexuan Cui
mana_get_rxfh_key_size(struct net_device * ndev)241ca9c54d2SDexuan Cui static u32 mana_get_rxfh_key_size(struct net_device *ndev)
242ca9c54d2SDexuan Cui {
243ca9c54d2SDexuan Cui return MANA_HASH_KEY_SIZE;
244ca9c54d2SDexuan Cui }
245ca9c54d2SDexuan Cui
mana_rss_indir_size(struct net_device * ndev)246ca9c54d2SDexuan Cui static u32 mana_rss_indir_size(struct net_device *ndev)
247ca9c54d2SDexuan Cui {
2487fc45cb6SShradha Gupta struct mana_port_context *apc = netdev_priv(ndev);
2497fc45cb6SShradha Gupta
2507fc45cb6SShradha Gupta return apc->indir_table_sz;
251ca9c54d2SDexuan Cui }
252ca9c54d2SDexuan Cui
mana_get_rxfh(struct net_device * ndev,struct ethtool_rxfh_param * rxfh)253fb6e30a7SAhmed Zaki static int mana_get_rxfh(struct net_device *ndev,
254fb6e30a7SAhmed Zaki struct ethtool_rxfh_param *rxfh)
255ca9c54d2SDexuan Cui {
256ca9c54d2SDexuan Cui struct mana_port_context *apc = netdev_priv(ndev);
257ca9c54d2SDexuan Cui int i;
258ca9c54d2SDexuan Cui
259fb6e30a7SAhmed Zaki rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
260ca9c54d2SDexuan Cui
261fb6e30a7SAhmed Zaki if (rxfh->indir) {
2627fc45cb6SShradha Gupta for (i = 0; i < apc->indir_table_sz; i++)
263fb6e30a7SAhmed Zaki rxfh->indir[i] = apc->indir_table[i];
264ca9c54d2SDexuan Cui }
265ca9c54d2SDexuan Cui
266fb6e30a7SAhmed Zaki if (rxfh->key)
267fb6e30a7SAhmed Zaki memcpy(rxfh->key, apc->hashkey, MANA_HASH_KEY_SIZE);
268ca9c54d2SDexuan Cui
269ca9c54d2SDexuan Cui return 0;
270ca9c54d2SDexuan Cui }
271ca9c54d2SDexuan Cui
mana_set_rxfh(struct net_device * ndev,struct ethtool_rxfh_param * rxfh,struct netlink_ext_ack * extack)272fb6e30a7SAhmed Zaki static int mana_set_rxfh(struct net_device *ndev,
273fb6e30a7SAhmed Zaki struct ethtool_rxfh_param *rxfh,
274fb6e30a7SAhmed Zaki struct netlink_ext_ack *extack)
275ca9c54d2SDexuan Cui {
276ca9c54d2SDexuan Cui struct mana_port_context *apc = netdev_priv(ndev);
277ca9c54d2SDexuan Cui bool update_hash = false, update_table = false;
278ca9c54d2SDexuan Cui u8 save_key[MANA_HASH_KEY_SIZE];
2797fc45cb6SShradha Gupta u32 *save_table;
280ca9c54d2SDexuan Cui int i, err;
281ca9c54d2SDexuan Cui
282ca9c54d2SDexuan Cui if (!apc->port_is_up)
283ca9c54d2SDexuan Cui return -EOPNOTSUPP;
284ca9c54d2SDexuan Cui
285fb6e30a7SAhmed Zaki if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
286fb6e30a7SAhmed Zaki rxfh->hfunc != ETH_RSS_HASH_TOP)
287ca9c54d2SDexuan Cui return -EOPNOTSUPP;
288ca9c54d2SDexuan Cui
2897fc45cb6SShradha Gupta save_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
2907fc45cb6SShradha Gupta if (!save_table)
2917fc45cb6SShradha Gupta return -ENOMEM;
2927fc45cb6SShradha Gupta
293fb6e30a7SAhmed Zaki if (rxfh->indir) {
2947fc45cb6SShradha Gupta for (i = 0; i < apc->indir_table_sz; i++)
2957fc45cb6SShradha Gupta if (rxfh->indir[i] >= apc->num_queues) {
2967fc45cb6SShradha Gupta err = -EINVAL;
2977fc45cb6SShradha Gupta goto cleanup;
2987fc45cb6SShradha Gupta }
299ca9c54d2SDexuan Cui
300ca9c54d2SDexuan Cui update_table = true;
3017fc45cb6SShradha Gupta for (i = 0; i < apc->indir_table_sz; i++) {
302ca9c54d2SDexuan Cui save_table[i] = apc->indir_table[i];
303fb6e30a7SAhmed Zaki apc->indir_table[i] = rxfh->indir[i];
304ca9c54d2SDexuan Cui }
305ca9c54d2SDexuan Cui }
306ca9c54d2SDexuan Cui
307fb6e30a7SAhmed Zaki if (rxfh->key) {
308ca9c54d2SDexuan Cui update_hash = true;
309ca9c54d2SDexuan Cui memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
310fb6e30a7SAhmed Zaki memcpy(apc->hashkey, rxfh->key, MANA_HASH_KEY_SIZE);
311ca9c54d2SDexuan Cui }
312ca9c54d2SDexuan Cui
313ca9c54d2SDexuan Cui err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
314ca9c54d2SDexuan Cui
315ca9c54d2SDexuan Cui if (err) { /* recover to original values */
316ca9c54d2SDexuan Cui if (update_table) {
3177fc45cb6SShradha Gupta for (i = 0; i < apc->indir_table_sz; i++)
318ca9c54d2SDexuan Cui apc->indir_table[i] = save_table[i];
319ca9c54d2SDexuan Cui }
320ca9c54d2SDexuan Cui
321ca9c54d2SDexuan Cui if (update_hash)
322ca9c54d2SDexuan Cui memcpy(apc->hashkey, save_key, MANA_HASH_KEY_SIZE);
323ca9c54d2SDexuan Cui
324ca9c54d2SDexuan Cui mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
325ca9c54d2SDexuan Cui }
326ca9c54d2SDexuan Cui
3277fc45cb6SShradha Gupta cleanup:
3287fc45cb6SShradha Gupta kfree(save_table);
3297fc45cb6SShradha Gupta
330ca9c54d2SDexuan Cui return err;
331ca9c54d2SDexuan Cui }
332ca9c54d2SDexuan Cui
mana_get_channels(struct net_device * ndev,struct ethtool_channels * channel)333ca9c54d2SDexuan Cui static void mana_get_channels(struct net_device *ndev,
334ca9c54d2SDexuan Cui struct ethtool_channels *channel)
335ca9c54d2SDexuan Cui {
336ca9c54d2SDexuan Cui struct mana_port_context *apc = netdev_priv(ndev);
337ca9c54d2SDexuan Cui
338ca9c54d2SDexuan Cui channel->max_combined = apc->max_queues;
339ca9c54d2SDexuan Cui channel->combined_count = apc->num_queues;
340ca9c54d2SDexuan Cui }
341ca9c54d2SDexuan Cui
/* Change the number of combined channels.  RX buffers for the new queue
 * count are pre-allocated up front so that mana_attach() cannot fail on
 * memory after the port has already been torn down; on attach failure the
 * old queue count is restored.  Returns 0 on success or a negative errno.
 */
static int mana_set_channels(struct net_device *ndev,
			     struct ethtool_channels *channels)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int new_count = channels->combined_count;
	unsigned int old_count = apc->num_queues;
	int err;

	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, new_count);
	if (err) {
		netdev_err(ndev, "Insufficient memory for new allocations");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	apc->num_queues = new_count;
	err = mana_attach(ndev);
	if (err) {
		/* Roll back so software state matches the hardware. */
		apc->num_queues = old_count;
		netdev_err(ndev, "mana_attach failed: %d\n", err);
	}

out:
	/* Release whatever pre-allocated buffers were not consumed. */
	mana_pre_dealloc_rxbufs(apc);
	return err;
}
373ca9c54d2SDexuan Cui
mana_get_ringparam(struct net_device * ndev,struct ethtool_ringparam * ring,struct kernel_ethtool_ringparam * kernel_ring,struct netlink_ext_ack * extack)3743410d0e1SShradha Gupta static void mana_get_ringparam(struct net_device *ndev,
3753410d0e1SShradha Gupta struct ethtool_ringparam *ring,
3763410d0e1SShradha Gupta struct kernel_ethtool_ringparam *kernel_ring,
3773410d0e1SShradha Gupta struct netlink_ext_ack *extack)
3783410d0e1SShradha Gupta {
3793410d0e1SShradha Gupta struct mana_port_context *apc = netdev_priv(ndev);
3803410d0e1SShradha Gupta
3813410d0e1SShradha Gupta ring->rx_pending = apc->rx_queue_size;
3823410d0e1SShradha Gupta ring->tx_pending = apc->tx_queue_size;
3833410d0e1SShradha Gupta ring->rx_max_pending = MAX_RX_BUFFERS_PER_QUEUE;
3843410d0e1SShradha Gupta ring->tx_max_pending = MAX_TX_BUFFERS_PER_QUEUE;
3853410d0e1SShradha Gupta }
3863410d0e1SShradha Gupta
/* Change the RX/TX ring sizes.  Requested sizes are validated against the
 * minimums and rounded up to the next power of two (a hardware queue
 * requirement).  RX buffers for the new size are pre-allocated before the
 * port is detached so that mana_attach() cannot fail on memory mid-way;
 * on attach failure the old sizes are restored.  Returns 0 or a negative
 * errno.
 */
static int mana_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 new_tx, new_rx;
	u32 old_tx, old_rx;
	int err;

	old_tx = apc->tx_queue_size;
	old_rx = apc->rx_queue_size;

	if (ring->tx_pending < MIN_TX_BUFFERS_PER_QUEUE) {
		NL_SET_ERR_MSG_FMT(extack, "tx:%d less than the min:%d", ring->tx_pending,
				   MIN_TX_BUFFERS_PER_QUEUE);
		return -EINVAL;
	}

	if (ring->rx_pending < MIN_RX_BUFFERS_PER_QUEUE) {
		NL_SET_ERR_MSG_FMT(extack, "rx:%d less than the min:%d", ring->rx_pending,
				   MIN_RX_BUFFERS_PER_QUEUE);
		return -EINVAL;
	}

	new_rx = roundup_pow_of_two(ring->rx_pending);
	new_tx = roundup_pow_of_two(ring->tx_pending);
	netdev_info(ndev, "Using nearest power of 2 values for Txq:%d Rxq:%d\n",
		    new_tx, new_rx);

	/* pre-allocating new buffers to prevent failures in mana_attach() later */
	/* Temporarily set the new RX size so mana_pre_alloc_rxbufs() sizes
	 * the buffers for it, then restore the old value: the real switch
	 * happens only after mana_detach() succeeds below.
	 */
	apc->rx_queue_size = new_rx;
	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
	apc->rx_queue_size = old_rx;
	if (err) {
		netdev_err(ndev, "Insufficient memory for new allocations\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	apc->tx_queue_size = new_tx;
	apc->rx_queue_size = new_rx;

	err = mana_attach(ndev);
	if (err) {
		netdev_err(ndev, "mana_attach failed: %d\n", err);
		/* Roll back so software state matches the hardware. */
		apc->tx_queue_size = old_tx;
		apc->rx_queue_size = old_rx;
	}
out:
	/* Release whatever pre-allocated buffers were not consumed. */
	mana_pre_dealloc_rxbufs(apc);
	return err;
}
4453410d0e1SShradha Gupta
/* ethtool operations exported to the core via netdev->ethtool_ops. */
const struct ethtool_ops mana_ethtool_ops = {
	.get_ethtool_stats = mana_get_ethtool_stats,
	.get_sset_count = mana_get_sset_count,
	.get_strings = mana_get_strings,
	.get_rxnfc = mana_get_rxnfc,
	.get_rxfh_key_size = mana_get_rxfh_key_size,
	.get_rxfh_indir_size = mana_rss_indir_size,
	.get_rxfh = mana_get_rxfh,
	.set_rxfh = mana_set_rxfh,
	.get_channels = mana_get_channels,
	.set_channels = mana_set_channels,
	.get_ringparam  = mana_get_ringparam,
	.set_ringparam  = mana_set_ringparam,
};
460