xref: /linux/drivers/net/ethernet/microsoft/mana/mana_ethtool.c (revision 260f6f4fda93c8485c8037865c941b42b9cba5d2)
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include <net/mana/mana.h>

struct mana_stats_desc {
	char name[ETH_GSTRING_LEN];
	u16 offset;
};

static const struct mana_stats_desc mana_eth_stats[] = {
	{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
	{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
	{"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
					   hc_rx_discards_no_wqe)},
	{"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
					      hc_rx_err_vport_disabled)},
	{"hc_rx_bytes", offsetof(struct mana_ethtool_stats, hc_rx_bytes)},
	{"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_rx_ucast_pkts)},
	{"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_rx_ucast_bytes)},
	{"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_stats,
				      hc_rx_bcast_pkts)},
	{"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_rx_bcast_bytes)},
	{"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_stats,
			hc_rx_mcast_pkts)},
	{"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_stats,
				       hc_rx_mcast_bytes)},
	{"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_stats,
					   hc_tx_err_gf_disabled)},
	{"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
					      hc_tx_err_vport_disabled)},
	{"hc_tx_err_inval_vportoffset_pkt",
	 offsetof(struct mana_ethtool_stats,
		  hc_tx_err_inval_vportoffset_pkt)},
	{"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_stats,
						hc_tx_err_vlan_enforcement)},
	{"hc_tx_err_eth_type_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_eth_type_enforcement)},
	{"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_stats,
					      hc_tx_err_sa_enforcement)},
	{"hc_tx_err_sqpdid_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_sqpdid_enforcement)},
	{"hc_tx_err_cqpdid_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_cqpdid_enforcement)},
	{"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_stats,
					     hc_tx_err_mtu_violation)},
	{"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_stats,
					 hc_tx_err_inval_oob)},
	{"hc_tx_err_gdma", offsetof(struct mana_ethtool_stats,
				    hc_tx_err_gdma)},
	{"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
	{"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
					hc_tx_ucast_pkts)},
	{"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats,
					hc_tx_ucast_bytes)},
	{"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats,
					hc_tx_bcast_pkts)},
	{"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats,
					hc_tx_bcast_bytes)},
	{"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats,
					hc_tx_mcast_pkts)},
	{"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats,
					hc_tx_mcast_bytes)},
	{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
	{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
					tx_cqe_unknown_type)},
	{"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
					rx_coalesced_err)},
	{"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
					rx_cqe_unknown_type)},
};

static const struct mana_stats_desc mana_phy_stats[] = {
	{ "hc_rx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_drop_phy) },
	{ "hc_tx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_drop_phy) },
	{ "hc_tc0_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc0_phy) },
	{ "hc_tc0_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc0_phy) },
	{ "hc_tc0_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc0_phy) },
	{ "hc_tc0_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc0_phy) },
	{ "hc_tc1_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc1_phy) },
	{ "hc_tc1_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc1_phy) },
	{ "hc_tc1_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc1_phy) },
	{ "hc_tc1_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc1_phy) },
	{ "hc_tc2_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc2_phy) },
	{ "hc_tc2_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc2_phy) },
	{ "hc_tc2_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc2_phy) },
	{ "hc_tc2_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc2_phy) },
	{ "hc_tc3_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc3_phy) },
	{ "hc_tc3_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc3_phy) },
	{ "hc_tc3_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc3_phy) },
	{ "hc_tc3_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc3_phy) },
	{ "hc_tc4_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc4_phy) },
	{ "hc_tc4_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc4_phy) },
	{ "hc_tc4_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc4_phy) },
	{ "hc_tc4_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc4_phy) },
	{ "hc_tc5_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc5_phy) },
	{ "hc_tc5_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc5_phy) },
	{ "hc_tc5_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc5_phy) },
	{ "hc_tc5_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc5_phy) },
	{ "hc_tc6_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc6_phy) },
	{ "hc_tc6_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc6_phy) },
	{ "hc_tc6_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc6_phy) },
	{ "hc_tc6_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc6_phy) },
	{ "hc_tc7_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc7_phy) },
	{ "hc_tc7_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc7_phy) },
	{ "hc_tc7_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc7_phy) },
	{ "hc_tc7_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc7_phy) },
	{ "hc_tc0_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc0_phy) },
	{ "hc_tc0_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc0_phy) },
	{ "hc_tc1_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc1_phy) },
	{ "hc_tc1_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc1_phy) },
	{ "hc_tc2_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc2_phy) },
	{ "hc_tc2_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc2_phy) },
	{ "hc_tc3_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc3_phy) },
	{ "hc_tc3_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc3_phy) },
	{ "hc_tc4_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc4_phy) },
	{ "hc_tc4_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc4_phy) },
	{ "hc_tc5_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc5_phy) },
	{ "hc_tc5_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc5_phy) },
	{ "hc_tc6_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc6_phy) },
	{ "hc_tc6_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc6_phy) },
	{ "hc_tc7_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc7_phy) },
	{ "hc_tc7_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc7_phy) },
};

static int mana_get_sset_count(struct net_device *ndev, int stringset)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;

	if (stringset != ETH_SS_STATS)
		return -EINVAL;

	return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) +
			num_queues * (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}

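/* The strings are emitted in the exact order in which mana_get_ethtool_stats()
 * fills the data array: global eth stats, phy stats, then per-queue RX and
 * TX stats.
 */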
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
		ethtool_puts(&data, mana_eth_stats[i].name);

	for (i = 0; i < ARRAY_SIZE(mana_phy_stats); i++)
		ethtool_puts(&data, mana_phy_stats[i].name);

	for (i = 0; i < num_queues; i++) {
		ethtool_sprintf(&data, "rx_%d_packets", i);
		ethtool_sprintf(&data, "rx_%d_bytes", i);
		ethtool_sprintf(&data, "rx_%d_xdp_drop", i);
		ethtool_sprintf(&data, "rx_%d_xdp_tx", i);
		ethtool_sprintf(&data, "rx_%d_xdp_redirect", i);
	}

	for (i = 0; i < num_queues; i++) {
		ethtool_sprintf(&data, "tx_%d_packets", i);
		ethtool_sprintf(&data, "tx_%d_bytes", i);
		ethtool_sprintf(&data, "tx_%d_xdp_xmit", i);
		ethtool_sprintf(&data, "tx_%d_tso_packets", i);
		ethtool_sprintf(&data, "tx_%d_tso_bytes", i);
		ethtool_sprintf(&data, "tx_%d_tso_inner_packets", i);
		ethtool_sprintf(&data, "tx_%d_tso_inner_bytes", i);
		ethtool_sprintf(&data, "tx_%d_long_pkt_fmt", i);
		ethtool_sprintf(&data, "tx_%d_short_pkt_fmt", i);
		ethtool_sprintf(&data, "tx_%d_csum_partial", i);
		ethtool_sprintf(&data, "tx_%d_mana_map_err", i);
	}
}

static void mana_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *e_stats, u64 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	void *eth_stats = &apc->eth_stats;
	void *phy_stats = &apc->phy_stats;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	u64 xdp_redirect;
	u64 xdp_xmit;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 long_pkt_fmt;
	u64 short_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	int q, i = 0;

	if (!apc->port_is_up)
		return;

	/* Refresh the hc_* counters in eth_stats from the GDMA layer. */
	mana_query_gf_stats(apc);

	/* Query the phy stats from the GDMA layer. These include the
	 * aggregate tx/rx drop counters and the per-TC (Traffic Class)
	 * tx/rx and pause counters.
	 */
	mana_query_phy_stats(apc);

	for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
		data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);

	for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++)
		data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset);

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
			xdp_drop = rx_stats->xdp_drop;
			xdp_tx = rx_stats->xdp_tx;
			xdp_redirect = rx_stats->xdp_redirect;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_drop;
		data[i++] = xdp_tx;
		data[i++] = xdp_redirect;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
			xdp_xmit = tx_stats->xdp_xmit;
			tso_packets = tx_stats->tso_packets;
			tso_bytes = tx_stats->tso_bytes;
			tso_inner_packets = tx_stats->tso_inner_packets;
			tso_inner_bytes = tx_stats->tso_inner_bytes;
			long_pkt_fmt = tx_stats->long_pkt_fmt;
			short_pkt_fmt = tx_stats->short_pkt_fmt;
			csum_partial = tx_stats->csum_partial;
			mana_map_err = tx_stats->mana_map_err;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_xmit;
		data[i++] = tso_packets;
		data[i++] = tso_bytes;
		data[i++] = tso_inner_packets;
		data[i++] = tso_inner_bytes;
		data[i++] = long_pkt_fmt;
		data[i++] = short_pkt_fmt;
		data[i++] = csum_partial;
		data[i++] = mana_map_err;
	}
}

static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd,
			  u32 *rules)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = apc->num_queues;
		return 0;
	}

	return -EOPNOTSUPP;
}

static u32 mana_get_rxfh_key_size(struct net_device *ndev)
{
	return MANA_HASH_KEY_SIZE;
}

static u32 mana_rss_indir_size(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	return apc->indir_table_sz;
}

static int mana_get_rxfh(struct net_device *ndev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int i;

	rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */

	if (rxfh->indir) {
		for (i = 0; i < apc->indir_table_sz; i++)
			rxfh->indir[i] = apc->indir_table[i];
	}

	if (rxfh->key)
		memcpy(rxfh->key, apc->hashkey, MANA_HASH_KEY_SIZE);

	return 0;
}

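/* Apply new RSS settings. The current indirection table and hash key are
 * saved first so that they can be restored and re-programmed if the device
 * rejects the new configuration.
 */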
static int mana_set_rxfh(struct net_device *ndev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	bool update_hash = false, update_table = false;
	u8 save_key[MANA_HASH_KEY_SIZE];
	u32 *save_table;
	int i, err;

	if (!apc->port_is_up)
		return -EOPNOTSUPP;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	save_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
	if (!save_table)
		return -ENOMEM;

	if (rxfh->indir) {
		for (i = 0; i < apc->indir_table_sz; i++)
			if (rxfh->indir[i] >= apc->num_queues) {
				err = -EINVAL;
				goto cleanup;
			}

		update_table = true;
		for (i = 0; i < apc->indir_table_sz; i++) {
			save_table[i] = apc->indir_table[i];
			apc->indir_table[i] = rxfh->indir[i];
		}
	}

	if (rxfh->key) {
		update_hash = true;
		memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
		memcpy(apc->hashkey, rxfh->key, MANA_HASH_KEY_SIZE);
	}

	err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);

	if (err) { /* restore the original values */
		if (update_table) {
			for (i = 0; i < apc->indir_table_sz; i++)
				apc->indir_table[i] = save_table[i];
		}

		if (update_hash)
			memcpy(apc->hashkey, save_key, MANA_HASH_KEY_SIZE);

		mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
	}

cleanup:
	kfree(save_table);

	return err;
}

static void mana_get_channels(struct net_device *ndev,
			      struct ethtool_channels *channel)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	channel->max_combined = apc->max_queues;
	channel->combined_count = apc->num_queues;
}

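/* Changing the number of queues requires a full detach/re-attach of the
 * port. RX buffers for the new queue count are pre-allocated up front so
 * that mana_attach() does not fail on memory allocation after the detach.
 */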
static int mana_set_channels(struct net_device *ndev,
			     struct ethtool_channels *channels)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int new_count = channels->combined_count;
	unsigned int old_count = apc->num_queues;
	int err;

	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, new_count);
	if (err) {
		netdev_err(ndev, "Insufficient memory for new allocations\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	apc->num_queues = new_count;
	err = mana_attach(ndev);
	if (err) {
		apc->num_queues = old_count;
		netdev_err(ndev, "mana_attach failed: %d\n", err);
	}

out:
	mana_pre_dealloc_rxbufs(apc);
	return err;
}

static void mana_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	ring->rx_pending = apc->rx_queue_size;
	ring->tx_pending = apc->tx_queue_size;
	ring->rx_max_pending = MAX_RX_BUFFERS_PER_QUEUE;
	ring->tx_max_pending = MAX_TX_BUFFERS_PER_QUEUE;
}

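/* The requested ring sizes are rounded up to powers of two. As with
 * mana_set_channels(), RX buffers are pre-allocated before the port is
 * detached and re-attached with the new sizes.
 */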
static int mana_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 new_tx, new_rx;
	u32 old_tx, old_rx;
	int err;

	old_tx = apc->tx_queue_size;
	old_rx = apc->rx_queue_size;

	if (ring->tx_pending < MIN_TX_BUFFERS_PER_QUEUE) {
		NL_SET_ERR_MSG_FMT(extack, "tx:%d less than the min:%d", ring->tx_pending,
				   MIN_TX_BUFFERS_PER_QUEUE);
		return -EINVAL;
	}

	if (ring->rx_pending < MIN_RX_BUFFERS_PER_QUEUE) {
		NL_SET_ERR_MSG_FMT(extack, "rx:%d less than the min:%d", ring->rx_pending,
				   MIN_RX_BUFFERS_PER_QUEUE);
		return -EINVAL;
	}

	new_rx = roundup_pow_of_two(ring->rx_pending);
	new_tx = roundup_pow_of_two(ring->tx_pending);
	netdev_info(ndev, "Rounding ring sizes up to a power of 2: Txq:%d Rxq:%d\n",
		    new_tx, new_rx);

	/* pre-allocating new buffers to prevent failures in mana_attach() later */
	apc->rx_queue_size = new_rx;
	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
	apc->rx_queue_size = old_rx;
	if (err) {
		netdev_err(ndev, "Insufficient memory for new allocations\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	apc->tx_queue_size = new_tx;
	apc->rx_queue_size = new_rx;

	err = mana_attach(ndev);
	if (err) {
		netdev_err(ndev, "mana_attach failed: %d\n", err);
		apc->tx_queue_size = old_tx;
		apc->rx_queue_size = old_rx;
	}
out:
	mana_pre_dealloc_rxbufs(apc);
	return err;
}

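/* The link speed is queried from the device; SPEED_UNKNOWN is reported if
 * the query fails. The port is always reported as full duplex.
 */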
static int mana_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	err = mana_query_link_cfg(apc);
	cmd->base.speed = (err) ? SPEED_UNKNOWN : apc->max_speed;

	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_OTHER;

	return 0;
}

const struct ethtool_ops mana_ethtool_ops = {
	.get_ethtool_stats	= mana_get_ethtool_stats,
	.get_sset_count		= mana_get_sset_count,
	.get_strings		= mana_get_strings,
	.get_rxnfc		= mana_get_rxnfc,
	.get_rxfh_key_size	= mana_get_rxfh_key_size,
	.get_rxfh_indir_size	= mana_rss_indir_size,
	.get_rxfh		= mana_get_rxfh,
	.set_rxfh		= mana_set_rxfh,
	.get_channels		= mana_get_channels,
	.set_channels		= mana_set_channels,
	.get_ringparam		= mana_get_ringparam,
	.set_ringparam		= mana_set_ringparam,
	.get_link_ksettings	= mana_get_link_ksettings,
	.get_link		= ethtool_op_get_link,
};