// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include <net/mana/mana.h>

struct mana_stats_desc {
	char name[ETH_GSTRING_LEN];
	u16 offset;
};

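/* Descriptor tables for the driver's global counters. Each entry pairs an
 * ethtool string with the offsetof() of the matching u64 field, so the
 * string and value arrays can be produced by walking the same table.
 * mana_eth_stats are software counters kept by the driver, while the
 * mana_hc_stats and mana_phy_stats values are filled in from hardware
 * queries. Table order defines the reporting order and must match between
 * mana_get_strings() and mana_get_ethtool_stats().
 */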
static const struct mana_stats_desc mana_eth_stats[] = {
	{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
	{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
	{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
	{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
					 tx_cqe_unknown_type)},
	{"tx_linear_pkt_cnt", offsetof(struct mana_ethtool_stats,
				       tx_linear_pkt_cnt)},
	{"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
					 rx_cqe_unknown_type)},
};

static const struct mana_stats_desc mana_hc_stats[] = {
	{"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_hc_stats,
					   hc_rx_discards_no_wqe)},
	{"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats,
					      hc_rx_err_vport_disabled)},
	{"hc_rx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_rx_bytes)},
	{"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats,
				      hc_rx_ucast_pkts)},
	{"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats,
				       hc_rx_ucast_bytes)},
	{"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats,
				      hc_rx_bcast_pkts)},
	{"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats,
				       hc_rx_bcast_bytes)},
	{"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats,
				      hc_rx_mcast_pkts)},
	{"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats,
				       hc_rx_mcast_bytes)},
	{"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_hc_stats,
					   hc_tx_err_gf_disabled)},
	{"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats,
					      hc_tx_err_vport_disabled)},
	{"hc_tx_err_inval_vportoffset_pkt",
	 offsetof(struct mana_ethtool_hc_stats,
		  hc_tx_err_inval_vportoffset_pkt)},
	{"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_hc_stats,
						hc_tx_err_vlan_enforcement)},
	{"hc_tx_err_eth_type_enforcement",
	 offsetof(struct mana_ethtool_hc_stats, hc_tx_err_eth_type_enforcement)},
	{"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_hc_stats,
					      hc_tx_err_sa_enforcement)},
	{"hc_tx_err_sqpdid_enforcement",
	 offsetof(struct mana_ethtool_hc_stats, hc_tx_err_sqpdid_enforcement)},
	{"hc_tx_err_cqpdid_enforcement",
	 offsetof(struct mana_ethtool_hc_stats, hc_tx_err_cqpdid_enforcement)},
	{"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_hc_stats,
					     hc_tx_err_mtu_violation)},
	{"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_hc_stats,
					 hc_tx_err_inval_oob)},
	{"hc_tx_err_gdma", offsetof(struct mana_ethtool_hc_stats,
				    hc_tx_err_gdma)},
	{"hc_tx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_tx_bytes)},
	{"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats,
				      hc_tx_ucast_pkts)},
	{"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats,
				       hc_tx_ucast_bytes)},
	{"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats,
				      hc_tx_bcast_pkts)},
	{"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats,
				       hc_tx_bcast_bytes)},
	{"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats,
				      hc_tx_mcast_pkts)},
	{"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats,
				       hc_tx_mcast_bytes)},
};

static const struct mana_stats_desc mana_phy_stats[] = {
	{ "hc_rx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_drop_phy) },
	{ "hc_tx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_drop_phy) },
	{ "hc_tc0_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc0_phy) },
	{ "hc_tc0_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc0_phy) },
	{ "hc_tc0_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc0_phy) },
	{ "hc_tc0_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc0_phy) },
	{ "hc_tc1_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc1_phy) },
	{ "hc_tc1_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc1_phy) },
	{ "hc_tc1_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc1_phy) },
	{ "hc_tc1_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc1_phy) },
	{ "hc_tc2_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc2_phy) },
	{ "hc_tc2_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc2_phy) },
	{ "hc_tc2_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc2_phy) },
	{ "hc_tc2_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc2_phy) },
	{ "hc_tc3_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc3_phy) },
	{ "hc_tc3_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc3_phy) },
	{ "hc_tc3_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc3_phy) },
	{ "hc_tc3_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc3_phy) },
	{ "hc_tc4_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc4_phy) },
	{ "hc_tc4_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc4_phy) },
	{ "hc_tc4_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc4_phy) },
	{ "hc_tc4_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc4_phy) },
	{ "hc_tc5_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc5_phy) },
	{ "hc_tc5_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc5_phy) },
	{ "hc_tc5_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc5_phy) },
	{ "hc_tc5_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc5_phy) },
	{ "hc_tc6_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc6_phy) },
	{ "hc_tc6_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc6_phy) },
	{ "hc_tc6_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc6_phy) },
	{ "hc_tc6_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc6_phy) },
	{ "hc_tc7_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc7_phy) },
	{ "hc_tc7_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc7_phy) },
	{ "hc_tc7_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc7_phy) },
	{ "hc_tc7_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc7_phy) },
	{ "hc_tc0_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc0_phy) },
	{ "hc_tc0_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc0_phy) },
	{ "hc_tc1_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc1_phy) },
	{ "hc_tc1_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc1_phy) },
	{ "hc_tc2_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc2_phy) },
	{ "hc_tc2_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc2_phy) },
	{ "hc_tc3_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc3_phy) },
	{ "hc_tc3_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc3_phy) },
	{ "hc_tc4_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc4_phy) },
	{ "hc_tc4_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc4_phy) },
	{ "hc_tc5_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc5_phy) },
	{ "hc_tc5_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc5_phy) },
	{ "hc_tc6_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc6_phy) },
	{ "hc_tc6_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc6_phy) },
	{ "hc_tc7_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc7_phy) },
	{ "hc_tc7_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc7_phy) },
};

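/* Number of u64 values reported for ETH_SS_STATS: all three global tables
 * plus the fixed per-queue rx/tx counter sets. This must agree with what
 * mana_get_strings() and mana_get_ethtool_stats() emit.
 */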
static int mana_get_sset_count(struct net_device *ndev, int stringset)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;

	if (stringset != ETH_SS_STATS)
		return -EINVAL;

	return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_hc_stats) +
	       ARRAY_SIZE(mana_phy_stats) +
	       num_queues * (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}

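/* Emit the counter names in the exact order that mana_get_ethtool_stats()
 * writes the values: global eth/hc/phy stats first, then the per-rxq and
 * per-txq counters. These are the names shown by "ethtool -S <dev>". The
 * rx_%d_coalesced_cqe_%d names are one bucket per possible batch size,
 * from 2 up to MANA_RXCOMP_OOB_NUM_PPI completions per CQE.
 */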
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	int i, j;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
		ethtool_puts(&data, mana_eth_stats[i].name);

	for (i = 0; i < ARRAY_SIZE(mana_hc_stats); i++)
		ethtool_puts(&data, mana_hc_stats[i].name);

	for (i = 0; i < ARRAY_SIZE(mana_phy_stats); i++)
		ethtool_puts(&data, mana_phy_stats[i].name);

	for (i = 0; i < num_queues; i++) {
		ethtool_sprintf(&data, "rx_%d_packets", i);
		ethtool_sprintf(&data, "rx_%d_bytes", i);
		ethtool_sprintf(&data, "rx_%d_xdp_drop", i);
		ethtool_sprintf(&data, "rx_%d_xdp_tx", i);
		ethtool_sprintf(&data, "rx_%d_xdp_redirect", i);
		ethtool_sprintf(&data, "rx_%d_pkt_len0_err", i);
		for (j = 0; j < MANA_RXCOMP_OOB_NUM_PPI - 1; j++)
			ethtool_sprintf(&data, "rx_%d_coalesced_cqe_%d", i, j + 2);
	}

	for (i = 0; i < num_queues; i++) {
		ethtool_sprintf(&data, "tx_%d_packets", i);
		ethtool_sprintf(&data, "tx_%d_bytes", i);
		ethtool_sprintf(&data, "tx_%d_xdp_xmit", i);
		ethtool_sprintf(&data, "tx_%d_tso_packets", i);
		ethtool_sprintf(&data, "tx_%d_tso_bytes", i);
		ethtool_sprintf(&data, "tx_%d_tso_inner_packets", i);
		ethtool_sprintf(&data, "tx_%d_tso_inner_bytes", i);
		ethtool_sprintf(&data, "tx_%d_long_pkt_fmt", i);
		ethtool_sprintf(&data, "tx_%d_short_pkt_fmt", i);
		ethtool_sprintf(&data, "tx_%d_csum_partial", i);
		ethtool_sprintf(&data, "tx_%d_mana_map_err", i);
	}
}

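/* Copy all counters into the ethtool data array, in the same order as the
 * strings above. The per-queue counters are updated from the datapath, so
 * each queue is read under a u64_stats_fetch_begin()/retry() loop to get a
 * consistent snapshot of its 64-bit counters; this matters on 32-bit
 * architectures, where the seqcount protects against torn reads, and
 * compiles away on 64-bit ones.
 */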
static void mana_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *e_stats, u64 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	void *eth_stats = &apc->eth_stats;
	void *hc_stats = &apc->ac->hc_stats;
	void *phy_stats = &apc->phy_stats;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	u64 xdp_redirect;
	u64 xdp_xmit;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 pkt_len0_err;
	u64 coalesced_cqe[MANA_RXCOMP_OOB_NUM_PPI - 1];
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 long_pkt_fmt;
	u64 short_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	int q, i = 0, j;

	if (!apc->port_is_up)
		return;

	/* Query the latest phy stats from GDMA. These include the aggregate
	 * tx/rx drop counters and the per-TC (Traffic Class) packet, byte
	 * and pause counters.
	 */
	mana_query_phy_stats(apc);

	for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
		data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);

	for (q = 0; q < ARRAY_SIZE(mana_hc_stats); q++)
		data[i++] = *(u64 *)(hc_stats + mana_hc_stats[q].offset);

	for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++)
		data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset);

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
			xdp_drop = rx_stats->xdp_drop;
			xdp_tx = rx_stats->xdp_tx;
			xdp_redirect = rx_stats->xdp_redirect;
			pkt_len0_err = rx_stats->pkt_len0_err;
			for (j = 0; j < MANA_RXCOMP_OOB_NUM_PPI - 1; j++)
				coalesced_cqe[j] = rx_stats->coalesced_cqe[j];
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_drop;
		data[i++] = xdp_tx;
		data[i++] = xdp_redirect;
		data[i++] = pkt_len0_err;
		for (j = 0; j < MANA_RXCOMP_OOB_NUM_PPI - 1; j++)
			data[i++] = coalesced_cqe[j];
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
			xdp_xmit = tx_stats->xdp_xmit;
			tso_packets = tx_stats->tso_packets;
			tso_bytes = tx_stats->tso_bytes;
			tso_inner_packets = tx_stats->tso_inner_packets;
			tso_inner_bytes = tx_stats->tso_inner_bytes;
			long_pkt_fmt = tx_stats->long_pkt_fmt;
			short_pkt_fmt = tx_stats->short_pkt_fmt;
			csum_partial = tx_stats->csum_partial;
			mana_map_err = tx_stats->mana_map_err;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_xmit;
		data[i++] = tso_packets;
		data[i++] = tso_bytes;
		data[i++] = tso_inner_packets;
		data[i++] = tso_inner_bytes;
		data[i++] = long_pkt_fmt;
		data[i++] = short_pkt_fmt;
		data[i++] = csum_partial;
		data[i++] = mana_map_err;
	}
}

static u32 mana_get_rx_ring_count(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	return apc->num_queues;
}

static u32 mana_get_rxfh_key_size(struct net_device *ndev)
{
	return MANA_HASH_KEY_SIZE;
}

static u32 mana_rss_indir_size(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	return apc->indir_table_sz;
}

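/* Report the current RSS configuration ("ethtool -x <dev>"): the hash
 * function is always Toeplitz, and the indirection table and hash key are
 * copied out of the port context.
 */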
static int mana_get_rxfh(struct net_device *ndev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int i;

	rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */

	if (rxfh->indir) {
		for (i = 0; i < apc->indir_table_sz; i++)
			rxfh->indir[i] = apc->indir_table[i];
	}

	if (rxfh->key)
		memcpy(rxfh->key, apc->hashkey, MANA_HASH_KEY_SIZE);

	return 0;
}

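/* Apply a new RSS configuration ("ethtool -X <dev> ..."). The old key and
 * indirection table are saved first so that, if reprogramming the hardware
 * fails, the original values can be restored and pushed back with a second
 * mana_config_rss() call.
 */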
static int mana_set_rxfh(struct net_device *ndev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	bool update_hash = false, update_table = false;
	u8 save_key[MANA_HASH_KEY_SIZE];
	u32 *save_table;
	int i, err;

	if (!apc->port_is_up)
		return -EOPNOTSUPP;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	save_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
	if (!save_table)
		return -ENOMEM;

	if (rxfh->indir) {
		for (i = 0; i < apc->indir_table_sz; i++) {
			if (rxfh->indir[i] >= apc->num_queues) {
				err = -EINVAL;
				goto cleanup;
			}
		}

		update_table = true;
		for (i = 0; i < apc->indir_table_sz; i++) {
			save_table[i] = apc->indir_table[i];
			apc->indir_table[i] = rxfh->indir[i];
		}
	}

	if (rxfh->key) {
		update_hash = true;
		memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
		memcpy(apc->hashkey, rxfh->key, MANA_HASH_KEY_SIZE);
	}

	err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
	if (err) { /* recover to original values */
		if (update_table) {
			for (i = 0; i < apc->indir_table_sz; i++)
				apc->indir_table[i] = save_table[i];
		}

		if (update_hash)
			memcpy(apc->hashkey, save_key, MANA_HASH_KEY_SIZE);

		mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
	}

cleanup:
	kfree(save_table);

	return err;
}

static void mana_get_channels(struct net_device *ndev,
			      struct ethtool_channels *channel)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	channel->max_combined = apc->max_queues;
	channel->combined_count = apc->num_queues;
}

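/* RX CQE coalescing is all-or-nothing: a CQE carries either a single
 * completion (rx_cqe_frames == 1) or up to MANA_RXCOMP_OOB_NUM_PPI of
 * them. MANA_RX_CQE_NSEC_DEF is the timeout reported to userspace when
 * coalescing is enabled but the firmware did not supply a value of its
 * own.
 */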
#define MANA_RX_CQE_NSEC_DEF 2048
static int mana_get_coalesce(struct net_device *ndev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	kernel_coal->rx_cqe_frames =
		apc->cqe_coalescing_enable ? MANA_RXCOMP_OOB_NUM_PPI : 1;

	kernel_coal->rx_cqe_nsecs = apc->cqe_coalescing_timeout_ns;

	/* Return the default timeout value for old firmware that does not
	 * provide this value.
	 */
	if (apc->port_is_up && apc->cqe_coalescing_enable &&
	    !kernel_coal->rx_cqe_nsecs)
		kernel_coal->rx_cqe_nsecs = MANA_RX_CQE_NSEC_DEF;

	return 0;
}

static int mana_set_coalesce(struct net_device *ndev,
			     struct ethtool_coalesce *ec,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u8 saved_cqe_coalescing_enable;
	int err;

	if (kernel_coal->rx_cqe_frames != 1 &&
	    kernel_coal->rx_cqe_frames != MANA_RXCOMP_OOB_NUM_PPI) {
		NL_SET_ERR_MSG_FMT(extack,
				   "rx-frames must be 1 or %u, got %u",
				   MANA_RXCOMP_OOB_NUM_PPI,
				   kernel_coal->rx_cqe_frames);
		return -EINVAL;
	}

	saved_cqe_coalescing_enable = apc->cqe_coalescing_enable;
	apc->cqe_coalescing_enable =
		kernel_coal->rx_cqe_frames == MANA_RXCOMP_OOB_NUM_PPI;

	if (!apc->port_is_up)
		return 0;

	err = mana_config_rss(apc, TRI_STATE_TRUE, false, false);
	if (err)
		apc->cqe_coalescing_enable = saved_cqe_coalescing_enable;

	return err;
}

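/* Change the number of queues ("ethtool -L <dev> combined N"). RX buffers
 * for the new queue count are pre-allocated so the detach/attach cycle
 * below cannot fail for lack of memory; on attach failure the old queue
 * count is restored before returning the error.
 */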
static int mana_set_channels(struct net_device *ndev,
			     struct ethtool_channels *channels)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int new_count = channels->combined_count;
	unsigned int old_count = apc->num_queues;
	int err;

	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, new_count);
	if (err) {
		netdev_err(ndev, "Insufficient memory for new allocations\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	apc->num_queues = new_count;
	err = mana_attach(ndev);
	if (err) {
		apc->num_queues = old_count;
		netdev_err(ndev, "mana_attach failed: %d\n", err);
	}

out:
	mana_pre_dealloc_rxbufs(apc);
	return err;
}

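/* Ring size get/set ("ethtool -g/-G <dev>"). Sizes below the driver
 * minimums are rejected, and accepted values are rounded up to the next
 * power of 2. As with channel changes, new RX buffers are pre-allocated
 * before the port is detached and re-attached with the new sizes.
 */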
static void mana_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	ring->rx_pending = apc->rx_queue_size;
	ring->tx_pending = apc->tx_queue_size;
	ring->rx_max_pending = MAX_RX_BUFFERS_PER_QUEUE;
	ring->tx_max_pending = MAX_TX_BUFFERS_PER_QUEUE;
}

static int mana_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 new_tx, new_rx;
	u32 old_tx, old_rx;
	int err;

	old_tx = apc->tx_queue_size;
	old_rx = apc->rx_queue_size;

	if (ring->tx_pending < MIN_TX_BUFFERS_PER_QUEUE) {
		NL_SET_ERR_MSG_FMT(extack, "tx:%u less than the min:%u",
				   ring->tx_pending,
				   MIN_TX_BUFFERS_PER_QUEUE);
		return -EINVAL;
	}

	if (ring->rx_pending < MIN_RX_BUFFERS_PER_QUEUE) {
		NL_SET_ERR_MSG_FMT(extack, "rx:%u less than the min:%u",
				   ring->rx_pending,
				   MIN_RX_BUFFERS_PER_QUEUE);
		return -EINVAL;
	}

	new_rx = roundup_pow_of_two(ring->rx_pending);
	new_tx = roundup_pow_of_two(ring->tx_pending);
	netdev_info(ndev, "Using next power of 2 values for Txq:%u Rxq:%u\n",
		    new_tx, new_rx);

	/* Pre-allocate the new buffers to prevent failures in mana_attach()
	 * later.
	 */
	apc->rx_queue_size = new_rx;
	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
	apc->rx_queue_size = old_rx;
	if (err) {
		netdev_err(ndev, "Insufficient memory for new allocations\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	apc->tx_queue_size = new_tx;
	apc->rx_queue_size = new_rx;

	err = mana_attach(ndev);
	if (err) {
		netdev_err(ndev, "mana_attach failed: %d\n", err);
		apc->tx_queue_size = old_tx;
		apc->rx_queue_size = old_rx;
	}
out:
	mana_pre_dealloc_rxbufs(apc);
	return err;
}

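/* Report link parameters. The link speed is queried from the firmware; if
 * the query fails (e.g. on older firmware), SPEED_UNKNOWN is reported
 * instead. The link is always reported as full duplex, and PORT_OTHER
 * reflects that no conventional physical connector is exposed.
 */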
static int mana_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int err;

	err = mana_query_link_cfg(apc);
	cmd->base.speed = err ? SPEED_UNKNOWN : apc->max_speed;

	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_OTHER;

	return 0;
}

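/* Exported to the netdev core via net_device->ethtool_ops. The ethtool
 * core serializes calls into these handlers under rtnl_lock, so no extra
 * locking is needed here.
 */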
const struct ethtool_ops mana_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_CQE_FRAMES,
	.get_ethtool_stats	= mana_get_ethtool_stats,
	.get_sset_count		= mana_get_sset_count,
	.get_strings		= mana_get_strings,
	.get_rx_ring_count	= mana_get_rx_ring_count,
	.get_rxfh_key_size	= mana_get_rxfh_key_size,
	.get_rxfh_indir_size	= mana_rss_indir_size,
	.get_rxfh		= mana_get_rxfh,
	.set_rxfh		= mana_set_rxfh,
	.get_channels		= mana_get_channels,
	.set_channels		= mana_set_channels,
	.get_coalesce		= mana_get_coalesce,
	.set_coalesce		= mana_set_coalesce,
	.get_ringparam		= mana_get_ringparam,
	.set_ringparam		= mana_set_ringparam,
	.get_link_ksettings	= mana_get_link_ksettings,
	.get_link		= ethtool_op_get_link,
};