// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include <net/mana/mana.h>

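/* Port statistics exposed through ethtool -S. Each entry maps a stat name
 * to its offset inside struct mana_ethtool_stats; the hc_* entries are
 * hardware counters refreshed via mana_query_gf_stats() before being
 * copied out.
 */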
static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} mana_eth_stats[] = {
	{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
	{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
	{"hc_rx_discards_no_wqe",
	 offsetof(struct mana_ethtool_stats, hc_rx_discards_no_wqe)},
	{"hc_rx_err_vport_disabled",
	 offsetof(struct mana_ethtool_stats, hc_rx_err_vport_disabled)},
	{"hc_rx_bytes", offsetof(struct mana_ethtool_stats, hc_rx_bytes)},
	{"hc_rx_ucast_pkts",
	 offsetof(struct mana_ethtool_stats, hc_rx_ucast_pkts)},
	{"hc_rx_ucast_bytes",
	 offsetof(struct mana_ethtool_stats, hc_rx_ucast_bytes)},
	{"hc_rx_bcast_pkts",
	 offsetof(struct mana_ethtool_stats, hc_rx_bcast_pkts)},
	{"hc_rx_bcast_bytes",
	 offsetof(struct mana_ethtool_stats, hc_rx_bcast_bytes)},
	{"hc_rx_mcast_pkts",
	 offsetof(struct mana_ethtool_stats, hc_rx_mcast_pkts)},
	{"hc_rx_mcast_bytes",
	 offsetof(struct mana_ethtool_stats, hc_rx_mcast_bytes)},
	{"hc_tx_err_gf_disabled",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_gf_disabled)},
	{"hc_tx_err_vport_disabled",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_vport_disabled)},
	{"hc_tx_err_inval_vportoffset_pkt",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_inval_vportoffset_pkt)},
	{"hc_tx_err_vlan_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_vlan_enforcement)},
	{"hc_tx_err_eth_type_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_eth_type_enforcement)},
	{"hc_tx_err_sa_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_sa_enforcement)},
	{"hc_tx_err_sqpdid_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_sqpdid_enforcement)},
	{"hc_tx_err_cqpdid_enforcement",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_cqpdid_enforcement)},
	{"hc_tx_err_mtu_violation",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_mtu_violation)},
	{"hc_tx_err_inval_oob",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_inval_oob)},
	{"hc_tx_err_gdma",
	 offsetof(struct mana_ethtool_stats, hc_tx_err_gdma)},
	{"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
	{"hc_tx_ucast_pkts",
	 offsetof(struct mana_ethtool_stats, hc_tx_ucast_pkts)},
	{"hc_tx_ucast_bytes",
	 offsetof(struct mana_ethtool_stats, hc_tx_ucast_bytes)},
	{"hc_tx_bcast_pkts",
	 offsetof(struct mana_ethtool_stats, hc_tx_bcast_pkts)},
	{"hc_tx_bcast_bytes",
	 offsetof(struct mana_ethtool_stats, hc_tx_bcast_bytes)},
	{"hc_tx_mcast_pkts",
	 offsetof(struct mana_ethtool_stats, hc_tx_mcast_pkts)},
	{"hc_tx_mcast_bytes",
	 offsetof(struct mana_ethtool_stats, hc_tx_mcast_bytes)},
	{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
	{"tx_cqe_unknown_type",
	 offsetof(struct mana_ethtool_stats, tx_cqe_unknown_type)},
	{"rx_coalesced_err",
	 offsetof(struct mana_ethtool_stats, rx_coalesced_err)},
	{"rx_cqe_unknown_type",
	 offsetof(struct mana_ethtool_stats, rx_cqe_unknown_type)},
};

static int mana_get_sset_count(struct net_device *ndev, int stringset)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;

	if (stringset != ETH_SS_STATS)
		return -EINVAL;

	return ARRAY_SIZE(mana_eth_stats) + num_queues *
	       (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}

static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	u8 *p = data;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++) {
		memcpy(p, mana_eth_stats[i].name, ETH_GSTRING_LEN);
		p += ETH_GSTRING_LEN;
	}

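	/* The per-queue string layout below must match the order in which
	 * mana_get_ethtool_stats() fills the data array and the counts
	 * reported by mana_get_sset_count().
	 */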
	for (i = 0; i < num_queues; i++) {
		sprintf(p, "rx_%d_packets", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "rx_%d_bytes", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "rx_%d_xdp_drop", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "rx_%d_xdp_tx", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "rx_%d_xdp_redirect", i);
		p += ETH_GSTRING_LEN;
	}

	for (i = 0; i < num_queues; i++) {
		sprintf(p, "tx_%d_packets", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_bytes", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_xdp_xmit", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_tso_packets", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_tso_bytes", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_tso_inner_packets", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_tso_inner_bytes", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_long_pkt_fmt", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_short_pkt_fmt", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_csum_partial", i);
		p += ETH_GSTRING_LEN;
		sprintf(p, "tx_%d_mana_map_err", i);
		p += ETH_GSTRING_LEN;
	}
}

static void mana_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *e_stats, u64 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	void *eth_stats = &apc->eth_stats;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	u64 xdp_redirect;
	u64 xdp_xmit;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 long_pkt_fmt;
	u64 short_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	int q, i = 0;

	if (!apc->port_is_up)
		return;

	/* Update the hc_* counters from the hardware (GDMA) before copying
	 * the stats out.
	 */
	mana_query_gf_stats(apc);

	for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
		data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);

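	/* Per-queue counters are read under u64_stats_fetch_begin()/retry()
	 * so each queue's values form a consistent snapshot even while the
	 * datapath is updating them.
	 */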
	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
			xdp_drop = rx_stats->xdp_drop;
			xdp_tx = rx_stats->xdp_tx;
			xdp_redirect = rx_stats->xdp_redirect;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_drop;
		data[i++] = xdp_tx;
		data[i++] = xdp_redirect;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
			xdp_xmit = tx_stats->xdp_xmit;
			tso_packets = tx_stats->tso_packets;
			tso_bytes = tx_stats->tso_bytes;
			tso_inner_packets = tx_stats->tso_inner_packets;
			tso_inner_bytes = tx_stats->tso_inner_bytes;
			long_pkt_fmt = tx_stats->long_pkt_fmt;
			short_pkt_fmt = tx_stats->short_pkt_fmt;
			csum_partial = tx_stats->csum_partial;
			mana_map_err = tx_stats->mana_map_err;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_xmit;
		data[i++] = tso_packets;
		data[i++] = tso_bytes;
		data[i++] = tso_inner_packets;
		data[i++] = tso_inner_bytes;
		data[i++] = long_pkt_fmt;
		data[i++] = short_pkt_fmt;
		data[i++] = csum_partial;
		data[i++] = mana_map_err;
	}
}

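/* Only ETHTOOL_GRXRINGS is handled; it reports the number of RX rings that
 * RSS can spread flows across. Everything else is unsupported.
 */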
static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd,
			  u32 *rules)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = apc->num_queues;
		return 0;
	}

	return -EOPNOTSUPP;
}

static u32 mana_get_rxfh_key_size(struct net_device *ndev)
{
	return MANA_HASH_KEY_SIZE;
}

static u32 mana_rss_indir_size(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	return apc->indir_table_sz;
}

static int mana_get_rxfh(struct net_device *ndev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int i;

	rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */

	if (rxfh->indir) {
		for (i = 0; i < apc->indir_table_sz; i++)
			rxfh->indir[i] = apc->indir_table[i];
	}

	if (rxfh->key)
		memcpy(rxfh->key, apc->hashkey, MANA_HASH_KEY_SIZE);

	return 0;
}

static int mana_set_rxfh(struct net_device *ndev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	bool update_hash = false, update_table = false;
	u8 save_key[MANA_HASH_KEY_SIZE];
	u32 *save_table;
	int i, err;

	if (!apc->port_is_up)
		return -EOPNOTSUPP;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

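	/* Keep a copy of the current indirection table and hash key so the
	 * old configuration can be restored if mana_config_rss() fails.
	 */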
	save_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
	if (!save_table)
		return -ENOMEM;

	if (rxfh->indir) {
		for (i = 0; i < apc->indir_table_sz; i++)
			if (rxfh->indir[i] >= apc->num_queues) {
				err = -EINVAL;
				goto cleanup;
			}

		update_table = true;
		for (i = 0; i < apc->indir_table_sz; i++) {
			save_table[i] = apc->indir_table[i];
			apc->indir_table[i] = rxfh->indir[i];
		}
	}

	if (rxfh->key) {
		update_hash = true;
		memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
		memcpy(apc->hashkey, rxfh->key, MANA_HASH_KEY_SIZE);
	}

	err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);

	if (err) { /* recover to original values */
		if (update_table) {
			for (i = 0; i < apc->indir_table_sz; i++)
				apc->indir_table[i] = save_table[i];
		}

		if (update_hash)
			memcpy(apc->hashkey, save_key, MANA_HASH_KEY_SIZE);

		mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
	}

cleanup:
	kfree(save_table);

	return err;
}

static void mana_get_channels(struct net_device *ndev,
			      struct ethtool_channels *channel)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	channel->max_combined = apc->max_queues;
	channel->combined_count = apc->num_queues;
}

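/* Changing the channel count tears down and re-creates all queues, so RX
 * buffers for the new configuration are pre-allocated first to keep
 * mana_attach() from failing on memory after the port is already detached.
 */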
static int mana_set_channels(struct net_device *ndev,
			     struct ethtool_channels *channels)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int new_count = channels->combined_count;
	unsigned int old_count = apc->num_queues;
	int err;

	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, new_count);
	if (err) {
		netdev_err(ndev, "Insufficient memory for new allocations\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	apc->num_queues = new_count;
	err = mana_attach(ndev);
	if (err) {
		apc->num_queues = old_count;
		netdev_err(ndev, "mana_attach failed: %d\n", err);
	}

out:
	mana_pre_dealloc_rxbufs(apc);
	return err;
}

static void mana_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	ring->rx_pending = apc->rx_queue_size;
	ring->tx_pending = apc->tx_queue_size;
	ring->rx_max_pending = MAX_RX_BUFFERS_PER_QUEUE;
	ring->tx_max_pending = MAX_TX_BUFFERS_PER_QUEUE;
}

static int mana_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 new_tx, new_rx;
	u32 old_tx, old_rx;
	int err;

	old_tx = apc->tx_queue_size;
	old_rx = apc->rx_queue_size;

	if (ring->tx_pending < MIN_TX_BUFFERS_PER_QUEUE) {
		NL_SET_ERR_MSG_FMT(extack, "tx:%d less than the min:%d",
				   ring->tx_pending, MIN_TX_BUFFERS_PER_QUEUE);
		return -EINVAL;
	}

	if (ring->rx_pending < MIN_RX_BUFFERS_PER_QUEUE) {
		NL_SET_ERR_MSG_FMT(extack, "rx:%d less than the min:%d",
				   ring->rx_pending, MIN_RX_BUFFERS_PER_QUEUE);
		return -EINVAL;
	}

	new_rx = roundup_pow_of_two(ring->rx_pending);
	new_tx = roundup_pow_of_two(ring->tx_pending);
	netdev_info(ndev, "Using nearest power of 2 values for Txq:%d Rxq:%d\n",
		    new_tx, new_rx);

	/* pre-allocating new buffers to prevent failures in mana_attach() later */
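	/* Temporarily apply the new RX size so mana_pre_alloc_rxbufs()
	 * allocates for it, then restore the old value until the
	 * reconfiguration actually succeeds.
	 */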
	apc->rx_queue_size = new_rx;
	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
	apc->rx_queue_size = old_rx;
	if (err) {
		netdev_err(ndev, "Insufficient memory for new allocations\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	apc->tx_queue_size = new_tx;
	apc->rx_queue_size = new_rx;

	err = mana_attach(ndev);
	if (err) {
		netdev_err(ndev, "mana_attach failed: %d\n", err);
		apc->tx_queue_size = old_tx;
		apc->rx_queue_size = old_rx;
	}
out:
	mana_pre_dealloc_rxbufs(apc);
	return err;
}

const struct ethtool_ops mana_ethtool_ops = {
	.get_ethtool_stats = mana_get_ethtool_stats,
	.get_sset_count = mana_get_sset_count,
	.get_strings = mana_get_strings,
	.get_rxnfc = mana_get_rxnfc,
	.get_rxfh_key_size = mana_get_rxfh_key_size,
	.get_rxfh_indir_size = mana_rss_indir_size,
	.get_rxfh = mana_get_rxfh,
	.set_rxfh = mana_set_rxfh,
	.get_channels = mana_get_channels,
	.set_channels = mana_set_channels,
	.get_ringparam = mana_get_ringparam,
	.set_ringparam = mana_set_ringparam,
};