1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3
4 #include <linux/ethtool.h>
5 #include <linux/netdevice.h>
6 #include <linux/pci.h>
7 #include <net/ipv6.h>
8
9 #include "fbnic.h"
10 #include "fbnic_netdev.h"
11 #include "fbnic_tlv.h"
12
/* Descriptor for a single ethtool statistic: its display name plus the
 * size and byte offset of the backing field within the stats structure
 * the table it lives in targets.
 */
struct fbnic_stat {
	u8 string[ETH_GSTRING_LEN];
	unsigned int size;
	unsigned int offset;
};
18
/* Build a struct fbnic_stat entry for field @stat of struct @type,
 * reported to userspace under the ethtool string @name.
 */
#define FBNIC_STAT_FIELDS(type, name, stat) { \
	.string = name, \
	.size = sizeof_field(struct type, stat), \
	.offset = offsetof(struct type, stat), \
}

/* Hardware statistics not captured in rtnl_link_stats */
#define FBNIC_HW_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_hw_stats, name, stat)
28
/* Fixed (non-indexed) hardware statistics from the TTI, TMI and RPC
 * blocks; offsets resolve within struct fbnic_hw_stats.
 */
static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
	/* TTI */
	FBNIC_HW_STAT("tti_cm_drop_frames", tti.cm_drop.frames),
	FBNIC_HW_STAT("tti_cm_drop_bytes", tti.cm_drop.bytes),
	FBNIC_HW_STAT("tti_frame_drop_frames", tti.frame_drop.frames),
	FBNIC_HW_STAT("tti_frame_drop_bytes", tti.frame_drop.bytes),
	FBNIC_HW_STAT("tti_tbi_drop_frames", tti.tbi_drop.frames),
	FBNIC_HW_STAT("tti_tbi_drop_bytes", tti.tbi_drop.bytes),

	/* TMI */
	FBNIC_HW_STAT("ptp_illegal_req", tmi.ptp_illegal_req),
	FBNIC_HW_STAT("ptp_good_ts", tmi.ptp_good_ts),
	FBNIC_HW_STAT("ptp_bad_ts", tmi.ptp_bad_ts),

	/* RPC */
	FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype),
	FBNIC_HW_STAT("rpc_unkn_ext_hdr", rpc.unkn_ext_hdr),
	FBNIC_HW_STAT("rpc_ipv4_frag", rpc.ipv4_frag),
	FBNIC_HW_STAT("rpc_ipv6_frag", rpc.ipv6_frag),
	FBNIC_HW_STAT("rpc_ipv4_esp", rpc.ipv4_esp),
	FBNIC_HW_STAT("rpc_ipv6_esp", rpc.ipv6_esp),
	FBNIC_HW_STAT("rpc_tcp_opt_err", rpc.tcp_opt_err),
	FBNIC_HW_STAT("rpc_out_of_hdr_err", rpc.out_of_hdr_err),
};

/* Number of fixed hardware stats above */
#define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats)
55
/* Per-enqueue-unit RXB statistics. The "%u" in each name is replaced
 * with the enqueue index when the strings are generated.
 */
#define FBNIC_RXB_ENQUEUE_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_rxb_enqueue_stats, name, stat)

static const struct fbnic_stat fbnic_gstrings_rxb_enqueue_stats[] = {
	FBNIC_RXB_ENQUEUE_STAT("rxb_integrity_err%u", integrity_err),
	FBNIC_RXB_ENQUEUE_STAT("rxb_mac_err%u", mac_err),
	FBNIC_RXB_ENQUEUE_STAT("rxb_parser_err%u", parser_err),
	FBNIC_RXB_ENQUEUE_STAT("rxb_frm_err%u", frm_err),

	FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_frames", drbo.frames),
	FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_bytes", drbo.bytes),
};

#define FBNIC_HW_RXB_ENQUEUE_STATS_LEN \
	ARRAY_SIZE(fbnic_gstrings_rxb_enqueue_stats)
71
/* Per-FIFO RXB statistics; "%u" is replaced with the FIFO index when
 * the strings are generated.
 */
#define FBNIC_RXB_FIFO_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_rxb_fifo_stats, name, stat)

static const struct fbnic_stat fbnic_gstrings_rxb_fifo_stats[] = {
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_drop", trans_drop),
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_dropped_frames", drop.frames),
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_ecn", trans_ecn),
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_level", level),
};

#define FBNIC_HW_RXB_FIFO_STATS_LEN ARRAY_SIZE(fbnic_gstrings_rxb_fifo_stats)
83
/* Per-dequeue-unit RXB statistics; "%u" is replaced with the dequeue
 * index when the strings are generated.
 */
#define FBNIC_RXB_DEQUEUE_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_rxb_dequeue_stats, name, stat)

static const struct fbnic_stat fbnic_gstrings_rxb_dequeue_stats[] = {
	FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_frames", intf.frames),
	FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_bytes", intf.bytes),
	FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_frames", pbuf.frames),
	FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_bytes", pbuf.bytes),
};

#define FBNIC_HW_RXB_DEQUEUE_STATS_LEN \
	ARRAY_SIZE(fbnic_gstrings_rxb_dequeue_stats)
96
/* Per-queue statistics; the offset targets the ".value" member of the
 * counter within struct fbnic_hw_q_stats. "%u" is replaced with the
 * queue index when the strings are generated.
 */
#define FBNIC_HW_Q_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_hw_q_stats, name, stat.value)

static const struct fbnic_stat fbnic_gstrings_hw_q_stats[] = {
	FBNIC_HW_Q_STAT("rde_%u_pkt_err", rde_pkt_err),
	FBNIC_HW_Q_STAT("rde_%u_pkt_cq_drop", rde_pkt_cq_drop),
	FBNIC_HW_Q_STAT("rde_%u_pkt_bdq_drop", rde_pkt_bdq_drop),
};

#define FBNIC_HW_Q_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_q_stats)
/* Total number of ETH_SS_STATS entries reported by get_sset_count() */
#define FBNIC_HW_STATS_LEN \
	(FBNIC_HW_FIXED_STATS_LEN + \
	 FBNIC_HW_RXB_ENQUEUE_STATS_LEN * FBNIC_RXB_ENQUEUE_INDICES + \
	 FBNIC_HW_RXB_FIFO_STATS_LEN * FBNIC_RXB_FIFO_INDICES + \
	 FBNIC_HW_RXB_DEQUEUE_STATS_LEN * FBNIC_RXB_DEQUEUE_INDICES + \
	 FBNIC_HW_Q_STATS_LEN * FBNIC_MAX_QUEUES)
113
114 static void
fbnic_get_drvinfo(struct net_device * netdev,struct ethtool_drvinfo * drvinfo)115 fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
116 {
117 struct fbnic_net *fbn = netdev_priv(netdev);
118 struct fbnic_dev *fbd = fbn->fbd;
119
120 fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
121 sizeof(drvinfo->fw_version));
122 }
123
fbnic_get_regs_len(struct net_device * netdev)124 static int fbnic_get_regs_len(struct net_device *netdev)
125 {
126 struct fbnic_net *fbn = netdev_priv(netdev);
127
128 return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
129 }
130
fbnic_get_regs(struct net_device * netdev,struct ethtool_regs * regs,void * data)131 static void fbnic_get_regs(struct net_device *netdev,
132 struct ethtool_regs *regs, void *data)
133 {
134 struct fbnic_net *fbn = netdev_priv(netdev);
135
136 fbnic_csr_get_regs(fbn->fbd, data, ®s->version);
137 }
138
fbnic_clone_create(struct fbnic_net * orig)139 static struct fbnic_net *fbnic_clone_create(struct fbnic_net *orig)
140 {
141 struct fbnic_net *clone;
142
143 clone = kmemdup(orig, sizeof(*orig), GFP_KERNEL);
144 if (!clone)
145 return NULL;
146
147 memset(clone->tx, 0, sizeof(clone->tx));
148 memset(clone->rx, 0, sizeof(clone->rx));
149 memset(clone->napi, 0, sizeof(clone->napi));
150 return clone;
151 }
152
fbnic_clone_swap_cfg(struct fbnic_net * orig,struct fbnic_net * clone)153 static void fbnic_clone_swap_cfg(struct fbnic_net *orig,
154 struct fbnic_net *clone)
155 {
156 swap(clone->rcq_size, orig->rcq_size);
157 swap(clone->hpq_size, orig->hpq_size);
158 swap(clone->ppq_size, orig->ppq_size);
159 swap(clone->txq_size, orig->txq_size);
160 swap(clone->num_rx_queues, orig->num_rx_queues);
161 swap(clone->num_tx_queues, orig->num_tx_queues);
162 swap(clone->num_napi, orig->num_napi);
163 }
164
fbnic_aggregate_vector_counters(struct fbnic_net * fbn,struct fbnic_napi_vector * nv)165 static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
166 struct fbnic_napi_vector *nv)
167 {
168 int i, j;
169
170 for (i = 0; i < nv->txt_count; i++) {
171 fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0);
172 fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub1);
173 fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl);
174 }
175
176 for (j = 0; j < nv->rxt_count; j++, i++) {
177 fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub0);
178 fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub1);
179 fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl);
180 }
181 }
182
fbnic_clone_swap(struct fbnic_net * orig,struct fbnic_net * clone)183 static void fbnic_clone_swap(struct fbnic_net *orig,
184 struct fbnic_net *clone)
185 {
186 struct fbnic_dev *fbd = orig->fbd;
187 unsigned int i;
188
189 for (i = 0; i < max(clone->num_napi, orig->num_napi); i++)
190 fbnic_synchronize_irq(fbd, FBNIC_NON_NAPI_VECTORS + i);
191 for (i = 0; i < orig->num_napi; i++)
192 fbnic_aggregate_vector_counters(orig, orig->napi[i]);
193
194 fbnic_clone_swap_cfg(orig, clone);
195
196 for (i = 0; i < ARRAY_SIZE(orig->napi); i++)
197 swap(clone->napi[i], orig->napi[i]);
198 for (i = 0; i < ARRAY_SIZE(orig->tx); i++)
199 swap(clone->tx[i], orig->tx[i]);
200 for (i = 0; i < ARRAY_SIZE(orig->rx); i++)
201 swap(clone->rx[i], orig->rx[i]);
202 }
203
/* Release a configuration clone created by fbnic_clone_create(). */
static void fbnic_clone_free(struct fbnic_net *clone)
{
	kfree(clone);
}
208
fbnic_get_coalesce(struct net_device * netdev,struct ethtool_coalesce * ec,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)209 static int fbnic_get_coalesce(struct net_device *netdev,
210 struct ethtool_coalesce *ec,
211 struct kernel_ethtool_coalesce *kernel_coal,
212 struct netlink_ext_ack *extack)
213 {
214 struct fbnic_net *fbn = netdev_priv(netdev);
215
216 ec->tx_coalesce_usecs = fbn->tx_usecs;
217 ec->rx_coalesce_usecs = fbn->rx_usecs;
218 ec->rx_max_coalesced_frames = fbn->rx_max_frames;
219
220 return 0;
221 }
222
fbnic_set_coalesce(struct net_device * netdev,struct ethtool_coalesce * ec,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)223 static int fbnic_set_coalesce(struct net_device *netdev,
224 struct ethtool_coalesce *ec,
225 struct kernel_ethtool_coalesce *kernel_coal,
226 struct netlink_ext_ack *extack)
227 {
228 struct fbnic_net *fbn = netdev_priv(netdev);
229
230 /* Verify against hardware limits */
231 if (ec->rx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT)) {
232 NL_SET_ERR_MSG_MOD(extack, "rx_usecs is above device max");
233 return -EINVAL;
234 }
235 if (ec->tx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT)) {
236 NL_SET_ERR_MSG_MOD(extack, "tx_usecs is above device max");
237 return -EINVAL;
238 }
239 if (ec->rx_max_coalesced_frames >
240 FIELD_MAX(FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK) /
241 FBNIC_MIN_RXD_PER_FRAME) {
242 NL_SET_ERR_MSG_MOD(extack, "rx_frames is above device max");
243 return -EINVAL;
244 }
245
246 fbn->tx_usecs = ec->tx_coalesce_usecs;
247 fbn->rx_usecs = ec->rx_coalesce_usecs;
248 fbn->rx_max_frames = ec->rx_max_coalesced_frames;
249
250 if (netif_running(netdev)) {
251 int i;
252
253 for (i = 0; i < fbn->num_napi; i++) {
254 struct fbnic_napi_vector *nv = fbn->napi[i];
255
256 fbnic_config_txrx_usecs(nv, 0);
257 fbnic_config_rx_frames(nv);
258 }
259 }
260
261 return 0;
262 }
263
264 static void
fbnic_get_ringparam(struct net_device * netdev,struct ethtool_ringparam * ring,struct kernel_ethtool_ringparam * kernel_ring,struct netlink_ext_ack * extack)265 fbnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
266 struct kernel_ethtool_ringparam *kernel_ring,
267 struct netlink_ext_ack *extack)
268 {
269 struct fbnic_net *fbn = netdev_priv(netdev);
270
271 ring->rx_max_pending = FBNIC_QUEUE_SIZE_MAX;
272 ring->rx_mini_max_pending = FBNIC_QUEUE_SIZE_MAX;
273 ring->rx_jumbo_max_pending = FBNIC_QUEUE_SIZE_MAX;
274 ring->tx_max_pending = FBNIC_QUEUE_SIZE_MAX;
275
276 ring->rx_pending = fbn->rcq_size;
277 ring->rx_mini_pending = fbn->hpq_size;
278 ring->rx_jumbo_pending = fbn->ppq_size;
279 ring->tx_pending = fbn->txq_size;
280 }
281
fbnic_set_rings(struct fbnic_net * fbn,struct ethtool_ringparam * ring)282 static void fbnic_set_rings(struct fbnic_net *fbn,
283 struct ethtool_ringparam *ring)
284 {
285 fbn->rcq_size = ring->rx_pending;
286 fbn->hpq_size = ring->rx_mini_pending;
287 fbn->ppq_size = ring->rx_jumbo_pending;
288 fbn->txq_size = ring->tx_pending;
289 }
290
/* Change the ring sizes, restarting the interface when it is running.
 *
 * Sizes are rounded up to powers of two and validated against device
 * minimums. For a running device the new rings are fully allocated in
 * a clone of the private data before the old ones are torn down, so
 * any failure leaves the original configuration usable.
 */
static int
fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
		    struct kernel_ethtool_ringparam *kernel_ring,
		    struct netlink_ext_ack *extack)

{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_net *clone;
	int err;

	/* Hardware requires power-of-two ring sizes */
	ring->rx_pending = roundup_pow_of_two(ring->rx_pending);
	ring->rx_mini_pending = roundup_pow_of_two(ring->rx_mini_pending);
	ring->rx_jumbo_pending = roundup_pow_of_two(ring->rx_jumbo_pending);
	ring->tx_pending = roundup_pow_of_two(ring->tx_pending);

	/* These are absolute minimums allowing the device and driver to operate
	 * but not necessarily guarantee reasonable performance. Settings below
	 * Rx queue size of 128 and BDQs smaller than 64 are likely suboptimal
	 * at best.
	 */
	if (ring->rx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_RX_DESC_MIN) ||
	    ring->rx_mini_pending < FBNIC_QUEUE_SIZE_MIN ||
	    ring->rx_jumbo_pending < FBNIC_QUEUE_SIZE_MIN ||
	    ring->tx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_TX_DESC_MIN)) {
		NL_SET_ERR_MSG_MOD(extack, "requested ring size too small");
		return -EINVAL;
	}

	/* Device is down: just record the new configuration */
	if (!netif_running(netdev)) {
		fbnic_set_rings(fbn, ring);
		return 0;
	}

	/* Stage the new rings in a clone so failures can't hurt us */
	clone = fbnic_clone_create(fbn);
	if (!clone)
		return -ENOMEM;

	fbnic_set_rings(clone, ring);

	err = fbnic_alloc_napi_vectors(clone);
	if (err)
		goto err_free_clone;

	err = fbnic_alloc_resources(clone);
	if (err)
		goto err_free_napis;

	fbnic_down_noidle(fbn);
	err = fbnic_wait_all_queues_idle(fbn->fbd, true);
	if (err)
		goto err_start_stack;

	err = fbnic_set_netif_queues(clone);
	if (err)
		goto err_start_stack;

	/* Nothing can fail past this point */
	fbnic_flush(fbn);

	fbnic_clone_swap(fbn, clone);

	fbnic_up(fbn);

	/* The clone now owns the old rings; free them */
	fbnic_free_resources(clone);
	fbnic_free_napi_vectors(clone);
	fbnic_clone_free(clone);

	return 0;

err_start_stack:
	/* Bring the original config back up before unwinding the clone */
	fbnic_flush(fbn);
	fbnic_up(fbn);
	fbnic_free_resources(clone);
err_free_napis:
	fbnic_free_napi_vectors(clone);
err_free_clone:
	fbnic_clone_free(clone);
	return err;
}
370
/* Emit the per-enqueue-unit stat strings, substituting @idx for the
 * "%u" placeholder in each name.
 */
static void fbnic_get_rxb_enqueue_strings(u8 **data, unsigned int idx)
{
	int i;

	for (i = 0; i < FBNIC_HW_RXB_ENQUEUE_STATS_LEN; i++)
		ethtool_sprintf(data,
				fbnic_gstrings_rxb_enqueue_stats[i].string,
				idx);
}
380
/* Emit the per-FIFO stat strings, substituting @idx for the "%u"
 * placeholder in each name.
 */
static void fbnic_get_rxb_fifo_strings(u8 **data, unsigned int idx)
{
	int i;

	for (i = 0; i < FBNIC_HW_RXB_FIFO_STATS_LEN; i++)
		ethtool_sprintf(data, fbnic_gstrings_rxb_fifo_stats[i].string,
				idx);
}
390
/* Emit the per-dequeue-unit stat strings, substituting @idx for the
 * "%u" placeholder in each name.
 */
static void fbnic_get_rxb_dequeue_strings(u8 **data, unsigned int idx)
{
	int i;

	for (i = 0; i < FBNIC_HW_RXB_DEQUEUE_STATS_LEN; i++)
		ethtool_sprintf(data,
				fbnic_gstrings_rxb_dequeue_stats[i].string,
				idx);
}
400
/* Build the ETH_SS_STATS string table in reporting order: fixed HW
 * stats, then per-index RXB enqueue/FIFO/dequeue stats, then per-queue
 * stats. Must stay in sync with fbnic_get_ethtool_stats().
 */
static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	int i, q;

	if (sset != ETH_SS_STATS)
		return;

	for (i = 0; i < FBNIC_HW_FIXED_STATS_LEN; i++)
		ethtool_puts(&data, fbnic_gstrings_hw_stats[i].string);

	for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++)
		fbnic_get_rxb_enqueue_strings(&data, i);

	for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
		fbnic_get_rxb_fifo_strings(&data, i);

	for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++)
		fbnic_get_rxb_dequeue_strings(&data, i);

	for (q = 0; q < FBNIC_MAX_QUEUES; q++)
		for (i = 0; i < FBNIC_HW_Q_STATS_LEN; i++)
			ethtool_sprintf(&data,
					fbnic_gstrings_hw_q_stats[i].string,
					q);
}
429
/* Copy @len u64 counters described by the @stat table out of @base,
 * advancing the caller's output cursor as each value is written.
 */
static void fbnic_report_hw_stats(const struct fbnic_stat *stat,
				  const void *base, int len, u64 **data)
{
	int i;

	for (i = 0; i < len; i++) {
		const u8 *src = (const u8 *)base + stat[i].offset;

		*(*data)++ = *(const u64 *)src;
	}
}
442
/* Report all hardware statistics in the same order the strings were
 * emitted by fbnic_get_strings(). The counters are copied out under
 * hw_stats_lock so the reported values are mutually consistent.
 */
static void fbnic_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct fbnic_net *fbn = netdev_priv(dev);
	struct fbnic_dev *fbd = fbn->fbd;
	int i;

	/* Refresh the counters from hardware before snapshotting */
	fbnic_get_hw_stats(fbn->fbd);

	spin_lock(&fbd->hw_stats_lock);
	fbnic_report_hw_stats(fbnic_gstrings_hw_stats, &fbd->hw_stats,
			      FBNIC_HW_FIXED_STATS_LEN, &data);

	for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++) {
		const struct fbnic_rxb_enqueue_stats *enq;

		enq = &fbd->hw_stats.rxb.enq[i];
		fbnic_report_hw_stats(fbnic_gstrings_rxb_enqueue_stats,
				      enq, FBNIC_HW_RXB_ENQUEUE_STATS_LEN,
				      &data);
	}

	for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) {
		const struct fbnic_rxb_fifo_stats *fifo;

		fifo = &fbd->hw_stats.rxb.fifo[i];
		fbnic_report_hw_stats(fbnic_gstrings_rxb_fifo_stats,
				      fifo, FBNIC_HW_RXB_FIFO_STATS_LEN,
				      &data);
	}

	for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++) {
		const struct fbnic_rxb_dequeue_stats *deq;

		deq = &fbd->hw_stats.rxb.deq[i];
		fbnic_report_hw_stats(fbnic_gstrings_rxb_dequeue_stats,
				      deq, FBNIC_HW_RXB_DEQUEUE_STATS_LEN,
				      &data);
	}

	for (i = 0; i < FBNIC_MAX_QUEUES; i++) {
		const struct fbnic_hw_q_stats *hw_q = &fbd->hw_stats.hw_q[i];

		fbnic_report_hw_stats(fbnic_gstrings_hw_q_stats, hw_q,
				      FBNIC_HW_Q_STATS_LEN, &data);
	}
	spin_unlock(&fbd->hw_stats_lock);
}
491
fbnic_get_sset_count(struct net_device * dev,int sset)492 static int fbnic_get_sset_count(struct net_device *dev, int sset)
493 {
494 switch (sset) {
495 case ETH_SS_STATS:
496 return FBNIC_HW_STATS_LEN;
497 default:
498 return -EOPNOTSUPP;
499 }
500 }
501
/* Map an ethtool flow type to the driver's RSS hash option index,
 * ignoring the EXT/MAC_EXT/RSS modifier flags. Returns -1 when the
 * flow type has no corresponding hash option.
 */
static int fbnic_get_rss_hash_idx(u32 flow_type)
{
	switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
	case TCP_V4_FLOW:
		return FBNIC_TCP4_HASH_OPT;
	case TCP_V6_FLOW:
		return FBNIC_TCP6_HASH_OPT;
	case UDP_V4_FLOW:
		return FBNIC_UDP4_HASH_OPT;
	case UDP_V6_FLOW:
		return FBNIC_UDP6_HASH_OPT;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case IPV4_FLOW:
	case IPV4_USER_FLOW:
		return FBNIC_IPV4_HASH_OPT;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case SCTP_V6_FLOW:
	case IPV6_FLOW:
	case IPV6_USER_FLOW:
		return FBNIC_IPV6_HASH_OPT;
	case ETHER_FLOW:
		return FBNIC_ETHER_HASH_OPT;
	}

	return -1;
}
533
/* Walk the NFC region of the action TCAM counting valid rules and,
 * when @rule_locs is non-NULL, recording each rule's user-visible
 * location. Returns the rule count, or -EMSGSIZE when @rule_locs
 * cannot hold them all.
 */
static int fbnic_get_cls_rule_all(struct fbnic_net *fbn,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct fbnic_dev *fbd = fbn->fbd;
	int count = 0, i;

	/* Report maximum rule count */
	cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;

	for (i = 0; i < FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i++) {
		const struct fbnic_act_tcam *act_tcam;

		act_tcam = &fbd->act_tcam[i + FBNIC_RPC_ACT_TBL_NFC_OFFSET];
		if (act_tcam->state != FBNIC_TCAM_S_VALID)
			continue;

		if (rule_locs) {
			if (count == cmd->rule_cnt)
				return -EMSGSIZE;

			rule_locs[count] = i;
		}

		count++;
	}

	return count;
}
564
/* Reconstruct the classification rule at fsp->location for ethtool.
 *
 * The flow spec is rebuilt from the action TCAM entry plus any MAC or
 * IP TCAM entries it references. Hardware stores masks inverted with
 * respect to ethtool semantics, hence the ~ applied on each mask read.
 */
static int fbnic_get_cls_rule(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_act_tcam *act_tcam;
	int idx;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
		return -EINVAL;

	idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
	act_tcam = &fbd->act_tcam[idx];

	if (act_tcam->state != FBNIC_TCAM_S_VALID)
		return -EINVAL;

	/* Report maximum rule count */
	cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;

	/* Set flow type field */
	if (!(act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_VALID)) {
		/* No IP match: L2 (Ethernet) rule */
		fsp->flow_type = ETHER_FLOW;
		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
			       act_tcam->mask.tcam[1])) {
			struct fbnic_mac_addr *mac_addr;

			/* Look up the destination MAC via its TCAM index */
			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
					act_tcam->value.tcam[1]);
			mac_addr = &fbd->mac_addr[idx];

			ether_addr_copy(fsp->h_u.ether_spec.h_dest,
					mac_addr->value.addr8);
			eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
		}
	} else if (act_tcam->value.tcam[1] &
		   FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID) {
		/* Outer (tunnel) IPv6 header rule */
		fsp->flow_type = IPV6_USER_FLOW;
		fsp->h_u.usr_ip6_spec.l4_proto = IPPROTO_IPV6;
		fsp->m_u.usr_ip6_spec.l4_proto = 0xff;

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ipo_src[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6src[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6src[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ipo_dst[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6dst[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6dst[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}
	} else if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_IS_V6)) {
		/* IPv6 rule, optionally with TCP/UDP port matching */
		if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
			if (act_tcam->value.tcam[1] &
			    FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
				fsp->flow_type = UDP_V6_FLOW;
			else
				fsp->flow_type = TCP_V6_FLOW;
			fsp->h_u.tcp_ip6_spec.psrc =
				cpu_to_be16(act_tcam->value.tcam[3]);
			fsp->m_u.tcp_ip6_spec.psrc =
				cpu_to_be16(~act_tcam->mask.tcam[3]);
			fsp->h_u.tcp_ip6_spec.pdst =
				cpu_to_be16(act_tcam->value.tcam[4]);
			fsp->m_u.tcp_ip6_spec.pdst =
				cpu_to_be16(~act_tcam->mask.tcam[4]);
		} else {
			fsp->flow_type = IPV6_USER_FLOW;
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_src[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6src[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6src[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_dst[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6dst[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6dst[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}
	} else {
		/* IPv4 rule, optionally with TCP/UDP port matching */
		if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
			if (act_tcam->value.tcam[1] &
			    FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
				fsp->flow_type = UDP_V4_FLOW;
			else
				fsp->flow_type = TCP_V4_FLOW;
			fsp->h_u.tcp_ip4_spec.psrc =
				cpu_to_be16(act_tcam->value.tcam[3]);
			fsp->m_u.tcp_ip4_spec.psrc =
				cpu_to_be16(~act_tcam->mask.tcam[3]);
			fsp->h_u.tcp_ip4_spec.pdst =
				cpu_to_be16(act_tcam->value.tcam[4]);
			fsp->m_u.tcp_ip4_spec.pdst =
				cpu_to_be16(~act_tcam->mask.tcam[4]);
		} else {
			fsp->flow_type = IPV4_USER_FLOW;
			fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_src[idx];

			/* IPv4 addresses occupy the last word of the
			 * IPv6-sized storage
			 */
			fsp->h_u.usr_ip4_spec.ip4src =
				ip_addr->value.s6_addr32[3];
			fsp->m_u.usr_ip4_spec.ip4src =
				~ip_addr->mask.s6_addr32[3];
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_dst[idx];

			fsp->h_u.usr_ip4_spec.ip4dst =
				ip_addr->value.s6_addr32[3];
			fsp->m_u.usr_ip4_spec.ip4dst =
				~ip_addr->mask.s6_addr32[3];
		}
	}

	/* Record action */
	if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_DROP)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_Q_SEL)
		fsp->ring_cookie = FIELD_GET(FBNIC_RPC_ACT_TBL0_Q_ID,
					     act_tcam->dest);
	else
		fsp->flow_type |= FLOW_RSS;

	cmd->rss_context = FIELD_GET(FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID,
				     act_tcam->dest);

	return 0;
}
755
/* Dispatch ethtool RXNFC get requests.
 *
 * GRXCLSRLCNT deliberately falls through into the GRXCLSRLALL path
 * with rule_locs forced to NULL so only the count is produced, and
 * advertises RX_CLS_LOC_SPECIAL support via cmd->data.
 */
static int fbnic_get_rxnfc(struct net_device *netdev,
			   struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int ret = -EOPNOTSUPP;
	u32 special = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = fbn->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = fbnic_get_cls_rule(fbn, cmd);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		rule_locs = NULL;
		special = RX_CLS_LOC_SPECIAL;
		fallthrough;
	case ETHTOOL_GRXCLSRLALL:
		ret = fbnic_get_cls_rule_all(fbn, cmd, rule_locs);
		if (ret < 0)
			break;

		cmd->data |= special;
		cmd->rule_cnt = ret;
		ret = 0;
		break;
	}

	return ret;
}
788
fbnic_cls_rule_any_loc(struct fbnic_dev * fbd)789 static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd)
790 {
791 int i;
792
793 for (i = FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i--;) {
794 int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
795
796 if (fbd->act_tcam[idx].state != FBNIC_TCAM_S_VALID)
797 return i;
798 }
799
800 return -ENOSPC;
801 }
802
fbnic_set_cls_rule_ins(struct fbnic_net * fbn,const struct ethtool_rxnfc * cmd)803 static int fbnic_set_cls_rule_ins(struct fbnic_net *fbn,
804 const struct ethtool_rxnfc *cmd)
805 {
806 u16 flow_value = 0, flow_mask = 0xffff, ip_value = 0, ip_mask = 0xffff;
807 u16 sport = 0, sport_mask = ~0, dport = 0, dport_mask = ~0;
808 u16 misc = 0, misc_mask = ~0;
809 u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
810 FBNIC_RPC_ACT_TBL0_DEST_HOST);
811 struct fbnic_ip_addr *ip_src = NULL, *ip_dst = NULL;
812 struct fbnic_mac_addr *mac_addr = NULL;
813 struct ethtool_rx_flow_spec *fsp;
814 struct fbnic_dev *fbd = fbn->fbd;
815 struct fbnic_act_tcam *act_tcam;
816 struct in6_addr *addr6, *mask6;
817 struct in_addr *addr4, *mask4;
818 int hash_idx, location;
819 u32 flow_type;
820 int idx, j;
821
822 fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
823
824 if (fsp->location != RX_CLS_LOC_ANY)
825 return -EINVAL;
826 location = fbnic_cls_rule_any_loc(fbd);
827 if (location < 0)
828 return location;
829
830 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
831 dest = FBNIC_RPC_ACT_TBL0_DROP;
832 } else if (fsp->flow_type & FLOW_RSS) {
833 if (cmd->rss_context == 1)
834 dest |= FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID;
835 } else {
836 u32 ring_idx = ethtool_get_flow_spec_ring(fsp->ring_cookie);
837
838 if (ring_idx >= fbn->num_rx_queues)
839 return -EINVAL;
840
841 dest |= FBNIC_RPC_ACT_TBL0_Q_SEL |
842 FIELD_PREP(FBNIC_RPC_ACT_TBL0_Q_ID, ring_idx);
843 }
844
845 idx = location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
846 act_tcam = &fbd->act_tcam[idx];
847
848 /* Do not allow overwriting for now.
849 * To support overwriting rules we will need to add logic to free
850 * any IP or MACDA TCAMs that may be associated with the old rule.
851 */
852 if (act_tcam->state != FBNIC_TCAM_S_DISABLED)
853 return -EBUSY;
854
855 flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_RSS);
856 hash_idx = fbnic_get_rss_hash_idx(flow_type);
857
858 switch (flow_type) {
859 case UDP_V4_FLOW:
860 udp4_flow:
861 flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
862 fallthrough;
863 case TCP_V4_FLOW:
864 tcp4_flow:
865 flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
866 flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
867 FBNIC_RPC_TCAM_ACT1_L4_VALID);
868
869 sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
870 sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
871 dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
872 dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
873 goto ip4_flow;
874 case IP_USER_FLOW:
875 if (!fsp->m_u.usr_ip4_spec.proto)
876 goto ip4_flow;
877 if (fsp->m_u.usr_ip4_spec.proto != 0xff)
878 return -EINVAL;
879 if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_UDP)
880 goto udp4_flow;
881 if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_TCP)
882 goto tcp4_flow;
883 return -EINVAL;
884 ip4_flow:
885 addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4src;
886 mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4src;
887 if (mask4->s_addr) {
888 ip_src = __fbnic_ip4_sync(fbd, fbd->ip_src,
889 addr4, mask4);
890 if (!ip_src)
891 return -ENOSPC;
892
893 set_bit(idx, ip_src->act_tcam);
894 ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
895 FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
896 ip_src - fbd->ip_src);
897 ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
898 FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
899 }
900
901 addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4dst;
902 mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4dst;
903 if (mask4->s_addr) {
904 ip_dst = __fbnic_ip4_sync(fbd, fbd->ip_dst,
905 addr4, mask4);
906 if (!ip_dst) {
907 if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
908 memset(ip_src, 0, sizeof(*ip_src));
909 return -ENOSPC;
910 }
911
912 set_bit(idx, ip_dst->act_tcam);
913 ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
914 FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
915 ip_dst - fbd->ip_dst);
916 ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
917 FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
918 }
919 flow_value |= FBNIC_RPC_TCAM_ACT1_IP_VALID |
920 FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
921 flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
922 FBNIC_RPC_TCAM_ACT1_IP_VALID |
923 FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
924 break;
925 case UDP_V6_FLOW:
926 udp6_flow:
927 flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
928 fallthrough;
929 case TCP_V6_FLOW:
930 tcp6_flow:
931 flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
932 flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
933 FBNIC_RPC_TCAM_ACT1_L4_VALID);
934
935 sport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.psrc);
936 sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.psrc);
937 dport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.pdst);
938 dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.pdst);
939 goto ipv6_flow;
940 case IPV6_USER_FLOW:
941 if (!fsp->m_u.usr_ip6_spec.l4_proto)
942 goto ipv6_flow;
943
944 if (fsp->m_u.usr_ip6_spec.l4_proto != 0xff)
945 return -EINVAL;
946 if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_UDP)
947 goto udp6_flow;
948 if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_TCP)
949 goto tcp6_flow;
950 if (fsp->h_u.usr_ip6_spec.l4_proto != IPPROTO_IPV6)
951 return -EINVAL;
952
953 addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
954 mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
955 if (!ipv6_addr_any(mask6)) {
956 ip_src = __fbnic_ip6_sync(fbd, fbd->ipo_src,
957 addr6, mask6);
958 if (!ip_src)
959 return -ENOSPC;
960
961 set_bit(idx, ip_src->act_tcam);
962 ip_value |=
963 FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
964 FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
965 ip_src - fbd->ipo_src);
966 ip_mask &=
967 ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
968 FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX);
969 }
970
971 addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
972 mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
973 if (!ipv6_addr_any(mask6)) {
974 ip_dst = __fbnic_ip6_sync(fbd, fbd->ipo_dst,
975 addr6, mask6);
976 if (!ip_dst) {
977 if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
978 memset(ip_src, 0, sizeof(*ip_src));
979 return -ENOSPC;
980 }
981
982 set_bit(idx, ip_dst->act_tcam);
983 ip_value |=
984 FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
985 FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
986 ip_dst - fbd->ipo_dst);
987 ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
988 FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX);
989 }
990
991 flow_value |= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
992 flow_mask &= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
993 ipv6_flow:
994 addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
995 mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
996 if (!ip_src && !ipv6_addr_any(mask6)) {
997 ip_src = __fbnic_ip6_sync(fbd, fbd->ip_src,
998 addr6, mask6);
999 if (!ip_src)
1000 return -ENOSPC;
1001
1002 set_bit(idx, ip_src->act_tcam);
1003 ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
1004 FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
1005 ip_src - fbd->ip_src);
1006 ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
1007 FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
1008 }
1009
1010 addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
1011 mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
1012 if (!ip_dst && !ipv6_addr_any(mask6)) {
1013 ip_dst = __fbnic_ip6_sync(fbd, fbd->ip_dst,
1014 addr6, mask6);
1015 if (!ip_dst) {
1016 if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
1017 memset(ip_src, 0, sizeof(*ip_src));
1018 return -ENOSPC;
1019 }
1020
1021 set_bit(idx, ip_dst->act_tcam);
1022 ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
1023 FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
1024 ip_dst - fbd->ip_dst);
1025 ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
1026 FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
1027 }
1028
1029 flow_value |= FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
1030 FBNIC_RPC_TCAM_ACT1_IP_VALID |
1031 FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
1032 flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
1033 FBNIC_RPC_TCAM_ACT1_IP_VALID |
1034 FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
1035 break;
1036 case ETHER_FLOW:
1037 if (!is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) {
1038 u8 *addr = fsp->h_u.ether_spec.h_dest;
1039 u8 *mask = fsp->m_u.ether_spec.h_dest;
1040
1041 /* Do not allow MAC addr of 0 */
1042 if (is_zero_ether_addr(addr))
1043 return -EINVAL;
1044
1045 /* Only support full MAC address to avoid
1046 * conflicts with other MAC addresses.
1047 */
1048 if (!is_broadcast_ether_addr(mask))
1049 return -EINVAL;
1050
1051 if (is_multicast_ether_addr(addr))
1052 mac_addr = __fbnic_mc_sync(fbd, addr);
1053 else
1054 mac_addr = __fbnic_uc_sync(fbd, addr);
1055
1056 if (!mac_addr)
1057 return -ENOSPC;
1058
1059 set_bit(idx, mac_addr->act_tcam);
1060 flow_value |=
1061 FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
1062 mac_addr - fbd->mac_addr);
1063 flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX;
1064 }
1065
1066 flow_value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
1067 flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
1068 break;
1069 default:
1070 return -EINVAL;
1071 }
1072
1073 /* Write action table values */
1074 act_tcam->dest = dest;
1075 act_tcam->rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, hash_idx);
1076
1077 /* Write IP Match value/mask to action_tcam[0] */
1078 act_tcam->value.tcam[0] = ip_value;
1079 act_tcam->mask.tcam[0] = ip_mask;
1080
1081 /* Write flow type value/mask to action_tcam[1] */
1082 act_tcam->value.tcam[1] = flow_value;
1083 act_tcam->mask.tcam[1] = flow_mask;
1084
1085 /* Write error, DSCP, extra L4 matches to action_tcam[2] */
1086 act_tcam->value.tcam[2] = misc;
1087 act_tcam->mask.tcam[2] = misc_mask;
1088
1089 /* Write source/destination port values */
1090 act_tcam->value.tcam[3] = sport;
1091 act_tcam->mask.tcam[3] = sport_mask;
1092 act_tcam->value.tcam[4] = dport;
1093 act_tcam->mask.tcam[4] = dport_mask;
1094
1095 for (j = 5; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
1096 act_tcam->mask.tcam[j] = 0xffff;
1097
1098 act_tcam->state = FBNIC_TCAM_S_UPDATE;
1099 fsp->location = location;
1100
1101 if (netif_running(fbn->netdev)) {
1102 fbnic_write_rules(fbd);
1103 if (ip_src || ip_dst)
1104 fbnic_write_ip_addr(fbd);
1105 if (mac_addr)
1106 fbnic_write_macda(fbd);
1107 }
1108
1109 return 0;
1110 }
1111
fbnic_clear_nfc_macda(struct fbnic_net * fbn,unsigned int tcam_idx)1112 static void fbnic_clear_nfc_macda(struct fbnic_net *fbn,
1113 unsigned int tcam_idx)
1114 {
1115 struct fbnic_dev *fbd = fbn->fbd;
1116 int idx;
1117
1118 for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;)
1119 __fbnic_xc_unsync(&fbd->mac_addr[idx], tcam_idx);
1120
1121 /* Write updates to hardware */
1122 if (netif_running(fbn->netdev))
1123 fbnic_write_macda(fbd);
1124 }
1125
fbnic_clear_nfc_ip_addr(struct fbnic_net * fbn,unsigned int tcam_idx)1126 static void fbnic_clear_nfc_ip_addr(struct fbnic_net *fbn,
1127 unsigned int tcam_idx)
1128 {
1129 struct fbnic_dev *fbd = fbn->fbd;
1130 int idx;
1131
1132 for (idx = ARRAY_SIZE(fbd->ip_src); idx--;)
1133 __fbnic_ip_unsync(&fbd->ip_src[idx], tcam_idx);
1134 for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;)
1135 __fbnic_ip_unsync(&fbd->ip_dst[idx], tcam_idx);
1136 for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;)
1137 __fbnic_ip_unsync(&fbd->ipo_src[idx], tcam_idx);
1138 for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;)
1139 __fbnic_ip_unsync(&fbd->ipo_dst[idx], tcam_idx);
1140
1141 /* Write updates to hardware */
1142 if (netif_running(fbn->netdev))
1143 fbnic_write_ip_addr(fbd);
1144 }
1145
/* Ethtool callback: delete the classification rule at fsp->location.
 *
 * Marks the action TCAM entry for deletion and releases any MACDA or
 * IP-address TCAM entries the rule had claimed before pushing the
 * updated rule set to hardware.
 */
static int fbnic_set_cls_rule_del(struct fbnic_net *fbn,
				  const struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_act_tcam *act_tcam;
	int idx;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
		return -EINVAL;

	/* NFC rules occupy a dedicated window of the action TCAM */
	idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
	act_tcam = &fbd->act_tcam[idx];

	/* Only a previously installed (valid) entry can be deleted */
	if (act_tcam->state != FBNIC_TCAM_S_VALID)
		return -EINVAL;

	act_tcam->state = FBNIC_TCAM_S_DELETE;

	/* A MACDA entry was claimed if the MACDA-valid bit is set and the
	 * index field is unmasked (cleared mask bits mean "must match").
	 */
	if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID) &&
	    (~act_tcam->mask.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX))
		fbnic_clear_nfc_macda(fbn, idx);

	/* Likewise release inner/outer IP src/dst entries if any of their
	 * valid bits are set with the corresponding index unmasked.
	 */
	if ((act_tcam->value.tcam[0] &
	     (FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
	      FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
	      FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
	      FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID)) &&
	    (~act_tcam->mask.tcam[0] &
	     (FBNIC_RPC_TCAM_ACT0_IPSRC_IDX |
	      FBNIC_RPC_TCAM_ACT0_IPDST_IDX |
	      FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX |
	      FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX)))
		fbnic_clear_nfc_ip_addr(fbn, idx);

	if (netif_running(fbn->netdev))
		fbnic_write_rules(fbd);

	return 0;
}
1188
fbnic_set_rxnfc(struct net_device * netdev,struct ethtool_rxnfc * cmd)1189 static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1190 {
1191 struct fbnic_net *fbn = netdev_priv(netdev);
1192 int ret = -EOPNOTSUPP;
1193
1194 switch (cmd->cmd) {
1195 case ETHTOOL_SRXCLSRLINS:
1196 ret = fbnic_set_cls_rule_ins(fbn, cmd);
1197 break;
1198 case ETHTOOL_SRXCLSRLDEL:
1199 ret = fbnic_set_cls_rule_del(fbn, cmd);
1200 break;
1201 }
1202
1203 return ret;
1204 }
1205
/* Ethtool callback: size of the RSS hash key in bytes. */
static u32 fbnic_get_rxfh_key_size(struct net_device *netdev)
{
	return FBNIC_RPC_RSS_KEY_BYTE_LEN;
}
1210
/* Ethtool callback: number of entries in an RSS indirection table. */
static u32 fbnic_get_rxfh_indir_size(struct net_device *netdev)
{
	return FBNIC_RPC_RSS_TBL_SIZE;
}
1215
1216 static int
fbnic_get_rxfh(struct net_device * netdev,struct ethtool_rxfh_param * rxfh)1217 fbnic_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
1218 {
1219 struct fbnic_net *fbn = netdev_priv(netdev);
1220 unsigned int i;
1221
1222 rxfh->hfunc = ETH_RSS_HASH_TOP;
1223
1224 if (rxfh->key) {
1225 for (i = 0; i < FBNIC_RPC_RSS_KEY_BYTE_LEN; i++) {
1226 u32 rss_key = fbn->rss_key[i / 4] << ((i % 4) * 8);
1227
1228 rxfh->key[i] = rss_key >> 24;
1229 }
1230 }
1231
1232 if (rxfh->indir) {
1233 for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
1234 rxfh->indir[i] = fbn->indir_tbl[0][i];
1235 }
1236
1237 return 0;
1238 }
1239
1240 static unsigned int
fbnic_set_indir(struct fbnic_net * fbn,unsigned int idx,const u32 * indir)1241 fbnic_set_indir(struct fbnic_net *fbn, unsigned int idx, const u32 *indir)
1242 {
1243 unsigned int i, changes = 0;
1244
1245 for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
1246 if (fbn->indir_tbl[idx][i] == indir[i])
1247 continue;
1248
1249 fbn->indir_tbl[idx][i] = indir[i];
1250 changes++;
1251 }
1252
1253 return changes;
1254 }
1255
/* Ethtool callback: update the RSS hash key and/or the default
 * (context 0) indirection table.  Only the Toeplitz hash is supported.
 */
static int
fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
	       struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	unsigned int i, changes = 0;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EINVAL;

	if (rxfh->key) {
		u32 rss_key = 0;

		/* Walk the key bytes from the end, shifting each group of
		 * four into a u32 (first byte of the group ends up in the
		 * most-significant position).  Whenever i lands on a word
		 * boundary the accumulated word is compared against, and
		 * stored to, rss_key[i / 4].
		 */
		for (i = FBNIC_RPC_RSS_KEY_BYTE_LEN; i--;) {
			rss_key >>= 8;
			rss_key |= (u32)(rxfh->key[i]) << 24;

			if (i % 4)
				continue;

			if (fbn->rss_key[i / 4] == rss_key)
				continue;

			fbn->rss_key[i / 4] = rss_key;
			changes++;
		}
	}

	if (rxfh->indir)
		changes += fbnic_set_indir(fbn, 0, rxfh->indir);

	/* Only touch hardware when something changed and the dev is up */
	if (changes && netif_running(netdev))
		fbnic_rss_reinit_hw(fbn->fbd, fbn);

	return 0;
}
1293
1294 static int
fbnic_get_rss_hash_opts(struct net_device * netdev,struct ethtool_rxfh_fields * cmd)1295 fbnic_get_rss_hash_opts(struct net_device *netdev,
1296 struct ethtool_rxfh_fields *cmd)
1297 {
1298 int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
1299 struct fbnic_net *fbn = netdev_priv(netdev);
1300
1301 if (hash_opt_idx < 0)
1302 return -EINVAL;
1303
1304 /* Report options from rss_en table in fbn */
1305 cmd->data = fbn->rss_flow_hash[hash_opt_idx];
1306
1307 return 0;
1308 }
1309
/* Hash field sets accepted per flow class: L2 flows may hash on the
 * destination MAC (or discard), L3 flows additionally on the IP
 * addresses, and L4 flows additionally on the port pair.
 */
#define FBNIC_L2_HASH_OPTIONS \
	(RXH_L2DA | RXH_DISCARD)
#define FBNIC_L3_HASH_OPTIONS \
	(FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST)
#define FBNIC_L4_HASH_OPTIONS \
	(FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
1316
/* Ethtool callback: set which header fields feed the RSS hash for a
 * given flow type, then rebuild the RSS state if the device is up.
 */
static int
fbnic_set_rss_hash_opts(struct net_device *netdev,
			const struct ethtool_rxfh_fields *cmd,
			struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int hash_opt_idx;

	/* Verify the type requested is correct */
	hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
	if (hash_opt_idx < 0)
		return -EINVAL;

	/* Verify the fields asked for can actually be assigned based on type.
	 * NOTE(review): this relies on the hash-opt indices being ordered so
	 * that entries <= FBNIC_L4_HASH_OPT are L4-capable and entries
	 * <= FBNIC_IP_HASH_OPT are L3-capable -- confirm against the enum
	 * definition.
	 */
	if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
	    (hash_opt_idx > FBNIC_L4_HASH_OPT &&
	     cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
	    (hash_opt_idx > FBNIC_IP_HASH_OPT &&
	     cmd->data & ~FBNIC_L2_HASH_OPTIONS))
		return -EINVAL;

	fbn->rss_flow_hash[hash_opt_idx] = cmd->data;

	/* Rebuild the RSS configuration and rules if the device is up */
	if (netif_running(fbn->netdev)) {
		fbnic_rss_reinit(fbn->fbd, fbn);
		fbnic_write_rules(fbn->fbd);
	}

	return 0;
}
1347
1348 static int
fbnic_modify_rxfh_context(struct net_device * netdev,struct ethtool_rxfh_context * ctx,const struct ethtool_rxfh_param * rxfh,struct netlink_ext_ack * extack)1349 fbnic_modify_rxfh_context(struct net_device *netdev,
1350 struct ethtool_rxfh_context *ctx,
1351 const struct ethtool_rxfh_param *rxfh,
1352 struct netlink_ext_ack *extack)
1353 {
1354 struct fbnic_net *fbn = netdev_priv(netdev);
1355 const u32 *indir = rxfh->indir;
1356 unsigned int changes;
1357
1358 if (!indir)
1359 indir = ethtool_rxfh_context_indir(ctx);
1360
1361 changes = fbnic_set_indir(fbn, rxfh->rss_context, indir);
1362 if (changes && netif_running(netdev))
1363 fbnic_rss_reinit_hw(fbn->fbd, fbn);
1364
1365 return 0;
1366 }
1367
1368 static int
fbnic_create_rxfh_context(struct net_device * netdev,struct ethtool_rxfh_context * ctx,const struct ethtool_rxfh_param * rxfh,struct netlink_ext_ack * extack)1369 fbnic_create_rxfh_context(struct net_device *netdev,
1370 struct ethtool_rxfh_context *ctx,
1371 const struct ethtool_rxfh_param *rxfh,
1372 struct netlink_ext_ack *extack)
1373 {
1374 struct fbnic_net *fbn = netdev_priv(netdev);
1375
1376 if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
1377 NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
1378 return -EOPNOTSUPP;
1379 }
1380 ctx->hfunc = ETH_RSS_HASH_TOP;
1381
1382 if (!rxfh->indir) {
1383 u32 *indir = ethtool_rxfh_context_indir(ctx);
1384 unsigned int num_rx = fbn->num_rx_queues;
1385 unsigned int i;
1386
1387 for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
1388 indir[i] = ethtool_rxfh_indir_default(i, num_rx);
1389 }
1390
1391 return fbnic_modify_rxfh_context(netdev, ctx, rxfh, extack);
1392 }
1393
/* Ethtool callback: destroy an additional RSS context.
 *
 * Contexts are statically allocated with the device, so there is
 * nothing to release here.
 */
static int
fbnic_remove_rxfh_context(struct net_device *netdev,
			  struct ethtool_rxfh_context *ctx, u32 rss_context,
			  struct netlink_ext_ack *extack)
{
	/* Nothing to do, contexts are allocated statically */
	return 0;
}
1402
/* Ethtool callback: report channel (queue/NAPI) limits and usage.
 *
 * A Tx queue and an Rx queue sharing one NAPI are reported as a single
 * "combined" channel; the remainder are reported as dedicated rx/tx
 * channels.
 */
static void fbnic_get_channels(struct net_device *netdev,
			       struct ethtool_channels *ch)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;

	ch->max_rx = fbd->max_num_queues;
	ch->max_tx = fbd->max_num_queues;
	ch->max_combined = min(ch->max_rx, ch->max_tx);
	ch->max_other = FBNIC_NON_NAPI_VECTORS;

	/* With more queues than NAPIs every NAPI carries a queue of each
	 * type, so the smaller queue count is fully combined; otherwise
	 * each NAPI hosts at most one queue per direction and the overlap
	 * works out to rx + tx - napi.
	 */
	if (fbn->num_rx_queues > fbn->num_napi ||
	    fbn->num_tx_queues > fbn->num_napi)
		ch->combined_count = min(fbn->num_rx_queues,
					 fbn->num_tx_queues);
	else
		ch->combined_count =
			fbn->num_rx_queues + fbn->num_tx_queues - fbn->num_napi;
	ch->rx_count = fbn->num_rx_queues - ch->combined_count;
	ch->tx_count = fbn->num_tx_queues - ch->combined_count;
	ch->other_count = FBNIC_NON_NAPI_VECTORS;
}
1425
fbnic_set_queues(struct fbnic_net * fbn,struct ethtool_channels * ch,unsigned int max_napis)1426 static void fbnic_set_queues(struct fbnic_net *fbn, struct ethtool_channels *ch,
1427 unsigned int max_napis)
1428 {
1429 fbn->num_rx_queues = ch->rx_count + ch->combined_count;
1430 fbn->num_tx_queues = ch->tx_count + ch->combined_count;
1431 fbn->num_napi = min(ch->rx_count + ch->tx_count + ch->combined_count,
1432 max_napis);
1433 }
1434
/* Ethtool callback: change the queue/channel configuration.
 *
 * If the device is down the new counts are simply recorded.  If it is
 * running, a clone of the private state is built with the new counts
 * and all resources are allocated up front, so that the old queues are
 * only quiesced and swapped out once nothing can fail; any failure
 * unwinds back to the original configuration.
 */
static int fbnic_set_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	unsigned int max_napis, standalone;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_net *clone;
	int err;

	max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
	standalone = ch->rx_count + ch->tx_count;

	/* Limits for standalone queues:
	 *  - each queue has its own NAPI (num_napi >= rx + tx + combined)
	 *  - combining queues (combined not 0, rx or tx must be 0)
	 */
	if ((ch->rx_count && ch->tx_count && ch->combined_count) ||
	    (standalone && standalone + ch->combined_count > max_napis) ||
	    ch->rx_count + ch->combined_count > fbd->max_num_queues ||
	    ch->tx_count + ch->combined_count > fbd->max_num_queues ||
	    ch->other_count != FBNIC_NON_NAPI_VECTORS)
		return -EINVAL;

	/* Device is down: just record the new layout, no swap needed */
	if (!netif_running(netdev)) {
		fbnic_set_queues(fbn, ch, max_napis);
		fbnic_reset_indir_tbl(fbn);
		return 0;
	}

	clone = fbnic_clone_create(fbn);
	if (!clone)
		return -ENOMEM;

	fbnic_set_queues(clone, ch, max_napis);

	err = fbnic_alloc_napi_vectors(clone);
	if (err)
		goto err_free_clone;

	err = fbnic_alloc_resources(clone);
	if (err)
		goto err_free_napis;

	/* Quiesce the old queues before swapping in the clone */
	fbnic_down_noidle(fbn);
	err = fbnic_wait_all_queues_idle(fbn->fbd, true);
	if (err)
		goto err_start_stack;

	err = fbnic_set_netif_queues(clone);
	if (err)
		goto err_start_stack;

	/* Nothing can fail past this point */
	fbnic_flush(fbn);

	fbnic_clone_swap(fbn, clone);

	/* Reset RSS indirection table */
	fbnic_reset_indir_tbl(fbn);

	fbnic_up(fbn);

	/* After the swap the clone holds the old queue state; free it */
	fbnic_free_resources(clone);
	fbnic_free_napi_vectors(clone);
	fbnic_clone_free(clone);

	return 0;

err_start_stack:
	/* Restart the stack with the original, unswapped configuration */
	fbnic_flush(fbn);
	fbnic_up(fbn);
	fbnic_free_resources(clone);
err_free_napis:
	fbnic_free_napi_vectors(clone);
err_free_clone:
	fbnic_clone_free(clone);
	return err;
}
1513
1514 static int
fbnic_get_ts_info(struct net_device * netdev,struct kernel_ethtool_ts_info * tsinfo)1515 fbnic_get_ts_info(struct net_device *netdev,
1516 struct kernel_ethtool_ts_info *tsinfo)
1517 {
1518 struct fbnic_net *fbn = netdev_priv(netdev);
1519
1520 tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);
1521
1522 tsinfo->so_timestamping =
1523 SOF_TIMESTAMPING_TX_SOFTWARE |
1524 SOF_TIMESTAMPING_TX_HARDWARE |
1525 SOF_TIMESTAMPING_RX_HARDWARE |
1526 SOF_TIMESTAMPING_RAW_HARDWARE;
1527
1528 tsinfo->tx_types =
1529 BIT(HWTSTAMP_TX_OFF) |
1530 BIT(HWTSTAMP_TX_ON);
1531
1532 tsinfo->rx_filters =
1533 BIT(HWTSTAMP_FILTER_NONE) |
1534 BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
1535 BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1536 BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1537 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
1538 BIT(HWTSTAMP_FILTER_ALL);
1539
1540 return 0;
1541 }
1542
/* Ethtool callback: report Tx hardware-timestamp statistics.
 *
 * Starts from the totals in fbn->tx_stats.twq (presumably accumulated
 * from rings that have since been torn down -- confirm against the
 * ring teardown path), then adds each live ring's counters under its
 * u64_stats seqcount so 64-bit reads are consistent on 32-bit hosts.
 */
static void fbnic_get_ts_stats(struct net_device *netdev,
			       struct ethtool_ts_stats *ts_stats)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	u64 ts_packets, ts_lost;
	struct fbnic_ring *ring;
	unsigned int start;
	int i;

	ts_stats->pkts = fbn->tx_stats.twq.ts_packets;
	ts_stats->lost = fbn->tx_stats.twq.ts_lost;
	for (i = 0; i < fbn->num_tx_queues; i++) {
		ring = fbn->tx[i];
		do {
			/* Retry if the ring stats changed mid-read */
			start = u64_stats_fetch_begin(&ring->stats.syncp);
			ts_packets = ring->stats.twq.ts_packets;
			ts_lost = ring->stats.twq.ts_lost;
		} while (u64_stats_fetch_retry(&ring->stats.syncp, start));
		ts_stats->pkts += ts_packets;
		ts_stats->lost += ts_lost;
	}
}
1565
/* Copy a HW counter into an ethtool stat slot, but only when the MAC
 * has actually reported it; otherwise leave the slot's preset alone.
 */
static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
{
	if (!counter->reported)
		return;

	*stat = counter->value;
}
1571
1572 static void
fbnic_get_eth_mac_stats(struct net_device * netdev,struct ethtool_eth_mac_stats * eth_mac_stats)1573 fbnic_get_eth_mac_stats(struct net_device *netdev,
1574 struct ethtool_eth_mac_stats *eth_mac_stats)
1575 {
1576 struct fbnic_net *fbn = netdev_priv(netdev);
1577 struct fbnic_mac_stats *mac_stats;
1578 struct fbnic_dev *fbd = fbn->fbd;
1579 const struct fbnic_mac *mac;
1580
1581 mac_stats = &fbd->hw_stats.mac;
1582 mac = fbd->mac;
1583
1584 mac->get_eth_mac_stats(fbd, false, &mac_stats->eth_mac);
1585
1586 fbnic_set_counter(ð_mac_stats->FramesTransmittedOK,
1587 &mac_stats->eth_mac.FramesTransmittedOK);
1588 fbnic_set_counter(ð_mac_stats->FramesReceivedOK,
1589 &mac_stats->eth_mac.FramesReceivedOK);
1590 fbnic_set_counter(ð_mac_stats->FrameCheckSequenceErrors,
1591 &mac_stats->eth_mac.FrameCheckSequenceErrors);
1592 fbnic_set_counter(ð_mac_stats->AlignmentErrors,
1593 &mac_stats->eth_mac.AlignmentErrors);
1594 fbnic_set_counter(ð_mac_stats->OctetsTransmittedOK,
1595 &mac_stats->eth_mac.OctetsTransmittedOK);
1596 fbnic_set_counter(ð_mac_stats->FramesLostDueToIntMACXmitError,
1597 &mac_stats->eth_mac.FramesLostDueToIntMACXmitError);
1598 fbnic_set_counter(ð_mac_stats->OctetsReceivedOK,
1599 &mac_stats->eth_mac.OctetsReceivedOK);
1600 fbnic_set_counter(ð_mac_stats->FramesLostDueToIntMACRcvError,
1601 &mac_stats->eth_mac.FramesLostDueToIntMACRcvError);
1602 fbnic_set_counter(ð_mac_stats->MulticastFramesXmittedOK,
1603 &mac_stats->eth_mac.MulticastFramesXmittedOK);
1604 fbnic_set_counter(ð_mac_stats->BroadcastFramesXmittedOK,
1605 &mac_stats->eth_mac.BroadcastFramesXmittedOK);
1606 fbnic_set_counter(ð_mac_stats->MulticastFramesReceivedOK,
1607 &mac_stats->eth_mac.MulticastFramesReceivedOK);
1608 fbnic_set_counter(ð_mac_stats->BroadcastFramesReceivedOK,
1609 &mac_stats->eth_mac.BroadcastFramesReceivedOK);
1610 fbnic_set_counter(ð_mac_stats->FrameTooLongErrors,
1611 &mac_stats->eth_mac.FrameTooLongErrors);
1612 }
1613
1614 static void
fbnic_get_eth_ctrl_stats(struct net_device * netdev,struct ethtool_eth_ctrl_stats * eth_ctrl_stats)1615 fbnic_get_eth_ctrl_stats(struct net_device *netdev,
1616 struct ethtool_eth_ctrl_stats *eth_ctrl_stats)
1617 {
1618 struct fbnic_net *fbn = netdev_priv(netdev);
1619 struct fbnic_mac_stats *mac_stats;
1620 struct fbnic_dev *fbd = fbn->fbd;
1621
1622 mac_stats = &fbd->hw_stats.mac;
1623
1624 fbd->mac->get_eth_ctrl_stats(fbd, false, &mac_stats->eth_ctrl);
1625
1626 eth_ctrl_stats->MACControlFramesReceived =
1627 mac_stats->eth_ctrl.MACControlFramesReceived.value;
1628 eth_ctrl_stats->MACControlFramesTransmitted =
1629 mac_stats->eth_ctrl.MACControlFramesTransmitted.value;
1630 }
1631
/* Packet-size buckets backing the RMON histograms; the zero "high" in
 * the final entry terminates iteration, and the list itself is handed
 * back to ethtool via *ranges.
 */
static const struct ethtool_rmon_hist_range fbnic_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, 2047 },
	{ 2048, 4095 },
	{ 4096, 8191 },
	{ 8192, 9216 },
	{ 9217, FBNIC_MAX_JUMBO_FRAME_SIZE },
	{}
};
1646
1647 static void
fbnic_get_rmon_stats(struct net_device * netdev,struct ethtool_rmon_stats * rmon_stats,const struct ethtool_rmon_hist_range ** ranges)1648 fbnic_get_rmon_stats(struct net_device *netdev,
1649 struct ethtool_rmon_stats *rmon_stats,
1650 const struct ethtool_rmon_hist_range **ranges)
1651 {
1652 struct fbnic_net *fbn = netdev_priv(netdev);
1653 struct fbnic_mac_stats *mac_stats;
1654 struct fbnic_dev *fbd = fbn->fbd;
1655 int i;
1656
1657 mac_stats = &fbd->hw_stats.mac;
1658
1659 fbd->mac->get_rmon_stats(fbd, false, &mac_stats->rmon);
1660
1661 rmon_stats->undersize_pkts =
1662 mac_stats->rmon.undersize_pkts.value;
1663 rmon_stats->oversize_pkts =
1664 mac_stats->rmon.oversize_pkts.value;
1665 rmon_stats->fragments =
1666 mac_stats->rmon.fragments.value;
1667 rmon_stats->jabbers =
1668 mac_stats->rmon.jabbers.value;
1669
1670 for (i = 0; fbnic_rmon_ranges[i].high; i++) {
1671 rmon_stats->hist[i] = mac_stats->rmon.hist[i].value;
1672 rmon_stats->hist_tx[i] = mac_stats->rmon.hist_tx[i].value;
1673 }
1674
1675 *ranges = fbnic_rmon_ranges;
1676 }
1677
/* Ethtool operations for fbnic netdevs; installed on the net_device by
 * fbnic_set_ethtool_ops().
 */
static const struct ethtool_ops fbnic_ethtool_ops = {
	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS |
					  ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.rxfh_max_num_contexts	= FBNIC_RPC_RSS_TBL_COUNT,
	.get_drvinfo		= fbnic_get_drvinfo,
	.get_regs_len		= fbnic_get_regs_len,
	.get_regs		= fbnic_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= fbnic_get_coalesce,
	.set_coalesce		= fbnic_set_coalesce,
	.get_ringparam		= fbnic_get_ringparam,
	.set_ringparam		= fbnic_set_ringparam,
	.get_pauseparam		= fbnic_phylink_get_pauseparam,
	.set_pauseparam		= fbnic_phylink_set_pauseparam,
	.get_strings		= fbnic_get_strings,
	.get_ethtool_stats	= fbnic_get_ethtool_stats,
	.get_sset_count		= fbnic_get_sset_count,
	.get_rxnfc		= fbnic_get_rxnfc,
	.set_rxnfc		= fbnic_set_rxnfc,
	.get_rxfh_key_size	= fbnic_get_rxfh_key_size,
	.get_rxfh_indir_size	= fbnic_get_rxfh_indir_size,
	.get_rxfh		= fbnic_get_rxfh,
	.set_rxfh		= fbnic_set_rxfh,
	.get_rxfh_fields	= fbnic_get_rss_hash_opts,
	.set_rxfh_fields	= fbnic_set_rss_hash_opts,
	.create_rxfh_context	= fbnic_create_rxfh_context,
	.modify_rxfh_context	= fbnic_modify_rxfh_context,
	.remove_rxfh_context	= fbnic_remove_rxfh_context,
	.get_channels		= fbnic_get_channels,
	.set_channels		= fbnic_set_channels,
	.get_ts_info		= fbnic_get_ts_info,
	.get_ts_stats		= fbnic_get_ts_stats,
	.get_link_ksettings	= fbnic_phylink_ethtool_ksettings_get,
	.get_fecparam		= fbnic_phylink_get_fecparam,
	.get_eth_mac_stats	= fbnic_get_eth_mac_stats,
	.get_eth_ctrl_stats	= fbnic_get_eth_ctrl_stats,
	.get_rmon_stats		= fbnic_get_rmon_stats,
};
1716
/* Attach fbnic's ethtool_ops to a freshly allocated net_device. */
void fbnic_set_ethtool_ops(struct net_device *dev)
{
	dev->ethtool_ops = &fbnic_ethtool_ops;
}
1721