1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3
4 #include <linux/ethtool.h>
5 #include <linux/ethtool_netlink.h>
6 #include <linux/netdevice.h>
7 #include <linux/pci.h>
8 #include <net/ipv6.h>
9
10 #include "fbnic.h"
11 #include "fbnic_netdev.h"
12 #include "fbnic_tlv.h"
13
/* Descriptor for one exported statistic: the ethtool name string plus the
 * size and byte offset of the backing counter within its stats structure.
 */
struct fbnic_stat {
	u8 string[ETH_GSTRING_LEN];	/* stat name; may contain a %u index */
	unsigned int size;		/* sizeof_field() of the counter */
	unsigned int offset;		/* offsetof() the counter in its struct */
};
19
/* Build a struct fbnic_stat entry for member @stat of struct @type,
 * recording its name string, field size, and field offset.
 */
#define FBNIC_STAT_FIELDS(type, name, stat) { \
	.string = name, \
	.size = sizeof_field(struct type, stat), \
	.offset = offsetof(struct type, stat), \
}

/* Hardware statistics not captured in rtnl_link_stats */
#define FBNIC_HW_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_hw_stats, name, stat)
29
/* Fixed (non-indexed) HW counters pulled from struct fbnic_hw_stats */
static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
	/* TTI */
	FBNIC_HW_STAT("tti_cm_drop_frames", tti.cm_drop.frames),
	FBNIC_HW_STAT("tti_cm_drop_bytes", tti.cm_drop.bytes),
	FBNIC_HW_STAT("tti_frame_drop_frames", tti.frame_drop.frames),
	FBNIC_HW_STAT("tti_frame_drop_bytes", tti.frame_drop.bytes),
	FBNIC_HW_STAT("tti_tbi_drop_frames", tti.tbi_drop.frames),
	FBNIC_HW_STAT("tti_tbi_drop_bytes", tti.tbi_drop.bytes),

	/* TMI */
	FBNIC_HW_STAT("ptp_illegal_req", tmi.ptp_illegal_req),
	FBNIC_HW_STAT("ptp_good_ts", tmi.ptp_good_ts),
	FBNIC_HW_STAT("ptp_bad_ts", tmi.ptp_bad_ts),

	/* RPC */
	FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype),
	FBNIC_HW_STAT("rpc_unkn_ext_hdr", rpc.unkn_ext_hdr),
	FBNIC_HW_STAT("rpc_ipv4_frag", rpc.ipv4_frag),
	FBNIC_HW_STAT("rpc_ipv6_frag", rpc.ipv6_frag),
	FBNIC_HW_STAT("rpc_ipv4_esp", rpc.ipv4_esp),
	FBNIC_HW_STAT("rpc_ipv6_esp", rpc.ipv6_esp),
	FBNIC_HW_STAT("rpc_tcp_opt_err", rpc.tcp_opt_err),
	FBNIC_HW_STAT("rpc_out_of_hdr_err", rpc.out_of_hdr_err),
};

#define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats)
56
#define FBNIC_RXB_ENQUEUE_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_rxb_enqueue_stats, name, stat)

/* Per-enqueue-point RXB counters; the %u in each name is filled with the
 * enqueue index when strings are generated.
 */
static const struct fbnic_stat fbnic_gstrings_rxb_enqueue_stats[] = {
	FBNIC_RXB_ENQUEUE_STAT("rxb_integrity_err%u", integrity_err),
	FBNIC_RXB_ENQUEUE_STAT("rxb_mac_err%u", mac_err),
	FBNIC_RXB_ENQUEUE_STAT("rxb_parser_err%u", parser_err),
	FBNIC_RXB_ENQUEUE_STAT("rxb_frm_err%u", frm_err),

	FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_frames", drbo.frames),
	FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_bytes", drbo.bytes),
};

#define FBNIC_HW_RXB_ENQUEUE_STATS_LEN \
	ARRAY_SIZE(fbnic_gstrings_rxb_enqueue_stats)
72
#define FBNIC_RXB_FIFO_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_rxb_fifo_stats, name, stat)

/* Per-FIFO RXB counters; %u is replaced with the FIFO index */
static const struct fbnic_stat fbnic_gstrings_rxb_fifo_stats[] = {
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_drop", trans_drop),
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_dropped_frames", drop.frames),
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_ecn", trans_ecn),
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_level", level),
};

#define FBNIC_HW_RXB_FIFO_STATS_LEN ARRAY_SIZE(fbnic_gstrings_rxb_fifo_stats)
84
#define FBNIC_RXB_DEQUEUE_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_rxb_dequeue_stats, name, stat)

/* Per-dequeue-point RXB counters; %u is replaced with the dequeue index */
static const struct fbnic_stat fbnic_gstrings_rxb_dequeue_stats[] = {
	FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_frames", intf.frames),
	FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_bytes", intf.bytes),
	FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_frames", pbuf.frames),
	FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_bytes", pbuf.bytes),
};

#define FBNIC_HW_RXB_DEQUEUE_STATS_LEN \
	ARRAY_SIZE(fbnic_gstrings_rxb_dequeue_stats)
97
/* Per-queue counters are wrapped in a struct with a .value member, hence
 * the extra dereference in the field path.
 */
#define FBNIC_HW_Q_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_hw_q_stats, name, stat.value)

/* Per-queue RDE drop/error counters; %u is replaced with the queue index */
static const struct fbnic_stat fbnic_gstrings_hw_q_stats[] = {
	FBNIC_HW_Q_STAT("rde_%u_pkt_err", rde_pkt_err),
	FBNIC_HW_Q_STAT("rde_%u_pkt_cq_drop", rde_pkt_cq_drop),
	FBNIC_HW_Q_STAT("rde_%u_pkt_bdq_drop", rde_pkt_bdq_drop),
};

#define FBNIC_HW_Q_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_q_stats)
/* Total number of HW stats: fixed stats plus each indexed table multiplied
 * by its index count.  Must stay in sync with fbnic_get_strings() and
 * fbnic_get_ethtool_stats().
 */
#define FBNIC_HW_STATS_LEN \
	(FBNIC_HW_FIXED_STATS_LEN + \
	 FBNIC_HW_RXB_ENQUEUE_STATS_LEN * FBNIC_RXB_ENQUEUE_INDICES + \
	 FBNIC_HW_RXB_FIFO_STATS_LEN * FBNIC_RXB_FIFO_INDICES + \
	 FBNIC_HW_RXB_DEQUEUE_STATS_LEN * FBNIC_RXB_DEQUEUE_INDICES + \
	 FBNIC_HW_Q_STATS_LEN * FBNIC_MAX_QUEUES)
114
#define FBNIC_QUEUE_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_ring, name, stat)

/* Software counters kept per XDP Tx ring; %u is the XDP queue index */
static const struct fbnic_stat fbnic_gstrings_xdp_stats[] = {
	FBNIC_QUEUE_STAT("xdp_tx_queue_%u_packets", stats.packets),
	FBNIC_QUEUE_STAT("xdp_tx_queue_%u_bytes", stats.bytes),
	FBNIC_QUEUE_STAT("xdp_tx_queue_%u_dropped", stats.dropped),
};

#define FBNIC_XDP_STATS_LEN ARRAY_SIZE(fbnic_gstrings_xdp_stats)

/* Grand total reported by fbnic_get_sset_count() for ETH_SS_STATS */
#define FBNIC_STATS_LEN \
	(FBNIC_HW_STATS_LEN + FBNIC_XDP_STATS_LEN * FBNIC_MAX_XDPQS)
128
129 static void
fbnic_get_drvinfo(struct net_device * netdev,struct ethtool_drvinfo * drvinfo)130 fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
131 {
132 struct fbnic_net *fbn = netdev_priv(netdev);
133 struct fbnic_dev *fbd = fbn->fbd;
134
135 fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
136 sizeof(drvinfo->fw_version));
137 }
138
fbnic_get_regs_len(struct net_device * netdev)139 static int fbnic_get_regs_len(struct net_device *netdev)
140 {
141 struct fbnic_net *fbn = netdev_priv(netdev);
142
143 return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
144 }
145
fbnic_get_regs(struct net_device * netdev,struct ethtool_regs * regs,void * data)146 static void fbnic_get_regs(struct net_device *netdev,
147 struct ethtool_regs *regs, void *data)
148 {
149 struct fbnic_net *fbn = netdev_priv(netdev);
150
151 fbnic_csr_get_regs(fbn->fbd, data, ®s->version);
152 }
153
fbnic_clone_create(struct fbnic_net * orig)154 static struct fbnic_net *fbnic_clone_create(struct fbnic_net *orig)
155 {
156 struct fbnic_net *clone;
157
158 clone = kmemdup(orig, sizeof(*orig), GFP_KERNEL);
159 if (!clone)
160 return NULL;
161
162 memset(clone->tx, 0, sizeof(clone->tx));
163 memset(clone->rx, 0, sizeof(clone->rx));
164 memset(clone->napi, 0, sizeof(clone->napi));
165 return clone;
166 }
167
fbnic_clone_swap_cfg(struct fbnic_net * orig,struct fbnic_net * clone)168 static void fbnic_clone_swap_cfg(struct fbnic_net *orig,
169 struct fbnic_net *clone)
170 {
171 swap(clone->rcq_size, orig->rcq_size);
172 swap(clone->hpq_size, orig->hpq_size);
173 swap(clone->ppq_size, orig->ppq_size);
174 swap(clone->txq_size, orig->txq_size);
175 swap(clone->num_rx_queues, orig->num_rx_queues);
176 swap(clone->num_tx_queues, orig->num_tx_queues);
177 swap(clone->num_napi, orig->num_napi);
178 swap(clone->hds_thresh, orig->hds_thresh);
179 }
180
/* Fold the counters of every ring belonging to NAPI vector @nv into the
 * netdev-level totals in @fbn so they survive the rings being replaced.
 */
static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
					    struct fbnic_napi_vector *nv)
{
	int i, j;

	/* Tx triads come first in nv->qt[]: sub0 is the Tx ring, sub1 the
	 * XDP ring, cmpl the Tx completion ring.
	 */
	for (i = 0; i < nv->txt_count; i++) {
		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0);
		fbnic_aggregate_ring_xdp_counters(fbn, &nv->qt[i].sub1);
		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl);
	}

	/* Rx triads follow immediately after; note that i carries over from
	 * the Tx loop so qt[] is walked without gaps.  sub0/sub1 are buffer
	 * descriptor queues, cmpl is the Rx completion ring.
	 */
	for (j = 0; j < nv->rxt_count; j++, i++) {
		fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub0);
		fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub1);
		fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl);
	}
}
198
/* Swap the ring configuration, NAPI vector pointers, and ring pointers
 * between the live private area @orig and the staged @clone.  IRQs are
 * quiesced and outstanding per-ring counters aggregated first so nothing
 * is lost across the exchange.
 */
static void fbnic_clone_swap(struct fbnic_net *orig,
			     struct fbnic_net *clone)
{
	struct fbnic_dev *fbd = orig->fbd;
	unsigned int i;

	/* Use the larger NAPI count so vectors from either config are
	 * guaranteed to be out of their IRQ handlers before the swap.
	 */
	for (i = 0; i < max(clone->num_napi, orig->num_napi); i++)
		fbnic_synchronize_irq(fbd, FBNIC_NON_NAPI_VECTORS + i);
	/* Preserve stats from the rings about to be swapped out */
	for (i = 0; i < orig->num_napi; i++)
		fbnic_aggregate_vector_counters(orig, orig->napi[i]);

	fbnic_clone_swap_cfg(orig, clone);

	for (i = 0; i < ARRAY_SIZE(orig->napi); i++)
		swap(clone->napi[i], orig->napi[i]);
	for (i = 0; i < ARRAY_SIZE(orig->tx); i++)
		swap(clone->tx[i], orig->tx[i]);
	for (i = 0; i < ARRAY_SIZE(orig->rx); i++)
		swap(clone->rx[i], orig->rx[i]);
}
219
/* Release a configuration clone created by fbnic_clone_create() */
static void fbnic_clone_free(struct fbnic_net *clone)
{
	kfree(clone);
}
224
fbnic_get_coalesce(struct net_device * netdev,struct ethtool_coalesce * ec,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)225 static int fbnic_get_coalesce(struct net_device *netdev,
226 struct ethtool_coalesce *ec,
227 struct kernel_ethtool_coalesce *kernel_coal,
228 struct netlink_ext_ack *extack)
229 {
230 struct fbnic_net *fbn = netdev_priv(netdev);
231
232 ec->tx_coalesce_usecs = fbn->tx_usecs;
233 ec->rx_coalesce_usecs = fbn->rx_usecs;
234 ec->rx_max_coalesced_frames = fbn->rx_max_frames;
235
236 return 0;
237 }
238
fbnic_set_coalesce(struct net_device * netdev,struct ethtool_coalesce * ec,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)239 static int fbnic_set_coalesce(struct net_device *netdev,
240 struct ethtool_coalesce *ec,
241 struct kernel_ethtool_coalesce *kernel_coal,
242 struct netlink_ext_ack *extack)
243 {
244 struct fbnic_net *fbn = netdev_priv(netdev);
245
246 /* Verify against hardware limits */
247 if (ec->rx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT)) {
248 NL_SET_ERR_MSG_MOD(extack, "rx_usecs is above device max");
249 return -EINVAL;
250 }
251 if (ec->tx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT)) {
252 NL_SET_ERR_MSG_MOD(extack, "tx_usecs is above device max");
253 return -EINVAL;
254 }
255 if (ec->rx_max_coalesced_frames >
256 FIELD_MAX(FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK) /
257 FBNIC_MIN_RXD_PER_FRAME) {
258 NL_SET_ERR_MSG_MOD(extack, "rx_frames is above device max");
259 return -EINVAL;
260 }
261
262 fbn->tx_usecs = ec->tx_coalesce_usecs;
263 fbn->rx_usecs = ec->rx_coalesce_usecs;
264 fbn->rx_max_frames = ec->rx_max_coalesced_frames;
265
266 if (netif_running(netdev)) {
267 int i;
268
269 for (i = 0; i < fbn->num_napi; i++) {
270 struct fbnic_napi_vector *nv = fbn->napi[i];
271
272 fbnic_config_txrx_usecs(nv, 0);
273 fbnic_config_rx_frames(nv);
274 }
275 }
276
277 return 0;
278 }
279
280 static void
fbnic_get_ringparam(struct net_device * netdev,struct ethtool_ringparam * ring,struct kernel_ethtool_ringparam * kernel_ring,struct netlink_ext_ack * extack)281 fbnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
282 struct kernel_ethtool_ringparam *kernel_ring,
283 struct netlink_ext_ack *extack)
284 {
285 struct fbnic_net *fbn = netdev_priv(netdev);
286
287 ring->rx_max_pending = FBNIC_QUEUE_SIZE_MAX;
288 ring->rx_mini_max_pending = FBNIC_QUEUE_SIZE_MAX;
289 ring->rx_jumbo_max_pending = FBNIC_QUEUE_SIZE_MAX;
290 ring->tx_max_pending = FBNIC_QUEUE_SIZE_MAX;
291
292 ring->rx_pending = fbn->rcq_size;
293 ring->rx_mini_pending = fbn->hpq_size;
294 ring->rx_jumbo_pending = fbn->ppq_size;
295 ring->tx_pending = fbn->txq_size;
296
297 kernel_ring->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
298 kernel_ring->hds_thresh_max = FBNIC_HDS_THRESH_MAX;
299 kernel_ring->hds_thresh = fbn->hds_thresh;
300 }
301
fbnic_set_rings(struct fbnic_net * fbn,struct ethtool_ringparam * ring,struct kernel_ethtool_ringparam * kernel_ring)302 static void fbnic_set_rings(struct fbnic_net *fbn,
303 struct ethtool_ringparam *ring,
304 struct kernel_ethtool_ringparam *kernel_ring)
305 {
306 fbn->rcq_size = ring->rx_pending;
307 fbn->hpq_size = ring->rx_mini_pending;
308 fbn->ppq_size = ring->rx_jumbo_pending;
309 fbn->txq_size = ring->tx_pending;
310 fbn->hds_thresh = kernel_ring->hds_thresh;
311 }
312
/* Apply new ring sizes.  When the interface is running, the new rings are
 * fully allocated on a clone of the private area first, then swapped in,
 * so failure leaves the original configuration intact.
 */
static int
fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
		    struct kernel_ethtool_ringparam *kernel_ring,
		    struct netlink_ext_ack *extack)

{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_net *clone;
	int err;

	/* Ring sizes are silently rounded up to the next power of two */
	ring->rx_pending = roundup_pow_of_two(ring->rx_pending);
	ring->rx_mini_pending = roundup_pow_of_two(ring->rx_mini_pending);
	ring->rx_jumbo_pending = roundup_pow_of_two(ring->rx_jumbo_pending);
	ring->tx_pending = roundup_pow_of_two(ring->tx_pending);

	/* These are absolute minimums allowing the device and driver to operate
	 * but not necessarily guarantee reasonable performance. Settings below
	 * Rx queue size of 128 and BDQs smaller than 64 are likely suboptimal
	 * at best.
	 */
	if (ring->rx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_RX_DESC_MIN) ||
	    ring->rx_mini_pending < FBNIC_QUEUE_SIZE_MIN ||
	    ring->rx_jumbo_pending < FBNIC_QUEUE_SIZE_MIN ||
	    ring->tx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_TX_DESC_MIN)) {
		NL_SET_ERR_MSG_MOD(extack, "requested ring size too small");
		return -EINVAL;
	}

	if (kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot disable TCP data split");
		return -EINVAL;
	}

	/* If an XDP program is attached, we should check for potential frame
	 * splitting. If the new HDS threshold can cause splitting, we should
	 * only allow if the attached XDP program can handle frags.
	 */
	if (fbnic_check_split_frames(fbn->xdp_prog, netdev->mtu,
				     kernel_ring->hds_thresh)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Use higher HDS threshold or multi-buf capable program");
		return -EINVAL;
	}

	/* Interface down: just record the sizes for the next open */
	if (!netif_running(netdev)) {
		fbnic_set_rings(fbn, ring, kernel_ring);
		return 0;
	}

	/* Stage the new configuration on a clone so allocation failures
	 * cannot disturb the running interface.
	 */
	clone = fbnic_clone_create(fbn);
	if (!clone)
		return -ENOMEM;

	fbnic_set_rings(clone, ring, kernel_ring);

	err = fbnic_alloc_napi_vectors(clone);
	if (err)
		goto err_free_clone;

	err = fbnic_alloc_resources(clone);
	if (err)
		goto err_free_napis;

	fbnic_down_noidle(fbn);
	err = fbnic_wait_all_queues_idle(fbn->fbd, true);
	if (err)
		goto err_start_stack;

	err = fbnic_set_netif_queues(clone);
	if (err)
		goto err_start_stack;

	/* Nothing can fail past this point */
	fbnic_flush(fbn);

	/* Swap new rings in; the clone now holds the old resources */
	fbnic_clone_swap(fbn, clone);

	fbnic_up(fbn);

	fbnic_free_resources(clone);
	fbnic_free_napi_vectors(clone);
	fbnic_clone_free(clone);

	return 0;

err_start_stack:
	/* Restart the stack on the original config before unwinding */
	fbnic_flush(fbn);
	fbnic_up(fbn);
	fbnic_free_resources(clone);
err_free_napis:
	fbnic_free_napi_vectors(clone);
err_free_clone:
	fbnic_clone_free(clone);
	return err;
}
408
/* Emit the RXB enqueue stat names with %u replaced by @idx */
static void fbnic_get_rxb_enqueue_strings(u8 **data, unsigned int idx)
{
	int i;

	for (i = 0; i < FBNIC_HW_RXB_ENQUEUE_STATS_LEN; i++)
		ethtool_sprintf(data,
				fbnic_gstrings_rxb_enqueue_stats[i].string,
				idx);
}
418
/* Emit the RXB FIFO stat names with %u replaced by @idx */
static void fbnic_get_rxb_fifo_strings(u8 **data, unsigned int idx)
{
	int i;

	for (i = 0; i < FBNIC_HW_RXB_FIFO_STATS_LEN; i++)
		ethtool_sprintf(data, fbnic_gstrings_rxb_fifo_stats[i].string,
				idx);
}
428
/* Emit the RXB dequeue stat names with %u replaced by @idx */
static void fbnic_get_rxb_dequeue_strings(u8 **data, unsigned int idx)
{
	int i;

	for (i = 0; i < FBNIC_HW_RXB_DEQUEUE_STATS_LEN; i++)
		ethtool_sprintf(data,
				fbnic_gstrings_rxb_dequeue_stats[i].string,
				idx);
}
438
/* Emit the per-XDP-queue stat names with %u replaced by @idx */
static void fbnic_get_xdp_queue_strings(u8 **data, unsigned int idx)
{
	int i;

	for (i = 0; i < FBNIC_XDP_STATS_LEN; i++)
		ethtool_sprintf(data, fbnic_gstrings_xdp_stats[i].string, idx);
}
448
/* Generate stat name strings in the exact order fbnic_get_ethtool_stats()
 * reports values.
 */
static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	int i, idx;

	if (sset != ETH_SS_STATS)
		return;

	for (i = 0; i < FBNIC_HW_FIXED_STATS_LEN; i++)
		ethtool_puts(&data, fbnic_gstrings_hw_stats[i].string);

	for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++)
		fbnic_get_rxb_enqueue_strings(&data, i);

	for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
		fbnic_get_rxb_fifo_strings(&data, i);

	for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++)
		fbnic_get_rxb_dequeue_strings(&data, i);

	for (idx = 0; idx < FBNIC_MAX_QUEUES; idx++)
		for (i = 0; i < FBNIC_HW_Q_STATS_LEN; i++)
			ethtool_sprintf(&data,
					fbnic_gstrings_hw_q_stats[i].string,
					idx);

	for (i = 0; i < FBNIC_MAX_XDPQS; i++)
		fbnic_get_xdp_queue_strings(&data, i);
}
480
/* Copy @len u64 counters described by the @stat table out of @base into
 * the output buffer, advancing *@data as values are written.
 */
static void fbnic_report_hw_stats(const struct fbnic_stat *stat,
				  const void *base, int len, u64 **data)
{
	int i;

	for (i = 0; i < len; i++) {
		const u8 *src = (const u8 *)base + stat[i].offset;

		*(*data)++ = *(const u64 *)src;
	}
}
493
/* Copy the software counters of one XDP Tx ring into the output buffer.
 * A NULL @ring still advances the cursor so the layout stays aligned with
 * the strings emitted by fbnic_get_strings().
 */
static void fbnic_get_xdp_queue_stats(struct fbnic_ring *ring, u64 **data)
{
	int i;

	if (!ring) {
		*data += FBNIC_XDP_STATS_LEN;
		return;
	}

	for (i = 0; i < FBNIC_XDP_STATS_LEN; i++) {
		const u8 *src = (const u8 *)ring +
				fbnic_gstrings_xdp_stats[i].offset;

		*(*data)++ = *(const u64 *)src;
	}
}
511
/* Collect all stat values in the exact order their names were laid out by
 * fbnic_get_strings().  The HW snapshot is copied under hw_stats.lock so
 * a concurrent refresh cannot tear the values mid-read.
 */
static void fbnic_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct fbnic_net *fbn = netdev_priv(dev);
	struct fbnic_dev *fbd = fbn->fbd;
	int i;

	/* Refresh the cached snapshot from hardware first */
	fbnic_get_hw_stats(fbn->fbd);

	spin_lock(&fbd->hw_stats.lock);
	fbnic_report_hw_stats(fbnic_gstrings_hw_stats, &fbd->hw_stats,
			      FBNIC_HW_FIXED_STATS_LEN, &data);

	for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++) {
		const struct fbnic_rxb_enqueue_stats *enq;

		enq = &fbd->hw_stats.rxb.enq[i];
		fbnic_report_hw_stats(fbnic_gstrings_rxb_enqueue_stats,
				      enq, FBNIC_HW_RXB_ENQUEUE_STATS_LEN,
				      &data);
	}

	for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) {
		const struct fbnic_rxb_fifo_stats *fifo;

		fifo = &fbd->hw_stats.rxb.fifo[i];
		fbnic_report_hw_stats(fbnic_gstrings_rxb_fifo_stats,
				      fifo, FBNIC_HW_RXB_FIFO_STATS_LEN,
				      &data);
	}

	for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++) {
		const struct fbnic_rxb_dequeue_stats *deq;

		deq = &fbd->hw_stats.rxb.deq[i];
		fbnic_report_hw_stats(fbnic_gstrings_rxb_dequeue_stats,
				      deq, FBNIC_HW_RXB_DEQUEUE_STATS_LEN,
				      &data);
	}

	for (i = 0; i < FBNIC_MAX_QUEUES; i++) {
		const struct fbnic_hw_q_stats *hw_q = &fbd->hw_stats.hw_q[i];

		fbnic_report_hw_stats(fbnic_gstrings_hw_q_stats, hw_q,
				      FBNIC_HW_Q_STATS_LEN, &data);
	}
	spin_unlock(&fbd->hw_stats.lock);

	/* XDP Tx rings live in tx[] above the regular Tx queue slots */
	for (i = 0; i < FBNIC_MAX_XDPQS; i++)
		fbnic_get_xdp_queue_stats(fbn->tx[i + FBNIC_MAX_TXQS], &data);
}
563
fbnic_get_sset_count(struct net_device * dev,int sset)564 static int fbnic_get_sset_count(struct net_device *dev, int sset)
565 {
566 switch (sset) {
567 case ETH_SS_STATS:
568 return FBNIC_STATS_LEN;
569 default:
570 return -EOPNOTSUPP;
571 }
572 }
573
/* Map an ethtool flow type (modifier flags stripped) to the matching RSS
 * hash option index, or -1 when the flow type has no hash option.
 */
static int fbnic_get_rss_hash_idx(u32 flow_type)
{
	switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
	case TCP_V4_FLOW:
		return FBNIC_TCP4_HASH_OPT;
	case UDP_V4_FLOW:
		return FBNIC_UDP4_HASH_OPT;
	case TCP_V6_FLOW:
		return FBNIC_TCP6_HASH_OPT;
	case UDP_V6_FLOW:
		return FBNIC_UDP6_HASH_OPT;
	case IPV4_FLOW:
	case IPV4_USER_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case SCTP_V4_FLOW:
		return FBNIC_IPV4_HASH_OPT;
	case IPV6_FLOW:
	case IPV6_USER_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		return FBNIC_IPV6_HASH_OPT;
	case ETHER_FLOW:
		return FBNIC_ETHER_HASH_OPT;
	default:
		return -1;
	}
}
605
/* Walk the NFC region of the action TCAM reporting each valid rule
 * location.  Returns the number of valid rules, or -EMSGSIZE when
 * @rule_locs cannot hold them all.  cmd->data reports the table capacity.
 */
static int fbnic_get_cls_rule_all(struct fbnic_net *fbn,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct fbnic_dev *fbd = fbn->fbd;
	int count = 0, i;

	/* Report maximum rule count */
	cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;

	for (i = 0; i < FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i++) {
		const struct fbnic_act_tcam *act_tcam;

		act_tcam = &fbd->act_tcam[i + FBNIC_RPC_ACT_TBL_NFC_OFFSET];
		if (act_tcam->state != FBNIC_TCAM_S_VALID)
			continue;

		if (rule_locs) {
			if (count == cmd->rule_cnt)
				return -EMSGSIZE;

			rule_locs[count] = i;
		}

		count++;
	}

	return count;
}
636
/* Translate the action TCAM entry at fsp->location back into an ethtool
 * flow spec.  The TCAM stores inverted masks, so masks are complemented
 * (~) on the way out; MAC and IP match values are recovered from the
 * separate index tables the TCAM words point at.
 */
static int fbnic_get_cls_rule(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_act_tcam *act_tcam;
	int idx;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
		return -EINVAL;

	idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
	act_tcam = &fbd->act_tcam[idx];

	if (act_tcam->state != FBNIC_TCAM_S_VALID)
		return -EINVAL;

	/* Report maximum rule count */
	cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;

	/* Set flow type field */
	if (!(act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_VALID)) {
		/* No IP match: this is a pure L2 (Ethernet) rule */
		fsp->flow_type = ETHER_FLOW;
		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
			       act_tcam->mask.tcam[1])) {
			struct fbnic_mac_addr *mac_addr;

			/* A zeroed (inverted) mask means the MACDA index is
			 * matched; pull the address from the MAC table.
			 */
			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
					act_tcam->value.tcam[1]);
			mac_addr = &fbd->mac_addr[idx];

			ether_addr_copy(fsp->h_u.ether_spec.h_dest,
					mac_addr->value.addr8);
			eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
		}
	} else if (act_tcam->value.tcam[1] &
		   FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID) {
		/* Outer IP header matched: report as an IPv6 user flow with
		 * l4_proto IPPROTO_IPV6 (encapsulated IPv6), using the
		 * outer-IP (ipo_*) address tables.
		 */
		fsp->flow_type = IPV6_USER_FLOW;
		fsp->h_u.usr_ip6_spec.l4_proto = IPPROTO_IPV6;
		fsp->m_u.usr_ip6_spec.l4_proto = 0xff;

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ipo_src[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6src[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6src[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ipo_dst[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6dst[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6dst[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}
	} else if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_IS_V6)) {
		/* Inner/only header is IPv6; L4 ports live in tcam[3]/[4] */
		if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
			if (act_tcam->value.tcam[1] &
			    FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
				fsp->flow_type = UDP_V6_FLOW;
			else
				fsp->flow_type = TCP_V6_FLOW;
			fsp->h_u.tcp_ip6_spec.psrc =
				cpu_to_be16(act_tcam->value.tcam[3]);
			fsp->m_u.tcp_ip6_spec.psrc =
				cpu_to_be16(~act_tcam->mask.tcam[3]);
			fsp->h_u.tcp_ip6_spec.pdst =
				cpu_to_be16(act_tcam->value.tcam[4]);
			fsp->m_u.tcp_ip6_spec.pdst =
				cpu_to_be16(~act_tcam->mask.tcam[4]);
		} else {
			fsp->flow_type = IPV6_USER_FLOW;
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_src[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6src[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6src[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_dst[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6dst[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6dst[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}
	} else {
		/* IPv4 case; addresses are stored IPv6-mapped, so only the
		 * last 32-bit word (s6_addr32[3]) is reported.
		 */
		if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
			if (act_tcam->value.tcam[1] &
			    FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
				fsp->flow_type = UDP_V4_FLOW;
			else
				fsp->flow_type = TCP_V4_FLOW;
			fsp->h_u.tcp_ip4_spec.psrc =
				cpu_to_be16(act_tcam->value.tcam[3]);
			fsp->m_u.tcp_ip4_spec.psrc =
				cpu_to_be16(~act_tcam->mask.tcam[3]);
			fsp->h_u.tcp_ip4_spec.pdst =
				cpu_to_be16(act_tcam->value.tcam[4]);
			fsp->m_u.tcp_ip4_spec.pdst =
				cpu_to_be16(~act_tcam->mask.tcam[4]);
		} else {
			fsp->flow_type = IPV4_USER_FLOW;
			fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_src[idx];

			fsp->h_u.usr_ip4_spec.ip4src =
				ip_addr->value.s6_addr32[3];
			fsp->m_u.usr_ip4_spec.ip4src =
				~ip_addr->mask.s6_addr32[3];
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_dst[idx];

			fsp->h_u.usr_ip4_spec.ip4dst =
				ip_addr->value.s6_addr32[3];
			fsp->m_u.usr_ip4_spec.ip4dst =
				~ip_addr->mask.s6_addr32[3];
		}
	}

	/* Record action: drop, direct to a queue, or fall back to RSS */
	if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_DROP)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_Q_SEL)
		fsp->ring_cookie = FIELD_GET(FBNIC_RPC_ACT_TBL0_Q_ID,
					     act_tcam->dest);
	else
		fsp->flow_type |= FLOW_RSS;

	cmd->rss_context = FIELD_GET(FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID,
				     act_tcam->dest);

	return 0;
}
827
fbnic_get_rx_ring_count(struct net_device * netdev)828 static u32 fbnic_get_rx_ring_count(struct net_device *netdev)
829 {
830 struct fbnic_net *fbn = netdev_priv(netdev);
831
832 return fbn->num_rx_queues;
833 }
834
/* Dispatch ethtool RXNFC get requests.  GRXCLSRLCNT deliberately falls
 * through into the GRXCLSRLALL handler with rule_locs forced to NULL so
 * only the count is produced, and ORs RX_CLS_LOC_SPECIAL into cmd->data
 * to advertise special location support.
 */
static int fbnic_get_rxnfc(struct net_device *netdev,
			   struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int ret = -EOPNOTSUPP;
	u32 special = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXCLSRULE:
		ret = fbnic_get_cls_rule(fbn, cmd);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		rule_locs = NULL;
		special = RX_CLS_LOC_SPECIAL;
		fallthrough;
	case ETHTOOL_GRXCLSRLALL:
		ret = fbnic_get_cls_rule_all(fbn, cmd, rule_locs);
		if (ret < 0)
			break;

		cmd->data |= special;
		cmd->rule_cnt = ret;
		ret = 0;
		break;
	}

	return ret;
}
863
fbnic_cls_rule_any_loc(struct fbnic_dev * fbd)864 static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd)
865 {
866 int i;
867
868 for (i = FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i--;) {
869 int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
870
871 if (fbd->act_tcam[idx].state != FBNIC_TCAM_S_VALID)
872 return i;
873 }
874
875 return -ENOSPC;
876 }
877
fbnic_set_cls_rule_ins(struct fbnic_net * fbn,const struct ethtool_rxnfc * cmd)878 static int fbnic_set_cls_rule_ins(struct fbnic_net *fbn,
879 const struct ethtool_rxnfc *cmd)
880 {
881 u16 flow_value = 0, flow_mask = 0xffff, ip_value = 0, ip_mask = 0xffff;
882 u16 sport = 0, sport_mask = ~0, dport = 0, dport_mask = ~0;
883 u16 misc = 0, misc_mask = ~0;
884 u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
885 FBNIC_RPC_ACT_TBL0_DEST_HOST);
886 struct fbnic_ip_addr *ip_src = NULL, *ip_dst = NULL;
887 struct fbnic_mac_addr *mac_addr = NULL;
888 struct ethtool_rx_flow_spec *fsp;
889 struct fbnic_dev *fbd = fbn->fbd;
890 struct fbnic_act_tcam *act_tcam;
891 struct in6_addr *addr6, *mask6;
892 struct in_addr *addr4, *mask4;
893 int hash_idx, location;
894 u32 flow_type;
895 int idx, j;
896
897 fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
898
899 if (fsp->location != RX_CLS_LOC_ANY)
900 return -EINVAL;
901 location = fbnic_cls_rule_any_loc(fbd);
902 if (location < 0)
903 return location;
904
905 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
906 dest = FBNIC_RPC_ACT_TBL0_DROP;
907 } else if (fsp->flow_type & FLOW_RSS) {
908 if (cmd->rss_context == 1)
909 dest |= FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID;
910 } else {
911 u32 ring_idx = ethtool_get_flow_spec_ring(fsp->ring_cookie);
912
913 if (ring_idx >= fbn->num_rx_queues)
914 return -EINVAL;
915
916 dest |= FBNIC_RPC_ACT_TBL0_Q_SEL |
917 FIELD_PREP(FBNIC_RPC_ACT_TBL0_Q_ID, ring_idx);
918 }
919
920 idx = location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
921 act_tcam = &fbd->act_tcam[idx];
922
923 /* Do not allow overwriting for now.
924 * To support overwriting rules we will need to add logic to free
925 * any IP or MACDA TCAMs that may be associated with the old rule.
926 */
927 if (act_tcam->state != FBNIC_TCAM_S_DISABLED)
928 return -EBUSY;
929
930 flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_RSS);
931 hash_idx = fbnic_get_rss_hash_idx(flow_type);
932
933 switch (flow_type) {
934 case UDP_V4_FLOW:
935 udp4_flow:
936 flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
937 fallthrough;
938 case TCP_V4_FLOW:
939 tcp4_flow:
940 flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
941 flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
942 FBNIC_RPC_TCAM_ACT1_L4_VALID);
943
944 sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
945 sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
946 dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
947 dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
948 goto ip4_flow;
949 case IP_USER_FLOW:
950 if (!fsp->m_u.usr_ip4_spec.proto)
951 goto ip4_flow;
952 if (fsp->m_u.usr_ip4_spec.proto != 0xff)
953 return -EINVAL;
954 if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_UDP)
955 goto udp4_flow;
956 if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_TCP)
957 goto tcp4_flow;
958 return -EINVAL;
959 ip4_flow:
960 addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4src;
961 mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4src;
962 if (mask4->s_addr) {
963 ip_src = __fbnic_ip4_sync(fbd, fbd->ip_src,
964 addr4, mask4);
965 if (!ip_src)
966 return -ENOSPC;
967
968 set_bit(idx, ip_src->act_tcam);
969 ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
970 FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
971 ip_src - fbd->ip_src);
972 ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
973 FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
974 }
975
976 addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4dst;
977 mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4dst;
978 if (mask4->s_addr) {
979 ip_dst = __fbnic_ip4_sync(fbd, fbd->ip_dst,
980 addr4, mask4);
981 if (!ip_dst) {
982 if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
983 memset(ip_src, 0, sizeof(*ip_src));
984 return -ENOSPC;
985 }
986
987 set_bit(idx, ip_dst->act_tcam);
988 ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
989 FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
990 ip_dst - fbd->ip_dst);
991 ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
992 FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
993 }
994 flow_value |= FBNIC_RPC_TCAM_ACT1_IP_VALID |
995 FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
996 flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
997 FBNIC_RPC_TCAM_ACT1_IP_VALID |
998 FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
999 break;
1000 case UDP_V6_FLOW:
1001 udp6_flow:
1002 flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
1003 fallthrough;
1004 case TCP_V6_FLOW:
1005 tcp6_flow:
1006 flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
1007 flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
1008 FBNIC_RPC_TCAM_ACT1_L4_VALID);
1009
1010 sport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.psrc);
1011 sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.psrc);
1012 dport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.pdst);
1013 dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.pdst);
1014 goto ipv6_flow;
1015 case IPV6_USER_FLOW:
1016 if (!fsp->m_u.usr_ip6_spec.l4_proto)
1017 goto ipv6_flow;
1018
1019 if (fsp->m_u.usr_ip6_spec.l4_proto != 0xff)
1020 return -EINVAL;
1021 if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_UDP)
1022 goto udp6_flow;
1023 if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_TCP)
1024 goto tcp6_flow;
1025 if (fsp->h_u.usr_ip6_spec.l4_proto != IPPROTO_IPV6)
1026 return -EINVAL;
1027
1028 addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
1029 mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
1030 if (!ipv6_addr_any(mask6)) {
1031 ip_src = __fbnic_ip6_sync(fbd, fbd->ipo_src,
1032 addr6, mask6);
1033 if (!ip_src)
1034 return -ENOSPC;
1035
1036 set_bit(idx, ip_src->act_tcam);
1037 ip_value |=
1038 FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
1039 FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
1040 ip_src - fbd->ipo_src);
1041 ip_mask &=
1042 ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
1043 FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX);
1044 }
1045
1046 addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
1047 mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
1048 if (!ipv6_addr_any(mask6)) {
1049 ip_dst = __fbnic_ip6_sync(fbd, fbd->ipo_dst,
1050 addr6, mask6);
1051 if (!ip_dst) {
1052 if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
1053 memset(ip_src, 0, sizeof(*ip_src));
1054 return -ENOSPC;
1055 }
1056
1057 set_bit(idx, ip_dst->act_tcam);
1058 ip_value |=
1059 FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
1060 FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
1061 ip_dst - fbd->ipo_dst);
1062 ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
1063 FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX);
1064 }
1065
1066 flow_value |= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
1067 flow_mask &= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
1068 ipv6_flow:
1069 addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
1070 mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
1071 if (!ip_src && !ipv6_addr_any(mask6)) {
1072 ip_src = __fbnic_ip6_sync(fbd, fbd->ip_src,
1073 addr6, mask6);
1074 if (!ip_src)
1075 return -ENOSPC;
1076
1077 set_bit(idx, ip_src->act_tcam);
1078 ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
1079 FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
1080 ip_src - fbd->ip_src);
1081 ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
1082 FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
1083 }
1084
1085 addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
1086 mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
1087 if (!ip_dst && !ipv6_addr_any(mask6)) {
1088 ip_dst = __fbnic_ip6_sync(fbd, fbd->ip_dst,
1089 addr6, mask6);
1090 if (!ip_dst) {
1091 if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
1092 memset(ip_src, 0, sizeof(*ip_src));
1093 return -ENOSPC;
1094 }
1095
1096 set_bit(idx, ip_dst->act_tcam);
1097 ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
1098 FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
1099 ip_dst - fbd->ip_dst);
1100 ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
1101 FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
1102 }
1103
1104 flow_value |= FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
1105 FBNIC_RPC_TCAM_ACT1_IP_VALID |
1106 FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
1107 flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
1108 FBNIC_RPC_TCAM_ACT1_IP_VALID |
1109 FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
1110 break;
1111 case ETHER_FLOW:
1112 if (!is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) {
1113 u8 *addr = fsp->h_u.ether_spec.h_dest;
1114 u8 *mask = fsp->m_u.ether_spec.h_dest;
1115
1116 /* Do not allow MAC addr of 0 */
1117 if (is_zero_ether_addr(addr))
1118 return -EINVAL;
1119
1120 /* Only support full MAC address to avoid
1121 * conflicts with other MAC addresses.
1122 */
1123 if (!is_broadcast_ether_addr(mask))
1124 return -EINVAL;
1125
1126 if (is_multicast_ether_addr(addr))
1127 mac_addr = __fbnic_mc_sync(fbd, addr);
1128 else
1129 mac_addr = __fbnic_uc_sync(fbd, addr);
1130
1131 if (!mac_addr)
1132 return -ENOSPC;
1133
1134 set_bit(idx, mac_addr->act_tcam);
1135 flow_value |=
1136 FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
1137 mac_addr - fbd->mac_addr);
1138 flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX;
1139 }
1140
1141 flow_value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
1142 flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
1143 break;
1144 default:
1145 return -EINVAL;
1146 }
1147
1148 dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DMA_HINT,
1149 FBNIC_RCD_HDR_AL_DMA_HINT_L4);
1150
1151 /* Write action table values */
1152 act_tcam->dest = dest;
1153 act_tcam->rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, hash_idx);
1154
1155 /* Write IP Match value/mask to action_tcam[0] */
1156 act_tcam->value.tcam[0] = ip_value;
1157 act_tcam->mask.tcam[0] = ip_mask;
1158
1159 /* Write flow type value/mask to action_tcam[1] */
1160 act_tcam->value.tcam[1] = flow_value;
1161 act_tcam->mask.tcam[1] = flow_mask;
1162
1163 /* Write error, DSCP, extra L4 matches to action_tcam[2] */
1164 act_tcam->value.tcam[2] = misc;
1165 act_tcam->mask.tcam[2] = misc_mask;
1166
1167 /* Write source/destination port values */
1168 act_tcam->value.tcam[3] = sport;
1169 act_tcam->mask.tcam[3] = sport_mask;
1170 act_tcam->value.tcam[4] = dport;
1171 act_tcam->mask.tcam[4] = dport_mask;
1172
1173 for (j = 5; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
1174 act_tcam->mask.tcam[j] = 0xffff;
1175
1176 act_tcam->state = FBNIC_TCAM_S_UPDATE;
1177 fsp->location = location;
1178
1179 if (netif_running(fbn->netdev)) {
1180 fbnic_write_rules(fbd);
1181 if (ip_src || ip_dst)
1182 fbnic_write_ip_addr(fbd);
1183 if (mac_addr)
1184 fbnic_write_macda(fbd);
1185 }
1186
1187 return 0;
1188 }
1189
fbnic_clear_nfc_macda(struct fbnic_net * fbn,unsigned int tcam_idx)1190 static void fbnic_clear_nfc_macda(struct fbnic_net *fbn,
1191 unsigned int tcam_idx)
1192 {
1193 struct fbnic_dev *fbd = fbn->fbd;
1194 int idx;
1195
1196 for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;)
1197 __fbnic_xc_unsync(&fbd->mac_addr[idx], tcam_idx);
1198
1199 /* Write updates to hardware */
1200 if (netif_running(fbn->netdev))
1201 fbnic_write_macda(fbd);
1202 }
1203
fbnic_clear_nfc_ip_addr(struct fbnic_net * fbn,unsigned int tcam_idx)1204 static void fbnic_clear_nfc_ip_addr(struct fbnic_net *fbn,
1205 unsigned int tcam_idx)
1206 {
1207 struct fbnic_dev *fbd = fbn->fbd;
1208 int idx;
1209
1210 for (idx = ARRAY_SIZE(fbd->ip_src); idx--;)
1211 __fbnic_ip_unsync(&fbd->ip_src[idx], tcam_idx);
1212 for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;)
1213 __fbnic_ip_unsync(&fbd->ip_dst[idx], tcam_idx);
1214 for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;)
1215 __fbnic_ip_unsync(&fbd->ipo_src[idx], tcam_idx);
1216 for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;)
1217 __fbnic_ip_unsync(&fbd->ipo_dst[idx], tcam_idx);
1218
1219 /* Write updates to hardware */
1220 if (netif_running(fbn->netdev))
1221 fbnic_write_ip_addr(fbd);
1222 }
1223
fbnic_set_cls_rule_del(struct fbnic_net * fbn,const struct ethtool_rxnfc * cmd)1224 static int fbnic_set_cls_rule_del(struct fbnic_net *fbn,
1225 const struct ethtool_rxnfc *cmd)
1226 {
1227 struct ethtool_rx_flow_spec *fsp;
1228 struct fbnic_dev *fbd = fbn->fbd;
1229 struct fbnic_act_tcam *act_tcam;
1230 int idx;
1231
1232 fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
1233
1234 if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
1235 return -EINVAL;
1236
1237 idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
1238 act_tcam = &fbd->act_tcam[idx];
1239
1240 if (act_tcam->state != FBNIC_TCAM_S_VALID)
1241 return -EINVAL;
1242
1243 act_tcam->state = FBNIC_TCAM_S_DELETE;
1244
1245 if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID) &&
1246 (~act_tcam->mask.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX))
1247 fbnic_clear_nfc_macda(fbn, idx);
1248
1249 if ((act_tcam->value.tcam[0] &
1250 (FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
1251 FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
1252 FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
1253 FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID)) &&
1254 (~act_tcam->mask.tcam[0] &
1255 (FBNIC_RPC_TCAM_ACT0_IPSRC_IDX |
1256 FBNIC_RPC_TCAM_ACT0_IPDST_IDX |
1257 FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX |
1258 FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX)))
1259 fbnic_clear_nfc_ip_addr(fbn, idx);
1260
1261 if (netif_running(fbn->netdev))
1262 fbnic_write_rules(fbd);
1263
1264 return 0;
1265 }
1266
fbnic_set_rxnfc(struct net_device * netdev,struct ethtool_rxnfc * cmd)1267 static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1268 {
1269 struct fbnic_net *fbn = netdev_priv(netdev);
1270 int ret = -EOPNOTSUPP;
1271
1272 switch (cmd->cmd) {
1273 case ETHTOOL_SRXCLSRLINS:
1274 ret = fbnic_set_cls_rule_ins(fbn, cmd);
1275 break;
1276 case ETHTOOL_SRXCLSRLDEL:
1277 ret = fbnic_set_cls_rule_del(fbn, cmd);
1278 break;
1279 }
1280
1281 return ret;
1282 }
1283
fbnic_get_rxfh_key_size(struct net_device * netdev)1284 static u32 fbnic_get_rxfh_key_size(struct net_device *netdev)
1285 {
1286 return FBNIC_RPC_RSS_KEY_BYTE_LEN;
1287 }
1288
fbnic_get_rxfh_indir_size(struct net_device * netdev)1289 static u32 fbnic_get_rxfh_indir_size(struct net_device *netdev)
1290 {
1291 return FBNIC_RPC_RSS_TBL_SIZE;
1292 }
1293
1294 static int
fbnic_get_rxfh(struct net_device * netdev,struct ethtool_rxfh_param * rxfh)1295 fbnic_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
1296 {
1297 struct fbnic_net *fbn = netdev_priv(netdev);
1298 unsigned int i;
1299
1300 rxfh->hfunc = ETH_RSS_HASH_TOP;
1301
1302 if (rxfh->key) {
1303 for (i = 0; i < FBNIC_RPC_RSS_KEY_BYTE_LEN; i++) {
1304 u32 rss_key = fbn->rss_key[i / 4] << ((i % 4) * 8);
1305
1306 rxfh->key[i] = rss_key >> 24;
1307 }
1308 }
1309
1310 if (rxfh->indir) {
1311 for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
1312 rxfh->indir[i] = fbn->indir_tbl[0][i];
1313 }
1314
1315 return 0;
1316 }
1317
1318 static unsigned int
fbnic_set_indir(struct fbnic_net * fbn,unsigned int idx,const u32 * indir)1319 fbnic_set_indir(struct fbnic_net *fbn, unsigned int idx, const u32 *indir)
1320 {
1321 unsigned int i, changes = 0;
1322
1323 for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
1324 if (fbn->indir_tbl[idx][i] == indir[i])
1325 continue;
1326
1327 fbn->indir_tbl[idx][i] = indir[i];
1328 changes++;
1329 }
1330
1331 return changes;
1332 }
1333
/* Set the RSS hash key and/or the context-0 indirection table.
 *
 * Only Toeplitz hashing is supported. Hardware is reprogrammed only if
 * something actually changed and the interface is running.
 */
static int
fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
	       struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	unsigned int i, changes = 0;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EINVAL;

	if (rxfh->key) {
		u32 rss_key = 0;

		/* Walk the key bytes backwards, shifting each into the
		 * high byte of rss_key; once i reaches a 4-byte boundary
		 * the word is complete and can be compared/stored.
		 */
		for (i = FBNIC_RPC_RSS_KEY_BYTE_LEN; i--;) {
			rss_key >>= 8;
			rss_key |= (u32)(rxfh->key[i]) << 24;

			if (i % 4)
				continue;

			if (fbn->rss_key[i / 4] == rss_key)
				continue;

			fbn->rss_key[i / 4] = rss_key;
			changes++;
		}
	}

	if (rxfh->indir)
		changes += fbnic_set_indir(fbn, 0, rxfh->indir);

	if (changes && netif_running(netdev))
		fbnic_rss_reinit_hw(fbn->fbd, fbn);

	return 0;
}
1371
1372 static int
fbnic_get_rss_hash_opts(struct net_device * netdev,struct ethtool_rxfh_fields * cmd)1373 fbnic_get_rss_hash_opts(struct net_device *netdev,
1374 struct ethtool_rxfh_fields *cmd)
1375 {
1376 int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
1377 struct fbnic_net *fbn = netdev_priv(netdev);
1378
1379 if (hash_opt_idx < 0)
1380 return -EINVAL;
1381
1382 /* Report options from rss_en table in fbn */
1383 cmd->data = fbn->rss_flow_hash[hash_opt_idx];
1384
1385 return 0;
1386 }
1387
1388 #define FBNIC_L2_HASH_OPTIONS \
1389 (RXH_L2DA | RXH_DISCARD)
1390 #define FBNIC_L3_HASH_OPTIONS \
1391 (FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL)
1392 #define FBNIC_L4_HASH_OPTIONS \
1393 (FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
1394
1395 static int
fbnic_set_rss_hash_opts(struct net_device * netdev,const struct ethtool_rxfh_fields * cmd,struct netlink_ext_ack * extack)1396 fbnic_set_rss_hash_opts(struct net_device *netdev,
1397 const struct ethtool_rxfh_fields *cmd,
1398 struct netlink_ext_ack *extack)
1399 {
1400 struct fbnic_net *fbn = netdev_priv(netdev);
1401 int hash_opt_idx;
1402
1403 /* Verify the type requested is correct */
1404 hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
1405 if (hash_opt_idx < 0)
1406 return -EINVAL;
1407
1408 /* Verify the fields asked for can actually be assigned based on type */
1409 if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
1410 (hash_opt_idx > FBNIC_L4_HASH_OPT &&
1411 cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
1412 (hash_opt_idx > FBNIC_IP_HASH_OPT &&
1413 cmd->data & ~FBNIC_L2_HASH_OPTIONS))
1414 return -EINVAL;
1415
1416 fbn->rss_flow_hash[hash_opt_idx] = cmd->data;
1417
1418 if (netif_running(fbn->netdev)) {
1419 fbnic_rss_reinit(fbn->fbd, fbn);
1420 fbnic_write_rules(fbn->fbd);
1421 }
1422
1423 return 0;
1424 }
1425
1426 static int
fbnic_modify_rxfh_context(struct net_device * netdev,struct ethtool_rxfh_context * ctx,const struct ethtool_rxfh_param * rxfh,struct netlink_ext_ack * extack)1427 fbnic_modify_rxfh_context(struct net_device *netdev,
1428 struct ethtool_rxfh_context *ctx,
1429 const struct ethtool_rxfh_param *rxfh,
1430 struct netlink_ext_ack *extack)
1431 {
1432 struct fbnic_net *fbn = netdev_priv(netdev);
1433 const u32 *indir = rxfh->indir;
1434 unsigned int changes;
1435
1436 if (!indir)
1437 indir = ethtool_rxfh_context_indir(ctx);
1438
1439 changes = fbnic_set_indir(fbn, rxfh->rss_context, indir);
1440 if (changes && netif_running(netdev))
1441 fbnic_rss_reinit_hw(fbn->fbd, fbn);
1442
1443 return 0;
1444 }
1445
1446 static int
fbnic_create_rxfh_context(struct net_device * netdev,struct ethtool_rxfh_context * ctx,const struct ethtool_rxfh_param * rxfh,struct netlink_ext_ack * extack)1447 fbnic_create_rxfh_context(struct net_device *netdev,
1448 struct ethtool_rxfh_context *ctx,
1449 const struct ethtool_rxfh_param *rxfh,
1450 struct netlink_ext_ack *extack)
1451 {
1452 struct fbnic_net *fbn = netdev_priv(netdev);
1453
1454 if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
1455 NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
1456 return -EOPNOTSUPP;
1457 }
1458 ctx->hfunc = ETH_RSS_HASH_TOP;
1459
1460 if (!rxfh->indir) {
1461 u32 *indir = ethtool_rxfh_context_indir(ctx);
1462 unsigned int num_rx = fbn->num_rx_queues;
1463 unsigned int i;
1464
1465 for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
1466 indir[i] = ethtool_rxfh_indir_default(i, num_rx);
1467 }
1468
1469 return fbnic_modify_rxfh_context(netdev, ctx, rxfh, extack);
1470 }
1471
/* Remove an additional RSS context */
static int
fbnic_remove_rxfh_context(struct net_device *netdev,
			  struct ethtool_rxfh_context *ctx, u32 rss_context,
			  struct netlink_ext_ack *extack)
{
	/* Nothing to do, contexts are allocated statically */
	return 0;
}
1480
fbnic_get_channels(struct net_device * netdev,struct ethtool_channels * ch)1481 static void fbnic_get_channels(struct net_device *netdev,
1482 struct ethtool_channels *ch)
1483 {
1484 struct fbnic_net *fbn = netdev_priv(netdev);
1485 struct fbnic_dev *fbd = fbn->fbd;
1486
1487 ch->max_rx = fbd->max_num_queues;
1488 ch->max_tx = fbd->max_num_queues;
1489 ch->max_combined = min(ch->max_rx, ch->max_tx);
1490 ch->max_other = FBNIC_NON_NAPI_VECTORS;
1491
1492 if (fbn->num_rx_queues > fbn->num_napi ||
1493 fbn->num_tx_queues > fbn->num_napi)
1494 ch->combined_count = min(fbn->num_rx_queues,
1495 fbn->num_tx_queues);
1496 else
1497 ch->combined_count =
1498 fbn->num_rx_queues + fbn->num_tx_queues - fbn->num_napi;
1499 ch->rx_count = fbn->num_rx_queues - ch->combined_count;
1500 ch->tx_count = fbn->num_tx_queues - ch->combined_count;
1501 ch->other_count = FBNIC_NON_NAPI_VECTORS;
1502 }
1503
fbnic_set_queues(struct fbnic_net * fbn,struct ethtool_channels * ch,unsigned int max_napis)1504 static void fbnic_set_queues(struct fbnic_net *fbn, struct ethtool_channels *ch,
1505 unsigned int max_napis)
1506 {
1507 fbn->num_rx_queues = ch->rx_count + ch->combined_count;
1508 fbn->num_tx_queues = ch->tx_count + ch->combined_count;
1509 fbn->num_napi = min(ch->rx_count + ch->tx_count + ch->combined_count,
1510 max_napis);
1511 }
1512
fbnic_set_channels(struct net_device * netdev,struct ethtool_channels * ch)1513 static int fbnic_set_channels(struct net_device *netdev,
1514 struct ethtool_channels *ch)
1515 {
1516 struct fbnic_net *fbn = netdev_priv(netdev);
1517 unsigned int max_napis, standalone;
1518 struct fbnic_dev *fbd = fbn->fbd;
1519 struct fbnic_net *clone;
1520 int err;
1521
1522 max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
1523 standalone = ch->rx_count + ch->tx_count;
1524
1525 /* Limits for standalone queues:
1526 * - each queue has its own NAPI (num_napi >= rx + tx + combined)
1527 * - combining queues (combined not 0, rx or tx must be 0)
1528 */
1529 if ((ch->rx_count && ch->tx_count && ch->combined_count) ||
1530 (standalone && standalone + ch->combined_count > max_napis) ||
1531 ch->rx_count + ch->combined_count > fbd->max_num_queues ||
1532 ch->tx_count + ch->combined_count > fbd->max_num_queues ||
1533 ch->other_count != FBNIC_NON_NAPI_VECTORS)
1534 return -EINVAL;
1535
1536 if (!netif_running(netdev)) {
1537 fbnic_set_queues(fbn, ch, max_napis);
1538 fbnic_reset_indir_tbl(fbn);
1539 return 0;
1540 }
1541
1542 clone = fbnic_clone_create(fbn);
1543 if (!clone)
1544 return -ENOMEM;
1545
1546 fbnic_set_queues(clone, ch, max_napis);
1547
1548 err = fbnic_alloc_napi_vectors(clone);
1549 if (err)
1550 goto err_free_clone;
1551
1552 err = fbnic_alloc_resources(clone);
1553 if (err)
1554 goto err_free_napis;
1555
1556 fbnic_down_noidle(fbn);
1557 err = fbnic_wait_all_queues_idle(fbn->fbd, true);
1558 if (err)
1559 goto err_start_stack;
1560
1561 err = fbnic_set_netif_queues(clone);
1562 if (err)
1563 goto err_start_stack;
1564
1565 /* Nothing can fail past this point */
1566 fbnic_flush(fbn);
1567
1568 fbnic_clone_swap(fbn, clone);
1569
1570 /* Reset RSS indirection table */
1571 fbnic_reset_indir_tbl(fbn);
1572
1573 fbnic_up(fbn);
1574
1575 fbnic_free_resources(clone);
1576 fbnic_free_napi_vectors(clone);
1577 fbnic_clone_free(clone);
1578
1579 return 0;
1580
1581 err_start_stack:
1582 fbnic_flush(fbn);
1583 fbnic_up(fbn);
1584 fbnic_free_resources(clone);
1585 err_free_napis:
1586 fbnic_free_napi_vectors(clone);
1587 err_free_clone:
1588 fbnic_clone_free(clone);
1589 return err;
1590 }
1591
1592 static int
fbnic_get_ts_info(struct net_device * netdev,struct kernel_ethtool_ts_info * tsinfo)1593 fbnic_get_ts_info(struct net_device *netdev,
1594 struct kernel_ethtool_ts_info *tsinfo)
1595 {
1596 struct fbnic_net *fbn = netdev_priv(netdev);
1597
1598 tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);
1599
1600 tsinfo->so_timestamping =
1601 SOF_TIMESTAMPING_TX_SOFTWARE |
1602 SOF_TIMESTAMPING_TX_HARDWARE |
1603 SOF_TIMESTAMPING_RX_HARDWARE |
1604 SOF_TIMESTAMPING_RAW_HARDWARE;
1605
1606 tsinfo->tx_types =
1607 BIT(HWTSTAMP_TX_OFF) |
1608 BIT(HWTSTAMP_TX_ON);
1609
1610 tsinfo->rx_filters =
1611 BIT(HWTSTAMP_FILTER_NONE) |
1612 BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
1613 BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1614 BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1615 BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
1616 BIT(HWTSTAMP_FILTER_ALL);
1617
1618 return 0;
1619 }
1620
fbnic_get_ts_stats(struct net_device * netdev,struct ethtool_ts_stats * ts_stats)1621 static void fbnic_get_ts_stats(struct net_device *netdev,
1622 struct ethtool_ts_stats *ts_stats)
1623 {
1624 struct fbnic_net *fbn = netdev_priv(netdev);
1625 u64 ts_packets, ts_lost;
1626 struct fbnic_ring *ring;
1627 unsigned int start;
1628 int i;
1629
1630 ts_stats->pkts = fbn->tx_stats.twq.ts_packets;
1631 ts_stats->lost = fbn->tx_stats.twq.ts_lost;
1632 for (i = 0; i < fbn->num_tx_queues; i++) {
1633 ring = fbn->tx[i];
1634 do {
1635 start = u64_stats_fetch_begin(&ring->stats.syncp);
1636 ts_packets = ring->stats.twq.ts_packets;
1637 ts_lost = ring->stats.twq.ts_lost;
1638 } while (u64_stats_fetch_retry(&ring->stats.syncp, start));
1639 ts_stats->pkts += ts_packets;
1640 ts_stats->lost += ts_lost;
1641 }
1642 }
1643
/* Read a page of module (QSFP) EEPROM via the firmware mailbox.
 *
 * Builds a completion carrying the requested page/bank/offset/length,
 * sends a QSFP read request to firmware, waits for the response, and
 * copies the returned bytes into @page_data->data.
 *
 * Return: number of bytes read on success, negative errno on failure
 * (-EINVAL for an unsupported i2c address, -ENOMEM, -ETIMEDOUT, or the
 * firmware-reported error).
 */
static int
fbnic_get_module_eeprom_by_page(struct net_device *netdev,
				const struct ethtool_module_eeprom *page_data,
				struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_fw_completion *fw_cmpl;
	struct fbnic_dev *fbd = fbn->fbd;
	int err;

	if (page_data->i2c_address != 0x50) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid i2c address. Only 0x50 is supported");
		return -EINVAL;
	}

	fw_cmpl = __fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_QSFP_READ_RESP,
					page_data->length);
	if (!fw_cmpl)
		return -ENOMEM;

	/* Initialize completion and queue it for FW to process */
	fw_cmpl->u.qsfp.length = page_data->length;
	fw_cmpl->u.qsfp.offset = page_data->offset;
	fw_cmpl->u.qsfp.page = page_data->page;
	fw_cmpl->u.qsfp.bank = page_data->bank;

	err = fbnic_fw_xmit_qsfp_read_msg(fbd, fw_cmpl, page_data->page,
					  page_data->bank, page_data->offset,
					  page_data->length);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to transmit EEPROM read request");
		/* Request never queued; no mailbox cleanup needed */
		goto exit_free;
	}

	if (!fbnic_mbx_wait_for_cmpl(fw_cmpl)) {
		err = -ETIMEDOUT;
		NL_SET_ERR_MSG_MOD(extack,
				   "Timed out waiting for firmware response");
		goto exit_cleanup;
	}

	if (fw_cmpl->result) {
		err = fw_cmpl->result;
		NL_SET_ERR_MSG_MOD(extack, "Failed to read EEPROM");
		goto exit_cleanup;
	}

	memcpy(page_data->data, fw_cmpl->u.qsfp.data, page_data->length);

exit_cleanup:
	fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
exit_free:
	fbnic_fw_put_cmpl(fw_cmpl);

	return err ? : page_data->length;
}
1702
fbnic_set_counter(u64 * stat,struct fbnic_stat_counter * counter)1703 static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
1704 {
1705 if (counter->reported)
1706 *stat = counter->value;
1707 }
1708
1709 static void
fbnic_get_pause_stats(struct net_device * netdev,struct ethtool_pause_stats * pause_stats)1710 fbnic_get_pause_stats(struct net_device *netdev,
1711 struct ethtool_pause_stats *pause_stats)
1712 {
1713 struct fbnic_net *fbn = netdev_priv(netdev);
1714 struct fbnic_mac_stats *mac_stats;
1715 struct fbnic_dev *fbd = fbn->fbd;
1716
1717 mac_stats = &fbd->hw_stats.mac;
1718
1719 fbd->mac->get_pause_stats(fbd, false, &mac_stats->pause);
1720
1721 pause_stats->tx_pause_frames = mac_stats->pause.tx_pause_frames.value;
1722 pause_stats->rx_pause_frames = mac_stats->pause.rx_pause_frames.value;
1723 }
1724
1725 static void
fbnic_get_fec_stats(struct net_device * netdev,struct ethtool_fec_stats * fec_stats,struct ethtool_fec_hist * hist)1726 fbnic_get_fec_stats(struct net_device *netdev,
1727 struct ethtool_fec_stats *fec_stats,
1728 struct ethtool_fec_hist *hist)
1729 {
1730 struct fbnic_net *fbn = netdev_priv(netdev);
1731 struct fbnic_phy_stats *phy_stats;
1732 struct fbnic_dev *fbd = fbn->fbd;
1733
1734 fbnic_get_hw_stats32(fbd);
1735 phy_stats = &fbd->hw_stats.phy;
1736
1737 spin_lock(&fbd->hw_stats.lock);
1738 fec_stats->corrected_blocks.total =
1739 phy_stats->fec.corrected_blocks.value;
1740 fec_stats->uncorrectable_blocks.total =
1741 phy_stats->fec.uncorrectable_blocks.value;
1742 spin_unlock(&fbd->hw_stats.lock);
1743 }
1744
1745 static void
fbnic_get_eth_phy_stats(struct net_device * netdev,struct ethtool_eth_phy_stats * eth_phy_stats)1746 fbnic_get_eth_phy_stats(struct net_device *netdev,
1747 struct ethtool_eth_phy_stats *eth_phy_stats)
1748 {
1749 struct fbnic_net *fbn = netdev_priv(netdev);
1750 struct fbnic_phy_stats *phy_stats;
1751 struct fbnic_dev *fbd = fbn->fbd;
1752 u64 total = 0;
1753 int i;
1754
1755 fbnic_get_hw_stats32(fbd);
1756 phy_stats = &fbd->hw_stats.phy;
1757
1758 spin_lock(&fbd->hw_stats.lock);
1759 for (i = 0; i < FBNIC_PCS_MAX_LANES; i++)
1760 total += phy_stats->pcs.SymbolErrorDuringCarrier.lanes[i].value;
1761
1762 eth_phy_stats->SymbolErrorDuringCarrier = total;
1763 spin_unlock(&fbd->hw_stats.lock);
1764 }
1765
1766 static void
fbnic_get_eth_mac_stats(struct net_device * netdev,struct ethtool_eth_mac_stats * eth_mac_stats)1767 fbnic_get_eth_mac_stats(struct net_device *netdev,
1768 struct ethtool_eth_mac_stats *eth_mac_stats)
1769 {
1770 struct fbnic_net *fbn = netdev_priv(netdev);
1771 struct fbnic_mac_stats *mac_stats;
1772 struct fbnic_dev *fbd = fbn->fbd;
1773 const struct fbnic_mac *mac;
1774
1775 mac_stats = &fbd->hw_stats.mac;
1776 mac = fbd->mac;
1777
1778 mac->get_eth_mac_stats(fbd, false, &mac_stats->eth_mac);
1779
1780 fbnic_set_counter(ð_mac_stats->FramesTransmittedOK,
1781 &mac_stats->eth_mac.FramesTransmittedOK);
1782 fbnic_set_counter(ð_mac_stats->FramesReceivedOK,
1783 &mac_stats->eth_mac.FramesReceivedOK);
1784 fbnic_set_counter(ð_mac_stats->FrameCheckSequenceErrors,
1785 &mac_stats->eth_mac.FrameCheckSequenceErrors);
1786 fbnic_set_counter(ð_mac_stats->AlignmentErrors,
1787 &mac_stats->eth_mac.AlignmentErrors);
1788 fbnic_set_counter(ð_mac_stats->OctetsTransmittedOK,
1789 &mac_stats->eth_mac.OctetsTransmittedOK);
1790 fbnic_set_counter(ð_mac_stats->FramesLostDueToIntMACXmitError,
1791 &mac_stats->eth_mac.FramesLostDueToIntMACXmitError);
1792 fbnic_set_counter(ð_mac_stats->OctetsReceivedOK,
1793 &mac_stats->eth_mac.OctetsReceivedOK);
1794 fbnic_set_counter(ð_mac_stats->FramesLostDueToIntMACRcvError,
1795 &mac_stats->eth_mac.FramesLostDueToIntMACRcvError);
1796 fbnic_set_counter(ð_mac_stats->MulticastFramesXmittedOK,
1797 &mac_stats->eth_mac.MulticastFramesXmittedOK);
1798 fbnic_set_counter(ð_mac_stats->BroadcastFramesXmittedOK,
1799 &mac_stats->eth_mac.BroadcastFramesXmittedOK);
1800 fbnic_set_counter(ð_mac_stats->MulticastFramesReceivedOK,
1801 &mac_stats->eth_mac.MulticastFramesReceivedOK);
1802 fbnic_set_counter(ð_mac_stats->BroadcastFramesReceivedOK,
1803 &mac_stats->eth_mac.BroadcastFramesReceivedOK);
1804 fbnic_set_counter(ð_mac_stats->FrameTooLongErrors,
1805 &mac_stats->eth_mac.FrameTooLongErrors);
1806 }
1807
1808 static void
fbnic_get_eth_ctrl_stats(struct net_device * netdev,struct ethtool_eth_ctrl_stats * eth_ctrl_stats)1809 fbnic_get_eth_ctrl_stats(struct net_device *netdev,
1810 struct ethtool_eth_ctrl_stats *eth_ctrl_stats)
1811 {
1812 struct fbnic_net *fbn = netdev_priv(netdev);
1813 struct fbnic_mac_stats *mac_stats;
1814 struct fbnic_dev *fbd = fbn->fbd;
1815
1816 mac_stats = &fbd->hw_stats.mac;
1817
1818 fbd->mac->get_eth_ctrl_stats(fbd, false, &mac_stats->eth_ctrl);
1819
1820 eth_ctrl_stats->MACControlFramesReceived =
1821 mac_stats->eth_ctrl.MACControlFramesReceived.value;
1822 eth_ctrl_stats->MACControlFramesTransmitted =
1823 mac_stats->eth_ctrl.MACControlFramesTransmitted.value;
1824 }
1825
/* Packet-size buckets for the RMON rx/tx histograms; terminated by a
 * zero entry, which the histogram copy loop uses as its sentinel.
 */
static const struct ethtool_rmon_hist_range fbnic_rmon_ranges[] = {
	{    0,   64 },
	{   65,  127 },
	{  128,  255 },
	{  256,  511 },
	{  512, 1023 },
	{ 1024, 1518 },
	{ 1519, 2047 },
	{ 2048, 4095 },
	{ 4096, 8191 },
	{ 8192, 9216 },
	{ 9217, FBNIC_MAX_JUMBO_FRAME_SIZE },
	{}
};
1840
1841 static void
fbnic_get_rmon_stats(struct net_device * netdev,struct ethtool_rmon_stats * rmon_stats,const struct ethtool_rmon_hist_range ** ranges)1842 fbnic_get_rmon_stats(struct net_device *netdev,
1843 struct ethtool_rmon_stats *rmon_stats,
1844 const struct ethtool_rmon_hist_range **ranges)
1845 {
1846 struct fbnic_net *fbn = netdev_priv(netdev);
1847 struct fbnic_mac_stats *mac_stats;
1848 struct fbnic_dev *fbd = fbn->fbd;
1849 int i;
1850
1851 mac_stats = &fbd->hw_stats.mac;
1852
1853 fbd->mac->get_rmon_stats(fbd, false, &mac_stats->rmon);
1854
1855 rmon_stats->undersize_pkts =
1856 mac_stats->rmon.undersize_pkts.value;
1857 rmon_stats->oversize_pkts =
1858 mac_stats->rmon.oversize_pkts.value;
1859 rmon_stats->fragments =
1860 mac_stats->rmon.fragments.value;
1861 rmon_stats->jabbers =
1862 mac_stats->rmon.jabbers.value;
1863
1864 for (i = 0; fbnic_rmon_ranges[i].high; i++) {
1865 rmon_stats->hist[i] = mac_stats->rmon.hist[i].value;
1866 rmon_stats->hist_tx[i] = mac_stats->rmon.hist_tx[i].value;
1867 }
1868
1869 *ranges = fbnic_rmon_ranges;
1870 }
1871
fbnic_get_link_ext_stats(struct net_device * netdev,struct ethtool_link_ext_stats * stats)1872 static void fbnic_get_link_ext_stats(struct net_device *netdev,
1873 struct ethtool_link_ext_stats *stats)
1874 {
1875 struct fbnic_net *fbn = netdev_priv(netdev);
1876
1877 stats->link_down_events = fbn->link_down_events;
1878 }
1879
/* ethtool operations table for fbnic netdevs */
static const struct ethtool_ops fbnic_ethtool_ops = {
	.cap_link_lanes_supported	= true,
	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS |
					  ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.supported_ring_params		= ETHTOOL_RING_USE_TCP_DATA_SPLIT |
					  ETHTOOL_RING_USE_HDS_THRS,
	.rxfh_max_num_contexts		= FBNIC_RPC_RSS_TBL_COUNT,
	.get_drvinfo		= fbnic_get_drvinfo,
	.get_regs_len		= fbnic_get_regs_len,
	.get_regs		= fbnic_get_regs,
	.get_link		= ethtool_op_get_link,
	.get_link_ext_stats	= fbnic_get_link_ext_stats,
	.get_coalesce		= fbnic_get_coalesce,
	.set_coalesce		= fbnic_set_coalesce,
	.get_ringparam		= fbnic_get_ringparam,
	.set_ringparam		= fbnic_set_ringparam,
	.get_pause_stats	= fbnic_get_pause_stats,
	.get_pauseparam		= fbnic_phylink_get_pauseparam,
	.set_pauseparam		= fbnic_phylink_set_pauseparam,
	.get_strings		= fbnic_get_strings,
	.get_ethtool_stats	= fbnic_get_ethtool_stats,
	.get_sset_count		= fbnic_get_sset_count,
	.get_rxnfc		= fbnic_get_rxnfc,
	.set_rxnfc		= fbnic_set_rxnfc,
	.get_rx_ring_count	= fbnic_get_rx_ring_count,
	.get_rxfh_key_size	= fbnic_get_rxfh_key_size,
	.get_rxfh_indir_size	= fbnic_get_rxfh_indir_size,
	.get_rxfh		= fbnic_get_rxfh,
	.set_rxfh		= fbnic_set_rxfh,
	.get_rxfh_fields	= fbnic_get_rss_hash_opts,
	.set_rxfh_fields	= fbnic_set_rss_hash_opts,
	.create_rxfh_context	= fbnic_create_rxfh_context,
	.modify_rxfh_context	= fbnic_modify_rxfh_context,
	.remove_rxfh_context	= fbnic_remove_rxfh_context,
	.get_channels		= fbnic_get_channels,
	.set_channels		= fbnic_set_channels,
	.get_ts_info		= fbnic_get_ts_info,
	.get_ts_stats		= fbnic_get_ts_stats,
	.get_link_ksettings	= fbnic_phylink_ethtool_ksettings_get,
	.get_fec_stats		= fbnic_get_fec_stats,
	.get_fecparam		= fbnic_phylink_get_fecparam,
	.get_module_eeprom_by_page = fbnic_get_module_eeprom_by_page,
	.get_eth_phy_stats	= fbnic_get_eth_phy_stats,
	.get_eth_mac_stats	= fbnic_get_eth_mac_stats,
	.get_eth_ctrl_stats	= fbnic_get_eth_ctrl_stats,
	.get_rmon_stats		= fbnic_get_rmon_stats,
};
1927
fbnic_set_ethtool_ops(struct net_device * dev)1928 void fbnic_set_ethtool_ops(struct net_device *dev)
1929 {
1930 dev->ethtool_ops = &fbnic_ethtool_ops;
1931 }
1932