// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) Meta Platforms, Inc. and affiliates. */

#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <net/ipv6.h>

#include "fbnic.h"
#include "fbnic_netdev.h"
#include "fbnic_tlv.h"

struct fbnic_stat {
	u8 string[ETH_GSTRING_LEN];
	unsigned int size;
	unsigned int offset;
};

#define FBNIC_STAT_FIELDS(type, name, stat) { \
	.string = name, \
	.size = sizeof_field(struct type, stat), \
	.offset = offsetof(struct type, stat), \
}
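
/* Each stats table below is an array of these descriptors; the report
 * helpers walk a table and copy the u64 found at .offset within the
 * matching stats structure, so adding a stat only takes a name and a
 * struct member here.
 */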

/* Hardware statistics not captured in rtnl_link_stats */
#define FBNIC_HW_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_hw_stats, name, stat)

static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
	/* TTI */
	FBNIC_HW_STAT("tti_cm_drop_frames", tti.cm_drop.frames),
	FBNIC_HW_STAT("tti_cm_drop_bytes", tti.cm_drop.bytes),
	FBNIC_HW_STAT("tti_frame_drop_frames", tti.frame_drop.frames),
	FBNIC_HW_STAT("tti_frame_drop_bytes", tti.frame_drop.bytes),
	FBNIC_HW_STAT("tti_tbi_drop_frames", tti.tbi_drop.frames),
	FBNIC_HW_STAT("tti_tbi_drop_bytes", tti.tbi_drop.bytes),

	/* TMI */
	FBNIC_HW_STAT("ptp_illegal_req", tmi.ptp_illegal_req),
	FBNIC_HW_STAT("ptp_good_ts", tmi.ptp_good_ts),
	FBNIC_HW_STAT("ptp_bad_ts", tmi.ptp_bad_ts),

	/* RPC */
	FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype),
	FBNIC_HW_STAT("rpc_unkn_ext_hdr", rpc.unkn_ext_hdr),
	FBNIC_HW_STAT("rpc_ipv4_frag", rpc.ipv4_frag),
	FBNIC_HW_STAT("rpc_ipv6_frag", rpc.ipv6_frag),
	FBNIC_HW_STAT("rpc_ipv4_esp", rpc.ipv4_esp),
	FBNIC_HW_STAT("rpc_ipv6_esp", rpc.ipv6_esp),
	FBNIC_HW_STAT("rpc_tcp_opt_err", rpc.tcp_opt_err),
	FBNIC_HW_STAT("rpc_out_of_hdr_err", rpc.out_of_hdr_err),
};

#define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats)

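/* The "%u" in the strings below is a per-instance index placeholder;
 * fbnic_get_strings() expands it with ethtool_sprintf() once for each
 * RXB enqueue/FIFO/dequeue index or hardware queue.
 */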
#define FBNIC_RXB_ENQUEUE_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_rxb_enqueue_stats, name, stat)

static const struct fbnic_stat fbnic_gstrings_rxb_enqueue_stats[] = {
	FBNIC_RXB_ENQUEUE_STAT("rxb_integrity_err%u", integrity_err),
	FBNIC_RXB_ENQUEUE_STAT("rxb_mac_err%u", mac_err),
	FBNIC_RXB_ENQUEUE_STAT("rxb_parser_err%u", parser_err),
	FBNIC_RXB_ENQUEUE_STAT("rxb_frm_err%u", frm_err),

	FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_frames", drbo.frames),
	FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_bytes", drbo.bytes),
};

#define FBNIC_HW_RXB_ENQUEUE_STATS_LEN \
	ARRAY_SIZE(fbnic_gstrings_rxb_enqueue_stats)

#define FBNIC_RXB_FIFO_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_rxb_fifo_stats, name, stat)

static const struct fbnic_stat fbnic_gstrings_rxb_fifo_stats[] = {
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_drop", trans_drop),
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_dropped_frames", drop.frames),
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_ecn", trans_ecn),
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_level", level),
};

#define FBNIC_HW_RXB_FIFO_STATS_LEN ARRAY_SIZE(fbnic_gstrings_rxb_fifo_stats)

#define FBNIC_RXB_DEQUEUE_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_rxb_dequeue_stats, name, stat)

static const struct fbnic_stat fbnic_gstrings_rxb_dequeue_stats[] = {
	FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_frames", intf.frames),
	FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_bytes", intf.bytes),
	FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_frames", pbuf.frames),
	FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_bytes", pbuf.bytes),
};

#define FBNIC_HW_RXB_DEQUEUE_STATS_LEN \
	ARRAY_SIZE(fbnic_gstrings_rxb_dequeue_stats)

#define FBNIC_HW_Q_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_hw_q_stats, name, stat.value)

static const struct fbnic_stat fbnic_gstrings_hw_q_stats[] = {
	FBNIC_HW_Q_STAT("rde_%u_pkt_err", rde_pkt_err),
	FBNIC_HW_Q_STAT("rde_%u_pkt_cq_drop", rde_pkt_cq_drop),
	FBNIC_HW_Q_STAT("rde_%u_pkt_bdq_drop", rde_pkt_bdq_drop),
};

#define FBNIC_HW_Q_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_q_stats)
#define FBNIC_HW_STATS_LEN \
	(FBNIC_HW_FIXED_STATS_LEN + \
	 FBNIC_HW_RXB_ENQUEUE_STATS_LEN * FBNIC_RXB_ENQUEUE_INDICES + \
	 FBNIC_HW_RXB_FIFO_STATS_LEN * FBNIC_RXB_FIFO_INDICES + \
	 FBNIC_HW_RXB_DEQUEUE_STATS_LEN * FBNIC_RXB_DEQUEUE_INDICES + \
	 FBNIC_HW_Q_STATS_LEN * FBNIC_MAX_QUEUES)
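
/* These lengths size the ethtool string/value arrays; fbnic_get_strings()
 * and fbnic_get_ethtool_stats() must emit the groups in exactly the order
 * summed here or names and values would be misaligned.
 */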

#define FBNIC_QUEUE_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_ring, name, stat)

static const struct fbnic_stat fbnic_gstrings_xdp_stats[] = {
	FBNIC_QUEUE_STAT("xdp_tx_queue_%u_packets", stats.packets),
	FBNIC_QUEUE_STAT("xdp_tx_queue_%u_bytes", stats.bytes),
	FBNIC_QUEUE_STAT("xdp_tx_queue_%u_dropped", stats.dropped),
};

#define FBNIC_XDP_STATS_LEN ARRAY_SIZE(fbnic_gstrings_xdp_stats)

#define FBNIC_STATS_LEN \
	(FBNIC_HW_STATS_LEN + FBNIC_XDP_STATS_LEN * FBNIC_MAX_XDPQS)

static void
fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;

	fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
				    sizeof(drvinfo->fw_version));
}

static int fbnic_get_regs_len(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
}

static void fbnic_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *data)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
}

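/* Ring/channel reconfiguration is staged through a clone of the netdev
 * private struct: the clone carries the new configuration and freshly
 * allocated resources, and fbnic_clone_swap() later exchanges it with
 * the live instance. The ring and NAPI pointer arrays start out cleared
 * so the clone allocates its own.
 */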
static struct fbnic_net *fbnic_clone_create(struct fbnic_net *orig)
{
	struct fbnic_net *clone;

	clone = kmemdup(orig, sizeof(*orig), GFP_KERNEL);
	if (!clone)
		return NULL;

	memset(clone->tx, 0, sizeof(clone->tx));
	memset(clone->rx, 0, sizeof(clone->rx));
	memset(clone->napi, 0, sizeof(clone->napi));
	return clone;
}

static void fbnic_clone_swap_cfg(struct fbnic_net *orig,
				 struct fbnic_net *clone)
{
	swap(clone->rcq_size, orig->rcq_size);
	swap(clone->hpq_size, orig->hpq_size);
	swap(clone->ppq_size, orig->ppq_size);
	swap(clone->txq_size, orig->txq_size);
	swap(clone->num_rx_queues, orig->num_rx_queues);
	swap(clone->num_tx_queues, orig->num_tx_queues);
	swap(clone->num_napi, orig->num_napi);
	swap(clone->hds_thresh, orig->hds_thresh);
}

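/* Each NAPI vector carries txt_count Tx queue triads (Tx work ring, XDP
 * work ring, Tx completion ring) followed by rxt_count Rx triads (two
 * buffer descriptor sub-rings and an Rx completion ring); fold all of
 * their counters into fbn before the rings go away.
 */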
static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
					    struct fbnic_napi_vector *nv)
{
	int i, j;

	for (i = 0; i < nv->txt_count; i++) {
		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0);
		fbnic_aggregate_ring_xdp_counters(fbn, &nv->qt[i].sub1);
		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl);
	}

	for (j = 0; j < nv->rxt_count; j++, i++) {
		fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub0);
		fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub1);
		fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl);
	}
}

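/* Quiesce IRQs across both the old and new vector counts, harvest the
 * counters from the outgoing rings, then swap the configuration and the
 * ring/NAPI pointers so the clone ends up holding everything that is
 * about to be freed.
 */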
static void fbnic_clone_swap(struct fbnic_net *orig,
			     struct fbnic_net *clone)
{
	struct fbnic_dev *fbd = orig->fbd;
	unsigned int i;

	for (i = 0; i < max(clone->num_napi, orig->num_napi); i++)
		fbnic_synchronize_irq(fbd, FBNIC_NON_NAPI_VECTORS + i);
	for (i = 0; i < orig->num_napi; i++)
		fbnic_aggregate_vector_counters(orig, orig->napi[i]);

	fbnic_clone_swap_cfg(orig, clone);

	for (i = 0; i < ARRAY_SIZE(orig->napi); i++)
		swap(clone->napi[i], orig->napi[i]);
	for (i = 0; i < ARRAY_SIZE(orig->tx); i++)
		swap(clone->tx[i], orig->tx[i]);
	for (i = 0; i < ARRAY_SIZE(orig->rx); i++)
		swap(clone->rx[i], orig->rx[i]);
}

static void fbnic_clone_free(struct fbnic_net *clone)
{
	kfree(clone);
}

static int fbnic_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	ec->tx_coalesce_usecs = fbn->tx_usecs;
	ec->rx_coalesce_usecs = fbn->rx_usecs;
	ec->rx_max_coalesced_frames = fbn->rx_max_frames;

	return 0;
}

static int fbnic_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	/* Verify against hardware limits */
	if (ec->rx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT)) {
		NL_SET_ERR_MSG_MOD(extack, "rx_usecs is above device max");
		return -EINVAL;
	}
	if (ec->tx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT)) {
		NL_SET_ERR_MSG_MOD(extack, "tx_usecs is above device max");
		return -EINVAL;
	}
	if (ec->rx_max_coalesced_frames >
	    FIELD_MAX(FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK) /
	    FBNIC_MIN_RXD_PER_FRAME) {
		NL_SET_ERR_MSG_MOD(extack, "rx_frames is above device max");
		return -EINVAL;
	}

	fbn->tx_usecs = ec->tx_coalesce_usecs;
	fbn->rx_usecs = ec->rx_coalesce_usecs;
	fbn->rx_max_frames = ec->rx_max_coalesced_frames;

	if (netif_running(netdev)) {
		int i;

		for (i = 0; i < fbn->num_napi; i++) {
			struct fbnic_napi_vector *nv = fbn->napi[i];

			fbnic_config_txrx_usecs(nv, 0);
			fbnic_config_rx_frames(nv);
		}
	}

	return 0;
}

static void
fbnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
		    struct kernel_ethtool_ringparam *kernel_ring,
		    struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	ring->rx_max_pending = FBNIC_QUEUE_SIZE_MAX;
	ring->rx_mini_max_pending = FBNIC_QUEUE_SIZE_MAX;
	ring->rx_jumbo_max_pending = FBNIC_QUEUE_SIZE_MAX;
	ring->tx_max_pending = FBNIC_QUEUE_SIZE_MAX;

	ring->rx_pending = fbn->rcq_size;
	ring->rx_mini_pending = fbn->hpq_size;
	ring->rx_jumbo_pending = fbn->ppq_size;
	ring->tx_pending = fbn->txq_size;

	kernel_ring->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
	kernel_ring->hds_thresh_max = FBNIC_HDS_THRESH_MAX;
	kernel_ring->hds_thresh = fbn->hds_thresh;
}

static void fbnic_set_rings(struct fbnic_net *fbn,
			    struct ethtool_ringparam *ring,
			    struct kernel_ethtool_ringparam *kernel_ring)
{
	fbn->rcq_size = ring->rx_pending;
	fbn->hpq_size = ring->rx_mini_pending;
	fbn->ppq_size = ring->rx_jumbo_pending;
	fbn->txq_size = ring->tx_pending;
	fbn->hds_thresh = kernel_ring->hds_thresh;
}

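/* Ring sizes are applied make-before-break: the clone's vectors and
 * resources are allocated while the device is still up, and only then
 * is the stack taken down. From fbnic_flush() onward nothing can fail;
 * on any earlier error the original configuration is restored untouched.
 */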
static int
fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
		    struct kernel_ethtool_ringparam *kernel_ring,
		    struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_net *clone;
	int err;

	ring->rx_pending = roundup_pow_of_two(ring->rx_pending);
	ring->rx_mini_pending = roundup_pow_of_two(ring->rx_mini_pending);
	ring->rx_jumbo_pending = roundup_pow_of_two(ring->rx_jumbo_pending);
	ring->tx_pending = roundup_pow_of_two(ring->tx_pending);

	/* These are absolute minimums that allow the device and driver to
	 * operate, but they do not guarantee reasonable performance. Rx
	 * queue sizes below 128 and BDQs smaller than 64 are likely
	 * suboptimal at best.
	 */
	if (ring->rx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_RX_DESC_MIN) ||
	    ring->rx_mini_pending < FBNIC_QUEUE_SIZE_MIN ||
	    ring->rx_jumbo_pending < FBNIC_QUEUE_SIZE_MIN ||
	    ring->tx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_TX_DESC_MIN)) {
		NL_SET_ERR_MSG_MOD(extack, "requested ring size too small");
		return -EINVAL;
	}

	if (kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot disable TCP data split");
		return -EINVAL;
	}

	/* If an XDP program is attached, check for potential frame
	 * splitting. If the new HDS threshold can cause splitting, only
	 * allow it if the attached XDP program can handle frags.
	 */
	if (fbnic_check_split_frames(fbn->xdp_prog, netdev->mtu,
				     kernel_ring->hds_thresh)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Use higher HDS threshold or multi-buf capable program");
		return -EINVAL;
	}

	if (!netif_running(netdev)) {
		fbnic_set_rings(fbn, ring, kernel_ring);
		return 0;
	}

	clone = fbnic_clone_create(fbn);
	if (!clone)
		return -ENOMEM;

	fbnic_set_rings(clone, ring, kernel_ring);

	err = fbnic_alloc_napi_vectors(clone);
	if (err)
		goto err_free_clone;

	err = fbnic_alloc_resources(clone);
	if (err)
		goto err_free_napis;

	fbnic_down_noidle(fbn);
	err = fbnic_wait_all_queues_idle(fbn->fbd, true);
	if (err)
		goto err_start_stack;

	err = fbnic_set_netif_queues(clone);
	if (err)
		goto err_start_stack;

	/* Nothing can fail past this point */
	fbnic_flush(fbn);

	fbnic_clone_swap(fbn, clone);

	fbnic_up(fbn);

	fbnic_free_resources(clone);
	fbnic_free_napi_vectors(clone);
	fbnic_clone_free(clone);

	return 0;

err_start_stack:
	fbnic_flush(fbn);
	fbnic_up(fbn);
	fbnic_free_resources(clone);
err_free_napis:
	fbnic_free_napi_vectors(clone);
err_free_clone:
	fbnic_clone_free(clone);
	return err;
}

static void fbnic_get_rxb_enqueue_strings(u8 **data, unsigned int idx)
{
	const struct fbnic_stat *stat;
	int i;

	stat = fbnic_gstrings_rxb_enqueue_stats;
	for (i = 0; i < FBNIC_HW_RXB_ENQUEUE_STATS_LEN; i++, stat++)
		ethtool_sprintf(data, stat->string, idx);
}

static void fbnic_get_rxb_fifo_strings(u8 **data, unsigned int idx)
{
	const struct fbnic_stat *stat;
	int i;

	stat = fbnic_gstrings_rxb_fifo_stats;
	for (i = 0; i < FBNIC_HW_RXB_FIFO_STATS_LEN; i++, stat++)
		ethtool_sprintf(data, stat->string, idx);
}

static void fbnic_get_rxb_dequeue_strings(u8 **data, unsigned int idx)
{
	const struct fbnic_stat *stat;
	int i;

	stat = fbnic_gstrings_rxb_dequeue_stats;
	for (i = 0; i < FBNIC_HW_RXB_DEQUEUE_STATS_LEN; i++, stat++)
		ethtool_sprintf(data, stat->string, idx);
}

static void fbnic_get_xdp_queue_strings(u8 **data, unsigned int idx)
{
	const struct fbnic_stat *stat;
	int i;

	stat = fbnic_gstrings_xdp_stats;
	for (i = 0; i < FBNIC_XDP_STATS_LEN; i++, stat++)
		ethtool_sprintf(data, stat->string, idx);
}

static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	const struct fbnic_stat *stat;
	int i, idx;

	switch (sset) {
	case ETH_SS_STATS:
		for (i = 0; i < FBNIC_HW_FIXED_STATS_LEN; i++)
			ethtool_puts(&data, fbnic_gstrings_hw_stats[i].string);

		for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++)
			fbnic_get_rxb_enqueue_strings(&data, i);

		for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
			fbnic_get_rxb_fifo_strings(&data, i);

		for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++)
			fbnic_get_rxb_dequeue_strings(&data, i);

		for (idx = 0; idx < FBNIC_MAX_QUEUES; idx++) {
			stat = fbnic_gstrings_hw_q_stats;

			for (i = 0; i < FBNIC_HW_Q_STATS_LEN; i++, stat++)
				ethtool_sprintf(&data, stat->string, idx);
		}

		for (i = 0; i < FBNIC_MAX_XDPQS; i++)
			fbnic_get_xdp_queue_strings(&data, i);
		break;
	}
}

static void fbnic_report_hw_stats(const struct fbnic_stat *stat,
				  const void *base, int len, u64 **data)
{
	while (len--) {
		u8 *curr = (u8 *)base + stat->offset;

		**data = *(u64 *)curr;

		stat++;
		(*data)++;
	}
}

static void fbnic_get_xdp_queue_stats(struct fbnic_ring *ring, u64 **data)
{
	const struct fbnic_stat *stat;
	int i;

	if (!ring) {
		*data += FBNIC_XDP_STATS_LEN;
		return;
	}

	stat = fbnic_gstrings_xdp_stats;
	for (i = 0; i < FBNIC_XDP_STATS_LEN; i++, stat++, (*data)++) {
		u8 *p = (u8 *)ring + stat->offset;

		**data = *(u64 *)p;
	}
}

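/* Refresh the device-wide counters, then copy every stats group out
 * under hw_stats.lock so a single coherent snapshot is reported before
 * the per-XDP-queue counters are appended.
 */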
static void fbnic_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct fbnic_net *fbn = netdev_priv(dev);
	struct fbnic_dev *fbd = fbn->fbd;
	int i;

	fbnic_get_hw_stats(fbn->fbd);

	spin_lock(&fbd->hw_stats.lock);
	fbnic_report_hw_stats(fbnic_gstrings_hw_stats, &fbd->hw_stats,
			      FBNIC_HW_FIXED_STATS_LEN, &data);

	for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++) {
		const struct fbnic_rxb_enqueue_stats *enq;

		enq = &fbd->hw_stats.rxb.enq[i];
		fbnic_report_hw_stats(fbnic_gstrings_rxb_enqueue_stats,
				      enq, FBNIC_HW_RXB_ENQUEUE_STATS_LEN,
				      &data);
	}

	for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) {
		const struct fbnic_rxb_fifo_stats *fifo;

		fifo = &fbd->hw_stats.rxb.fifo[i];
		fbnic_report_hw_stats(fbnic_gstrings_rxb_fifo_stats,
				      fifo, FBNIC_HW_RXB_FIFO_STATS_LEN,
				      &data);
	}

	for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++) {
		const struct fbnic_rxb_dequeue_stats *deq;

		deq = &fbd->hw_stats.rxb.deq[i];
		fbnic_report_hw_stats(fbnic_gstrings_rxb_dequeue_stats,
				      deq, FBNIC_HW_RXB_DEQUEUE_STATS_LEN,
				      &data);
	}

	for (i = 0; i < FBNIC_MAX_QUEUES; i++) {
		const struct fbnic_hw_q_stats *hw_q = &fbd->hw_stats.hw_q[i];

		fbnic_report_hw_stats(fbnic_gstrings_hw_q_stats, hw_q,
				      FBNIC_HW_Q_STATS_LEN, &data);
	}
	spin_unlock(&fbd->hw_stats.lock);

	for (i = 0; i < FBNIC_MAX_XDPQS; i++)
		fbnic_get_xdp_queue_stats(fbn->tx[i + FBNIC_MAX_TXQS], &data);
}

static int fbnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return FBNIC_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static int fbnic_get_rss_hash_idx(u32 flow_type)
{
	switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
	case TCP_V4_FLOW:
		return FBNIC_TCP4_HASH_OPT;
	case TCP_V6_FLOW:
		return FBNIC_TCP6_HASH_OPT;
	case UDP_V4_FLOW:
		return FBNIC_UDP4_HASH_OPT;
	case UDP_V6_FLOW:
		return FBNIC_UDP6_HASH_OPT;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case IPV4_FLOW:
	case IPV4_USER_FLOW:
		return FBNIC_IPV4_HASH_OPT;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case SCTP_V6_FLOW:
	case IPV6_FLOW:
	case IPV6_USER_FLOW:
		return FBNIC_IPV6_HASH_OPT;
	case ETHER_FLOW:
		return FBNIC_ETHER_HASH_OPT;
	}

	return -1;
}

static int fbnic_get_cls_rule_all(struct fbnic_net *fbn,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct fbnic_dev *fbd = fbn->fbd;
	int i, cnt = 0;

	/* Report maximum rule count */
	cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;

	for (i = 0; i < FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i++) {
		int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
		struct fbnic_act_tcam *act_tcam;

		act_tcam = &fbd->act_tcam[idx];
		if (act_tcam->state != FBNIC_TCAM_S_VALID)
			continue;

		if (rule_locs) {
			if (cnt == cmd->rule_cnt)
				return -EMSGSIZE;

			rule_locs[cnt] = i;
		}

		cnt++;
	}

	return cnt;
}

static int fbnic_get_cls_rule(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_act_tcam *act_tcam;
	int idx;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
		return -EINVAL;

	idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
	act_tcam = &fbd->act_tcam[idx];

	if (act_tcam->state != FBNIC_TCAM_S_VALID)
		return -EINVAL;

	/* Report maximum rule count */
	cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;

	/* Set flow type field */
	if (!(act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_VALID)) {
		fsp->flow_type = ETHER_FLOW;
		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
			       act_tcam->mask.tcam[1])) {
			struct fbnic_mac_addr *mac_addr;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
					act_tcam->value.tcam[1]);
			mac_addr = &fbd->mac_addr[idx];

			ether_addr_copy(fsp->h_u.ether_spec.h_dest,
					mac_addr->value.addr8);
			eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
		}
	} else if (act_tcam->value.tcam[1] &
		   FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID) {
		fsp->flow_type = IPV6_USER_FLOW;
		fsp->h_u.usr_ip6_spec.l4_proto = IPPROTO_IPV6;
		fsp->m_u.usr_ip6_spec.l4_proto = 0xff;

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ipo_src[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6src[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6src[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ipo_dst[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6dst[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6dst[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}
	} else if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_IS_V6)) {
		if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
			if (act_tcam->value.tcam[1] &
			    FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
				fsp->flow_type = UDP_V6_FLOW;
			else
				fsp->flow_type = TCP_V6_FLOW;
			fsp->h_u.tcp_ip6_spec.psrc =
				cpu_to_be16(act_tcam->value.tcam[3]);
			fsp->m_u.tcp_ip6_spec.psrc =
				cpu_to_be16(~act_tcam->mask.tcam[3]);
			fsp->h_u.tcp_ip6_spec.pdst =
				cpu_to_be16(act_tcam->value.tcam[4]);
			fsp->m_u.tcp_ip6_spec.pdst =
				cpu_to_be16(~act_tcam->mask.tcam[4]);
		} else {
			fsp->flow_type = IPV6_USER_FLOW;
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_src[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6src[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6src[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_dst[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6dst[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6dst[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}
	} else {
		if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
			if (act_tcam->value.tcam[1] &
			    FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
				fsp->flow_type = UDP_V4_FLOW;
			else
				fsp->flow_type = TCP_V4_FLOW;
			fsp->h_u.tcp_ip4_spec.psrc =
				cpu_to_be16(act_tcam->value.tcam[3]);
			fsp->m_u.tcp_ip4_spec.psrc =
				cpu_to_be16(~act_tcam->mask.tcam[3]);
			fsp->h_u.tcp_ip4_spec.pdst =
				cpu_to_be16(act_tcam->value.tcam[4]);
			fsp->m_u.tcp_ip4_spec.pdst =
				cpu_to_be16(~act_tcam->mask.tcam[4]);
		} else {
			fsp->flow_type = IPV4_USER_FLOW;
			fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_src[idx];

			fsp->h_u.usr_ip4_spec.ip4src =
				ip_addr->value.s6_addr32[3];
			fsp->m_u.usr_ip4_spec.ip4src =
				~ip_addr->mask.s6_addr32[3];
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_dst[idx];

			fsp->h_u.usr_ip4_spec.ip4dst =
				ip_addr->value.s6_addr32[3];
			fsp->m_u.usr_ip4_spec.ip4dst =
				~ip_addr->mask.s6_addr32[3];
		}
	}

	/* Record action */
	if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_DROP)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_Q_SEL)
		fsp->ring_cookie = FIELD_GET(FBNIC_RPC_ACT_TBL0_Q_ID,
					     act_tcam->dest);
	else
		fsp->flow_type |= FLOW_RSS;

	cmd->rss_context = FIELD_GET(FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID,
				     act_tcam->dest);

	return 0;
}

static int fbnic_get_rxnfc(struct net_device *netdev,
			   struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int ret = -EOPNOTSUPP;
	u32 special = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = fbn->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = fbnic_get_cls_rule(fbn, cmd);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		rule_locs = NULL;
		special = RX_CLS_LOC_SPECIAL;
		fallthrough;
	case ETHTOOL_GRXCLSRLALL:
		ret = fbnic_get_cls_rule_all(fbn, cmd, rule_locs);
		if (ret < 0)
			break;

		cmd->data |= special;
		cmd->rule_cnt = ret;
		ret = 0;
		break;
	}

	return ret;
}

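/* Scan from the top of the NFC region down and return the highest-index
 * free slot, or -ENOSPC when every entry is already in use.
 */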
static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd)
{
	int i;

	for (i = FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i--;) {
		int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;

		if (fbd->act_tcam[idx].state != FBNIC_TCAM_S_VALID)
			return i;
	}

	return -ENOSPC;
}

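/* Build one action TCAM entry from an ethtool flow spec. Mask words
 * start as all ones ("don't care") and bits are cleared for each field
 * that must match: word 0 carries the IP-address table indices, word 1
 * the flow/protocol flags, word 2 misc matches, and words 3/4 the L4
 * source/destination ports.
 */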
static int fbnic_set_cls_rule_ins(struct fbnic_net *fbn,
				  const struct ethtool_rxnfc *cmd)
{
	u16 flow_value = 0, flow_mask = 0xffff, ip_value = 0, ip_mask = 0xffff;
	u16 sport = 0, sport_mask = ~0, dport = 0, dport_mask = ~0;
	u16 misc = 0, misc_mask = ~0;
	u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
			      FBNIC_RPC_ACT_TBL0_DEST_HOST);
	struct fbnic_ip_addr *ip_src = NULL, *ip_dst = NULL;
	struct fbnic_mac_addr *mac_addr = NULL;
	struct ethtool_rx_flow_spec *fsp;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_act_tcam *act_tcam;
	struct in6_addr *addr6, *mask6;
	struct in_addr *addr4, *mask4;
	int hash_idx, location;
	u32 flow_type;
	int idx, j;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location != RX_CLS_LOC_ANY)
		return -EINVAL;
	location = fbnic_cls_rule_any_loc(fbd);
	if (location < 0)
		return location;

	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		dest = FBNIC_RPC_ACT_TBL0_DROP;
	} else if (fsp->flow_type & FLOW_RSS) {
		if (cmd->rss_context == 1)
			dest |= FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID;
	} else {
		u32 ring_idx = ethtool_get_flow_spec_ring(fsp->ring_cookie);

		if (ring_idx >= fbn->num_rx_queues)
			return -EINVAL;

		dest |= FBNIC_RPC_ACT_TBL0_Q_SEL |
			FIELD_PREP(FBNIC_RPC_ACT_TBL0_Q_ID, ring_idx);
	}

	idx = location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
	act_tcam = &fbd->act_tcam[idx];

	/* Do not allow overwriting for now.
	 * To support overwriting rules we will need to add logic to free
	 * any IP or MACDA TCAMs that may be associated with the old rule.
	 */
	if (act_tcam->state != FBNIC_TCAM_S_DISABLED)
		return -EBUSY;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_RSS);
	hash_idx = fbnic_get_rss_hash_idx(flow_type);

	switch (flow_type) {
	case UDP_V4_FLOW:
udp4_flow:
		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
		fallthrough;
	case TCP_V4_FLOW:
tcp4_flow:
		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
			       FBNIC_RPC_TCAM_ACT1_L4_VALID);

		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
		sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
		dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
		goto ip4_flow;
	case IP_USER_FLOW:
		if (!fsp->m_u.usr_ip4_spec.proto)
			goto ip4_flow;
		if (fsp->m_u.usr_ip4_spec.proto != 0xff)
			return -EINVAL;
		if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_UDP)
			goto udp4_flow;
		if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_TCP)
			goto tcp4_flow;
		return -EINVAL;
ip4_flow:
		addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4src;
		mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4src;
		if (mask4->s_addr) {
			ip_src = __fbnic_ip4_sync(fbd, fbd->ip_src,
						  addr4, mask4);
			if (!ip_src)
				return -ENOSPC;

			set_bit(idx, ip_src->act_tcam);
			ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
					       ip_src - fbd->ip_src);
			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
				     FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
		}

		addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4dst;
		mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4dst;
		if (mask4->s_addr) {
			ip_dst = __fbnic_ip4_sync(fbd, fbd->ip_dst,
						  addr4, mask4);
			if (!ip_dst) {
				if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
					memset(ip_src, 0, sizeof(*ip_src));
				return -ENOSPC;
			}

			set_bit(idx, ip_dst->act_tcam);
			ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
					       ip_dst - fbd->ip_dst);
			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
				     FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
		}
		flow_value |= FBNIC_RPC_TCAM_ACT1_IP_VALID |
			      FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
			       FBNIC_RPC_TCAM_ACT1_IP_VALID |
			       FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
		break;
	case UDP_V6_FLOW:
udp6_flow:
		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
		fallthrough;
	case TCP_V6_FLOW:
tcp6_flow:
		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
			       FBNIC_RPC_TCAM_ACT1_L4_VALID);

		sport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.psrc);
		sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.psrc);
		dport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.pdst);
		dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.pdst);
		goto ipv6_flow;
	case IPV6_USER_FLOW:
		if (!fsp->m_u.usr_ip6_spec.l4_proto)
			goto ipv6_flow;

		if (fsp->m_u.usr_ip6_spec.l4_proto != 0xff)
			return -EINVAL;
		if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_UDP)
			goto udp6_flow;
		if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_TCP)
			goto tcp6_flow;
		if (fsp->h_u.usr_ip6_spec.l4_proto != IPPROTO_IPV6)
			return -EINVAL;

		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
		if (!ipv6_addr_any(mask6)) {
			ip_src = __fbnic_ip6_sync(fbd, fbd->ipo_src,
						  addr6, mask6);
			if (!ip_src)
				return -ENOSPC;

			set_bit(idx, ip_src->act_tcam);
			ip_value |=
				FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
				FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
					   ip_src - fbd->ipo_src);
			ip_mask &=
				~(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
				  FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX);
		}

		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
		if (!ipv6_addr_any(mask6)) {
			ip_dst = __fbnic_ip6_sync(fbd, fbd->ipo_dst,
						  addr6, mask6);
			if (!ip_dst) {
				if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
					memset(ip_src, 0, sizeof(*ip_src));
				return -ENOSPC;
			}

			set_bit(idx, ip_dst->act_tcam);
			ip_value |=
				FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
				FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
					   ip_dst - fbd->ipo_dst);
			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
				     FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX);
		}

		flow_value |= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
		flow_mask &= ~FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
ipv6_flow:
		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
		if (!ip_src && !ipv6_addr_any(mask6)) {
			ip_src = __fbnic_ip6_sync(fbd, fbd->ip_src,
						  addr6, mask6);
			if (!ip_src)
				return -ENOSPC;

			set_bit(idx, ip_src->act_tcam);
			ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
					       ip_src - fbd->ip_src);
			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
				     FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
		}

		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
		if (!ip_dst && !ipv6_addr_any(mask6)) {
			ip_dst = __fbnic_ip6_sync(fbd, fbd->ip_dst,
						  addr6, mask6);
			if (!ip_dst) {
				if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
					memset(ip_src, 0, sizeof(*ip_src));
				return -ENOSPC;
			}

			set_bit(idx, ip_dst->act_tcam);
			ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
					       ip_dst - fbd->ip_dst);
			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
				     FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
		}

		flow_value |= FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
			      FBNIC_RPC_TCAM_ACT1_IP_VALID |
			      FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
			       FBNIC_RPC_TCAM_ACT1_IP_VALID |
			       FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
		break;
	case ETHER_FLOW:
		if (!is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) {
			u8 *addr = fsp->h_u.ether_spec.h_dest;
			u8 *mask = fsp->m_u.ether_spec.h_dest;

			/* Do not allow MAC addr of 0 */
			if (is_zero_ether_addr(addr))
				return -EINVAL;

			/* Only support full MAC address to avoid
			 * conflicts with other MAC addresses.
			 */
			if (!is_broadcast_ether_addr(mask))
				return -EINVAL;

			if (is_multicast_ether_addr(addr))
				mac_addr = __fbnic_mc_sync(fbd, addr);
			else
				mac_addr = __fbnic_uc_sync(fbd, addr);

			if (!mac_addr)
				return -ENOSPC;

			set_bit(idx, mac_addr->act_tcam);
			flow_value |=
				FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
					   mac_addr - fbd->mac_addr);
			flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX;
		}

		flow_value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
		flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
		break;
	default:
		return -EINVAL;
	}

	/* Write action table values */
	act_tcam->dest = dest;
	act_tcam->rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, hash_idx);

	/* Write IP Match value/mask to action_tcam[0] */
	act_tcam->value.tcam[0] = ip_value;
	act_tcam->mask.tcam[0] = ip_mask;

	/* Write flow type value/mask to action_tcam[1] */
	act_tcam->value.tcam[1] = flow_value;
	act_tcam->mask.tcam[1] = flow_mask;

	/* Write error, DSCP, extra L4 matches to action_tcam[2] */
	act_tcam->value.tcam[2] = misc;
	act_tcam->mask.tcam[2] = misc_mask;

	/* Write source/destination port values */
	act_tcam->value.tcam[3] = sport;
	act_tcam->mask.tcam[3] = sport_mask;
	act_tcam->value.tcam[4] = dport;
	act_tcam->mask.tcam[4] = dport_mask;

	for (j = 5; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
		act_tcam->mask.tcam[j] = 0xffff;

	act_tcam->state = FBNIC_TCAM_S_UPDATE;
	fsp->location = location;

	if (netif_running(fbn->netdev)) {
		fbnic_write_rules(fbd);
		if (ip_src || ip_dst)
			fbnic_write_ip_addr(fbd);
		if (mac_addr)
			fbnic_write_macda(fbd);
	}

	return 0;
}

static void fbnic_clear_nfc_macda(struct fbnic_net *fbn,
				  unsigned int tcam_idx)
{
	struct fbnic_dev *fbd = fbn->fbd;
	int idx;

	for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;)
		__fbnic_xc_unsync(&fbd->mac_addr[idx], tcam_idx);

	/* Write updates to hardware */
	if (netif_running(fbn->netdev))
		fbnic_write_macda(fbd);
}

static void fbnic_clear_nfc_ip_addr(struct fbnic_net *fbn,
				    unsigned int tcam_idx)
{
	struct fbnic_dev *fbd = fbn->fbd;
	int idx;

	for (idx = ARRAY_SIZE(fbd->ip_src); idx--;)
		__fbnic_ip_unsync(&fbd->ip_src[idx], tcam_idx);
	for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;)
		__fbnic_ip_unsync(&fbd->ip_dst[idx], tcam_idx);
	for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;)
		__fbnic_ip_unsync(&fbd->ipo_src[idx], tcam_idx);
	for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;)
		__fbnic_ip_unsync(&fbd->ipo_dst[idx], tcam_idx);

	/* Write updates to hardware */
	if (netif_running(fbn->netdev))
		fbnic_write_ip_addr(fbd);
}

static int fbnic_set_cls_rule_del(struct fbnic_net *fbn,
				  const struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_act_tcam *act_tcam;
	int idx;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
		return -EINVAL;

	idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
	act_tcam = &fbd->act_tcam[idx];

	if (act_tcam->state != FBNIC_TCAM_S_VALID)
		return -EINVAL;

	act_tcam->state = FBNIC_TCAM_S_DELETE;

	if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID) &&
	    (~act_tcam->mask.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX))
		fbnic_clear_nfc_macda(fbn, idx);

	if ((act_tcam->value.tcam[0] &
	     (FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
	      FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
	      FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
	      FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID)) &&
	    (~act_tcam->mask.tcam[0] &
	     (FBNIC_RPC_TCAM_ACT0_IPSRC_IDX |
	      FBNIC_RPC_TCAM_ACT0_IPDST_IDX |
	      FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX |
	      FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX)))
		fbnic_clear_nfc_ip_addr(fbn, idx);

	if (netif_running(fbn->netdev))
		fbnic_write_rules(fbd);

	return 0;
}

static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = fbnic_set_cls_rule_ins(fbn, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = fbnic_set_cls_rule_del(fbn, cmd);
		break;
	}

	return ret;
}

static u32 fbnic_get_rxfh_key_size(struct net_device *netdev)
{
	return FBNIC_RPC_RSS_KEY_BYTE_LEN;
}

static u32 fbnic_get_rxfh_indir_size(struct net_device *netdev)
{
	return FBNIC_RPC_RSS_TBL_SIZE;
}

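/* The RSS key is stored as big-endian u32 words: byte 0 of the ethtool
 * key is the most significant byte of rss_key[0]. The shifts below pack
 * and unpack one byte of that layout at a time.
 */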
static int
fbnic_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	unsigned int i;

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (rxfh->key) {
		for (i = 0; i < FBNIC_RPC_RSS_KEY_BYTE_LEN; i++) {
			u32 rss_key = fbn->rss_key[i / 4] << ((i % 4) * 8);

			rxfh->key[i] = rss_key >> 24;
		}
	}

	if (rxfh->indir) {
		for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
			rxfh->indir[i] = fbn->indir_tbl[0][i];
	}

	return 0;
}

static unsigned int
fbnic_set_indir(struct fbnic_net *fbn, unsigned int idx, const u32 *indir)
{
	unsigned int i, changes = 0;

	for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
		if (fbn->indir_tbl[idx][i] == indir[i])
			continue;

		fbn->indir_tbl[idx][i] = indir[i];
		changes++;
	}

	return changes;
}

static int
fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
	       struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	unsigned int i, changes = 0;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EINVAL;

	if (rxfh->key) {
		u32 rss_key = 0;

		for (i = FBNIC_RPC_RSS_KEY_BYTE_LEN; i--;) {
			rss_key >>= 8;
			rss_key |= (u32)(rxfh->key[i]) << 24;

			if (i % 4)
				continue;

			if (fbn->rss_key[i / 4] == rss_key)
				continue;

			fbn->rss_key[i / 4] = rss_key;
			changes++;
		}
	}

	if (rxfh->indir)
		changes += fbnic_set_indir(fbn, 0, rxfh->indir);

	if (changes && netif_running(netdev))
		fbnic_rss_reinit_hw(fbn->fbd, fbn);

	return 0;
}

static int
fbnic_get_rss_hash_opts(struct net_device *netdev,
			struct ethtool_rxfh_fields *cmd)
{
	int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
	struct fbnic_net *fbn = netdev_priv(netdev);

	if (hash_opt_idx < 0)
		return -EINVAL;

	/* Report options from rss_en table in fbn */
	cmd->data = fbn->rss_flow_hash[hash_opt_idx];

	return 0;
}

#define FBNIC_L2_HASH_OPTIONS \
	(RXH_L2DA | RXH_DISCARD)
#define FBNIC_L3_HASH_OPTIONS \
	(FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL)
#define FBNIC_L4_HASH_OPTIONS \
	(FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)

static int
fbnic_set_rss_hash_opts(struct net_device *netdev,
			const struct ethtool_rxfh_fields *cmd,
			struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int hash_opt_idx;

	/* Verify the type requested is correct */
	hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
	if (hash_opt_idx < 0)
		return -EINVAL;

	/* Verify the fields asked for can actually be assigned based on type */
	if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
	    (hash_opt_idx > FBNIC_L4_HASH_OPT &&
	     cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
	    (hash_opt_idx > FBNIC_IP_HASH_OPT &&
	     cmd->data & ~FBNIC_L2_HASH_OPTIONS))
		return -EINVAL;

	fbn->rss_flow_hash[hash_opt_idx] = cmd->data;

	if (netif_running(fbn->netdev)) {
		fbnic_rss_reinit(fbn->fbd, fbn);
		fbnic_write_rules(fbn->fbd);
	}

	return 0;
}

static int
fbnic_modify_rxfh_context(struct net_device *netdev,
			  struct ethtool_rxfh_context *ctx,
			  const struct ethtool_rxfh_param *rxfh,
			  struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	const u32 *indir = rxfh->indir;
	unsigned int changes;

	if (!indir)
		indir = ethtool_rxfh_context_indir(ctx);

	changes = fbnic_set_indir(fbn, rxfh->rss_context, indir);
	if (changes && netif_running(netdev))
		fbnic_rss_reinit_hw(fbn->fbd, fbn);

	return 0;
}

static int
fbnic_create_rxfh_context(struct net_device *netdev,
			  struct ethtool_rxfh_context *ctx,
			  const struct ethtool_rxfh_param *rxfh,
			  struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
		NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
		return -EOPNOTSUPP;
	}
	ctx->hfunc = ETH_RSS_HASH_TOP;

	if (!rxfh->indir) {
		u32 *indir = ethtool_rxfh_context_indir(ctx);
		unsigned int num_rx = fbn->num_rx_queues;
		unsigned int i;

		for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
			indir[i] = ethtool_rxfh_indir_default(i, num_rx);
	}

	return fbnic_modify_rxfh_context(netdev, ctx, rxfh, extack);
}

static int
fbnic_remove_rxfh_context(struct net_device *netdev,
			  struct ethtool_rxfh_context *ctx, u32 rss_context,
			  struct netlink_ext_ack *extack)
{
	/* Nothing to do, contexts are allocated statically */
	return 0;
}

static void fbnic_get_channels(struct net_device *netdev,
			       struct ethtool_channels *ch)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;

	ch->max_rx = fbd->max_num_queues;
	ch->max_tx = fbd->max_num_queues;
	ch->max_combined = min(ch->max_rx, ch->max_tx);
	ch->max_other = FBNIC_NON_NAPI_VECTORS;

	if (fbn->num_rx_queues > fbn->num_napi ||
	    fbn->num_tx_queues > fbn->num_napi)
		ch->combined_count = min(fbn->num_rx_queues,
					 fbn->num_tx_queues);
	else
		ch->combined_count =
			fbn->num_rx_queues + fbn->num_tx_queues - fbn->num_napi;
	ch->rx_count = fbn->num_rx_queues - ch->combined_count;
	ch->tx_count = fbn->num_tx_queues - ch->combined_count;
	ch->other_count = FBNIC_NON_NAPI_VECTORS;
}

static void fbnic_set_queues(struct fbnic_net *fbn, struct ethtool_channels *ch,
			     unsigned int max_napis)
{
	fbn->num_rx_queues = ch->rx_count + ch->combined_count;
	fbn->num_tx_queues = ch->tx_count + ch->combined_count;
	fbn->num_napi = min(ch->rx_count + ch->tx_count + ch->combined_count,
			    max_napis);
}

static int fbnic_set_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	unsigned int max_napis, standalone;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_net *clone;
	int err;

	max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
	standalone = ch->rx_count + ch->tx_count;

	/* Limits for standalone queues:
	 *  - each standalone queue needs its own NAPI
	 *    (num_napi >= rx + tx + combined)
	 *  - combined cannot be mixed with both standalone types
	 *    (if combined is nonzero, rx or tx must be 0)
	 */
	if ((ch->rx_count && ch->tx_count && ch->combined_count) ||
	    (standalone && standalone + ch->combined_count > max_napis) ||
	    ch->rx_count + ch->combined_count > fbd->max_num_queues ||
	    ch->tx_count + ch->combined_count > fbd->max_num_queues ||
	    ch->other_count != FBNIC_NON_NAPI_VECTORS)
		return -EINVAL;

	if (!netif_running(netdev)) {
		fbnic_set_queues(fbn, ch, max_napis);
		fbnic_reset_indir_tbl(fbn);
		return 0;
	}

	clone = fbnic_clone_create(fbn);
	if (!clone)
		return -ENOMEM;

	fbnic_set_queues(clone, ch, max_napis);

	err = fbnic_alloc_napi_vectors(clone);
	if (err)
		goto err_free_clone;

	err = fbnic_alloc_resources(clone);
	if (err)
		goto err_free_napis;

	fbnic_down_noidle(fbn);
	err = fbnic_wait_all_queues_idle(fbn->fbd, true);
	if (err)
		goto err_start_stack;

	err = fbnic_set_netif_queues(clone);
	if (err)
		goto err_start_stack;

	/* Nothing can fail past this point */
	fbnic_flush(fbn);

	fbnic_clone_swap(fbn, clone);

	/* Reset RSS indirection table */
	fbnic_reset_indir_tbl(fbn);

	fbnic_up(fbn);

	fbnic_free_resources(clone);
	fbnic_free_napi_vectors(clone);
	fbnic_clone_free(clone);

	return 0;

err_start_stack:
	fbnic_flush(fbn);
	fbnic_up(fbn);
	fbnic_free_resources(clone);
err_free_napis:
	fbnic_free_napi_vectors(clone);
err_free_clone:
	fbnic_clone_free(clone);
	return err;
}

static int
fbnic_get_ts_info(struct net_device *netdev,
		  struct kernel_ethtool_ts_info *tsinfo)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);

	tsinfo->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;

	tsinfo->tx_types =
		BIT(HWTSTAMP_TX_OFF) |
		BIT(HWTSTAMP_TX_ON);

	tsinfo->rx_filters =
		BIT(HWTSTAMP_FILTER_NONE) |
		BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
		BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
		BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
		BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

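/* Totals start from the netdev-level aggregate (rings torn down by a
 * prior reconfiguration already folded their counters there) and then
 * add the live rings, using the u64_stats retry loop so readers get a
 * consistent pair even on 32-bit hosts.
 */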
static void fbnic_get_ts_stats(struct net_device *netdev,
			       struct ethtool_ts_stats *ts_stats)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	u64 ts_packets, ts_lost;
	struct fbnic_ring *ring;
	unsigned int start;
	int i;

	ts_stats->pkts = fbn->tx_stats.twq.ts_packets;
	ts_stats->lost = fbn->tx_stats.twq.ts_lost;
	for (i = 0; i < fbn->num_tx_queues; i++) {
		ring = fbn->tx[i];
		do {
			start = u64_stats_fetch_begin(&ring->stats.syncp);
			ts_packets = ring->stats.twq.ts_packets;
			ts_lost = ring->stats.twq.ts_lost;
		} while (u64_stats_fetch_retry(&ring->stats.syncp, start));
		ts_stats->pkts += ts_packets;
		ts_stats->lost += ts_lost;
	}
}

static int
fbnic_get_module_eeprom_by_page(struct net_device *netdev,
				const struct ethtool_module_eeprom *page_data,
				struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_fw_completion *fw_cmpl;
	struct fbnic_dev *fbd = fbn->fbd;
	int err;

	if (page_data->i2c_address != 0x50) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid i2c address. Only 0x50 is supported");
		return -EINVAL;
	}

	fw_cmpl = __fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_QSFP_READ_RESP,
					page_data->length);
	if (!fw_cmpl)
		return -ENOMEM;

	/* Initialize completion and queue it for FW to process */
	fw_cmpl->u.qsfp.length = page_data->length;
	fw_cmpl->u.qsfp.offset = page_data->offset;
	fw_cmpl->u.qsfp.page = page_data->page;
	fw_cmpl->u.qsfp.bank = page_data->bank;

	err = fbnic_fw_xmit_qsfp_read_msg(fbd, fw_cmpl, page_data->page,
					  page_data->bank, page_data->offset,
					  page_data->length);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to transmit EEPROM read request");
		goto exit_free;
	}

	if (!wait_for_completion_timeout(&fw_cmpl->done, 2 * HZ)) {
		err = -ETIMEDOUT;
		NL_SET_ERR_MSG_MOD(extack,
				   "Timed out waiting for firmware response");
		goto exit_cleanup;
	}

	if (fw_cmpl->result) {
		err = fw_cmpl->result;
		NL_SET_ERR_MSG_MOD(extack, "Failed to read EEPROM");
		goto exit_cleanup;
	}

	memcpy(page_data->data, fw_cmpl->u.qsfp.data, page_data->length);

exit_cleanup:
	fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
exit_free:
	fbnic_fw_put_cmpl(fw_cmpl);

	return err ? : page_data->length;
}

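/* Only overwrite an ethtool counter when the source actually reported a
 * value, so "never reported" stays distinguishable from a real zero.
 */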
static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
{
	if (counter->reported)
		*stat = counter->value;
}

static void
fbnic_get_pause_stats(struct net_device *netdev,
		      struct ethtool_pause_stats *pause_stats)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_mac_stats *mac_stats;
	struct fbnic_dev *fbd = fbn->fbd;

	mac_stats = &fbd->hw_stats.mac;

	fbd->mac->get_pause_stats(fbd, false, &mac_stats->pause);

	pause_stats->tx_pause_frames = mac_stats->pause.tx_pause_frames.value;
	pause_stats->rx_pause_frames = mac_stats->pause.rx_pause_frames.value;
}

static void
fbnic_get_fec_stats(struct net_device *netdev,
		    struct ethtool_fec_stats *fec_stats,
		    struct ethtool_fec_hist *hist)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_phy_stats *phy_stats;
	struct fbnic_dev *fbd = fbn->fbd;

	fbnic_get_hw_stats32(fbd);
	phy_stats = &fbd->hw_stats.phy;

	spin_lock(&fbd->hw_stats.lock);
	fec_stats->corrected_blocks.total =
		phy_stats->fec.corrected_blocks.value;
	fec_stats->uncorrectable_blocks.total =
		phy_stats->fec.uncorrectable_blocks.value;
	spin_unlock(&fbd->hw_stats.lock);
}

static void
fbnic_get_eth_phy_stats(struct net_device *netdev,
			struct ethtool_eth_phy_stats *eth_phy_stats)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_phy_stats *phy_stats;
	struct fbnic_dev *fbd = fbn->fbd;
	u64 total = 0;
	int i;

	fbnic_get_hw_stats32(fbd);
	phy_stats = &fbd->hw_stats.phy;

	spin_lock(&fbd->hw_stats.lock);
	for (i = 0; i < FBNIC_PCS_MAX_LANES; i++)
		total += phy_stats->pcs.SymbolErrorDuringCarrier.lanes[i].value;

	eth_phy_stats->SymbolErrorDuringCarrier = total;
	spin_unlock(&fbd->hw_stats.lock);
}

static void
fbnic_get_eth_mac_stats(struct net_device *netdev,
			struct ethtool_eth_mac_stats *eth_mac_stats)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_mac_stats *mac_stats;
	struct fbnic_dev *fbd = fbn->fbd;
	const struct fbnic_mac *mac;

	mac_stats = &fbd->hw_stats.mac;
	mac = fbd->mac;

	mac->get_eth_mac_stats(fbd, false, &mac_stats->eth_mac);

	fbnic_set_counter(&eth_mac_stats->FramesTransmittedOK,
			  &mac_stats->eth_mac.FramesTransmittedOK);
	fbnic_set_counter(&eth_mac_stats->FramesReceivedOK,
			  &mac_stats->eth_mac.FramesReceivedOK);
	fbnic_set_counter(&eth_mac_stats->FrameCheckSequenceErrors,
			  &mac_stats->eth_mac.FrameCheckSequenceErrors);
	fbnic_set_counter(&eth_mac_stats->AlignmentErrors,
			  &mac_stats->eth_mac.AlignmentErrors);
	fbnic_set_counter(&eth_mac_stats->OctetsTransmittedOK,
			  &mac_stats->eth_mac.OctetsTransmittedOK);
	fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACXmitError,
			  &mac_stats->eth_mac.FramesLostDueToIntMACXmitError);
	fbnic_set_counter(&eth_mac_stats->OctetsReceivedOK,
			  &mac_stats->eth_mac.OctetsReceivedOK);
	fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACRcvError,
			  &mac_stats->eth_mac.FramesLostDueToIntMACRcvError);
	fbnic_set_counter(&eth_mac_stats->MulticastFramesXmittedOK,
			  &mac_stats->eth_mac.MulticastFramesXmittedOK);
	fbnic_set_counter(&eth_mac_stats->BroadcastFramesXmittedOK,
			  &mac_stats->eth_mac.BroadcastFramesXmittedOK);
	fbnic_set_counter(&eth_mac_stats->MulticastFramesReceivedOK,
			  &mac_stats->eth_mac.MulticastFramesReceivedOK);
	fbnic_set_counter(&eth_mac_stats->BroadcastFramesReceivedOK,
			  &mac_stats->eth_mac.BroadcastFramesReceivedOK);
	fbnic_set_counter(&eth_mac_stats->FrameTooLongErrors,
			  &mac_stats->eth_mac.FrameTooLongErrors);
}

static void
fbnic_get_eth_ctrl_stats(struct net_device *netdev,
			 struct ethtool_eth_ctrl_stats *eth_ctrl_stats)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_mac_stats *mac_stats;
	struct fbnic_dev *fbd = fbn->fbd;

	mac_stats = &fbd->hw_stats.mac;

	fbd->mac->get_eth_ctrl_stats(fbd, false, &mac_stats->eth_ctrl);

	eth_ctrl_stats->MACControlFramesReceived =
		mac_stats->eth_ctrl.MACControlFramesReceived.value;
	eth_ctrl_stats->MACControlFramesTransmitted =
		mac_stats->eth_ctrl.MACControlFramesTransmitted.value;
}

static const struct ethtool_rmon_hist_range fbnic_rmon_ranges[] = {
	{ 0, 64 },
	{ 65, 127 },
	{ 128, 255 },
	{ 256, 511 },
	{ 512, 1023 },
	{ 1024, 1518 },
	{ 1519, 2047 },
	{ 2048, 4095 },
	{ 4096, 8191 },
	{ 8192, 9216 },
	{ 9217, FBNIC_MAX_JUMBO_FRAME_SIZE },
	{}
};

static void
fbnic_get_rmon_stats(struct net_device *netdev,
		     struct ethtool_rmon_stats *rmon_stats,
		     const struct ethtool_rmon_hist_range **ranges)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_mac_stats *mac_stats;
	struct fbnic_dev *fbd = fbn->fbd;
	int i;

	mac_stats = &fbd->hw_stats.mac;

	fbd->mac->get_rmon_stats(fbd, false, &mac_stats->rmon);

	rmon_stats->undersize_pkts =
		mac_stats->rmon.undersize_pkts.value;
	rmon_stats->oversize_pkts =
		mac_stats->rmon.oversize_pkts.value;
	rmon_stats->fragments =
		mac_stats->rmon.fragments.value;
	rmon_stats->jabbers =
		mac_stats->rmon.jabbers.value;

	for (i = 0; fbnic_rmon_ranges[i].high; i++) {
		rmon_stats->hist[i] = mac_stats->rmon.hist[i].value;
		rmon_stats->hist_tx[i] = mac_stats->rmon.hist_tx[i].value;
	}

	*ranges = fbnic_rmon_ranges;
}

static const struct ethtool_ops fbnic_ethtool_ops = {
	.cap_link_lanes_supported = true,
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
				 ETHTOOL_RING_USE_HDS_THRS,
	.rxfh_max_num_contexts = FBNIC_RPC_RSS_TBL_COUNT,
	.get_drvinfo = fbnic_get_drvinfo,
	.get_regs_len = fbnic_get_regs_len,
	.get_regs = fbnic_get_regs,
	.get_link = ethtool_op_get_link,
	.get_coalesce = fbnic_get_coalesce,
	.set_coalesce = fbnic_set_coalesce,
	.get_ringparam = fbnic_get_ringparam,
	.set_ringparam = fbnic_set_ringparam,
	.get_pause_stats = fbnic_get_pause_stats,
	.get_pauseparam = fbnic_phylink_get_pauseparam,
	.set_pauseparam = fbnic_phylink_set_pauseparam,
	.get_strings = fbnic_get_strings,
	.get_ethtool_stats = fbnic_get_ethtool_stats,
	.get_sset_count = fbnic_get_sset_count,
	.get_rxnfc = fbnic_get_rxnfc,
	.set_rxnfc = fbnic_set_rxnfc,
	.get_rxfh_key_size = fbnic_get_rxfh_key_size,
	.get_rxfh_indir_size = fbnic_get_rxfh_indir_size,
	.get_rxfh = fbnic_get_rxfh,
	.set_rxfh = fbnic_set_rxfh,
	.get_rxfh_fields = fbnic_get_rss_hash_opts,
	.set_rxfh_fields = fbnic_set_rss_hash_opts,
	.create_rxfh_context = fbnic_create_rxfh_context,
	.modify_rxfh_context = fbnic_modify_rxfh_context,
	.remove_rxfh_context = fbnic_remove_rxfh_context,
	.get_channels = fbnic_get_channels,
	.set_channels = fbnic_set_channels,
	.get_ts_info = fbnic_get_ts_info,
	.get_ts_stats = fbnic_get_ts_stats,
	.get_link_ksettings = fbnic_phylink_ethtool_ksettings_get,
	.get_fec_stats = fbnic_get_fec_stats,
	.get_fecparam = fbnic_phylink_get_fecparam,
	.get_module_eeprom_by_page = fbnic_get_module_eeprom_by_page,
	.get_eth_phy_stats = fbnic_get_eth_phy_stats,
	.get_eth_mac_stats = fbnic_get_eth_mac_stats,
	.get_eth_ctrl_stats = fbnic_get_eth_ctrl_stats,
	.get_rmon_stats = fbnic_get_rmon_stats,
};

void fbnic_set_ethtool_ops(struct net_device *dev)
{
	dev->ethtool_ops = &fbnic_ethtool_ops;
}