xref: /linux/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c (revision a34b0e4e21d6be3c3d620aa7f9dfbf0e9550c19e)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3 
4 #include <linux/ethtool.h>
5 #include <linux/ethtool_netlink.h>
6 #include <linux/netdevice.h>
7 #include <linux/pci.h>
8 #include <net/ipv6.h>
9 
10 #include "fbnic.h"
11 #include "fbnic_netdev.h"
12 #include "fbnic_tlv.h"
13 
/* Descriptor for one exported statistic: the ethtool string name plus
 * the size and byte offset of the backing counter within its stats
 * structure.  Offsets are resolved via FBNIC_STAT_FIELDS().
 */
struct fbnic_stat {
	u8 string[ETH_GSTRING_LEN];
	unsigned int size;
	unsigned int offset;
};
19 
/* Build a struct fbnic_stat initializer for field @stat of struct @type,
 * exported to ethtool under the string @name.
 */
#define FBNIC_STAT_FIELDS(type, name, stat) { \
	.string = name, \
	.size = sizeof_field(struct type, stat), \
	.offset = offsetof(struct type, stat), \
}

/* Hardware statistics not captured in rtnl_link_stats */
#define FBNIC_HW_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_hw_stats, name, stat)
29 
/* Fixed (non-indexed) hardware counters pulled from struct fbnic_hw_stats */
static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
	/* TTI */
	FBNIC_HW_STAT("tti_cm_drop_frames", tti.cm_drop.frames),
	FBNIC_HW_STAT("tti_cm_drop_bytes", tti.cm_drop.bytes),
	FBNIC_HW_STAT("tti_frame_drop_frames", tti.frame_drop.frames),
	FBNIC_HW_STAT("tti_frame_drop_bytes", tti.frame_drop.bytes),
	FBNIC_HW_STAT("tti_tbi_drop_frames", tti.tbi_drop.frames),
	FBNIC_HW_STAT("tti_tbi_drop_bytes", tti.tbi_drop.bytes),

	/* TMI */
	FBNIC_HW_STAT("ptp_illegal_req", tmi.ptp_illegal_req),
	FBNIC_HW_STAT("ptp_good_ts", tmi.ptp_good_ts),
	FBNIC_HW_STAT("ptp_bad_ts", tmi.ptp_bad_ts),

	/* RPC */
	FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype),
	FBNIC_HW_STAT("rpc_unkn_ext_hdr", rpc.unkn_ext_hdr),
	FBNIC_HW_STAT("rpc_ipv4_frag", rpc.ipv4_frag),
	FBNIC_HW_STAT("rpc_ipv6_frag", rpc.ipv6_frag),
	FBNIC_HW_STAT("rpc_ipv4_esp", rpc.ipv4_esp),
	FBNIC_HW_STAT("rpc_ipv6_esp", rpc.ipv6_esp),
	FBNIC_HW_STAT("rpc_tcp_opt_err", rpc.tcp_opt_err),
	FBNIC_HW_STAT("rpc_out_of_hdr_err", rpc.out_of_hdr_err),
};

#define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats)
56 
#define FBNIC_RXB_ENQUEUE_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_rxb_enqueue_stats, name, stat)

/* Per-enqueue-point RXB counters; the "%u" in each name is expanded
 * with the enqueue index when the strings are generated.
 */
static const struct fbnic_stat fbnic_gstrings_rxb_enqueue_stats[] = {
	FBNIC_RXB_ENQUEUE_STAT("rxb_integrity_err%u", integrity_err),
	FBNIC_RXB_ENQUEUE_STAT("rxb_mac_err%u", mac_err),
	FBNIC_RXB_ENQUEUE_STAT("rxb_parser_err%u", parser_err),
	FBNIC_RXB_ENQUEUE_STAT("rxb_frm_err%u", frm_err),

	FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_frames", drbo.frames),
	FBNIC_RXB_ENQUEUE_STAT("rxb_drbo%u_bytes", drbo.bytes),
};

#define FBNIC_HW_RXB_ENQUEUE_STATS_LEN \
	ARRAY_SIZE(fbnic_gstrings_rxb_enqueue_stats)
72 
#define FBNIC_RXB_FIFO_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_rxb_fifo_stats, name, stat)

/* Per-FIFO RXB counters; "%u" is filled with the FIFO index */
static const struct fbnic_stat fbnic_gstrings_rxb_fifo_stats[] = {
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_drop", trans_drop),
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_dropped_frames", drop.frames),
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_ecn", trans_ecn),
	FBNIC_RXB_FIFO_STAT("rxb_fifo%u_level", level),
};

#define FBNIC_HW_RXB_FIFO_STATS_LEN ARRAY_SIZE(fbnic_gstrings_rxb_fifo_stats)
84 
#define FBNIC_RXB_DEQUEUE_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_rxb_dequeue_stats, name, stat)

/* Per-dequeue-point RXB counters; "%u" is filled with the dequeue index */
static const struct fbnic_stat fbnic_gstrings_rxb_dequeue_stats[] = {
	FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_frames", intf.frames),
	FBNIC_RXB_DEQUEUE_STAT("rxb_intf%u_bytes", intf.bytes),
	FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_frames", pbuf.frames),
	FBNIC_RXB_DEQUEUE_STAT("rxb_pbuf%u_bytes", pbuf.bytes),
};

#define FBNIC_HW_RXB_DEQUEUE_STATS_LEN \
	ARRAY_SIZE(fbnic_gstrings_rxb_dequeue_stats)
97 
#define FBNIC_HW_Q_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_hw_q_stats, name, stat.value)

/* Per hardware queue counters; "%u" is filled with the queue index */
static const struct fbnic_stat fbnic_gstrings_hw_q_stats[] = {
	FBNIC_HW_Q_STAT("rde_%u_pkt_err", rde_pkt_err),
	FBNIC_HW_Q_STAT("rde_%u_pkt_cq_drop", rde_pkt_cq_drop),
	FBNIC_HW_Q_STAT("rde_%u_pkt_bdq_drop", rde_pkt_bdq_drop),
};

#define FBNIC_HW_Q_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_q_stats)
/* Total hardware stats reported: fixed stats plus every indexed family
 * multiplied by its instance count.
 */
#define FBNIC_HW_STATS_LEN \
	(FBNIC_HW_FIXED_STATS_LEN + \
	 FBNIC_HW_RXB_ENQUEUE_STATS_LEN * FBNIC_RXB_ENQUEUE_INDICES + \
	 FBNIC_HW_RXB_FIFO_STATS_LEN * FBNIC_RXB_FIFO_INDICES + \
	 FBNIC_HW_RXB_DEQUEUE_STATS_LEN * FBNIC_RXB_DEQUEUE_INDICES + \
	 FBNIC_HW_Q_STATS_LEN * FBNIC_MAX_QUEUES)
114 
#define FBNIC_QUEUE_STAT(name, stat) \
	FBNIC_STAT_FIELDS(fbnic_ring, name, stat)

/* Per XDP Tx queue counters read from struct fbnic_ring */
static const struct fbnic_stat fbnic_gstrings_xdp_stats[] = {
	FBNIC_QUEUE_STAT("xdp_tx_queue_%u_packets", stats.packets),
	FBNIC_QUEUE_STAT("xdp_tx_queue_%u_bytes", stats.bytes),
	FBNIC_QUEUE_STAT("xdp_tx_queue_%u_dropped", stats.dropped),
};

#define FBNIC_XDP_STATS_LEN ARRAY_SIZE(fbnic_gstrings_xdp_stats)

/* Grand total returned by fbnic_get_sset_count() for ETH_SS_STATS */
#define FBNIC_STATS_LEN \
	(FBNIC_HW_STATS_LEN + FBNIC_XDP_STATS_LEN * FBNIC_MAX_XDPQS)
128 
129 static void
130 fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
131 {
132 	struct fbnic_net *fbn = netdev_priv(netdev);
133 	struct fbnic_dev *fbd = fbn->fbd;
134 
135 	fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
136 				    sizeof(drvinfo->fw_version));
137 }
138 
139 static int fbnic_get_regs_len(struct net_device *netdev)
140 {
141 	struct fbnic_net *fbn = netdev_priv(netdev);
142 
143 	return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
144 }
145 
146 static void fbnic_get_regs(struct net_device *netdev,
147 			   struct ethtool_regs *regs, void *data)
148 {
149 	struct fbnic_net *fbn = netdev_priv(netdev);
150 
151 	fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
152 }
153 
154 static struct fbnic_net *fbnic_clone_create(struct fbnic_net *orig)
155 {
156 	struct fbnic_net *clone;
157 
158 	clone = kmemdup(orig, sizeof(*orig), GFP_KERNEL);
159 	if (!clone)
160 		return NULL;
161 
162 	memset(clone->tx, 0, sizeof(clone->tx));
163 	memset(clone->rx, 0, sizeof(clone->rx));
164 	memset(clone->napi, 0, sizeof(clone->napi));
165 	return clone;
166 }
167 
168 static void fbnic_clone_swap_cfg(struct fbnic_net *orig,
169 				 struct fbnic_net *clone)
170 {
171 	swap(clone->rcq_size, orig->rcq_size);
172 	swap(clone->hpq_size, orig->hpq_size);
173 	swap(clone->ppq_size, orig->ppq_size);
174 	swap(clone->txq_size, orig->txq_size);
175 	swap(clone->num_rx_queues, orig->num_rx_queues);
176 	swap(clone->num_tx_queues, orig->num_tx_queues);
177 	swap(clone->num_napi, orig->num_napi);
178 	swap(clone->hds_thresh, orig->hds_thresh);
179 }
180 
/* Fold the per-ring counters of every queue triplet owned by @nv into
 * the netdev-level totals in @fbn.
 *
 * qt[] holds the txt_count Tx triplets first, then the Rx triplets,
 * so the second loop continues with the running index i from the first.
 */
static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
					    struct fbnic_napi_vector *nv)
{
	int i, j;

	for (i = 0; i < nv->txt_count; i++) {
		/* Tx triplet: sub1 carries the XDP Tx ring counters */
		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0);
		fbnic_aggregate_ring_xdp_counters(fbn, &nv->qt[i].sub1);
		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl);
	}

	for (j = 0; j < nv->rxt_count; j++, i++) {
		/* Rx triplet: sub0/sub1 are buffer descriptor queues */
		fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub0);
		fbnic_aggregate_ring_bdq_counters(fbn, &nv->qt[i].sub1);
		fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl);
	}
}
198 
/* Swap the live ring/NAPI state of @orig with the staged state in
 * @clone.  IRQs are synchronized and the outstanding per-ring counters
 * folded into the netdev totals first, so nothing is lost across the
 * exchange.
 */
static void fbnic_clone_swap(struct fbnic_net *orig,
			     struct fbnic_net *clone)
{
	struct fbnic_dev *fbd = orig->fbd;
	unsigned int i;

	/* Quiesce IRQs for the union of the old and new vector counts */
	for (i = 0; i < max(clone->num_napi, orig->num_napi); i++)
		fbnic_synchronize_irq(fbd, FBNIC_NON_NAPI_VECTORS + i);
	for (i = 0; i < orig->num_napi; i++)
		fbnic_aggregate_vector_counters(orig, orig->napi[i]);

	fbnic_clone_swap_cfg(orig, clone);

	/* Exchange every ring/NAPI pointer slot between the two structs */
	for (i = 0; i < ARRAY_SIZE(orig->napi); i++)
		swap(clone->napi[i], orig->napi[i]);
	for (i = 0; i < ARRAY_SIZE(orig->tx); i++)
		swap(clone->tx[i], orig->tx[i]);
	for (i = 0; i < ARRAY_SIZE(orig->rx); i++)
		swap(clone->rx[i], orig->rx[i]);
}
219 
/* Release a clone obtained from fbnic_clone_create() */
static void fbnic_clone_free(struct fbnic_net *clone)
{
	kfree(clone);
}
224 
225 static int fbnic_get_coalesce(struct net_device *netdev,
226 			      struct ethtool_coalesce *ec,
227 			      struct kernel_ethtool_coalesce *kernel_coal,
228 			      struct netlink_ext_ack *extack)
229 {
230 	struct fbnic_net *fbn = netdev_priv(netdev);
231 
232 	ec->tx_coalesce_usecs = fbn->tx_usecs;
233 	ec->rx_coalesce_usecs = fbn->rx_usecs;
234 	ec->rx_max_coalesced_frames = fbn->rx_max_frames;
235 
236 	return 0;
237 }
238 
239 static int fbnic_set_coalesce(struct net_device *netdev,
240 			      struct ethtool_coalesce *ec,
241 			      struct kernel_ethtool_coalesce *kernel_coal,
242 			      struct netlink_ext_ack *extack)
243 {
244 	struct fbnic_net *fbn = netdev_priv(netdev);
245 
246 	/* Verify against hardware limits */
247 	if (ec->rx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT)) {
248 		NL_SET_ERR_MSG_MOD(extack, "rx_usecs is above device max");
249 		return -EINVAL;
250 	}
251 	if (ec->tx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT)) {
252 		NL_SET_ERR_MSG_MOD(extack, "tx_usecs is above device max");
253 		return -EINVAL;
254 	}
255 	if (ec->rx_max_coalesced_frames >
256 	    FIELD_MAX(FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK) /
257 	    FBNIC_MIN_RXD_PER_FRAME) {
258 		NL_SET_ERR_MSG_MOD(extack, "rx_frames is above device max");
259 		return -EINVAL;
260 	}
261 
262 	fbn->tx_usecs = ec->tx_coalesce_usecs;
263 	fbn->rx_usecs = ec->rx_coalesce_usecs;
264 	fbn->rx_max_frames = ec->rx_max_coalesced_frames;
265 
266 	if (netif_running(netdev)) {
267 		int i;
268 
269 		for (i = 0; i < fbn->num_napi; i++) {
270 			struct fbnic_napi_vector *nv = fbn->napi[i];
271 
272 			fbnic_config_txrx_usecs(nv, 0);
273 			fbnic_config_rx_frames(nv);
274 		}
275 	}
276 
277 	return 0;
278 }
279 
280 static void
281 fbnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
282 		    struct kernel_ethtool_ringparam *kernel_ring,
283 		    struct netlink_ext_ack *extack)
284 {
285 	struct fbnic_net *fbn = netdev_priv(netdev);
286 
287 	ring->rx_max_pending = FBNIC_QUEUE_SIZE_MAX;
288 	ring->rx_mini_max_pending = FBNIC_QUEUE_SIZE_MAX;
289 	ring->rx_jumbo_max_pending = FBNIC_QUEUE_SIZE_MAX;
290 	ring->tx_max_pending = FBNIC_QUEUE_SIZE_MAX;
291 
292 	ring->rx_pending = fbn->rcq_size;
293 	ring->rx_mini_pending = fbn->hpq_size;
294 	ring->rx_jumbo_pending = fbn->ppq_size;
295 	ring->tx_pending = fbn->txq_size;
296 
297 	kernel_ring->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_ENABLED;
298 	kernel_ring->hds_thresh_max = FBNIC_HDS_THRESH_MAX;
299 	kernel_ring->hds_thresh = fbn->hds_thresh;
300 }
301 
302 static void fbnic_set_rings(struct fbnic_net *fbn,
303 			    struct ethtool_ringparam *ring,
304 			    struct kernel_ethtool_ringparam *kernel_ring)
305 {
306 	fbn->rcq_size = ring->rx_pending;
307 	fbn->hpq_size = ring->rx_mini_pending;
308 	fbn->ppq_size = ring->rx_jumbo_pending;
309 	fbn->txq_size = ring->tx_pending;
310 	fbn->hds_thresh = kernel_ring->hds_thresh;
311 }
312 
313 static int
314 fbnic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
315 		    struct kernel_ethtool_ringparam *kernel_ring,
316 		    struct netlink_ext_ack *extack)
317 
318 {
319 	struct fbnic_net *fbn = netdev_priv(netdev);
320 	struct fbnic_net *clone;
321 	int err;
322 
323 	ring->rx_pending	= roundup_pow_of_two(ring->rx_pending);
324 	ring->rx_mini_pending	= roundup_pow_of_two(ring->rx_mini_pending);
325 	ring->rx_jumbo_pending	= roundup_pow_of_two(ring->rx_jumbo_pending);
326 	ring->tx_pending	= roundup_pow_of_two(ring->tx_pending);
327 
328 	/* These are absolute minimums allowing the device and driver to operate
329 	 * but not necessarily guarantee reasonable performance. Settings below
330 	 * Rx queue size of 128 and BDQs smaller than 64 are likely suboptimal
331 	 * at best.
332 	 */
333 	if (ring->rx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_RX_DESC_MIN) ||
334 	    ring->rx_mini_pending < FBNIC_QUEUE_SIZE_MIN ||
335 	    ring->rx_jumbo_pending < FBNIC_QUEUE_SIZE_MIN ||
336 	    ring->tx_pending < max(FBNIC_QUEUE_SIZE_MIN, FBNIC_TX_DESC_MIN)) {
337 		NL_SET_ERR_MSG_MOD(extack, "requested ring size too small");
338 		return -EINVAL;
339 	}
340 
341 	if (kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_DISABLED) {
342 		NL_SET_ERR_MSG_MOD(extack, "Cannot disable TCP data split");
343 		return -EINVAL;
344 	}
345 
346 	/* If an XDP program is attached, we should check for potential frame
347 	 * splitting. If the new HDS threshold can cause splitting, we should
348 	 * only allow if the attached XDP program can handle frags.
349 	 */
350 	if (fbnic_check_split_frames(fbn->xdp_prog, netdev->mtu,
351 				     kernel_ring->hds_thresh)) {
352 		NL_SET_ERR_MSG_MOD(extack,
353 				   "Use higher HDS threshold or multi-buf capable program");
354 		return -EINVAL;
355 	}
356 
357 	if (!netif_running(netdev)) {
358 		fbnic_set_rings(fbn, ring, kernel_ring);
359 		return 0;
360 	}
361 
362 	clone = fbnic_clone_create(fbn);
363 	if (!clone)
364 		return -ENOMEM;
365 
366 	fbnic_set_rings(clone, ring, kernel_ring);
367 
368 	err = fbnic_alloc_napi_vectors(clone);
369 	if (err)
370 		goto err_free_clone;
371 
372 	err = fbnic_alloc_resources(clone);
373 	if (err)
374 		goto err_free_napis;
375 
376 	fbnic_down_noidle(fbn);
377 	err = fbnic_wait_all_queues_idle(fbn->fbd, true);
378 	if (err)
379 		goto err_start_stack;
380 
381 	err = fbnic_set_netif_queues(clone);
382 	if (err)
383 		goto err_start_stack;
384 
385 	/* Nothing can fail past this point */
386 	fbnic_flush(fbn);
387 
388 	fbnic_clone_swap(fbn, clone);
389 
390 	fbnic_up(fbn);
391 
392 	fbnic_free_resources(clone);
393 	fbnic_free_napi_vectors(clone);
394 	fbnic_clone_free(clone);
395 
396 	return 0;
397 
398 err_start_stack:
399 	fbnic_flush(fbn);
400 	fbnic_up(fbn);
401 	fbnic_free_resources(clone);
402 err_free_napis:
403 	fbnic_free_napi_vectors(clone);
404 err_free_clone:
405 	fbnic_clone_free(clone);
406 	return err;
407 }
408 
409 static void fbnic_get_rxb_enqueue_strings(u8 **data, unsigned int idx)
410 {
411 	const struct fbnic_stat *stat;
412 	int i;
413 
414 	stat = fbnic_gstrings_rxb_enqueue_stats;
415 	for (i = 0; i < FBNIC_HW_RXB_ENQUEUE_STATS_LEN; i++, stat++)
416 		ethtool_sprintf(data, stat->string, idx);
417 }
418 
419 static void fbnic_get_rxb_fifo_strings(u8 **data, unsigned int idx)
420 {
421 	const struct fbnic_stat *stat;
422 	int i;
423 
424 	stat = fbnic_gstrings_rxb_fifo_stats;
425 	for (i = 0; i < FBNIC_HW_RXB_FIFO_STATS_LEN; i++, stat++)
426 		ethtool_sprintf(data, stat->string, idx);
427 }
428 
429 static void fbnic_get_rxb_dequeue_strings(u8 **data, unsigned int idx)
430 {
431 	const struct fbnic_stat *stat;
432 	int i;
433 
434 	stat = fbnic_gstrings_rxb_dequeue_stats;
435 	for (i = 0; i < FBNIC_HW_RXB_DEQUEUE_STATS_LEN; i++, stat++)
436 		ethtool_sprintf(data, stat->string, idx);
437 }
438 
439 static void fbnic_get_xdp_queue_strings(u8 **data, unsigned int idx)
440 {
441 	const struct fbnic_stat *stat;
442 	int i;
443 
444 	stat = fbnic_gstrings_xdp_stats;
445 	for (i = 0; i < FBNIC_XDP_STATS_LEN; i++, stat++)
446 		ethtool_sprintf(data, stat->string, idx);
447 }
448 
449 static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
450 {
451 	const struct fbnic_stat *stat;
452 	int i, idx;
453 
454 	switch (sset) {
455 	case ETH_SS_STATS:
456 		for (i = 0; i < FBNIC_HW_FIXED_STATS_LEN; i++)
457 			ethtool_puts(&data, fbnic_gstrings_hw_stats[i].string);
458 
459 		for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++)
460 			fbnic_get_rxb_enqueue_strings(&data, i);
461 
462 		for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++)
463 			fbnic_get_rxb_fifo_strings(&data, i);
464 
465 		for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++)
466 			fbnic_get_rxb_dequeue_strings(&data, i);
467 
468 		for (idx = 0; idx < FBNIC_MAX_QUEUES; idx++) {
469 			stat = fbnic_gstrings_hw_q_stats;
470 
471 			for (i = 0; i < FBNIC_HW_Q_STATS_LEN; i++, stat++)
472 				ethtool_sprintf(&data, stat->string, idx);
473 		}
474 
475 		for (i = 0; i < FBNIC_MAX_XDPQS; i++)
476 			fbnic_get_xdp_queue_strings(&data, i);
477 		break;
478 	}
479 }
480 
481 static void fbnic_report_hw_stats(const struct fbnic_stat *stat,
482 				  const void *base, int len, u64 **data)
483 {
484 	while (len--) {
485 		u8 *curr = (u8 *)base + stat->offset;
486 
487 		**data = *(u64 *)curr;
488 
489 		stat++;
490 		(*data)++;
491 	}
492 }
493 
494 static void fbnic_get_xdp_queue_stats(struct fbnic_ring *ring, u64 **data)
495 {
496 	const struct fbnic_stat *stat;
497 	int i;
498 
499 	if (!ring) {
500 		*data += FBNIC_XDP_STATS_LEN;
501 		return;
502 	}
503 
504 	stat = fbnic_gstrings_xdp_stats;
505 	for (i = 0; i < FBNIC_XDP_STATS_LEN; i++, stat++, (*data)++) {
506 		u8 *p = (u8 *)ring + stat->offset;
507 
508 		**data = *(u64 *)p;
509 	}
510 }
511 
512 static void fbnic_get_ethtool_stats(struct net_device *dev,
513 				    struct ethtool_stats *stats, u64 *data)
514 {
515 	struct fbnic_net *fbn = netdev_priv(dev);
516 	struct fbnic_dev *fbd = fbn->fbd;
517 	int i;
518 
519 	fbnic_get_hw_stats(fbn->fbd);
520 
521 	spin_lock(&fbd->hw_stats.lock);
522 	fbnic_report_hw_stats(fbnic_gstrings_hw_stats, &fbd->hw_stats,
523 			      FBNIC_HW_FIXED_STATS_LEN, &data);
524 
525 	for (i = 0; i < FBNIC_RXB_ENQUEUE_INDICES; i++) {
526 		const struct fbnic_rxb_enqueue_stats *enq;
527 
528 		enq = &fbd->hw_stats.rxb.enq[i];
529 		fbnic_report_hw_stats(fbnic_gstrings_rxb_enqueue_stats,
530 				      enq, FBNIC_HW_RXB_ENQUEUE_STATS_LEN,
531 				      &data);
532 	}
533 
534 	for (i = 0; i < FBNIC_RXB_FIFO_INDICES; i++) {
535 		const struct fbnic_rxb_fifo_stats *fifo;
536 
537 		fifo = &fbd->hw_stats.rxb.fifo[i];
538 		fbnic_report_hw_stats(fbnic_gstrings_rxb_fifo_stats,
539 				      fifo, FBNIC_HW_RXB_FIFO_STATS_LEN,
540 				      &data);
541 	}
542 
543 	for (i = 0; i < FBNIC_RXB_DEQUEUE_INDICES; i++) {
544 		const struct fbnic_rxb_dequeue_stats *deq;
545 
546 		deq = &fbd->hw_stats.rxb.deq[i];
547 		fbnic_report_hw_stats(fbnic_gstrings_rxb_dequeue_stats,
548 				      deq, FBNIC_HW_RXB_DEQUEUE_STATS_LEN,
549 				      &data);
550 	}
551 
552 	for (i  = 0; i < FBNIC_MAX_QUEUES; i++) {
553 		const struct fbnic_hw_q_stats *hw_q = &fbd->hw_stats.hw_q[i];
554 
555 		fbnic_report_hw_stats(fbnic_gstrings_hw_q_stats, hw_q,
556 				      FBNIC_HW_Q_STATS_LEN, &data);
557 	}
558 	spin_unlock(&fbd->hw_stats.lock);
559 
560 	for (i = 0; i < FBNIC_MAX_XDPQS; i++)
561 		fbnic_get_xdp_queue_stats(fbn->tx[i + FBNIC_MAX_TXQS], &data);
562 }
563 
564 static int fbnic_get_sset_count(struct net_device *dev, int sset)
565 {
566 	switch (sset) {
567 	case ETH_SS_STATS:
568 		return FBNIC_STATS_LEN;
569 	default:
570 		return -EOPNOTSUPP;
571 	}
572 }
573 
/* Map an ethtool flow type to the driver's RSS hash option index.
 * Modifier flag bits (FLOW_EXT/FLOW_MAC_EXT/FLOW_RSS) are stripped
 * before matching.  Returns -1 for flow types without a hash option.
 */
static int fbnic_get_rss_hash_idx(u32 flow_type)
{
	switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
	case TCP_V4_FLOW:
		return FBNIC_TCP4_HASH_OPT;
	case TCP_V6_FLOW:
		return FBNIC_TCP6_HASH_OPT;
	case UDP_V4_FLOW:
		return FBNIC_UDP4_HASH_OPT;
	case UDP_V6_FLOW:
		return FBNIC_UDP6_HASH_OPT;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case IPV4_FLOW:
	case IPV4_USER_FLOW:
		/* All other IPv4 protocols share the IPv4 hash option */
		return FBNIC_IPV4_HASH_OPT;
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case SCTP_V6_FLOW:
	case IPV6_FLOW:
	case IPV6_USER_FLOW:
		/* All other IPv6 protocols share the IPv6 hash option */
		return FBNIC_IPV6_HASH_OPT;
	case ETHER_FLOW:
		return FBNIC_ETHER_HASH_OPT;
	}

	return -1;
}
605 
/* Walk the NFC region of the action TCAM and count valid rules.
 *
 * Returns the number of valid rules.  When @rule_locs is non-NULL the
 * rule locations are also recorded there; -EMSGSIZE is returned if more
 * valid rules exist than cmd->rule_cnt allows.
 */
static int fbnic_get_cls_rule_all(struct fbnic_net *fbn,
				  struct ethtool_rxnfc *cmd,
				  u32 *rule_locs)
{
	struct fbnic_dev *fbd = fbn->fbd;
	int i, cnt = 0;

	/* Report maximum rule count */
	cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;

	for (i = 0; i < FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i++) {
		/* NFC rules occupy a dedicated window of the action table */
		int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
		struct fbnic_act_tcam *act_tcam;

		act_tcam = &fbd->act_tcam[idx];
		if (act_tcam->state != FBNIC_TCAM_S_VALID)
			continue;

		if (rule_locs) {
			if (cnt == cmd->rule_cnt)
				return -EMSGSIZE;

			rule_locs[cnt] = i;
		}

		cnt++;
	}

	return cnt;
}
636 
/* Translate the action TCAM entry at fsp->location back into an ethtool
 * flow spec.  Note the stored TCAM masks are inverted relative to the
 * ethtool convention (see the bitwise NOTs below), and a mask field of
 * zero means the corresponding index field participates in the match.
 */
static int fbnic_get_cls_rule(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_act_tcam *act_tcam;
	int idx;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
		return -EINVAL;

	idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
	act_tcam = &fbd->act_tcam[idx];

	if (act_tcam->state != FBNIC_TCAM_S_VALID)
		return -EINVAL;

	/* Report maximum rule count */
	cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;

	/* Set flow type field */
	if (!(act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_VALID)) {
		/* No IP header matched: this is an L2 (Ethernet) rule */
		fsp->flow_type = ETHER_FLOW;
		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
			       act_tcam->mask.tcam[1])) {
			struct fbnic_mac_addr *mac_addr;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
					act_tcam->value.tcam[1]);
			mac_addr = &fbd->mac_addr[idx];

			ether_addr_copy(fsp->h_u.ether_spec.h_dest,
					mac_addr->value.addr8);
			eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
		}
	} else if (act_tcam->value.tcam[1] &
		   FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID) {
		/* Outer IP header rule, reported as an IPv6 user flow with
		 * l4_proto IPPROTO_IPV6 and addresses from the "ipo" tables.
		 */
		fsp->flow_type = IPV6_USER_FLOW;
		fsp->h_u.usr_ip6_spec.l4_proto = IPPROTO_IPV6;
		fsp->m_u.usr_ip6_spec.l4_proto = 0xff;

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ipo_src[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6src[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6src[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ipo_dst[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6dst[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6dst[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}
	} else if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_IS_V6)) {
		/* Inner IPv6 rule: TCP/UDP with ports, or plain user flow */
		if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
			if (act_tcam->value.tcam[1] &
			    FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
				fsp->flow_type = UDP_V6_FLOW;
			else
				fsp->flow_type = TCP_V6_FLOW;
			fsp->h_u.tcp_ip6_spec.psrc =
				cpu_to_be16(act_tcam->value.tcam[3]);
			fsp->m_u.tcp_ip6_spec.psrc =
				cpu_to_be16(~act_tcam->mask.tcam[3]);
			fsp->h_u.tcp_ip6_spec.pdst =
				cpu_to_be16(act_tcam->value.tcam[4]);
			fsp->m_u.tcp_ip6_spec.pdst =
				cpu_to_be16(~act_tcam->mask.tcam[4]);
		} else {
			fsp->flow_type = IPV6_USER_FLOW;
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_src[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6src[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6src[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;
			int i;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_dst[idx];

			for (i = 0; i < 4; i++) {
				fsp->h_u.usr_ip6_spec.ip6dst[i] =
					ip_addr->value.s6_addr32[i];
				fsp->m_u.usr_ip6_spec.ip6dst[i] =
					~ip_addr->mask.s6_addr32[i];
			}
		}
	} else {
		/* Inner IPv4 rule; IPv4 addrs live in s6_addr32[3] */
		if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
			if (act_tcam->value.tcam[1] &
			    FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
				fsp->flow_type = UDP_V4_FLOW;
			else
				fsp->flow_type = TCP_V4_FLOW;
			fsp->h_u.tcp_ip4_spec.psrc =
				cpu_to_be16(act_tcam->value.tcam[3]);
			fsp->m_u.tcp_ip4_spec.psrc =
				cpu_to_be16(~act_tcam->mask.tcam[3]);
			fsp->h_u.tcp_ip4_spec.pdst =
				cpu_to_be16(act_tcam->value.tcam[4]);
			fsp->m_u.tcp_ip4_spec.pdst =
				cpu_to_be16(~act_tcam->mask.tcam[4]);
		} else {
			fsp->flow_type = IPV4_USER_FLOW;
			fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_src[idx];

			fsp->h_u.usr_ip4_spec.ip4src =
				ip_addr->value.s6_addr32[3];
			fsp->m_u.usr_ip4_spec.ip4src =
				~ip_addr->mask.s6_addr32[3];
		}

		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
			       act_tcam->mask.tcam[0])) {
			struct fbnic_ip_addr *ip_addr;

			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
					act_tcam->value.tcam[0]);
			ip_addr = &fbd->ip_dst[idx];

			fsp->h_u.usr_ip4_spec.ip4dst =
				ip_addr->value.s6_addr32[3];
			fsp->m_u.usr_ip4_spec.ip4dst =
				~ip_addr->mask.s6_addr32[3];
		}
	}

	/* Record action: drop, explicit queue, or RSS context */
	if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_DROP)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_Q_SEL)
		fsp->ring_cookie = FIELD_GET(FBNIC_RPC_ACT_TBL0_Q_ID,
					     act_tcam->dest);
	else
		fsp->flow_type |= FLOW_RSS;

	cmd->rss_context = FIELD_GET(FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID,
				     act_tcam->dest);

	return 0;
}
827 
828 static u32 fbnic_get_rx_ring_count(struct net_device *netdev)
829 {
830 	struct fbnic_net *fbn = netdev_priv(netdev);
831 
832 	return fbn->num_rx_queues;
833 }
834 
/* ethtool -n/-u dispatcher for classification rule queries */
static int fbnic_get_rxnfc(struct net_device *netdev,
			   struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int ret = -EOPNOTSUPP;
	u32 special = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXCLSRULE:
		ret = fbnic_get_cls_rule(fbn, cmd);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		/* Count-only query: reuse the GRXCLSRLALL walk below but
		 * suppress location output and advertise special-location
		 * (RX_CLS_LOC_ANY) support in cmd->data.
		 */
		rule_locs = NULL;
		special = RX_CLS_LOC_SPECIAL;
		fallthrough;
	case ETHTOOL_GRXCLSRLALL:
		ret = fbnic_get_cls_rule_all(fbn, cmd, rule_locs);
		if (ret < 0)
			break;

		cmd->data |= special;
		cmd->rule_cnt = ret;
		ret = 0;
		break;
	}

	return ret;
}
863 
864 static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd)
865 {
866 	int i;
867 
868 	for (i = FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i--;) {
869 		int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
870 
871 		if (fbd->act_tcam[idx].state != FBNIC_TCAM_S_VALID)
872 			return i;
873 	}
874 
875 	return -ENOSPC;
876 }
877 
878 static int fbnic_set_cls_rule_ins(struct fbnic_net *fbn,
879 				  const struct ethtool_rxnfc *cmd)
880 {
881 	u16 flow_value = 0, flow_mask = 0xffff, ip_value = 0, ip_mask = 0xffff;
882 	u16 sport = 0, sport_mask = ~0, dport = 0, dport_mask = ~0;
883 	u16 misc = 0, misc_mask = ~0;
884 	u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
885 			      FBNIC_RPC_ACT_TBL0_DEST_HOST);
886 	struct fbnic_ip_addr *ip_src = NULL, *ip_dst = NULL;
887 	struct fbnic_mac_addr *mac_addr = NULL;
888 	struct ethtool_rx_flow_spec *fsp;
889 	struct fbnic_dev *fbd = fbn->fbd;
890 	struct fbnic_act_tcam *act_tcam;
891 	struct in6_addr *addr6, *mask6;
892 	struct in_addr *addr4, *mask4;
893 	int hash_idx, location;
894 	u32 flow_type;
895 	int idx, j;
896 
897 	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
898 
899 	if (fsp->location != RX_CLS_LOC_ANY)
900 		return -EINVAL;
901 	location = fbnic_cls_rule_any_loc(fbd);
902 	if (location < 0)
903 		return location;
904 
905 	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
906 		dest = FBNIC_RPC_ACT_TBL0_DROP;
907 	} else if (fsp->flow_type & FLOW_RSS) {
908 		if (cmd->rss_context == 1)
909 			dest |= FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID;
910 	} else {
911 		u32 ring_idx = ethtool_get_flow_spec_ring(fsp->ring_cookie);
912 
913 		if (ring_idx >= fbn->num_rx_queues)
914 			return -EINVAL;
915 
916 		dest |= FBNIC_RPC_ACT_TBL0_Q_SEL |
917 			FIELD_PREP(FBNIC_RPC_ACT_TBL0_Q_ID, ring_idx);
918 	}
919 
920 	idx = location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
921 	act_tcam = &fbd->act_tcam[idx];
922 
923 	/* Do not allow overwriting for now.
924 	 * To support overwriting rules we will need to add logic to free
925 	 * any IP or MACDA TCAMs that may be associated with the old rule.
926 	 */
927 	if (act_tcam->state != FBNIC_TCAM_S_DISABLED)
928 		return -EBUSY;
929 
930 	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_RSS);
931 	hash_idx = fbnic_get_rss_hash_idx(flow_type);
932 
933 	switch (flow_type) {
934 	case UDP_V4_FLOW:
935 udp4_flow:
936 		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
937 		fallthrough;
938 	case TCP_V4_FLOW:
939 tcp4_flow:
940 		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
941 		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
942 			       FBNIC_RPC_TCAM_ACT1_L4_VALID);
943 
944 		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
945 		sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
946 		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
947 		dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
948 		goto ip4_flow;
949 	case IP_USER_FLOW:
950 		if (!fsp->m_u.usr_ip4_spec.proto)
951 			goto ip4_flow;
952 		if (fsp->m_u.usr_ip4_spec.proto != 0xff)
953 			return -EINVAL;
954 		if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_UDP)
955 			goto udp4_flow;
956 		if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_TCP)
957 			goto tcp4_flow;
958 		return -EINVAL;
959 ip4_flow:
960 		addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4src;
961 		mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4src;
962 		if (mask4->s_addr) {
963 			ip_src = __fbnic_ip4_sync(fbd, fbd->ip_src,
964 						  addr4, mask4);
965 			if (!ip_src)
966 				return -ENOSPC;
967 
968 			set_bit(idx, ip_src->act_tcam);
969 			ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
970 				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
971 					       ip_src - fbd->ip_src);
972 			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
973 				     FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
974 		}
975 
976 		addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4dst;
977 		mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4dst;
978 		if (mask4->s_addr) {
979 			ip_dst = __fbnic_ip4_sync(fbd, fbd->ip_dst,
980 						  addr4, mask4);
981 			if (!ip_dst) {
982 				if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
983 					memset(ip_src, 0, sizeof(*ip_src));
984 				return -ENOSPC;
985 			}
986 
987 			set_bit(idx, ip_dst->act_tcam);
988 			ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
989 				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
990 					       ip_dst - fbd->ip_dst);
991 			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
992 				     FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
993 		}
994 		flow_value |= FBNIC_RPC_TCAM_ACT1_IP_VALID |
995 			      FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
996 		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
997 			       FBNIC_RPC_TCAM_ACT1_IP_VALID |
998 			       FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
999 		break;
1000 	case UDP_V6_FLOW:
1001 udp6_flow:
1002 		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
1003 		fallthrough;
1004 	case TCP_V6_FLOW:
1005 tcp6_flow:
1006 		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
1007 		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
1008 			  FBNIC_RPC_TCAM_ACT1_L4_VALID);
1009 
1010 		sport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.psrc);
1011 		sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.psrc);
1012 		dport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.pdst);
1013 		dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.pdst);
1014 		goto ipv6_flow;
1015 	case IPV6_USER_FLOW:
1016 		if (!fsp->m_u.usr_ip6_spec.l4_proto)
1017 			goto ipv6_flow;
1018 
1019 		if (fsp->m_u.usr_ip6_spec.l4_proto != 0xff)
1020 			return -EINVAL;
1021 		if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_UDP)
1022 			goto udp6_flow;
1023 		if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_TCP)
1024 			goto tcp6_flow;
1025 		if (fsp->h_u.usr_ip6_spec.l4_proto != IPPROTO_IPV6)
1026 			return -EINVAL;
1027 
1028 		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
1029 		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
1030 		if (!ipv6_addr_any(mask6)) {
1031 			ip_src = __fbnic_ip6_sync(fbd, fbd->ipo_src,
1032 						  addr6, mask6);
1033 			if (!ip_src)
1034 				return -ENOSPC;
1035 
1036 			set_bit(idx, ip_src->act_tcam);
1037 			ip_value |=
1038 				FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
1039 				FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
1040 					   ip_src - fbd->ipo_src);
1041 			ip_mask &=
1042 				~(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
1043 				  FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX);
1044 		}
1045 
1046 		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
1047 		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
1048 		if (!ipv6_addr_any(mask6)) {
1049 			ip_dst = __fbnic_ip6_sync(fbd, fbd->ipo_dst,
1050 						  addr6, mask6);
1051 			if (!ip_dst) {
1052 				if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
1053 					memset(ip_src, 0, sizeof(*ip_src));
1054 				return -ENOSPC;
1055 			}
1056 
1057 			set_bit(idx, ip_dst->act_tcam);
1058 			ip_value |=
1059 				FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
1060 				FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
1061 					   ip_dst - fbd->ipo_dst);
1062 			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
1063 				     FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX);
1064 		}
1065 
1066 		flow_value |= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
1067 		flow_mask &= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
1068 ipv6_flow:
1069 		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
1070 		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
1071 		if (!ip_src && !ipv6_addr_any(mask6)) {
1072 			ip_src = __fbnic_ip6_sync(fbd, fbd->ip_src,
1073 						  addr6, mask6);
1074 			if (!ip_src)
1075 				return -ENOSPC;
1076 
1077 			set_bit(idx, ip_src->act_tcam);
1078 			ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
1079 				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
1080 					       ip_src - fbd->ip_src);
1081 			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
1082 				       FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
1083 		}
1084 
1085 		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
1086 		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
1087 		if (!ip_dst && !ipv6_addr_any(mask6)) {
1088 			ip_dst = __fbnic_ip6_sync(fbd, fbd->ip_dst,
1089 						  addr6, mask6);
1090 			if (!ip_dst) {
1091 				if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
1092 					memset(ip_src, 0, sizeof(*ip_src));
1093 				return -ENOSPC;
1094 			}
1095 
1096 			set_bit(idx, ip_dst->act_tcam);
1097 			ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
1098 				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
1099 					       ip_dst - fbd->ip_dst);
1100 			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
1101 				       FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
1102 		}
1103 
1104 		flow_value |= FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
1105 			      FBNIC_RPC_TCAM_ACT1_IP_VALID |
1106 			      FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
1107 		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
1108 			       FBNIC_RPC_TCAM_ACT1_IP_VALID |
1109 			       FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
1110 		break;
1111 	case ETHER_FLOW:
1112 		if (!is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) {
1113 			u8 *addr = fsp->h_u.ether_spec.h_dest;
1114 			u8 *mask = fsp->m_u.ether_spec.h_dest;
1115 
1116 			/* Do not allow MAC addr of 0 */
1117 			if (is_zero_ether_addr(addr))
1118 				return -EINVAL;
1119 
1120 			/* Only support full MAC address to avoid
1121 			 * conflicts with other MAC addresses.
1122 			 */
1123 			if (!is_broadcast_ether_addr(mask))
1124 				return -EINVAL;
1125 
1126 			if (is_multicast_ether_addr(addr))
1127 				mac_addr = __fbnic_mc_sync(fbd, addr);
1128 			else
1129 				mac_addr = __fbnic_uc_sync(fbd, addr);
1130 
1131 			if (!mac_addr)
1132 				return -ENOSPC;
1133 
1134 			set_bit(idx, mac_addr->act_tcam);
1135 			flow_value |=
1136 				FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
1137 					   mac_addr - fbd->mac_addr);
1138 			flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX;
1139 		}
1140 
1141 		flow_value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
1142 		flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
1143 		break;
1144 	default:
1145 		return -EINVAL;
1146 	}
1147 
1148 	/* Write action table values */
1149 	act_tcam->dest = dest;
1150 	act_tcam->rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, hash_idx);
1151 
1152 	/* Write IP Match value/mask to action_tcam[0] */
1153 	act_tcam->value.tcam[0] = ip_value;
1154 	act_tcam->mask.tcam[0] = ip_mask;
1155 
1156 	/* Write flow type value/mask to action_tcam[1] */
1157 	act_tcam->value.tcam[1] = flow_value;
1158 	act_tcam->mask.tcam[1] = flow_mask;
1159 
1160 	/* Write error, DSCP, extra L4 matches to action_tcam[2] */
1161 	act_tcam->value.tcam[2] = misc;
1162 	act_tcam->mask.tcam[2] = misc_mask;
1163 
1164 	/* Write source/destination port values */
1165 	act_tcam->value.tcam[3] = sport;
1166 	act_tcam->mask.tcam[3] = sport_mask;
1167 	act_tcam->value.tcam[4] = dport;
1168 	act_tcam->mask.tcam[4] = dport_mask;
1169 
1170 	for (j = 5; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
1171 		act_tcam->mask.tcam[j] = 0xffff;
1172 
1173 	act_tcam->state = FBNIC_TCAM_S_UPDATE;
1174 	fsp->location = location;
1175 
1176 	if (netif_running(fbn->netdev)) {
1177 		fbnic_write_rules(fbd);
1178 		if (ip_src || ip_dst)
1179 			fbnic_write_ip_addr(fbd);
1180 		if (mac_addr)
1181 			fbnic_write_macda(fbd);
1182 	}
1183 
1184 	return 0;
1185 }
1186 
1187 static void fbnic_clear_nfc_macda(struct fbnic_net *fbn,
1188 				  unsigned int tcam_idx)
1189 {
1190 	struct fbnic_dev *fbd = fbn->fbd;
1191 	int idx;
1192 
1193 	for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;)
1194 		__fbnic_xc_unsync(&fbd->mac_addr[idx], tcam_idx);
1195 
1196 	/* Write updates to hardware */
1197 	if (netif_running(fbn->netdev))
1198 		fbnic_write_macda(fbd);
1199 }
1200 
1201 static void fbnic_clear_nfc_ip_addr(struct fbnic_net *fbn,
1202 				    unsigned int tcam_idx)
1203 {
1204 	struct fbnic_dev *fbd = fbn->fbd;
1205 	int idx;
1206 
1207 	for (idx = ARRAY_SIZE(fbd->ip_src); idx--;)
1208 		__fbnic_ip_unsync(&fbd->ip_src[idx], tcam_idx);
1209 	for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;)
1210 		__fbnic_ip_unsync(&fbd->ip_dst[idx], tcam_idx);
1211 	for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;)
1212 		__fbnic_ip_unsync(&fbd->ipo_src[idx], tcam_idx);
1213 	for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;)
1214 		__fbnic_ip_unsync(&fbd->ipo_dst[idx], tcam_idx);
1215 
1216 	/* Write updates to hardware */
1217 	if (netif_running(fbn->netdev))
1218 		fbnic_write_ip_addr(fbd);
1219 }
1220 
/* Delete an installed NFC classification rule and release any MACDA or
 * IP address TCAM entries it referenced. Returns -EINVAL if the
 * location is out of range or no valid rule exists there.
 */
static int fbnic_set_cls_rule_del(struct fbnic_net *fbn,
				  const struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_act_tcam *act_tcam;
	int idx;

	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;

	if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
		return -EINVAL;

	/* NFC rules occupy a dedicated window of the action TCAM */
	idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
	act_tcam = &fbd->act_tcam[idx];

	if (act_tcam->state != FBNIC_TCAM_S_VALID)
		return -EINVAL;

	/* Mark for deletion before releasing dependent table entries */
	act_tcam->state = FBNIC_TCAM_S_DELETE;

	/* If the rule matched a MAC DA (valid bit set, index unmasked),
	 * drop its reference on the MACDA table.
	 */
	if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID) &&
	    (~act_tcam->mask.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX))
		fbnic_clear_nfc_macda(fbn, idx);

	/* Likewise release any inner/outer IP src/dst table references */
	if ((act_tcam->value.tcam[0] &
	     (FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
	      FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
	      FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
	      FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID)) &&
	    (~act_tcam->mask.tcam[0] &
	     (FBNIC_RPC_TCAM_ACT0_IPSRC_IDX |
	      FBNIC_RPC_TCAM_ACT0_IPDST_IDX |
	      FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX |
	      FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX)))
		fbnic_clear_nfc_ip_addr(fbn, idx);

	if (netif_running(fbn->netdev))
		fbnic_write_rules(fbd);

	return 0;
}
1263 
1264 static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1265 {
1266 	struct fbnic_net *fbn = netdev_priv(netdev);
1267 	int ret = -EOPNOTSUPP;
1268 
1269 	switch (cmd->cmd) {
1270 	case ETHTOOL_SRXCLSRLINS:
1271 		ret = fbnic_set_cls_rule_ins(fbn, cmd);
1272 		break;
1273 	case ETHTOOL_SRXCLSRLDEL:
1274 		ret = fbnic_set_cls_rule_del(fbn, cmd);
1275 		break;
1276 	}
1277 
1278 	return ret;
1279 }
1280 
/* Report the RSS hash key length in bytes to the ethtool core */
static u32 fbnic_get_rxfh_key_size(struct net_device *netdev)
{
	return FBNIC_RPC_RSS_KEY_BYTE_LEN;
}
1285 
/* Report the RSS indirection table size to the ethtool core */
static u32 fbnic_get_rxfh_indir_size(struct net_device *netdev)
{
	return FBNIC_RPC_RSS_TBL_SIZE;
}
1290 
1291 static int
1292 fbnic_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
1293 {
1294 	struct fbnic_net *fbn = netdev_priv(netdev);
1295 	unsigned int i;
1296 
1297 	rxfh->hfunc = ETH_RSS_HASH_TOP;
1298 
1299 	if (rxfh->key) {
1300 		for (i = 0; i < FBNIC_RPC_RSS_KEY_BYTE_LEN; i++) {
1301 			u32 rss_key = fbn->rss_key[i / 4] << ((i % 4) * 8);
1302 
1303 			rxfh->key[i] = rss_key >> 24;
1304 		}
1305 	}
1306 
1307 	if (rxfh->indir) {
1308 		for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
1309 			rxfh->indir[i] = fbn->indir_tbl[0][i];
1310 	}
1311 
1312 	return 0;
1313 }
1314 
1315 static unsigned int
1316 fbnic_set_indir(struct fbnic_net *fbn, unsigned int idx, const u32 *indir)
1317 {
1318 	unsigned int i, changes = 0;
1319 
1320 	for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
1321 		if (fbn->indir_tbl[idx][i] == indir[i])
1322 			continue;
1323 
1324 		fbn->indir_tbl[idx][i] = indir[i];
1325 		changes++;
1326 	}
1327 
1328 	return changes;
1329 }
1330 
/* Program a new RSS hash key and/or indirection table.
 *
 * Only Toeplitz (ETH_RSS_HASH_TOP) hashing is accepted. Hardware is
 * rewritten only if something actually changed and the device is up.
 */
static int
fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
	       struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	unsigned int i, changes = 0;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EINVAL;

	if (rxfh->key) {
		u32 rss_key = 0;

		/* Walk the key bytes backwards, accumulating 4 at a time
		 * into a 32-bit word with the lowest-index byte ending up
		 * most significant (inverse of fbnic_get_rxfh()).
		 */
		for (i = FBNIC_RPC_RSS_KEY_BYTE_LEN; i--;) {
			rss_key >>= 8;
			rss_key |= (u32)(rxfh->key[i]) << 24;

			/* Only compare/store on full-word boundaries */
			if (i % 4)
				continue;

			if (fbn->rss_key[i / 4] == rss_key)
				continue;

			fbn->rss_key[i / 4] = rss_key;
			changes++;
		}
	}

	if (rxfh->indir)
		changes += fbnic_set_indir(fbn, 0, rxfh->indir);

	if (changes && netif_running(netdev))
		fbnic_rss_reinit_hw(fbn->fbd, fbn);

	return 0;
}
1368 
1369 static int
1370 fbnic_get_rss_hash_opts(struct net_device *netdev,
1371 			struct ethtool_rxfh_fields *cmd)
1372 {
1373 	int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
1374 	struct fbnic_net *fbn = netdev_priv(netdev);
1375 
1376 	if (hash_opt_idx < 0)
1377 		return -EINVAL;
1378 
1379 	/* Report options from rss_en table in fbn */
1380 	cmd->data = fbn->rss_flow_hash[hash_opt_idx];
1381 
1382 	return 0;
1383 }
1384 
/* Allowed RSS hash field sets by flow-type class; each layer builds on
 * the one below it (L2 subset of L3 subset of L4).
 */
#define FBNIC_L2_HASH_OPTIONS \
	(RXH_L2DA | RXH_DISCARD)
#define FBNIC_L3_HASH_OPTIONS \
	(FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST | RXH_IP6_FL)
#define FBNIC_L4_HASH_OPTIONS \
	(FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
1391 
/* Configure which packet fields feed the RSS hash for a flow type.
 *
 * Rejects field combinations that don't make sense for the flow
 * type's layer (e.g. L4 port fields on a pure L3 flow type), then
 * rebuilds the RSS configuration if the interface is running.
 */
static int
fbnic_set_rss_hash_opts(struct net_device *netdev,
			const struct ethtool_rxfh_fields *cmd,
			struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int hash_opt_idx;

	/* Verify the type requested is correct */
	hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
	if (hash_opt_idx < 0)
		return -EINVAL;

	/* Verify the fields asked for can actually be assigned based on type */
	if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
	    (hash_opt_idx > FBNIC_L4_HASH_OPT &&
	     cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
	    (hash_opt_idx > FBNIC_IP_HASH_OPT &&
	     cmd->data & ~FBNIC_L2_HASH_OPTIONS))
		return -EINVAL;

	fbn->rss_flow_hash[hash_opt_idx] = cmd->data;

	/* Apply the new hash config and rewrite the rule tables */
	if (netif_running(fbn->netdev)) {
		fbnic_rss_reinit(fbn->fbd, fbn);
		fbnic_write_rules(fbn->fbd);
	}

	return 0;
}
1422 
1423 static int
1424 fbnic_modify_rxfh_context(struct net_device *netdev,
1425 			  struct ethtool_rxfh_context *ctx,
1426 			  const struct ethtool_rxfh_param *rxfh,
1427 			  struct netlink_ext_ack *extack)
1428 {
1429 	struct fbnic_net *fbn = netdev_priv(netdev);
1430 	const u32 *indir = rxfh->indir;
1431 	unsigned int changes;
1432 
1433 	if (!indir)
1434 		indir = ethtool_rxfh_context_indir(ctx);
1435 
1436 	changes = fbnic_set_indir(fbn, rxfh->rss_context, indir);
1437 	if (changes && netif_running(netdev))
1438 		fbnic_rss_reinit_hw(fbn->fbd, fbn);
1439 
1440 	return 0;
1441 }
1442 
/* Create an additional RSS context.
 *
 * Only Toeplitz hashing is supported. If the caller supplied no
 * indirection table, the context is seeded with the default spread
 * across the active Rx queues before being programmed.
 */
static int
fbnic_create_rxfh_context(struct net_device *netdev,
			  struct ethtool_rxfh_context *ctx,
			  const struct ethtool_rxfh_param *rxfh,
			  struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
		NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
		return -EOPNOTSUPP;
	}
	ctx->hfunc = ETH_RSS_HASH_TOP;

	if (!rxfh->indir) {
		u32 *indir = ethtool_rxfh_context_indir(ctx);
		unsigned int num_rx = fbn->num_rx_queues;
		unsigned int i;

		/* Populate default round-robin table over Rx queues */
		for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
			indir[i] = ethtool_rxfh_indir_default(i, num_rx);
	}

	return fbnic_modify_rxfh_context(netdev, ctx, rxfh, extack);
}
1468 
1469 static int
1470 fbnic_remove_rxfh_context(struct net_device *netdev,
1471 			  struct ethtool_rxfh_context *ctx, u32 rss_context,
1472 			  struct netlink_ext_ack *extack)
1473 {
1474 	/* Nothing to do, contexts are allocated statically */
1475 	return 0;
1476 }
1477 
/* Report channel configuration to ethtool.
 *
 * Rx/Tx queue pairs that share a NAPI are reported as "combined";
 * any remaining queues are reported as dedicated rx/tx channels.
 */
static void fbnic_get_channels(struct net_device *netdev,
			       struct ethtool_channels *ch)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_dev *fbd = fbn->fbd;

	ch->max_rx = fbd->max_num_queues;
	ch->max_tx = fbd->max_num_queues;
	ch->max_combined = min(ch->max_rx, ch->max_tx);
	ch->max_other =	FBNIC_NON_NAPI_VECTORS;

	/* If either queue count exceeds the NAPI count, all possible
	 * pairs are combined; otherwise the overlap between the Rx and
	 * Tx queue counts within num_napi is the combined count.
	 */
	if (fbn->num_rx_queues > fbn->num_napi ||
	    fbn->num_tx_queues > fbn->num_napi)
		ch->combined_count = min(fbn->num_rx_queues,
					 fbn->num_tx_queues);
	else
		ch->combined_count =
			fbn->num_rx_queues + fbn->num_tx_queues - fbn->num_napi;
	ch->rx_count = fbn->num_rx_queues - ch->combined_count;
	ch->tx_count = fbn->num_tx_queues - ch->combined_count;
	ch->other_count = FBNIC_NON_NAPI_VECTORS;
}
1500 
1501 static void fbnic_set_queues(struct fbnic_net *fbn, struct ethtool_channels *ch,
1502 			     unsigned int max_napis)
1503 {
1504 	fbn->num_rx_queues = ch->rx_count + ch->combined_count;
1505 	fbn->num_tx_queues = ch->tx_count + ch->combined_count;
1506 	fbn->num_napi = min(ch->rx_count + ch->tx_count + ch->combined_count,
1507 			    max_napis);
1508 }
1509 
/* Reconfigure the channel layout.
 *
 * If the interface is down the new counts are simply recorded. When it
 * is up, all new resources are allocated on a clone first so the live
 * configuration is only torn down once nothing else can fail; on error
 * the original configuration is restored via the err_* unwind path.
 */
static int fbnic_set_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	unsigned int max_napis, standalone;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_net *clone;
	int err;

	max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
	standalone = ch->rx_count + ch->tx_count;

	/* Limits for standalone queues:
	 *  - each queue has its own NAPI (num_napi >= rx + tx + combined)
	 *  - combining queues (combined not 0, rx or tx must be 0)
	 */
	if ((ch->rx_count && ch->tx_count && ch->combined_count) ||
	    (standalone && standalone + ch->combined_count > max_napis) ||
	    ch->rx_count + ch->combined_count > fbd->max_num_queues ||
	    ch->tx_count + ch->combined_count > fbd->max_num_queues ||
	    ch->other_count != FBNIC_NON_NAPI_VECTORS)
		return -EINVAL;

	/* Device is down: just record the new layout */
	if (!netif_running(netdev)) {
		fbnic_set_queues(fbn, ch, max_napis);
		fbnic_reset_indir_tbl(fbn);
		return 0;
	}

	/* Build the new configuration on a clone before disturbing fbn */
	clone = fbnic_clone_create(fbn);
	if (!clone)
		return -ENOMEM;

	fbnic_set_queues(clone, ch, max_napis);

	err = fbnic_alloc_napi_vectors(clone);
	if (err)
		goto err_free_clone;

	err = fbnic_alloc_resources(clone);
	if (err)
		goto err_free_napis;

	/* Quiesce the current configuration before swapping */
	fbnic_down_noidle(fbn);
	err = fbnic_wait_all_queues_idle(fbn->fbd, true);
	if (err)
		goto err_start_stack;

	err = fbnic_set_netif_queues(clone);
	if (err)
		goto err_start_stack;

	/* Nothing can fail past this point */
	fbnic_flush(fbn);

	fbnic_clone_swap(fbn, clone);

	/* Reset RSS indirection table */
	fbnic_reset_indir_tbl(fbn);

	fbnic_up(fbn);

	/* The clone now holds the old resources; free them */
	fbnic_free_resources(clone);
	fbnic_free_napi_vectors(clone);
	fbnic_clone_free(clone);

	return 0;

err_start_stack:
	/* Restart the stack with the original configuration */
	fbnic_flush(fbn);
	fbnic_up(fbn);
	fbnic_free_resources(clone);
err_free_napis:
	fbnic_free_napi_vectors(clone);
err_free_clone:
	fbnic_clone_free(clone);
	return err;
}
1588 
/* Report hardware timestamping capabilities: PHC index, supported
 * SO_TIMESTAMPING flags, Tx timestamp modes, and Rx filters.
 */
static int
fbnic_get_ts_info(struct net_device *netdev,
		  struct kernel_ethtool_ts_info *tsinfo)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);

	tsinfo->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;

	tsinfo->tx_types =
		BIT(HWTSTAMP_TX_OFF) |
		BIT(HWTSTAMP_TX_ON);

	tsinfo->rx_filters =
		BIT(HWTSTAMP_FILTER_NONE) |
		BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
		BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
		BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
		BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}
1617 
/* Aggregate Tx timestamp statistics.
 *
 * Starts from the netdev-level accumulators in fbn->tx_stats, then
 * adds each live ring's counters, reading them under the u64_stats
 * seqcount so each ring's pair is a consistent snapshot.
 */
static void fbnic_get_ts_stats(struct net_device *netdev,
			       struct ethtool_ts_stats *ts_stats)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	u64 ts_packets, ts_lost;
	struct fbnic_ring *ring;
	unsigned int start;
	int i;

	ts_stats->pkts = fbn->tx_stats.twq.ts_packets;
	ts_stats->lost = fbn->tx_stats.twq.ts_lost;
	for (i = 0; i < fbn->num_tx_queues; i++) {
		ring = fbn->tx[i];
		/* Retry if a writer updated the ring stats mid-read */
		do {
			start = u64_stats_fetch_begin(&ring->stats.syncp);
			ts_packets = ring->stats.twq.ts_packets;
			ts_lost = ring->stats.twq.ts_lost;
		} while (u64_stats_fetch_retry(&ring->stats.syncp, start));
		ts_stats->pkts += ts_packets;
		ts_stats->lost += ts_lost;
	}
}
1640 
/* Read a page of the QSFP module EEPROM via a firmware mailbox request.
 *
 * Allocates a completion, asks firmware to perform the read, waits for
 * the response, and copies the returned bytes into @page_data. Returns
 * the number of bytes read on success or a negative errno.
 */
static int
fbnic_get_module_eeprom_by_page(struct net_device *netdev,
				const struct ethtool_module_eeprom *page_data,
				struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_fw_completion *fw_cmpl;
	struct fbnic_dev *fbd = fbn->fbd;
	int err;

	/* Only the standard SFF module address is supported */
	if (page_data->i2c_address != 0x50) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid i2c address. Only 0x50 is supported");
		return -EINVAL;
	}

	fw_cmpl = __fbnic_fw_alloc_cmpl(FBNIC_TLV_MSG_ID_QSFP_READ_RESP,
					page_data->length);
	if (!fw_cmpl)
		return -ENOMEM;

	/* Initialize completion and queue it for FW to process */
	fw_cmpl->u.qsfp.length = page_data->length;
	fw_cmpl->u.qsfp.offset = page_data->offset;
	fw_cmpl->u.qsfp.page = page_data->page;
	fw_cmpl->u.qsfp.bank = page_data->bank;

	err = fbnic_fw_xmit_qsfp_read_msg(fbd, fw_cmpl, page_data->page,
					  page_data->bank, page_data->offset,
					  page_data->length);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to transmit EEPROM read request");
		goto exit_free;
	}

	if (!fbnic_mbx_wait_for_cmpl(fw_cmpl)) {
		err = -ETIMEDOUT;
		NL_SET_ERR_MSG_MOD(extack,
				   "Timed out waiting for firmware response");
		goto exit_cleanup;
	}

	if (fw_cmpl->result) {
		err = fw_cmpl->result;
		NL_SET_ERR_MSG_MOD(extack, "Failed to read EEPROM");
		goto exit_cleanup;
	}

	memcpy(page_data->data, fw_cmpl->u.qsfp.data, page_data->length);

exit_cleanup:
	/* Completion was queued; detach it from the mailbox first */
	fbnic_mbx_clear_cmpl(fbd, fw_cmpl);
exit_free:
	fbnic_fw_put_cmpl(fw_cmpl);

	return err ? : page_data->length;
}
1699 
1700 static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
1701 {
1702 	if (counter->reported)
1703 		*stat = counter->value;
1704 }
1705 
1706 static void
1707 fbnic_get_pause_stats(struct net_device *netdev,
1708 		      struct ethtool_pause_stats *pause_stats)
1709 {
1710 	struct fbnic_net *fbn = netdev_priv(netdev);
1711 	struct fbnic_mac_stats *mac_stats;
1712 	struct fbnic_dev *fbd = fbn->fbd;
1713 
1714 	mac_stats = &fbd->hw_stats.mac;
1715 
1716 	fbd->mac->get_pause_stats(fbd, false, &mac_stats->pause);
1717 
1718 	pause_stats->tx_pause_frames = mac_stats->pause.tx_pause_frames.value;
1719 	pause_stats->rx_pause_frames = mac_stats->pause.rx_pause_frames.value;
1720 }
1721 
/* Report FEC corrected/uncorrectable block counts.
 *
 * Refreshes the 32-bit hardware stats, then snapshots the PHY FEC
 * counters under hw_stats.lock for a consistent pair of values.
 */
static void
fbnic_get_fec_stats(struct net_device *netdev,
		    struct ethtool_fec_stats *fec_stats,
		    struct ethtool_fec_hist *hist)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_phy_stats *phy_stats;
	struct fbnic_dev *fbd = fbn->fbd;

	fbnic_get_hw_stats32(fbd);
	phy_stats = &fbd->hw_stats.phy;

	spin_lock(&fbd->hw_stats.lock);
	fec_stats->corrected_blocks.total =
		phy_stats->fec.corrected_blocks.value;
	fec_stats->uncorrectable_blocks.total =
		phy_stats->fec.uncorrectable_blocks.value;
	spin_unlock(&fbd->hw_stats.lock);
}
1741 
/* Report PHY symbol error statistics.
 *
 * Sums the per-lane PCS symbol error counters under hw_stats.lock and
 * reports the total as SymbolErrorDuringCarrier.
 */
static void
fbnic_get_eth_phy_stats(struct net_device *netdev,
			struct ethtool_eth_phy_stats *eth_phy_stats)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_phy_stats *phy_stats;
	struct fbnic_dev *fbd = fbn->fbd;
	u64 total = 0;
	int i;

	fbnic_get_hw_stats32(fbd);
	phy_stats = &fbd->hw_stats.phy;

	spin_lock(&fbd->hw_stats.lock);
	for (i = 0; i < FBNIC_PCS_MAX_LANES; i++)
		total += phy_stats->pcs.SymbolErrorDuringCarrier.lanes[i].value;

	eth_phy_stats->SymbolErrorDuringCarrier = total;
	spin_unlock(&fbd->hw_stats.lock);
}
1762 
1763 static void
1764 fbnic_get_eth_mac_stats(struct net_device *netdev,
1765 			struct ethtool_eth_mac_stats *eth_mac_stats)
1766 {
1767 	struct fbnic_net *fbn = netdev_priv(netdev);
1768 	struct fbnic_mac_stats *mac_stats;
1769 	struct fbnic_dev *fbd = fbn->fbd;
1770 	const struct fbnic_mac *mac;
1771 
1772 	mac_stats = &fbd->hw_stats.mac;
1773 	mac = fbd->mac;
1774 
1775 	mac->get_eth_mac_stats(fbd, false, &mac_stats->eth_mac);
1776 
1777 	fbnic_set_counter(&eth_mac_stats->FramesTransmittedOK,
1778 			  &mac_stats->eth_mac.FramesTransmittedOK);
1779 	fbnic_set_counter(&eth_mac_stats->FramesReceivedOK,
1780 			  &mac_stats->eth_mac.FramesReceivedOK);
1781 	fbnic_set_counter(&eth_mac_stats->FrameCheckSequenceErrors,
1782 			  &mac_stats->eth_mac.FrameCheckSequenceErrors);
1783 	fbnic_set_counter(&eth_mac_stats->AlignmentErrors,
1784 			  &mac_stats->eth_mac.AlignmentErrors);
1785 	fbnic_set_counter(&eth_mac_stats->OctetsTransmittedOK,
1786 			  &mac_stats->eth_mac.OctetsTransmittedOK);
1787 	fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACXmitError,
1788 			  &mac_stats->eth_mac.FramesLostDueToIntMACXmitError);
1789 	fbnic_set_counter(&eth_mac_stats->OctetsReceivedOK,
1790 			  &mac_stats->eth_mac.OctetsReceivedOK);
1791 	fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACRcvError,
1792 			  &mac_stats->eth_mac.FramesLostDueToIntMACRcvError);
1793 	fbnic_set_counter(&eth_mac_stats->MulticastFramesXmittedOK,
1794 			  &mac_stats->eth_mac.MulticastFramesXmittedOK);
1795 	fbnic_set_counter(&eth_mac_stats->BroadcastFramesXmittedOK,
1796 			  &mac_stats->eth_mac.BroadcastFramesXmittedOK);
1797 	fbnic_set_counter(&eth_mac_stats->MulticastFramesReceivedOK,
1798 			  &mac_stats->eth_mac.MulticastFramesReceivedOK);
1799 	fbnic_set_counter(&eth_mac_stats->BroadcastFramesReceivedOK,
1800 			  &mac_stats->eth_mac.BroadcastFramesReceivedOK);
1801 	fbnic_set_counter(&eth_mac_stats->FrameTooLongErrors,
1802 			  &mac_stats->eth_mac.FrameTooLongErrors);
1803 }
1804 
1805 static void
1806 fbnic_get_eth_ctrl_stats(struct net_device *netdev,
1807 			 struct ethtool_eth_ctrl_stats *eth_ctrl_stats)
1808 {
1809 	struct fbnic_net *fbn = netdev_priv(netdev);
1810 	struct fbnic_mac_stats *mac_stats;
1811 	struct fbnic_dev *fbd = fbn->fbd;
1812 
1813 	mac_stats = &fbd->hw_stats.mac;
1814 
1815 	fbd->mac->get_eth_ctrl_stats(fbd, false, &mac_stats->eth_ctrl);
1816 
1817 	eth_ctrl_stats->MACControlFramesReceived =
1818 		mac_stats->eth_ctrl.MACControlFramesReceived.value;
1819 	eth_ctrl_stats->MACControlFramesTransmitted =
1820 		mac_stats->eth_ctrl.MACControlFramesTransmitted.value;
1821 }
1822 
/* Packet-size buckets for the RMON histogram. The array is
 * zero-terminated so consumers can iterate until .high == 0.
 */
static const struct ethtool_rmon_hist_range fbnic_rmon_ranges[] = {
	{    0,   64 },
	{   65,  127 },
	{  128,  255 },
	{  256,  511 },
	{  512, 1023 },
	{ 1024, 1518 },
	{ 1519, 2047 },
	{ 2048, 4095 },
	{ 4096, 8191 },
	{ 8192, 9216 },
	{ 9217, FBNIC_MAX_JUMBO_FRAME_SIZE },
	{}
};
1837 
/* Report RMON statistics: malformed-frame counters plus Rx/Tx
 * packet-size histograms bucketed per fbnic_rmon_ranges.
 */
static void
fbnic_get_rmon_stats(struct net_device *netdev,
		     struct ethtool_rmon_stats *rmon_stats,
		     const struct ethtool_rmon_hist_range **ranges)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	struct fbnic_mac_stats *mac_stats;
	struct fbnic_dev *fbd = fbn->fbd;
	int i;

	mac_stats = &fbd->hw_stats.mac;

	fbd->mac->get_rmon_stats(fbd, false, &mac_stats->rmon);

	rmon_stats->undersize_pkts =
		mac_stats->rmon.undersize_pkts.value;
	rmon_stats->oversize_pkts =
		mac_stats->rmon.oversize_pkts.value;
	rmon_stats->fragments =
		mac_stats->rmon.fragments.value;
	rmon_stats->jabbers =
		mac_stats->rmon.jabbers.value;

	/* Copy histogram buckets; ranges table is zero-terminated */
	for (i = 0; fbnic_rmon_ranges[i].high; i++) {
		rmon_stats->hist[i] = mac_stats->rmon.hist[i].value;
		rmon_stats->hist_tx[i] = mac_stats->rmon.hist_tx[i].value;
	}

	*ranges = fbnic_rmon_ranges;
}
1868 
1869 static void fbnic_get_link_ext_stats(struct net_device *netdev,
1870 				     struct ethtool_link_ext_stats *stats)
1871 {
1872 	struct fbnic_net *fbn = netdev_priv(netdev);
1873 
1874 	stats->link_down_events = fbn->link_down_events;
1875 }
1876 
/* ethtool operations table for fbnic netdevs */
static const struct ethtool_ops fbnic_ethtool_ops = {
	.cap_link_lanes_supported	= true,
	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS |
					  ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.supported_ring_params		= ETHTOOL_RING_USE_TCP_DATA_SPLIT |
					  ETHTOOL_RING_USE_HDS_THRS,
	.rxfh_max_num_contexts		= FBNIC_RPC_RSS_TBL_COUNT,
	.get_drvinfo			= fbnic_get_drvinfo,
	.get_regs_len			= fbnic_get_regs_len,
	.get_regs			= fbnic_get_regs,
	.get_link			= ethtool_op_get_link,
	.get_link_ext_stats		= fbnic_get_link_ext_stats,
	.get_coalesce			= fbnic_get_coalesce,
	.set_coalesce			= fbnic_set_coalesce,
	.get_ringparam			= fbnic_get_ringparam,
	.set_ringparam			= fbnic_set_ringparam,
	.get_pause_stats		= fbnic_get_pause_stats,
	.get_pauseparam			= fbnic_phylink_get_pauseparam,
	.set_pauseparam			= fbnic_phylink_set_pauseparam,
	.get_strings			= fbnic_get_strings,
	.get_ethtool_stats		= fbnic_get_ethtool_stats,
	.get_sset_count			= fbnic_get_sset_count,
	.get_rxnfc			= fbnic_get_rxnfc,
	.set_rxnfc			= fbnic_set_rxnfc,
	.get_rx_ring_count		= fbnic_get_rx_ring_count,
	.get_rxfh_key_size		= fbnic_get_rxfh_key_size,
	.get_rxfh_indir_size		= fbnic_get_rxfh_indir_size,
	.get_rxfh			= fbnic_get_rxfh,
	.set_rxfh			= fbnic_set_rxfh,
	.get_rxfh_fields		= fbnic_get_rss_hash_opts,
	.set_rxfh_fields		= fbnic_set_rss_hash_opts,
	.create_rxfh_context		= fbnic_create_rxfh_context,
	.modify_rxfh_context		= fbnic_modify_rxfh_context,
	.remove_rxfh_context		= fbnic_remove_rxfh_context,
	.get_channels			= fbnic_get_channels,
	.set_channels			= fbnic_set_channels,
	.get_ts_info			= fbnic_get_ts_info,
	.get_ts_stats			= fbnic_get_ts_stats,
	.get_link_ksettings		= fbnic_phylink_ethtool_ksettings_get,
	.get_fec_stats			= fbnic_get_fec_stats,
	.get_fecparam			= fbnic_phylink_get_fecparam,
	.get_module_eeprom_by_page	= fbnic_get_module_eeprom_by_page,
	.get_eth_phy_stats		= fbnic_get_eth_phy_stats,
	.get_eth_mac_stats		= fbnic_get_eth_mac_stats,
	.get_eth_ctrl_stats		= fbnic_get_eth_ctrl_stats,
	.get_rmon_stats			= fbnic_get_rmon_stats,
};
1924 
/* Attach the fbnic ethtool operations table to a netdev */
void fbnic_set_ethtool_ops(struct net_device *dev)
{
	dev->ethtool_ops = &fbnic_ethtool_ops;
}
1929