xref: /linux/drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c (revision 1cc3462159babb69c84c39cb1b4e262aef3ea325)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3 
4 #include <linux/ethtool.h>
5 #include <linux/netdevice.h>
6 #include <linux/pci.h>
7 #include <net/ipv6.h>
8 
9 #include "fbnic.h"
10 #include "fbnic_netdev.h"
11 #include "fbnic_tlv.h"
12 
13 struct fbnic_stat {
14 	u8 string[ETH_GSTRING_LEN];
15 	unsigned int size;
16 	unsigned int offset;
17 };
18 
19 #define FBNIC_STAT_FIELDS(type, name, stat) { \
20 	.string = name, \
21 	.size = sizeof_field(struct type, stat), \
22 	.offset = offsetof(struct type, stat), \
23 }
24 
25 /* Hardware statistics not captured in rtnl_link_stats */
26 #define FBNIC_HW_STAT(name, stat) \
27 	FBNIC_STAT_FIELDS(fbnic_hw_stats, name, stat)
28 
29 static const struct fbnic_stat fbnic_gstrings_hw_stats[] = {
30 	/* RPC */
31 	FBNIC_HW_STAT("rpc_unkn_etype", rpc.unkn_etype),
32 	FBNIC_HW_STAT("rpc_unkn_ext_hdr", rpc.unkn_ext_hdr),
33 	FBNIC_HW_STAT("rpc_ipv4_frag", rpc.ipv4_frag),
34 	FBNIC_HW_STAT("rpc_ipv6_frag", rpc.ipv6_frag),
35 	FBNIC_HW_STAT("rpc_ipv4_esp", rpc.ipv4_esp),
36 	FBNIC_HW_STAT("rpc_ipv6_esp", rpc.ipv6_esp),
37 	FBNIC_HW_STAT("rpc_tcp_opt_err", rpc.tcp_opt_err),
38 	FBNIC_HW_STAT("rpc_out_of_hdr_err", rpc.out_of_hdr_err),
39 };
40 
41 #define FBNIC_HW_FIXED_STATS_LEN ARRAY_SIZE(fbnic_gstrings_hw_stats)
42 #define FBNIC_HW_STATS_LEN	FBNIC_HW_FIXED_STATS_LEN
43 
44 static void
45 fbnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
46 {
47 	struct fbnic_net *fbn = netdev_priv(netdev);
48 	struct fbnic_dev *fbd = fbn->fbd;
49 
50 	fbnic_get_fw_ver_commit_str(fbd, drvinfo->fw_version,
51 				    sizeof(drvinfo->fw_version));
52 }
53 
54 static int fbnic_get_regs_len(struct net_device *netdev)
55 {
56 	struct fbnic_net *fbn = netdev_priv(netdev);
57 
58 	return fbnic_csr_regs_len(fbn->fbd) * sizeof(u32);
59 }
60 
61 static void fbnic_get_regs(struct net_device *netdev,
62 			   struct ethtool_regs *regs, void *data)
63 {
64 	struct fbnic_net *fbn = netdev_priv(netdev);
65 
66 	fbnic_csr_get_regs(fbn->fbd, data, &regs->version);
67 }
68 
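/* Duplicate the netdev private data so a new queue configuration can be
 * staged without touching the live one. The Tx/Rx ring and NAPI pointers
 * are cleared so the clone allocates its own resources.
 */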
69 static struct fbnic_net *fbnic_clone_create(struct fbnic_net *orig)
70 {
71 	struct fbnic_net *clone;
72 
73 	clone = kmemdup(orig, sizeof(*orig), GFP_KERNEL);
74 	if (!clone)
75 		return NULL;
76 
77 	memset(clone->tx, 0, sizeof(clone->tx));
78 	memset(clone->rx, 0, sizeof(clone->rx));
79 	memset(clone->napi, 0, sizeof(clone->napi));
80 	return clone;
81 }
82 
83 static void fbnic_clone_swap_cfg(struct fbnic_net *orig,
84 				 struct fbnic_net *clone)
85 {
86 	swap(clone->rcq_size, orig->rcq_size);
87 	swap(clone->hpq_size, orig->hpq_size);
88 	swap(clone->ppq_size, orig->ppq_size);
89 	swap(clone->txq_size, orig->txq_size);
90 	swap(clone->num_rx_queues, orig->num_rx_queues);
91 	swap(clone->num_tx_queues, orig->num_tx_queues);
92 	swap(clone->num_napi, orig->num_napi);
93 }
94 
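/* Fold the counters of every ring owned by this NAPI vector into the
 * netdev-level totals. Tx queue triads occupy the first txt_count slots
 * of nv->qt[], with the Rx triads following at the same running index.
 */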
95 static void fbnic_aggregate_vector_counters(struct fbnic_net *fbn,
96 					    struct fbnic_napi_vector *nv)
97 {
98 	int i, j;
99 
100 	for (i = 0; i < nv->txt_count; i++) {
101 		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub0);
102 		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].sub1);
103 		fbnic_aggregate_ring_tx_counters(fbn, &nv->qt[i].cmpl);
104 	}
105 
106 	for (j = 0; j < nv->rxt_count; j++, i++) {
107 		fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub0);
108 		fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].sub1);
109 		fbnic_aggregate_ring_rx_counters(fbn, &nv->qt[i].cmpl);
110 	}
111 }
112 
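/* Exchange the staged clone configuration with the live one: quiesce the
 * NAPI IRQs, fold the outstanding ring counters into the netdev totals,
 * then swap the queue parameters and the ring/NAPI pointer arrays.
 */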
113 static void fbnic_clone_swap(struct fbnic_net *orig,
114 			     struct fbnic_net *clone)
115 {
116 	struct fbnic_dev *fbd = orig->fbd;
117 	unsigned int i;
118 
119 	for (i = 0; i < max(clone->num_napi, orig->num_napi); i++)
120 		fbnic_synchronize_irq(fbd, FBNIC_NON_NAPI_VECTORS + i);
121 	for (i = 0; i < orig->num_napi; i++)
122 		fbnic_aggregate_vector_counters(orig, orig->napi[i]);
123 
124 	fbnic_clone_swap_cfg(orig, clone);
125 
126 	for (i = 0; i < ARRAY_SIZE(orig->napi); i++)
127 		swap(clone->napi[i], orig->napi[i]);
128 	for (i = 0; i < ARRAY_SIZE(orig->tx); i++)
129 		swap(clone->tx[i], orig->tx[i]);
130 	for (i = 0; i < ARRAY_SIZE(orig->rx); i++)
131 		swap(clone->rx[i], orig->rx[i]);
132 }
133 
134 static void fbnic_clone_free(struct fbnic_net *clone)
135 {
136 	kfree(clone);
137 }
138 
139 static int fbnic_get_coalesce(struct net_device *netdev,
140 			      struct ethtool_coalesce *ec,
141 			      struct kernel_ethtool_coalesce *kernel_coal,
142 			      struct netlink_ext_ack *extack)
143 {
144 	struct fbnic_net *fbn = netdev_priv(netdev);
145 
146 	ec->tx_coalesce_usecs = fbn->tx_usecs;
147 	ec->rx_coalesce_usecs = fbn->rx_usecs;
148 	ec->rx_max_coalesced_frames = fbn->rx_max_frames;
149 
150 	return 0;
151 }
152 
153 static int fbnic_set_coalesce(struct net_device *netdev,
154 			      struct ethtool_coalesce *ec,
155 			      struct kernel_ethtool_coalesce *kernel_coal,
156 			      struct netlink_ext_ack *extack)
157 {
158 	struct fbnic_net *fbn = netdev_priv(netdev);
159 
160 	/* Verify against hardware limits */
161 	if (ec->rx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT)) {
162 		NL_SET_ERR_MSG_MOD(extack, "rx_usecs is above device max");
163 		return -EINVAL;
164 	}
165 	if (ec->tx_coalesce_usecs > FIELD_MAX(FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT)) {
166 		NL_SET_ERR_MSG_MOD(extack, "tx_usecs is above device max");
167 		return -EINVAL;
168 	}
169 	if (ec->rx_max_coalesced_frames >
170 	    FIELD_MAX(FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK) /
171 	    FBNIC_MIN_RXD_PER_FRAME) {
172 		NL_SET_ERR_MSG_MOD(extack, "rx_frames is above device max");
173 		return -EINVAL;
174 	}
175 
176 	fbn->tx_usecs = ec->tx_coalesce_usecs;
177 	fbn->rx_usecs = ec->rx_coalesce_usecs;
178 	fbn->rx_max_frames = ec->rx_max_coalesced_frames;
179 
180 	if (netif_running(netdev)) {
181 		int i;
182 
183 		for (i = 0; i < fbn->num_napi; i++) {
184 			struct fbnic_napi_vector *nv = fbn->napi[i];
185 
186 			fbnic_config_txrx_usecs(nv, 0);
187 			fbnic_config_rx_frames(nv);
188 		}
189 	}
190 
191 	return 0;
192 }
193 
194 static void fbnic_get_strings(struct net_device *dev, u32 sset, u8 *data)
195 {
196 	int i;
197 
198 	switch (sset) {
199 	case ETH_SS_STATS:
200 		for (i = 0; i < FBNIC_HW_STATS_LEN; i++)
201 			ethtool_puts(&data, fbnic_gstrings_hw_stats[i].string);
202 		break;
203 	}
204 }
205 
206 static void fbnic_get_ethtool_stats(struct net_device *dev,
207 				    struct ethtool_stats *stats, u64 *data)
208 {
209 	struct fbnic_net *fbn = netdev_priv(dev);
210 	const struct fbnic_stat *stat;
211 	int i;
212 
213 	fbnic_get_hw_stats(fbn->fbd);
214 
215 	for (i = 0; i < FBNIC_HW_STATS_LEN; i++) {
216 		stat = &fbnic_gstrings_hw_stats[i];
217 		data[i] = *(u64 *)((u8 *)&fbn->fbd->hw_stats + stat->offset);
218 	}
219 }
220 
221 static int fbnic_get_sset_count(struct net_device *dev, int sset)
222 {
223 	switch (sset) {
224 	case ETH_SS_STATS:
225 		return FBNIC_HW_STATS_LEN;
226 	default:
227 		return -EOPNOTSUPP;
228 	}
229 }
230 
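/* Map an ethtool flow type to the driver's RSS hash option index,
 * ignoring the FLOW_EXT/FLOW_MAC_EXT/FLOW_RSS modifier flags.
 * Returns -1 for unsupported flow types.
 */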
231 static int fbnic_get_rss_hash_idx(u32 flow_type)
232 {
233 	switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
234 	case TCP_V4_FLOW:
235 		return FBNIC_TCP4_HASH_OPT;
236 	case TCP_V6_FLOW:
237 		return FBNIC_TCP6_HASH_OPT;
238 	case UDP_V4_FLOW:
239 		return FBNIC_UDP4_HASH_OPT;
240 	case UDP_V6_FLOW:
241 		return FBNIC_UDP6_HASH_OPT;
242 	case AH_V4_FLOW:
243 	case ESP_V4_FLOW:
244 	case AH_ESP_V4_FLOW:
245 	case SCTP_V4_FLOW:
246 	case IPV4_FLOW:
247 	case IPV4_USER_FLOW:
248 		return FBNIC_IPV4_HASH_OPT;
249 	case AH_V6_FLOW:
250 	case ESP_V6_FLOW:
251 	case AH_ESP_V6_FLOW:
252 	case SCTP_V6_FLOW:
253 	case IPV6_FLOW:
254 	case IPV6_USER_FLOW:
255 		return FBNIC_IPV6_HASH_OPT;
256 	case ETHER_FLOW:
257 		return FBNIC_ETHER_HASH_OPT;
258 	}
259 
260 	return -1;
261 }
262 
263 static int
264 fbnic_get_rss_hash_opts(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
265 {
266 	int hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
267 
268 	if (hash_opt_idx < 0)
269 		return -EINVAL;
270 
271 	/* Report options from the rss_flow_hash table in fbn */
272 	cmd->data = fbn->rss_flow_hash[hash_opt_idx];
273 
274 	return 0;
275 }
276 
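/* Walk the NFC region of the action TCAM reporting the location of each
 * valid rule. Returns the number of rules found, or -EMSGSIZE if the
 * caller's rule_locs array cannot hold them all.
 */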
277 static int fbnic_get_cls_rule_all(struct fbnic_net *fbn,
278 				  struct ethtool_rxnfc *cmd,
279 				  u32 *rule_locs)
280 {
281 	struct fbnic_dev *fbd = fbn->fbd;
282 	int i, cnt = 0;
283 
284 	/* Report maximum rule count */
285 	cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;
286 
287 	for (i = 0; i < FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i++) {
288 		int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
289 		struct fbnic_act_tcam *act_tcam;
290 
291 		act_tcam = &fbd->act_tcam[idx];
292 		if (act_tcam->state != FBNIC_TCAM_S_VALID)
293 			continue;
294 
295 		if (rule_locs) {
296 			if (cnt == cmd->rule_cnt)
297 				return -EMSGSIZE;
298 
299 			rule_locs[cnt] = i;
300 		}
301 
302 		cnt++;
303 	}
304 
305 	return cnt;
306 }
307 
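/* Rebuild an ethtool flow spec from the action TCAM entry at the requested
 * location, decoding the flow type, the MAC/IP/port matches, and the
 * resulting action (drop, queue selection, or RSS context).
 */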
308 static int fbnic_get_cls_rule(struct fbnic_net *fbn, struct ethtool_rxnfc *cmd)
309 {
310 	struct ethtool_rx_flow_spec *fsp;
311 	struct fbnic_dev *fbd = fbn->fbd;
312 	struct fbnic_act_tcam *act_tcam;
313 	int idx;
314 
315 	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
316 
317 	if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
318 		return -EINVAL;
319 
320 	idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
321 	act_tcam = &fbd->act_tcam[idx];
322 
323 	if (act_tcam->state != FBNIC_TCAM_S_VALID)
324 		return -EINVAL;
325 
326 	/* Report maximum rule count */
327 	cmd->data = FBNIC_RPC_ACT_TBL_NFC_ENTRIES;
328 
329 	/* Set flow type field */
330 	if (!(act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_VALID)) {
331 		fsp->flow_type = ETHER_FLOW;
332 		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
333 			       act_tcam->mask.tcam[1])) {
334 			struct fbnic_mac_addr *mac_addr;
335 
336 			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
337 					act_tcam->value.tcam[1]);
338 			mac_addr = &fbd->mac_addr[idx];
339 
340 			ether_addr_copy(fsp->h_u.ether_spec.h_dest,
341 					mac_addr->value.addr8);
342 			eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
343 		}
344 	} else if (act_tcam->value.tcam[1] &
345 		   FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID) {
346 		fsp->flow_type = IPV6_USER_FLOW;
347 		fsp->h_u.usr_ip6_spec.l4_proto = IPPROTO_IPV6;
348 		fsp->m_u.usr_ip6_spec.l4_proto = 0xff;
349 
350 		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
351 			       act_tcam->mask.tcam[0])) {
352 			struct fbnic_ip_addr *ip_addr;
353 			int i;
354 
355 			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
356 					act_tcam->value.tcam[0]);
357 			ip_addr = &fbd->ipo_src[idx];
358 
359 			for (i = 0; i < 4; i++) {
360 				fsp->h_u.usr_ip6_spec.ip6src[i] =
361 					ip_addr->value.s6_addr32[i];
362 				fsp->m_u.usr_ip6_spec.ip6src[i] =
363 					~ip_addr->mask.s6_addr32[i];
364 			}
365 		}
366 
367 		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
368 			       act_tcam->mask.tcam[0])) {
369 			struct fbnic_ip_addr *ip_addr;
370 			int i;
371 
372 			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
373 					act_tcam->value.tcam[0]);
374 			ip_addr = &fbd->ipo_dst[idx];
375 
376 			for (i = 0; i < 4; i++) {
377 				fsp->h_u.usr_ip6_spec.ip6dst[i] =
378 					ip_addr->value.s6_addr32[i];
379 				fsp->m_u.usr_ip6_spec.ip6dst[i] =
380 					~ip_addr->mask.s6_addr32[i];
381 			}
382 		}
383 	} else if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_IP_IS_V6)) {
384 		if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
385 			if (act_tcam->value.tcam[1] &
386 			    FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
387 				fsp->flow_type = UDP_V6_FLOW;
388 			else
389 				fsp->flow_type = TCP_V6_FLOW;
390 			fsp->h_u.tcp_ip6_spec.psrc =
391 				cpu_to_be16(act_tcam->value.tcam[3]);
392 			fsp->m_u.tcp_ip6_spec.psrc =
393 				cpu_to_be16(~act_tcam->mask.tcam[3]);
394 			fsp->h_u.tcp_ip6_spec.pdst =
395 				cpu_to_be16(act_tcam->value.tcam[4]);
396 			fsp->m_u.tcp_ip6_spec.pdst =
397 				cpu_to_be16(~act_tcam->mask.tcam[4]);
398 		} else {
399 			fsp->flow_type = IPV6_USER_FLOW;
400 		}
401 
402 		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
403 			       act_tcam->mask.tcam[0])) {
404 			struct fbnic_ip_addr *ip_addr;
405 			int i;
406 
407 			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
408 					act_tcam->value.tcam[0]);
409 			ip_addr = &fbd->ip_src[idx];
410 
411 			for (i = 0; i < 4; i++) {
412 				fsp->h_u.usr_ip6_spec.ip6src[i] =
413 					ip_addr->value.s6_addr32[i];
414 				fsp->m_u.usr_ip6_spec.ip6src[i] =
415 					~ip_addr->mask.s6_addr32[i];
416 			}
417 		}
418 
419 		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
420 			       act_tcam->mask.tcam[0])) {
421 			struct fbnic_ip_addr *ip_addr;
422 			int i;
423 
424 			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
425 					act_tcam->value.tcam[0]);
426 			ip_addr = &fbd->ip_dst[idx];
427 
428 			for (i = 0; i < 4; i++) {
429 				fsp->h_u.usr_ip6_spec.ip6dst[i] =
430 					ip_addr->value.s6_addr32[i];
431 				fsp->m_u.usr_ip6_spec.ip6dst[i] =
432 					~ip_addr->mask.s6_addr32[i];
433 			}
434 		}
435 	} else {
436 		if (act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L4_VALID) {
437 			if (act_tcam->value.tcam[1] &
438 			    FBNIC_RPC_TCAM_ACT1_L4_IS_UDP)
439 				fsp->flow_type = UDP_V4_FLOW;
440 			else
441 				fsp->flow_type = TCP_V4_FLOW;
442 			fsp->h_u.tcp_ip4_spec.psrc =
443 				cpu_to_be16(act_tcam->value.tcam[3]);
444 			fsp->m_u.tcp_ip4_spec.psrc =
445 				cpu_to_be16(~act_tcam->mask.tcam[3]);
446 			fsp->h_u.tcp_ip4_spec.pdst =
447 				cpu_to_be16(act_tcam->value.tcam[4]);
448 			fsp->m_u.tcp_ip4_spec.pdst =
449 				cpu_to_be16(~act_tcam->mask.tcam[4]);
450 		} else {
451 			fsp->flow_type = IPV4_USER_FLOW;
452 			fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
453 		}
454 
455 		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
456 			       act_tcam->mask.tcam[0])) {
457 			struct fbnic_ip_addr *ip_addr;
458 
459 			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
460 					act_tcam->value.tcam[0]);
461 			ip_addr = &fbd->ip_src[idx];
462 
463 			fsp->h_u.usr_ip4_spec.ip4src =
464 				ip_addr->value.s6_addr32[3];
465 			fsp->m_u.usr_ip4_spec.ip4src =
466 				~ip_addr->mask.s6_addr32[3];
467 		}
468 
469 		if (!FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
470 			       act_tcam->mask.tcam[0])) {
471 			struct fbnic_ip_addr *ip_addr;
472 
473 			idx = FIELD_GET(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
474 					act_tcam->value.tcam[0]);
475 			ip_addr = &fbd->ip_dst[idx];
476 
477 			fsp->h_u.usr_ip4_spec.ip4dst =
478 				ip_addr->value.s6_addr32[3];
479 			fsp->m_u.usr_ip4_spec.ip4dst =
480 				~ip_addr->mask.s6_addr32[3];
481 		}
482 	}
483 
484 	/* Record action */
485 	if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_DROP)
486 		fsp->ring_cookie = RX_CLS_FLOW_DISC;
487 	else if (act_tcam->dest & FBNIC_RPC_ACT_TBL0_Q_SEL)
488 		fsp->ring_cookie = FIELD_GET(FBNIC_RPC_ACT_TBL0_Q_ID,
489 					     act_tcam->dest);
490 	else
491 		fsp->flow_type |= FLOW_RSS;
492 
493 	cmd->rss_context = FIELD_GET(FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID,
494 				     act_tcam->dest);
495 
496 	return 0;
497 }
498 
499 static int fbnic_get_rxnfc(struct net_device *netdev,
500 			   struct ethtool_rxnfc *cmd, u32 *rule_locs)
501 {
502 	struct fbnic_net *fbn = netdev_priv(netdev);
503 	int ret = -EOPNOTSUPP;
504 	u32 special = 0;
505 
506 	switch (cmd->cmd) {
507 	case ETHTOOL_GRXRINGS:
508 		cmd->data = fbn->num_rx_queues;
509 		ret = 0;
510 		break;
511 	case ETHTOOL_GRXFH:
512 		ret = fbnic_get_rss_hash_opts(fbn, cmd);
513 		break;
514 	case ETHTOOL_GRXCLSRULE:
515 		ret = fbnic_get_cls_rule(fbn, cmd);
516 		break;
517 	case ETHTOOL_GRXCLSRLCNT:
518 		rule_locs = NULL;
519 		special = RX_CLS_LOC_SPECIAL;
520 		fallthrough;
521 	case ETHTOOL_GRXCLSRLALL:
522 		ret = fbnic_get_cls_rule_all(fbn, cmd, rule_locs);
523 		if (ret < 0)
524 			break;
525 
526 		cmd->data |= special;
527 		cmd->rule_cnt = ret;
528 		ret = 0;
529 		break;
530 	}
531 
532 	return ret;
533 }
534 
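/* RSS hash fields permitted per flow class; each level is a superset of
 * the previous one (L3 adds IP fields to L2, L4 adds port fields to L3).
 */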
535 #define FBNIC_L2_HASH_OPTIONS \
536 	(RXH_L2DA | RXH_DISCARD)
537 #define FBNIC_L3_HASH_OPTIONS \
538 	(FBNIC_L2_HASH_OPTIONS | RXH_IP_SRC | RXH_IP_DST)
539 #define FBNIC_L4_HASH_OPTIONS \
540 	(FBNIC_L3_HASH_OPTIONS | RXH_L4_B_0_1 | RXH_L4_B_2_3)
541 
542 static int
543 fbnic_set_rss_hash_opts(struct fbnic_net *fbn, const struct ethtool_rxnfc *cmd)
544 {
545 	int hash_opt_idx;
546 
547 	/* Verify the type requested is correct */
548 	hash_opt_idx = fbnic_get_rss_hash_idx(cmd->flow_type);
549 	if (hash_opt_idx < 0)
550 		return -EINVAL;
551 
552 	/* Verify the fields asked for can actually be assigned based on type */
553 	if (cmd->data & ~FBNIC_L4_HASH_OPTIONS ||
554 	    (hash_opt_idx > FBNIC_L4_HASH_OPT &&
555 	     cmd->data & ~FBNIC_L3_HASH_OPTIONS) ||
556 	    (hash_opt_idx > FBNIC_IP_HASH_OPT &&
557 	     cmd->data & ~FBNIC_L2_HASH_OPTIONS))
558 		return -EINVAL;
559 
560 	fbn->rss_flow_hash[hash_opt_idx] = cmd->data;
561 
562 	if (netif_running(fbn->netdev)) {
563 		fbnic_rss_reinit(fbn->fbd, fbn);
564 		fbnic_write_rules(fbn->fbd);
565 	}
566 
567 	return 0;
568 }
569 
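/* Scan the NFC region of the action TCAM from the top down for a slot that
 * is not currently valid. Returns its index, or -ENOSPC if none is free.
 */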
570 static int fbnic_cls_rule_any_loc(struct fbnic_dev *fbd)
571 {
572 	int i;
573 
574 	for (i = FBNIC_RPC_ACT_TBL_NFC_ENTRIES; i--;) {
575 		int idx = i + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
576 
577 		if (fbd->act_tcam[idx].state != FBNIC_TCAM_S_VALID)
578 			return i;
579 	}
580 
581 	return -ENOSPC;
582 }
583 
584 static int fbnic_set_cls_rule_ins(struct fbnic_net *fbn,
585 				  const struct ethtool_rxnfc *cmd)
586 {
587 	u16 flow_value = 0, flow_mask = 0xffff, ip_value = 0, ip_mask = 0xffff;
588 	u16 sport = 0, sport_mask = ~0, dport = 0, dport_mask = ~0;
589 	u16 misc = 0, misc_mask = ~0;
590 	u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
591 			      FBNIC_RPC_ACT_TBL0_DEST_HOST);
592 	struct fbnic_ip_addr *ip_src = NULL, *ip_dst = NULL;
593 	struct fbnic_mac_addr *mac_addr = NULL;
594 	struct ethtool_rx_flow_spec *fsp;
595 	struct fbnic_dev *fbd = fbn->fbd;
596 	struct fbnic_act_tcam *act_tcam;
597 	struct in6_addr *addr6, *mask6;
598 	struct in_addr *addr4, *mask4;
599 	int hash_idx, location;
600 	u32 flow_type;
601 	int idx, j;
602 
603 	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
604 
605 	if (fsp->location != RX_CLS_LOC_ANY)
606 		return -EINVAL;
607 	location = fbnic_cls_rule_any_loc(fbd);
608 	if (location < 0)
609 		return location;
610 
611 	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
612 		dest = FBNIC_RPC_ACT_TBL0_DROP;
613 	} else if (fsp->flow_type & FLOW_RSS) {
614 		if (cmd->rss_context == 1)
615 			dest |= FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID;
616 	} else {
617 		u32 ring_idx = ethtool_get_flow_spec_ring(fsp->ring_cookie);
618 
619 		if (ring_idx >= fbn->num_rx_queues)
620 			return -EINVAL;
621 
622 		dest |= FBNIC_RPC_ACT_TBL0_Q_SEL |
623 			FIELD_PREP(FBNIC_RPC_ACT_TBL0_Q_ID, ring_idx);
624 	}
625 
626 	idx = location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
627 	act_tcam = &fbd->act_tcam[idx];
628 
629 	/* Do not allow overwriting for now.
630 	 * To support overwriting rules we will need to add logic to free
631 	 * any IP or MACDA TCAMs that may be associated with the old rule.
632 	 */
633 	if (act_tcam->state != FBNIC_TCAM_S_DISABLED)
634 		return -EBUSY;
635 
636 	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_RSS);
637 	hash_idx = fbnic_get_rss_hash_idx(flow_type);
638 
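	/* Build the TCAM match. Masks start out as all don't-care (0xffff)
	 * and individual bits are cleared below for each field the rule
	 * must match on.
	 */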
639 	switch (flow_type) {
640 	case UDP_V4_FLOW:
641 udp4_flow:
642 		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
643 		fallthrough;
644 	case TCP_V4_FLOW:
645 tcp4_flow:
646 		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
647 		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
648 			       FBNIC_RPC_TCAM_ACT1_L4_VALID);
649 
650 		sport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.psrc);
651 		sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.psrc);
652 		dport = be16_to_cpu(fsp->h_u.tcp_ip4_spec.pdst);
653 		dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip4_spec.pdst);
654 		goto ip4_flow;
655 	case IP_USER_FLOW:
656 		if (!fsp->m_u.usr_ip4_spec.proto)
657 			goto ip4_flow;
658 		if (fsp->m_u.usr_ip4_spec.proto != 0xff)
659 			return -EINVAL;
660 		if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_UDP)
661 			goto udp4_flow;
662 		if (fsp->h_u.usr_ip4_spec.proto == IPPROTO_TCP)
663 			goto tcp4_flow;
664 		return -EINVAL;
665 ip4_flow:
666 		addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4src;
667 		mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4src;
668 		if (mask4->s_addr) {
669 			ip_src = __fbnic_ip4_sync(fbd, fbd->ip_src,
670 						  addr4, mask4);
671 			if (!ip_src)
672 				return -ENOSPC;
673 
674 			set_bit(idx, ip_src->act_tcam);
675 			ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
676 				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
677 					       ip_src - fbd->ip_src);
678 			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
679 				     FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
680 		}
681 
682 		addr4 = (struct in_addr *)&fsp->h_u.usr_ip4_spec.ip4dst;
683 		mask4 = (struct in_addr *)&fsp->m_u.usr_ip4_spec.ip4dst;
684 		if (mask4->s_addr) {
685 			ip_dst = __fbnic_ip4_sync(fbd, fbd->ip_dst,
686 						  addr4, mask4);
687 			if (!ip_dst) {
688 				if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
689 					memset(ip_src, 0, sizeof(*ip_src));
690 				return -ENOSPC;
691 			}
692 
693 			set_bit(idx, ip_dst->act_tcam);
694 			ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
695 				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
696 					       ip_dst - fbd->ip_dst);
697 			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
698 				     FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
699 		}
700 		flow_value |= FBNIC_RPC_TCAM_ACT1_IP_VALID |
701 			      FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
702 		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
703 			       FBNIC_RPC_TCAM_ACT1_IP_VALID |
704 			       FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
705 		break;
706 	case UDP_V6_FLOW:
707 udp6_flow:
708 		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_IS_UDP;
709 		fallthrough;
710 	case TCP_V6_FLOW:
711 tcp6_flow:
712 		flow_value |= FBNIC_RPC_TCAM_ACT1_L4_VALID;
713 		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_L4_IS_UDP |
714 			       FBNIC_RPC_TCAM_ACT1_L4_VALID);
715 
716 		sport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.psrc);
717 		sport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.psrc);
718 		dport = be16_to_cpu(fsp->h_u.tcp_ip6_spec.pdst);
719 		dport_mask = ~be16_to_cpu(fsp->m_u.tcp_ip6_spec.pdst);
720 		goto ipv6_flow;
721 	case IPV6_USER_FLOW:
722 		if (!fsp->m_u.usr_ip6_spec.l4_proto)
723 			goto ipv6_flow;
724 
725 		if (fsp->m_u.usr_ip6_spec.l4_proto != 0xff)
726 			return -EINVAL;
727 		if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_UDP)
728 			goto udp6_flow;
729 		if (fsp->h_u.usr_ip6_spec.l4_proto == IPPROTO_TCP)
730 			goto tcp6_flow;
731 		if (fsp->h_u.usr_ip6_spec.l4_proto != IPPROTO_IPV6)
732 			return -EINVAL;
733 
734 		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
735 		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
736 		if (!ipv6_addr_any(mask6)) {
737 			ip_src = __fbnic_ip6_sync(fbd, fbd->ipo_src,
738 						  addr6, mask6);
739 			if (!ip_src)
740 				return -ENOSPC;
741 
742 			set_bit(idx, ip_src->act_tcam);
743 			ip_value |=
744 				FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
745 				FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX,
746 					   ip_src - fbd->ipo_src);
747 			ip_mask &=
748 				~(FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
749 				  FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX);
750 		}
751 
752 		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
753 		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
754 		if (!ipv6_addr_any(mask6)) {
755 			ip_dst = __fbnic_ip6_sync(fbd, fbd->ipo_dst,
756 						  addr6, mask6);
757 			if (!ip_dst) {
758 				if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
759 					memset(ip_src, 0, sizeof(*ip_src));
760 				return -ENOSPC;
761 			}
762 
763 			set_bit(idx, ip_dst->act_tcam);
764 			ip_value |=
765 				FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
766 				FIELD_PREP(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX,
767 					   ip_dst - fbd->ipo_dst);
768 			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID |
769 				     FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX);
770 		}
771 
772 		flow_value |= FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
773 		flow_mask &= ~FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID;
774 ipv6_flow:
775 		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6src;
776 		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6src;
777 		if (!ip_src && !ipv6_addr_any(mask6)) {
778 			ip_src = __fbnic_ip6_sync(fbd, fbd->ip_src,
779 						  addr6, mask6);
780 			if (!ip_src)
781 				return -ENOSPC;
782 
783 			set_bit(idx, ip_src->act_tcam);
784 			ip_value |= FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
785 				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPSRC_IDX,
786 					       ip_src - fbd->ip_src);
787 			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
788 				       FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
789 				     FBNIC_RPC_TCAM_ACT0_IPSRC_IDX);
790 
791 		addr6 = (struct in6_addr *)fsp->h_u.usr_ip6_spec.ip6dst;
792 		mask6 = (struct in6_addr *)fsp->m_u.usr_ip6_spec.ip6dst;
793 		if (!ip_dst && !ipv6_addr_any(mask6)) {
794 			ip_dst = __fbnic_ip6_sync(fbd, fbd->ip_dst,
795 						  addr6, mask6);
796 			if (!ip_dst) {
797 				if (ip_src && ip_src->state == FBNIC_TCAM_S_ADD)
798 					memset(ip_src, 0, sizeof(*ip_src));
799 				return -ENOSPC;
800 			}
801 
802 			set_bit(idx, ip_dst->act_tcam);
803 			ip_value |= FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
804 				    FIELD_PREP(FBNIC_RPC_TCAM_ACT0_IPDST_IDX,
805 					       ip_dst - fbd->ip_dst);
806 			ip_mask &= ~(FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
807 				       FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
808 				     FBNIC_RPC_TCAM_ACT0_IPDST_IDX);
809 
810 		flow_value |= FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
811 			      FBNIC_RPC_TCAM_ACT1_IP_VALID |
812 			      FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
813 		flow_mask &= ~(FBNIC_RPC_TCAM_ACT1_IP_IS_V6 |
814 			       FBNIC_RPC_TCAM_ACT1_IP_VALID |
815 			       FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID);
816 		break;
817 	case ETHER_FLOW:
818 		if (!is_zero_ether_addr(fsp->m_u.ether_spec.h_dest)) {
819 			u8 *addr = fsp->h_u.ether_spec.h_dest;
820 			u8 *mask = fsp->m_u.ether_spec.h_dest;
821 
822 			/* Do not allow MAC addr of 0 */
823 			if (is_zero_ether_addr(addr))
824 				return -EINVAL;
825 
826 			/* Only support full MAC address to avoid
827 			 * conflicts with other MAC addresses.
828 			 */
829 			if (!is_broadcast_ether_addr(mask))
830 				return -EINVAL;
831 
832 			if (is_multicast_ether_addr(addr))
833 				mac_addr = __fbnic_mc_sync(fbd, addr);
834 			else
835 				mac_addr = __fbnic_uc_sync(fbd, addr);
836 
837 			if (!mac_addr)
838 				return -ENOSPC;
839 
840 			set_bit(idx, mac_addr->act_tcam);
841 			flow_value |=
842 				FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
843 					   mac_addr - fbd->mac_addr);
844 			flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX;
845 		}
846 
847 		flow_value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
848 		flow_mask &= ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
849 		break;
850 	default:
851 		return -EINVAL;
852 	}
853 
854 	/* Write action table values */
855 	act_tcam->dest = dest;
856 	act_tcam->rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, hash_idx);
857 
858 	/* Write IP Match value/mask to action_tcam[0] */
859 	act_tcam->value.tcam[0] = ip_value;
860 	act_tcam->mask.tcam[0] = ip_mask;
861 
862 	/* Write flow type value/mask to action_tcam[1] */
863 	act_tcam->value.tcam[1] = flow_value;
864 	act_tcam->mask.tcam[1] = flow_mask;
865 
866 	/* Write error, DSCP, extra L4 matches to action_tcam[2] */
867 	act_tcam->value.tcam[2] = misc;
868 	act_tcam->mask.tcam[2] = misc_mask;
869 
870 	/* Write source/destination port values */
871 	act_tcam->value.tcam[3] = sport;
872 	act_tcam->mask.tcam[3] = sport_mask;
873 	act_tcam->value.tcam[4] = dport;
874 	act_tcam->mask.tcam[4] = dport_mask;
875 
876 	for (j = 5; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
877 		act_tcam->mask.tcam[j] = 0xffff;
878 
879 	act_tcam->state = FBNIC_TCAM_S_UPDATE;
880 	fsp->location = location;
881 
882 	if (netif_running(fbn->netdev)) {
883 		fbnic_write_rules(fbd);
884 		if (ip_src || ip_dst)
885 			fbnic_write_ip_addr(fbd);
886 		if (mac_addr)
887 			fbnic_write_macda(fbd);
888 	}
889 
890 	return 0;
891 }
892 
893 static void fbnic_clear_nfc_macda(struct fbnic_net *fbn,
894 				  unsigned int tcam_idx)
895 {
896 	struct fbnic_dev *fbd = fbn->fbd;
897 	int idx;
898 
899 	for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;)
900 		__fbnic_xc_unsync(&fbd->mac_addr[idx], tcam_idx);
901 
902 	/* Write updates to hardware */
903 	if (netif_running(fbn->netdev))
904 		fbnic_write_macda(fbd);
905 }
906 
907 static void fbnic_clear_nfc_ip_addr(struct fbnic_net *fbn,
908 				    unsigned int tcam_idx)
909 {
910 	struct fbnic_dev *fbd = fbn->fbd;
911 	int idx;
912 
913 	for (idx = ARRAY_SIZE(fbd->ip_src); idx--;)
914 		__fbnic_ip_unsync(&fbd->ip_src[idx], tcam_idx);
915 	for (idx = ARRAY_SIZE(fbd->ip_dst); idx--;)
916 		__fbnic_ip_unsync(&fbd->ip_dst[idx], tcam_idx);
917 	for (idx = ARRAY_SIZE(fbd->ipo_src); idx--;)
918 		__fbnic_ip_unsync(&fbd->ipo_src[idx], tcam_idx);
919 	for (idx = ARRAY_SIZE(fbd->ipo_dst); idx--;)
920 		__fbnic_ip_unsync(&fbd->ipo_dst[idx], tcam_idx);
921 
922 	/* Write updates to hardware */
923 	if (netif_running(fbn->netdev))
924 		fbnic_write_ip_addr(fbd);
925 }
926 
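/* Mark the rule at the requested location for deletion and drop its
 * references to any MAC address or IP address TCAM entries it used.
 */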
927 static int fbnic_set_cls_rule_del(struct fbnic_net *fbn,
928 				  const struct ethtool_rxnfc *cmd)
929 {
930 	struct ethtool_rx_flow_spec *fsp;
931 	struct fbnic_dev *fbd = fbn->fbd;
932 	struct fbnic_act_tcam *act_tcam;
933 	int idx;
934 
935 	fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
936 
937 	if (fsp->location >= FBNIC_RPC_ACT_TBL_NFC_ENTRIES)
938 		return -EINVAL;
939 
940 	idx = fsp->location + FBNIC_RPC_ACT_TBL_NFC_OFFSET;
941 	act_tcam = &fbd->act_tcam[idx];
942 
943 	if (act_tcam->state != FBNIC_TCAM_S_VALID)
944 		return -EINVAL;
945 
946 	act_tcam->state = FBNIC_TCAM_S_DELETE;
947 
948 	if ((act_tcam->value.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID) &&
949 	    (~act_tcam->mask.tcam[1] & FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX))
950 		fbnic_clear_nfc_macda(fbn, idx);
951 
952 	if ((act_tcam->value.tcam[0] &
953 	     (FBNIC_RPC_TCAM_ACT0_IPSRC_VALID |
954 	      FBNIC_RPC_TCAM_ACT0_IPDST_VALID |
955 	      FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID |
956 	      FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID)) &&
957 	    (~act_tcam->mask.tcam[0] &
958 	     (FBNIC_RPC_TCAM_ACT0_IPSRC_IDX |
959 	      FBNIC_RPC_TCAM_ACT0_IPDST_IDX |
960 	      FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX |
961 	      FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX)))
962 		fbnic_clear_nfc_ip_addr(fbn, idx);
963 
964 	if (netif_running(fbn->netdev))
965 		fbnic_write_rules(fbd);
966 
967 	return 0;
968 }
969 
970 static int fbnic_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
971 {
972 	struct fbnic_net *fbn = netdev_priv(netdev);
973 	int ret = -EOPNOTSUPP;
974 
975 	switch (cmd->cmd) {
976 	case ETHTOOL_SRXFH:
977 		ret = fbnic_set_rss_hash_opts(fbn, cmd);
978 		break;
979 	case ETHTOOL_SRXCLSRLINS:
980 		ret = fbnic_set_cls_rule_ins(fbn, cmd);
981 		break;
982 	case ETHTOOL_SRXCLSRLDEL:
983 		ret = fbnic_set_cls_rule_del(fbn, cmd);
984 		break;
985 	}
986 
987 	return ret;
988 }
989 
990 static u32 fbnic_get_rxfh_key_size(struct net_device *netdev)
991 {
992 	return FBNIC_RPC_RSS_KEY_BYTE_LEN;
993 }
994 
995 static u32 fbnic_get_rxfh_indir_size(struct net_device *netdev)
996 {
997 	return FBNIC_RPC_RSS_TBL_SIZE;
998 }
999 
1000 static int
1001 fbnic_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
1002 {
1003 	struct fbnic_net *fbn = netdev_priv(netdev);
1004 	unsigned int i;
1005 
1006 	rxfh->hfunc = ETH_RSS_HASH_TOP;
1007 
1008 	if (rxfh->key) {
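		/* Each rss_key word packs four key bytes MSB first; shift
		 * the requested byte up into bits 31:24 and extract it.
		 */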
1009 		for (i = 0; i < FBNIC_RPC_RSS_KEY_BYTE_LEN; i++) {
1010 			u32 rss_key = fbn->rss_key[i / 4] << ((i % 4) * 8);
1011 
1012 			rxfh->key[i] = rss_key >> 24;
1013 		}
1014 	}
1015 
1016 	if (rxfh->indir) {
1017 		for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
1018 			rxfh->indir[i] = fbn->indir_tbl[0][i];
1019 	}
1020 
1021 	return 0;
1022 }
1023 
1024 static unsigned int
1025 fbnic_set_indir(struct fbnic_net *fbn, unsigned int idx, const u32 *indir)
1026 {
1027 	unsigned int i, changes = 0;
1028 
1029 	for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
1030 		if (fbn->indir_tbl[idx][i] == indir[i])
1031 			continue;
1032 
1033 		fbn->indir_tbl[idx][i] = indir[i];
1034 		changes++;
1035 	}
1036 
1037 	return changes;
1038 }
1039 
1040 static int
1041 fbnic_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
1042 	       struct netlink_ext_ack *extack)
1043 {
1044 	struct fbnic_net *fbn = netdev_priv(netdev);
1045 	unsigned int i, changes = 0;
1046 
1047 	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
1048 	    rxfh->hfunc != ETH_RSS_HASH_TOP)
1049 		return -EINVAL;
1050 
1051 	if (rxfh->key) {
1052 		u32 rss_key = 0;
1053 
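		/* Walk the key backwards, accumulating bytes MSB first into
		 * 32-bit words and committing each word once its first byte
		 * (i % 4 == 0) has been folded in.
		 */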
1054 		for (i = FBNIC_RPC_RSS_KEY_BYTE_LEN; i--;) {
1055 			rss_key >>= 8;
1056 			rss_key |= (u32)(rxfh->key[i]) << 24;
1057 
1058 			if (i % 4)
1059 				continue;
1060 
1061 			if (fbn->rss_key[i / 4] == rss_key)
1062 				continue;
1063 
1064 			fbn->rss_key[i / 4] = rss_key;
1065 			changes++;
1066 		}
1067 	}
1068 
1069 	if (rxfh->indir)
1070 		changes += fbnic_set_indir(fbn, 0, rxfh->indir);
1071 
1072 	if (changes && netif_running(netdev))
1073 		fbnic_rss_reinit_hw(fbn->fbd, fbn);
1074 
1075 	return 0;
1076 }
1077 
1078 static int
1079 fbnic_modify_rxfh_context(struct net_device *netdev,
1080 			  struct ethtool_rxfh_context *ctx,
1081 			  const struct ethtool_rxfh_param *rxfh,
1082 			  struct netlink_ext_ack *extack)
1083 {
1084 	struct fbnic_net *fbn = netdev_priv(netdev);
1085 	const u32 *indir = rxfh->indir;
1086 	unsigned int changes;
1087 
1088 	if (!indir)
1089 		indir = ethtool_rxfh_context_indir(ctx);
1090 
1091 	changes = fbnic_set_indir(fbn, rxfh->rss_context, indir);
1092 	if (changes && netif_running(netdev))
1093 		fbnic_rss_reinit_hw(fbn->fbd, fbn);
1094 
1095 	return 0;
1096 }
1097 
1098 static int
1099 fbnic_create_rxfh_context(struct net_device *netdev,
1100 			  struct ethtool_rxfh_context *ctx,
1101 			  const struct ethtool_rxfh_param *rxfh,
1102 			  struct netlink_ext_ack *extack)
1103 {
1104 	struct fbnic_net *fbn = netdev_priv(netdev);
1105 
1106 	if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
1107 		NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
1108 		return -EOPNOTSUPP;
1109 	}
1110 	ctx->hfunc = ETH_RSS_HASH_TOP;
1111 
1112 	if (!rxfh->indir) {
1113 		u32 *indir = ethtool_rxfh_context_indir(ctx);
1114 		unsigned int num_rx = fbn->num_rx_queues;
1115 		unsigned int i;
1116 
1117 		for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++)
1118 			indir[i] = ethtool_rxfh_indir_default(i, num_rx);
1119 	}
1120 
1121 	return fbnic_modify_rxfh_context(netdev, ctx, rxfh, extack);
1122 }
1123 
1124 static int
1125 fbnic_remove_rxfh_context(struct net_device *netdev,
1126 			  struct ethtool_rxfh_context *ctx, u32 rss_context,
1127 			  struct netlink_ext_ack *extack)
1128 {
1129 	/* Nothing to do, contexts are allocated statically */
1130 	return 0;
1131 }
1132 
1133 static void fbnic_get_channels(struct net_device *netdev,
1134 			       struct ethtool_channels *ch)
1135 {
1136 	struct fbnic_net *fbn = netdev_priv(netdev);
1137 	struct fbnic_dev *fbd = fbn->fbd;
1138 
1139 	ch->max_rx = fbd->max_num_queues;
1140 	ch->max_tx = fbd->max_num_queues;
1141 	ch->max_combined = min(ch->max_rx, ch->max_tx);
1142 	ch->max_other = FBNIC_NON_NAPI_VECTORS;
1143 
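	/* Queues sharing a NAPI vector are reported as combined channels;
	 * whatever is left over is exposed as dedicated Rx or Tx channels.
	 */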
1144 	if (fbn->num_rx_queues > fbn->num_napi ||
1145 	    fbn->num_tx_queues > fbn->num_napi)
1146 		ch->combined_count = min(fbn->num_rx_queues,
1147 					 fbn->num_tx_queues);
1148 	else
1149 		ch->combined_count =
1150 			fbn->num_rx_queues + fbn->num_tx_queues - fbn->num_napi;
1151 	ch->rx_count = fbn->num_rx_queues - ch->combined_count;
1152 	ch->tx_count = fbn->num_tx_queues - ch->combined_count;
1153 	ch->other_count = FBNIC_NON_NAPI_VECTORS;
1154 }
1155 
1156 static void fbnic_set_queues(struct fbnic_net *fbn, struct ethtool_channels *ch,
1157 			     unsigned int max_napis)
1158 {
1159 	fbn->num_rx_queues = ch->rx_count + ch->combined_count;
1160 	fbn->num_tx_queues = ch->tx_count + ch->combined_count;
1161 	fbn->num_napi = min(ch->rx_count + ch->tx_count + ch->combined_count,
1162 			    max_napis);
1163 }
1164 
1165 static int fbnic_set_channels(struct net_device *netdev,
1166 			      struct ethtool_channels *ch)
1167 {
1168 	struct fbnic_net *fbn = netdev_priv(netdev);
1169 	unsigned int max_napis, standalone;
1170 	struct fbnic_dev *fbd = fbn->fbd;
1171 	struct fbnic_net *clone;
1172 	int err;
1173 
1174 	max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
1175 	standalone = ch->rx_count + ch->tx_count;
1176 
1177 	/* Limits for standalone queues:
1178 	 *  - each queue has its own NAPI (num_napi >= rx + tx + combined)
1179 	 *  - when combining queues, rx or tx standalone count must be 0
1180 	 */
1181 	if ((ch->rx_count && ch->tx_count && ch->combined_count) ||
1182 	    (standalone && standalone + ch->combined_count > max_napis) ||
1183 	    ch->rx_count + ch->combined_count > fbd->max_num_queues ||
1184 	    ch->tx_count + ch->combined_count > fbd->max_num_queues ||
1185 	    ch->other_count != FBNIC_NON_NAPI_VECTORS)
1186 		return -EINVAL;
1187 
1188 	if (!netif_running(netdev)) {
1189 		fbnic_set_queues(fbn, ch, max_napis);
1190 		fbnic_reset_indir_tbl(fbn);
1191 		return 0;
1192 	}
1193 
1194 	clone = fbnic_clone_create(fbn);
1195 	if (!clone)
1196 		return -ENOMEM;
1197 
1198 	fbnic_set_queues(clone, ch, max_napis);
1199 
1200 	err = fbnic_alloc_napi_vectors(clone);
1201 	if (err)
1202 		goto err_free_clone;
1203 
1204 	err = fbnic_alloc_resources(clone);
1205 	if (err)
1206 		goto err_free_napis;
1207 
1208 	fbnic_down_noidle(fbn);
1209 	err = fbnic_wait_all_queues_idle(fbn->fbd, true);
1210 	if (err)
1211 		goto err_start_stack;
1212 
1213 	err = fbnic_set_netif_queues(clone);
1214 	if (err)
1215 		goto err_start_stack;
1216 
1217 	/* Nothing can fail past this point */
1218 	fbnic_flush(fbn);
1219 
1220 	fbnic_clone_swap(fbn, clone);
1221 
1222 	/* Reset RSS indirection table */
1223 	fbnic_reset_indir_tbl(fbn);
1224 
1225 	fbnic_up(fbn);
1226 
1227 	fbnic_free_resources(clone);
1228 	fbnic_free_napi_vectors(clone);
1229 	fbnic_clone_free(clone);
1230 
1231 	return 0;
1232 
1233 err_start_stack:
1234 	fbnic_flush(fbn);
1235 	fbnic_up(fbn);
1236 	fbnic_free_resources(clone);
1237 err_free_napis:
1238 	fbnic_free_napi_vectors(clone);
1239 err_free_clone:
1240 	fbnic_clone_free(clone);
1241 	return err;
1242 }
1243 
1244 static int
1245 fbnic_get_ts_info(struct net_device *netdev,
1246 		  struct kernel_ethtool_ts_info *tsinfo)
1247 {
1248 	struct fbnic_net *fbn = netdev_priv(netdev);
1249 
1250 	tsinfo->phc_index = ptp_clock_index(fbn->fbd->ptp);
1251 
1252 	tsinfo->so_timestamping =
1253 		SOF_TIMESTAMPING_TX_SOFTWARE |
1254 		SOF_TIMESTAMPING_TX_HARDWARE |
1255 		SOF_TIMESTAMPING_RX_HARDWARE |
1256 		SOF_TIMESTAMPING_RAW_HARDWARE;
1257 
1258 	tsinfo->tx_types =
1259 		BIT(HWTSTAMP_TX_OFF) |
1260 		BIT(HWTSTAMP_TX_ON);
1261 
1262 	tsinfo->rx_filters =
1263 		BIT(HWTSTAMP_FILTER_NONE) |
1264 		BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
1265 		BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1266 		BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1267 		BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
1268 		BIT(HWTSTAMP_FILTER_ALL);
1269 
1270 	return 0;
1271 }
1272 
1273 static void fbnic_get_ts_stats(struct net_device *netdev,
1274 			       struct ethtool_ts_stats *ts_stats)
1275 {
1276 	struct fbnic_net *fbn = netdev_priv(netdev);
1277 	u64 ts_packets, ts_lost;
1278 	struct fbnic_ring *ring;
1279 	unsigned int start;
1280 	int i;
1281 
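
	/* Start from the counters already aggregated into tx_stats, then add
	 * the live per-ring values under each ring's u64_stats sequence lock.
	 */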
1282 	ts_stats->pkts = fbn->tx_stats.twq.ts_packets;
1283 	ts_stats->lost = fbn->tx_stats.twq.ts_lost;
1284 	for (i = 0; i < fbn->num_tx_queues; i++) {
1285 		ring = fbn->tx[i];
1286 		do {
1287 			start = u64_stats_fetch_begin(&ring->stats.syncp);
1288 			ts_packets = ring->stats.twq.ts_packets;
1289 			ts_lost = ring->stats.twq.ts_lost;
1290 		} while (u64_stats_fetch_retry(&ring->stats.syncp, start));
1291 		ts_stats->pkts += ts_packets;
1292 		ts_stats->lost += ts_lost;
1293 	}
1294 }
1295 
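/* Copy a hardware counter into the ethtool stat only if the MAC actually
 * reported it, leaving the pre-initialized value untouched otherwise.
 */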
1296 static void fbnic_set_counter(u64 *stat, struct fbnic_stat_counter *counter)
1297 {
1298 	if (counter->reported)
1299 		*stat = counter->value;
1300 }
1301 
1302 static void
1303 fbnic_get_eth_mac_stats(struct net_device *netdev,
1304 			struct ethtool_eth_mac_stats *eth_mac_stats)
1305 {
1306 	struct fbnic_net *fbn = netdev_priv(netdev);
1307 	struct fbnic_mac_stats *mac_stats;
1308 	struct fbnic_dev *fbd = fbn->fbd;
1309 	const struct fbnic_mac *mac;
1310 
1311 	mac_stats = &fbd->hw_stats.mac;
1312 	mac = fbd->mac;
1313 
1314 	mac->get_eth_mac_stats(fbd, false, &mac_stats->eth_mac);
1315 
1316 	fbnic_set_counter(&eth_mac_stats->FramesTransmittedOK,
1317 			  &mac_stats->eth_mac.FramesTransmittedOK);
1318 	fbnic_set_counter(&eth_mac_stats->FramesReceivedOK,
1319 			  &mac_stats->eth_mac.FramesReceivedOK);
1320 	fbnic_set_counter(&eth_mac_stats->FrameCheckSequenceErrors,
1321 			  &mac_stats->eth_mac.FrameCheckSequenceErrors);
1322 	fbnic_set_counter(&eth_mac_stats->AlignmentErrors,
1323 			  &mac_stats->eth_mac.AlignmentErrors);
1324 	fbnic_set_counter(&eth_mac_stats->OctetsTransmittedOK,
1325 			  &mac_stats->eth_mac.OctetsTransmittedOK);
1326 	fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACXmitError,
1327 			  &mac_stats->eth_mac.FramesLostDueToIntMACXmitError);
1328 	fbnic_set_counter(&eth_mac_stats->OctetsReceivedOK,
1329 			  &mac_stats->eth_mac.OctetsReceivedOK);
1330 	fbnic_set_counter(&eth_mac_stats->FramesLostDueToIntMACRcvError,
1331 			  &mac_stats->eth_mac.FramesLostDueToIntMACRcvError);
1332 	fbnic_set_counter(&eth_mac_stats->MulticastFramesXmittedOK,
1333 			  &mac_stats->eth_mac.MulticastFramesXmittedOK);
1334 	fbnic_set_counter(&eth_mac_stats->BroadcastFramesXmittedOK,
1335 			  &mac_stats->eth_mac.BroadcastFramesXmittedOK);
1336 	fbnic_set_counter(&eth_mac_stats->MulticastFramesReceivedOK,
1337 			  &mac_stats->eth_mac.MulticastFramesReceivedOK);
1338 	fbnic_set_counter(&eth_mac_stats->BroadcastFramesReceivedOK,
1339 			  &mac_stats->eth_mac.BroadcastFramesReceivedOK);
1340 	fbnic_set_counter(&eth_mac_stats->FrameTooLongErrors,
1341 			  &mac_stats->eth_mac.FrameTooLongErrors);
1342 }
1343 
1344 static const struct ethtool_ops fbnic_ethtool_ops = {
1345 	.supported_coalesce_params	=
1346 				  ETHTOOL_COALESCE_USECS |
1347 				  ETHTOOL_COALESCE_RX_MAX_FRAMES,
1348 	.rxfh_max_num_contexts	= FBNIC_RPC_RSS_TBL_COUNT,
1349 	.get_drvinfo		= fbnic_get_drvinfo,
1350 	.get_regs_len		= fbnic_get_regs_len,
1351 	.get_regs		= fbnic_get_regs,
1352 	.get_coalesce		= fbnic_get_coalesce,
1353 	.set_coalesce		= fbnic_set_coalesce,
1354 	.get_strings		= fbnic_get_strings,
1355 	.get_ethtool_stats	= fbnic_get_ethtool_stats,
1356 	.get_sset_count		= fbnic_get_sset_count,
1357 	.get_rxnfc		= fbnic_get_rxnfc,
1358 	.set_rxnfc		= fbnic_set_rxnfc,
1359 	.get_rxfh_key_size	= fbnic_get_rxfh_key_size,
1360 	.get_rxfh_indir_size	= fbnic_get_rxfh_indir_size,
1361 	.get_rxfh		= fbnic_get_rxfh,
1362 	.set_rxfh		= fbnic_set_rxfh,
1363 	.create_rxfh_context	= fbnic_create_rxfh_context,
1364 	.modify_rxfh_context	= fbnic_modify_rxfh_context,
1365 	.remove_rxfh_context	= fbnic_remove_rxfh_context,
1366 	.get_channels		= fbnic_get_channels,
1367 	.set_channels		= fbnic_set_channels,
1368 	.get_ts_info		= fbnic_get_ts_info,
1369 	.get_ts_stats		= fbnic_get_ts_stats,
1370 	.get_eth_mac_stats	= fbnic_get_eth_mac_stats,
1371 };
1372 
1373 void fbnic_set_ethtool_ops(struct net_device *dev)
1374 {
1375 	dev->ethtool_ops = &fbnic_ethtool_ops;
1376 }
1377