1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qede NIC Driver
3  * Copyright (c) 2015-2017  QLogic Corporation
4  * Copyright (c) 2019-2020 Marvell International Ltd.
5  */
6 
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <net/udp_tunnel.h>
10 #include <linux/bitops.h>
11 #include <linux/vmalloc.h>
12 
13 #include <linux/qed/qed_if.h>
14 #include "qede.h"
15 
16 #define QEDE_FILTER_PRINT_MAX_LEN	(64)
17 struct qede_arfs_tuple {
18 	union {
19 		__be32 src_ipv4;
20 		struct in6_addr src_ipv6;
21 	};
22 	union {
23 		__be32 dst_ipv4;
24 		struct in6_addr dst_ipv6;
25 	};
26 	__be16  src_port;
27 	__be16  dst_port;
28 	__be16  eth_proto;
29 	u8      ip_proto;
30 
31 	/* Describe filtering mode needed for this kind of filter */
32 	enum qed_filter_config_mode mode;
33 
34 	/* Used to compare new/old filters. Return true if IPs match */
35 	bool (*ip_comp)(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b);
36 
37 	/* Given a pointer to the packet's ethhdr, build a header from the tuple info */
38 	void (*build_hdr)(struct qede_arfs_tuple *t, void *header);
39 
40 	/* Stringify the tuple for a print into the provided buffer */
41 	void (*stringify)(struct qede_arfs_tuple *t, void *buffer);
42 };
43 
44 struct qede_arfs_fltr_node {
45 #define QEDE_FLTR_VALID	 0
46 	unsigned long state;
47 
48 	/* pointer to aRFS packet buffer */
49 	void *data;
50 
51 	/* dma map address of aRFS packet buffer */
52 	dma_addr_t mapping;
53 
54 	/* length of aRFS packet buffer */
55 	int buf_len;
56 
57 	/* tuple extracted from the aRFS packet buffer */
58 	struct qede_arfs_tuple tuple;
59 
60 	u32 flow_id;
61 	u64 sw_id;
62 	u16 rxq_id;
63 	u16 next_rxq_id;
64 	u8 vfid;
65 	bool filter_op;
66 	bool used;
67 	u8 fw_rc;
68 	bool b_is_drop;
69 	struct hlist_node node;
70 };
71 
72 struct qede_arfs {
73 #define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx])
74 #define QEDE_ARFS_POLL_COUNT	100
75 #define QEDE_RFS_FLW_BITSHIFT	(4)
76 #define QEDE_RFS_FLW_MASK	((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
77 	struct hlist_head	arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];
78 
79 	/* lock for filter list access */
80 	spinlock_t		arfs_list_lock;
81 	unsigned long		*arfs_fltr_bmap;
82 	int			filter_count;
83 
84 	/* Currently configured filtering mode */
85 	enum qed_filter_config_mode mode;
86 };
87 
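/* Build a qed_ntuple_filter_params request from an aRFS filter node and ask
 * the qed layer to add or remove the corresponding hardware filter. The
 * request completes asynchronously; qed reports the result through
 * qede_arfs_filter_op(), so the node stays marked 'used' until that callback
 * runs. vfid is stored biased by one, hence the 'n->vfid - 1' below.
 */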
88 static void qede_configure_arfs_fltr(struct qede_dev *edev,
89 				     struct qede_arfs_fltr_node *n,
90 				     u16 rxq_id, bool add_fltr)
91 {
92 	const struct qed_eth_ops *op = edev->ops;
93 	struct qed_ntuple_filter_params params;
94 
95 	if (n->used)
96 		return;
97 
98 	memset(&params, 0, sizeof(params));
99 
100 	params.addr = n->mapping;
101 	params.length = n->buf_len;
102 	params.qid = rxq_id;
103 	params.b_is_add = add_fltr;
104 	params.b_is_drop = n->b_is_drop;
105 
106 	if (n->vfid) {
107 		params.b_is_vf = true;
108 		params.vf_id = n->vfid - 1;
109 	}
110 
111 	if (n->tuple.stringify) {
112 		char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN];
113 
114 		n->tuple.stringify(&n->tuple, tuple_buffer);
115 		DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
116 			   "%s sw_id[0x%llx]: %s [vf %u queue %d]\n",
117 			   add_fltr ? "Adding" : "Deleting",
118 			   n->sw_id, tuple_buffer, n->vfid, rxq_id);
119 	}
120 
121 	n->used = true;
122 	n->filter_op = add_fltr;
123 	op->ntuple_filter_config(edev->cdev, n, &params);
124 }
125 
126 static void
127 qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
128 {
129 	kfree(fltr->data);
130 
131 	if (fltr->sw_id < QEDE_RFS_MAX_FLTR)
132 		clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
133 
134 	kfree(fltr);
135 }
136 
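/* DMA-map the filter's template packet, link the node into its hash bucket
 * and, when this is the first filter, enable the RFS searcher in the device
 * with the filtering mode required by the new filter.
 */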
137 static int
138 qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
139 				      struct qede_arfs_fltr_node *fltr,
140 				      u16 bucket_idx)
141 {
142 	fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
143 				       fltr->buf_len, DMA_TO_DEVICE);
144 	if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
145 		DP_NOTICE(edev, "Failed to map DMA memory for rule\n");
146 		qede_free_arfs_filter(edev, fltr);
147 		return -ENOMEM;
148 	}
149 
150 	INIT_HLIST_NODE(&fltr->node);
151 	hlist_add_head(&fltr->node,
152 		       QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));
153 
154 	edev->arfs->filter_count++;
155 	if (edev->arfs->filter_count == 1 &&
156 	    edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) {
157 		edev->ops->configure_arfs_searcher(edev->cdev,
158 						   fltr->tuple.mode);
159 		edev->arfs->mode = fltr->tuple.mode;
160 	}
161 
162 	return 0;
163 }
164 
165 static void
166 qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
167 				      struct qede_arfs_fltr_node *fltr)
168 {
169 	hlist_del(&fltr->node);
170 	dma_unmap_single(&edev->pdev->dev, fltr->mapping,
171 			 fltr->buf_len, DMA_TO_DEVICE);
172 
173 	qede_free_arfs_filter(edev, fltr);
174 
175 	edev->arfs->filter_count--;
176 	if (!edev->arfs->filter_count &&
177 	    edev->arfs->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
178 		enum qed_filter_config_mode mode;
179 
180 		mode = QED_FILTER_CONFIG_MODE_DISABLE;
181 		edev->ops->configure_arfs_searcher(edev->cdev, mode);
182 		edev->arfs->mode = QED_FILTER_CONFIG_MODE_DISABLE;
183 	}
184 }
185 
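/* Completion callback invoked by qed once a filter add/delete request has
 * been processed by the firmware. On failure the filter is simply marked
 * invalid; on success the VALID bit is updated and, if the target RX queue
 * changed while the request was in flight, a follow-up delete/add is issued
 * to move the filter to the new queue.
 */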
186 void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
187 {
188 	struct qede_arfs_fltr_node *fltr = filter;
189 	struct qede_dev *edev = dev;
190 
191 	fltr->fw_rc = fw_rc;
192 
193 	if (fw_rc) {
194 		DP_NOTICE(edev,
195 			  "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n",
196 			  fw_rc, fltr->flow_id, fltr->sw_id,
197 			  ntohs(fltr->tuple.src_port),
198 			  ntohs(fltr->tuple.dst_port), fltr->rxq_id);
199 
200 		spin_lock_bh(&edev->arfs->arfs_list_lock);
201 
202 		fltr->used = false;
203 		clear_bit(QEDE_FLTR_VALID, &fltr->state);
204 
205 		spin_unlock_bh(&edev->arfs->arfs_list_lock);
206 		return;
207 	}
208 
209 	spin_lock_bh(&edev->arfs->arfs_list_lock);
210 
211 	fltr->used = false;
212 
213 	if (fltr->filter_op) {
214 		set_bit(QEDE_FLTR_VALID, &fltr->state);
215 		if (fltr->rxq_id != fltr->next_rxq_id)
216 			qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
217 						 false);
218 	} else {
219 		clear_bit(QEDE_FLTR_VALID, &fltr->state);
220 		if (fltr->rxq_id != fltr->next_rxq_id) {
221 			fltr->rxq_id = fltr->next_rxq_id;
222 			qede_configure_arfs_fltr(edev, fltr,
223 						 fltr->rxq_id, true);
224 		}
225 	}
226 
227 	spin_unlock_bh(&edev->arfs->arfs_list_lock);
228 }
229 
230 /* Should be called while qede_lock is held */
231 void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
232 {
233 	int i;
234 
235 	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
236 		struct hlist_node *temp;
237 		struct hlist_head *head;
238 		struct qede_arfs_fltr_node *fltr;
239 
240 		head = &edev->arfs->arfs_hl_head[i];
241 
242 		hlist_for_each_entry_safe(fltr, temp, head, node) {
243 			bool del = false;
244 
245 			if (edev->state != QEDE_STATE_OPEN)
246 				del = true;
247 
248 			spin_lock_bh(&edev->arfs->arfs_list_lock);
249 
250 			if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
251 			     !fltr->used) || free_fltr) {
252 				qede_dequeue_fltr_and_config_searcher(edev,
253 								      fltr);
254 			} else {
255 				bool flow_exp = false;
256 #ifdef CONFIG_RFS_ACCEL
257 				flow_exp = rps_may_expire_flow(edev->ndev,
258 							       fltr->rxq_id,
259 							       fltr->flow_id,
260 							       fltr->sw_id);
261 #endif
262 				if ((flow_exp || del) && !free_fltr)
263 					qede_configure_arfs_fltr(edev, fltr,
264 								 fltr->rxq_id,
265 								 false);
266 			}
267 
268 			spin_unlock_bh(&edev->arfs->arfs_list_lock);
269 		}
270 	}
271 
272 #ifdef CONFIG_RFS_ACCEL
273 	spin_lock_bh(&edev->arfs->arfs_list_lock);
274 
275 	if (edev->arfs->filter_count) {
276 		set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
277 		schedule_delayed_work(&edev->sp_task,
278 				      QEDE_SP_TASK_POLL_DELAY);
279 	}
280 
281 	spin_unlock_bh(&edev->arfs->arfs_list_lock);
282 #endif
283 }
284 
285 /* This function waits until all aRFS filters get deleted and freed.
286  * On timeout it frees all filters forcefully.
287  */
288 void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
289 {
290 	int count = QEDE_ARFS_POLL_COUNT;
291 
292 	while (count) {
293 		qede_process_arfs_filters(edev, false);
294 
295 		if (!edev->arfs->filter_count)
296 			break;
297 
298 		msleep(100);
299 		count--;
300 	}
301 
302 	if (!count) {
303 		DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");
304 
305 		/* Something is terribly wrong, free forcefully */
306 		qede_process_arfs_filters(edev, true);
307 	}
308 }
309 
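/* Allocate the aRFS context: the hash bucket array and list lock, a bitmap
 * used to hand out sw_ids (up to QEDE_RFS_MAX_FLTR), and, with
 * CONFIG_RFS_ACCEL, an IRQ CPU rmap so the stack can steer flows toward the
 * CPU servicing each RX queue's interrupt.
 */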
310 int qede_alloc_arfs(struct qede_dev *edev)
311 {
312 	int i;
313 
314 	if (!edev->dev_info.common.b_arfs_capable)
315 		return -EINVAL;
316 
317 	edev->arfs = vzalloc(sizeof(*edev->arfs));
318 	if (!edev->arfs)
319 		return -ENOMEM;
320 
321 	spin_lock_init(&edev->arfs->arfs_list_lock);
322 
323 	for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
324 		INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i));
325 
326 	edev->arfs->arfs_fltr_bmap =
327 		vzalloc(array_size(sizeof(long),
328 				   BITS_TO_LONGS(QEDE_RFS_MAX_FLTR)));
329 	if (!edev->arfs->arfs_fltr_bmap) {
330 		vfree(edev->arfs);
331 		edev->arfs = NULL;
332 		return -ENOMEM;
333 	}
334 
335 #ifdef CONFIG_RFS_ACCEL
336 	edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
337 	if (!edev->ndev->rx_cpu_rmap) {
338 		vfree(edev->arfs->arfs_fltr_bmap);
339 		edev->arfs->arfs_fltr_bmap = NULL;
340 		vfree(edev->arfs);
341 		edev->arfs = NULL;
342 		return -ENOMEM;
343 	}
344 #endif
345 	return 0;
346 }
347 
348 void qede_free_arfs(struct qede_dev *edev)
349 {
350 	if (!edev->arfs)
351 		return;
352 
353 #ifdef CONFIG_RFS_ACCEL
354 	if (edev->ndev->rx_cpu_rmap)
355 		free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
356 
357 	edev->ndev->rx_cpu_rmap = NULL;
358 #endif
359 	vfree(edev->arfs->arfs_fltr_bmap);
360 	edev->arfs->arfs_fltr_bmap = NULL;
361 	vfree(edev->arfs);
362 	edev->arfs = NULL;
363 }
364 
365 #ifdef CONFIG_RFS_ACCEL
366 static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
367 				 const struct sk_buff *skb)
368 {
369 	if (skb->protocol == htons(ETH_P_IP)) {
370 		if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
371 		    tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
372 			return true;
373 		else
374 			return false;
375 	} else {
376 		struct in6_addr *src = &tpos->tuple.src_ipv6;
377 		u8 size = sizeof(struct in6_addr);
378 
379 		if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
380 		    !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
381 			return true;
382 		else
383 			return false;
384 	}
385 }
386 
387 static struct qede_arfs_fltr_node *
388 qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
389 			  __be16 src_port, __be16 dst_port, u8 ip_proto)
390 {
391 	struct qede_arfs_fltr_node *tpos;
392 
393 	hlist_for_each_entry(tpos, h, node)
394 		if (tpos->tuple.ip_proto == ip_proto &&
395 		    tpos->tuple.eth_proto == skb->protocol &&
396 		    qede_compare_ip_addr(tpos, skb) &&
397 		    tpos->tuple.src_port == src_port &&
398 		    tpos->tuple.dst_port == dst_port)
399 			return tpos;
400 
401 	return NULL;
402 }
403 
404 static struct qede_arfs_fltr_node *
405 qede_alloc_filter(struct qede_dev *edev, int min_hlen)
406 {
407 	struct qede_arfs_fltr_node *n;
408 	int bit_id;
409 
410 	bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
411 				     QEDE_RFS_MAX_FLTR);
412 
413 	if (bit_id >= QEDE_RFS_MAX_FLTR)
414 		return NULL;
415 
416 	n = kzalloc(sizeof(*n), GFP_ATOMIC);
417 	if (!n)
418 		return NULL;
419 
420 	n->data = kzalloc(min_hlen, GFP_ATOMIC);
421 	if (!n->data) {
422 		kfree(n);
423 		return NULL;
424 	}
425 
426 	n->sw_id = (u16)bit_id;
427 	set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
428 	return n;
429 }
430 
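/* aRFS receive-steering entry point (the driver's .ndo_rx_flow_steer hook).
 * The bucket is chosen from the low bits of the skb hash, i.e.:
 *
 *	tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;
 *
 * If a matching 5-tuple filter already exists it is retargeted to the new
 * RX queue; otherwise a new filter node is allocated, a copy of the packet
 * headers is stored as the template buffer, and the hardware update is
 * scheduled. The returned sw_id is later passed to rps_may_expire_flow().
 */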
431 int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
432 		       u16 rxq_index, u32 flow_id)
433 {
434 	struct qede_dev *edev = netdev_priv(dev);
435 	struct qede_arfs_fltr_node *n;
436 	int min_hlen, rc, tp_offset;
437 	struct ethhdr *eth;
438 	__be16 *ports;
439 	u16 tbl_idx;
440 	u8 ip_proto;
441 
442 	if (skb->encapsulation)
443 		return -EPROTONOSUPPORT;
444 
445 	if (skb->protocol != htons(ETH_P_IP) &&
446 	    skb->protocol != htons(ETH_P_IPV6))
447 		return -EPROTONOSUPPORT;
448 
449 	if (skb->protocol == htons(ETH_P_IP)) {
450 		ip_proto = ip_hdr(skb)->protocol;
451 		tp_offset = sizeof(struct iphdr);
452 	} else {
453 		ip_proto = ipv6_hdr(skb)->nexthdr;
454 		tp_offset = sizeof(struct ipv6hdr);
455 	}
456 
457 	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
458 		return -EPROTONOSUPPORT;
459 
460 	ports = (__be16 *)(skb->data + tp_offset);
461 	tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;
462 
463 	spin_lock_bh(&edev->arfs->arfs_list_lock);
464 
465 	n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx),
466 				      skb, ports[0], ports[1], ip_proto);
467 	if (n) {
468 		/* Filter match */
469 		n->next_rxq_id = rxq_index;
470 
471 		if (test_bit(QEDE_FLTR_VALID, &n->state)) {
472 			if (n->rxq_id != rxq_index)
473 				qede_configure_arfs_fltr(edev, n, n->rxq_id,
474 							 false);
475 		} else {
476 			if (!n->used) {
477 				n->rxq_id = rxq_index;
478 				qede_configure_arfs_fltr(edev, n, n->rxq_id,
479 							 true);
480 			}
481 		}
482 
483 		rc = n->sw_id;
484 		goto ret_unlock;
485 	}
486 
487 	min_hlen = ETH_HLEN + skb_headlen(skb);
488 
489 	n = qede_alloc_filter(edev, min_hlen);
490 	if (!n) {
491 		rc = -ENOMEM;
492 		goto ret_unlock;
493 	}
494 
495 	n->buf_len = min_hlen;
496 	n->rxq_id = rxq_index;
497 	n->next_rxq_id = rxq_index;
498 	n->tuple.src_port = ports[0];
499 	n->tuple.dst_port = ports[1];
500 	n->flow_id = flow_id;
501 
502 	if (skb->protocol == htons(ETH_P_IP)) {
503 		n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
504 		n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
505 	} else {
506 		memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
507 		       sizeof(struct in6_addr));
508 		memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
509 		       sizeof(struct in6_addr));
510 	}
511 
512 	eth = (struct ethhdr *)n->data;
513 	eth->h_proto = skb->protocol;
514 	n->tuple.eth_proto = skb->protocol;
515 	n->tuple.ip_proto = ip_proto;
516 	n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
517 	memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));
518 
519 	rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
520 	if (rc)
521 		goto ret_unlock;
522 
523 	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
524 
525 	spin_unlock_bh(&edev->arfs->arfs_list_lock);
526 
527 	set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
528 	schedule_delayed_work(&edev->sp_task, 0);
529 
530 	return n->sw_id;
531 
532 ret_unlock:
533 	spin_unlock_bh(&edev->arfs->arfs_list_lock);
534 	return rc;
535 }
536 #endif
537 
538 void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
539 {
540 	struct qede_dev *edev = dev;
541 
542 	if (edev->vxlan_dst_port != vxlan_port)
543 		edev->vxlan_dst_port = 0;
544 
545 	if (edev->geneve_dst_port != geneve_port)
546 		edev->geneve_dst_port = 0;
547 }
548 
549 void qede_force_mac(void *dev, u8 *mac, bool forced)
550 {
551 	struct qede_dev *edev = dev;
552 
553 	__qede_lock(edev);
554 
555 	if (!is_valid_ether_addr(mac)) {
556 		__qede_unlock(edev);
557 		return;
558 	}
559 
560 	eth_hw_addr_set(edev->ndev, mac);
561 	__qede_unlock(edev);
562 }
563 
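/* Fill the RSS parameters for a vport update. If the indirection table was
 * never configured, or references queues that no longer exist, it is reset
 * to the ethtool default spread; a random hash key is generated on first
 * use. *update is cleared when only a single RX queue is available.
 */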
564 void qede_fill_rss_params(struct qede_dev *edev,
565 			  struct qed_update_vport_rss_params *rss, u8 *update)
566 {
567 	bool need_reset = false;
568 	int i;
569 
570 	if (QEDE_RSS_COUNT(edev) <= 1) {
571 		memset(rss, 0, sizeof(*rss));
572 		*update = 0;
573 		return;
574 	}
575 
576 	/* Need to validate current RSS config uses valid entries */
577 	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
578 		if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
579 			need_reset = true;
580 			break;
581 		}
582 	}
583 
584 	if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
585 		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
586 			u16 indir_val, val;
587 
588 			val = QEDE_RSS_COUNT(edev);
589 			indir_val = ethtool_rxfh_indir_default(i, val);
590 			edev->rss_ind_table[i] = indir_val;
591 		}
592 		edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
593 	}
594 
595 	/* Now that we have the queue-indirection, prepare the handles */
596 	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
597 		u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);
598 
599 		rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
600 	}
601 
602 	if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
603 		netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
604 		edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
605 	}
606 	memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));
607 
608 	if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
609 		edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
610 		    QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
611 		edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
612 	}
613 	rss->rss_caps = edev->rss_caps;
614 
615 	*update = 1;
616 }
617 
618 static int qede_set_ucast_rx_mac(struct qede_dev *edev,
619 				 enum qed_filter_xcast_params_type opcode,
620 				 const unsigned char mac[ETH_ALEN])
621 {
622 	struct qed_filter_ucast_params ucast;
623 
624 	memset(&ucast, 0, sizeof(ucast));
625 	ucast.type = opcode;
626 	ucast.mac_valid = 1;
627 	ether_addr_copy(ucast.mac, mac);
628 
629 	return edev->ops->filter_config_ucast(edev->cdev, &ucast);
630 }
631 
632 static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
633 				  enum qed_filter_xcast_params_type opcode,
634 				  u16 vid)
635 {
636 	struct qed_filter_ucast_params ucast;
637 
638 	memset(&ucast, 0, sizeof(ucast));
639 	ucast.type = opcode;
640 	ucast.vlan_valid = 1;
641 	ucast.vlan = vid;
642 
643 	return edev->ops->filter_config_ucast(edev->cdev, &ucast);
644 }
645 
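/* Enable or disable the vport's accept-any-VLAN mode. Used when the number
 * of requested VLANs exceeds the available VLAN filter credits, and again on
 * rx-mode changes (promiscuous mode). No-op when already in the requested
 * state.
 */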
646 static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
647 {
648 	struct qed_update_vport_params *params;
649 	int rc;
650 
651 	/* Proceed only if action actually needs to be performed */
652 	if (edev->accept_any_vlan == action)
653 		return 0;
654 
655 	params = vzalloc(sizeof(*params));
656 	if (!params)
657 		return -ENOMEM;
658 
659 	params->vport_id = 0;
660 	params->accept_any_vlan = action;
661 	params->update_accept_any_vlan_flg = 1;
662 
663 	rc = edev->ops->vport_update(edev->cdev, params);
664 	if (rc) {
665 		DP_ERR(edev, "Failed to %s accept-any-vlan\n",
666 		       action ? "enable" : "disable");
667 	} else {
668 		DP_INFO(edev, "%s accept-any-vlan\n",
669 			action ? "enabled" : "disabled");
670 		edev->accept_any_vlan = action;
671 	}
672 
673 	vfree(params);
674 	return 0;
675 }
676 
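/* .ndo_vlan_rx_add_vid implementation. If the interface is down the VLAN is
 * only cached on vlan_list and programmed later by
 * qede_configure_vlan_filters(). VLAN 0 has a reserved filter and never
 * consumes a credit; once the credits run out, accept-any-VLAN is enabled
 * instead of adding more filters.
 */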
677 int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
678 {
679 	struct qede_dev *edev = netdev_priv(dev);
680 	struct qede_vlan *vlan, *tmp;
681 	int rc = 0;
682 
683 	DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
684 
685 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
686 	if (!vlan) {
687 		DP_INFO(edev, "Failed to allocate struct for vlan\n");
688 		return -ENOMEM;
689 	}
690 	INIT_LIST_HEAD(&vlan->list);
691 	vlan->vid = vid;
692 	vlan->configured = false;
693 
694 	/* Verify vlan isn't already configured */
695 	list_for_each_entry(tmp, &edev->vlan_list, list) {
696 		if (tmp->vid == vlan->vid) {
697 			DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
698 				   "vlan already configured\n");
699 			kfree(vlan);
700 			return -EEXIST;
701 		}
702 	}
703 
704 	/* If interface is down, cache this VLAN ID and return */
705 	__qede_lock(edev);
706 	if (edev->state != QEDE_STATE_OPEN) {
707 		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
708 			   "Interface is down, VLAN %d will be configured when interface is up\n",
709 			   vid);
710 		if (vid != 0)
711 			edev->non_configured_vlans++;
712 		list_add(&vlan->list, &edev->vlan_list);
713 		goto out;
714 	}
715 
716 	/* Check for the filter limit.
717 	 * Note - vlan0 has a reserved filter and can be added without
718 	 * worrying about quota
719 	 */
720 	if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
721 	    (vlan->vid == 0)) {
722 		rc = qede_set_ucast_rx_vlan(edev,
723 					    QED_FILTER_XCAST_TYPE_ADD,
724 					    vlan->vid);
725 		if (rc) {
726 			DP_ERR(edev, "Failed to configure VLAN %d\n",
727 			       vlan->vid);
728 			kfree(vlan);
729 			goto out;
730 		}
731 		vlan->configured = true;
732 
733 		/* The vlan0 filter doesn't consume from our quota */
734 		if (vlan->vid != 0)
735 			edev->configured_vlans++;
736 	} else {
737 		/* Out of quota; Activate accept-any-VLAN mode */
738 		if (!edev->non_configured_vlans) {
739 			rc = qede_config_accept_any_vlan(edev, true);
740 			if (rc) {
741 				kfree(vlan);
742 				goto out;
743 			}
744 		}
745 
746 		edev->non_configured_vlans++;
747 	}
748 
749 	list_add(&vlan->list, &edev->vlan_list);
750 
751 out:
752 	__qede_unlock(edev);
753 	return rc;
754 }
755 
756 static void qede_del_vlan_from_list(struct qede_dev *edev,
757 				    struct qede_vlan *vlan)
758 {
759 	/* The vlan0 filter doesn't consume from our quota */
760 	if (vlan->vid != 0) {
761 		if (vlan->configured)
762 			edev->configured_vlans--;
763 		else
764 			edev->non_configured_vlans--;
765 	}
766 
767 	list_del(&vlan->list);
768 	kfree(vlan);
769 }
770 
771 int qede_configure_vlan_filters(struct qede_dev *edev)
772 {
773 	int rc = 0, real_rc = 0, accept_any_vlan = 0;
774 	struct qed_dev_eth_info *dev_info;
775 	struct qede_vlan *vlan = NULL;
776 
777 	if (list_empty(&edev->vlan_list))
778 		return 0;
779 
780 	dev_info = &edev->dev_info;
781 
782 	/* Configure non-configured vlans */
783 	list_for_each_entry(vlan, &edev->vlan_list, list) {
784 		if (vlan->configured)
785 			continue;
786 
787 		/* We have used all our credits, now enable accept_any_vlan */
788 		if ((vlan->vid != 0) &&
789 		    (edev->configured_vlans == dev_info->num_vlan_filters)) {
790 			accept_any_vlan = 1;
791 			continue;
792 		}
793 
794 		DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
795 
796 		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
797 					    vlan->vid);
798 		if (rc) {
799 			DP_ERR(edev, "Failed to configure VLAN %u\n",
800 			       vlan->vid);
801 			real_rc = rc;
802 			continue;
803 		}
804 
805 		vlan->configured = true;
806 		/* vlan0 filter doesn't consume our VLAN filter's quota */
807 		if (vlan->vid != 0) {
808 			edev->non_configured_vlans--;
809 			edev->configured_vlans++;
810 		}
811 	}
812 
813 	/* enable accept_any_vlan mode if we have more VLANs than credits,
814 	 * or remove accept_any_vlan mode if we've actually removed
815 	 * a non-configured vlan, and all remaining vlans are truly configured.
816 	 */
817 
818 	if (accept_any_vlan)
819 		rc = qede_config_accept_any_vlan(edev, true);
820 	else if (!edev->non_configured_vlans)
821 		rc = qede_config_accept_any_vlan(edev, false);
822 
823 	if (rc && !real_rc)
824 		real_rc = rc;
825 
826 	return real_rc;
827 }
828 
829 int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
830 {
831 	struct qede_dev *edev = netdev_priv(dev);
832 	struct qede_vlan *vlan;
833 	int rc = 0;
834 
835 	DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
836 
837 	/* Find whether entry exists */
838 	__qede_lock(edev);
839 	list_for_each_entry(vlan, &edev->vlan_list, list)
840 		if (vlan->vid == vid)
841 			break;
842 
843 	if (list_entry_is_head(vlan, &edev->vlan_list, list)) {
844 		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
845 			   "Vlan isn't configured\n");
846 		goto out;
847 	}
848 
849 	if (edev->state != QEDE_STATE_OPEN) {
850 		/* As interface is already down, we don't have a VPORT
851 		 * instance to remove vlan filter. So just update vlan list
852 		 */
853 		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
854 			   "Interface is down, removing VLAN from list only\n");
855 		qede_del_vlan_from_list(edev, vlan);
856 		goto out;
857 	}
858 
859 	/* Remove vlan */
860 	if (vlan->configured) {
861 		rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
862 					    vid);
863 		if (rc) {
864 			DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
865 			goto out;
866 		}
867 	}
868 
869 	qede_del_vlan_from_list(edev, vlan);
870 
871 	/* We have removed a VLAN - try to see if we can
872 	 * configure non-configured VLAN from the list.
873 	 */
874 	rc = qede_configure_vlan_filters(edev);
875 
876 out:
877 	__qede_unlock(edev);
878 	return rc;
879 }
880 
881 void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
882 {
883 	struct qede_vlan *vlan = NULL;
884 
885 	if (list_empty(&edev->vlan_list))
886 		return;
887 
888 	list_for_each_entry(vlan, &edev->vlan_list, list) {
889 		if (!vlan->configured)
890 			continue;
891 
892 		vlan->configured = false;
893 
894 		/* The vlan0 filter doesn't consume from our quota */
895 		if (vlan->vid != 0) {
896 			edev->non_configured_vlans++;
897 			edev->configured_vlans--;
898 		}
899 
900 		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
901 			   "marked vlan %d as non-configured\n", vlan->vid);
902 	}
903 
904 	edev->accept_any_vlan = false;
905 }
906 
907 static void qede_set_features_reload(struct qede_dev *edev,
908 				     struct qede_reload_args *args)
909 {
910 	edev->ndev->features = args->u.features;
911 }
912 
913 netdev_features_t qede_fix_features(struct net_device *dev,
914 				    netdev_features_t features)
915 {
916 	struct qede_dev *edev = netdev_priv(dev);
917 
918 	if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
919 	    !(features & NETIF_F_GRO))
920 		features &= ~NETIF_F_GRO_HW;
921 
922 	return features;
923 }
924 
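/* Toggling NETIF_F_GRO_HW requires reconfiguring FW aggregations, which is
 * done through a qede_reload(). With an XDP program attached there are no FW
 * aggregations anyway, so only the feature bits are updated in place.
 */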
925 int qede_set_features(struct net_device *dev, netdev_features_t features)
926 {
927 	struct qede_dev *edev = netdev_priv(dev);
928 	netdev_features_t changes = features ^ dev->features;
929 	bool need_reload = false;
930 
931 	if (changes & NETIF_F_GRO_HW)
932 		need_reload = true;
933 
934 	if (need_reload) {
935 		struct qede_reload_args args;
936 
937 		args.u.features = features;
938 		args.func = &qede_set_features_reload;
939 
940 		/* Make sure that we definitely need to reload.
941 		 * In case of an eBPF attached program, there will be no FW
942 		 * aggregations, so no need to actually reload.
943 		 */
944 		__qede_lock(edev);
945 		if (edev->xdp_prog)
946 			args.func(edev, &args);
947 		else
948 			qede_reload(edev, &args, true);
949 		__qede_unlock(edev);
950 
951 		return 1;
952 	}
953 
954 	return 0;
955 }
956 
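/* udp_tunnel_nic sync_table callback: programs the (single) VXLAN or GENEVE
 * UDP destination port through qed and caches it in vxlan_dst_port /
 * geneve_dst_port for later comparison in qede_udp_ports_update().
 */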
957 static int qede_udp_tunnel_sync(struct net_device *dev, unsigned int table)
958 {
959 	struct qede_dev *edev = netdev_priv(dev);
960 	struct qed_tunn_params tunn_params;
961 	struct udp_tunnel_info ti;
962 	u16 *save_port;
963 	int rc;
964 
965 	memset(&tunn_params, 0, sizeof(tunn_params));
966 
967 	udp_tunnel_nic_get_port(dev, table, 0, &ti);
968 	if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
969 		tunn_params.update_vxlan_port = 1;
970 		tunn_params.vxlan_port = ntohs(ti.port);
971 		save_port = &edev->vxlan_dst_port;
972 	} else {
973 		tunn_params.update_geneve_port = 1;
974 		tunn_params.geneve_port = ntohs(ti.port);
975 		save_port = &edev->geneve_dst_port;
976 	}
977 
978 	__qede_lock(edev);
979 	rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
980 	__qede_unlock(edev);
981 	if (rc)
982 		return rc;
983 
984 	*save_port = ntohs(ti.port);
985 	return 0;
986 }
987 
988 static const struct udp_tunnel_nic_info qede_udp_tunnels_both = {
989 	.sync_table	= qede_udp_tunnel_sync,
990 	.tables		= {
991 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
992 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
993 	},
994 }, qede_udp_tunnels_vxlan = {
995 	.sync_table	= qede_udp_tunnel_sync,
996 	.tables		= {
997 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
998 	},
999 }, qede_udp_tunnels_geneve = {
1000 	.sync_table	= qede_udp_tunnel_sync,
1001 	.tables		= {
1002 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
1003 	},
1004 };
1005 
1006 void qede_set_udp_tunnels(struct qede_dev *edev)
1007 {
1008 	if (edev->dev_info.common.vxlan_enable &&
1009 	    edev->dev_info.common.geneve_enable)
1010 		edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_both;
1011 	else if (edev->dev_info.common.vxlan_enable)
1012 		edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_vxlan;
1013 	else if (edev->dev_info.common.geneve_enable)
1014 		edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_geneve;
1015 }
1016 
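/* XDP setup path. The new program is swapped in from within qede_reload(),
 * which reconfigures the datapath around the exchange; the reference on the
 * old program is dropped here.
 */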
1017 static void qede_xdp_reload_func(struct qede_dev *edev,
1018 				 struct qede_reload_args *args)
1019 {
1020 	struct bpf_prog *old;
1021 
1022 	old = xchg(&edev->xdp_prog, args->u.new_prog);
1023 	if (old)
1024 		bpf_prog_put(old);
1025 }
1026 
1027 static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
1028 {
1029 	struct qede_reload_args args;
1030 
1031 	/* If we're called, there was already a bpf reference increment */
1032 	args.func = &qede_xdp_reload_func;
1033 	args.u.new_prog = prog;
1034 	qede_reload(edev, &args, false);
1035 
1036 	return 0;
1037 }
1038 
1039 int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1040 {
1041 	struct qede_dev *edev = netdev_priv(dev);
1042 
1043 	switch (xdp->command) {
1044 	case XDP_SETUP_PROG:
1045 		return qede_xdp_set(edev, xdp->prog);
1046 	default:
1047 		return -EINVAL;
1048 	}
1049 }
1050 
1051 static int qede_set_mcast_rx_mac(struct qede_dev *edev,
1052 				 enum qed_filter_xcast_params_type opcode,
1053 				 unsigned char *mac, int num_macs)
1054 {
1055 	struct qed_filter_mcast_params mcast;
1056 	int i;
1057 
1058 	memset(&mcast, 0, sizeof(mcast));
1059 	mcast.type = opcode;
1060 	mcast.num = num_macs;
1061 
1062 	for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
1063 		ether_addr_copy(mcast.mac[i], mac);
1064 
1065 	return edev->ops->filter_config_mcast(edev->cdev, &mcast);
1066 }
1067 
1068 int qede_set_mac_addr(struct net_device *ndev, void *p)
1069 {
1070 	struct qede_dev *edev = netdev_priv(ndev);
1071 	struct sockaddr *addr = p;
1072 	int rc = 0;
1073 
1074 	/* Make sure the state doesn't transition while changing the MAC.
1075 	 * Also, all flows accessing the dev_addr field are doing that under
1076 	 * this lock.
1077 	 */
1078 	__qede_lock(edev);
1079 
1080 	if (!is_valid_ether_addr(addr->sa_data)) {
1081 		DP_NOTICE(edev, "The MAC address is not valid\n");
1082 		rc = -EFAULT;
1083 		goto out;
1084 	}
1085 
1086 	if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
1087 		DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
1088 			  addr->sa_data);
1089 		rc = -EINVAL;
1090 		goto out;
1091 	}
1092 
1093 	if (edev->state == QEDE_STATE_OPEN) {
1094 		/* Remove the previous primary mac */
1095 		rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
1096 					   ndev->dev_addr);
1097 		if (rc)
1098 			goto out;
1099 	}
1100 
1101 	eth_hw_addr_set(ndev, addr->sa_data);
1102 	DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);
1103 
1104 	if (edev->state != QEDE_STATE_OPEN) {
1105 		DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
1106 			   "The device is currently down\n");
1107 		/* Ask PF to explicitly update a copy in bulletin board */
1108 		if (IS_VF(edev) && edev->ops->req_bulletin_update_mac)
1109 			edev->ops->req_bulletin_update_mac(edev->cdev,
1110 							   ndev->dev_addr);
1111 		goto out;
1112 	}
1113 
1114 	edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);
1115 
1116 	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
1117 				   ndev->dev_addr);
1118 out:
1119 	__qede_unlock(edev);
1120 	return rc;
1121 }
1122 
1123 static int
1124 qede_configure_mcast_filtering(struct net_device *ndev,
1125 			       enum qed_filter_rx_mode_type *accept_flags)
1126 {
1127 	struct qede_dev *edev = netdev_priv(ndev);
1128 	unsigned char *mc_macs, *temp;
1129 	struct netdev_hw_addr *ha;
1130 	int rc = 0, mc_count;
1131 	size_t size;
1132 
1133 	size = 64 * ETH_ALEN;
1134 
1135 	mc_macs = kzalloc(size, GFP_KERNEL);
1136 	if (!mc_macs) {
1137 		DP_NOTICE(edev,
1138 			  "Failed to allocate memory for multicast MACs\n");
1139 		rc = -ENOMEM;
1140 		goto exit;
1141 	}
1142 
1143 	temp = mc_macs;
1144 
1145 	/* Remove all previously configured MAC filters */
1146 	rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
1147 				   mc_macs, 1);
1148 	if (rc)
1149 		goto exit;
1150 
1151 	netif_addr_lock_bh(ndev);
1152 
1153 	mc_count = netdev_mc_count(ndev);
1154 	if (mc_count <= 64) {
1155 		netdev_for_each_mc_addr(ha, ndev) {
1156 			ether_addr_copy(temp, ha->addr);
1157 			temp += ETH_ALEN;
1158 		}
1159 	}
1160 
1161 	netif_addr_unlock_bh(ndev);
1162 
1163 	/* Check whether all-multicast is needed (@@@TBD: resource allocation) */
1164 	if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
1165 		if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
1166 			*accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
1167 	} else {
1168 		/* Add all multicast MAC filters */
1169 		rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
1170 					   mc_macs, mc_count);
1171 	}
1172 
1173 exit:
1174 	kfree(mc_macs);
1175 	return rc;
1176 }
1177 
1178 void qede_set_rx_mode(struct net_device *ndev)
1179 {
1180 	struct qede_dev *edev = netdev_priv(ndev);
1181 
1182 	set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
1183 	schedule_delayed_work(&edev->sp_task, 0);
1184 }
1185 
1186 /* Must be called with qede_lock held */
1187 void qede_config_rx_mode(struct net_device *ndev)
1188 {
1189 	enum qed_filter_rx_mode_type accept_flags;
1190 	struct qede_dev *edev = netdev_priv(ndev);
1191 	unsigned char *uc_macs, *temp;
1192 	struct netdev_hw_addr *ha;
1193 	int rc, uc_count;
1194 	size_t size;
1195 
1196 	netif_addr_lock_bh(ndev);
1197 
1198 	uc_count = netdev_uc_count(ndev);
1199 	size = uc_count * ETH_ALEN;
1200 
1201 	uc_macs = kzalloc(size, GFP_ATOMIC);
1202 	if (!uc_macs) {
1203 		DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
1204 		netif_addr_unlock_bh(ndev);
1205 		return;
1206 	}
1207 
1208 	temp = uc_macs;
1209 	netdev_for_each_uc_addr(ha, ndev) {
1210 		ether_addr_copy(temp, ha->addr);
1211 		temp += ETH_ALEN;
1212 	}
1213 
1214 	netif_addr_unlock_bh(ndev);
1215 
1216 	/* Remove all previous unicast secondary macs and multicast macs
1217 	 * (configure / leave the primary mac)
1218 	 */
1219 	rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
1220 				   edev->ndev->dev_addr);
1221 	if (rc)
1222 		goto out;
1223 
1224 	/* Check for promiscuous */
1225 	if (ndev->flags & IFF_PROMISC)
1226 		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
1227 	else
1228 		accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
1229 
1230 	/* Configure all filters regardless, in case promisc is rejected */
1231 	if (uc_count < edev->dev_info.num_mac_filters) {
1232 		int i;
1233 
1234 		temp = uc_macs;
1235 		for (i = 0; i < uc_count; i++) {
1236 			rc = qede_set_ucast_rx_mac(edev,
1237 						   QED_FILTER_XCAST_TYPE_ADD,
1238 						   temp);
1239 			if (rc)
1240 				goto out;
1241 
1242 			temp += ETH_ALEN;
1243 		}
1244 	} else {
1245 		accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
1246 	}
1247 
1248 	rc = qede_configure_mcast_filtering(ndev, &accept_flags);
1249 	if (rc)
1250 		goto out;
1251 
1252 	/* take care of VLAN mode */
1253 	if (ndev->flags & IFF_PROMISC) {
1254 		qede_config_accept_any_vlan(edev, true);
1255 	} else if (!edev->non_configured_vlans) {
1256 		/* It's possible that accept_any_vlan mode is set due to a
1257 		 * previous setting of IFF_PROMISC. If vlan credits are
1258 		 * sufficient, disable accept_any_vlan.
1259 		 */
1260 		qede_config_accept_any_vlan(edev, false);
1261 	}
1262 
1263 	edev->ops->filter_config_rx_mode(edev->cdev, accept_flags);
1264 out:
1265 	kfree(uc_macs);
1266 }
1267 
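/* Below are the ethtool ntuple (rxnfc) and tc-flower flow-filter helpers.
 * Unlike aRFS flows, user-configured rules all live in hash bucket 0 and are
 * addressed by their sw_id, which ethtool exposes as the rule location.
 * A hypothetical usage example (exact supported matches depend on the
 * parsing below and the ethtool version) would be:
 *
 *	ethtool -N <ifname> flow-type tcp4 dst-port 8000 action 2
 */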
1268 static struct qede_arfs_fltr_node *
1269 qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location)
1270 {
1271 	struct qede_arfs_fltr_node *fltr;
1272 
1273 	hlist_for_each_entry(fltr, head, node)
1274 		if (location == fltr->sw_id)
1275 			return fltr;
1276 
1277 	return NULL;
1278 }
1279 
1280 int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
1281 			  u32 *rule_locs)
1282 {
1283 	struct qede_arfs_fltr_node *fltr;
1284 	struct hlist_head *head;
1285 	int cnt = 0, rc = 0;
1286 
1287 	info->data = QEDE_RFS_MAX_FLTR;
1288 
1289 	__qede_lock(edev);
1290 
1291 	if (!edev->arfs) {
1292 		rc = -EPERM;
1293 		goto unlock;
1294 	}
1295 
1296 	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
1297 
1298 	hlist_for_each_entry(fltr, head, node) {
1299 		if (cnt == info->rule_cnt) {
1300 			rc = -EMSGSIZE;
1301 			goto unlock;
1302 		}
1303 
1304 		rule_locs[cnt] = fltr->sw_id;
1305 		cnt++;
1306 	}
1307 
1308 	info->rule_cnt = cnt;
1309 
1310 unlock:
1311 	__qede_unlock(edev);
1312 	return rc;
1313 }
1314 
1315 int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
1316 {
1317 	struct ethtool_rx_flow_spec *fsp = &cmd->fs;
1318 	struct qede_arfs_fltr_node *fltr = NULL;
1319 	int rc = 0;
1320 
1321 	cmd->data = QEDE_RFS_MAX_FLTR;
1322 
1323 	__qede_lock(edev);
1324 
1325 	if (!edev->arfs) {
1326 		rc = -EPERM;
1327 		goto unlock;
1328 	}
1329 
1330 	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
1331 					 fsp->location);
1332 	if (!fltr) {
1333 		DP_NOTICE(edev, "Rule not found - location=0x%x\n",
1334 			  fsp->location);
1335 		rc = -EINVAL;
1336 		goto unlock;
1337 	}
1338 
1339 	if (fltr->tuple.eth_proto == htons(ETH_P_IP)) {
1340 		if (fltr->tuple.ip_proto == IPPROTO_TCP)
1341 			fsp->flow_type = TCP_V4_FLOW;
1342 		else
1343 			fsp->flow_type = UDP_V4_FLOW;
1344 
1345 		fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port;
1346 		fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port;
1347 		fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4;
1348 		fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4;
1349 	} else {
1350 		if (fltr->tuple.ip_proto == IPPROTO_TCP)
1351 			fsp->flow_type = TCP_V6_FLOW;
1352 		else
1353 			fsp->flow_type = UDP_V6_FLOW;
1354 		fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port;
1355 		fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port;
1356 		memcpy(&fsp->h_u.tcp_ip6_spec.ip6src,
1357 		       &fltr->tuple.src_ipv6, sizeof(struct in6_addr));
1358 		memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst,
1359 		       &fltr->tuple.dst_ipv6, sizeof(struct in6_addr));
1360 	}
1361 
1362 	fsp->ring_cookie = fltr->rxq_id;
1363 
1364 	if (fltr->vfid) {
1365 		fsp->ring_cookie |= ((u64)fltr->vfid) <<
1366 					ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
1367 	}
1368 
1369 	if (fltr->b_is_drop)
1370 		fsp->ring_cookie = RX_CLS_FLOW_DISC;
1371 unlock:
1372 	__qede_unlock(edev);
1373 	return rc;
1374 }
1375 
1376 static int
1377 qede_poll_arfs_filter_config(struct qede_dev *edev,
1378 			     struct qede_arfs_fltr_node *fltr)
1379 {
1380 	int count = QEDE_ARFS_POLL_COUNT;
1381 
1382 	while (fltr->used && count) {
1383 		msleep(20);
1384 		count--;
1385 	}
1386 
1387 	if (count == 0 || fltr->fw_rc) {
1388 		DP_NOTICE(edev, "Timeout in polling filter config\n");
1389 		qede_dequeue_fltr_and_config_searcher(edev, fltr);
1390 		return -EIO;
1391 	}
1392 
1393 	return fltr->fw_rc;
1394 }
1395 
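/* Helpers for translating a qede_arfs_tuple into the minimal template packet
 * (Ethernet plus IPv4/IPv6 plus TCP/UDP headers) that gets DMA-mapped and
 * handed to the device when the filter is configured.
 */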
1396 static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t)
1397 {
1398 	int size = ETH_HLEN;
1399 
1400 	if (t->eth_proto == htons(ETH_P_IP))
1401 		size += sizeof(struct iphdr);
1402 	else
1403 		size += sizeof(struct ipv6hdr);
1404 
1405 	if (t->ip_proto == IPPROTO_TCP)
1406 		size += sizeof(struct tcphdr);
1407 	else
1408 		size += sizeof(struct udphdr);
1409 
1410 	return size;
1411 }
1412 
1413 static bool qede_flow_spec_ipv4_cmp(struct qede_arfs_tuple *a,
1414 				    struct qede_arfs_tuple *b)
1415 {
1416 	if (a->eth_proto != htons(ETH_P_IP) ||
1417 	    b->eth_proto != htons(ETH_P_IP))
1418 		return false;
1419 
1420 	return (a->src_ipv4 == b->src_ipv4) &&
1421 	       (a->dst_ipv4 == b->dst_ipv4);
1422 }
1423 
1424 static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t,
1425 				     void *header)
1426 {
1427 	__be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct iphdr));
1428 	struct iphdr *ip = (struct iphdr *)(header + ETH_HLEN);
1429 	struct ethhdr *eth = (struct ethhdr *)header;
1430 
1431 	eth->h_proto = t->eth_proto;
1432 	ip->saddr = t->src_ipv4;
1433 	ip->daddr = t->dst_ipv4;
1434 	ip->version = 0x4;
1435 	ip->ihl = 0x5;
1436 	ip->protocol = t->ip_proto;
1437 	ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN);
1438 
1439 	/* ports is weakly typed to suit both TCP and UDP ports */
1440 	ports[0] = t->src_port;
1441 	ports[1] = t->dst_port;
1442 }
1443 
1444 static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t,
1445 					 void *buffer)
1446 {
1447 	const char *prefix = t->ip_proto == IPPROTO_TCP ? "TCP" : "UDP";
1448 
1449 	snprintf(buffer, QEDE_FILTER_PRINT_MAX_LEN,
1450 		 "%s %pI4 (%04x) -> %pI4 (%04x)",
1451 		 prefix, &t->src_ipv4, t->src_port,
1452 		 &t->dst_ipv4, t->dst_port);
1453 }
1454 
1455 static bool qede_flow_spec_ipv6_cmp(struct qede_arfs_tuple *a,
1456 				    struct qede_arfs_tuple *b)
1457 {
1458 	if (a->eth_proto != htons(ETH_P_IPV6) ||
1459 	    b->eth_proto != htons(ETH_P_IPV6))
1460 		return false;
1461 
1462 	if (memcmp(&a->src_ipv6, &b->src_ipv6, sizeof(struct in6_addr)))
1463 		return false;
1464 
1465 	if (memcmp(&a->dst_ipv6, &b->dst_ipv6, sizeof(struct in6_addr)))
1466 		return false;
1467 
1468 	return true;
1469 }
1470 
1471 static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t,
1472 				     void *header)
1473 {
1474 	__be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct ipv6hdr));
1475 	struct ipv6hdr *ip6 = (struct ipv6hdr *)(header + ETH_HLEN);
1476 	struct ethhdr *eth = (struct ethhdr *)header;
1477 
1478 	eth->h_proto = t->eth_proto;
1479 	memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr));
1480 	memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr));
1481 	ip6->version = 0x6;
1482 
1483 	if (t->ip_proto == IPPROTO_TCP) {
1484 		ip6->nexthdr = NEXTHDR_TCP;
1485 		ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
1486 	} else {
1487 		ip6->nexthdr = NEXTHDR_UDP;
1488 		ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
1489 	}
1490 
1491 	/* ports is weakly typed to suit both TCP and UDP ports */
1492 	ports[0] = t->src_port;
1493 	ports[1] = t->dst_port;
1494 }
1495 
1496 /* Reject flow-spec fields that are set but not supported by the driver */
1497 static int qede_flow_spec_validate_unused(struct qede_dev *edev,
1498 					  struct ethtool_rx_flow_spec *fs)
1499 {
1500 	if (fs->flow_type & FLOW_MAC_EXT) {
1501 		DP_INFO(edev, "Don't support MAC extensions\n");
1502 		return -EOPNOTSUPP;
1503 	}
1504 
1505 	if ((fs->flow_type & FLOW_EXT) &&
1506 	    (fs->h_ext.vlan_etype || fs->h_ext.vlan_tci)) {
1507 		DP_INFO(edev, "Don't support vlan-based classification\n");
1508 		return -EOPNOTSUPP;
1509 	}
1510 
1511 	if ((fs->flow_type & FLOW_EXT) &&
1512 	    (fs->h_ext.data[0] || fs->h_ext.data[1])) {
1513 		DP_INFO(edev, "Don't support user defined data\n");
1514 		return -EOPNOTSUPP;
1515 	}
1516 
1517 	return 0;
1518 }
1519 
1520 static int qede_set_v4_tuple_to_profile(struct qede_arfs_tuple *t,
1521 					struct netlink_ext_ack *extack)
1522 {
1523 	/* Input must be exactly one of: the full 4-tuple, L4 dst port only,
1524 	 * src IP only, or dst IP only.
1525 	 */
1526 	if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
1527 		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
1528 	} else if (!t->src_port && t->dst_port &&
1529 		   !t->src_ipv4 && !t->dst_ipv4) {
1530 		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
1531 	} else if (!t->src_port && !t->dst_port &&
1532 		   !t->dst_ipv4 && t->src_ipv4) {
1533 		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
1534 	} else if (!t->src_port && !t->dst_port &&
1535 		   t->dst_ipv4 && !t->src_ipv4) {
1536 		t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
1537 	} else {
1538 		NL_SET_ERR_MSG_MOD(extack, "Invalid N-tuple");
1539 		return -EOPNOTSUPP;
1540 	}
1541 
1542 	t->ip_comp = qede_flow_spec_ipv4_cmp;
1543 	t->build_hdr = qede_flow_build_ipv4_hdr;
1544 	t->stringify = qede_flow_stringify_ipv4_hdr;
1545 
1546 	return 0;
1547 }
1548 
1549 static int qede_set_v6_tuple_to_profile(struct qede_arfs_tuple *t,
1550 					struct in6_addr *zaddr,
1551 					struct netlink_ext_ack *extack)
1552 {
1553 	/* Input must be exactly one of: the full 4-tuple, L4 dst port only,
1554 	 * src IP only, or dst IP only.
1555 	 */
1556 	if (t->src_port && t->dst_port &&
1557 	    memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
1558 	    memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
1559 		t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
1560 	} else if (!t->src_port && t->dst_port &&
1561 		   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
1562 		   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
1563 		t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
1564 	} else if (!t->src_port && !t->dst_port &&
1565 		   !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
1566 		   memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
1567 		t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
1568 	} else if (!t->src_port && !t->dst_port &&
1569 		   memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
1570 		   !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
1571 		t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
1572 	} else {
1573 		NL_SET_ERR_MSG_MOD(extack, "Invalid N-tuple");
1574 		return -EOPNOTSUPP;
1575 	}
1576 
1577 	t->ip_comp = qede_flow_spec_ipv6_cmp;
1578 	t->build_hdr = qede_flow_build_ipv6_hdr;
1579 
1580 	return 0;
1581 }
1582 
1583 /* Must be called while qede lock is held */
1584 static struct qede_arfs_fltr_node *
1585 qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
1586 {
1587 	struct qede_arfs_fltr_node *fltr;
1588 	struct hlist_node *temp;
1589 	struct hlist_head *head;
1590 
1591 	head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
1592 
1593 	hlist_for_each_entry_safe(fltr, temp, head, node) {
1594 		if (fltr->tuple.ip_proto == t->ip_proto &&
1595 		    fltr->tuple.src_port == t->src_port &&
1596 		    fltr->tuple.dst_port == t->dst_port &&
1597 		    t->ip_comp(&fltr->tuple, t))
1598 			return fltr;
1599 	}
1600 
1601 	return NULL;
1602 }
1603 
1604 static void qede_flow_set_destination(struct qede_dev *edev,
1605 				      struct qede_arfs_fltr_node *n,
1606 				      struct ethtool_rx_flow_spec *fs)
1607 {
1608 	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
1609 		n->b_is_drop = true;
1610 		return;
1611 	}
1612 
1613 	n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
1614 	n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie);
1615 	n->next_rxq_id = n->rxq_id;
1616 
1617 	if (n->vfid)
1618 		DP_VERBOSE(edev, QED_MSG_SP,
1619 			   "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1);
1620 }
1621 
1622 int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
1623 {
1624 	struct qede_arfs_fltr_node *fltr = NULL;
1625 	int rc = -EPERM;
1626 
1627 	__qede_lock(edev);
1628 	if (!edev->arfs)
1629 		goto unlock;
1630 
1631 	fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
1632 					 cookie);
1633 	if (!fltr)
1634 		goto unlock;
1635 
1636 	qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false);
1637 
1638 	rc = qede_poll_arfs_filter_config(edev, fltr);
1639 	if (rc == 0)
1640 		qede_dequeue_fltr_and_config_searcher(edev, fltr);
1641 
1642 unlock:
1643 	__qede_unlock(edev);
1644 	return rc;
1645 }
1646 
1647 int qede_get_arfs_filter_count(struct qede_dev *edev)
1648 {
1649 	int count = 0;
1650 
1651 	__qede_lock(edev);
1652 
1653 	if (!edev->arfs)
1654 		goto unlock;
1655 
1656 	count = edev->arfs->filter_count;
1657 
1658 unlock:
1659 	__qede_unlock(edev);
1660 	return count;
1661 }
1662 
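/* tc-flower offload: validate the action list. Only 'drop' and redirect to
 * an RX queue (or a VF's queue) are supported, and the queue index must be
 * within the PF's configured RSS queue count.
 */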
1663 static int qede_parse_actions(struct qede_dev *edev,
1664 			      struct flow_action *flow_action,
1665 			      struct netlink_ext_ack *extack)
1666 {
1667 	const struct flow_action_entry *act;
1668 	int i;
1669 
1670 	if (!flow_action_has_entries(flow_action)) {
1671 		NL_SET_ERR_MSG_MOD(extack, "No actions received");
1672 		return -EINVAL;
1673 	}
1674 
1675 	if (!flow_action_basic_hw_stats_check(flow_action, extack))
1676 		return -EOPNOTSUPP;
1677 
1678 	flow_action_for_each(i, act, flow_action) {
1679 		switch (act->id) {
1680 		case FLOW_ACTION_DROP:
1681 			break;
1682 		case FLOW_ACTION_QUEUE:
1683 			if (act->queue.vf)
1684 				break;
1685 
1686 			if (act->queue.index >= QEDE_RSS_COUNT(edev)) {
1687 				NL_SET_ERR_MSG_MOD(extack,
1688 						   "Queue out-of-bounds");
1689 				return -EINVAL;
1690 			}
1691 			break;
1692 		default:
1693 			return -EINVAL;
1694 		}
1695 	}
1696 
1697 	return 0;
1698 }
1699 
1700 static int
1701 qede_flow_parse_ports(struct flow_rule *rule, struct qede_arfs_tuple *t,
1702 		      struct netlink_ext_ack *extack)
1703 {
1704 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
1705 		struct flow_match_ports match;
1706 
1707 		flow_rule_match_ports(rule, &match);
1708 		if ((match.key->src && match.mask->src != htons(U16_MAX)) ||
1709 		    (match.key->dst && match.mask->dst != htons(U16_MAX))) {
1710 			NL_SET_ERR_MSG_MOD(extack,
1711 					   "Do not support ports masks");
1712 			return -EINVAL;
1713 		}
1714 
1715 		t->src_port = match.key->src;
1716 		t->dst_port = match.key->dst;
1717 	}
1718 
1719 	return 0;
1720 }
1721 
1722 static int
1723 qede_flow_parse_v6_common(struct flow_rule *rule,
1724 			  struct qede_arfs_tuple *t,
1725 			  struct netlink_ext_ack *extack)
1726 {
1727 	struct in6_addr zero_addr, addr;
1728 	int err;
1729 
1730 	memset(&zero_addr, 0, sizeof(addr));
1731 	memset(&addr, 0xff, sizeof(addr));
1732 
1733 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
1734 		struct flow_match_ipv6_addrs match;
1735 
1736 		flow_rule_match_ipv6_addrs(rule, &match);
1737 		if ((memcmp(&match.key->src, &zero_addr, sizeof(addr)) &&
1738 		     memcmp(&match.mask->src, &addr, sizeof(addr))) ||
1739 		    (memcmp(&match.key->dst, &zero_addr, sizeof(addr)) &&
1740 		     memcmp(&match.mask->dst, &addr, sizeof(addr)))) {
1741 			NL_SET_ERR_MSG_MOD(extack,
1742 					   "Do not support IPv6 address prefix/mask");
1743 			return -EINVAL;
1744 		}
1745 
1746 		memcpy(&t->src_ipv6, &match.key->src, sizeof(addr));
1747 		memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr));
1748 	}
1749 
1750 	err = qede_flow_parse_ports(rule, t, extack);
1751 	if (err)
1752 		return err;
1753 
1754 	return qede_set_v6_tuple_to_profile(t, &zero_addr, extack);
1755 }
1756 
1757 static int
1758 qede_flow_parse_v4_common(struct flow_rule *rule,
1759 			  struct qede_arfs_tuple *t,
1760 			  struct netlink_ext_ack *extack)
1761 {
1762 	int err;
1763 
1764 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
1765 		struct flow_match_ipv4_addrs match;
1766 
1767 		flow_rule_match_ipv4_addrs(rule, &match);
1768 		if ((match.key->src && match.mask->src != htonl(U32_MAX)) ||
1769 		    (match.key->dst && match.mask->dst != htonl(U32_MAX))) {
1770 			NL_SET_ERR_MSG_MOD(extack,
1771 					   "Do not support ipv4 prefix/masks");
1772 			return -EINVAL;
1773 		}
1774 
1775 		t->src_ipv4 = match.key->src;
1776 		t->dst_ipv4 = match.key->dst;
1777 	}
1778 
1779 	err = qede_flow_parse_ports(rule, t, extack);
1780 	if (err)
1781 		return err;
1782 
1783 	return qede_set_v4_tuple_to_profile(t, extack);
1784 }
1785 
1786 static int
1787 qede_flow_parse_tcp_v6(struct flow_rule *rule, struct qede_arfs_tuple *tuple,
1788 		       struct netlink_ext_ack *extack)
1789 {
1790 	tuple->ip_proto = IPPROTO_TCP;
1791 	tuple->eth_proto = htons(ETH_P_IPV6);
1792 
1793 	return qede_flow_parse_v6_common(rule, tuple, extack);
1794 }
1795 
1796 static int
1797 qede_flow_parse_tcp_v4(struct flow_rule *rule, struct qede_arfs_tuple *tuple,
1798 		       struct netlink_ext_ack *extack)
1799 {
1800 	tuple->ip_proto = IPPROTO_TCP;
1801 	tuple->eth_proto = htons(ETH_P_IP);
1802 
1803 	return qede_flow_parse_v4_common(rule, tuple, extack);
1804 }
1805 
1806 static int
1807 qede_flow_parse_udp_v6(struct flow_rule *rule, struct qede_arfs_tuple *tuple,
1808 		       struct netlink_ext_ack *extack)
1809 {
1810 	tuple->ip_proto = IPPROTO_UDP;
1811 	tuple->eth_proto = htons(ETH_P_IPV6);
1812 
1813 	return qede_flow_parse_v6_common(rule, tuple, extack);
1814 }
1815 
1816 static int
1817 qede_flow_parse_udp_v4(struct flow_rule *rule, struct qede_arfs_tuple *tuple,
1818 		       struct netlink_ext_ack *extack)
1819 {
1820 	tuple->ip_proto = IPPROTO_UDP;
1821 	tuple->eth_proto = htons(ETH_P_IP);
1822 
1823 	return qede_flow_parse_v4_common(rule, tuple, extack);
1824 }
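
/* The four qede_flow_parse_{tcp,udp}_v{4,6}() wrappers above only seed
 * ip_proto and eth_proto in the tuple and then defer to the common parser
 * for the relevant address family.
 */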
1825 
1826 static int
1827 qede_parse_flow_attr(__be16 proto, struct flow_rule *rule,
1828 		     struct qede_arfs_tuple *tuple,
1829 		     struct netlink_ext_ack *extack)
1830 {
1831 	struct flow_dissector *dissector = rule->match.dissector;
1832 	int rc = -EINVAL;
1833 	u8 ip_proto = 0;
1834 
1835 	memset(tuple, 0, sizeof(*tuple));
1836 
1837 	if (dissector->used_keys &
1838 	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
1839 	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
1840 	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
1841 	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
1842 	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) {
1843 		NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported key used: 0x%llx",
1844 				       dissector->used_keys);
1845 		return -EOPNOTSUPP;
1846 	}
1847 
1848 	if (flow_rule_match_has_control_flags(rule, extack))
1849 		return -EOPNOTSUPP;
1850 
1851 	if (proto != htons(ETH_P_IP) &&
1852 	    proto != htons(ETH_P_IPV6)) {
1853 		NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported proto=0x%x",
1854 				       proto);
1855 		return -EPROTONOSUPPORT;
1856 	}
1857 
1858 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
1859 		struct flow_match_basic match;
1860 
1861 		flow_rule_match_basic(rule, &match);
1862 		ip_proto = match.key->ip_proto;
1863 	}
1864 
1865 	if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
1866 		rc = qede_flow_parse_tcp_v4(rule, tuple, extack);
1867 	else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
1868 		rc = qede_flow_parse_tcp_v6(rule, tuple, extack);
1869 	else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
1870 		rc = qede_flow_parse_udp_v4(rule, tuple, extack);
1871 	else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
1872 		rc = qede_flow_parse_udp_v6(rule, tuple, extack);
1873 	else
1874 		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol request");
1875 
1876 	return rc;
1877 }
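
/* qede_parse_flow_attr() therefore only offloads plain 5-tuple rules:
 * IPv4 or IPv6 over TCP or UDP, with optional exact-match addresses and
 * L4 ports.  Any other dissector key (VLAN, tunnel metadata, control
 * flags, ...) fails the offload with -EOPNOTSUPP.
 */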
1878 
1879 int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
1880 			    struct flow_cls_offload *f)
1881 {
1882 	struct netlink_ext_ack *extack = f->common.extack;
1883 	struct qede_arfs_fltr_node *n;
1884 	struct qede_arfs_tuple t;
1885 	int min_hlen, rc;
1886 
1887 	__qede_lock(edev);
1888 
1889 	if (!edev->arfs) {
1890 		rc = -EPERM;
1891 		goto unlock;
1892 	}
1893 
1894 	/* parse flower attribute and prepare filter */
1895 	rc = qede_parse_flow_attr(proto, f->rule, &t, extack);
1896 	if (rc)
1897 		goto unlock;
1898 
1899 	/* Validate profile mode and number of filters */
1900 	if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
1901 	    edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) {
1902 		DP_NOTICE(edev,
1903 			  "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
1904 			  t.mode, edev->arfs->mode, edev->arfs->filter_count);
1905 		rc = -EINVAL;
1906 		goto unlock;
1907 	}
1908 
1909 	/* parse tc actions and get the vf_id */
1910 	rc = qede_parse_actions(edev, &f->rule->action, extack);
1911 	if (rc)
1912 		goto unlock;
1913 
1914 	if (qede_flow_find_fltr(edev, &t)) {
1915 		rc = -EEXIST;
1916 		goto unlock;
1917 	}
1918 
1919 	n = kzalloc(sizeof(*n), GFP_KERNEL);
1920 	if (!n) {
1921 		rc = -ENOMEM;
1922 		goto unlock;
1923 	}
1924 
1925 	min_hlen = qede_flow_get_min_header_size(&t);
1926 
1927 	n->data = kzalloc(min_hlen, GFP_KERNEL);
1928 	if (!n->data) {
1929 		kfree(n);
1930 		rc = -ENOMEM;
1931 		goto unlock;
1932 	}
1933 
1934 	memcpy(&n->tuple, &t, sizeof(n->tuple));
1935 
1936 	n->buf_len = min_hlen;
1937 	n->b_is_drop = true;
1938 	n->sw_id = f->cookie;
1939 
1940 	n->tuple.build_hdr(&n->tuple, n->data);
1941 
1942 	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
1943 	if (rc)
1944 		goto unlock;
1945 
1946 	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
1947 	rc = qede_poll_arfs_filter_config(edev, n);
1948 
1949 unlock:
1950 	__qede_unlock(edev);
1951 	return rc;
1952 }
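
/* A rule reaching qede_add_tc_flower_fltr() is typically installed from
 * user space with something along these lines (illustrative only):
 *
 *   tc qdisc add dev $IF ingress
 *   tc filter add dev $IF ingress protocol ip flower skip_sw \
 *      ip_proto tcp dst_port 443 action drop
 *
 * Note that n->b_is_drop is set above, so filters added through this path
 * are programmed as drop filters.
 */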
1953 
1954 static int qede_flow_spec_validate(struct qede_dev *edev,
1955 				   struct flow_action *flow_action,
1956 				   struct qede_arfs_tuple *t,
1957 				   __u32 location,
1958 				   struct netlink_ext_ack *extack)
1959 {
1960 	int err;
1961 
1962 	if (location >= QEDE_RFS_MAX_FLTR) {
1963 		DP_INFO(edev, "Location out-of-bounds\n");
1964 		return -EINVAL;
1965 	}
1966 
1967 	/* Check location isn't already in use */
1968 	if (test_bit(location, edev->arfs->arfs_fltr_bmap)) {
1969 		DP_INFO(edev, "Location already in use\n");
1970 		return -EINVAL;
1971 	}
1972 
1973 	/* Check if the filtering-mode could support the filter */
1974 	if (edev->arfs->filter_count &&
1975 	    edev->arfs->mode != t->mode) {
1976 		DP_INFO(edev,
1977 			"flow_spec would require filtering mode %08x, but %08x is configured\n",
1978 			t->mode, edev->arfs->mode);
1979 		return -EINVAL;
1980 	}
1981 
1982 	err = qede_parse_actions(edev, flow_action, extack);
1983 	if (err)
1984 		return err;
1985 
1986 	return 0;
1987 }
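
/* qede_flow_spec_validate() guards the ethtool path only: 'location' is
 * the user-chosen rule index, bounded by QEDE_RFS_MAX_FLTR and tracked in
 * arfs_fltr_bmap, and a new rule must not require a filtering profile
 * different from the one already programmed.
 */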
1988 
1989 static int qede_flow_spec_to_rule(struct qede_dev *edev,
1990 				  struct qede_arfs_tuple *t,
1991 				  struct ethtool_rx_flow_spec *fs)
1992 {
1993 	struct ethtool_rx_flow_spec_input input = {};
1994 	struct ethtool_rx_flow_rule *flow;
1995 	struct netlink_ext_ack extack;
1996 	__be16 proto;
1997 	int err;
1998 
1999 	err = qede_flow_spec_validate_unused(edev, fs);
2000 	if (err)
2001 		return err;
2002 
2003 	switch ((fs->flow_type & ~FLOW_EXT)) {
2004 	case TCP_V4_FLOW:
2005 	case UDP_V4_FLOW:
2006 		proto = htons(ETH_P_IP);
2007 		break;
2008 	case TCP_V6_FLOW:
2009 	case UDP_V6_FLOW:
2010 		proto = htons(ETH_P_IPV6);
2011 		break;
2012 	default:
2013 		DP_VERBOSE(edev, NETIF_MSG_IFUP,
2014 			   "Can't support flow of type %08x\n", fs->flow_type);
2015 		return -EOPNOTSUPP;
2016 	}
2017 
2018 	input.fs = fs;
2019 	flow = ethtool_rx_flow_rule_create(&input);
2020 	if (IS_ERR(flow))
2021 		return PTR_ERR(flow);
2022 
2023 	err = qede_parse_flow_attr(proto, flow->rule, t, &extack);
2024 	if (err)
2025 		goto err_out;
2026 
2027 	/* Make sure location is valid and filter isn't already set */
2028 	err = qede_flow_spec_validate(edev, &flow->rule->action, t,
2029 				      fs->location, &extack);
2030 err_out:
2031 	if (extack._msg)
2032 		DP_NOTICE(edev, "%s\n", extack._msg);
2033 	ethtool_rx_flow_rule_destroy(flow);
2034 	return err;
2035 
2036 }
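
/* ethtool_rx_flow_rule_create() converts the legacy ethtool_rx_flow_spec
 * into a struct flow_rule, which lets the ethtool ntuple path above reuse
 * the same qede_parse_flow_attr()/qede_flow_spec_validate() logic as the
 * tc flower offload.
 */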
2037 
2038 int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
2039 {
2040 	struct ethtool_rx_flow_spec *fsp = &info->fs;
2041 	struct qede_arfs_fltr_node *n;
2042 	struct qede_arfs_tuple t;
2043 	int min_hlen, rc;
2044 
2045 	__qede_lock(edev);
2046 
2047 	if (!edev->arfs) {
2048 		rc = -EPERM;
2049 		goto unlock;
2050 	}
2051 
2052 	/* Translate the flow specification into something fitting our DB */
2053 	rc = qede_flow_spec_to_rule(edev, &t, fsp);
2054 	if (rc)
2055 		goto unlock;
2056 
2057 	if (qede_flow_find_fltr(edev, &t)) {
2058 		rc = -EINVAL;
2059 		goto unlock;
2060 	}
2061 
2062 	n = kzalloc(sizeof(*n), GFP_KERNEL);
2063 	if (!n) {
2064 		rc = -ENOMEM;
2065 		goto unlock;
2066 	}
2067 
2068 	min_hlen = qede_flow_get_min_header_size(&t);
2069 	n->data = kzalloc(min_hlen, GFP_KERNEL);
2070 	if (!n->data) {
2071 		kfree(n);
2072 		rc = -ENOMEM;
2073 		goto unlock;
2074 	}
2075 
2076 	n->sw_id = fsp->location;
2077 	set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
2078 	n->buf_len = min_hlen;
2079 
2080 	memcpy(&n->tuple, &t, sizeof(n->tuple));
2081 
2082 	qede_flow_set_destination(edev, n, fsp);
2083 
2084 	/* Build a minimal header according to the flow */
2085 	n->tuple.build_hdr(&n->tuple, n->data);
2086 
2087 	rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
2088 	if (rc)
2089 		goto unlock;
2090 
2091 	qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
2092 	rc = qede_poll_arfs_filter_config(edev, n);
2093 unlock:
2094 	__qede_unlock(edev);
2095 
2096 	return rc;
2097 }
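
/* qede_add_cls_rule() backs the ethtool ntuple interface; a matching user
 * space invocation would look something like (illustrative only):
 *
 *   ethtool -N $IF flow-type tcp4 src-ip 192.0.2.1 dst-port 443 \
 *      action 3 loc 0
 *
 * where "action 3" steers matches to RX queue 3 and "loc 0" selects the
 * sw_id/bitmap slot checked in qede_flow_spec_validate().
 */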
2098