// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 */

#include <net/ipv6.h>

#include "otx2_common.h"

#define OTX2_DEFAULT_ACTION	0x1

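/* Software state for one ethtool n-tuple filter: the user-supplied flow
 * spec, its slot in the sorted flow list, the MCAM entry backing it and,
 * when the rule targets a VF queue, the destination VF.
 */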
struct otx2_flow {
	struct ethtool_rx_flow_spec flow_spec;
	struct list_head list;
	u32 location;
	u16 entry;
	bool is_vf;
	int vf;
};

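/* Ask the AF, over the mailbox, for the MCAM entries this PF needs. On a
 * full allocation the entries are carved into VF VLAN, n-tuple, unicast
 * filter and RX VLAN regions; if the AF returns fewer entries than
 * requested, everything is given to n-tuple filters instead.
 */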
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	int vf_vlan_max_flows;
	int i;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
	req->contig = false;
	req->count = OTX2_MCAM_COUNT + vf_vlan_max_flows;

	/* Send message to AF */
	if (otx2_sync_mbox_msg(&pfvf->mbox)) {
		mutex_unlock(&pfvf->mbox.lock);
		return -EINVAL;
	}

	rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
	       (&pfvf->mbox.mbox, 0, &req->hdr);

	if (rsp->count != req->count) {
		netdev_info(pfvf->netdev,
			    "Unable to allocate %d MCAM entries, got %d\n",
			    req->count, rsp->count);
		/* support only ntuples here */
		flow_cfg->ntuple_max_flows = rsp->count;
		flow_cfg->ntuple_offset = 0;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
	} else {
		flow_cfg->vf_vlan_offset = 0;
		flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset +
						vf_vlan_max_flows;
		flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
						OTX2_MAX_NTUPLE_FLOWS;
		flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
						OTX2_MAX_UNICAST_FLOWS;
		pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
		pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
		pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
		pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
	}

	for (i = 0; i < rsp->count; i++)
		flow_cfg->entry[i] = rsp->entry_list[i];

	pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;

	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

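/* Set up the flow configuration state for this PF: the flow list, the
 * n-tuple limit, the MCAM allocation above and the unicast MAC filter
 * table.
 */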
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
	int err;

	pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
				    GFP_KERNEL);
	if (!pf->flow_cfg)
		return -ENOMEM;

	INIT_LIST_HEAD(&pf->flow_cfg->flow_list);

	pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;

	err = otx2_alloc_mcam_entries(pf);
	if (err)
		return err;

	pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
					* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
	if (!pf->mac_table)
		return -ENOMEM;

	return 0;
}

void otx2_mcam_flow_del(struct otx2_nic *pf)
{
	otx2_destroy_mcam_flows(pf);
}

/* On success an MCAM entry is added for the filter.
 * On failure promiscuous mode is enabled.
 */
static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
{
	struct otx2_flow_config *flow_cfg = pf->flow_cfg;
	struct npc_install_flow_req *req;
	int err, i;

	if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
		return -ENOMEM;

	/* No free MCAM entries, or the UC list exceeds what was allotted */
	if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
		return -ENOMEM;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* The unicast region starts at offset 32; entries 0..31 are ntuple */
	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (pf->mac_table[i].inuse)
			continue;
		ether_addr_copy(pf->mac_table[i].addr, mac);
		pf->mac_table[i].inuse = true;
		pf->mac_table[i].mcam_entry =
			flow_cfg->entry[i + flow_cfg->unicast_offset];
		req->entry = pf->mac_table[i].mcam_entry;
		break;
	}

	ether_addr_copy(req->packet.dmac, mac);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->features = BIT_ULL(NPC_DMAC);
	req->channel = pf->hw.rx_chan_base;
	req->intf = NIX_INTF_RX;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->set_cntr = 1;

	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);

	return otx2_do_add_macfilter(pf, mac);
}

static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
				       int *mcam_entry)
{
	int i;

	for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
		if (!pf->mac_table[i].inuse)
			continue;

		if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
			*mcam_entry = pf->mac_table[i].mcam_entry;
			pf->mac_table[i].inuse = false;
			return true;
		}
	}
	return false;
}

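/* Remove the unicast MAC filter for @mac, if one was installed, by
 * asking the AF to delete its MCAM entry.
 */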
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
{
	struct otx2_nic *pf = netdev_priv(netdev);
	struct npc_delete_flow_req *req;
	int err, mcam_entry;

	/* Check whether an MCAM entry exists for the given MAC */
	if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
		return 0;

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}
	req->entry = mcam_entry;
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pf->mbox);
	mutex_unlock(&pf->mbox.lock);

	return err;
}

static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location)
			return iter;
	}

	return NULL;
}

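/* Keep the flow list sorted by ascending rule location and insert
 * @flow at its position.
 */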
static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	struct list_head *head = &pfvf->flow_cfg->flow_list;
	struct otx2_flow *iter;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location > flow->location)
			break;
		head = &iter->list;
	}

	list_add(&flow->list, head);
}

int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		  u32 location)
{
	struct otx2_flow *iter;

	if (location >= pfvf->flow_cfg->ntuple_max_flows)
		return -EINVAL;

	list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
		if (iter->location == location) {
			nfc->fs = iter->flow_spec;
			return 0;
		}
	}

	return -ENOENT;
}

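/* Walk every possible rule location and record the ones that have a
 * flow installed in @rule_locs, for the ethtool rule-dump path.
 */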
int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
		       u32 *rule_locs)
{
	u32 location = 0;
	int idx = 0;
	int err = 0;

	nfc->data = pfvf->flow_cfg->ntuple_max_flows;
	while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) {
		err = otx2_get_flow(pfvf, nfc, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}

	return err;
}

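/* Translate the IPv4 part of an ethtool flow spec (addresses and, for
 * TCP/UDP/SCTP, L4 ports) into the packet/mask fields of the AF's
 * npc_install_flow_req. Fields with an all-zero mask are left out.
 */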
static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
				   struct npc_install_flow_req *req,
				   u32 flow_type)
{
	struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IP_USER_FLOW:
		if (ipv4_usr_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_usr_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		if (ipv4_l4_mask->ip4src) {
			memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
			       sizeof(pkt->ip4src));
			memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
			       sizeof(pmask->ip4src));
			req->features |= BIT_ULL(NPC_SIP_IPV4);
		}
		if (ipv4_l4_mask->ip4dst) {
			memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
			       sizeof(pkt->ip4dst));
			memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
			       sizeof(pmask->ip4dst));
			req->features |= BIT_ULL(NPC_DIP_IPV4);
		}
		if (ipv4_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv4_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V4_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		break;
	default:
		break;
	}
}

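/* IPv6 counterpart of otx2_prepare_ipv4_flow(): copy source/destination
 * addresses and L4 ports into the request when their masks are non-zero.
 */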
static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
				   struct npc_install_flow_req *req,
				   u32 flow_type)
{
	struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;

	switch (flow_type) {
	case IPV6_USER_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
			memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
			       sizeof(pkt->ip6src));
			memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
			       sizeof(pmask->ip6src));
			req->features |= BIT_ULL(NPC_SIP_IPV6);
		}
		if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
			memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
			       sizeof(pkt->ip6dst));
			memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
			       sizeof(pmask->ip6dst));
			req->features |= BIT_ULL(NPC_DIP_IPV6);
		}
		if (ipv6_l4_mask->psrc) {
			memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
			       sizeof(pkt->sport));
			memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
			       sizeof(pmask->sport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_SPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_SPORT_SCTP);
		}
		if (ipv6_l4_mask->pdst) {
			memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
			       sizeof(pkt->dport));
			memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
			       sizeof(pmask->dport));
			if (flow_type == UDP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_UDP);
			else if (flow_type == TCP_V6_FLOW)
				req->features |= BIT_ULL(NPC_DPORT_TCP);
			else
				req->features |= BIT_ULL(NPC_DPORT_SCTP);
		}
		break;
	default:
		break;
	}
}

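/* Convert an ethtool_rx_flow_spec into an AF npc_install_flow_req:
 * Ethernet header fields directly, IPv4/IPv6 fields via the helpers
 * above, plus the FLOW_EXT VLAN TCI and FLOW_MAC_EXT destination MAC.
 * Returns -EOPNOTSUPP when nothing usable was requested.
 */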
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
			      struct npc_install_flow_req *req)
{
	struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
	struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
	struct flow_msg *pmask = &req->mask;
	struct flow_msg *pkt = &req->packet;
	u32 flow_type;

	flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
	switch (flow_type) {
	/* bits not set in mask are don't care */
	case ETHER_FLOW:
		if (!is_zero_ether_addr(eth_mask->h_source)) {
			ether_addr_copy(pkt->smac, eth_hdr->h_source);
			ether_addr_copy(pmask->smac, eth_mask->h_source);
			req->features |= BIT_ULL(NPC_SMAC);
		}
		if (!is_zero_ether_addr(eth_mask->h_dest)) {
			ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
			ether_addr_copy(pmask->dmac, eth_mask->h_dest);
			req->features |= BIT_ULL(NPC_DMAC);
		}
		if (eth_mask->h_proto) {
			memcpy(&pkt->etype, &eth_hdr->h_proto,
			       sizeof(pkt->etype));
			memcpy(&pmask->etype, &eth_mask->h_proto,
			       sizeof(pmask->etype));
			req->features |= BIT_ULL(NPC_ETYPE);
		}
		break;
	case IP_USER_FLOW:
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
		otx2_prepare_ipv4_flow(fsp, req, flow_type);
		break;
	case IPV6_USER_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
		otx2_prepare_ipv6_flow(fsp, req, flow_type);
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (fsp->flow_type & FLOW_EXT) {
		if (fsp->m_ext.vlan_etype)
			return -EINVAL;
		if (fsp->m_ext.vlan_tci) {
			if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
				return -EINVAL;
			if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;

			memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
			       sizeof(pkt->vlan_tci));
			memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
			       sizeof(pmask->vlan_tci));
			req->features |= BIT_ULL(NPC_OUTER_VID);
		}

		/* Neither drop nor direct to a queue, but use the action
		 * from the default entry instead.
		 */
		if (fsp->m_ext.data[1] &&
		    fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
			req->op = NIX_RX_ACTION_DEFAULT;
	}

	if (fsp->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fsp->m_ext.h_dest)) {
		ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
		ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
		req->features |= BIT_ULL(NPC_DMAC);
	}

	if (!req->features)
		return -EOPNOTSUPP;

	return 0;
}

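/* Build and send the npc_install_flow_req for @flow: pick the MCAM
 * entry, set the drop/unicast/default action from the ring_cookie and
 * target a VF when the cookie encodes one, then sync with the AF.
 */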
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
	u64 ring_cookie = flow->flow_spec.ring_cookie;
	struct npc_install_flow_req *req;
	int err, vf = 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	err = otx2_prepare_flow_request(&flow->flow_spec, req);
	if (err) {
		/* Free the message allocated above */
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	req->entry = flow->entry;
	req->intf = NIX_INTF_RX;
	req->set_cntr = 1;
	req->channel = pfvf->hw.rx_chan_base;
	if (ring_cookie == RX_CLS_FLOW_DISC) {
		req->op = NIX_RX_ACTIONOP_DROP;
	} else {
		/* Change to unicast only if the default entry's action was
		 * not requested by the user.
		 */
		if (req->op != NIX_RX_ACTION_DEFAULT)
			req->op = NIX_RX_ACTIONOP_UCAST;
		req->index = ethtool_get_flow_spec_ring(ring_cookie);
		vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
		if (vf > pci_num_vf(pfvf->pdev)) {
			mutex_unlock(&pfvf->mbox.lock);
			return -EINVAL;
		}
	}

	/* The ethtool ring_cookie encodes a VF as (VF + 1) */
	if (vf) {
		req->vf = vf;
		flow->is_vf = true;
		flow->vf = vf;
	}

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

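/* Add or update the ethtool n-tuple rule described by @fsp. A new rule
 * gets an otx2_flow and an MCAM entry based on its location; an update
 * reuses the existing ones.
 */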
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
	struct otx2_flow *flow;
	bool new = false;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return -ENOMEM;

	if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	if (fsp->location >= flow_cfg->ntuple_max_flows)
		return -EINVAL;

	flow = otx2_find_flow(pfvf, fsp->location);
	if (!flow) {
		flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
		if (!flow)
			return -ENOMEM;
		flow->location = fsp->location;
		flow->entry = flow_cfg->entry[flow_cfg->ntuple_offset +
						flow->location];
		new = true;
	}
	/* struct copy */
	flow->flow_spec = *fsp;

	err = otx2_add_flow_msg(pfvf, flow);
	if (err) {
		if (new)
			kfree(flow);
		return err;
	}

	/* Add the newly installed flow to the list */
	if (new) {
		otx2_add_flow_to_list(pfvf, flow);
		flow_cfg->nr_flows++;
	}

	return 0;
}

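/* Ask the AF to delete one MCAM entry, or all of this PF's entries when
 * @all is set.
 */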
static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
{
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = entry;
	if (all)
		req->all = 1;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

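/* Delete the n-tuple rule at @location: remove its MCAM entry via the
 * AF and drop the driver's bookkeeping for it.
 */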
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct otx2_flow *flow;
	int err;

	if (location >= flow_cfg->ntuple_max_flows)
		return -EINVAL;

	flow = otx2_find_flow(pfvf, location);
	if (!flow)
		return -ENOENT;

	err = otx2_remove_flow_msg(pfvf, flow->entry, false);
	if (err)
		return err;

	list_del(&flow->list);
	kfree(flow);
	flow_cfg->nr_flows--;

	return 0;
}

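/* Tear down every installed n-tuple rule: delete the whole ntuple MCAM
 * range in one AF request and free the driver-side flow objects.
 */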
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
		return 0;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->start = flow_cfg->entry[flow_cfg->ntuple_offset];
	req->end   = flow_cfg->entry[flow_cfg->ntuple_offset +
				      flow_cfg->ntuple_max_flows - 1];
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}
	return err;
}

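/* Remove all installed flows and then hand every allocated MCAM entry
 * back to the AF; called when the MCAM resources are being released.
 */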
int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_mcam_free_entry_req *req;
	struct otx2_flow *iter, *tmp;
	int err;

	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
		return 0;

	/* remove all flows */
	err = otx2_remove_flow_msg(pfvf, 0, true);
	if (err)
		return err;

	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
		list_del(&iter->list);
		kfree(iter);
		flow_cfg->nr_flows--;
	}

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->all = 1;
	/* Send message to AF to free MCAM entries */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err) {
		mutex_unlock(&pfvf->mbox.lock);
		return err;
	}

	pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
	mutex_unlock(&pfvf->mbox.lock);

	return 0;
}

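/* Install the MCAM rule that matches this interface's MAC plus an outer
 * VLAN tag, so tagged traffic can have its VLAN stripped on receive.
 */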
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_install_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
	req->intf = NIX_INTF_RX;
	ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
	eth_broadcast_addr((u8 *)&req->mask.dmac);
	req->channel = pfvf->hw.rx_chan_base;
	req->op = NIX_RX_ACTION_DEFAULT;
	req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
	req->vtag0_valid = true;
	req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;

	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
{
	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
	struct npc_delete_flow_req *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
	/* Send message to AF */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

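/* Enable or disable RX VLAN offload: install (or delete) the rx-vlan
 * MCAM rule and then ask the AF to configure VLAN tag stripping and
 * capture for this LF.
 */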
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
{
	struct nix_vtag_config *req;
	struct mbox_msghdr *rsp_hdr;
	int err;

	/* Don't have enough MCAM entries */
	if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
		return -ENOMEM;

	if (enable) {
		err = otx2_install_rxvlan_offload_flow(pf);
		if (err)
			return err;
	} else {
		err = otx2_delete_rxvlan_offload_flow(pf);
		if (err)
			return err;
	}

	mutex_lock(&pf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
	if (!req) {
		mutex_unlock(&pf->mbox.lock);
		return -ENOMEM;
	}

	/* Configure VLAN tag stripping, capture and tag size */
	req->vtag_size = VTAGSIZE_T4;
	req->cfg_type = 1; /* rx vlan cfg */
	req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
	req->rx.strip_vtag = enable;
	req->rx.capture_vtag = enable;

	err = otx2_sync_mbox_msg(&pf->mbox);
	if (err) {
		mutex_unlock(&pf->mbox.lock);
		return err;
	}

	rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(rsp_hdr)) {
		mutex_unlock(&pf->mbox.lock);
		return PTR_ERR(rsp_hdr);
	}

	mutex_unlock(&pf->mbox.lock);
	return rsp_hdr->rc;
}
821