xref: /linux/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c (revision 576d7fed09c7edbae7600f29a8a3ed6c1ead904f)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Ethernet driver
3  *
4  * Copyright (C) 2021 Marvell.
5  *
6  */
7 
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/inetdevice.h>
11 #include <linux/rhashtable.h>
12 #include <linux/bitfield.h>
13 #include <net/flow_dissector.h>
14 #include <net/pkt_cls.h>
15 #include <net/tc_act/tc_gact.h>
16 #include <net/tc_act/tc_mirred.h>
17 #include <net/tc_act/tc_vlan.h>
18 #include <net/ipv6.h>
19 
20 #include "cn10k.h"
21 #include "otx2_common.h"
22 #include "qos.h"
23 
24 #define CN10K_MAX_BURST_MANTISSA	0x7FFFULL
25 #define CN10K_MAX_BURST_SIZE		8453888ULL
26 
27 #define CN10K_TLX_BURST_MANTISSA	GENMASK_ULL(43, 29)
28 #define CN10K_TLX_BURST_EXPONENT	GENMASK_ULL(47, 44)
29 
30 #define OTX2_UNSUPP_LSE_DEPTH		GENMASK(6, 4)
31 
32 struct otx2_tc_flow_stats {
33 	u64 bytes;
34 	u64 pkts;
35 	u64 used;
36 };
37 
38 struct otx2_tc_flow {
39 	struct list_head		list;
40 	unsigned long			cookie;
41 	struct rcu_head			rcu;
42 	struct otx2_tc_flow_stats	stats;
43 	spinlock_t			lock; /* lock for stats */
44 	u16				rq;
45 	u16				entry;
46 	u16				leaf_profile;
47 	bool				is_act_police;
48 	u32				prio;
49 	struct npc_install_flow_req	req;
50 	u64				rate;
51 	u32				burst;
52 	bool				is_pps;
53 };
54 
55 static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
56 				      u32 *burst_exp, u32 *burst_mantissa)
57 {
58 	int max_burst, max_mantissa;
59 	unsigned int tmp;
60 
61 	if (is_dev_otx2(nic->pdev)) {
62 		max_burst = MAX_BURST_SIZE;
63 		max_mantissa = MAX_BURST_MANTISSA;
64 	} else {
65 		max_burst = CN10K_MAX_BURST_SIZE;
66 		max_mantissa = CN10K_MAX_BURST_MANTISSA;
67 	}
68 
69 	/* Burst is calculated as
70 	 * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
71 	 * Max supported burst size is 130,816 bytes.
72 	 */
73 	burst = min_t(u32, burst, max_burst);
74 	if (burst) {
75 		*burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
76 		tmp = burst - rounddown_pow_of_two(burst);
77 		if (burst < max_mantissa)
78 			*burst_mantissa = tmp * 2;
79 		else
80 			*burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
81 	} else {
82 		*burst_exp = MAX_BURST_EXPONENT;
83 		*burst_mantissa = max_mantissa;
84 	}
85 }
86 
87 static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
88 				     u32 *mantissa, u32 *div_exp)
89 {
90 	u64 tmp;
91 
92 	/* Rate calculation by hardware
93 	 *
94 	 * PIR_ADD = ((256 + mantissa) << exp) / 256
95 	 * rate = (2 * PIR_ADD) / ( 1 << div_exp)
96 	 * The resultant rate is in Mbps.
97 	 */
98 
99 	/* 2Mbps to 100Gbps can be expressed with div_exp = 0.
100 	 * Setting this to '0' will ease the calculation of
101 	 * exponent and mantissa.
102 	 */
103 	*div_exp = 0;
104 
105 	if (maxrate) {
106 		*exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
107 		tmp = maxrate - rounddown_pow_of_two(maxrate);
108 		if (maxrate < MAX_RATE_MANTISSA)
109 			*mantissa = tmp * 2;
110 		else
111 			*mantissa = tmp / (1ULL << (*exp - 7));
112 	} else {
113 		/* Instead of disabling rate limiting, set all values to max */
114 		*exp = MAX_RATE_EXPONENT;
115 		*mantissa = MAX_RATE_MANTISSA;
116 	}
117 }
118 
119 u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
120 				u64 maxrate, u32 burst)
121 {
122 	u32 burst_exp, burst_mantissa;
123 	u32 exp, mantissa, div_exp;
124 	u64 regval = 0;
125 
126 	/* Get exponent and mantissa values from the desired rate */
127 	otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
128 	otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);
129 
130 	if (is_dev_otx2(nic->pdev)) {
131 		regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
132 				FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
133 				FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
134 				FIELD_PREP(TLX_RATE_EXPONENT, exp) |
135 				FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
136 	} else {
137 		regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
138 				FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
139 				FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
140 				FIELD_PREP(TLX_RATE_EXPONENT, exp) |
141 				FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
142 	}
143 
144 	return regval;
145 }
146 
147 static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
148 					 u32 burst, u64 maxrate)
149 {
150 	struct otx2_hw *hw = &nic->hw;
151 	struct nix_txschq_config *req;
152 	int txschq, err;
153 
154 	/* All SQs share the same TL4, so pick the first scheduler */
155 	txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
156 
157 	mutex_lock(&nic->mbox.lock);
158 	req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
159 	if (!req) {
160 		mutex_unlock(&nic->mbox.lock);
161 		return -ENOMEM;
162 	}
163 
164 	req->lvl = NIX_TXSCH_LVL_TL4;
165 	req->num_regs = 1;
166 	req->reg[0] = NIX_AF_TL4X_PIR(txschq);
167 	req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);
168 
169 	err = otx2_sync_mbox_msg(&nic->mbox);
170 	mutex_unlock(&nic->mbox.lock);
171 	return err;
172 }
173 
174 static int otx2_tc_validate_flow(struct otx2_nic *nic,
175 				 struct flow_action *actions,
176 				 struct netlink_ext_ack *extack)
177 {
178 	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
179 		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
180 		return -EINVAL;
181 	}
182 
183 	if (!flow_action_has_entries(actions)) {
184 		NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
185 		return -EINVAL;
186 	}
187 
188 	if (!flow_offload_has_one_action(actions)) {
189 		NL_SET_ERR_MSG_MOD(extack,
190 				   "Egress MATCHALL offload supports only 1 policing action");
191 		return -EINVAL;
192 	}
193 	return 0;
194 }
195 
196 static int otx2_policer_validate(const struct flow_action *action,
197 				 const struct flow_action_entry *act,
198 				 struct netlink_ext_ack *extack)
199 {
200 	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
201 		NL_SET_ERR_MSG_MOD(extack,
202 				   "Offload not supported when exceed action is not drop");
203 		return -EOPNOTSUPP;
204 	}
205 
206 	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
207 	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
208 		NL_SET_ERR_MSG_MOD(extack,
209 				   "Offload not supported when conform action is not pipe or ok");
210 		return -EOPNOTSUPP;
211 	}
212 
213 	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
214 	    !flow_action_is_last_entry(action, act)) {
215 		NL_SET_ERR_MSG_MOD(extack,
216 				   "Offload not supported when conform action is ok, but action is not last");
217 		return -EOPNOTSUPP;
218 	}
219 
220 	if (act->police.peakrate_bytes_ps ||
221 	    act->police.avrate || act->police.overhead) {
222 		NL_SET_ERR_MSG_MOD(extack,
223 				   "Offload not supported when peakrate/avrate/overhead is configured");
224 		return -EOPNOTSUPP;
225 	}
226 
227 	return 0;
228 }
229 
230 static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
231 					   struct tc_cls_matchall_offload *cls)
232 {
233 	struct netlink_ext_ack *extack = cls->common.extack;
234 	struct flow_action *actions = &cls->rule->action;
235 	struct flow_action_entry *entry;
236 	int err;
237 
238 	err = otx2_tc_validate_flow(nic, actions, extack);
239 	if (err)
240 		return err;
241 
242 	if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
243 		NL_SET_ERR_MSG_MOD(extack,
244 				   "Only one Egress MATCHALL ratelimiter can be offloaded");
245 		return -ENOMEM;
246 	}
247 
248 	entry = &cls->rule->action.entries[0];
249 	switch (entry->id) {
250 	case FLOW_ACTION_POLICE:
251 		err = otx2_policer_validate(&cls->rule->action, entry, extack);
252 		if (err)
253 			return err;
254 
255 		if (entry->police.rate_pkt_ps) {
256 			NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
257 			return -EOPNOTSUPP;
258 		}
259 		err = otx2_set_matchall_egress_rate(nic, entry->police.burst,
260 						    otx2_convert_rate(entry->police.rate_bytes_ps));
261 		if (err)
262 			return err;
263 		nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
264 		break;
265 	default:
266 		NL_SET_ERR_MSG_MOD(extack,
267 				   "Only police action is supported with Egress MATCHALL offload");
268 		return -EOPNOTSUPP;
269 	}
270 
271 	return 0;
272 }
273 
274 static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
275 					  struct tc_cls_matchall_offload *cls)
276 {
277 	struct netlink_ext_ack *extack = cls->common.extack;
278 	int err;
279 
280 	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
281 		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
282 		return -EINVAL;
283 	}
284 
285 	err = otx2_set_matchall_egress_rate(nic, 0, 0);
286 	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
287 	return err;
288 }
289 
290 static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
291 				     struct otx2_tc_flow *node)
292 {
293 	int rc;
294 
295 	mutex_lock(&nic->mbox.lock);
296 
297 	rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
298 	if (rc) {
299 		mutex_unlock(&nic->mbox.lock);
300 		return rc;
301 	}
302 
303 	rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
304 				     node->burst, node->rate, node->is_pps);
305 	if (rc)
306 		goto free_leaf;
307 
308 	rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
309 	if (rc)
310 		goto free_leaf;
311 
312 	mutex_unlock(&nic->mbox.lock);
313 
314 	return 0;
315 
316 free_leaf:
317 	if (cn10k_free_leaf_profile(nic, node->leaf_profile))
318 		netdev_err(nic->netdev,
319 			   "Unable to free leaf bandwidth profile(%d)\n",
320 			   node->leaf_profile);
321 	mutex_unlock(&nic->mbox.lock);
322 	return rc;
323 }
324 
325 static int otx2_tc_act_set_police(struct otx2_nic *nic,
326 				  struct otx2_tc_flow *node,
327 				  struct flow_cls_offload *f,
328 				  u64 rate, u32 burst, u32 mark,
329 				  struct npc_install_flow_req *req, bool pps)
330 {
331 	struct netlink_ext_ack *extack = f->common.extack;
332 	struct otx2_hw *hw = &nic->hw;
333 	int rq_idx, rc;
334 
335 	rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
336 	if (rq_idx >= hw->rx_queues) {
337 		NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
338 		return -EINVAL;
339 	}
340 
341 	req->match_id = mark & 0xFFFFULL;
342 	req->index = rq_idx;
343 	req->op = NIX_RX_ACTIONOP_UCAST;
344 
345 	node->is_act_police = true;
346 	node->rq = rq_idx;
347 	node->burst = burst;
348 	node->rate = rate;
349 	node->is_pps = pps;
350 
351 	rc = otx2_tc_act_set_hw_police(nic, node);
352 	if (!rc)
353 		set_bit(rq_idx, &nic->rq_bmap);
354 
355 	return rc;
356 }
357 
358 static int otx2_tc_parse_actions(struct otx2_nic *nic,
359 				 struct flow_action *flow_action,
360 				 struct npc_install_flow_req *req,
361 				 struct flow_cls_offload *f,
362 				 struct otx2_tc_flow *node)
363 {
364 	struct netlink_ext_ack *extack = f->common.extack;
365 	struct flow_action_entry *act;
366 	struct net_device *target;
367 	struct otx2_nic *priv;
368 	u32 burst, mark = 0;
369 	u8 nr_police = 0;
370 	bool pps = false;
371 	u64 rate;
372 	int err;
373 	int i;
374 
375 	if (!flow_action_has_entries(flow_action)) {
376 		NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
377 		return -EINVAL;
378 	}
379 
380 	flow_action_for_each(i, act, flow_action) {
381 		switch (act->id) {
382 		case FLOW_ACTION_DROP:
383 			req->op = NIX_RX_ACTIONOP_DROP;
384 			return 0;
385 		case FLOW_ACTION_ACCEPT:
386 			req->op = NIX_RX_ACTION_DEFAULT;
387 			return 0;
388 		case FLOW_ACTION_REDIRECT_INGRESS:
389 			target = act->dev;
390 			priv = netdev_priv(target);
391 			/* npc_install_flow_req doesn't support passing a target pcifunc */
392 			if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
393 				NL_SET_ERR_MSG_MOD(extack,
394 						   "can't redirect to other pf/vf");
395 				return -EOPNOTSUPP;
396 			}
397 			req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
398 
399 			/* if op is already set; avoid overwriting the same */
400 			if (!req->op)
401 				req->op = NIX_RX_ACTION_DEFAULT;
402 			break;
403 
404 		case FLOW_ACTION_VLAN_POP:
405 			req->vtag0_valid = true;
406 			/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
407 			req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
408 			break;
409 		case FLOW_ACTION_POLICE:
410 			/* Ingress ratelimiting is not supported on OcteonTx2 */
411 			if (is_dev_otx2(nic->pdev)) {
412 				NL_SET_ERR_MSG_MOD(extack,
413 					"Ingress policing not supported on this platform");
414 				return -EOPNOTSUPP;
415 			}
416 
417 			err = otx2_policer_validate(flow_action, act, extack);
418 			if (err)
419 				return err;
420 
421 			if (act->police.rate_bytes_ps > 0) {
422 				rate = act->police.rate_bytes_ps * 8;
423 				burst = act->police.burst;
424 			} else if (act->police.rate_pkt_ps > 0) {
425 				/* The algorithm used to calculate rate
426 				 * mantissa, exponent values for a given token
427 				 * rate (token can be byte or packet) requires
428 				 * token rate to be mutiplied by 8.
429 				 */
430 				rate = act->police.rate_pkt_ps * 8;
431 				burst = act->police.burst_pkt;
432 				pps = true;
433 			}
434 			nr_police++;
435 			break;
436 		case FLOW_ACTION_MARK:
437 			mark = act->mark;
438 			break;
439 
440 		case FLOW_ACTION_RX_QUEUE_MAPPING:
441 			req->op = NIX_RX_ACTIONOP_UCAST;
442 			req->index = act->rx_queue;
443 			break;
444 
445 		default:
446 			return -EOPNOTSUPP;
447 		}
448 	}
449 
450 	if (nr_police > 1) {
451 		NL_SET_ERR_MSG_MOD(extack,
452 				   "rate limit police offload requires a single action");
453 		return -EOPNOTSUPP;
454 	}
455 
456 	if (nr_police)
457 		return otx2_tc_act_set_police(nic, node, f, rate, burst,
458 					      mark, req, pps);
459 
460 	return 0;
461 }
462 
463 static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec,
464 				struct flow_msg *flow_mask, struct flow_rule *rule,
465 				struct npc_install_flow_req *req, bool is_inner)
466 {
467 	struct flow_match_vlan match;
468 	u16 vlan_tci, vlan_tci_mask;
469 
470 	if (is_inner)
471 		flow_rule_match_cvlan(rule, &match);
472 	else
473 		flow_rule_match_vlan(rule, &match);
474 
475 	if (!eth_type_vlan(match.key->vlan_tpid)) {
476 		netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
477 			   ntohs(match.key->vlan_tpid));
478 		return -EOPNOTSUPP;
479 	}
480 
481 	if (!match.mask->vlan_id) {
482 		struct flow_action_entry *act;
483 		int i;
484 
485 		flow_action_for_each(i, act, &rule->action) {
486 			if (act->id == FLOW_ACTION_DROP) {
487 				netdev_err(nic->netdev,
488 					   "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
489 					   ntohs(match.key->vlan_tpid), match.key->vlan_id);
490 				return -EOPNOTSUPP;
491 			}
492 		}
493 	}
494 
495 	if (match.mask->vlan_id ||
496 	    match.mask->vlan_dei ||
497 	    match.mask->vlan_priority) {
498 		vlan_tci = match.key->vlan_id |
499 			   match.key->vlan_dei << 12 |
500 			   match.key->vlan_priority << 13;
501 
502 		vlan_tci_mask = match.mask->vlan_id |
503 				match.mask->vlan_dei << 12 |
504 				match.mask->vlan_priority << 13;
505 		if (is_inner) {
506 			flow_spec->vlan_itci = htons(vlan_tci);
507 			flow_mask->vlan_itci = htons(vlan_tci_mask);
508 			req->features |= BIT_ULL(NPC_INNER_VID);
509 		} else {
510 			flow_spec->vlan_tci = htons(vlan_tci);
511 			flow_mask->vlan_tci = htons(vlan_tci_mask);
512 			req->features |= BIT_ULL(NPC_OUTER_VID);
513 		}
514 	}
515 
516 	return 0;
517 }
518 
519 static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
520 				struct flow_cls_offload *f,
521 				struct npc_install_flow_req *req)
522 {
523 	struct netlink_ext_ack *extack = f->common.extack;
524 	struct flow_msg *flow_spec = &req->packet;
525 	struct flow_msg *flow_mask = &req->mask;
526 	struct flow_dissector *dissector;
527 	struct flow_rule *rule;
528 	u8 ip_proto = 0;
529 
530 	rule = flow_cls_offload_flow_rule(f);
531 	dissector = rule->match.dissector;
532 
533 	if ((dissector->used_keys &
534 	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
535 	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
536 	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
537 	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
538 	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
539 	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
540 	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
541 	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
542 	      BIT(FLOW_DISSECTOR_KEY_IPSEC) |
543 	      BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
544 	      BIT_ULL(FLOW_DISSECTOR_KEY_IP))))  {
545 		netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
546 			    dissector->used_keys);
547 		return -EOPNOTSUPP;
548 	}
549 
550 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
551 		struct flow_match_basic match;
552 
553 		flow_rule_match_basic(rule, &match);
554 
555 		/* All EtherTypes can be matched, no hw limitation */
556 		flow_spec->etype = match.key->n_proto;
557 		flow_mask->etype = match.mask->n_proto;
558 		req->features |= BIT_ULL(NPC_ETYPE);
559 
560 		if (match.mask->ip_proto &&
561 		    (match.key->ip_proto != IPPROTO_TCP &&
562 		     match.key->ip_proto != IPPROTO_UDP &&
563 		     match.key->ip_proto != IPPROTO_SCTP &&
564 		     match.key->ip_proto != IPPROTO_ICMP &&
565 		     match.key->ip_proto != IPPROTO_ESP &&
566 		     match.key->ip_proto != IPPROTO_AH &&
567 		     match.key->ip_proto != IPPROTO_ICMPV6)) {
568 			netdev_info(nic->netdev,
569 				    "ip_proto=0x%x not supported\n",
570 				    match.key->ip_proto);
571 			return -EOPNOTSUPP;
572 		}
573 		if (match.mask->ip_proto)
574 			ip_proto = match.key->ip_proto;
575 
576 		if (ip_proto == IPPROTO_UDP)
577 			req->features |= BIT_ULL(NPC_IPPROTO_UDP);
578 		else if (ip_proto == IPPROTO_TCP)
579 			req->features |= BIT_ULL(NPC_IPPROTO_TCP);
580 		else if (ip_proto == IPPROTO_SCTP)
581 			req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
582 		else if (ip_proto == IPPROTO_ICMP)
583 			req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
584 		else if (ip_proto == IPPROTO_ICMPV6)
585 			req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
586 		else if (ip_proto == IPPROTO_ESP)
587 			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
588 		else if (ip_proto == IPPROTO_AH)
589 			req->features |= BIT_ULL(NPC_IPPROTO_AH);
590 	}
591 
592 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
593 		struct flow_match_control match;
594 
595 		flow_rule_match_control(rule, &match);
596 		if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
597 			NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later");
598 			return -EOPNOTSUPP;
599 		}
600 
601 		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
602 			if (ntohs(flow_spec->etype) == ETH_P_IP) {
603 				flow_spec->ip_flag = IPV4_FLAG_MORE;
604 				flow_mask->ip_flag = IPV4_FLAG_MORE;
605 				req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
606 			} else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
607 				flow_spec->next_header = IPPROTO_FRAGMENT;
608 				flow_mask->next_header = 0xff;
609 				req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
610 			} else {
611 				NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 and IPv6");
612 				return -EOPNOTSUPP;
613 			}
614 		}
615 	}
616 
617 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
618 		struct flow_match_eth_addrs match;
619 
620 		flow_rule_match_eth_addrs(rule, &match);
621 		if (!is_zero_ether_addr(match.mask->src)) {
622 			NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
623 			return -EOPNOTSUPP;
624 		}
625 
626 		if (!is_zero_ether_addr(match.mask->dst)) {
627 			ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
628 			ether_addr_copy(flow_mask->dmac,
629 					(u8 *)&match.mask->dst);
630 			req->features |= BIT_ULL(NPC_DMAC);
631 		}
632 	}
633 
634 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) {
635 		struct flow_match_ipsec match;
636 
637 		flow_rule_match_ipsec(rule, &match);
638 		if (!match.mask->spi) {
639 			NL_SET_ERR_MSG_MOD(extack, "spi index not specified");
640 			return -EOPNOTSUPP;
641 		}
642 		if (ip_proto != IPPROTO_ESP &&
643 		    ip_proto != IPPROTO_AH) {
644 			NL_SET_ERR_MSG_MOD(extack,
645 					   "SPI index is valid only for ESP/AH proto");
646 			return -EOPNOTSUPP;
647 		}
648 
649 		flow_spec->spi = match.key->spi;
650 		flow_mask->spi = match.mask->spi;
651 		req->features |= BIT_ULL(NPC_IPSEC_SPI);
652 	}
653 
654 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
655 		struct flow_match_ip match;
656 
657 		flow_rule_match_ip(rule, &match);
658 		if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
659 		    match.mask->tos) {
660 			NL_SET_ERR_MSG_MOD(extack, "tos not supported");
661 			return -EOPNOTSUPP;
662 		}
663 		if (match.mask->ttl) {
664 			NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
665 			return -EOPNOTSUPP;
666 		}
667 		flow_spec->tos = match.key->tos;
668 		flow_mask->tos = match.mask->tos;
669 		req->features |= BIT_ULL(NPC_TOS);
670 	}
671 
672 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
673 		int ret;
674 
675 		ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false);
676 		if (ret)
677 			return ret;
678 	}
679 
680 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
681 		int ret;
682 
683 		ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true);
684 		if (ret)
685 			return ret;
686 	}
687 
688 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
689 		struct flow_match_ipv4_addrs match;
690 
691 		flow_rule_match_ipv4_addrs(rule, &match);
692 
693 		flow_spec->ip4dst = match.key->dst;
694 		flow_mask->ip4dst = match.mask->dst;
695 		req->features |= BIT_ULL(NPC_DIP_IPV4);
696 
697 		flow_spec->ip4src = match.key->src;
698 		flow_mask->ip4src = match.mask->src;
699 		req->features |= BIT_ULL(NPC_SIP_IPV4);
700 	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
701 		struct flow_match_ipv6_addrs match;
702 
703 		flow_rule_match_ipv6_addrs(rule, &match);
704 
705 		if (ipv6_addr_loopback(&match.key->dst) ||
706 		    ipv6_addr_loopback(&match.key->src)) {
707 			NL_SET_ERR_MSG_MOD(extack,
708 					   "Flow matching IPv6 loopback addr not supported");
709 			return -EOPNOTSUPP;
710 		}
711 
712 		if (!ipv6_addr_any(&match.mask->dst)) {
713 			memcpy(&flow_spec->ip6dst,
714 			       (struct in6_addr *)&match.key->dst,
715 			       sizeof(flow_spec->ip6dst));
716 			memcpy(&flow_mask->ip6dst,
717 			       (struct in6_addr *)&match.mask->dst,
718 			       sizeof(flow_spec->ip6dst));
719 			req->features |= BIT_ULL(NPC_DIP_IPV6);
720 		}
721 
722 		if (!ipv6_addr_any(&match.mask->src)) {
723 			memcpy(&flow_spec->ip6src,
724 			       (struct in6_addr *)&match.key->src,
725 			       sizeof(flow_spec->ip6src));
726 			memcpy(&flow_mask->ip6src,
727 			       (struct in6_addr *)&match.mask->src,
728 			       sizeof(flow_spec->ip6src));
729 			req->features |= BIT_ULL(NPC_SIP_IPV6);
730 		}
731 	}
732 
733 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
734 		struct flow_match_ports match;
735 
736 		flow_rule_match_ports(rule, &match);
737 
738 		flow_spec->dport = match.key->dst;
739 		flow_mask->dport = match.mask->dst;
740 
741 		if (flow_mask->dport) {
742 			if (ip_proto == IPPROTO_UDP)
743 				req->features |= BIT_ULL(NPC_DPORT_UDP);
744 			else if (ip_proto == IPPROTO_TCP)
745 				req->features |= BIT_ULL(NPC_DPORT_TCP);
746 			else if (ip_proto == IPPROTO_SCTP)
747 				req->features |= BIT_ULL(NPC_DPORT_SCTP);
748 		}
749 
750 		flow_spec->sport = match.key->src;
751 		flow_mask->sport = match.mask->src;
752 
753 		if (flow_mask->sport) {
754 			if (ip_proto == IPPROTO_UDP)
755 				req->features |= BIT_ULL(NPC_SPORT_UDP);
756 			else if (ip_proto == IPPROTO_TCP)
757 				req->features |= BIT_ULL(NPC_SPORT_TCP);
758 			else if (ip_proto == IPPROTO_SCTP)
759 				req->features |= BIT_ULL(NPC_SPORT_SCTP);
760 		}
761 	}
762 
763 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
764 		struct flow_match_mpls match;
765 		u8 bit;
766 
767 		flow_rule_match_mpls(rule, &match);
768 
769 		if (match.mask->used_lses & OTX2_UNSUPP_LSE_DEPTH) {
770 			NL_SET_ERR_MSG_MOD(extack,
771 					   "unsupported LSE depth for MPLS match offload");
772 			return -EOPNOTSUPP;
773 		}
774 
775 		for_each_set_bit(bit, (unsigned long *)&match.mask->used_lses,
776 				 FLOW_DIS_MPLS_MAX)  {
777 			/* check if any of the fields LABEL,TC,BOS are set */
778 			if (*((u32 *)&match.mask->ls[bit]) &
779 			    OTX2_FLOWER_MASK_MPLS_NON_TTL) {
780 				/* Hardware will capture 4 byte MPLS header into
781 				 * two fields NPC_MPLSX_LBTCBOS and NPC_MPLSX_TTL.
782 				 * Derive the associated NPC key based on header
783 				 * index and offset.
784 				 */
785 
786 				req->features |= BIT_ULL(NPC_MPLS1_LBTCBOS +
787 							 2 * bit);
788 				flow_spec->mpls_lse[bit] =
789 					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
790 						   match.key->ls[bit].mpls_label) |
791 					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
792 						   match.key->ls[bit].mpls_tc) |
793 					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
794 						   match.key->ls[bit].mpls_bos);
795 
796 				flow_mask->mpls_lse[bit] =
797 					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
798 						   match.mask->ls[bit].mpls_label) |
799 					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
800 						   match.mask->ls[bit].mpls_tc) |
801 					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
802 						   match.mask->ls[bit].mpls_bos);
803 			}
804 
805 			if (match.mask->ls[bit].mpls_ttl) {
806 				req->features |= BIT_ULL(NPC_MPLS1_TTL +
807 							 2 * bit);
808 				flow_spec->mpls_lse[bit] |=
809 					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
810 						   match.key->ls[bit].mpls_ttl);
811 				flow_mask->mpls_lse[bit] |=
812 					FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
813 						   match.mask->ls[bit].mpls_ttl);
814 			}
815 		}
816 	}
817 
818 	return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
819 }
820 
821 static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf)
822 {
823 	struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
824 	struct otx2_tc_flow *iter, *tmp;
825 
826 	if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
827 		return;
828 
829 	list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
830 		list_del(&iter->list);
831 		kfree(iter);
832 		flow_cfg->nr_flows--;
833 	}
834 }
835 
836 static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
837 							unsigned long cookie)
838 {
839 	struct otx2_tc_flow *tmp;
840 
841 	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
842 		if (tmp->cookie == cookie)
843 			return tmp;
844 	}
845 
846 	return NULL;
847 }
848 
849 static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
850 						       int index)
851 {
852 	struct otx2_tc_flow *tmp;
853 	int i = 0;
854 
855 	list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
856 		if (i == index)
857 			return tmp;
858 		i++;
859 	}
860 
861 	return NULL;
862 }
863 
864 static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg,
865 				       struct otx2_tc_flow *node)
866 {
867 	struct list_head *pos, *n;
868 	struct otx2_tc_flow *tmp;
869 
870 	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
871 		tmp = list_entry(pos, struct otx2_tc_flow, list);
872 		if (node == tmp) {
873 			list_del(&node->list);
874 			return;
875 		}
876 	}
877 }
878 
879 static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
880 				    struct otx2_tc_flow *node)
881 {
882 	struct list_head *pos, *n;
883 	struct otx2_tc_flow *tmp;
884 	int index = 0;
885 
886 	/* If the flow list is empty then add the new node */
887 	if (list_empty(&flow_cfg->flow_list_tc)) {
888 		list_add(&node->list, &flow_cfg->flow_list_tc);
889 		return index;
890 	}
891 
892 	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
893 		tmp = list_entry(pos, struct otx2_tc_flow, list);
894 		if (node->prio < tmp->prio)
895 			break;
896 		index++;
897 	}
898 
899 	list_add(&node->list, pos->prev);
900 	return index;
901 }
902 
903 static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
904 {
905 	struct npc_install_flow_req *tmp_req;
906 	int err;
907 
908 	mutex_lock(&nic->mbox.lock);
909 	tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
910 	if (!tmp_req) {
911 		mutex_unlock(&nic->mbox.lock);
912 		return -ENOMEM;
913 	}
914 
915 	memcpy(tmp_req, req, sizeof(struct npc_install_flow_req));
916 	/* Send message to AF */
917 	err = otx2_sync_mbox_msg(&nic->mbox);
918 	if (err) {
919 		netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n",
920 			   req->entry);
921 		mutex_unlock(&nic->mbox.lock);
922 		return -EFAULT;
923 	}
924 
925 	mutex_unlock(&nic->mbox.lock);
926 	return 0;
927 }
928 
929 static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
930 {
931 	struct npc_delete_flow_rsp *rsp;
932 	struct npc_delete_flow_req *req;
933 	int err;
934 
935 	mutex_lock(&nic->mbox.lock);
936 	req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
937 	if (!req) {
938 		mutex_unlock(&nic->mbox.lock);
939 		return -ENOMEM;
940 	}
941 
942 	req->entry = entry;
943 
944 	/* Send message to AF */
945 	err = otx2_sync_mbox_msg(&nic->mbox);
946 	if (err) {
947 		netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
948 			   entry);
949 		mutex_unlock(&nic->mbox.lock);
950 		return -EFAULT;
951 	}
952 
953 	if (cntr_val) {
954 		rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
955 								      0, &req->hdr);
956 		if (IS_ERR(rsp)) {
957 			netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
958 				   entry);
959 			mutex_unlock(&nic->mbox.lock);
960 			return -EFAULT;
961 		}
962 
963 		*cntr_val = rsp->cntr_val;
964 	}
965 
966 	mutex_unlock(&nic->mbox.lock);
967 	return 0;
968 }
969 
970 static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic,
971 					     struct otx2_flow_config *flow_cfg,
972 					     struct otx2_tc_flow *node)
973 {
974 	struct list_head *pos, *n;
975 	struct otx2_tc_flow *tmp;
976 	int i = 0, index = 0;
977 	u16 cntr_val = 0;
978 
979 	/* Find and delete the entry from the list and re-install
980 	 * all the entries from beginning to the index of the
981 	 * deleted entry to higher mcam indexes.
982 	 */
983 	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
984 		tmp = list_entry(pos, struct otx2_tc_flow, list);
985 		if (node == tmp) {
986 			list_del(&tmp->list);
987 			break;
988 		}
989 
990 		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
991 		tmp->entry++;
992 		tmp->req.entry = tmp->entry;
993 		tmp->req.cntr_val = cntr_val;
994 		index++;
995 	}
996 
997 	list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
998 		if (i == index)
999 			break;
1000 
1001 		tmp = list_entry(pos, struct otx2_tc_flow, list);
1002 		otx2_add_mcam_flow_entry(nic, &tmp->req);
1003 		i++;
1004 	}
1005 
1006 	return 0;
1007 }
1008 
1009 static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic,
1010 					     struct otx2_flow_config *flow_cfg,
1011 					     struct otx2_tc_flow *node)
1012 {
1013 	int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1;
1014 	struct otx2_tc_flow *tmp;
1015 	int list_idx, i;
1016 	u16 cntr_val = 0;
1017 
1018 	/* Find the index of the entry(list_idx) whose priority
1019 	 * is greater than the new entry and re-install all
1020 	 * the entries from beginning to list_idx to higher
1021 	 * mcam indexes.
1022 	 */
1023 	list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
1024 	for (i = 0; i < list_idx; i++) {
1025 		tmp = otx2_tc_get_entry_by_index(flow_cfg, i);
1026 		if (!tmp)
1027 			return -ENOMEM;
1028 
1029 		otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
1030 		tmp->entry = flow_cfg->flow_ent[mcam_idx];
1031 		tmp->req.entry = tmp->entry;
1032 		tmp->req.cntr_val = cntr_val;
1033 		otx2_add_mcam_flow_entry(nic, &tmp->req);
1034 		mcam_idx++;
1035 	}
1036 
1037 	return mcam_idx;
1038 }
1039 
1040 static int otx2_tc_update_mcam_table(struct otx2_nic *nic,
1041 				     struct otx2_flow_config *flow_cfg,
1042 				     struct otx2_tc_flow *node,
1043 				     bool add_req)
1044 {
1045 	if (add_req)
1046 		return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node);
1047 
1048 	return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node);
1049 }
1050 
1051 static int otx2_tc_del_flow(struct otx2_nic *nic,
1052 			    struct flow_cls_offload *tc_flow_cmd)
1053 {
1054 	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
1055 	struct otx2_tc_flow *flow_node;
1056 	int err;
1057 
1058 	flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
1059 	if (!flow_node) {
1060 		netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
1061 			   tc_flow_cmd->cookie);
1062 		return -EINVAL;
1063 	}
1064 
1065 	if (flow_node->is_act_police) {
1066 		__clear_bit(flow_node->rq, &nic->rq_bmap);
1067 
1068 		if (nic->flags & OTX2_FLAG_INTF_DOWN)
1069 			goto free_mcam_flow;
1070 
1071 		mutex_lock(&nic->mbox.lock);
1072 
1073 		err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
1074 						 flow_node->leaf_profile, false);
1075 		if (err)
1076 			netdev_err(nic->netdev,
1077 				   "Unmapping RQ %d & profile %d failed\n",
1078 				   flow_node->rq, flow_node->leaf_profile);
1079 
1080 		err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
1081 		if (err)
1082 			netdev_err(nic->netdev,
1083 				   "Unable to free leaf bandwidth profile(%d)\n",
1084 				   flow_node->leaf_profile);
1085 
1086 		mutex_unlock(&nic->mbox.lock);
1087 	}
1088 
1089 free_mcam_flow:
1090 	otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
1091 	otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
1092 	kfree_rcu(flow_node, rcu);
1093 	flow_cfg->nr_flows--;
1094 	return 0;
1095 }
1096 
1097 static int otx2_tc_add_flow(struct otx2_nic *nic,
1098 			    struct flow_cls_offload *tc_flow_cmd)
1099 {
1100 	struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
1101 	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
1102 	struct otx2_tc_flow *new_node, *old_node;
1103 	struct npc_install_flow_req *req, dummy;
1104 	int rc, err, mcam_idx;
1105 
1106 	if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
1107 		return -ENOMEM;
1108 
1109 	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
1110 		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
1111 		return -EINVAL;
1112 	}
1113 
1114 	if (flow_cfg->nr_flows == flow_cfg->max_flows) {
1115 		NL_SET_ERR_MSG_MOD(extack,
1116 				   "Free MCAM entry not available to add the flow");
1117 		return -ENOMEM;
1118 	}
1119 
1120 	/* allocate memory for the new flow and it's node */
1121 	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
1122 	if (!new_node)
1123 		return -ENOMEM;
1124 	spin_lock_init(&new_node->lock);
1125 	new_node->cookie = tc_flow_cmd->cookie;
1126 	new_node->prio = tc_flow_cmd->common.prio;
1127 
1128 	memset(&dummy, 0, sizeof(struct npc_install_flow_req));
1129 
1130 	rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
1131 	if (rc) {
1132 		kfree_rcu(new_node, rcu);
1133 		return rc;
1134 	}
1135 
1136 	/* If a flow exists with the same cookie, delete it */
1137 	old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
1138 	if (old_node)
1139 		otx2_tc_del_flow(nic, tc_flow_cmd);
1140 
1141 	mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
1142 	mutex_lock(&nic->mbox.lock);
1143 	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
1144 	if (!req) {
1145 		mutex_unlock(&nic->mbox.lock);
1146 		rc = -ENOMEM;
1147 		goto free_leaf;
1148 	}
1149 
1150 	memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
1151 	memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
1152 	req->channel = nic->hw.rx_chan_base;
1153 	req->entry = flow_cfg->flow_ent[mcam_idx];
1154 	req->intf = NIX_INTF_RX;
1155 	req->set_cntr = 1;
1156 	new_node->entry = req->entry;
1157 
1158 	/* Send message to AF */
1159 	rc = otx2_sync_mbox_msg(&nic->mbox);
1160 	if (rc) {
1161 		NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
1162 		mutex_unlock(&nic->mbox.lock);
1163 		goto free_leaf;
1164 	}
1165 
1166 	mutex_unlock(&nic->mbox.lock);
1167 	memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req));
1168 
1169 	flow_cfg->nr_flows++;
1170 	return 0;
1171 
1172 free_leaf:
1173 	otx2_tc_del_from_flow_list(flow_cfg, new_node);
1174 	kfree_rcu(new_node, rcu);
1175 	if (new_node->is_act_police) {
1176 		mutex_lock(&nic->mbox.lock);
1177 
1178 		err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
1179 						 new_node->leaf_profile, false);
1180 		if (err)
1181 			netdev_err(nic->netdev,
1182 				   "Unmapping RQ %d & profile %d failed\n",
1183 				   new_node->rq, new_node->leaf_profile);
1184 		err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
1185 		if (err)
1186 			netdev_err(nic->netdev,
1187 				   "Unable to free leaf bandwidth profile(%d)\n",
1188 				   new_node->leaf_profile);
1189 
1190 		__clear_bit(new_node->rq, &nic->rq_bmap);
1191 
1192 		mutex_unlock(&nic->mbox.lock);
1193 	}
1194 
1195 	return rc;
1196 }
1197 
1198 static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
1199 				  struct flow_cls_offload *tc_flow_cmd)
1200 {
1201 	struct npc_mcam_get_stats_req *req;
1202 	struct npc_mcam_get_stats_rsp *rsp;
1203 	struct otx2_tc_flow_stats *stats;
1204 	struct otx2_tc_flow *flow_node;
1205 	int err;
1206 
1207 	flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie);
1208 	if (!flow_node) {
1209 		netdev_info(nic->netdev, "tc flow not found for cookie %lx",
1210 			    tc_flow_cmd->cookie);
1211 		return -EINVAL;
1212 	}
1213 
1214 	mutex_lock(&nic->mbox.lock);
1215 
1216 	req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
1217 	if (!req) {
1218 		mutex_unlock(&nic->mbox.lock);
1219 		return -ENOMEM;
1220 	}
1221 
1222 	req->entry = flow_node->entry;
1223 
1224 	err = otx2_sync_mbox_msg(&nic->mbox);
1225 	if (err) {
1226 		netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
1227 			   req->entry);
1228 		mutex_unlock(&nic->mbox.lock);
1229 		return -EFAULT;
1230 	}
1231 
1232 	rsp = (struct npc_mcam_get_stats_rsp *)otx2_mbox_get_rsp
1233 		(&nic->mbox.mbox, 0, &req->hdr);
1234 	if (IS_ERR(rsp)) {
1235 		mutex_unlock(&nic->mbox.lock);
1236 		return PTR_ERR(rsp);
1237 	}
1238 
1239 	mutex_unlock(&nic->mbox.lock);
1240 
1241 	if (!rsp->stat_ena)
1242 		return -EINVAL;
1243 
1244 	stats = &flow_node->stats;
1245 
1246 	spin_lock(&flow_node->lock);
1247 	flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
1248 			  FLOW_ACTION_HW_STATS_IMMEDIATE);
1249 	stats->pkts = rsp->stat;
1250 	spin_unlock(&flow_node->lock);
1251 
1252 	return 0;
1253 }
1254 
1255 static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
1256 				    struct flow_cls_offload *cls_flower)
1257 {
1258 	switch (cls_flower->command) {
1259 	case FLOW_CLS_REPLACE:
1260 		return otx2_tc_add_flow(nic, cls_flower);
1261 	case FLOW_CLS_DESTROY:
1262 		return otx2_tc_del_flow(nic, cls_flower);
1263 	case FLOW_CLS_STATS:
1264 		return otx2_tc_get_flow_stats(nic, cls_flower);
1265 	default:
1266 		return -EOPNOTSUPP;
1267 	}
1268 }
1269 
1270 static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
1271 					    struct tc_cls_matchall_offload *cls)
1272 {
1273 	struct netlink_ext_ack *extack = cls->common.extack;
1274 	struct flow_action *actions = &cls->rule->action;
1275 	struct flow_action_entry *entry;
1276 	u64 rate;
1277 	int err;
1278 
1279 	err = otx2_tc_validate_flow(nic, actions, extack);
1280 	if (err)
1281 		return err;
1282 
1283 	if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
1284 		NL_SET_ERR_MSG_MOD(extack,
1285 				   "Only one ingress MATCHALL ratelimitter can be offloaded");
1286 		return -ENOMEM;
1287 	}
1288 
1289 	entry = &cls->rule->action.entries[0];
1290 	switch (entry->id) {
1291 	case FLOW_ACTION_POLICE:
1292 		/* Ingress ratelimiting is not supported on OcteonTx2 */
1293 		if (is_dev_otx2(nic->pdev)) {
1294 			NL_SET_ERR_MSG_MOD(extack,
1295 					   "Ingress policing not supported on this platform");
1296 			return -EOPNOTSUPP;
1297 		}
1298 
1299 		err = cn10k_alloc_matchall_ipolicer(nic);
1300 		if (err)
1301 			return err;
1302 
1303 		/* Convert to bits per second */
1304 		rate = entry->police.rate_bytes_ps * 8;
1305 		err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
1306 		if (err)
1307 			return err;
1308 		nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
1309 		break;
1310 	default:
1311 		NL_SET_ERR_MSG_MOD(extack,
1312 				   "Only police action supported with Ingress MATCHALL offload");
1313 		return -EOPNOTSUPP;
1314 	}
1315 
1316 	return 0;
1317 }
1318 
1319 static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
1320 					   struct tc_cls_matchall_offload *cls)
1321 {
1322 	struct netlink_ext_ack *extack = cls->common.extack;
1323 	int err;
1324 
1325 	if (nic->flags & OTX2_FLAG_INTF_DOWN) {
1326 		NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
1327 		return -EINVAL;
1328 	}
1329 
1330 	err = cn10k_free_matchall_ipolicer(nic);
1331 	nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
1332 	return err;
1333 }
1334 
1335 static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
1336 					  struct tc_cls_matchall_offload *cls_matchall)
1337 {
1338 	switch (cls_matchall->command) {
1339 	case TC_CLSMATCHALL_REPLACE:
1340 		return otx2_tc_ingress_matchall_install(nic, cls_matchall);
1341 	case TC_CLSMATCHALL_DESTROY:
1342 		return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
1343 	case TC_CLSMATCHALL_STATS:
1344 	default:
1345 		break;
1346 	}
1347 
1348 	return -EOPNOTSUPP;
1349 }
1350 
1351 static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
1352 					  void *type_data, void *cb_priv)
1353 {
1354 	struct otx2_nic *nic = cb_priv;
1355 	bool ntuple;
1356 
1357 	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
1358 		return -EOPNOTSUPP;
1359 
1360 	ntuple = nic->netdev->features & NETIF_F_NTUPLE;
1361 	switch (type) {
1362 	case TC_SETUP_CLSFLOWER:
1363 		if (ntuple) {
1364 			netdev_warn(nic->netdev,
1365 				    "Can't install TC flower offload rule when NTUPLE is active");
1366 			return -EOPNOTSUPP;
1367 		}
1368 
1369 		return otx2_setup_tc_cls_flower(nic, type_data);
1370 	case TC_SETUP_CLSMATCHALL:
1371 		return otx2_setup_tc_ingress_matchall(nic, type_data);
1372 	default:
1373 		break;
1374 	}
1375 
1376 	return -EOPNOTSUPP;
1377 }
1378 
1379 static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
1380 					 struct tc_cls_matchall_offload *cls_matchall)
1381 {
1382 	switch (cls_matchall->command) {
1383 	case TC_CLSMATCHALL_REPLACE:
1384 		return otx2_tc_egress_matchall_install(nic, cls_matchall);
1385 	case TC_CLSMATCHALL_DESTROY:
1386 		return otx2_tc_egress_matchall_delete(nic, cls_matchall);
1387 	case TC_CLSMATCHALL_STATS:
1388 	default:
1389 		break;
1390 	}
1391 
1392 	return -EOPNOTSUPP;
1393 }
1394 
1395 static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
1396 					 void *type_data, void *cb_priv)
1397 {
1398 	struct otx2_nic *nic = cb_priv;
1399 
1400 	if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
1401 		return -EOPNOTSUPP;
1402 
1403 	switch (type) {
1404 	case TC_SETUP_CLSMATCHALL:
1405 		return otx2_setup_tc_egress_matchall(nic, type_data);
1406 	default:
1407 		break;
1408 	}
1409 
1410 	return -EOPNOTSUPP;
1411 }
1412 
1413 static LIST_HEAD(otx2_block_cb_list);
1414 
1415 static int otx2_setup_tc_block(struct net_device *netdev,
1416 			       struct flow_block_offload *f)
1417 {
1418 	struct otx2_nic *nic = netdev_priv(netdev);
1419 	flow_setup_cb_t *cb;
1420 	bool ingress;
1421 
1422 	if (f->block_shared)
1423 		return -EOPNOTSUPP;
1424 
1425 	if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
1426 		cb = otx2_setup_tc_block_ingress_cb;
1427 		ingress = true;
1428 	} else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
1429 		cb = otx2_setup_tc_block_egress_cb;
1430 		ingress = false;
1431 	} else {
1432 		return -EOPNOTSUPP;
1433 	}
1434 
1435 	return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
1436 					  nic, nic, ingress);
1437 }
1438 
1439 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
1440 		  void *type_data)
1441 {
1442 	switch (type) {
1443 	case TC_SETUP_BLOCK:
1444 		return otx2_setup_tc_block(netdev, type_data);
1445 	case TC_SETUP_QDISC_HTB:
1446 		return otx2_setup_tc_htb(netdev, type_data);
1447 	default:
1448 		return -EOPNOTSUPP;
1449 	}
1450 }
1451 EXPORT_SYMBOL(otx2_setup_tc);
1452 
1453 int otx2_init_tc(struct otx2_nic *nic)
1454 {
1455 	/* Exclude receive queue 0 being used for police action */
1456 	set_bit(0, &nic->rq_bmap);
1457 
1458 	if (!nic->flow_cfg) {
1459 		netdev_err(nic->netdev,
1460 			   "Can't init TC, nic->flow_cfg is not setup\n");
1461 		return -EINVAL;
1462 	}
1463 
1464 	return 0;
1465 }
1466 EXPORT_SYMBOL(otx2_init_tc);
1467 
1468 void otx2_shutdown_tc(struct otx2_nic *nic)
1469 {
1470 	otx2_destroy_tc_flow_list(nic);
1471 }
1472 EXPORT_SYMBOL(otx2_shutdown_tc);
1473 
1474 static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
1475 					struct otx2_tc_flow *node)
1476 {
1477 	struct npc_install_flow_req *req;
1478 
1479 	if (otx2_tc_act_set_hw_police(nic, node))
1480 		return;
1481 
1482 	mutex_lock(&nic->mbox.lock);
1483 
1484 	req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
1485 	if (!req)
1486 		goto err;
1487 
1488 	memcpy(req, &node->req, sizeof(struct npc_install_flow_req));
1489 
1490 	if (otx2_sync_mbox_msg(&nic->mbox))
1491 		netdev_err(nic->netdev,
1492 			   "Failed to install MCAM flow entry for ingress rule");
1493 err:
1494 	mutex_unlock(&nic->mbox.lock);
1495 }
1496 
1497 void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
1498 {
1499 	struct otx2_flow_config *flow_cfg = nic->flow_cfg;
1500 	struct otx2_tc_flow *node;
1501 
1502 	/* If any ingress policer rules exist for the interface then
1503 	 * apply those rules. Ingress policer rules depend on bandwidth
1504 	 * profiles linked to the receive queues. Since no receive queues
1505 	 * exist when interface is down, ingress policer rules are stored
1506 	 * and configured in hardware after all receive queues are allocated
1507 	 * in otx2_open.
1508 	 */
1509 	list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
1510 		if (node->is_act_police)
1511 			otx2_tc_config_ingress_rule(nic, node);
1512 	}
1513 }
1514 EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);
1515