/* drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c (revision 06d07429858317ded2db7986113a9e0129cd599b) */
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies. */

#include <linux/netdevice.h>
#include <linux/if_macvlan.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include "tc.h"
#include "neigh.h"
#include "en_rep.h"
#include "eswitch.h"
#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#include "en/mapping.h"
#include "en/tc_tun.h"
#include "lib/port_tun.h"
#include "en/tc/sample.h"
#include "en_accel/ipsec_rxtx.h"
#include "en/tc/int_port.h"
#include "en/tc/act/act.h"

struct mlx5e_rep_indr_block_priv {
	struct net_device *netdev;
	struct mlx5e_rep_priv *rpriv;
	enum flow_block_binder_type binder_type;

	struct list_head list;
};

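/* Attach an encap entry to the neighbour update machinery: take a tunnel
 * entropy reference for the encap reformat type, look up (or create) the
 * neighbour hash entry for m_neigh on neigh_dev, and link the encap entry
 * onto that entry's encap list.
 */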
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
				 struct mlx5e_encap_entry *e,
				 struct mlx5e_neigh *m_neigh,
				 struct net_device *neigh_dev)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
	struct mlx5e_neigh_hash_entry *nhe;
	int err;

	err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
	if (err)
		return err;

	mutex_lock(&rpriv->neigh_update.encap_lock);
	nhe = mlx5e_rep_neigh_entry_lookup(priv, m_neigh);
	if (!nhe) {
		err = mlx5e_rep_neigh_entry_create(priv, m_neigh, neigh_dev, &nhe);
		if (err) {
			mutex_unlock(&rpriv->neigh_update.encap_lock);
			mlx5_tun_entropy_refcount_dec(tun_entropy,
						      e->reformat_type);
			return err;
		}
	}

	e->nhe = nhe;
	spin_lock(&nhe->encap_list_lock);
	list_add_rcu(&e->encap_list, &nhe->encap_list);
	spin_unlock(&nhe->encap_list_lock);

	mutex_unlock(&rpriv->neigh_update.encap_lock);

	return 0;
}

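/* Undo mlx5e_rep_encap_entry_attach(): unlink the encap entry from its
 * neighbour hash entry's encap list, drop the nhe reference and release
 * the tunnel entropy refcount.
 */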
void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
				  struct mlx5e_encap_entry *e)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;

	if (!e->nhe)
		return;

	spin_lock(&e->nhe->encap_list_lock);
	list_del_rcu(&e->encap_list);
	spin_unlock(&e->nhe->encap_list_lock);

	mlx5e_rep_neigh_entry_release(e->nhe);
	e->nhe = NULL;
	mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}

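/* Called under RTNL when the neighbour state behind an encap entry changes.
 * Removes the offloaded encap flows when the neighbour is no longer valid
 * (or its MAC changed), and re-adds them, with an updated cached Ethernet
 * header, once the neighbour becomes connected.
 */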
void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
			    struct mlx5e_encap_entry *e,
			    bool neigh_connected,
			    unsigned char ha[ETH_ALEN])
{
	struct ethhdr *eth = (struct ethhdr *)e->encap_header;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	bool encap_connected;
	LIST_HEAD(flow_list);

	ASSERT_RTNL();

	mutex_lock(&esw->offloads.encap_tbl_lock);
	encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
	if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
		goto unlock;

	mlx5e_take_all_encap_flows(e, &flow_list);

	if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
	    (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
		mlx5e_tc_encap_flows_del(priv, e, &flow_list);

	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
		struct net_device *route_dev;

		ether_addr_copy(e->h_dest, ha);
		ether_addr_copy(eth->h_dest, ha);
		/* Update the encap source mac, in case the flows were
		 * deleted because the encap source mac changed.
		 */
		route_dev = __dev_get_by_index(dev_net(priv->netdev), e->route_dev_ifindex);
		if (route_dev)
			ether_addr_copy(eth->h_source, route_dev->dev_addr);

		mlx5e_tc_encap_flows_add(priv, e, &flow_list);
	}
unlock:
	mutex_unlock(&esw->offloads.encap_tbl_lock);
	mlx5e_put_flow_list(priv, &flow_list);
}

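/* Dispatch a flower classifier command (replace/destroy/stats) to the
 * corresponding mlx5e TC offload handler.
 */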
static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
			      struct flow_cls_offload *cls_flower, int flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

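/* Report the delta in representor vport RX packets/bytes since the last
 * query as delayed HW stats for the matchall classifier.
 */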
static void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
				    struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	u64 dbytes;
	u64 dpkts;

	dpkts = priv->stats.rep_stats.vport_rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = priv->stats.rep_stats.vport_rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	mlx5e_stats_copy_rep_stats(&rpriv->prev_vf_vport_stats, &priv->stats.rep_stats);
	flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}

static
int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
				    struct tc_cls_matchall_offload *ma)
{
	switch (ma->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlx5e_tc_configure_matchall(priv, ma);
	case TC_CLSMATCHALL_DESTROY:
		return mlx5e_tc_delete_matchall(priv, ma);
	case TC_CLSMATCHALL_STATS:
		mlx5e_tc_stats_matchall(priv, ma);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

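/* Block callback for TC blocks bound directly on the representor netdev:
 * handles flower and matchall rules on ingress with eswitch offload flags.
 */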
static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
	struct mlx5e_priv *priv = cb_priv;

	if (!priv->netdev || !netif_device_present(priv->netdev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
	case TC_SETUP_CLSMATCHALL:
		return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

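/* Block callback for TC_SETUP_FT: remaps the rule onto the reserved FT
 * chain (chain 0 only, priority shifted by one) and reuses the flower
 * offload path.
 */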
static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct flow_cls_offload tmp, *f = type_data;
	struct mlx5e_priv *priv = cb_priv;
	struct mlx5_eswitch *esw;
	unsigned long flags;
	int err;

	flags = MLX5_TC_FLAG(INGRESS) |
		MLX5_TC_FLAG(ESW_OFFLOAD) |
		MLX5_TC_FLAG(FT_OFFLOAD);
	esw = priv->mdev->priv.eswitch;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		memcpy(&tmp, f, sizeof(*f));

		if (!mlx5_chains_prios_supported(esw_chains(esw)))
			return -EOPNOTSUPP;

		/* Re-use the tc offload path by moving the ft flow to the
		 * reserved ft chain.
		 *
		 * FT offload can use prio range [0, INT_MAX], so we normalize
		 * it to the range [1, mlx5_chains_get_prio_range(esw_chains(esw))]
		 * as with tc, where prio 0 isn't supported.
		 *
		 * We only support chain 0 of FT offload.
		 */
		if (tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)))
			return -EOPNOTSUPP;
		if (tmp.common.chain_index != 0)
			return -EOPNOTSUPP;

		tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
		tmp.common.prio++;
		err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
		memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
		return err;
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
static LIST_HEAD(mlx5e_rep_block_ft_cb_list);
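/* ndo_setup_tc entry point for representors: binds TC and FT flow blocks
 * through the simple flow block helpers, using unlocked driver callbacks.
 */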
int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
		       void *type_data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct flow_block_offload *f = type_data;

	f->unlocked_driver_cb = true;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &mlx5e_rep_block_tc_cb_list,
						  mlx5e_rep_setup_tc_cb,
						  priv, priv, true);
	case TC_SETUP_FT:
		return flow_block_cb_setup_simple(type_data,
						  &mlx5e_rep_block_ft_cb_list,
						  mlx5e_rep_setup_ft_cb,
						  priv, priv, true);
	default:
		return -EOPNOTSUPP;
	}
}

int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	int err;

	mutex_init(&uplink_priv->unready_flows_lock);
	INIT_LIST_HEAD(&uplink_priv->unready_flows);

	/* init shared tc flow table */
	err = mlx5e_tc_esw_init(uplink_priv);
	return err;
}

void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv)
{
	/* delete shared tc flow table */
	mlx5e_tc_esw_cleanup(&rpriv->uplink_priv);
	mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}

void mlx5e_rep_tc_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
		  mlx5e_tc_reoffload_flows_work);
}

void mlx5e_rep_tc_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
}

int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);

	return NOTIFY_OK;
}

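/* Find the indirect block private entry registered for this netdev and
 * block binder type, if any.
 */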
static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
				 struct net_device *netdev,
				 enum flow_block_binder_type binder_type)
{
	struct mlx5e_rep_indr_block_priv *cb_priv;

	list_for_each_entry(cb_priv,
			    &rpriv->uplink_priv.tc_indr_block_priv_list,
			    list)
		if (cb_priv->netdev == netdev &&
		    cb_priv->binder_type == binder_type)
			return cb_priv;

	return NULL;
}

static int
mlx5e_rep_indr_offload(struct net_device *netdev,
		       struct flow_cls_offload *flower,
		       struct mlx5e_rep_indr_block_priv *indr_priv,
		       unsigned long flags)
{
	struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
	int err = 0;

	if (!netif_device_present(indr_priv->rpriv->netdev))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		err = mlx5e_configure_flower(netdev, priv, flower, flags);
		break;
	case FLOW_CLS_DESTROY:
		err = mlx5e_delete_flower(netdev, priv, flower, flags);
		break;
	case FLOW_CLS_STATS:
		err = mlx5e_stats_flower(netdev, priv, flower, flags);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
				      void *type_data, void *indr_priv)
{
	unsigned long flags = MLX5_TC_FLAG(ESW_OFFLOAD);
	struct mlx5e_rep_indr_block_priv *priv = indr_priv;

	flags |= (priv->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) ?
		MLX5_TC_FLAG(EGRESS) :
		MLX5_TC_FLAG(INGRESS);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
					      flags);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
				      void *type_data, void *indr_priv)
{
	struct mlx5e_rep_indr_block_priv *priv = indr_priv;
	struct flow_cls_offload *f = type_data;
	struct flow_cls_offload tmp;
	struct mlx5e_priv *mpriv;
	struct mlx5_eswitch *esw;
	unsigned long flags;
	int err;

	mpriv = netdev_priv(priv->rpriv->netdev);
	esw = mpriv->mdev->priv.eswitch;

	flags = MLX5_TC_FLAG(EGRESS) |
		MLX5_TC_FLAG(ESW_OFFLOAD) |
		MLX5_TC_FLAG(FT_OFFLOAD);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		memcpy(&tmp, f, sizeof(*f));

		/* Re-use the tc offload path by moving the ft flow to the
		 * reserved ft chain.
		 *
		 * FT offload can use prio range [0, INT_MAX], so we normalize
		 * it to the range [1, mlx5_chains_get_prio_range(esw_chains(esw))]
		 * as with tc, where prio 0 isn't supported.
		 *
		 * We only support chain 0 of FT offload.
		 */
		if (!mlx5_chains_prios_supported(esw_chains(esw)) ||
		    tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)) ||
		    tmp.common.chain_index)
			return -EOPNOTSUPP;

		tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
		tmp.common.prio++;
		err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
		memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
		return err;
	default:
		return -EOPNOTSUPP;
	}
}

static void mlx5e_rep_indr_block_unbind(void *cb_priv)
{
	struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;

	list_del(&indr_priv->list);
	kfree(indr_priv);
}

static LIST_HEAD(mlx5e_block_cb_list);

static bool mlx5e_rep_macvlan_mode_supported(const struct net_device *dev)
{
	struct macvlan_dev *macvlan = netdev_priv(dev);

	return macvlan->mode == MACVLAN_MODE_PASSTHRU;
}

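/* Decide whether an indirect flow block on netdev can be handled by this
 * uplink representor: tunnel devices, VLAN uppers of the uplink, macvlan
 * devices in passthru mode on top of the uplink or a bond master, and OVS
 * internal ports on egress when internal port offload is supported.
 */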
static bool
mlx5e_rep_check_indr_block_supported(struct mlx5e_rep_priv *rpriv,
				     struct net_device *netdev,
				     struct flow_block_offload *f)
{
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *macvlan_real_dev;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		return false;

	if (mlx5e_tc_tun_device_to_offload(priv, netdev))
		return true;

	if (is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev)
		return true;

	if (netif_is_macvlan(netdev)) {
		if (!mlx5e_rep_macvlan_mode_supported(netdev)) {
			netdev_warn(netdev, "Offloading ingress filter is supported only with macvlan passthru mode\n");
			return false;
		}

		macvlan_real_dev = macvlan_dev_real_dev(netdev);

		if (macvlan_real_dev == rpriv->netdev)
			return true;
		if (netif_is_bond_master(macvlan_real_dev))
			return true;
	}

	if (netif_is_ovs_master(netdev) && f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
	    mlx5e_tc_int_port_supported(esw))
		return true;

	return false;
}

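/* Bind or unbind an indirect flow block for a supported device: on BIND,
 * allocate the per-device private data and register the block callback;
 * on UNBIND, remove the callback and let the unbind release hook free the
 * private data.
 */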
static int
mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
			   struct mlx5e_rep_priv *rpriv,
			   struct flow_block_offload *f,
			   flow_setup_cb_t *setup_cb,
			   void *data,
			   void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct mlx5e_rep_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (!mlx5e_rep_check_indr_block_supported(rpriv, netdev, f))
		return -EOPNOTSUPP;

	f->unlocked_driver_cb = true;
	f->driver_block_list = &mlx5e_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev, f->binder_type);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->rpriv = rpriv;
		indr_priv->binder_type = f->binder_type;
		list_add(&indr_priv->list,
			 &rpriv->uplink_priv.tc_indr_block_priv_list);

		block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
						    mlx5e_rep_indr_block_unbind,
						    f, netdev, sch, data, rpriv,
						    cleanup);
		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);

		return 0;
	case FLOW_BLOCK_UNBIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev, f->binder_type);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

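/* Offload a standalone TC action; only a single action per offload request
 * is handled. The namespace is FDB when the eswitch is in offloads mode,
 * otherwise the kernel (NIC) namespace.
 */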
static int
mlx5e_rep_indr_replace_act(struct mlx5e_rep_priv *rpriv,
			   struct flow_offload_action *fl_act)

{
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	enum mlx5_flow_namespace_type ns_type;
	struct flow_action_entry *action;
	struct mlx5e_tc_act *act;
	bool add = false;
	int i;

	/* There is currently no use case for more than one action (e.g. pedit).
	 * When there is, cleanup of multiple actions on error will need to be
	 * handled.
	 */
	if (!flow_offload_has_one_action(&fl_act->action))
		return -EOPNOTSUPP;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		ns_type = MLX5_FLOW_NAMESPACE_FDB;
	else
		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;

	flow_action_for_each(i, action, &fl_act->action) {
		act = mlx5e_tc_act_get(action->id, ns_type);
		if (!act)
			continue;

		if (!act->offload_action)
			continue;

		if (!act->offload_action(priv, fl_act, action))
			add = true;
	}

	return add ? 0 : -EOPNOTSUPP;
}

static int
mlx5e_rep_indr_destroy_act(struct mlx5e_rep_priv *rpriv,
			   struct flow_offload_action *fl_act)
{
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5e_tc_act *act;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		ns_type = MLX5_FLOW_NAMESPACE_FDB;
	else
		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;

	act = mlx5e_tc_act_get(fl_act->id, ns_type);
	if (!act || !act->destroy_action)
		return -EOPNOTSUPP;

	return act->destroy_action(priv, fl_act);
}

static int
mlx5e_rep_indr_stats_act(struct mlx5e_rep_priv *rpriv,
			 struct flow_offload_action *fl_act)

{
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5e_tc_act *act;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		ns_type = MLX5_FLOW_NAMESPACE_FDB;
	else
		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;

	act = mlx5e_tc_act_get(fl_act->id, ns_type);
	if (!act || !act->stats_action)
		return mlx5e_tc_fill_action_stats(priv, fl_act);

	return act->stats_action(priv, fl_act);
}

static int
mlx5e_rep_indr_setup_act(struct mlx5e_rep_priv *rpriv,
			 struct flow_offload_action *fl_act)
{
	switch (fl_act->command) {
	case FLOW_ACT_REPLACE:
		return mlx5e_rep_indr_replace_act(rpriv, fl_act);
	case FLOW_ACT_DESTROY:
		return mlx5e_rep_indr_destroy_act(rpriv, fl_act);
	case FLOW_ACT_STATS:
		return mlx5e_rep_indr_stats_act(rpriv, fl_act);
	default:
		return -EOPNOTSUPP;
	}
}

static int
mlx5e_rep_indr_no_dev_setup(struct mlx5e_rep_priv *rpriv,
			    enum tc_setup_type type,
			    void *data)
{
	if (!data)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_ACT:
		return mlx5e_rep_indr_setup_act(rpriv, data);
	default:
		return -EOPNOTSUPP;
	}
}

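/* Top-level indirect offload callback registered with
 * flow_indr_dev_register(): dispatches TC and FT block setups for other
 * devices, and action-only setups when no netdev is involved.
 */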
static
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
			    enum tc_setup_type type, void *type_data,
			    void *data,
			    void (*cleanup)(struct flow_block_cb *block_cb))
{
	if (!netdev)
		return mlx5e_rep_indr_no_dev_setup(cb_priv, type, data);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
						  mlx5e_rep_indr_setup_tc_cb,
						  data, cleanup);
	case TC_SETUP_FT:
		return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
						  mlx5e_rep_indr_setup_ft_cb,
						  data, cleanup);
	default:
		return -EOPNOTSUPP;
	}
}

int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;

	/* init indirect block notifications */
	INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);

	return flow_indr_dev_register(mlx5e_rep_indr_setup_cb, rpriv);
}

void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
{
	flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv,
				 mlx5e_rep_indr_block_unbind);
}

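/* RX path for representor traffic carrying mapped metadata in the CQE:
 * restores chain, tunnel and connection tracking state onto the skb (and
 * IPsec metadata when enabled), then either hands the skb to the stack via
 * GRO or transmits it on the device recovered from the mapping.
 */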
void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
			  struct sk_buff *skb)
{
	u32 reg_c0, reg_c1, zone_restore_id, tunnel_id;
	struct mlx5e_tc_update_priv tc_priv = {};
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_tc_ct_priv *ct_priv;
	struct mapping_ctx *mapping_ctx;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;

	reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
	if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
		goto forward;

	/* If mapped_obj_id is not equal to the default flow tag then skb->mark
	 * is not supported and must be reset back to 0.
	 */
	skb->mark = 0;

	priv = netdev_priv(skb->dev);
	esw = priv->mdev->priv.eswitch;
	mapping_ctx = esw->offloads.reg_c0_obj_pool;
	reg_c1 = be32_to_cpu(cqe->ft_metadata);
	zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
	tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;
	ct_priv = uplink_priv->ct_priv;

#ifdef CONFIG_MLX5_EN_IPSEC
	if (!(tunnel_id >> ESW_TUN_OPTS_BITS)) {
		u32 mapped_id;
		u32 metadata;

		mapped_id = tunnel_id & ESW_IPSEC_RX_MAPPED_ID_MASK;
		if (mapped_id &&
		    !mlx5_esw_ipsec_rx_make_metadata(priv, mapped_id, &metadata))
			mlx5e_ipsec_offload_handle_rx_skb(priv->netdev, skb, metadata);
	}
#endif

	if (!mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv,
				 zone_restore_id, tunnel_id, &tc_priv))
		goto free_skb;

forward:
	if (tc_priv.skb_done)
		goto free_skb;

	if (tc_priv.forward_tx)
		dev_queue_xmit(skb);
	else
		napi_gro_receive(rq->cq.napi, skb);

	dev_put(tc_priv.fwd_dev);

	return;

free_skb:
	dev_put(tc_priv.fwd_dev);
	dev_kfree_skb_any(skb);
}