// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

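/* TCP header flags and flow dissector control flags that the firmware
 * can match on. Rules matching on any other flag bits are rejected.
 */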
#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

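/* Flow dissector keys the flower app is able to offload; rules using
 * any key outside this set are rejected. The _TUN_DISSECTOR set lists
 * every tunnel match key, while _TUN_DISSECTOR_R is the minimum set
 * that must accompany any of them.
 */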
#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

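/* Send a control message of type @mtype to the firmware, carrying the
 * rule metadata followed by the unmasked key, the mask and the action
 * data. Length fields are converted to the long word units the
 * firmware expects before transmission and restored to bytes once the
 * message has been built.
 */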
static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
		     u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert to long words as firmware expects
	 * lengths in units of NFP_FL_LW_SIZ.
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(app->ctrl, skb);

	return 0;
}

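/* Return true if the rule matches on any field above the MAC layer:
 * L3 addresses, L4 ports or ICMP.
 */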
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);

	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

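/* Account for Geneve options in the match key: option data longer than
 * the firmware supports is rejected, otherwise the key grows by one
 * Geneve option section.
 */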
static int
nfp_flower_calc_opt_layer(struct flow_match_enc_opts *enc_opts,
			  u32 *key_layer_two, int *key_size)
{
	if (enc_opts->key->len > NFP_FL_MAX_GENEVE_OPT_KEY)
		return -EOPNOTSUPP;

	if (enc_opts->key->len > 0) {
		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
		*key_size += sizeof(struct nfp_flower_geneve_options);
	}

	return 0;
}

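/* Inspect the flower match and determine which firmware key layers the
 * rule requires, accumulating the total key size along the way.
 * Returns -EOPNOTSUPP for any match the firmware cannot express.
 */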
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct net_device *netdev,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				enum nfp_flower_tun_type *tun_type)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
	struct flow_dissector *dissector = rule->match.dissector;
	struct flow_match_basic basic = { NULL, NULL };
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;
	int err;

	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tunnel dissector key is used then the minimum required
	 * set must also be present.
	 */
	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan vlan;

		flow_rule_match_vlan(rule, &vlan);
		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
		    vlan.key->vlan_priority)
			return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_enc_opts enc_op = { NULL, NULL };
		struct flow_match_ipv4_addrs ipv4_addrs;
		struct flow_match_control enc_ctl;
		struct flow_match_ports enc_ports;

		flow_rule_match_enc_control(rule, &enc_ctl);

		if (enc_ctl.mask->addr_type != 0xffff ||
		    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;

		/* These fields are already verified as used. */
		flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
		if (ipv4_addrs.mask->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		flow_rule_match_enc_ports(rule, &enc_ports);
		if (enc_ports.mask->dst != cpu_to_be16(~0))
			return -EOPNOTSUPP;

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
			flow_rule_match_enc_opts(rule, &enc_op);

		switch (enc_ports.key->dst) {
		case htons(NFP_FL_VXLAN_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (enc_op.key)
				return -EOPNOTSUPP;
			break;
		case htons(NFP_FL_GENEVE_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
				return -EOPNOTSUPP;
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

			if (!enc_op.key)
				break;
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
				return -EOPNOTSUPP;
			err = nfp_flower_calc_opt_layer(&enc_op, &key_layer_two,
							&key_size);
			if (err)
				return err;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* Ensure the ingress netdev matches the expected tun type. */
		if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
			return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
		flow_rule_match_basic(rule, &basic);

	if (basic.mask && basic.mask->n_proto) {
		/* Ethernet type is present in the key. */
		switch (basic.key->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (basic.mask && basic.mask->ip_proto) {
		/* IP protocol is present in the key. */
		switch (basic.key->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other ip proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			return -EOPNOTSUPP;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp tcp;
		u32 tcp_flags;

		flow_rule_match_tcp(rule, &tcp);
		tcp_flags = be16_to_cpu(tcp.key->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
			return -EOPNOTSUPP;

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
			return -EOPNOTSUPP;

		/* We need to store TCP flags in either the IPv4 or IPv6 key
		 * space, thus we need to ensure we include an IPv4/IPv6 key
		 * layer if we have not done so already.
		 */
		if (!basic.key)
			return -EOPNOTSUPP;

		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
			switch (basic.key->n_proto) {
			case cpu_to_be16(ETH_P_IP):
				key_layer |= NFP_FLOWER_LAYER_IPV4;
				key_size += sizeof(struct nfp_flower_ipv4);
				break;

			case cpu_to_be16(ETH_P_IPV6):
				key_layer |= NFP_FLOWER_LAYER_IPV6;
				key_size += sizeof(struct nfp_flower_ipv6);
				break;

			default:
				return -EOPNOTSUPP;
			}
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control ctl;

		flow_rule_match_control(rule, &ctl);
		if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
			return -EOPNOTSUPP;
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

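/* Allocate an nfp_fl_payload with room for the unmasked key, the mask
 * and a maximum sized action list, based on the calculated key layout.
 */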
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 *
 * Compiles the match and action payloads for a new flow, adds it to the
 * driver's flow table and offloads it to the firmware.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct nfp_port *port = NULL;
	int err;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
					    flow_pay, tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
	if (err)
		goto err_destroy_flow;

	flow_pay->tc_flower_cookie = flow->cookie;
	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
				     nfp_flower_table_params);
	if (err)
		goto err_release_metadata;

	err = nfp_flower_xmit_flow(app, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_remove_rhash;

	if (port)
		port->tc_offload_cnt++;

	/* The key layer info is no longer needed; the flow payload itself
	 * is freed once the flower rule is destroyed.
	 */
	kfree(key_layer);

	return 0;

err_remove_rhash:
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &flow_pay->fl_node,
					    nfp_flower_table_params));
err_release_metadata:
	nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Removes a flow from the driver's flow table and clears the
 * action payload.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *nfp_flow;
	struct nfp_port *port = NULL;
	int err;

	if (nfp_netdev_is_nfp_repr(netdev))
		port = nfp_port_from_netdev(netdev);

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow)
		return -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	err = nfp_flower_xmit_flow(app, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	if (err)
		goto err_free_flow;

err_free_flow:
	if (port)
		port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
					    &nfp_flow->fl_node,
					    nfp_flower_table_params));
	kfree_rcu(nfp_flow, rcu);
	return err;
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct tc_cls_flower_offload *flow)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *nfp_flow;
	u32 ctx_id;

	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
	if (!nfp_flow)
		return -EINVAL;

	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

	spin_lock_bh(&priv->stats_lock);
	flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
			  priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);

	priv->stats[ctx_id].pkts = 0;
	priv->stats[ctx_id].bytes = 0;
	spin_unlock_bh(&priv->stats_lock);

	return 0;
}

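/* Dispatch a flower classifier command to the add, delete or stats
 * handler. Only 802.3 protocols are considered for offload.
 */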
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, netdev, flower);
	default:
		return -EOPNOTSUPP;
	}
}

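/* TC block callback for repr netdevs; flower rules on chain 0 only. */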
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

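/* Bind or unbind the flower offload callback on a repr's ingress TC
 * block.
 */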
static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct tc_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_flower_setup_tc_block_cb,
					     repr, repr, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_tc_block_cb,
					repr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

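/* Entry point for TC offload requests on repr netdevs; only block
 * binds and unbinds are handled here.
 */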
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

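/* Tracks one indirect TC block binding, linking the netdev whose block
 * we bound to the owning app instance.
 */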
struct nfp_flower_indr_block_cb_priv {
	struct net_device *netdev;
	struct nfp_app *app;
	struct list_head list;
};

static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
				     struct net_device *netdev)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

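/* TC block callback for netdevs bound indirectly; only chain 0 flower
 * rules are offloaded.
 */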
static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
					  void *type_data, void *cb_priv)
{
	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
	struct tc_cls_flower_offload *flower = type_data;

	if (flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(priv->app, priv->netdev,
					       type_data);
	default:
		return -EOPNOTSUPP;
	}
}

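/* Bind or unbind the flower offload callback on the ingress block of a
 * netdev we do not own, allocating or releasing the cb_priv structure
 * that links the netdev to this app instance.
 */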
static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
			       struct tc_block_offload *f)
{
	struct nfp_flower_indr_block_cb_priv *cb_priv;
	struct nfp_flower_priv *priv = app->priv;
	int err;

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
		if (!cb_priv)
			return -ENOMEM;

		cb_priv->netdev = netdev;
		cb_priv->app = app;
		list_add(&cb_priv->list, &priv->indr_block_cb_priv);

		err = tcf_block_cb_register(f->block,
					    nfp_flower_setup_indr_block_cb,
					    cb_priv, cb_priv, f->extack);
		if (err) {
			list_del(&cb_priv->list);
			kfree(cb_priv);
		}

		return err;
	case TC_BLOCK_UNBIND:
		cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
		if (!cb_priv)
			return -ENOENT;

		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_indr_block_cb,
					cb_priv);
		list_del(&cb_priv->list);
		kfree(cb_priv);

		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

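/* Indirect TC offload entry point; only block binds and unbinds are
 * handled.
 */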
static int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
			    enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
						      type_data);
	default:
		return -EOPNOTSUPP;
	}
}

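/* Netdev event handler that registers or unregisters the indirect
 * block callback for netdevs the flower app can offload through, e.g.
 * tunnel devices. Always returns NOTIFY_OK so that other notifier
 * consumers are not affected.
 */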
int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	int err;

	if (!nfp_fl_is_netdev_to_offload(netdev))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		err = __tc_indr_block_cb_register(netdev, app,
						  nfp_flower_indr_setup_tc_cb,
						  app);
		if (err)
			nfp_flower_cmsg_warn(app,
					     "Indirect block reg failed - %s\n",
					     netdev->name);
	} else if (event == NETDEV_UNREGISTER) {
		__tc_indr_block_cb_unregister(netdev,
					      nfp_flower_indr_setup_tc_cb, app);
	}

	return NOTIFY_OK;
}