xref: /linux/drivers/net/ethernet/netronome/nfp/flower/offload.c (revision bab2c80e5a6c855657482eac9e97f5f3eedb509a)
/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

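/* TCP header flags that the firmware is able to match on. */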
#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
	(TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
	 TCPHDR_PSH | TCPHDR_URG)

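/* Flow dissector control flags (fragmentation bits) that can be offloaded. */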
#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
	(FLOW_DIS_IS_FRAGMENT | \
	 FLOW_DIS_FIRST_FRAG)

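/* Full set of dissector keys supported for offload; a flow using any
 * key outside this set is rejected.
 */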
#define NFP_FLOWER_WHITELIST_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_BASIC) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_TCP) | \
	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
	 BIT(FLOW_DISSECTOR_KEY_MPLS) | \
	 BIT(FLOW_DISSECTOR_KEY_IP))

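/* Tunnel (encap) dissector keys that may appear in an offloaded match. */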
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

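/* Minimum (required) set of tunnel keys that must be present whenever
 * any tunnel key is used in a match.
 */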
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

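/**
 * nfp_flower_xmit_flow() - Send a flow rule to the firmware.
 * @netdev:	netdev structure.
 * @nfp_flow:	Flow payload containing metadata, match and action data.
 * @mtype:	Control message type, e.g. flow add or flow delete.
 *
 * Builds a control message carrying the rule metadata, unmasked key,
 * mask and action data, and queues it on the control channel.
 *
 * Return: negative value on error, 0 if the message was queued.
 */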
static int
nfp_flower_xmit_flow(struct net_device *netdev,
		     struct nfp_fl_payload *nfp_flow, u8 mtype)
{
	u32 meta_len, key_len, mask_len, act_len, tot_len;
	struct nfp_repr *priv = netdev_priv(netdev);
	struct sk_buff *skb;
	unsigned char *msg;

	meta_len = sizeof(struct nfp_fl_rule_metadata);
	key_len = nfp_flow->meta.key_len;
	mask_len = nfp_flow->meta.mask_len;
	act_len = nfp_flow->meta.act_len;

	tot_len = meta_len + key_len + mask_len + act_len;

	/* Convert the lengths from bytes to long words, as the firmware
	 * expects them in units of long words (1 << NFP_FL_LW_SIZ bytes).
	 */
	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	memcpy(msg, &nfp_flow->meta, meta_len);
	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
	memcpy(&msg[meta_len + key_len + mask_len],
	       nfp_flow->action_data, act_len);

	/* Convert back to bytes as software expects
	 * lengths in units of bytes.
	 */
	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

	nfp_ctrl_tx(priv->app->ctrl, skb);

	return 0;
}

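/* Returns true if the flow matches on any field above the MAC layer. */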
static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
	return dissector_uses_key(f->dissector,
				  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
		dissector_uses_key(f->dissector,
				   FLOW_DISSECTOR_KEY_PORTS) ||
		dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
}

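/**
 * nfp_flower_calculate_key_layers() - Calculate required match key layers.
 * @app:	Pointer to the APP handle
 * @ret_key_ls:	Key layer structure to populate.
 * @flow:	TC flower classifier offload structure.
 * @egress:	NFP netdev is the egress.
 * @tun_type:	Set to the tunnel type if a tunnel match is present.
 *
 * Validates the flow against the supported dissector keys and works out
 * which key layers, and hence how much key space, are needed to
 * represent the match.
 *
 * Return: negative value on error, 0 if key layers calculated.
 */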
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
				struct nfp_fl_key_ls *ret_key_ls,
				struct tc_cls_flower_offload *flow,
				bool egress,
				enum nfp_flower_tun_type *tun_type)
{
	struct flow_dissector_key_basic *mask_basic = NULL;
	struct flow_dissector_key_basic *key_basic = NULL;
	struct nfp_flower_priv *priv = app->priv;
	u32 key_layer_two;
	u8 key_layer;
	int key_size;

	if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
		return -EOPNOTSUPP;

	/* If any tunnel dissector key is used then the required set of
	 * keys must also be present.
	 */
	if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
	    (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
		return -EOPNOTSUPP;

	key_layer_two = 0;
	key_layer = NFP_FLOWER_LAYER_PORT;
	key_size = sizeof(struct nfp_flower_meta_tci) +
		   sizeof(struct nfp_flower_in_port);

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
	    dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
		key_layer |= NFP_FLOWER_LAYER_MAC;
		key_size += sizeof(struct nfp_flower_mac_mpls);
	}

	if (dissector_uses_key(flow->dissector,
			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
		struct flow_dissector_key_ports *mask_enc_ports = NULL;
		struct flow_dissector_key_ports *enc_ports = NULL;
		struct flow_dissector_key_control *mask_enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->mask);
		struct flow_dissector_key_control *enc_ctl =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  flow->key);

		if (!egress)
			return -EOPNOTSUPP;

		if (mask_enc_ctl->addr_type != 0xffff ||
		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
			return -EOPNOTSUPP;

		/* These fields are already verified as used. */
		mask_ipv4 =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  flow->mask);
		if (mask_ipv4->dst != cpu_to_be32(~0))
			return -EOPNOTSUPP;

		mask_enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->mask);
		enc_ports =
			skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  flow->key);

		if (mask_enc_ports->dst != cpu_to_be16(~0))
			return -EOPNOTSUPP;

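		/* The tunnel type is selected by the well-known destination
		 * UDP port.
		 */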
		switch (enc_ports->dst) {
		case htons(NFP_FL_VXLAN_PORT):
			*tun_type = NFP_FL_TUNNEL_VXLAN;
			key_layer |= NFP_FLOWER_LAYER_VXLAN;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
			break;
		case htons(NFP_FL_GENEVE_PORT):
			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
				return -EOPNOTSUPP;
			*tun_type = NFP_FL_TUNNEL_GENEVE;
			key_layer |= NFP_FLOWER_LAYER_EXT_META;
			key_size += sizeof(struct nfp_flower_ext_meta);
			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
			break;
		default:
			return -EOPNOTSUPP;
		}
	} else if (egress) {
		/* Reject non-tunnel matches offloaded to egress repr. */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		mask_basic = skb_flow_dissector_target(flow->dissector,
						       FLOW_DISSECTOR_KEY_BASIC,
						       flow->mask);

		key_basic = skb_flow_dissector_target(flow->dissector,
						      FLOW_DISSECTOR_KEY_BASIC,
						      flow->key);
	}

	if (mask_basic && mask_basic->n_proto) {
		/* Ethernet type is present in the key. */
		switch (key_basic->n_proto) {
		case cpu_to_be16(ETH_P_IP):
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
			break;

		case cpu_to_be16(ETH_P_IPV6):
			key_layer |= NFP_FLOWER_LAYER_IPV6;
			key_size += sizeof(struct nfp_flower_ipv6);
			break;

		/* Currently we do not offload ARP
		 * because we rely on it to get to the host.
		 */
		case cpu_to_be16(ETH_P_ARP):
			return -EOPNOTSUPP;

		case cpu_to_be16(ETH_P_MPLS_UC):
		case cpu_to_be16(ETH_P_MPLS_MC):
			if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
				key_layer |= NFP_FLOWER_LAYER_MAC;
				key_size += sizeof(struct nfp_flower_mac_mpls);
			}
			break;

		/* Will be included in layer 2. */
		case cpu_to_be16(ETH_P_8021Q):
			break;

		default:
			/* Other ethtype - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			if (nfp_flower_check_higher_than_mac(flow))
				return -EOPNOTSUPP;
			break;
		}
	}

	if (mask_basic && mask_basic->ip_proto) {
		/* IP protocol is present in the key. */
		switch (key_basic->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
		case IPPROTO_SCTP:
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			key_layer |= NFP_FLOWER_LAYER_TP;
			key_size += sizeof(struct nfp_flower_tp_ports);
			break;
		default:
			/* Other ip proto - we need to check the masks for the
			 * remainder of the key to ensure we can offload.
			 */
			return -EOPNOTSUPP;
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *tcp;
		u32 tcp_flags;

		tcp = skb_flow_dissector_target(flow->dissector,
						FLOW_DISSECTOR_KEY_TCP,
						flow->key);
		tcp_flags = be16_to_cpu(tcp->flags);

		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
			return -EOPNOTSUPP;

		/* We only support PSH and URG flags when either
		 * FIN, SYN or RST is present as well.
		 */
		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
			return -EOPNOTSUPP;

		/* We need to store TCP flags in the IPv4 key space, thus
		 * we need to ensure we include an IPv4 key layer if we have
		 * not done so already.
		 */
		if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
			key_layer |= NFP_FLOWER_LAYER_IPV4;
			key_size += sizeof(struct nfp_flower_ipv4);
		}
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key_ctl;

		key_ctl = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_CONTROL,
						    flow->key);

		if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
			return -EOPNOTSUPP;
	}

	ret_key_ls->key_layer = key_layer;
	ret_key_ls->key_layer_two = key_layer_two;
	ret_key_ls->key_size = key_size;

	return 0;
}

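/**
 * nfp_flower_allocate_new() - Allocate a new flow payload.
 * @key_layer:	Calculated key layout, used to size the match buffers.
 * @egress:	NFP netdev is the egress.
 *
 * Return: pointer to the new flow payload, NULL on allocation failure.
 */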
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
{
	struct nfp_fl_payload *flow_pay;

	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
	if (!flow_pay)
		return NULL;

	flow_pay->meta.key_len = key_layer->key_size;
	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->unmasked_data)
		goto err_free_flow;

	flow_pay->meta.mask_len = key_layer->key_size;
	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
	if (!flow_pay->mask_data)
		goto err_free_unmasked;

	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
	if (!flow_pay->action_data)
		goto err_free_mask;

	flow_pay->nfp_tun_ipv4_addr = 0;
	flow_pay->meta.flags = 0;
	spin_lock_init(&flow_pay->lock);

	flow_pay->ingress_offload = !egress;

	return flow_pay;

err_free_mask:
	kfree(flow_pay->mask_data);
err_free_unmasked:
	kfree(flow_pay->unmasked_data);
err_free_flow:
	kfree(flow_pay);
	return NULL;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure.
 * @egress:	NFP netdev is the egress.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_payload *flow_pay;
	struct nfp_fl_key_ls *key_layer;
	struct net_device *ingr_dev;
	int err;

	ingr_dev = egress ? NULL : netdev;
	flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (flow_pay) {
		/* Ignore as duplicate if it has been added by a different cb. */
		if (flow_pay->ingress_offload && egress)
			return 0;
		else
			return -EOPNOTSUPP;
	}

	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
	if (!key_layer)
		return -ENOMEM;

	err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
					      &tun_type);
	if (err)
		goto err_free_key_ls;

	flow_pay = nfp_flower_allocate_new(key_layer, egress);
	if (!flow_pay) {
		err = -ENOMEM;
		goto err_free_key_ls;
	}

	flow_pay->ingress_dev = egress ? NULL : netdev;

	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
					    tun_type);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
	if (err)
		goto err_destroy_flow;

	err = nfp_compile_flow_metadata(app, flow, flow_pay,
					flow_pay->ingress_dev);
	if (err)
		goto err_destroy_flow;

	err = nfp_flower_xmit_flow(netdev, flow_pay,
				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
	if (err)
		goto err_destroy_flow;

	INIT_HLIST_NODE(&flow_pay->link);
	flow_pay->tc_flower_cookie = flow->cookie;
	hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
	port->tc_offload_cnt++;

	/* The flow payload is now tracked in the flow table and is freed
	 * when the flower rule is destroyed; the key layout scratch data
	 * is no longer needed.
	 */
	kfree(key_layer);

	return 0;

err_destroy_flow:
	kfree(flow_pay->action_data);
	kfree(flow_pay->mask_data);
	kfree(flow_pay->unmasked_data);
	kfree(flow_pay);
err_free_key_ls:
	kfree(key_layer);
	return err;
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 * @egress:	Netdev is the egress dev.
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
		       struct tc_cls_flower_offload *flow, bool egress)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_fl_payload *nfp_flow;
	struct net_device *ingr_dev;
	int err;

	ingr_dev = egress ? NULL : netdev;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (!nfp_flow)
		return egress ? 0 : -ENOENT;

	err = nfp_modify_flow_metadata(app, nfp_flow);
	if (err)
		goto err_free_flow;

	if (nfp_flow->nfp_tun_ipv4_addr)
		nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

	err = nfp_flower_xmit_flow(netdev, nfp_flow,
				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
	if (err)
		goto err_free_flow;

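	/* Fall through: the local flow state is freed on both the success
	 * and the error path.
	 */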
err_free_flow:
	hash_del_rcu(&nfp_flow->link);
	port->tc_offload_cnt--;
	kfree(nfp_flow->action_data);
	kfree(nfp_flow->mask_data);
	kfree(nfp_flow->unmasked_data);
	kfree_rcu(nfp_flow, rcu);
	return err;
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:	Pointer to the APP handle
 * @netdev:	netdev structure.
 * @flow:	TC flower classifier offload structure
 * @egress:	Netdev is the egress dev.
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
		     struct tc_cls_flower_offload *flow, bool egress)
{
	struct nfp_fl_payload *nfp_flow;
	struct net_device *ingr_dev;

	ingr_dev = egress ? NULL : netdev;
	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
					      NFP_FL_STATS_CTX_DONT_CARE);
	if (!nfp_flow)
		return -EINVAL;

	if (nfp_flow->ingress_offload && egress)
		return 0;

	spin_lock_bh(&nfp_flow->lock);
	tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
			      nfp_flow->stats.pkts, nfp_flow->stats.used);

	nfp_flow->stats.pkts = 0;
	nfp_flow->stats.bytes = 0;
	spin_unlock_bh(&nfp_flow->lock);

	return 0;
}

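/* Dispatch a flower classifier command to the add, delete or stats handler. */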
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
			struct tc_cls_flower_offload *flower, bool egress)
{
	if (!eth_proto_is_802_3(flower->common.protocol))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return nfp_flower_add_offload(app, netdev, flower, egress);
	case TC_CLSFLOWER_DESTROY:
		return nfp_flower_del_offload(app, netdev, flower, egress);
	case TC_CLSFLOWER_STATS:
		return nfp_flower_get_stats(app, netdev, flower, egress);
	}

	return -EOPNOTSUPP;
}

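/* TC offload callback invoked when the repr is the rule's egress device. */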
int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, true);
	default:
		return -EOPNOTSUPP;
	}
}

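/* TC block callback for rules added directly on the repr (ingress path). */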
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
					void *type_data, void *cb_priv)
{
	struct nfp_repr *repr = cb_priv;

	if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return nfp_flower_repr_offload(repr->app, repr->netdev,
					       type_data, false);
	default:
		return -EOPNOTSUPP;
	}
}

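/* Bind or unbind the flower block callback on an unshared ingress block. */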
static int nfp_flower_setup_tc_block(struct net_device *netdev,
				     struct tc_block_offload *f)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	if (tcf_block_shared(f->block))
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block,
					     nfp_flower_setup_tc_block_cb,
					     repr, repr);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block,
					nfp_flower_setup_tc_block_cb,
					repr);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

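/* Entry point for TC offload requests on a repr; only block binds are
 * handled here.
 */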
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
			enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return nfp_flower_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}