xref: /linux/drivers/net/ethernet/netronome/nfp/flower/offload.c (revision 26b433d0da062d6e19d75350c0171d3cf8ff560d)
1 /*
2  * Copyright (C) 2017 Netronome Systems, Inc.
3  *
4  * This software is dual licensed under the GNU General Public License Version 2,
5  * June 1991 as shown in the file COPYING in the top-level directory of this
6  * source tree or the BSD 2-Clause License provided below.  You have the
7  * option to license this software under the complete terms of either license.
8  *
9  * The BSD 2-Clause License:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      1. Redistributions of source code must retain the above
16  *         copyright notice, this list of conditions and the following
17  *         disclaimer.
18  *
19  *      2. Redistributions in binary form must reproduce the above
20  *         copyright notice, this list of conditions and the following
21  *         disclaimer in the documentation and/or other materials
22  *         provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/skbuff.h>
35 #include <net/devlink.h>
36 #include <net/pkt_cls.h>
37 
38 #include "cmsg.h"
39 #include "main.h"
40 #include "../nfpcore/nfp_cpp.h"
41 #include "../nfpcore/nfp_nsp.h"
42 #include "../nfp_app.h"
43 #include "../nfp_main.h"
44 #include "../nfp_net.h"
45 #include "../nfp_port.h"
46 
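/**
 * nfp_flower_xmit_flow() - Send a flow rule to the firmware.
 * @netdev:	netdev structure.
 * @nfp_flow:	Flow payload to transmit.
 * @mtype:	Control message type, e.g. NFP_FLOWER_CMSG_TYPE_FLOW_ADD.
 *
 * Builds a control message laid out as metadata, unmasked key, mask and
 * actions, temporarily converting the metadata lengths to long words as
 * the firmware expects, and sends it on the control channel.
 *
 * Return: negative value on error, 0 if the message was sent.
 */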
47 static int
48 nfp_flower_xmit_flow(struct net_device *netdev,
49 		     struct nfp_fl_payload *nfp_flow, u8 mtype)
50 {
51 	u32 meta_len, key_len, mask_len, act_len, tot_len;
52 	struct nfp_repr *priv = netdev_priv(netdev);
53 	struct sk_buff *skb;
54 	unsigned char *msg;
55 
56 	meta_len = sizeof(struct nfp_fl_rule_metadata);
57 	key_len = nfp_flow->meta.key_len;
58 	mask_len = nfp_flow->meta.mask_len;
59 	act_len = nfp_flow->meta.act_len;
60 
61 	tot_len = meta_len + key_len + mask_len + act_len;
62 
63 	/* Convert to long words as firmware expects
64 	 * lengths in units of NFP_FL_LW_SIZ.
65 	 */
66 	nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
67 	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
68 	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
69 
70 	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype);
71 	if (!skb)
72 		return -ENOMEM;
73 
74 	msg = nfp_flower_cmsg_get_data(skb);
75 	memcpy(msg, &nfp_flow->meta, meta_len);
76 	memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
77 	memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
78 	memcpy(&msg[meta_len + key_len + mask_len],
79 	       nfp_flow->action_data, act_len);
80 
81 	/* Convert back to bytes as software expects
82 	 * lengths in units of bytes.
83 	 */
84 	nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
85 	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
86 	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
87 
88 	nfp_ctrl_tx(priv->app->ctrl, skb);
89 
90 	return 0;
91 }
92 
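/* Return true if the flow matches on any field above the MAC layer
 * (L3 addresses, L4 ports or ICMP). Used to reject flows with an
 * unhandled ethertype that also match beyond layer 2.
 */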
93 static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
94 {
95 	return dissector_uses_key(f->dissector,
96 				  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
97 		dissector_uses_key(f->dissector,
98 				   FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
99 		dissector_uses_key(f->dissector,
100 				   FLOW_DISSECTOR_KEY_PORTS) ||
101 		dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
102 }
103 
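/* Inspect the flow dissector keys to work out which NFP flower key layers
 * and total key size a match requires. Returns -EOPNOTSUPP for matches
 * that cannot currently be offloaded, such as tunnels, ARP, MPLS,
 * IP tos/ttl or unknown transport protocols.
 */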
104 static int
105 nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
106 				struct tc_cls_flower_offload *flow)
107 {
108 	struct flow_dissector_key_basic *mask_basic = NULL;
109 	struct flow_dissector_key_basic *key_basic = NULL;
110 	struct flow_dissector_key_ip *mask_ip = NULL;
111 	u32 key_layer_two;
112 	u8 key_layer;
113 	int key_size;
114 
115 	if (dissector_uses_key(flow->dissector,
116 			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
117 		struct flow_dissector_key_control *mask_enc_ctl =
118 			skb_flow_dissector_target(flow->dissector,
119 						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
120 						  flow->mask);
121 		/* We are expecting a tunnel. For now we do not offload it. */
122 		if (mask_enc_ctl->addr_type)
123 			return -EOPNOTSUPP;
124 	}
125 
126 	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
127 		mask_basic = skb_flow_dissector_target(flow->dissector,
128 						       FLOW_DISSECTOR_KEY_BASIC,
129 						       flow->mask);
130 
131 		key_basic = skb_flow_dissector_target(flow->dissector,
132 						      FLOW_DISSECTOR_KEY_BASIC,
133 						      flow->key);
134 	}
135 
136 	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP))
137 		mask_ip = skb_flow_dissector_target(flow->dissector,
138 						    FLOW_DISSECTOR_KEY_IP,
139 						    flow->mask);
140 
141 	key_layer_two = 0;
142 	key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
143 	key_size = sizeof(struct nfp_flower_meta_one) +
144 		   sizeof(struct nfp_flower_in_port) +
145 		   sizeof(struct nfp_flower_mac_mpls);
146 
147 	if (mask_basic && mask_basic->n_proto) {
148 		/* Ethernet type is present in the key. */
149 		switch (key_basic->n_proto) {
150 		case cpu_to_be16(ETH_P_IP):
151 			if (mask_ip && mask_ip->tos)
152 				return -EOPNOTSUPP;
153 			if (mask_ip && mask_ip->ttl)
154 				return -EOPNOTSUPP;
155 			key_layer |= NFP_FLOWER_LAYER_IPV4;
156 			key_size += sizeof(struct nfp_flower_ipv4);
157 			break;
158 
159 		case cpu_to_be16(ETH_P_IPV6):
160 			if (mask_ip && mask_ip->tos)
161 				return -EOPNOTSUPP;
162 			if (mask_ip && mask_ip->ttl)
163 				return -EOPNOTSUPP;
164 			key_layer |= NFP_FLOWER_LAYER_IPV6;
165 			key_size += sizeof(struct nfp_flower_ipv6);
166 			break;
167 
168 		/* Currently we do not offload ARP
169 		 * because we rely on it to get to the host.
170 		 */
171 		case cpu_to_be16(ETH_P_ARP):
172 			return -EOPNOTSUPP;
173 
174 		/* Currently we do not offload MPLS. */
175 		case cpu_to_be16(ETH_P_MPLS_UC):
176 		case cpu_to_be16(ETH_P_MPLS_MC):
177 			return -EOPNOTSUPP;
178 
179 		/* Will be included in layer 2. */
180 		case cpu_to_be16(ETH_P_8021Q):
181 			break;
182 
183 		default:
184 			/* Other ethtype - we need to check the masks for the
185 			 * remainder of the key to ensure we can offload.
186 			 */
187 			if (nfp_flower_check_higher_than_mac(flow))
188 				return -EOPNOTSUPP;
189 			break;
190 		}
191 	}
192 
193 	if (mask_basic && mask_basic->ip_proto) {
194 		/* Transport layer protocol is present in the key. */
195 		switch (key_basic->ip_proto) {
196 		case IPPROTO_TCP:
197 		case IPPROTO_UDP:
198 		case IPPROTO_SCTP:
199 		case IPPROTO_ICMP:
200 		case IPPROTO_ICMPV6:
201 			key_layer |= NFP_FLOWER_LAYER_TP;
202 			key_size += sizeof(struct nfp_flower_tp_ports);
203 			break;
204 		default:
205 			/* Other ip proto - we cannot check the rest of the
206 			 * key for this protocol, so do not offload.
207 			 */
208 			return -EOPNOTSUPP;
209 		}
210 	}
211 
212 	ret_key_ls->key_layer = key_layer;
213 	ret_key_ls->key_layer_two = key_layer_two;
214 	ret_key_ls->key_size = key_size;
215 
216 	return 0;
217 }
218 
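/* Allocate a flow payload with key and mask buffers sized from the
 * calculated key layers and a maximum-size action buffer. Returns NULL
 * if any allocation fails.
 */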
219 static struct nfp_fl_payload *
220 nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
221 {
222 	struct nfp_fl_payload *flow_pay;
223 
224 	flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
225 	if (!flow_pay)
226 		return NULL;
227 
228 	flow_pay->meta.key_len = key_layer->key_size;
229 	flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
230 	if (!flow_pay->unmasked_data)
231 		goto err_free_flow;
232 
233 	flow_pay->meta.mask_len = key_layer->key_size;
234 	flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
235 	if (!flow_pay->mask_data)
236 		goto err_free_unmasked;
237 
238 	flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
239 	if (!flow_pay->action_data)
240 		goto err_free_mask;
241 
242 	flow_pay->meta.flags = 0;
243 	spin_lock_init(&flow_pay->lock);
244 
245 	return flow_pay;
246 
247 err_free_mask:
248 	kfree(flow_pay->mask_data);
249 err_free_unmasked:
250 	kfree(flow_pay->unmasked_data);
251 err_free_flow:
252 	kfree(flow_pay);
253 	return NULL;
254 }
255 
256 /**
257  * nfp_flower_add_offload() - Adds a new flow to hardware.
258  * @app:	Pointer to the APP handle
259  * @netdev:	netdev structure.
260  * @flow:	TC flower classifier offload structure.
261  *
262  * Compiles and offloads a new flow, then adds it to the flow hash table.
263  *
264  * Return: negative value on error, 0 if configured successfully.
265  */
266 static int
267 nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
268 		       struct tc_cls_flower_offload *flow)
269 {
270 	struct nfp_flower_priv *priv = app->priv;
271 	struct nfp_fl_payload *flow_pay;
272 	struct nfp_fl_key_ls *key_layer;
273 	int err;
274 
275 	key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
276 	if (!key_layer)
277 		return -ENOMEM;
278 
279 	err = nfp_flower_calculate_key_layers(key_layer, flow);
280 	if (err)
281 		goto err_free_key_ls;
282 
283 	flow_pay = nfp_flower_allocate_new(key_layer);
284 	if (!flow_pay) {
285 		err = -ENOMEM;
286 		goto err_free_key_ls;
287 	}
288 
289 	err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay);
290 	if (err)
291 		goto err_destroy_flow;
292 
293 	err = nfp_flower_compile_action(flow, netdev, flow_pay);
294 	if (err)
295 		goto err_destroy_flow;
296 
297 	err = nfp_compile_flow_metadata(app, flow, flow_pay);
298 	if (err)
299 		goto err_destroy_flow;
300 
301 	err = nfp_flower_xmit_flow(netdev, flow_pay,
302 				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
303 	if (err)
304 		goto err_destroy_flow;
305 
306 	INIT_HLIST_NODE(&flow_pay->link);
307 	flow_pay->tc_flower_cookie = flow->cookie;
308 	hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
309 
310 	/* The flow payload itself is freed when the flower rule is destroyed. */
311 	kfree(key_layer);
312 
313 	return 0;
314 
315 err_destroy_flow:
316 	kfree(flow_pay->action_data);
317 	kfree(flow_pay->mask_data);
318 	kfree(flow_pay->unmasked_data);
319 	kfree(flow_pay);
320 err_free_key_ls:
321 	kfree(key_layer);
322 	return err;
323 }
324 
325 /**
326  * nfp_flower_del_offload() - Removes a flow from hardware.
327  * @app:	Pointer to the APP handle
328  * @netdev:	netdev structure.
329  * @flow:	TC flower classifier offload structure
330  *
331  * Removes the flow from the firmware and from the driver's flow hash
332  * table, then frees its payload.
333  *
334  * Return: negative value on error, 0 if removed successfully.
335  */
336 static int
337 nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
338 		       struct tc_cls_flower_offload *flow)
339 {
340 	struct nfp_fl_payload *nfp_flow;
341 	int err;
342 
343 	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
344 	if (!nfp_flow)
345 		return -ENOENT;
346 
347 	err = nfp_modify_flow_metadata(app, nfp_flow);
348 	if (err)
349 		goto err_free_flow;
350 
351 	err = nfp_flower_xmit_flow(netdev, nfp_flow,
352 				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
353 	if (err)
354 		goto err_free_flow;
355 
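/* Reached on success as well as on error: once the delete has been
 * attempted the flow is always unlinked from the table and freed.
 */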
356 err_free_flow:
357 	hash_del_rcu(&nfp_flow->link);
358 	kfree(nfp_flow->action_data);
359 	kfree(nfp_flow->mask_data);
360 	kfree(nfp_flow->unmasked_data);
361 	kfree_rcu(nfp_flow, rcu);
362 	return err;
363 }
364 
365 /**
366  * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
367  * @app:	Pointer to the APP handle
368  * @flow:	TC flower classifier offload structure
369  *
370  * Populates a flow statistics structure which corresponds to a
371  * specific flow.
372  *
373  * Return: negative value on error, 0 if stats populated successfully.
374  */
375 static int
376 nfp_flower_get_stats(struct nfp_app *app, struct tc_cls_flower_offload *flow)
377 {
378 	struct nfp_fl_payload *nfp_flow;
379 
380 	nfp_flow = nfp_flower_search_fl_table(app, flow->cookie);
381 	if (!nfp_flow)
382 		return -EINVAL;
383 
384 	spin_lock_bh(&nfp_flow->lock);
385 	tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
386 			      nfp_flow->stats.pkts, nfp_flow->stats.used);
387 
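	/* Stats are reported to tc as a delta, so clear the cached
	 * counters once they have been passed up.
	 */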
388 	nfp_flow->stats.pkts = 0;
389 	nfp_flow->stats.bytes = 0;
390 	spin_unlock_bh(&nfp_flow->lock);
391 
392 	return 0;
393 }
394 
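/* Dispatch a tc flower command from a representor netdev to the matching
 * add, delete or stats handler.
 *
 * Illustrative example (the command below is not part of this file): a rule
 * added with
 *   tc filter add dev $REPR parent ffff: protocol ip flower ip_proto tcp \
 *           action drop
 * arrives as TC_CLSFLOWER_REPLACE and, if its key layers can be offloaded,
 * is sent to the firmware as an NFP_FLOWER_CMSG_TYPE_FLOW_ADD message.
 */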
395 static int
396 nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
397 			struct tc_cls_flower_offload *flower)
398 {
399 	switch (flower->command) {
400 	case TC_CLSFLOWER_REPLACE:
401 		return nfp_flower_add_offload(app, netdev, flower);
402 	case TC_CLSFLOWER_DESTROY:
403 		return nfp_flower_del_offload(app, netdev, flower);
404 	case TC_CLSFLOWER_STATS:
405 		return nfp_flower_get_stats(app, flower);
406 	}
407 
408 	return -EOPNOTSUPP;
409 }
410 
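/* tc offload entry point for the flower app: only flower classifiers on the
 * ingress qdisc with an 802.3 protocol are considered for offload.
 */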
411 int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
412 			u32 handle, __be16 proto, struct tc_to_netdev *tc)
413 {
414 	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
415 		return -EOPNOTSUPP;
416 
417 	if (!eth_proto_is_802_3(proto))
418 		return -EOPNOTSUPP;
419 
420 	if (tc->type != TC_SETUP_CLSFLOWER)
421 		return -EINVAL;
422 
423 	return nfp_flower_repr_offload(app, netdev, tc->cls_flower);
424 }
425