xref: /linux/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c (revision b7d3826c2ed6c3e626e7ae796c5df2c0d2551c6a)
/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/wait.h>

#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

#define NFP_BPF_TAG_ALLOC_SPAN	(U16_MAX / 4)

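/* Tags are handed out sequentially from tag_alloc_next and reclaimed in
 * order through tag_alloc_last, so the allocator behaves like a ring over
 * the 16-bit tag space.  Treat it as full once more than
 * NFP_BPF_TAG_ALLOC_SPAN (a quarter of the space) tags are outstanding,
 * so that a tag is not reused too soon after a timed-out request.
 */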
static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
{
	u16 used_tags;

	used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;

	return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
}

static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
{
	/* All FW communication for BPF is request-reply.  To make sure we
	 * don't reuse a message ID too early after a timeout, limit the
	 * number of requests in flight.
	 */
	if (nfp_bpf_all_tags_busy(bpf)) {
		cmsg_warn(bpf, "all FW request contexts busy!\n");
		return -EAGAIN;
	}

	WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
	return bpf->tag_alloc_next++;
}

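/* Called with the control message lock held, like all tag allocator
 * operations.  Clear @tag and advance tag_alloc_last past any tags that
 * have already been returned, keeping the busy window contiguous.
 */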
static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
{
	WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));

	while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
	       bpf->tag_alloc_last != bpf->tag_alloc_next)
		bpf->tag_alloc_last++;
}

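/* Allocate a control message skb and reserve @size bytes of payload for
 * the caller to fill in.
 */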
static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
	struct sk_buff *skb;

	skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
	if (!skb)
		return NULL;
	skb_put(skb, size);

	return skb;
}

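/* Map operation messages carry n key/value pairs after the fixed header;
 * per-element key and value sizes are given by bpf->cmsg_key_sz and
 * bpf->cmsg_val_sz.
 */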
static unsigned int
nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf *bpf, unsigned int n)
{
	unsigned int size;

	size = sizeof(struct cmsg_req_map_op);
	size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;

	return size;
}

static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
	return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n));
}

static unsigned int
nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
{
	unsigned int size;

	size = sizeof(struct cmsg_reply_map_op);
	size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;

	return size;
}

static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
{
	struct cmsg_hdr *hdr;

	hdr = (struct cmsg_hdr *)skb->data;

	return hdr->type;
}

static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
{
	struct cmsg_hdr *hdr;

	hdr = (struct cmsg_hdr *)skb->data;

	return be16_to_cpu(hdr->tag);
}

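/* Find and unlink the queued reply matching @tag, freeing the tag on a
 * hit.  Caller must hold the control message lock.
 */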
static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
	unsigned int msg_tag;
	struct sk_buff *skb;

	skb_queue_walk(&bpf->cmsg_replies, skb) {
		msg_tag = nfp_bpf_cmsg_get_tag(skb);
		if (msg_tag == tag) {
			nfp_bpf_free_tag(bpf, tag);
			__skb_unlink(skb, &bpf->cmsg_replies);
			return skb;
		}
	}

	return NULL;
}

static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(bpf->app->ctrl);
	skb = __nfp_bpf_reply(bpf, tag);
	nfp_ctrl_unlock(bpf->app->ctrl);

	return skb;
}

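/* Like nfp_bpf_reply(), but if no reply has arrived free the tag anyway.
 * Used after a timeout so that a late reply cannot match a recycled tag.
 */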
static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(bpf->app->ctrl);
	skb = __nfp_bpf_reply(bpf, tag);
	if (!skb)
		nfp_bpf_free_tag(bpf, tag);
	nfp_ctrl_unlock(bpf->app->ctrl);

	return skb;
}

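/* Wait for the reply to request @tag: busy-poll briefly for fast replies,
 * then sleep on the reply wait queue with a 5 second timeout.
 */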
static struct sk_buff *
nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
			int tag)
{
	struct sk_buff *skb;
	int i, err;

	for (i = 0; i < 50; i++) {
		udelay(4);
		skb = nfp_bpf_reply(bpf, tag);
		if (skb)
			return skb;
	}

	err = wait_event_interruptible_timeout(bpf->cmsg_wq,
					       skb = nfp_bpf_reply(bpf, tag),
					       msecs_to_jiffies(5000));
	/* We didn't get a response - check one last time and atomically
	 * drop the tag even if no response is matched.
	 */
	if (!skb)
		skb = nfp_bpf_reply_drop_tag(bpf, tag);
	if (err < 0) {
		cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
			  err == -ERESTARTSYS ? "interrupted" : "error",
			  type, err);
		return ERR_PTR(err);
	}
	if (!skb) {
		cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
			  type);
		return ERR_PTR(-ETIMEDOUT);
	}

	return skb;
}

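/* Send a request and wait for the reply: allocate a tag, fill in the
 * common header, transmit under the control message lock, then validate
 * the reply's type and, unless @reply_size is 0, its length.  Consumes
 * @skb and returns the reply skb or an ERR_PTR().
 */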
static struct sk_buff *
nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
			 enum nfp_bpf_cmsg_type type, unsigned int reply_size)
{
	struct cmsg_hdr *hdr;
	int tag;

	nfp_ctrl_lock(bpf->app->ctrl);
	tag = nfp_bpf_alloc_tag(bpf);
	if (tag < 0) {
		nfp_ctrl_unlock(bpf->app->ctrl);
		dev_kfree_skb_any(skb);
		return ERR_PTR(tag);
	}

	hdr = (void *)skb->data;
	hdr->ver = CMSG_MAP_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(tag);

	__nfp_app_ctrl_tx(bpf->app, skb);

	nfp_ctrl_unlock(bpf->app->ctrl);

	skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
	if (IS_ERR(skb))
		return skb;

	hdr = (struct cmsg_hdr *)skb->data;
	if (hdr->type != __CMSG_REPLY(type)) {
		cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
			  hdr->type, __CMSG_REPLY(type));
		goto err_free;
	}
	/* 0 reply_size means caller will do the validation */
	if (reply_size && skb->len != reply_size) {
		cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
			  type, skb->len, reply_size);
		goto err_free;
	}

	return skb;
err_free:
	dev_kfree_skb_any(skb);
	return ERR_PTR(-EIO);
}

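/* Translate FW map op return codes into errnos. */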
static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
			 struct cmsg_reply_map_simple *reply)
{
	static const int res_table[] = {
		[CMSG_RC_SUCCESS]	= 0,
		[CMSG_RC_ERR_MAP_FD]	= -EBADFD,
		[CMSG_RC_ERR_MAP_NOENT]	= -ENOENT,
		[CMSG_RC_ERR_MAP_ERR]	= -EINVAL,
		[CMSG_RC_ERR_MAP_PARSE]	= -EIO,
		[CMSG_RC_ERR_MAP_EXIST]	= -EEXIST,
		[CMSG_RC_ERR_MAP_NOMEM]	= -ENOMEM,
		[CMSG_RC_ERR_MAP_E2BIG]	= -E2BIG,
	};
	u32 rc;

	rc = be32_to_cpu(reply->rc);
	if (rc >= ARRAY_SIZE(res_table)) {
		cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
		return -EIO;
	}

	return res_table[rc];
}

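/* Ask the FW to allocate a map table.  Returns the FW-assigned table id
 * (tid) on success, or a negative errno.
 */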
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
{
	struct cmsg_reply_map_alloc_tbl *reply;
	struct cmsg_req_map_alloc_tbl *req;
	struct sk_buff *skb;
	u32 tid;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->key_size = cpu_to_be32(map->key_size);
	req->value_size = cpu_to_be32(map->value_size);
	req->max_entries = cpu_to_be32(map->max_entries);
	req->map_type = cpu_to_be32(map->map_type);
	req->map_flags = 0;

	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
				       sizeof(*reply));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	tid = be32_to_cpu(reply->tid);
	dev_consume_skb_any(skb);

	return tid;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

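/* Ask the FW to release a map table.  Failures only leak the FW-side
 * table, so they are logged rather than propagated.
 */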
void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
{
	struct cmsg_reply_map_free_tbl *reply;
	struct cmsg_req_map_free_tbl *req;
	struct sk_buff *skb;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb) {
		cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
		return;
	}

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);

	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
				       sizeof(*reply));
	if (IS_ERR(skb)) {
		cmsg_warn(bpf, "leaking map - I/O error\n");
		return;
	}

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);

	dev_consume_skb_any(skb);
}

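/* Element @n's key and value are packed back to back in a map op message:
 * key 0, value 0, key 1, value 1, ...  These helpers return pointers to
 * the n-th key/value in a request or reply.
 */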
static void *
nfp_bpf_ctrl_req_key(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
		     unsigned int n)
{
	return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_req_val(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
		     unsigned int n)
{
	return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_reply_key(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
		       unsigned int n)
{
	return &reply->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
}

static void *
nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
		       unsigned int n)
{
	return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
}

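/* Perform a single-element map operation: build a one-entry request with
 * the given inputs, exchange it with the FW and copy back any outputs.
 */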
static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
		      enum nfp_bpf_cmsg_type op,
		      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
	struct nfp_app_bpf *bpf = nfp_map->bpf;
	struct bpf_map *map = &offmap->map;
	struct cmsg_reply_map_op *reply;
	struct cmsg_req_map_op *req;
	struct sk_buff *skb;
	int err;

	/* FW messages have no space for more than 32 bits of flags */
	if (flags >> 32)
		return -EOPNOTSUPP;

	skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);
	req->count = cpu_to_be32(1);
	req->flags = cpu_to_be32(flags);

	/* Copy inputs */
	if (key)
		memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size);
	if (value)
		memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
		       map->value_size);

	skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
				       nfp_bpf_cmsg_map_reply_size(bpf, 1));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	/* Copy outputs */
	if (out_key)
		memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0),
		       map->key_size);
	if (out_value)
		memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0),
		       map->value_size);

	dev_consume_skb_any(skb);

	return 0;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}

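/* Public map op entry points - each is a single-element entry op with the
 * matching control message type.
 */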
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
				     key, value, flags, NULL, NULL);
}

int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
				     key, NULL, 0, NULL, NULL);
}

int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
				     key, NULL, 0, NULL, value);
}

int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
				     NULL, NULL, 0, next_key, NULL);
}

int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
				     key, NULL, 0, next_key, NULL);
}

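/* The control channel MTU must accommodate the largest single-element map
 * request and reply, and never be less than the default control message
 * size.
 */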
unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
{
	return max3((unsigned int)NFP_NET_DEFAULT_MTU,
		    nfp_bpf_cmsg_map_req_size(bpf, 1),
		    nfp_bpf_cmsg_map_reply_size(bpf, 1));
}

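/* Control message RX handler.  BPF events are forwarded straight to perf
 * event output; replies are queued on cmsg_replies and the waiters woken,
 * but only if someone still holds the matching tag.
 */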
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_app_bpf *bpf = app->priv;
	unsigned int tag;

	if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
		cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
		goto err_free;
	}

	if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
		if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
			dev_consume_skb_any(skb);
		else
			dev_kfree_skb_any(skb);
		return;
	}

	nfp_ctrl_lock(bpf->app->ctrl);

	tag = nfp_bpf_cmsg_get_tag(skb);
	if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
		cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
			  tag);
		goto err_unlock;
	}

	__skb_queue_tail(&bpf->cmsg_replies, skb);
	wake_up_interruptible_all(&bpf->cmsg_wq);

	nfp_ctrl_unlock(bpf->app->ctrl);

	return;
err_unlock:
	nfp_ctrl_unlock(bpf->app->ctrl);
err_free:
	dev_kfree_skb_any(skb);
}

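/* RX handler for control messages delivered in a raw buffer rather than
 * an skb.  Only BPF event messages can be handled here; request-reply
 * traffic is not expected on this path.
 */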
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
{
	struct nfp_app_bpf *bpf = app->priv;
	const struct cmsg_hdr *hdr = data;

	if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
		cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
		return;
	}

	if (hdr->type == CMSG_TYPE_BPF_EVENT)
		nfp_bpf_event_output(bpf, data, len);
	else
		cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
			  hdr->type);
}