xref: /linux/drivers/net/ethernet/netronome/nfp/bpf/offload.c (revision e9f0878c4b2004ac19581274c1ae4c61ae3ca70e)
/*
 * Copyright (C) 2016-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * offload.c
 * Netronome network device driver: BPF offload functions for PF and VF
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

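/* Track a neutral map used by an offloaded program.  If another offloaded
 * program already tracks this map only the record's refcount is bumped,
 * otherwise a reference to the map is taken and a new record is inserted
 * into the maps_neutral hashtable.
 */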
static int
nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		   struct bpf_map *map)
{
	struct nfp_bpf_neutral_map *record;
	int err;

	/* Map record paths are entered via ndo, update side is protected. */
	ASSERT_RTNL();

	/* Reuse path - another offloaded program is already tracking this map. */
	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
					nfp_bpf_maps_neutral_params);
	if (record) {
		nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
		record->count++;
		return 0;
	}

	/* Grab a single ref to the map for our record.  The prog destroy ndo
	 * happens after free_used_maps().
	 */
	map = bpf_map_inc(map, false);
	if (IS_ERR(map))
		return PTR_ERR(map);

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record) {
		err = -ENOMEM;
		goto err_map_put;
	}

	record->ptr = map;
	record->map_id = map->id;
	record->count = 1;

	err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
				     nfp_bpf_maps_neutral_params);
	if (err)
		goto err_free_rec;

	nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;

	return 0;

err_free_rec:
	kfree(record);
err_map_put:
	bpf_map_put(map);
	return err;
}

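/* Drop this program's references to neutral maps.  Records still used by
 * other offloaded programs only have their refcount decremented; the rest
 * are removed from the hashtable and freed after an RCU grace period,
 * since the perf event path looks records up under rcu_read_lock().
 */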
static void
nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
{
	bool freed = false;
	int i;

	ASSERT_RTNL();

	for (i = 0; i < nfp_prog->map_records_cnt; i++) {
		if (--nfp_prog->map_records[i]->count) {
			nfp_prog->map_records[i] = NULL;
			continue;
		}

		WARN_ON(rhashtable_remove_fast(&bpf->maps_neutral,
					       &nfp_prog->map_records[i]->l,
					       nfp_bpf_maps_neutral_params));
		freed = true;
	}

	if (freed) {
		synchronize_rcu();

		for (i = 0; i < nfp_prog->map_records_cnt; i++)
			if (nfp_prog->map_records[i]) {
				bpf_map_put(nfp_prog->map_records[i]->ptr);
				kfree(nfp_prog->map_records[i]);
			}
	}

	kfree(nfp_prog->map_records);
	nfp_prog->map_records = NULL;
	nfp_prog->map_records_cnt = 0;
}

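/* Record all offload-neutral maps used by @prog, so that device map
 * pointers reported in perf events can be translated back to host maps.
 */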
static int
nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		    struct bpf_prog *prog)
{
	int i, cnt, err;

	/* Quickly count the maps we will have to remember */
	cnt = 0;
	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
			cnt++;
	if (!cnt)
		return 0;

	nfp_prog->map_records = kmalloc_array(cnt,
					      sizeof(nfp_prog->map_records[0]),
					      GFP_KERNEL);
	if (!nfp_prog->map_records)
		return -ENOMEM;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
			err = nfp_map_ptr_record(bpf, nfp_prog,
						 prog->aux->used_maps[i]);
			if (err) {
				nfp_map_ptrs_forget(bpf, nfp_prog);
				return err;
			}
		}
	WARN_ON(cnt != nfp_prog->map_records_cnt);

	return 0;
}

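/* Allocate one nfp_insn_meta per BPF instruction and link them into
 * nfp_prog->insns for the later verification and JIT passes to annotate.
 */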
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	struct nfp_insn_meta *meta;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;
		if (is_mbpf_alu(meta)) {
			meta->umin_src = U64_MAX;
			meta->umin_dst = U64_MAX;
		}

		list_add_tail(&meta->l, &nfp_prog->insns);
	}

	nfp_bpf_jit_prepare(nfp_prog, cnt);

	return 0;
}

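/* Free the instruction metadata list together with the program state. */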
static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

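/* BPF_OFFLOAD_VERIFIER_PREP handler - allocate the driver-private program
 * state and point the verifier at the NFP callbacks before the kernel
 * verifier runs.
 */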
static int
nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
		      struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->verifier.prog;
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;
	prog->aux->offload->dev_priv = nfp_prog;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->type = prog->type;
	nfp_prog->bpf = app->priv;

	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
	if (ret)
		goto err_free;

	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
	bpf->verifier.ops = &nfp_bpf_analyzer_ops;

	return 0;

err_free:
	nfp_prog_free(nfp_prog);

	return ret;
}

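/* BPF_OFFLOAD_TRANSLATE handler - check the program against the stack and
 * instruction-count limits advertised by the firmware, then JIT it into
 * the NFP instruction buffer.
 */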
static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int stack_size;
	unsigned int max_instr;
	int err;

	stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
	if (prog->aux->stack_depth > stack_size) {
		nn_info(nn, "stack too large: program %uB > FW stack %uB\n",
			prog->aux->stack_depth, stack_size);
		return -EOPNOTSUPP;
	}
	nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

	nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
	if (!nfp_prog->prog)
		return -ENOMEM;

	err = nfp_bpf_jit(nfp_prog);
	if (err)
		return err;

	prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
	prog->aux->offload->jited_image = nfp_prog->prog;

	return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
}

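/* BPF_OFFLOAD_DESTROY handler - release the JITed image, the map records
 * and the instruction metadata.
 */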
static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

	kvfree(nfp_prog->prog);
	nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
	nfp_prog_free(nfp_prog);

	return 0;
}

/* The atomic engine requires values to be in big endian, so byte swap
 * the value words used with xadd.
 */
static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
{
	u32 *word = value;
	unsigned int i;

	for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
		if (nfp_map->use_map[i] == NFP_MAP_USE_ATOMIC_CNT)
			word[i] = (__force u32)cpu_to_be32(word[i]);
}

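/* Byte swap atomic counter words back to host byte order after reading
 * the value from the device.
 */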
static int
nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value)
{
	int err;

	err = nfp_bpf_ctrl_lookup_entry(offmap, key, value);
	if (err)
		return err;

	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	return 0;
}

static int
nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value, u64 flags)
{
	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}

static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
			 void *key, void *next_key)
{
	if (!key)
		return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
	return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
}

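/* Array map entries cannot be deleted; mirror the core kernel behaviour
 * and reject the request before it reaches the device.
 */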
static int
nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
		return -EINVAL;
	return nfp_bpf_ctrl_del_entry(offmap, key);
}

static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
	.map_get_next_key	= nfp_bpf_map_get_next_key,
	.map_lookup_elem	= nfp_bpf_map_lookup_entry,
	.map_update_elem	= nfp_bpf_map_update_entry,
	.map_delete_elem	= nfp_bpf_map_delete_elem,
};

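/* BPF_OFFLOAD_MAP_ALLOC handler - validate the map against the limits the
 * firmware advertised (type, map and element counts, key/value sizes) and
 * ask the device for a table id over the control message channel.
 */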
static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map;
	unsigned int use_map_size;
	long long int res;

	if (!bpf->maps.types)
		return -EOPNOTSUPP;

	if (offmap->map.map_flags ||
	    offmap->map.numa_node != NUMA_NO_NODE) {
		pr_info("map flags are not supported\n");
		return -EINVAL;
	}

	if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
		pr_info("map type not supported\n");
		return -EOPNOTSUPP;
	}
	if (bpf->maps.max_maps == bpf->maps_in_use) {
		pr_info("too many maps for a device\n");
		return -ENOMEM;
	}
	if (bpf->maps.max_elems - bpf->map_elems_in_use <
	    offmap->map.max_entries) {
		pr_info("map with too many elements: %u, left: %u\n",
			offmap->map.max_entries,
			bpf->maps.max_elems - bpf->map_elems_in_use);
		return -ENOMEM;
	}

	if (round_up(offmap->map.key_size, 8) +
	    round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
		pr_info("map elements too large: %u, FW max element size (key+value): %u\n",
			round_up(offmap->map.key_size, 8) +
			round_up(offmap->map.value_size, 8),
			bpf->maps.max_elem_sz);
		return -ENOMEM;
	}
	if (offmap->map.key_size > bpf->maps.max_key_sz) {
		pr_info("map key size %u, FW max is %u\n",
			offmap->map.key_size, bpf->maps.max_key_sz);
		return -ENOMEM;
	}
	if (offmap->map.value_size > bpf->maps.max_val_sz) {
		pr_info("map value size %u, FW max is %u\n",
			offmap->map.value_size, bpf->maps.max_val_sz);
		return -ENOMEM;
	}

	use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
		       FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]);

	nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
	if (!nfp_map)
		return -ENOMEM;

	offmap->dev_priv = nfp_map;
	nfp_map->offmap = offmap;
	nfp_map->bpf = bpf;

	res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
	if (res < 0) {
		kfree(nfp_map);
		return res;
	}

	nfp_map->tid = res;
	offmap->dev_ops = &nfp_bpf_map_ops;
	bpf->maps_in_use++;
	bpf->map_elems_in_use += offmap->map.max_entries;
	list_add_tail(&nfp_map->l, &bpf->map_list);

	return 0;
}

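/* BPF_OFFLOAD_MAP_FREE handler - release the device table and undo the
 * host-side accounting.
 */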
static int
nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;

	nfp_bpf_ctrl_free_map(bpf, nfp_map);
	list_del_init(&nfp_map->l);
	bpf->map_elems_in_use -= offmap->map.max_entries;
	bpf->maps_in_use--;
	kfree(nfp_map);

	return 0;
}

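/* Entry point for all netdev_bpf offload commands, dispatched here from
 * the nfp_app ->bpf() callback.
 */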
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case BPF_OFFLOAD_VERIFIER_PREP:
		return nfp_bpf_verifier_prep(app, nn, bpf);
	case BPF_OFFLOAD_TRANSLATE:
		return nfp_bpf_translate(nn, bpf->offload.prog);
	case BPF_OFFLOAD_DESTROY:
		return nfp_bpf_destroy(nn, bpf->offload.prog);
	case BPF_OFFLOAD_MAP_ALLOC:
		return nfp_bpf_map_alloc(app->priv, bpf->offmap);
	case BPF_OFFLOAD_MAP_FREE:
		return nfp_bpf_map_free(app->priv, bpf->offmap);
	default:
		return -EINVAL;
	}
}

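/* Copy callback for bpf_event_output(); the event payload is already in
 * kernel memory, so a plain memcpy suffices.  Returning 0 signals that
 * all bytes were copied.
 */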
static unsigned long
nfp_bpf_perf_event_copy(void *dst, const void *src,
			unsigned long off, unsigned long len)
{
	memcpy(dst, src + off, len);
	return 0;
}

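/* Handle a perf event control message from the firmware: validate lengths
 * and the ABI version, translate the device's map pointer back to a host
 * map via the maps_neutral table, then forward packet data and metadata
 * to the perf ring.
 */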
int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len)
{
	struct cmsg_bpf_event *cbe = (void *)data;
	struct nfp_bpf_neutral_map *record;
	u32 pkt_size, data_size, map_id;
	u64 map_id_full;

	if (len < sizeof(struct cmsg_bpf_event))
		return -EINVAL;

	pkt_size = be32_to_cpu(cbe->pkt_size);
	data_size = be32_to_cpu(cbe->data_size);
	map_id_full = be64_to_cpu(cbe->map_ptr);
	map_id = map_id_full;

	if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
		return -EINVAL;
	if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
		return -EINVAL;

	rcu_read_lock();
	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id,
					nfp_bpf_maps_neutral_params);
	if (!record || map_id_full > U32_MAX) {
		rcu_read_unlock();
		cmsg_warn(bpf, "perf event: map id %llu (0x%llx) not recognized, dropping event\n",
			  map_id_full, map_id_full);
		return -EINVAL;
	}

	bpf_event_output(record->ptr, be32_to_cpu(cbe->cpu_id),
			 &cbe->data[round_up(pkt_size, 4)], data_size,
			 cbe->data, pkt_size, nfp_bpf_perf_event_copy);
	rcu_read_unlock();

	return 0;
}

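/* DMA the relocated program image to the device and kick a firmware
 * reload.  The image buffer only needs to live for the duration of the
 * reconfig; it is unmapped and freed once the command completes.
 */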
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
		 struct netlink_ext_ack *extack)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_mtu;
	dma_addr_t dma_addr;
	void *img;
	int err;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (max_mtu < nn->dp.netdev->mtu) {
		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
		return -EOPNOTSUPP;
	}

	img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
	if (IS_ERR(img))
		return PTR_ERR(img);

	dma_addr = dma_map_single(nn->dp.dev, img,
				  nfp_prog->prog_len * sizeof(u64),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(nn->dp.dev, dma_addr)) {
		kfree(img);
		return -ENOMEM;
	}

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "FW command error while loading BPF");

	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
			 DMA_TO_DEVICE);
	kfree(img);

	return err;
}

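/* Flip the datapath control bit which makes the firmware run packets
 * through the loaded BPF program.
 */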
static void
nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
{
	int err;

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "FW command error while enabling BPF");
}

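/* Disable the BPF datapath; a no-op if no program is currently enabled. */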
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

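/* Attach, replace or detach a program on a vNIC.  Replacing a live
 * program requires firmware support for atomic reload
 * (NFP_NET_BPF_CAP_RELO).
 */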
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack)
{
	int err;

	if (prog && !bpf_offload_dev_match(prog, nn->dp.netdev))
		return -EINVAL;

	if (prog && old_prog) {
		u8 cap;

		cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
		if (!(cap & NFP_NET_BPF_CAP_RELO)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "FW does not support live reload");
			return -EBUSY;
		}
	}

	/* Something else is loaded, different program type? */
	if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return -EBUSY;

	if (old_prog && !prog)
		return nfp_net_bpf_stop(nn);

	err = nfp_net_bpf_load(nn, prog, extack);
	if (err)
		return err;

	if (!old_prog)
		nfp_net_bpf_start(nn, extack);

	return 0;
}