xref: /linux/drivers/net/ethernet/netronome/nfp/bpf/main.c (revision 95298d63c67673c654c08952672d016212b26054)
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <net/pkt_cls.h>

#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
#include "fw.h"
#include "main.h"

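/* Hashtable of "neutral" (host-side) BPF maps referenced by offloaded
 * programs, keyed by the kernel-assigned map ID.
 */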
const struct rhashtable_params nfp_bpf_maps_neutral_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof_field(struct bpf_map, id),
	.key_offset		= offsetof(struct nfp_bpf_neutral_map, map_id),
	.head_offset		= offsetof(struct nfp_bpf_neutral_map, l),
	.automatic_shrinking	= true,
};

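/* BPF offload is reported only if the firmware sets NFP_NET_CFG_CTRL_BPF and
 * the ABI version in the vNIC BAR matches the one parsed from the capability
 * TLVs.  Offload is never reported on big-endian builds.
 */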
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
#ifdef __LITTLE_ENDIAN
	struct nfp_app_bpf *bpf = nn->app->priv;

	return nn->cap & NFP_NET_CFG_CTRL_BPF &&
	       bpf->abi_version &&
	       nn_readb(nn, NFP_NET_CFG_BPF_ABI) == bpf->abi_version;
#else
	return false;
#endif
}

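/* Attach/detach an XDP program.  If a TC cls_bpf program currently owns the
 * offload (BPF is running but not via XDP), refuse with -EBUSY rather than
 * silently replacing it.
 */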
static int
nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
		    struct bpf_prog *prog, struct netlink_ext_ack *extack)
{
	bool running, xdp_running;

	if (!nfp_net_ebpf_capable(nn))
		return -EINVAL;

	running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
	xdp_running = running && nn->xdp_hw.prog;

	if (!prog && !xdp_running)
		return 0;
	if (prog && running && !xdp_running)
		return -EBUSY;

	return nfp_net_bpf_offload(nn, prog, running, extack);
}

static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return nfp_net_ebpf_capable(nn) ? "BPF" : "";
}

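/* Allocate per-vNIC BPF state and cache the program start offset and the
 * "done" target the firmware expects, read from the vNIC control BAR.  The
 * number of ETH table entries must match the number of data vNICs.
 */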
static int
nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
{
	struct nfp_pf *pf = app->pf;
	struct nfp_bpf_vnic *bv;
	int err;

	if (!pf->eth_tbl) {
		nfp_err(pf->cpp, "No ETH table\n");
		return -EINVAL;
	}
	if (pf->max_data_vnics != pf->eth_tbl->count) {
		nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n",
			pf->max_data_vnics, pf->eth_tbl->count);
		return -EINVAL;
	}

	bv = kzalloc(sizeof(*bv), GFP_KERNEL);
	if (!bv)
		return -ENOMEM;
	nn->app_priv = bv;

	err = nfp_app_nic_vnic_alloc(app, nn, id);
	if (err)
		goto err_free_priv;

	bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
	bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

	return 0;
err_free_priv:
	kfree(nn->app_priv);
	return err;
}

static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_bpf_vnic *bv = nn->app_priv;

	WARN_ON(bv->tc_prog);
	kfree(bv);
}

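/* TC block callback.  Only cls_bpf offload requests are handled, and only on
 * chain 0, with protocol ETH_P_ALL, in direct-action mode with no legacy
 * actions attached.
 */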
static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
				     void *type_data, void *cb_priv)
{
	struct tc_cls_bpf_offload *cls_bpf = type_data;
	struct nfp_net *nn = cb_priv;
	struct bpf_prog *oldprog;
	struct nfp_bpf_vnic *bv;
	int err;

	if (type != TC_SETUP_CLSBPF) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only offload of BPF classifiers supported");
		return -EOPNOTSUPP;
	}
	if (!tc_cls_can_offload_and_chain0(nn->dp.netdev, &cls_bpf->common))
		return -EOPNOTSUPP;
	if (!nfp_net_ebpf_capable(nn)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "NFP firmware does not support eBPF offload");
		return -EOPNOTSUPP;
	}
	if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only ETH_P_ALL supported as filter protocol");
		return -EOPNOTSUPP;
	}

	/* Only support TC direct action */
	if (!cls_bpf->exts_integrated ||
	    tcf_exts_has_actions(cls_bpf->exts)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only direct action with no legacy actions supported");
		return -EOPNOTSUPP;
	}

	if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
		return -EOPNOTSUPP;

	bv = nn->app_priv;
	oldprog = cls_bpf->oldprog;

	/* Don't remove if oldprog doesn't match driver's state */
	if (bv->tc_prog != oldprog) {
		oldprog = NULL;
		if (!cls_bpf->prog)
			return 0;
	}

	err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog,
				  cls_bpf->common.extack);
	if (err)
		return err;

	bv->tc_prog = cls_bpf->prog;
	nn->port->tc_offload_cnt = !!bv->tc_prog;
	return 0;
}

static LIST_HEAD(nfp_bpf_block_cb_list);

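/* Bind the cls_bpf block callback when a TC block is offered on the netdev;
 * all other TC setup types are rejected.
 */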
static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
			    enum tc_setup_type type, void *type_data)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &nfp_bpf_block_cb_list,
						  nfp_bpf_setup_tc_block_cb,
						  nn, nn, true);
	default:
		return -EOPNOTSUPP;
	}
}

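/* While BPF offload is active the MTU may not exceed the limit advertised by
 * the firmware: NFP_NET_CFG_BPF_INL_MTU is expressed in 64-byte units, with
 * 32 bytes reserved for overhead.
 */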
static int
nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
{
	struct nfp_net *nn = netdev_priv(netdev);
	unsigned int max_mtu;

	if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return 0;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (new_mtu > max_mtu) {
		nn_info(nn, "BPF offload active, MTU over %u not supported\n",
			max_mtu);
		return -EBUSY;
	}
	return 0;
}

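/* Parse the adjust_head capability TLV: supported flags, the valid offset
 * range, and the adjustments guaranteed to succeed.  If the offsets do not
 * fit in an immediate, the capability is disabled rather than failing probe.
 */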
static int
nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	struct nfp_bpf_cap_tlv_adjust_head __iomem *cap = value;
	struct nfp_cpp *cpp = bpf->app->pf->cpp;

	if (length < sizeof(*cap)) {
		nfp_err(cpp, "truncated adjust_head TLV: %d\n", length);
		return -EINVAL;
	}

	bpf->adjust_head.flags = readl(&cap->flags);
	bpf->adjust_head.off_min = readl(&cap->off_min);
	bpf->adjust_head.off_max = readl(&cap->off_max);
	bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub);
	bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add);

	if (bpf->adjust_head.off_min > bpf->adjust_head.off_max) {
		nfp_err(cpp, "invalid adjust_head TLV: min > max\n");
		return -EINVAL;
	}
	if (!FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_min) ||
	    !FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_max)) {
		nfp_warn(cpp, "disabling adjust_head - driver expects min/max to fit in as immediates\n");
		memset(&bpf->adjust_head, 0, sizeof(bpf->adjust_head));
		return 0;
	}

	return 0;
}

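/* Helper capability TLV: record the firmware entry address for each eBPF
 * helper the firmware implements; unknown helper IDs are silently ignored.
 */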
static int
nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	struct nfp_bpf_cap_tlv_func __iomem *cap = value;

	if (length < sizeof(*cap)) {
		nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length);
		return -EINVAL;
	}

	switch (readl(&cap->func_id)) {
	case BPF_FUNC_map_lookup_elem:
		bpf->helpers.map_lookup = readl(&cap->func_addr);
		break;
	case BPF_FUNC_map_update_elem:
		bpf->helpers.map_update = readl(&cap->func_addr);
		break;
	case BPF_FUNC_map_delete_elem:
		bpf->helpers.map_delete = readl(&cap->func_addr);
		break;
	case BPF_FUNC_perf_event_output:
		bpf->helpers.perf_event_output = readl(&cap->func_addr);
		break;
	}

	return 0;
}

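/* Map capability TLV: supported map types and the limits on map count,
 * element count and key/value/element sizes.
 */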
static int
nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	struct nfp_bpf_cap_tlv_maps __iomem *cap = value;

	if (length < sizeof(*cap)) {
		nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
		return -EINVAL;
	}

	bpf->maps.types = readl(&cap->types);
	bpf->maps.max_maps = readl(&cap->max_maps);
	bpf->maps.max_elems = readl(&cap->max_elems);
	bpf->maps.max_key_sz = readl(&cap->max_key_sz);
	bpf->maps.max_val_sz = readl(&cap->max_val_sz);
	bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);

	return 0;
}

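/* The remaining capabilities are simple presence flags - the TLV payload,
 * if any, is ignored.
 */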
static int
nfp_bpf_parse_cap_random(struct nfp_app_bpf *bpf, void __iomem *value,
			 u32 length)
{
	bpf->pseudo_random = true;
	return 0;
}

static int
nfp_bpf_parse_cap_qsel(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	bpf->queue_select = true;
	return 0;
}

static int
nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	bpf->adjust_tail = true;
	return 0;
}

static int
nfp_bpf_parse_cap_cmsg_multi_ent(struct nfp_app_bpf *bpf, void __iomem *value,
				 u32 length)
{
	bpf->cmsg_multi_ent = true;
	return 0;
}

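/* ABI version TLV: the driver supports versions 2 and 3; anything else
 * clears the version so nfp_net_ebpf_capable() reports no support.
 */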
static int
nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	if (length < 4) {
		nfp_err(bpf->app->cpp, "truncated ABI version TLV: %d\n",
			length);
		return -EINVAL;
	}

	bpf->abi_version = readl(value);
	if (bpf->abi_version < 2 || bpf->abi_version > 3) {
		nfp_warn(bpf->app->cpp, "unsupported BPF ABI version: %d\n",
			 bpf->abi_version);
		bpf->abi_version = 0;
	}

	return 0;
}

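/* Walk the capability table pointed to by the "_abi_bpf_capabilities" rtsym.
 * Entries are TLVs with a 32-bit type and 32-bit length (in bytes) followed
 * by the value; the table must end exactly at the symbol size.
 */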
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
	struct nfp_cpp *cpp = app->pf->cpp;
	struct nfp_cpp_area *area;
	u8 __iomem *mem, *start;

	mem = nfp_rtsym_map(app->pf->rtbl, "_abi_bpf_capabilities", "bpf.cap",
			    8, &area);
	if (IS_ERR(mem))
		return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);

	start = mem;
	while (mem - start + 8 <= nfp_cpp_area_size(area)) {
		u8 __iomem *value;
		u32 type, length;

		type = readl(mem);
		length = readl(mem + 4);
		value = mem + 8;

		mem += 8 + length;
		if (mem - start > nfp_cpp_area_size(area))
			goto err_release_free;

		switch (type) {
		case NFP_BPF_CAP_TYPE_FUNC:
			if (nfp_bpf_parse_cap_func(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ADJUST_HEAD:
			if (nfp_bpf_parse_cap_adjust_head(app->priv, value,
							  length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_MAPS:
			if (nfp_bpf_parse_cap_maps(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_RANDOM:
			if (nfp_bpf_parse_cap_random(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_QUEUE_SELECT:
			if (nfp_bpf_parse_cap_qsel(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ADJUST_TAIL:
			if (nfp_bpf_parse_cap_adjust_tail(app->priv, value,
							  length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ABI_VERSION:
			if (nfp_bpf_parse_cap_abi_version(app->priv, value,
							  length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_CMSG_MULTI_ENT:
			if (nfp_bpf_parse_cap_cmsg_multi_ent(app->priv, value,
							     length))
				goto err_release_free;
			break;
		default:
			nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
			break;
		}
	}
	if (mem - start != nfp_cpp_area_size(area)) {
		nfp_err(cpp, "BPF capabilities left after parsing, parsed:%zd total length:%zu\n",
			mem - start, nfp_cpp_area_size(area));
		goto err_release_free;
	}

	nfp_cpp_area_release_free(area);

	return 0;

err_release_free:
	nfp_err(cpp, "invalid BPF capabilities at offset:%zd\n", mem - start);
	nfp_cpp_area_release_free(area);
	return -EINVAL;
}

static void nfp_bpf_init_capabilities(struct nfp_app_bpf *bpf)
{
	bpf->abi_version = 2; /* Original BPF ABI version */
}

static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_app_bpf *bpf = app->priv;

	return bpf_offload_dev_netdev_register(bpf->bpf_dev, netdev);
}

static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_app_bpf *bpf = app->priv;

	bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev);
}

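/* Check that the control channel MTU can carry the smallest BPF control
 * message, and size the cmsg cache depending on whether the firmware
 * supports multiple entries per message.
 */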
static int nfp_bpf_start(struct nfp_app *app)
{
	struct nfp_app_bpf *bpf = app->priv;

	if (app->ctrl->dp.mtu < nfp_bpf_ctrl_cmsg_min_mtu(bpf)) {
		nfp_err(bpf->app->cpp,
			"ctrl channel MTU below min required %u < %u\n",
			app->ctrl->dp.mtu, nfp_bpf_ctrl_cmsg_min_mtu(bpf));
		return -EINVAL;
	}

	if (bpf->cmsg_multi_ent)
		bpf->cmsg_cache_cnt = nfp_bpf_ctrl_cmsg_cache_cnt(bpf);
	else
		bpf->cmsg_cache_cnt = 1;

	return 0;
}

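/* App init: allocate the app-private BPF state, set up the common control
 * message (ccm) layer and the neutral map hashtable, parse firmware
 * capabilities and register as a BPF offload device.  With ABI version 3+
 * control message key/value sizes follow the advertised map limits and the
 * control vNIC MTU is set to match.
 */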
static int nfp_bpf_init(struct nfp_app *app)
{
	struct nfp_app_bpf *bpf;
	int err;

	bpf = kzalloc(sizeof(*bpf), GFP_KERNEL);
	if (!bpf)
		return -ENOMEM;
	bpf->app = app;
	app->priv = bpf;

	INIT_LIST_HEAD(&bpf->map_list);

	err = nfp_ccm_init(&bpf->ccm, app);
	if (err)
		goto err_free_bpf;

	err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
	if (err)
		goto err_clean_ccm;

	nfp_bpf_init_capabilities(bpf);

	err = nfp_bpf_parse_capabilities(app);
	if (err)
		goto err_free_neutral_maps;

	if (bpf->abi_version < 3) {
		bpf->cmsg_key_sz = CMSG_MAP_KEY_LW * 4;
		bpf->cmsg_val_sz = CMSG_MAP_VALUE_LW * 4;
	} else {
		bpf->cmsg_key_sz = bpf->maps.max_key_sz;
		bpf->cmsg_val_sz = bpf->maps.max_val_sz;
		app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
	}

	bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops, bpf);
	err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
	if (err)
		goto err_free_neutral_maps;

	return 0;

err_free_neutral_maps:
	rhashtable_destroy(&bpf->maps_neutral);
err_clean_ccm:
	nfp_ccm_clean(&bpf->ccm);
err_free_bpf:
	kfree(bpf);
	return err;
}

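/* Tear down in reverse order of init; all maps and offloaded programs are
 * expected to be gone by the time the app is cleaned up.
 */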
static void nfp_bpf_clean(struct nfp_app *app)
{
	struct nfp_app_bpf *bpf = app->priv;

	bpf_offload_dev_destroy(bpf->bpf_dev);
	nfp_ccm_clean(&bpf->ccm);
	WARN_ON(!list_empty(&bpf->map_list));
	WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
	rhashtable_free_and_destroy(&bpf->maps_neutral,
				    nfp_check_rhashtable_empty, NULL);
	kfree(bpf);
}

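/* eBPF app: ties the callbacks above into the nfp_app framework. */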
const struct nfp_app_type app_bpf = {
	.id		= NFP_APP_BPF_NIC,
	.name		= "ebpf",

	.ctrl_cap_mask	= 0,

	.init		= nfp_bpf_init,
	.clean		= nfp_bpf_clean,
	.start		= nfp_bpf_start,

	.check_mtu	= nfp_bpf_check_mtu,

	.extra_cap	= nfp_bpf_extra_cap,

	.ndo_init	= nfp_bpf_ndo_init,
	.ndo_uninit	= nfp_bpf_ndo_uninit,

	.vnic_alloc	= nfp_bpf_vnic_alloc,
	.vnic_free	= nfp_bpf_vnic_free,

	.ctrl_msg_rx	= nfp_bpf_ctrl_msg_rx,
	.ctrl_msg_rx_raw	= nfp_bpf_ctrl_msg_rx_raw,

	.setup_tc	= nfp_bpf_setup_tc,
	.bpf		= nfp_ndo_bpf,
	.xdp_offload	= nfp_bpf_xdp_offload,
};