xref: /linux/drivers/net/ethernet/netronome/nfp/bpf/verifier.c (revision b7d3826c2ed6c3e626e7ae796c5df2c0d2551c6a)
/*
 * Copyright (C) 2016-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pkt_cls.h>

#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

#define pr_vlog(env, fmt, ...)	\
	bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)

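/* Find the insn_meta corresponding to @insn_idx.  The meta list has no
 * random access, so walk from whichever known point is closest: the
 * previously returned @meta, or the first/last meta of the program.
 */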
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  unsigned int insn_idx, unsigned int n_insns)
{
	unsigned int forward, backward, i;

	backward = meta->n - insn_idx;
	forward = insn_idx - meta->n;

	if (min(forward, backward) > n_insns - insn_idx - 1) {
		backward = n_insns - insn_idx - 1;
		meta = nfp_prog_last_meta(nfp_prog);
	}
	if (min(forward, backward) > insn_idx && backward > insn_idx) {
		forward = insn_idx;
		meta = nfp_prog_first_meta(nfp_prog);
	}

	if (forward < backward)
		for (i = 0; i < forward; i++)
			meta = nfp_meta_next(meta);
	else
		for (i = 0; i < backward; i++)
			meta = nfp_meta_prev(meta);

	return meta;
}

static void
nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		       struct nfp_insn_meta *meta,
		       const struct bpf_reg_state *reg2)
{
	unsigned int location = UINT_MAX;
	int imm;

	/* The datapath can usually guarantee how much head adjustment can
	 * be done without any runtime checks.  Optimize the simple case
	 * where there is only one adjust_head call with a constant offset.
	 */
	if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
		goto exit_set_location;
	imm = reg2->var_off.value;
	/* The translator will skip all checks, so we must guarantee the
	 * minimum packet length ourselves.
	 */
	if (imm > ETH_ZLEN - ETH_HLEN)
		goto exit_set_location;
	if (imm > (int)bpf->adjust_head.guaranteed_add ||
	    imm < -bpf->adjust_head.guaranteed_sub)
		goto exit_set_location;

	if (nfp_prog->adjust_head_location) {
		/* Only one call per program allowed */
		if (nfp_prog->adjust_head_location != meta->n)
			goto exit_set_location;

		if (meta->arg2.reg.var_off.value != imm)
			goto exit_set_location;
	}

	location = meta->n;
exit_set_location:
	nfp_prog->adjust_head_location = location;
}

static bool
nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
		     const struct bpf_reg_state *reg,
		     struct nfp_bpf_reg_state *old_arg)
{
	s64 off, old_off;

	if (reg->type != PTR_TO_STACK) {
		pr_vlog(env, "%s: unsupported ptr type %d\n",
			fname, reg->type);
		return false;
	}
	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "%s: variable pointer\n", fname);
		return false;
	}

	off = reg->var_off.value + reg->off;
	if (-off % 4) {
		pr_vlog(env, "%s: unaligned stack pointer %lld\n", fname, -off);
		return false;
	}

	/* The rest of the checks only apply if we re-parse the same insn */
	if (!old_arg)
		return true;

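	/* Flag if the stack offset differs between program paths; the
	 * argument can then no longer be treated as a single fixed offset.
	 */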
	old_off = old_arg->reg.var_off.value + old_arg->reg.off;
	old_arg->var_off |= off != old_off;

	return true;
}

static bool
nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env,
		    struct nfp_insn_meta *meta,
		    u32 helper_tgt, const struct bpf_reg_state *reg1)
{
	if (!helper_tgt) {
		pr_vlog(env, "%s: not supported by FW\n", fname);
		return false;
	}

	return true;
}

static int
nfp_bpf_check_helper_call(struct nfp_prog *nfp_prog,
			  struct bpf_verifier_env *env,
			  struct nfp_insn_meta *meta)
{
	const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
	const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
	const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
	struct nfp_app_bpf *bpf = nfp_prog->bpf;
	u32 func_id = meta->insn.imm;

	switch (func_id) {
	case BPF_FUNC_xdp_adjust_head:
		if (!bpf->adjust_head.off_max) {
			pr_vlog(env, "adjust_head not supported by FW\n");
			return -EOPNOTSUPP;
		}
		if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
			pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
			return -EOPNOTSUPP;
		}

		nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
		break;

	case BPF_FUNC_xdp_adjust_tail:
		if (!bpf->adjust_tail) {
			pr_vlog(env, "adjust_tail not supported by FW\n");
			return -EOPNOTSUPP;
		}
		break;

	case BPF_FUNC_map_lookup_elem:
		if (!nfp_bpf_map_call_ok("map_lookup", env, meta,
					 bpf->helpers.map_lookup, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_lookup", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_map_update_elem:
		if (!nfp_bpf_map_call_ok("map_update", env, meta,
					 bpf->helpers.map_update, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_update", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL) ||
		    !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_map_delete_elem:
		if (!nfp_bpf_map_call_ok("map_delete", env, meta,
					 bpf->helpers.map_delete, reg1) ||
		    !nfp_bpf_stack_arg_ok("map_delete", env, reg2,
					  meta->func_id ? &meta->arg2 : NULL))
			return -EOPNOTSUPP;
		break;

	case BPF_FUNC_get_prandom_u32:
		if (bpf->pseudo_random)
			break;
		pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n");
		return -EOPNOTSUPP;

	case BPF_FUNC_perf_event_output:
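		/* The handling below relies on the driver's NFP_BPF_* pointer
		 * type values matching the kernel's register type values;
		 * keep them in sync at build time.
		 */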
		BUILD_BUG_ON(NFP_BPF_SCALAR_VALUE != SCALAR_VALUE ||
			     NFP_BPF_MAP_VALUE != PTR_TO_MAP_VALUE ||
			     NFP_BPF_STACK != PTR_TO_STACK ||
			     NFP_BPF_PACKET_DATA != PTR_TO_PACKET);

		if (!bpf->helpers.perf_event_output) {
			pr_vlog(env, "event_output: not supported by FW\n");
			return -EOPNOTSUPP;
		}

		/* Force current CPU to make sure we can report the event
		 * wherever we get the control message from FW.
		 */
		if (reg3->var_off.mask & BPF_F_INDEX_MASK ||
		    (reg3->var_off.value & BPF_F_INDEX_MASK) !=
		    BPF_F_CURRENT_CPU) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg3->var_off);
			pr_vlog(env, "event_output: must use BPF_F_CURRENT_CPU, var_off: %s\n",
				tn_buf);
			return -EOPNOTSUPP;
		}

		/* To save space in meta we only track the 4th argument (the
		 * data pointer); shove it into arg1.
		 */
		reg1 = cur_regs(env) + BPF_REG_4;

		if (reg1->type != SCALAR_VALUE /* NULL ptr */ &&
		    reg1->type != PTR_TO_STACK &&
		    reg1->type != PTR_TO_MAP_VALUE &&
		    reg1->type != PTR_TO_PACKET) {
			pr_vlog(env, "event_output: unsupported ptr type: %d\n",
				reg1->type);
			return -EOPNOTSUPP;
		}

		if (reg1->type == PTR_TO_STACK &&
		    !nfp_bpf_stack_arg_ok("event_output", env, reg1, NULL))
			return -EOPNOTSUPP;

		/* Warn the user that on offload NFP may return success even
		 * if the map is not going to accept the event, since event
		 * output is fully asynchronous and the device won't know the
		 * state of the map.  There is also a FW limitation on the
		 * event length.
		 *
		 * Lost events will not show up on the perf ring; the driver
		 * won't see them at all.  Events may also get reordered.
		 */
		dev_warn_once(&nfp_prog->bpf->app->pf->pdev->dev,
			      "bpf: note: return codes and behavior of bpf_event_output() helper differ for offloaded programs!\n");
		pr_vlog(env, "warning: return codes and behavior of event_output helper differ for offload!\n");

		if (!meta->func_id)
			break;

		if (reg1->type != meta->arg1.type) {
			pr_vlog(env, "event_output: ptr type changed: %d %d\n",
				meta->arg1.type, reg1->type);
			return -EINVAL;
		}
		break;

	default:
		pr_vlog(env, "unsupported function id: %d\n", func_id);
		return -EOPNOTSUPP;
	}

	meta->func_id = func_id;
	meta->arg1 = *reg1;
	meta->arg2.reg = *reg2;

	return 0;
}

static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
		   struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
	u64 imm;

	if (nfp_prog->type == BPF_PROG_TYPE_XDP)
		return 0;

	if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
		pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
			reg0->type, tn_buf);
		return -EINVAL;
	}

	imm = reg0->var_off.value;
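	/* For TC programs only certain constant return codes are supported;
	 * reject values in the [0, TC_ACT_REDIRECT] range other than
	 * TC_ACT_SHOT, TC_ACT_STOLEN and TC_ACT_QUEUED.
	 */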
	if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
	    imm <= TC_ACT_REDIRECT &&
	    imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
	    imm != TC_ACT_QUEUED) {
		pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
			reg0->type, imm);
		return -EINVAL;
	}

	return 0;
}

static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
			   struct nfp_insn_meta *meta,
			   const struct bpf_reg_state *reg,
			   struct bpf_verifier_env *env)
{
	s32 old_off, new_off;

	if (reg->frameno != env->cur_state->curframe)
		meta->flags |= FLAG_INSN_PTR_CALLER_STACK_FRAME;

	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "variable ptr stack access\n");
		return -EINVAL;
	}

	if (meta->ptr.type == NOT_INIT)
		return 0;

	old_off = meta->ptr.off + meta->ptr.var_off.value;
	new_off = reg->off + reg->var_off.value;

	meta->ptr_not_const |= old_off != new_off;

	if (!meta->ptr_not_const)
		return 0;

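	/* A non-constant stack pointer is still OK as long as all paths
	 * agree on the offset within the 4-byte stack word.
	 */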
	if (old_off % 4 == new_off % 4)
		return 0;

	pr_vlog(env, "stack access changed location was:%d is:%d\n",
		old_off, new_off);
	return -EINVAL;
}

static const char *nfp_bpf_map_use_name(enum nfp_bpf_map_use use)
{
	static const char * const names[] = {
		[NFP_MAP_UNUSED]	= "unused",
		[NFP_MAP_USE_READ]	= "read",
		[NFP_MAP_USE_WRITE]	= "write",
		[NFP_MAP_USE_ATOMIC_CNT] = "atomic",
	};

	if (use >= ARRAY_SIZE(names) || !names[use])
		return "unknown";
	return names[use];
}

static int
nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
			  struct nfp_bpf_map *nfp_map,
			  unsigned int off, enum nfp_bpf_map_use use)
{
	if (nfp_map->use_map[off / 4] != NFP_MAP_UNUSED &&
	    nfp_map->use_map[off / 4] != use) {
		pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
			nfp_bpf_map_use_name(nfp_map->use_map[off / 4]),
			nfp_bpf_map_use_name(use), off);
		return -EOPNOTSUPP;
	}

	nfp_map->use_map[off / 4] = use;

	return 0;
}

static int
nfp_bpf_map_mark_used(struct bpf_verifier_env *env, struct nfp_insn_meta *meta,
		      const struct bpf_reg_state *reg,
		      enum nfp_bpf_map_use use)
{
	struct bpf_offloaded_map *offmap;
	struct nfp_bpf_map *nfp_map;
	unsigned int size, off;
	int i, err;

	if (!tnum_is_const(reg->var_off)) {
		pr_vlog(env, "map value offset is variable\n");
		return -EOPNOTSUPP;
	}

	off = reg->var_off.value + meta->insn.off + reg->off;
	size = BPF_LDST_BYTES(&meta->insn);
	offmap = map_to_offmap(reg->map_ptr);
	nfp_map = offmap->dev_priv;

	if (off + size > offmap->map.value_size) {
		pr_vlog(env, "map value access out-of-bounds\n");
		return -EINVAL;
	}

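	/* Mark every 4-byte word of the value touched by this access; the
	 * step size lands each iteration on the next word boundary.
	 */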
	for (i = 0; i < size; i += 4 - (off + i) % 4) {
		err = nfp_bpf_map_mark_used_one(env, nfp_map, off + i, use);
		if (err)
			return err;
	}

	return 0;
}

static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  struct bpf_verifier_env *env, u8 reg_no)
{
	const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
	int err;

	if (reg->type != PTR_TO_CTX &&
	    reg->type != PTR_TO_STACK &&
	    reg->type != PTR_TO_MAP_VALUE &&
	    reg->type != PTR_TO_PACKET) {
		pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
		return -EINVAL;
	}

	if (reg->type == PTR_TO_STACK) {
		err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
		if (err)
			return err;
	}

	if (reg->type == PTR_TO_MAP_VALUE) {
		if (is_mbpf_load(meta)) {
			err = nfp_bpf_map_mark_used(env, meta, reg,
						    NFP_MAP_USE_READ);
			if (err)
				return err;
		}
		if (is_mbpf_store(meta)) {
			pr_vlog(env, "map writes not supported\n");
			return -EOPNOTSUPP;
		}
		if (is_mbpf_xadd(meta)) {
			err = nfp_bpf_map_mark_used(env, meta, reg,
						    NFP_MAP_USE_ATOMIC_CNT);
			if (err)
				return err;
		}
	}

	if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
		pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
			meta->ptr.type, reg->type);
		return -EINVAL;
	}

	meta->ptr = *reg;

	return 0;
}

static int
nfp_bpf_check_store(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		    struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *reg = cur_regs(env) + meta->insn.dst_reg;

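	/* Stores to the context are only allowed for the XDP rx_queue_index
	 * field, and only if the FW supports queue selection.
	 */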
	if (reg->type == PTR_TO_CTX) {
		if (nfp_prog->type == BPF_PROG_TYPE_XDP) {
			/* XDP ctx accesses must be 4B in size */
			switch (meta->insn.off) {
			case offsetof(struct xdp_md, rx_queue_index):
				if (nfp_prog->bpf->queue_select)
					goto exit_check_ptr;
				pr_vlog(env, "queue selection not supported by FW\n");
				return -EOPNOTSUPP;
			}
		}
		pr_vlog(env, "unsupported store to context field\n");
		return -EOPNOTSUPP;
	}
exit_check_ptr:
	return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}

static int
nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		   struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
	const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;

	if (dreg->type != PTR_TO_MAP_VALUE) {
		pr_vlog(env, "atomic add not to a map value pointer: %d\n",
			dreg->type);
		return -EOPNOTSUPP;
	}
	if (sreg->type != SCALAR_VALUE) {
		pr_vlog(env, "atomic add not of a scalar: %d\n", sreg->type);
		return -EOPNOTSUPP;
	}

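	/* Track whether the value being added may exceed 16 bits and whether
	 * it may still fit in 16 bits (tnum mask bits are unknown); both
	 * flags accumulate across verifier passes over this instruction.
	 */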
	meta->xadd_over_16bit |=
		sreg->var_off.value > 0xffff || sreg->var_off.mask > 0xffff;
	meta->xadd_maybe_16bit |=
		(sreg->var_off.value & ~sreg->var_off.mask) <= 0xffff;

	return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}

static int
nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
		  struct bpf_verifier_env *env)
{
	const struct bpf_reg_state *sreg =
		cur_regs(env) + meta->insn.src_reg;
	const struct bpf_reg_state *dreg =
		cur_regs(env) + meta->insn.dst_reg;

	meta->umin_src = min(meta->umin_src, sreg->umin_value);
	meta->umax_src = max(meta->umax_src, sreg->umax_value);
	meta->umin_dst = min(meta->umin_dst, dreg->umin_value);
	meta->umax_dst = max(meta->umax_dst, dreg->umax_value);

	/* NFP supports u16 and u32 multiplication.
	 *
	 * For ALU64, if either operand is beyond u32's value range, we
	 * reject it.  One thing to note: if the source operand is BPF_K,
	 * we need to check the "imm" field directly and reject it if it is
	 * negative, because for ALU64 "imm" (an s32) would be sign extended
	 * to s64, which NFP mul doesn't support.
	 *
	 * For ALU32 it is fine for "imm" to be negative, because the result
	 * is 32 bits and the low half of the result is the same for signed
	 * and unsigned mul, so we still get the correct result.
	 */
	if (is_mbpf_mul(meta)) {
		if (meta->umax_dst > U32_MAX) {
			pr_vlog(env, "multiplier is not within u32 value range\n");
			return -EINVAL;
		}
		if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) {
			pr_vlog(env, "multiplicand is not within u32 value range\n");
			return -EINVAL;
		}
		if (mbpf_class(meta) == BPF_ALU64 &&
		    mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
			pr_vlog(env, "sign extended multiplicand won't be within u32 value range\n");
			return -EINVAL;
		}
	}

	/* NFP doesn't have divide instructions; we support divide by constant
	 * through reciprocal multiplication.  Given that NFP multiplication
	 * is limited to u32, we require the divisor and dividend to be no
	 * bigger than that as well.
	 *
	 * Also, eBPF doesn't support signed divide and enforces this at the
	 * C language level by failing compilation.  However, the LLVM
	 * assembler doesn't enforce it, so a negative constant can leak in
	 * as a BPF_K operand through assembly code; we reject such cases as
	 * well.
	 */
	if (is_mbpf_div(meta)) {
		if (meta->umax_dst > U32_MAX) {
			pr_vlog(env, "dividend is not within u32 value range\n");
			return -EINVAL;
		}
		if (mbpf_src(meta) == BPF_X) {
			if (meta->umin_src != meta->umax_src) {
				pr_vlog(env, "divisor is not constant\n");
				return -EINVAL;
			}
			if (meta->umax_src > U32_MAX) {
				pr_vlog(env, "divisor is not within u32 value range\n");
				return -EINVAL;
			}
		}
		if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
			pr_vlog(env, "divide by negative constant is not supported\n");
			return -EINVAL;
		}
	}

	return 0;
}

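/* Per-instruction callback invoked by the core verifier for offloaded
 * programs (via the .insn_hook below); dispatches to the checks above based
 * on the instruction type.
 */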
static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
	struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
	struct nfp_insn_meta *meta = nfp_prog->verifier_meta;

	meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
	nfp_prog->verifier_meta = meta;

	if (!nfp_bpf_supported_opcode(meta->insn.code)) {
		pr_vlog(env, "instruction %#02x not supported\n",
			meta->insn.code);
		return -EINVAL;
	}

	if (meta->insn.src_reg >= MAX_BPF_REG ||
	    meta->insn.dst_reg >= MAX_BPF_REG) {
		pr_vlog(env, "program uses extended registers - jit hardening?\n");
		return -EINVAL;
	}

	if (is_mbpf_helper_call(meta))
		return nfp_bpf_check_helper_call(nfp_prog, env, meta);
	if (meta->insn.code == (BPF_JMP | BPF_EXIT))
		return nfp_bpf_check_exit(nfp_prog, env);

	if (is_mbpf_load(meta))
		return nfp_bpf_check_ptr(nfp_prog, meta, env,
					 meta->insn.src_reg);
	if (is_mbpf_store(meta))
		return nfp_bpf_check_store(nfp_prog, meta, env);

	if (is_mbpf_xadd(meta))
		return nfp_bpf_check_xadd(nfp_prog, meta, env);

	if (is_mbpf_alu(meta))
		return nfp_bpf_check_alu(nfp_prog, meta, env);

	return 0;
}

static int
nfp_assign_subprog_idx_and_regs(struct bpf_verifier_env *env,
				struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	int index = 0;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (nfp_is_subprog_start(meta))
			index++;
		meta->subprog_idx = index;

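		/* A write to any of the callee-saved registers R6-R9 means
		 * this subprogram will have to save and restore them.
		 */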
		if (meta->insn.dst_reg >= BPF_REG_6 &&
		    meta->insn.dst_reg <= BPF_REG_9)
			nfp_prog->subprog[index].needs_reg_push = 1;
	}

	if (index + 1 != nfp_prog->subprog_cnt) {
		pr_vlog(env, "BUG: number of processed BPF functions is not consistent (processed %d, expected %d)\n",
			index + 1, nfp_prog->subprog_cnt);
		return -EFAULT;
	}

	return 0;
}

static unsigned int
nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog, unsigned int cnt)
{
	struct nfp_insn_meta *meta = nfp_prog_first_meta(nfp_prog);
	unsigned int max_depth = 0, depth = 0, frame = 0;
	struct nfp_insn_meta *ret_insn[MAX_CALL_FRAMES];
	unsigned short frame_depths[MAX_CALL_FRAMES];
	unsigned short ret_prog[MAX_CALL_FRAMES];
	unsigned short idx = meta->subprog_idx;

	/* Inspired by check_max_stack_depth() in the kernel verifier.
	 * Starting from the main subprogram, walk all instructions and
	 * recursively walk all callees that a given subprogram can call.
	 * Since recursion is prevented by the kernel verifier, this
	 * algorithm only needs a local stack of MAX_CALL_FRAMES to remember
	 * callsites.
	 */
process_subprog:
	frame_depths[frame] = nfp_prog->subprog[idx].stack_depth;
	frame_depths[frame] = round_up(frame_depths[frame], STACK_FRAME_ALIGN);
	depth += frame_depths[frame];
	max_depth = max(max_depth, depth);

continue_subprog:
	for (; meta != nfp_prog_last_meta(nfp_prog) && meta->subprog_idx == idx;
	     meta = nfp_meta_next(meta)) {
		if (!is_mbpf_pseudo_call(meta))
			continue;

		/* We found a call to a subprogram. Remember instruction to
		 * return to and subprog id.
		 */
		ret_insn[frame] = nfp_meta_next(meta);
		ret_prog[frame] = idx;

		/* Find the callee and start processing it. */
		meta = nfp_bpf_goto_meta(nfp_prog, meta,
					 meta->n + 1 + meta->insn.imm, cnt);
		idx = meta->subprog_idx;
		frame++;
		goto process_subprog;
	}
	/* End of for() loop means the last instruction of the subprog was
	 * reached. If we popped all stack frames, return; otherwise, go on
	 * processing remaining instructions from the caller.
	 */
	if (frame == 0)
		return max_depth;

	depth -= frame_depths[frame];
	frame--;
	meta = ret_insn[frame];
	idx = ret_prog[frame];
	goto continue_subprog;
}

static int nfp_bpf_finalize(struct bpf_verifier_env *env)
{
	unsigned int stack_size, stack_needed;
	struct bpf_subprog_info *info;
	struct nfp_prog *nfp_prog;
	struct nfp_net *nn;
	int i;

	nfp_prog = env->prog->aux->offload->dev_priv;
	nfp_prog->subprog_cnt = env->subprog_cnt;
	nfp_prog->subprog = kcalloc(nfp_prog->subprog_cnt,
				    sizeof(nfp_prog->subprog[0]), GFP_KERNEL);
	if (!nfp_prog->subprog)
		return -ENOMEM;

	nfp_assign_subprog_idx_and_regs(env, nfp_prog);

	info = env->subprog_info;
	for (i = 0; i < nfp_prog->subprog_cnt; i++) {
		nfp_prog->subprog[i].stack_depth = info[i].stack_depth;

		if (i == 0)
			continue;

		/* Account for size of return address. */
		nfp_prog->subprog[i].stack_depth += REG_WIDTH;
		/* Account for size of saved registers, if necessary. */
		if (nfp_prog->subprog[i].needs_reg_push)
			nfp_prog->subprog[i].stack_depth += BPF_REG_SIZE * 4;
	}

	nn = netdev_priv(env->prog->aux->offload->netdev);
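	/* FW reports the available stack size in units of 64 bytes. */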
	stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
	stack_needed = nfp_bpf_get_stack_usage(nfp_prog, env->prog->len);
	if (stack_needed > stack_size) {
		pr_vlog(env, "stack too large: program %dB > FW stack %dB\n",
			stack_needed, stack_size);
		return -EOPNOTSUPP;
	}

	return 0;
}

const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = {
	.insn_hook	= nfp_verify_insn,
	.finalize	= nfp_bpf_finalize,
};
777