// SPDX-License-Identifier: GPL-2.0-only
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 */

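/*
 * For orientation only (not part of the original file): a sketch of how
 * this classifier is typically driven from userspace with iproute2,
 * assuming an eBPF object "prog.o" with a "classifier" section:
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress bpf obj prog.o sec classifier da
 *
 * The "da" flag selects direct-action mode (TCA_BPF_FLAG_ACT_DIRECT),
 * which this file handles via prog->exts_integrated below.
 */
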
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	unsigned int in_hw_count;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_work rwork;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

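/* Map the verdict of a direct-action program onto the TC opcodes the caller
 * understands; anything unrecognized degrades to TC_ACT_UNSPEC, i.e. "keep
 * looking at the next filter".
 */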
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

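/* The hot classification path, entered from the qdisc layer in BH context.
 * At ingress the mac header has already been pulled, so the skb is pushed/
 * pulled around the program run to present the same packet view as egress.
 */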
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

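/* A minimal direct-action program handed in via TCA_BPF_FD might look as
 * follows (illustrative sketch only, not part of this file; built
 * separately with clang -target bpf against the UAPI headers, section
 * name per the loader's convention):
 *
 *	SEC("classifier")
 *	int cls_main(struct __sk_buff *skb)
 *	{
 *		skb->tc_classid = TC_H_MAKE(1 << 16, 1); // classid 1:1
 *		return TC_ACT_OK;
 *	}
 *
 * In this mode cls_bpf_classify() above merges the classid written by the
 * program and maps the return code through cls_bpf_exec_opcode().
 */
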
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

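/* Build and emit one offload request towards the drivers bound to this
 * block. prog alone adds a filter, prog plus oldprog replaces one, and
 * oldprog alone destroys; a failed add/replace is rolled back by
 * re-offloading oldprog.
 */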
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog)
		tcf_block_offload_dec(block, &oldprog->gen_flags);

	err = tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
	if (prog) {
		if (err < 0) {
			cls_bpf_offload_cmd(tp, oldprog, prog, extack);
			return err;
		} else if (err > 0) {
			prog->in_hw_count = err;
			tcf_block_offload_inc(block, &prog->gen_flags);
		}
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
						 struct cls_bpf_prog,
						 rwork);
	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

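/* Unlink a filter from the head and release it. Freeing is deferred via
 * tcf_queue_work() so that concurrent RCU readers in cls_bpf_classify()
 * have drained; if the netns is already going away, free immediately.
 */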
static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

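/* Classic BPF case: validate and copy the sock_filter array supplied over
 * netlink, then let bpf_prog_create() translate it into an internal eBPF
 * program.
 */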
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

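/* eBPF case: resolve the user-supplied fd to an already verified
 * BPF_PROG_TYPE_SCHED_CLS program and take a reference on it, optionally
 * keeping the user-visible name for later dumps.
 */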
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}

static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr,
			     struct netlink_ext_ack *extack)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, true,
				extack);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

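/* Create or replace a filter instance: parse TCA_OPTIONS, allocate the new
 * prog and reserve its handle in the IDR, apply parameters and hardware
 * offload, then publish it under RTNL with RCU-deferred teardown of any
 * replaced prog.
 */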
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
					  bpf_policy, NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
				extack);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid)
		prog->res.class = cl;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

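/* Replay all filters of this proto towards a single offload callback,
 * adding or removing them as requested; used when a block gains or loses
 * an offload-capable binding.
 */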
static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			     void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *prog;
	int err;

	list_for_each_entry(prog, &head->plist, link) {
		if (tc_skip_hw(prog->gen_flags))
			continue;

		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
					   extack);
		cls_bpf.command = TC_CLSBPF_OFFLOAD;
		cls_bpf.exts = &prog->exts;
		cls_bpf.prog = add ? prog->filter : NULL;
		cls_bpf.oldprog = add ? NULL : prog->filter;
		cls_bpf.name = prog->bpf_name;
		cls_bpf.exts_integrated = prog->exts_integrated;

		err = cb(TC_SETUP_CLSBPF, &cls_bpf, cb_priv);
		if (err) {
			if (add && tc_skip_sw(prog->gen_flags))
				return err;
			continue;
		}

		tc_cls_offload_cnt_update(block, &prog->in_hw_count,
					  &prog->gen_flags, add);
	}

	return 0;
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.reoffload	=	cls_bpf_reoffload,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);