/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

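/* Illustrative userspace usage via iproute2 (device, object and section
 * names below are examples only; "da" selects the direct-action mode
 * that corresponds to TCA_BPF_FLAG_ACT_DIRECT):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf da obj prog.o sec classifier
 */
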
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	bool offloaded;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

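/* Netlink policy for the TCA_BPF_* attributes nested in TCA_OPTIONS. */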
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

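/* Sanitize the return code of a direct-action program: any value that
 * is not a known TC_ACT_* opcode is mapped to TC_ACT_UNSPEC, which
 * makes the classify loop fall through to the next program in the list.
 */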
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

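/* Main classification path. Each attached program runs in list order;
 * at ingress the MAC header is pushed back first so programs see a
 * consistent skb layout regardless of the hook point.
 */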
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

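		/* In direct-action mode the program returns a TC_ACT_*
		 * opcode itself and may have set a classid through
		 * skb->tc_classid, so no separate actions are executed.
		 */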
		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

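/* eBPF programs come in via a file descriptor and carry no classic
 * sock_filter ops array, which is what distinguishes the two flavours.
 */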
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       enum tc_clsbpf_command cmd)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_bpf_offload cls_bpf = {};
	int err;

	tc_cls_common_offload_init(&cls_bpf.common, tp);
	cls_bpf.command = cmd;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;
	cls_bpf.gen_flags = prog->gen_flags;

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF, &cls_bpf);
	if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE))
		prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;

	return err;
}

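/* Decide how a new program interacts with hardware offload: replace an
 * already offloaded program, tear the old one down when offload is no
 * longer possible, or add a fresh one. Offload failures are fatal only
 * when software fallback was excluded via TCA_CLS_FLAGS_SKIP_SW.
 */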
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_bpf_prog *obj = prog;
	enum tc_clsbpf_command cmd;
	bool skip_sw;
	int ret;

	skip_sw = tc_skip_sw(prog->gen_flags) ||
		(oldprog && tc_skip_sw(oldprog->gen_flags));

	if (oldprog && oldprog->offloaded) {
		if (tc_should_offload(dev, prog->gen_flags)) {
			cmd = TC_CLSBPF_REPLACE;
		} else if (!tc_skip_sw(prog->gen_flags)) {
			obj = oldprog;
			cmd = TC_CLSBPF_DESTROY;
		} else {
			return -EINVAL;
		}
	} else {
		if (!tc_should_offload(dev, prog->gen_flags))
			return skip_sw ? -EINVAL : 0;
		cmd = TC_CLSBPF_ADD;
	}

	ret = cls_bpf_offload_cmd(tp, obj, cmd);
	if (ret)
		return skip_sw ? ret : 0;

	obj->offloaded = true;
	if (oldprog)
		oldprog->offloaded = false;

	return 0;
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	if (!prog->offloaded)
		return;

	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
	if (err) {
		pr_err("Stopping hardware offload failed: %d\n", err);
		return;
	}

	prog->offloaded = false;
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	if (!prog->offloaded)
		return;

	cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

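/* Teardown happens in two deferred steps: call_rcu() waits out readers
 * still walking the list, and the RCU callback then punts the actual
 * freeing to a workqueue, since taking the RTNL lock is not allowed
 * from (softirq) RCU callback context.
 */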
static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);

	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
	tcf_queue_work(&prog->work);
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog);

	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

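/* Classic BPF path: userspace passes a raw sock_filter array which is
 * validated and converted (and possibly JITed) by bpf_prog_create().
 */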
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

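/* eBPF path: the program was already loaded and verified through the
 * bpf(2) syscall; here we only take a reference via its file descriptor
 * and check that it was loaded as BPF_PROG_TYPE_SCHED_CLS.
 */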
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

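/* Pick an unused handle by linearly probing from the last one handed
 * out; hgen wraps around before reaching 0x7FFFFFFF, and the 0x80000000
 * iteration bound guarantees termination even with a full table.
 */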
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

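/* Create or replace a filter instance. A replacement program is built
 * fully before being swapped in via list_replace_rcu(), so readers on
 * the classify path always see either the old or the new program.
 */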
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
			       NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout;

	ret = cls_bpf_offload(tp, prog, oldprog);
	if (ret) {
		__cls_bpf_delete_prog(prog);
		return ret;
	}

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

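/* Dump helpers: classic programs are dumped as their raw instruction
 * array, eBPF programs by name, id and tag (the instructions themselves
 * live in the bpf(2) subsystem).
 */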
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

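/* Fill a netlink dump message for one filter; hardware stats for
 * offloaded programs are refreshed before the extensions are dumped.
 */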
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid)
		prog->res.class = cl;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);