/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

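/* Example user-space setup (illustrative only; the exact frontend
 * syntax depends on the iproute2 version in use):
 *
 *   # eBPF program from an ELF object, direct-action mode:
 *   tc filter add dev em1 ingress bpf da obj prog.o sec classify
 *
 *   # classic BPF bytecode (bytecode string elided):
 *   tc filter add dev em1 parent 1: bpf bytecode '...' flowid 1:1
 */
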
#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	bool offloaded;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	union {
		u32 bpf_fd;
		u16 bpf_num_ops;
	};
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

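/* Sanitize the verdict of a direct-action program: only the TC_ACT_*
 * opcodes listed below are passed through; any other return code is
 * mapped to TC_ACT_UNSPEC, which makes the caller fall through to the
 * next filter on the list.
 */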
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

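/* Main classification path: walk the filter list under RCU and run each
 * attached BPF program on the skb. At ingress the MAC header has already
 * been pulled, so it is pushed back temporarily to give the program a
 * consistent view of the packet. In direct-action (exts_integrated)
 * mode the program's (sanitized) return code is the TC verdict itself;
 * otherwise a non-zero result selects the classid and the configured
 * actions are executed via tcf_exts_exec().
 */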
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_end(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

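/* Only classic BPF filters keep a kernel-side copy of their sock_filter
 * ops, so bpf_ops also acts as the discriminator for the
 * bpf_fd/bpf_num_ops union in struct cls_bpf_prog.
 */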
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

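/* Fill a tc_cls_bpf_offload descriptor for @cmd and pass it down to the
 * device through the ndo_setup_tc() hook.
 */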
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       enum tc_clsbpf_command cmd)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_cls_bpf_offload bpf_offload = {};
	struct tc_to_netdev offload;

	offload.type = TC_SETUP_CLSBPF;
	offload.cls_bpf = &bpf_offload;

	bpf_offload.command = cmd;
	bpf_offload.exts = &prog->exts;
	bpf_offload.prog = prog->filter;
	bpf_offload.name = prog->bpf_name;
	bpf_offload.exts_integrated = prog->exts_integrated;
	bpf_offload.gen_flags = prog->gen_flags;

	return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
					     tp->protocol, &offload);
}

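/* Decide how an add/replace interacts with hardware offload:
 * - old program offloaded, new one offloadable: replace it in hardware;
 * - old program offloaded, new one software-only: destroy the hardware
 *   entry, unless skipping software was explicitly requested;
 * - nothing offloaded yet: add to hardware, or bail out early if the
 *   device cannot offload and TCA_CLS_FLAGS_SKIP_SW was set.
 * An offload failure is fatal only when software fallback is forbidden.
 */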
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_bpf_prog *obj = prog;
	enum tc_clsbpf_command cmd;
	bool skip_sw;
	int ret;

	skip_sw = tc_skip_sw(prog->gen_flags) ||
		(oldprog && tc_skip_sw(oldprog->gen_flags));

	if (oldprog && oldprog->offloaded) {
		if (tc_should_offload(dev, tp, prog->gen_flags)) {
			cmd = TC_CLSBPF_REPLACE;
		} else if (!tc_skip_sw(prog->gen_flags)) {
			obj = oldprog;
			cmd = TC_CLSBPF_DESTROY;
		} else {
			return -EINVAL;
		}
	} else {
		if (!tc_should_offload(dev, tp, prog->gen_flags))
			return skip_sw ? -EINVAL : 0;
		cmd = TC_CLSBPF_ADD;
	}

	ret = cls_bpf_offload_cmd(tp, obj, cmd);
	if (ret)
		return skip_sw ? ret : 0;

	obj->offloaded = true;
	if (oldprog)
		oldprog->offloaded = false;

	return 0;
}

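/* Tear the hardware entry down again, e.g. on delete/destroy. If the
 * device rejects the destroy command, the error is only logged and
 * prog->offloaded stays set.
 */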
static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	if (!prog->offloaded)
		return;

	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
	if (err) {
		pr_err("Stopping hardware offload failed: %d\n", err);
		return;
	}

	prog->offloaded = false;
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	if (!prog->offloaded)
		return;

	cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void __cls_bpf_delete_prog(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	cls_bpf_delete_prog(prog->tp, prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) arg;

	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	call_rcu(&prog->rcu, __cls_bpf_delete_prog);

	return 0;
}

static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	if (!force && !list_empty(&head->plist))
		return false;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		cls_bpf_stop_offload(tp, prog);
		list_del_rcu(&prog->link);
		tcf_unbind_filter(tp, &prog->res);
		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
	}

	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

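/* Classic BPF path: validate TCA_BPF_OPS{,_LEN}, copy the sock_filter
 * array out of the netlink attribute and translate/JIT it through
 * bpf_prog_create(). The raw ops copy is kept for later dumping.
 */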
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

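/* eBPF path: take a reference on an already loaded
 * BPF_PROG_TYPE_SCHED_CLS program via its file descriptor and remember
 * the optional user-supplied name for dumping. If the program may need
 * skb dst entries and we are not at ingress, make the device keep them.
 */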
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = kmemdup(nla_data(tb[TCA_BPF_NAME]),
			       nla_len(tb[TCA_BPF_NAME]),
			       GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_fd = bpf_fd;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

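/* Common validation for create and replace: exactly one of classic ops
 * or an eBPF fd must be supplied; TCA_BPF_FLAGS may request
 * direct-action mode and TCA_BPF_FLAGS_GEN carries the skip_hw/skip_sw
 * offload flags. On success the validated actions are committed with
 * tcf_exts_change() under RTNL.
 */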
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	struct tcf_exts exts;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		return ret;
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			ret = -EINVAL;
			goto errout;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags)) {
			ret = -EINVAL;
			goto errout;
		}
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, tp);
	if (ret < 0)
		goto errout;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	tcf_exts_change(tp, &prog->exts, &exts);
	return 0;

errout:
	tcf_exts_destroy(&exts);
	return ret;
}

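/* Allocate an unused handle by linearly probing from the last generated
 * value, wrapping back to 1 before hitting 0x7FFFFFFF, and giving up
 * (returning 0) after 0x80000000 attempts. A minimal sketch of the same
 * probing scheme, with handle_in_use() standing in for cls_bpf_get():
 *
 *	u32 h = head->hgen;
 *	do {
 *		h = (h == 0x7FFFFFFE) ? 1 : h + 1;
 *	} while (handle_in_use(h));
 */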
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;
	u32 handle;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));

	if (unlikely(i == 0)) {
		pr_err("Insufficient number of handles\n");
		handle = 0;
	} else {
		handle = head->hgen;
	}

	return handle;
}

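/* Create or replace a filter. A new cls_bpf_prog is always allocated
 * and fully set up first; when replacing, the old program is swapped
 * out atomically with list_replace_rcu() and freed only after a grace
 * period, so the classify path never sees a half-initialized filter.
 */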
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE],
				      ovr);
	if (ret < 0)
		goto errout;

	ret = cls_bpf_offload(tp, prog, oldprog);
	if (ret) {
		cls_bpf_delete_prog(tp, prog);
		return ret;
	}

	if (oldprog) {
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = (unsigned long) prog;
	return 0;

errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

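/* Netlink dump helpers: classic filters dump their raw sock_filter
 * array, eBPF filters dump the fd they were loaded from plus the
 * optional user-supplied name.
 */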
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}

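/* Dump one filter to user space, refreshing hardware counters first so
 * that tcf_exts_dump_stats() reports up-to-date numbers for offloaded
 * programs.
 */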
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

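/* Iterate over all installed filters for a netlink dump walk, honoring
 * the walker's skip/count bookkeeping.
 */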
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);