/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	bool offloaded;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}
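
/* Fast path, invoked per packet under RCU. Each program on plist runs in
 * order until one claims the skb: in direct-action mode (exts_integrated)
 * the program's return value is itself the TC verdict, otherwise a non-zero
 * result selects a classid and the attached tcf_exts actions run afterwards.
 */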
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       enum tc_clsbpf_command cmd)
{
	bool addorrep = cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE;
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(prog->gen_flags);
	struct tc_cls_bpf_offload cls_bpf = {};
	int err;

	tc_cls_common_offload_init(&cls_bpf.common, tp);
	cls_bpf.command = cmd;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;
	cls_bpf.gen_flags = prog->gen_flags;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
	if (addorrep) {
		if (err < 0) {
			cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
			return err;
		} else if (err > 0) {
			prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;
		}
	}

	if (addorrep && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}
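
/* Pick the offload command a change requires: a new program (or one that
 * replaces a software-only filter) is an ADD, replacing an offloaded program
 * is a REPLACE, and downgrading to software-only DESTROYs the old hardware
 * state. Fails with -EINVAL when skip_sw and skip_hw together would leave
 * the filter with nowhere to run.
 */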
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog)
{
	struct cls_bpf_prog *obj = prog;
	enum tc_clsbpf_command cmd;
	bool skip_sw;
	int ret;

	skip_sw = tc_skip_sw(prog->gen_flags) ||
		(oldprog && tc_skip_sw(oldprog->gen_flags));

	if (oldprog && oldprog->offloaded) {
		if (!tc_skip_hw(prog->gen_flags)) {
			cmd = TC_CLSBPF_REPLACE;
		} else if (!tc_skip_sw(prog->gen_flags)) {
			obj = oldprog;
			cmd = TC_CLSBPF_DESTROY;
		} else {
			return -EINVAL;
		}
	} else {
		if (tc_skip_hw(prog->gen_flags))
			return skip_sw ? -EINVAL : 0;
		cmd = TC_CLSBPF_ADD;
	}

	ret = cls_bpf_offload_cmd(tp, obj, cmd);
	if (ret)
		return ret;

	obj->offloaded = true;
	if (oldprog)
		oldprog->offloaded = false;

	return 0;
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog)
{
	int err;

	if (!prog->offloaded)
		return;

	err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
	if (err) {
		pr_err("Stopping hardware offload failed: %d\n", err);
		return;
	}

	prog->offloaded = false;
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	if (!prog->offloaded)
		return;

	cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);

	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
	tcf_queue_work(&prog->work);
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove_ext(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}
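
/* Build the filter from classic BPF opcodes passed in TCA_BPF_OPS. The
 * opcode array is copied and kept in prog->bpf_ops so it can be dumped
 * back to userspace later; bpf_prog_create() converts it to internal
 * (eBPF) form for execution.
 */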
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

	if (gen_flags & TCA_CLS_FLAGS_SKIP_SW)
		fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS,
					   qdisc_dev(tp->q));
	else
		fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
		netif_keep_dst(qdisc_dev(tp->q));

	return 0;
}

static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}
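
/* Insert or replace a filter. Handles are managed through the IDR: a zero
 * handle asks the kernel to pick one, a specific handle is reserved unless
 * we are replacing the program that already owns it. On failure the IDR
 * slot is released again so a retry with the same handle can succeed.
 */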
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	unsigned long idr_index;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
			       NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index,
				    1, 0x7FFFFFFF, GFP_KERNEL);
		if (ret)
			goto errout;
		prog->handle = idr_index;
	} else {
		if (!oldprog) {
			ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index,
					    handle, handle + 1, GFP_KERNEL);
			if (ret)
				goto errout;
		}
		prog->handle = handle;
	}

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog);
	if (ret) {
		if (!oldprog)
			idr_remove_ext(&head->handle_idr, prog->handle);
		__cls_bpf_delete_prog(prog);
		return ret;
	}

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace_ext(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_idr:
	if (!oldprog)
		idr_remove_ext(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}
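
/* Dump one filter back to userspace: classic BPF filters are dumped as the
 * raw opcode array, eBPF filters by name, id and tag. Hardware stats are
 * refreshed first so the exts stats dumped below are up to date.
 */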
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid)
		prog->res.class = cl;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);
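
/* Example usage from userspace (illustrative sketch, assuming iproute2's tc
 * and an eBPF object compiled for BPF_PROG_TYPE_SCHED_CLS; the device,
 * object file and section names are placeholders):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf direct-action obj prog.o sec classifier
 *
 * "direct-action" sets TCA_BPF_FLAG_ACT_DIRECT, i.e. the exts_integrated
 * mode above, where the program's return value is used as the TC verdict.
 */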