/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS \
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}
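
/* Return-value contract illustrated (a sketch; program and section names
 * are hypothetical, not part of this file). A minimal direct-action eBPF
 * classifier, as run by cls_bpf_classify() above, could look like:
 *
 *	SEC("classifier")
 *	int cls_main(struct __sk_buff *skb)
 *	{
 *		skb->tc_classid = TC_H_MAKE(1 << 16, 1);   // classid 1:1
 *		return TC_ACT_OK;
 *	}
 *
 * In direct-action mode the return value is a TC_ACT_* opcode, sanitized
 * by cls_bpf_exec_opcode(). In classic mode, 0 means "no match", -1
 * selects the default classid from prog->res, and any other value is
 * used directly as the classid.
 */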

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags,
				   extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog)
		tcf_block_offload_dec(block, &oldprog->gen_flags);

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
	if (prog) {
		if (err < 0) {
			cls_bpf_offload_cmd(tp, oldprog, prog, extack);
			return err;
		} else if (err > 0) {
			tcf_block_offload_inc(block, &prog->gen_flags);
		}
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
}
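
/* Offload semantics in practice (usage sketch; device and object names
 * are examples only):
 *
 *	tc filter add dev swp1 ingress bpf da obj prog.o sec classifier skip_sw
 *
 * With skip_sw, cls_bpf_offload_cmd() above fails with -EINVAL unless a
 * driver actually accepted the program (TCA_CLS_FLAGS_IN_HW got set);
 * with skip_hw, cls_bpf_offload() never calls into the driver at all.
 */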

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);

	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
	tcf_queue_work(&prog->work);
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}
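
/* For reference, the classic BPF program consumed above is just an array
 * of sock_filter opcodes. The smallest useful example (a sketch) matches
 * every packet by returning -1, i.e. the default classid:
 *
 *	struct sock_filter ops[] = {
 *		{ 0x06, 0, 0, 0xffffffff },	// BPF_RET | BPF_K, returns -1
 *	};
 *
 * tc encodes the same thing on the command line as
 * "bpf bytecode '1,6 0 0 4294967295,'".
 */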

static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}

static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr,
			     struct netlink_ext_ack *extack)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, extack);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}
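
/* Userspace hand-off sketch (iproute2-style pseudocode; helper names are
 * illustrative, not defined here): the eBPF path receives an
 * already-loaded program fd,
 *
 *	fd = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, insns, cnt, "GPL", ...);
 *	addattr32(&req.n, sizeof(req), TCA_BPF_FD, fd);
 *	addattrstrz(&req.n, sizeof(req), TCA_BPF_NAME, "prog.o:[classifier]");
 *
 * cls_bpf_set_parms() then enforces that exactly one of TCA_BPF_OPS and
 * TCA_BPF_FD is present, takes a reference on the fd's program, and binds
 * the optional TCA_BPF_CLASSID.
 */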

static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
			       NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
				extack);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}
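
/* The id/tag dumped above make loaded programs introspectable from
 * userspace, e.g. (sketch):
 *
 *	tc filter show dev eth0 ingress
 *	bpftool prog show id 42
 *
 * where 42 stands in for the TCA_BPF_ID value reported by the dump.
 */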

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid)
		prog->res.class = cl;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		= "bpf",
	.owner		= THIS_MODULE,
	.classify	= cls_bpf_classify,
	.init		= cls_bpf_init,
	.destroy	= cls_bpf_destroy,
	.get		= cls_bpf_get,
	.change		= cls_bpf_change,
	.delete		= cls_bpf_delete,
	.walk		= cls_bpf_walk,
	.dump		= cls_bpf_dump,
	.bind_class	= cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);
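
/* Note: the classifier registers under kind "bpf"; when a filter of that
 * kind is first requested, the cls_api core normally resolves it via
 * request_module("cls_bpf"), so no explicit modprobe should be needed.
 */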