// SPDX-License-Identifier: GPL-2.0-only
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/tc_wrapper.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	unsigned int in_hw_count;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_work rwork;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}
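/* Illustrative sketch (not part of this file) of a direct-action eBPF
 * classifier whose return value is sanitized by cls_bpf_exec_opcode()
 * above; the SEC() section naming follows the libbpf convention and is
 * an assumption, not something this module requires:
 *
 *	SEC("classifier")
 *	int cls_drop_all(struct __sk_buff *skb)
 *	{
 *		return TC_ACT_SHOT;
 *	}
 */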
TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb,
				       const struct tcf_proto *tp,
				       struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			filter_res = bpf_prog_run_data_pointers(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			filter_res = bpf_prog_run_data_pointers(prog->filter, skb);
		}
		if (unlikely(!skb->tstamp && skb->tstamp_type))
			skb->tstamp_type = SKB_CLOCK_REALTIME;

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog && prog)
		err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count,
					  &prog->gen_flags, &prog->in_hw_count,
					  true);
	else if (prog)
		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
				      skip_sw, &prog->gen_flags,
				      &prog->in_hw_count, true);
	else
		err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count, true);

	if (prog && err) {
		cls_bpf_offload_cmd(tp, oldprog, prog, extack);
		return err;
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}
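/* The skip_sw/skip_hw handling above maps onto iproute2's generic filter
 * flags; an illustrative invocation, assuming a NIC driver with cls_bpf
 * offload support and a compiled object prog.o:
 *
 *	tc filter add dev eth0 ingress bpf da obj prog.o \
 *		sec classifier skip_sw
 */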
static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false, true);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
						 struct cls_bpf_prog,
						 rwork);
	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}
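/* cls_bpf_prog_from_ops() below consumes a raw classic-BPF opcode array
 * supplied in TCA_BPF_OPS; an illustrative example is the canonical
 * match-all program, whose 0xffffffff (-1) return selects the classid
 * configured via TCA_BPF_CLASSID in cls_bpf_classify():
 *
 *	struct sock_filter accept_all[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *	};
 */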
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}
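/* The fd consumed above via TCA_BPF_FD is one that user space created
 * beforehand with the bpf(2) BPF_PROG_LOAD command for prog_type
 * BPF_PROG_TYPE_SCHED_CLS; iproute2's "tc filter ... bpf obj <file>"
 * performs that load implicitly and passes the resulting fd along.
 */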
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, u32 flags,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	bool is_bpf, is_ebpf, have_exts = false;
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	bool bound_to_filter = false;
	struct cls_bpf_prog *prog;
	u32 gen_flags = 0;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
					  bpf_policy, NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
		ret = -EINVAL;
		goto errout_idr;
	}

	ret = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &prog->exts,
				flags, extack);
	if (ret < 0)
		goto errout_idr;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			ret = -EINVAL;
			goto errout_idr;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags)) {
			ret = -EINVAL;
			goto errout_idr;
		}
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		goto errout_idr;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
		bound_to_filter = true;
	}

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	tcf_proto_update_usesw(tp, prog->gen_flags);

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	if (bound_to_filter)
		tcf_unbind_filter(tp, &prog->res);
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
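/* The nested TCA_BPF_* attributes emitted by cls_bpf_dump() above are
 * what user space renders when listing filters, e.g. (illustrative):
 *
 *	tc filter show dev eth0 ingress
 */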
static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct cls_bpf_prog *prog = fh;

	tc_cls_bind_class(classid, cl, q, &prog->res, base);
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (!tc_cls_stats_dump(tp, arg, prog))
			break;
	}
}

static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			     void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *prog;
	int err;

	list_for_each_entry(prog, &head->plist, link) {
		if (tc_skip_hw(prog->gen_flags))
			continue;

		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
					   extack);
		cls_bpf.command = TC_CLSBPF_OFFLOAD;
		cls_bpf.exts = &prog->exts;
		cls_bpf.prog = add ? prog->filter : NULL;
		cls_bpf.oldprog = add ? NULL : prog->filter;
		cls_bpf.name = prog->bpf_name;
		cls_bpf.exts_integrated = prog->exts_integrated;

		err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,
					    &cls_bpf, cb_priv, &prog->gen_flags,
					    &prog->in_hw_count);
		if (err)
			return err;
	}

	return 0;
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.reoffload	=	cls_bpf_reoffload,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};
MODULE_ALIAS_NET_CLS("bpf");

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);