// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static struct xarray tcf_exts_miss_cookies_xa;
struct tcf_exts_miss_cookie_node {
	const struct tcf_chain *chain;
	const struct tcf_proto *tp;
	const struct tcf_exts *exts;
	u32 chain_index;
	u32 tp_prio;
	u32 handle;
	u32 miss_cookie_base;
	struct rcu_head rcu;
};

/* Each tc action entry cookie will be comprised of 32bit miss_cookie_base +
 * action index in the exts tc actions array.
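 *
 * Illustrative round trip (both helpers below pack and unpack through the
 * same union, so the base/index split survives regardless of byte order):
 * tcf_exts_miss_cookie_get(base, act_index) folds the pair into one u64,
 * and tcf_exts_miss_cookie_lookup() on that u64 recovers act_index and uses
 * the base to find the node again in tcf_exts_miss_cookies_xa.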
 */
union tcf_exts_miss_cookie {
	struct {
		u32 miss_cookie_base;
		u32 act_index;
	};
	u64 miss_cookie;
};

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	struct tcf_exts_miss_cookie_node *n;
	static u32 next;
	int err;

	if (WARN_ON(!handle || !tp->ops->get_exts))
		return -EINVAL;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->chain_index = tp->chain->index;
	n->chain = tp->chain;
	n->tp_prio = tp->prio;
	n->tp = tp;
	n->exts = exts;
	n->handle = handle;

	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
			      n, xa_limit_32b, &next, GFP_KERNEL);
	if (err)
		goto err_xa_alloc;

	exts->miss_cookie_node = n;
	return 0;

err_xa_alloc:
	kfree(n);
	return err;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
	struct tcf_exts_miss_cookie_node *n;

	if (!exts->miss_cookie_node)
		return;

	n = exts->miss_cookie_node;
	xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
	kfree_rcu(n, rcu);
}

static struct tcf_exts_miss_cookie_node *
tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
{
	union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };

	*act_index = mc.act_index;
	return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
}
#else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	return 0;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
}
#endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */

static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
{
	union tcf_exts_miss_cookie mc = { .act_index = act_index, };

	if (!miss_cookie_base)
		return 0;

	mc.miss_cookie_base = miss_cookie_base;
	return mc.miss_cookie;
}

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range, managed by kernel.
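 *
 * Worked example for the helper below: prio values carry the user-visible
 * priority in their upper 16 bits (TC_H_MAJ). With no neighbouring tp the
 * default TC_H_MAKE(0xC0000000U, 0U) is returned (user prio 0xC000); given
 * a tp with prio 0xC0000000, TC_H_MAJ(0xC0000000 - 1) = 0xBFFF0000 is
 * returned, i.e. the free slot just below it (user prio 0xBFFF).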
 */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)	\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	xa_destroy(&block->ports);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
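	 *
	 * chain->refcnt counts every holder, while chain->action_refcnt only
	 * counts references taken by goto-chain actions, so equality of the
	 * two means no filter or netlink user reference is left.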
561 */ 562 return chain->refcnt == chain->action_refcnt; 563 } 564 565 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block, 566 u32 chain_index) 567 { 568 struct tcf_chain *chain; 569 570 ASSERT_BLOCK_LOCKED(block); 571 572 list_for_each_entry(chain, &block->chain_list, list) { 573 if (chain->index == chain_index) 574 return chain; 575 } 576 return NULL; 577 } 578 579 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 580 static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block, 581 u32 chain_index) 582 { 583 struct tcf_chain *chain; 584 585 list_for_each_entry_rcu(chain, &block->chain_list, list) { 586 if (chain->index == chain_index) 587 return chain; 588 } 589 return NULL; 590 } 591 #endif 592 593 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, 594 u32 seq, u16 flags, int event, bool unicast, 595 struct netlink_ext_ack *extack); 596 597 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block, 598 u32 chain_index, bool create, 599 bool by_act) 600 { 601 struct tcf_chain *chain = NULL; 602 bool is_first_reference; 603 604 mutex_lock(&block->lock); 605 chain = tcf_chain_lookup(block, chain_index); 606 if (chain) { 607 tcf_chain_hold(chain); 608 } else { 609 if (!create) 610 goto errout; 611 chain = tcf_chain_create(block, chain_index); 612 if (!chain) 613 goto errout; 614 } 615 616 if (by_act) 617 ++chain->action_refcnt; 618 is_first_reference = chain->refcnt - chain->action_refcnt == 1; 619 mutex_unlock(&block->lock); 620 621 /* Send notification only in case we got the first 622 * non-action reference. Until then, the chain acts only as 623 * a placeholder for actions pointing to it and user ought 624 * not know about them. 625 */ 626 if (is_first_reference && !by_act) 627 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, 628 RTM_NEWCHAIN, false, NULL); 629 630 return chain; 631 632 errout: 633 mutex_unlock(&block->lock); 634 return chain; 635 } 636 637 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, 638 bool create) 639 { 640 return __tcf_chain_get(block, chain_index, create, false); 641 } 642 643 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index) 644 { 645 return __tcf_chain_get(block, chain_index, true, true); 646 } 647 EXPORT_SYMBOL(tcf_chain_get_by_act); 648 649 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops, 650 void *tmplt_priv); 651 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops, 652 void *tmplt_priv, u32 chain_index, 653 struct tcf_block *block, struct sk_buff *oskb, 654 u32 seq, u16 flags); 655 656 static void __tcf_chain_put(struct tcf_chain *chain, bool by_act, 657 bool explicitly_created) 658 { 659 struct tcf_block *block = chain->block; 660 const struct tcf_proto_ops *tmplt_ops; 661 unsigned int refcnt, non_act_refcnt; 662 bool free_block = false; 663 void *tmplt_priv; 664 665 mutex_lock(&block->lock); 666 if (explicitly_created) { 667 if (!chain->explicitly_created) { 668 mutex_unlock(&block->lock); 669 return; 670 } 671 chain->explicitly_created = false; 672 } 673 674 if (by_act) 675 chain->action_refcnt--; 676 677 /* tc_chain_notify_delete can't be called while holding block lock. 678 * However, when block is unlocked chain can be changed concurrently, so 679 * save these to temporary variables. 
680 */ 681 refcnt = --chain->refcnt; 682 non_act_refcnt = refcnt - chain->action_refcnt; 683 tmplt_ops = chain->tmplt_ops; 684 tmplt_priv = chain->tmplt_priv; 685 686 if (non_act_refcnt == chain->explicitly_created && !by_act) { 687 if (non_act_refcnt == 0) 688 tc_chain_notify_delete(tmplt_ops, tmplt_priv, 689 chain->index, block, NULL, 0, 0); 690 /* Last reference to chain, no need to lock. */ 691 chain->flushing = false; 692 } 693 694 if (refcnt == 0) 695 free_block = tcf_chain_detach(chain); 696 mutex_unlock(&block->lock); 697 698 if (refcnt == 0) { 699 tc_chain_tmplt_del(tmplt_ops, tmplt_priv); 700 tcf_chain_destroy(chain, free_block); 701 } 702 } 703 704 static void tcf_chain_put(struct tcf_chain *chain) 705 { 706 __tcf_chain_put(chain, false, false); 707 } 708 709 void tcf_chain_put_by_act(struct tcf_chain *chain) 710 { 711 __tcf_chain_put(chain, true, false); 712 } 713 EXPORT_SYMBOL(tcf_chain_put_by_act); 714 715 static void tcf_chain_put_explicitly_created(struct tcf_chain *chain) 716 { 717 __tcf_chain_put(chain, false, true); 718 } 719 720 static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held) 721 { 722 struct tcf_proto *tp, *tp_next; 723 724 mutex_lock(&chain->filter_chain_lock); 725 tp = tcf_chain_dereference(chain->filter_chain, chain); 726 while (tp) { 727 tp_next = rcu_dereference_protected(tp->next, 1); 728 tcf_proto_signal_destroying(chain, tp); 729 tp = tp_next; 730 } 731 tp = tcf_chain_dereference(chain->filter_chain, chain); 732 RCU_INIT_POINTER(chain->filter_chain, NULL); 733 tcf_chain0_head_change(chain, NULL); 734 chain->flushing = true; 735 mutex_unlock(&chain->filter_chain_lock); 736 737 while (tp) { 738 tp_next = rcu_dereference_protected(tp->next, 1); 739 tcf_proto_put(tp, rtnl_held, NULL); 740 tp = tp_next; 741 } 742 } 743 744 static int tcf_block_setup(struct tcf_block *block, 745 struct flow_block_offload *bo); 746 747 static void tcf_block_offload_init(struct flow_block_offload *bo, 748 struct net_device *dev, struct Qdisc *sch, 749 enum flow_block_command command, 750 enum flow_block_binder_type binder_type, 751 struct flow_block *flow_block, 752 bool shared, struct netlink_ext_ack *extack) 753 { 754 bo->net = dev_net(dev); 755 bo->command = command; 756 bo->binder_type = binder_type; 757 bo->block = flow_block; 758 bo->block_shared = shared; 759 bo->extack = extack; 760 bo->sch = sch; 761 bo->cb_list_head = &flow_block->cb_list; 762 INIT_LIST_HEAD(&bo->cb_list); 763 } 764 765 static void tcf_block_unbind(struct tcf_block *block, 766 struct flow_block_offload *bo); 767 768 static void tc_block_indr_cleanup(struct flow_block_cb *block_cb) 769 { 770 struct tcf_block *block = block_cb->indr.data; 771 struct net_device *dev = block_cb->indr.dev; 772 struct Qdisc *sch = block_cb->indr.sch; 773 struct netlink_ext_ack extack = {}; 774 struct flow_block_offload bo = {}; 775 776 tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND, 777 block_cb->indr.binder_type, 778 &block->flow_block, tcf_block_shared(block), 779 &extack); 780 rtnl_lock(); 781 down_write(&block->cb_lock); 782 list_del(&block_cb->driver_list); 783 list_move(&block_cb->list, &bo.cb_list); 784 tcf_block_unbind(block, &bo); 785 up_write(&block->cb_lock); 786 rtnl_unlock(); 787 } 788 789 static bool tcf_block_offload_in_use(struct tcf_block *block) 790 { 791 return atomic_read(&block->offloadcnt); 792 } 793 794 static int tcf_block_offload_cmd(struct tcf_block *block, 795 struct net_device *dev, struct Qdisc *sch, 796 struct tcf_block_ext_info *ei, 797 enum flow_block_command command, 
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, refuse to bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;
	xa_init(&block->ports);

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}
EXPORT_SYMBOL(tcf_block_lookup);

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes a reference to chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. The 'if'
		 * branch of this conditional obtains its reference by calling
		 * tcf_block_refcnt_get().
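		 *
		 * Either way the caller ends up owning one block reference,
		 * to be dropped with tcf_block_refcnt_put() (or via
		 * tcf_block_release()) once it is done with the block.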
1276 */ 1277 refcount_inc(&block->refcnt); 1278 } 1279 1280 return block; 1281 } 1282 1283 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q, 1284 struct tcf_block_ext_info *ei, bool rtnl_held) 1285 { 1286 if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) { 1287 /* Flushing/putting all chains will cause the block to be 1288 * deallocated when last chain is freed. However, if chain_list 1289 * is empty, block has to be manually deallocated. After block 1290 * reference counter reached 0, it is no longer possible to 1291 * increment it or add new chains to block. 1292 */ 1293 bool free_block = list_empty(&block->chain_list); 1294 1295 mutex_unlock(&block->lock); 1296 if (tcf_block_shared(block)) 1297 tcf_block_remove(block, block->net); 1298 1299 if (q) 1300 tcf_block_offload_unbind(block, q, ei); 1301 1302 if (free_block) 1303 tcf_block_destroy(block); 1304 else 1305 tcf_block_flush_all_chains(block, rtnl_held); 1306 } else if (q) { 1307 tcf_block_offload_unbind(block, q, ei); 1308 } 1309 } 1310 1311 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held) 1312 { 1313 __tcf_block_put(block, NULL, NULL, rtnl_held); 1314 } 1315 1316 /* Find tcf block. 1317 * Set q, parent, cl when appropriate. 1318 */ 1319 1320 static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q, 1321 u32 *parent, unsigned long *cl, 1322 int ifindex, u32 block_index, 1323 struct netlink_ext_ack *extack) 1324 { 1325 struct tcf_block *block; 1326 int err = 0; 1327 1328 ASSERT_RTNL(); 1329 1330 err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack); 1331 if (err) 1332 goto errout; 1333 1334 err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack); 1335 if (err) 1336 goto errout_qdisc; 1337 1338 block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack); 1339 if (IS_ERR(block)) { 1340 err = PTR_ERR(block); 1341 goto errout_qdisc; 1342 } 1343 1344 return block; 1345 1346 errout_qdisc: 1347 if (*q) 1348 qdisc_put(*q); 1349 errout: 1350 *q = NULL; 1351 return ERR_PTR(err); 1352 } 1353 1354 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block, 1355 bool rtnl_held) 1356 { 1357 if (!IS_ERR_OR_NULL(block)) 1358 tcf_block_refcnt_put(block, rtnl_held); 1359 1360 if (q) { 1361 if (rtnl_held) 1362 qdisc_put(q); 1363 else 1364 qdisc_put_unlocked(q); 1365 } 1366 } 1367 1368 struct tcf_block_owner_item { 1369 struct list_head list; 1370 struct Qdisc *q; 1371 enum flow_block_binder_type binder_type; 1372 }; 1373 1374 static void 1375 tcf_block_owner_netif_keep_dst(struct tcf_block *block, 1376 struct Qdisc *q, 1377 enum flow_block_binder_type binder_type) 1378 { 1379 if (block->keep_dst && 1380 binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && 1381 binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) 1382 netif_keep_dst(qdisc_dev(q)); 1383 } 1384 1385 void tcf_block_netif_keep_dst(struct tcf_block *block) 1386 { 1387 struct tcf_block_owner_item *item; 1388 1389 block->keep_dst = true; 1390 list_for_each_entry(item, &block->owner_list, list) 1391 tcf_block_owner_netif_keep_dst(block, item->q, 1392 item->binder_type); 1393 } 1394 EXPORT_SYMBOL(tcf_block_netif_keep_dst); 1395 1396 static int tcf_block_owner_add(struct tcf_block *block, 1397 struct Qdisc *q, 1398 enum flow_block_binder_type binder_type) 1399 { 1400 struct tcf_block_owner_item *item; 1401 1402 item = kmalloc(sizeof(*item), GFP_KERNEL); 1403 if (!item) 1404 return -ENOMEM; 1405 item->q = q; 1406 item->binder_type = binder_type; 1407 list_add(&item->list, 
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

static bool tcf_block_tracks_dev(struct tcf_block *block,
				 struct tcf_block_ext_info *ei)
{
	return tcf_block_shared(block) &&
	       (ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS ||
		ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(q);
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	if (tcf_block_tracks_dev(block, ei)) {
		err = xa_insert(&block->ports, dev->ifindex, dev, GFP_KERNEL);
		if (err) {
			NL_SET_ERR_MSG(extack, "block dev insert failed");
			goto err_dev_insert;
		}
	}

	*p_block = block;
	return 0;

err_dev_insert:
err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
1519 */ 1520 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, 1521 struct tcf_block_ext_info *ei) 1522 { 1523 struct net_device *dev = qdisc_dev(q); 1524 1525 if (!block) 1526 return; 1527 if (tcf_block_tracks_dev(block, ei)) 1528 xa_erase(&block->ports, dev->ifindex); 1529 tcf_chain0_head_change_cb_del(block, ei); 1530 tcf_block_owner_del(block, q, ei->binder_type); 1531 1532 __tcf_block_put(block, q, ei, true); 1533 } 1534 EXPORT_SYMBOL(tcf_block_put_ext); 1535 1536 void tcf_block_put(struct tcf_block *block) 1537 { 1538 struct tcf_block_ext_info ei = {0, }; 1539 1540 if (!block) 1541 return; 1542 tcf_block_put_ext(block, block->q, &ei); 1543 } 1544 1545 EXPORT_SYMBOL(tcf_block_put); 1546 1547 static int 1548 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb, 1549 void *cb_priv, bool add, bool offload_in_use, 1550 struct netlink_ext_ack *extack) 1551 { 1552 struct tcf_chain *chain, *chain_prev; 1553 struct tcf_proto *tp, *tp_prev; 1554 int err; 1555 1556 lockdep_assert_held(&block->cb_lock); 1557 1558 for (chain = __tcf_get_next_chain(block, NULL); 1559 chain; 1560 chain_prev = chain, 1561 chain = __tcf_get_next_chain(block, chain), 1562 tcf_chain_put(chain_prev)) { 1563 for (tp = __tcf_get_next_proto(chain, NULL); tp; 1564 tp_prev = tp, 1565 tp = __tcf_get_next_proto(chain, tp), 1566 tcf_proto_put(tp_prev, true, NULL)) { 1567 if (tp->ops->reoffload) { 1568 err = tp->ops->reoffload(tp, add, cb, cb_priv, 1569 extack); 1570 if (err && add) 1571 goto err_playback_remove; 1572 } else if (add && offload_in_use) { 1573 err = -EOPNOTSUPP; 1574 NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support"); 1575 goto err_playback_remove; 1576 } 1577 } 1578 } 1579 1580 return 0; 1581 1582 err_playback_remove: 1583 tcf_proto_put(tp, true, NULL); 1584 tcf_chain_put(chain); 1585 tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use, 1586 extack); 1587 return err; 1588 } 1589 1590 static int tcf_block_bind(struct tcf_block *block, 1591 struct flow_block_offload *bo) 1592 { 1593 struct flow_block_cb *block_cb, *next; 1594 int err, i = 0; 1595 1596 lockdep_assert_held(&block->cb_lock); 1597 1598 list_for_each_entry(block_cb, &bo->cb_list, list) { 1599 err = tcf_block_playback_offloads(block, block_cb->cb, 1600 block_cb->cb_priv, true, 1601 tcf_block_offload_in_use(block), 1602 bo->extack); 1603 if (err) 1604 goto err_unroll; 1605 if (!bo->unlocked_driver_cb) 1606 block->lockeddevcnt++; 1607 1608 i++; 1609 } 1610 list_splice(&bo->cb_list, &block->flow_block.cb_list); 1611 1612 return 0; 1613 1614 err_unroll: 1615 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) { 1616 list_del(&block_cb->driver_list); 1617 if (i-- > 0) { 1618 list_del(&block_cb->list); 1619 tcf_block_playback_offloads(block, block_cb->cb, 1620 block_cb->cb_priv, false, 1621 tcf_block_offload_in_use(block), 1622 NULL); 1623 if (!bo->unlocked_driver_cb) 1624 block->lockeddevcnt--; 1625 } 1626 flow_block_cb_free(block_cb); 1627 } 1628 1629 return err; 1630 } 1631 1632 static void tcf_block_unbind(struct tcf_block *block, 1633 struct flow_block_offload *bo) 1634 { 1635 struct flow_block_cb *block_cb, *next; 1636 1637 lockdep_assert_held(&block->cb_lock); 1638 1639 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) { 1640 tcf_block_playback_offloads(block, block_cb->cb, 1641 block_cb->cb_priv, false, 1642 tcf_block_offload_in_use(block), 1643 NULL); 1644 list_del(&block_cb->list); 1645 flow_block_cb_free(block_cb); 1646 if 
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 struct tcf_exts_miss_cookie_node *n,
				 int act_index,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err = 0;

		if (n) {
			struct tcf_exts *exts;

			if (n->tp_prio != tp->prio)
				continue;

			/* We re-lookup the tp and chain based on index instead
			 * of having hard refs and locks to them, so do a sanity
			 * check if any of tp,chain,exts was replaced by the
			 * time we got here with a cookie from hardware.
			 */
			if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
				     !tp->ops->get_exts)) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_COOKIE_ERROR);
				return TC_ACT_SHOT;
			}

			exts = tp->ops->get_exts(tp, n->handle);
			if (unlikely(!exts || n->exts != exts)) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_COOKIE_ERROR);
				return TC_ACT_SHOT;
			}

			n = NULL;
			err = tcf_exts_exec_ex(skb, exts, act_index, res);
		} else {
			if (tp->protocol != protocol &&
			    tp->protocol != htons(ETH_P_ALL))
				continue;

			err = tc_classify(skb, tp, res);
		}
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	if (unlikely(n)) {
		tcf_set_drop_reason(skb,
				    SKB_DROP_REASON_TC_COOKIE_ERROR);
		return TC_ACT_SHOT;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		tcf_set_drop_reason(skb,
				    SKB_DROP_REASON_TC_RECLASSIFY_LOOP);
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	struct tcf_exts_miss_cookie_node *n = NULL;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int act_index = 0;
	int ret;

	if (block) {
		ext = skb_ext_find(skb, TC_SKB_EXT);

		if (ext && (ext->chain || ext->act_miss)) {
			struct tcf_chain *fchain;
			u32 chain;

			if (ext->act_miss) {
				n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
								&act_index);
				if (!n) {
					tcf_set_drop_reason(skb,
							    SKB_DROP_REASON_TC_COOKIE_ERROR);
					return TC_ACT_SHOT;
				}

				chain = n->chain_index;
			} else {
				chain = ext->chain;
			}

			fchain = tcf_chain_lookup_rcu(block, chain);
			if (!fchain) {
				tcf_set_drop_reason(skb,
						    SKB_DROP_REASON_TC_CHAIN_NOTFOUND);

				return TC_ACT_SHOT;
			}

			/* Consume, so cloned/redirect skbs won't inherit ext */
			skb_ext_del(skb, TC_SKB_EXT);

			tp = rcu_dereference_bh(fchain->filter_chain);
			last_executed_chain = fchain->index;
		}
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
			     &last_executed_chain);

	if (tc_skb_ext_tc_enabled()) {
		/* If we missed on some chain */
		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
			struct tc_skb_cb *cb = tc_skb_cb(skb);

			ext = tc_skb_ext_alloc(skb);
			if (WARN_ON_ONCE(!ext)) {
				tcf_set_drop_reason(skb, SKB_DROP_REASON_NOMEM);
				return TC_ACT_SHOT;
			}
			ext->chain = last_executed_chain;
			ext->mru = cb->mru;
			ext->post_ct = cb->post_ct;
			ext->post_ct_snat = cb->post_ct_snat;
			ext->post_ct_dnat = cb->post_ct_dnat;
			ext->zone = cb->zone;
		}
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify);

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
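 * If the chain is being flushed, or an identical proto is concurrently
 * being destroyed, free the new proto and return -EAGAIN so the caller
 * can replay the request.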
1898 */ 1899 1900 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain, 1901 struct tcf_proto *tp_new, 1902 u32 protocol, u32 prio, 1903 bool rtnl_held) 1904 { 1905 struct tcf_chain_info chain_info; 1906 struct tcf_proto *tp; 1907 int err = 0; 1908 1909 mutex_lock(&chain->filter_chain_lock); 1910 1911 if (tcf_proto_exists_destroying(chain, tp_new)) { 1912 mutex_unlock(&chain->filter_chain_lock); 1913 tcf_proto_destroy(tp_new, rtnl_held, false, NULL); 1914 return ERR_PTR(-EAGAIN); 1915 } 1916 1917 tp = tcf_chain_tp_find(chain, &chain_info, 1918 protocol, prio, false); 1919 if (!tp) 1920 err = tcf_chain_tp_insert(chain, &chain_info, tp_new); 1921 mutex_unlock(&chain->filter_chain_lock); 1922 1923 if (tp) { 1924 tcf_proto_destroy(tp_new, rtnl_held, false, NULL); 1925 tp_new = tp; 1926 } else if (err) { 1927 tcf_proto_destroy(tp_new, rtnl_held, false, NULL); 1928 tp_new = ERR_PTR(err); 1929 } 1930 1931 return tp_new; 1932 } 1933 1934 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain, 1935 struct tcf_proto *tp, bool rtnl_held, 1936 struct netlink_ext_ack *extack) 1937 { 1938 struct tcf_chain_info chain_info; 1939 struct tcf_proto *tp_iter; 1940 struct tcf_proto **pprev; 1941 struct tcf_proto *next; 1942 1943 mutex_lock(&chain->filter_chain_lock); 1944 1945 /* Atomically find and remove tp from chain. */ 1946 for (pprev = &chain->filter_chain; 1947 (tp_iter = tcf_chain_dereference(*pprev, chain)); 1948 pprev = &tp_iter->next) { 1949 if (tp_iter == tp) { 1950 chain_info.pprev = pprev; 1951 chain_info.next = tp_iter->next; 1952 WARN_ON(tp_iter->deleting); 1953 break; 1954 } 1955 } 1956 /* Verify that tp still exists and no new filters were inserted 1957 * concurrently. 1958 * Mark tp for deletion if it is empty. 1959 */ 1960 if (!tp_iter || !tcf_proto_check_delete(tp)) { 1961 mutex_unlock(&chain->filter_chain_lock); 1962 return; 1963 } 1964 1965 tcf_proto_signal_destroying(chain, tp); 1966 next = tcf_chain_dereference(chain_info.next, chain); 1967 if (tp == chain->filter_chain) 1968 tcf_chain0_head_change(chain, next); 1969 RCU_INIT_POINTER(*chain_info.pprev, next); 1970 mutex_unlock(&chain->filter_chain_lock); 1971 1972 tcf_proto_put(tp, rtnl_held, extack); 1973 } 1974 1975 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, 1976 struct tcf_chain_info *chain_info, 1977 u32 protocol, u32 prio, 1978 bool prio_allocate) 1979 { 1980 struct tcf_proto **pprev; 1981 struct tcf_proto *tp; 1982 1983 /* Check the chain for existence of proto-tcf with this priority */ 1984 for (pprev = &chain->filter_chain; 1985 (tp = tcf_chain_dereference(*pprev, chain)); 1986 pprev = &tp->next) { 1987 if (tp->prio >= prio) { 1988 if (tp->prio == prio) { 1989 if (prio_allocate || 1990 (tp->protocol != protocol && protocol)) 1991 return ERR_PTR(-EINVAL); 1992 } else { 1993 tp = NULL; 1994 } 1995 break; 1996 } 1997 } 1998 chain_info->pprev = pprev; 1999 if (tp) { 2000 chain_info->next = tp->next; 2001 tcf_proto_get(tp); 2002 } else { 2003 chain_info->next = NULL; 2004 } 2005 return tp; 2006 } 2007 2008 static int tcf_fill_node(struct net *net, struct sk_buff *skb, 2009 struct tcf_proto *tp, struct tcf_block *block, 2010 struct Qdisc *q, u32 parent, void *fh, 2011 u32 portid, u32 seq, u16 flags, int event, 2012 bool terse_dump, bool rtnl_held, 2013 struct netlink_ext_ack *extack) 2014 { 2015 struct tcmsg *tcm; 2016 struct nlmsghdr *nlh; 2017 unsigned char *b = skb_tail_pointer(skb); 2018 2019 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); 2020 if (!nlh) 2021 
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else if (terse_dump) {
		if (tp->ops->terse_dump) {
			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
						rtnl_held) < 0)
				goto nla_put_failure;
		} else {
			goto cls_op_not_supp;
		}
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}

	if (extack && extack->_msg &&
	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

out_nlmsg_trim:
nla_put_failure:
cls_op_not_supp:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
		return 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  false, rtnl_held, extack) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	return err;
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool *last, bool rtnl_held,
			      struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
NETLINK_CB(oskb).portid : 0; 2109 int err; 2110 2111 if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) 2112 return tp->ops->delete(tp, fh, last, rtnl_held, extack); 2113 2114 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2115 if (!skb) 2116 return -ENOBUFS; 2117 2118 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, 2119 n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER, 2120 false, rtnl_held, extack) <= 0) { 2121 NL_SET_ERR_MSG(extack, "Failed to build del event notification"); 2122 kfree_skb(skb); 2123 return -EINVAL; 2124 } 2125 2126 err = tp->ops->delete(tp, fh, last, rtnl_held, extack); 2127 if (err) { 2128 kfree_skb(skb); 2129 return err; 2130 } 2131 2132 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, 2133 n->nlmsg_flags & NLM_F_ECHO); 2134 if (err < 0) 2135 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification"); 2136 2137 return err; 2138 } 2139 2140 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb, 2141 struct tcf_block *block, struct Qdisc *q, 2142 u32 parent, struct nlmsghdr *n, 2143 struct tcf_chain *chain, int event, 2144 struct netlink_ext_ack *extack) 2145 { 2146 struct tcf_proto *tp; 2147 2148 for (tp = tcf_get_next_proto(chain, NULL); 2149 tp; tp = tcf_get_next_proto(chain, tp)) 2150 tfilter_notify(net, oskb, n, tp, block, q, parent, NULL, 2151 event, false, true, extack); 2152 } 2153 2154 static void tfilter_put(struct tcf_proto *tp, void *fh) 2155 { 2156 if (tp->ops->put && fh) 2157 tp->ops->put(tp, fh); 2158 } 2159 2160 static bool is_qdisc_ingress(__u32 classid) 2161 { 2162 return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS)); 2163 } 2164 2165 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 2166 struct netlink_ext_ack *extack) 2167 { 2168 struct net *net = sock_net(skb->sk); 2169 struct nlattr *tca[TCA_MAX + 1]; 2170 char name[IFNAMSIZ]; 2171 struct tcmsg *t; 2172 u32 protocol; 2173 u32 prio; 2174 bool prio_allocate; 2175 u32 parent; 2176 u32 chain_index; 2177 struct Qdisc *q; 2178 struct tcf_chain_info chain_info; 2179 struct tcf_chain *chain; 2180 struct tcf_block *block; 2181 struct tcf_proto *tp; 2182 unsigned long cl; 2183 void *fh; 2184 int err; 2185 int tp_created; 2186 bool rtnl_held = false; 2187 u32 flags; 2188 2189 replay: 2190 tp_created = 0; 2191 2192 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2193 rtm_tca_policy, extack); 2194 if (err < 0) 2195 return err; 2196 2197 t = nlmsg_data(n); 2198 protocol = TC_H_MIN(t->tcm_info); 2199 prio = TC_H_MAJ(t->tcm_info); 2200 prio_allocate = false; 2201 parent = t->tcm_parent; 2202 tp = NULL; 2203 cl = 0; 2204 block = NULL; 2205 q = NULL; 2206 chain = NULL; 2207 flags = 0; 2208 2209 if (prio == 0) { 2210 /* If no priority is provided by the user, 2211 * we allocate one. 2212 */ 2213 if (n->nlmsg_flags & NLM_F_CREATE) { 2214 prio = TC_H_MAKE(0x80000000U, 0U); 2215 prio_allocate = true; 2216 } else { 2217 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); 2218 return -ENOENT; 2219 } 2220 } 2221 2222 /* Find head of filter chain. 
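 * The qdisc is looked up first; the class and then the block are resolved
 * from it below (or from tcm_block_index when the filter lives on a
 * shared block).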
*/ 2223 2224 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); 2225 if (err) 2226 return err; 2227 2228 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2229 NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); 2230 err = -EINVAL; 2231 goto errout; 2232 } 2233 2234 /* Take rtnl mutex if rtnl_held was set to true on previous iteration, 2235 * block is shared (no qdisc found), qdisc is not unlocked, classifier 2236 * type is not specified, classifier is not unlocked. 2237 */ 2238 if (rtnl_held || 2239 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || 2240 !tcf_proto_is_unlocked(name)) { 2241 rtnl_held = true; 2242 rtnl_lock(); 2243 } 2244 2245 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); 2246 if (err) 2247 goto errout; 2248 2249 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, 2250 extack); 2251 if (IS_ERR(block)) { 2252 err = PTR_ERR(block); 2253 goto errout; 2254 } 2255 block->classid = parent; 2256 2257 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2258 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2259 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2260 err = -EINVAL; 2261 goto errout; 2262 } 2263 chain = tcf_chain_get(block, chain_index, true); 2264 if (!chain) { 2265 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain"); 2266 err = -ENOMEM; 2267 goto errout; 2268 } 2269 2270 mutex_lock(&chain->filter_chain_lock); 2271 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 2272 prio, prio_allocate); 2273 if (IS_ERR(tp)) { 2274 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); 2275 err = PTR_ERR(tp); 2276 goto errout_locked; 2277 } 2278 2279 if (tp == NULL) { 2280 struct tcf_proto *tp_new = NULL; 2281 2282 if (chain->flushing) { 2283 err = -EAGAIN; 2284 goto errout_locked; 2285 } 2286 2287 /* Proto-tcf does not exist, create new one */ 2288 2289 if (tca[TCA_KIND] == NULL || !protocol) { 2290 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified"); 2291 err = -EINVAL; 2292 goto errout_locked; 2293 } 2294 2295 if (!(n->nlmsg_flags & NLM_F_CREATE)) { 2296 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter"); 2297 err = -ENOENT; 2298 goto errout_locked; 2299 } 2300 2301 if (prio_allocate) 2302 prio = tcf_auto_prio(tcf_chain_tp_prev(chain, 2303 &chain_info)); 2304 2305 mutex_unlock(&chain->filter_chain_lock); 2306 tp_new = tcf_proto_create(name, protocol, prio, chain, 2307 rtnl_held, extack); 2308 if (IS_ERR(tp_new)) { 2309 err = PTR_ERR(tp_new); 2310 goto errout_tp; 2311 } 2312 2313 tp_created = 1; 2314 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio, 2315 rtnl_held); 2316 if (IS_ERR(tp)) { 2317 err = PTR_ERR(tp); 2318 goto errout_tp; 2319 } 2320 } else { 2321 mutex_unlock(&chain->filter_chain_lock); 2322 } 2323 2324 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 2325 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); 2326 err = -EINVAL; 2327 goto errout; 2328 } 2329 2330 fh = tp->ops->get(tp, t->tcm_handle); 2331 2332 if (!fh) { 2333 if (!(n->nlmsg_flags & NLM_F_CREATE)) { 2334 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter"); 2335 err = -ENOENT; 2336 goto errout; 2337 } 2338 } else if (n->nlmsg_flags & NLM_F_EXCL) { 2339 tfilter_put(tp, fh); 2340 NL_SET_ERR_MSG(extack, "Filter already exists"); 2341 err = -EEXIST; 2342 goto errout; 2343 } 2344 2345 if (chain->tmplt_ops && 
chain->tmplt_ops != tp->ops) { 2346 tfilter_put(tp, fh); 2347 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind"); 2348 err = -EINVAL; 2349 goto errout; 2350 } 2351 2352 if (!(n->nlmsg_flags & NLM_F_CREATE)) 2353 flags |= TCA_ACT_FLAGS_REPLACE; 2354 if (!rtnl_held) 2355 flags |= TCA_ACT_FLAGS_NO_RTNL; 2356 if (is_qdisc_ingress(parent)) 2357 flags |= TCA_ACT_FLAGS_AT_INGRESS; 2358 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh, 2359 flags, extack); 2360 if (err == 0) { 2361 tfilter_notify(net, skb, n, tp, block, q, parent, fh, 2362 RTM_NEWTFILTER, false, rtnl_held, extack); 2363 tfilter_put(tp, fh); 2364 /* q pointer is NULL for shared blocks */ 2365 if (q) 2366 q->flags &= ~TCQ_F_CAN_BYPASS; 2367 } 2368 2369 errout: 2370 if (err && tp_created) 2371 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL); 2372 errout_tp: 2373 if (chain) { 2374 if (tp && !IS_ERR(tp)) 2375 tcf_proto_put(tp, rtnl_held, NULL); 2376 if (!tp_created) 2377 tcf_chain_put(chain); 2378 } 2379 tcf_block_release(q, block, rtnl_held); 2380 2381 if (rtnl_held) 2382 rtnl_unlock(); 2383 2384 if (err == -EAGAIN) { 2385 /* Take rtnl lock in case EAGAIN is caused by concurrent flush 2386 * of target chain. 2387 */ 2388 rtnl_held = true; 2389 /* Replay the request. */ 2390 goto replay; 2391 } 2392 return err; 2393 2394 errout_locked: 2395 mutex_unlock(&chain->filter_chain_lock); 2396 goto errout; 2397 } 2398 2399 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 2400 struct netlink_ext_ack *extack) 2401 { 2402 struct net *net = sock_net(skb->sk); 2403 struct nlattr *tca[TCA_MAX + 1]; 2404 char name[IFNAMSIZ]; 2405 struct tcmsg *t; 2406 u32 protocol; 2407 u32 prio; 2408 u32 parent; 2409 u32 chain_index; 2410 struct Qdisc *q = NULL; 2411 struct tcf_chain_info chain_info; 2412 struct tcf_chain *chain = NULL; 2413 struct tcf_block *block = NULL; 2414 struct tcf_proto *tp = NULL; 2415 unsigned long cl = 0; 2416 void *fh = NULL; 2417 int err; 2418 bool rtnl_held = false; 2419 2420 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2421 rtm_tca_policy, extack); 2422 if (err < 0) 2423 return err; 2424 2425 t = nlmsg_data(n); 2426 protocol = TC_H_MIN(t->tcm_info); 2427 prio = TC_H_MAJ(t->tcm_info); 2428 parent = t->tcm_parent; 2429 2430 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) { 2431 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set"); 2432 return -ENOENT; 2433 } 2434 2435 /* Find head of filter chain. */ 2436 2437 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); 2438 if (err) 2439 return err; 2440 2441 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2442 NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); 2443 err = -EINVAL; 2444 goto errout; 2445 } 2446 /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc 2447 * found), qdisc is not unlocked, classifier type is not specified, 2448 * classifier is not unlocked. 2449 */ 2450 if (!prio || 2451 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || 2452 !tcf_proto_is_unlocked(name)) { 2453 rtnl_held = true; 2454 rtnl_lock(); 2455 } 2456 2457 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); 2458 if (err) 2459 goto errout; 2460 2461 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, 2462 extack); 2463 if (IS_ERR(block)) { 2464 err = PTR_ERR(block); 2465 goto errout; 2466 } 2467 2468 chain_index = tca[TCA_CHAIN] ? 
nla_get_u32(tca[TCA_CHAIN]) : 0; 2469 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2470 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2471 err = -EINVAL; 2472 goto errout; 2473 } 2474 chain = tcf_chain_get(block, chain_index, false); 2475 if (!chain) { 2476 /* User requested flush on non-existent chain. Nothing to do, 2477 * so just return success. 2478 */ 2479 if (prio == 0) { 2480 err = 0; 2481 goto errout; 2482 } 2483 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 2484 err = -ENOENT; 2485 goto errout; 2486 } 2487 2488 if (prio == 0) { 2489 tfilter_notify_chain(net, skb, block, q, parent, n, 2490 chain, RTM_DELTFILTER, extack); 2491 tcf_chain_flush(chain, rtnl_held); 2492 err = 0; 2493 goto errout; 2494 } 2495 2496 mutex_lock(&chain->filter_chain_lock); 2497 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 2498 prio, false); 2499 if (!tp || IS_ERR(tp)) { 2500 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); 2501 err = tp ? PTR_ERR(tp) : -ENOENT; 2502 goto errout_locked; 2503 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 2504 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); 2505 err = -EINVAL; 2506 goto errout_locked; 2507 } else if (t->tcm_handle == 0) { 2508 tcf_proto_signal_destroying(chain, tp); 2509 tcf_chain_tp_remove(chain, &chain_info, tp); 2510 mutex_unlock(&chain->filter_chain_lock); 2511 2512 tcf_proto_put(tp, rtnl_held, NULL); 2513 tfilter_notify(net, skb, n, tp, block, q, parent, fh, 2514 RTM_DELTFILTER, false, rtnl_held, extack); 2515 err = 0; 2516 goto errout; 2517 } 2518 mutex_unlock(&chain->filter_chain_lock); 2519 2520 fh = tp->ops->get(tp, t->tcm_handle); 2521 2522 if (!fh) { 2523 NL_SET_ERR_MSG(extack, "Specified filter handle not found"); 2524 err = -ENOENT; 2525 } else { 2526 bool last; 2527 2528 err = tfilter_del_notify(net, skb, n, tp, block, q, parent, fh, 2529 &last, rtnl_held, extack); 2530 2531 if (err) 2532 goto errout; 2533 if (last) 2534 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack); 2535 } 2536 2537 errout: 2538 if (chain) { 2539 if (tp && !IS_ERR(tp)) 2540 tcf_proto_put(tp, rtnl_held, NULL); 2541 tcf_chain_put(chain); 2542 } 2543 tcf_block_release(q, block, rtnl_held); 2544 2545 if (rtnl_held) 2546 rtnl_unlock(); 2547 2548 return err; 2549 2550 errout_locked: 2551 mutex_unlock(&chain->filter_chain_lock); 2552 goto errout; 2553 } 2554 2555 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 2556 struct netlink_ext_ack *extack) 2557 { 2558 struct net *net = sock_net(skb->sk); 2559 struct nlattr *tca[TCA_MAX + 1]; 2560 char name[IFNAMSIZ]; 2561 struct tcmsg *t; 2562 u32 protocol; 2563 u32 prio; 2564 u32 parent; 2565 u32 chain_index; 2566 struct Qdisc *q = NULL; 2567 struct tcf_chain_info chain_info; 2568 struct tcf_chain *chain = NULL; 2569 struct tcf_block *block = NULL; 2570 struct tcf_proto *tp = NULL; 2571 unsigned long cl = 0; 2572 void *fh = NULL; 2573 int err; 2574 bool rtnl_held = false; 2575 2576 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2577 rtm_tca_policy, extack); 2578 if (err < 0) 2579 return err; 2580 2581 t = nlmsg_data(n); 2582 protocol = TC_H_MIN(t->tcm_info); 2583 prio = TC_H_MAJ(t->tcm_info); 2584 parent = t->tcm_parent; 2585 2586 if (prio == 0) { 2587 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); 2588 return -ENOENT; 2589 } 2590 2591 /* Find head of filter chain. 
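 * The resolution order matches tc_new_tfilter(): qdisc first, then class,
 * then block.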
*/ 2592 2593 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); 2594 if (err) 2595 return err; 2596 2597 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2598 NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); 2599 err = -EINVAL; 2600 goto errout; 2601 } 2602 /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not 2603 * unlocked, classifier type is not specified, classifier is not 2604 * unlocked. 2605 */ 2606 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || 2607 !tcf_proto_is_unlocked(name)) { 2608 rtnl_held = true; 2609 rtnl_lock(); 2610 } 2611 2612 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); 2613 if (err) 2614 goto errout; 2615 2616 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, 2617 extack); 2618 if (IS_ERR(block)) { 2619 err = PTR_ERR(block); 2620 goto errout; 2621 } 2622 2623 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2624 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2625 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2626 err = -EINVAL; 2627 goto errout; 2628 } 2629 chain = tcf_chain_get(block, chain_index, false); 2630 if (!chain) { 2631 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 2632 err = -EINVAL; 2633 goto errout; 2634 } 2635 2636 mutex_lock(&chain->filter_chain_lock); 2637 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 2638 prio, false); 2639 mutex_unlock(&chain->filter_chain_lock); 2640 if (!tp || IS_ERR(tp)) { 2641 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); 2642 err = tp ? PTR_ERR(tp) : -ENOENT; 2643 goto errout; 2644 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 2645 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); 2646 err = -EINVAL; 2647 goto errout; 2648 } 2649 2650 fh = tp->ops->get(tp, t->tcm_handle); 2651 2652 if (!fh) { 2653 NL_SET_ERR_MSG(extack, "Specified filter handle not found"); 2654 err = -ENOENT; 2655 } else { 2656 err = tfilter_notify(net, skb, n, tp, block, q, parent, 2657 fh, RTM_NEWTFILTER, true, rtnl_held, NULL); 2658 if (err < 0) 2659 NL_SET_ERR_MSG(extack, "Failed to send filter notify message"); 2660 } 2661 2662 tfilter_put(tp, fh); 2663 errout: 2664 if (chain) { 2665 if (tp && !IS_ERR(tp)) 2666 tcf_proto_put(tp, rtnl_held, NULL); 2667 tcf_chain_put(chain); 2668 } 2669 tcf_block_release(q, block, rtnl_held); 2670 2671 if (rtnl_held) 2672 rtnl_unlock(); 2673 2674 return err; 2675 } 2676 2677 struct tcf_dump_args { 2678 struct tcf_walker w; 2679 struct sk_buff *skb; 2680 struct netlink_callback *cb; 2681 struct tcf_block *block; 2682 struct Qdisc *q; 2683 u32 parent; 2684 bool terse_dump; 2685 }; 2686 2687 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg) 2688 { 2689 struct tcf_dump_args *a = (void *)arg; 2690 struct net *net = sock_net(a->skb->sk); 2691 2692 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent, 2693 n, NETLINK_CB(a->cb->skb).portid, 2694 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, 2695 RTM_NEWTFILTER, a->terse_dump, true, NULL); 2696 } 2697 2698 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, 2699 struct sk_buff *skb, struct netlink_callback *cb, 2700 long index_start, long *p_index, bool terse) 2701 { 2702 struct net *net = sock_net(skb->sk); 2703 struct tcf_block *block = chain->block; 2704 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2705 struct tcf_proto *tp, *tp_prev; 2706 struct tcf_dump_args arg; 2707 
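/* Walk the protos with __tcf_get_next_proto()/tcf_proto_put() so each
 * proto stays referenced while it is dumped and is released once the
 * next one has been taken; cb->args[] keeps the resume state between
 * netlink dump calls.
 */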
2708 for (tp = __tcf_get_next_proto(chain, NULL); 2709 tp; 2710 tp_prev = tp, 2711 tp = __tcf_get_next_proto(chain, tp), 2712 tcf_proto_put(tp_prev, true, NULL), 2713 (*p_index)++) { 2714 if (*p_index < index_start) 2715 continue; 2716 if (TC_H_MAJ(tcm->tcm_info) && 2717 TC_H_MAJ(tcm->tcm_info) != tp->prio) 2718 continue; 2719 if (TC_H_MIN(tcm->tcm_info) && 2720 TC_H_MIN(tcm->tcm_info) != tp->protocol) 2721 continue; 2722 if (*p_index > index_start) 2723 memset(&cb->args[1], 0, 2724 sizeof(cb->args) - sizeof(cb->args[0])); 2725 if (cb->args[1] == 0) { 2726 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, 2727 NETLINK_CB(cb->skb).portid, 2728 cb->nlh->nlmsg_seq, NLM_F_MULTI, 2729 RTM_NEWTFILTER, false, true, NULL) <= 0) 2730 goto errout; 2731 cb->args[1] = 1; 2732 } 2733 if (!tp->ops->walk) 2734 continue; 2735 arg.w.fn = tcf_node_dump; 2736 arg.skb = skb; 2737 arg.cb = cb; 2738 arg.block = block; 2739 arg.q = q; 2740 arg.parent = parent; 2741 arg.w.stop = 0; 2742 arg.w.skip = cb->args[1] - 1; 2743 arg.w.count = 0; 2744 arg.w.cookie = cb->args[2]; 2745 arg.terse_dump = terse; 2746 tp->ops->walk(tp, &arg.w, true); 2747 cb->args[2] = arg.w.cookie; 2748 cb->args[1] = arg.w.count + 1; 2749 if (arg.w.stop) 2750 goto errout; 2751 } 2752 return true; 2753 2754 errout: 2755 tcf_proto_put(tp, true, NULL); 2756 return false; 2757 } 2758 2759 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = { 2760 [TCA_CHAIN] = { .type = NLA_U32 }, 2761 [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE), 2762 }; 2763 2764 /* called with RTNL */ 2765 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) 2766 { 2767 struct tcf_chain *chain, *chain_prev; 2768 struct net *net = sock_net(skb->sk); 2769 struct nlattr *tca[TCA_MAX + 1]; 2770 struct Qdisc *q = NULL; 2771 struct tcf_block *block; 2772 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2773 bool terse_dump = false; 2774 long index_start; 2775 long index; 2776 u32 parent; 2777 int err; 2778 2779 if (nlmsg_len(cb->nlh) < sizeof(*tcm)) 2780 return skb->len; 2781 2782 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, 2783 tcf_tfilter_dump_policy, cb->extack); 2784 if (err) 2785 return err; 2786 2787 if (tca[TCA_DUMP_FLAGS]) { 2788 struct nla_bitfield32 flags = 2789 nla_get_bitfield32(tca[TCA_DUMP_FLAGS]); 2790 2791 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE; 2792 } 2793 2794 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { 2795 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); 2796 if (!block) 2797 goto out; 2798 /* If we work with block index, q is NULL and parent value 2799 * will never be used in the following code. The check 2800 * in tcf_fill_node prevents it. However, compiler does not 2801 * see that far, so set parent to zero to silence the warning 2802 * about parent being uninitialized. 
2803 */ 2804 parent = 0; 2805 } else { 2806 const struct Qdisc_class_ops *cops; 2807 struct net_device *dev; 2808 unsigned long cl = 0; 2809 2810 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 2811 if (!dev) 2812 return skb->len; 2813 2814 parent = tcm->tcm_parent; 2815 if (!parent) 2816 q = rtnl_dereference(dev->qdisc); 2817 else 2818 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 2819 if (!q) 2820 goto out; 2821 cops = q->ops->cl_ops; 2822 if (!cops) 2823 goto out; 2824 if (!cops->tcf_block) 2825 goto out; 2826 if (TC_H_MIN(tcm->tcm_parent)) { 2827 cl = cops->find(q, tcm->tcm_parent); 2828 if (cl == 0) 2829 goto out; 2830 } 2831 block = cops->tcf_block(q, cl, NULL); 2832 if (!block) 2833 goto out; 2834 parent = block->classid; 2835 if (tcf_block_shared(block)) 2836 q = NULL; 2837 } 2838 2839 index_start = cb->args[0]; 2840 index = 0; 2841 2842 for (chain = __tcf_get_next_chain(block, NULL); 2843 chain; 2844 chain_prev = chain, 2845 chain = __tcf_get_next_chain(block, chain), 2846 tcf_chain_put(chain_prev)) { 2847 if (tca[TCA_CHAIN] && 2848 nla_get_u32(tca[TCA_CHAIN]) != chain->index) 2849 continue; 2850 if (!tcf_chain_dump(chain, q, parent, skb, cb, 2851 index_start, &index, terse_dump)) { 2852 tcf_chain_put(chain); 2853 err = -EMSGSIZE; 2854 break; 2855 } 2856 } 2857 2858 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) 2859 tcf_block_refcnt_put(block, true); 2860 cb->args[0] = index; 2861 2862 out: 2863 /* If we did no progress, the error (EMSGSIZE) is real */ 2864 if (skb->len == 0 && err) 2865 return err; 2866 return skb->len; 2867 } 2868 2869 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops, 2870 void *tmplt_priv, u32 chain_index, 2871 struct net *net, struct sk_buff *skb, 2872 struct tcf_block *block, 2873 u32 portid, u32 seq, u16 flags, int event, 2874 struct netlink_ext_ack *extack) 2875 { 2876 unsigned char *b = skb_tail_pointer(skb); 2877 const struct tcf_proto_ops *ops; 2878 struct nlmsghdr *nlh; 2879 struct tcmsg *tcm; 2880 void *priv; 2881 2882 ops = tmplt_ops; 2883 priv = tmplt_priv; 2884 2885 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); 2886 if (!nlh) 2887 goto out_nlmsg_trim; 2888 tcm = nlmsg_data(nlh); 2889 tcm->tcm_family = AF_UNSPEC; 2890 tcm->tcm__pad1 = 0; 2891 tcm->tcm__pad2 = 0; 2892 tcm->tcm_handle = 0; 2893 if (block->q) { 2894 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex; 2895 tcm->tcm_parent = block->q->handle; 2896 } else { 2897 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; 2898 tcm->tcm_block_index = block->index; 2899 } 2900 2901 if (nla_put_u32(skb, TCA_CHAIN, chain_index)) 2902 goto nla_put_failure; 2903 2904 if (ops) { 2905 if (nla_put_string(skb, TCA_KIND, ops->kind)) 2906 goto nla_put_failure; 2907 if (ops->tmplt_dump(skb, net, priv) < 0) 2908 goto nla_put_failure; 2909 } 2910 2911 if (extack && extack->_msg && 2912 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg)) 2913 goto out_nlmsg_trim; 2914 2915 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 2916 2917 return skb->len; 2918 2919 out_nlmsg_trim: 2920 nla_put_failure: 2921 nlmsg_trim(skb, b); 2922 return -EMSGSIZE; 2923 } 2924 2925 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, 2926 u32 seq, u16 flags, int event, bool unicast, 2927 struct netlink_ext_ack *extack) 2928 { 2929 u32 portid = oskb ? 
NETLINK_CB(oskb).portid : 0; 2930 struct tcf_block *block = chain->block; 2931 struct net *net = block->net; 2932 struct sk_buff *skb; 2933 int err = 0; 2934 2935 if (!unicast && !rtnl_notify_needed(net, flags, RTNLGRP_TC)) 2936 return 0; 2937 2938 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2939 if (!skb) 2940 return -ENOBUFS; 2941 2942 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, 2943 chain->index, net, skb, block, portid, 2944 seq, flags, event, extack) <= 0) { 2945 kfree_skb(skb); 2946 return -EINVAL; 2947 } 2948 2949 if (unicast) 2950 err = rtnl_unicast(skb, net, portid); 2951 else 2952 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, 2953 flags & NLM_F_ECHO); 2954 2955 return err; 2956 } 2957 2958 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops, 2959 void *tmplt_priv, u32 chain_index, 2960 struct tcf_block *block, struct sk_buff *oskb, 2961 u32 seq, u16 flags) 2962 { 2963 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 2964 struct net *net = block->net; 2965 struct sk_buff *skb; 2966 2967 if (!rtnl_notify_needed(net, flags, RTNLGRP_TC)) 2968 return 0; 2969 2970 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2971 if (!skb) 2972 return -ENOBUFS; 2973 2974 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb, 2975 block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) { 2976 kfree_skb(skb); 2977 return -EINVAL; 2978 } 2979 2980 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO); 2981 } 2982 2983 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net, 2984 struct nlattr **tca, 2985 struct netlink_ext_ack *extack) 2986 { 2987 const struct tcf_proto_ops *ops; 2988 char name[IFNAMSIZ]; 2989 void *tmplt_priv; 2990 2991 /* If kind is not set, the user did not specify a template. */ 2992 if (!tca[TCA_KIND]) 2993 return 0; 2994 2995 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2996 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long"); 2997 return -EINVAL; 2998 } 2999 3000 ops = tcf_proto_lookup_ops(name, true, extack); 3001 if (IS_ERR(ops)) 3002 return PTR_ERR(ops); 3003 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) { 3004 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier"); 3005 module_put(ops->owner); 3006 return -EOPNOTSUPP; 3007 } 3008 3009 tmplt_priv = ops->tmplt_create(net, chain, tca, extack); 3010 if (IS_ERR(tmplt_priv)) { 3011 module_put(ops->owner); 3012 return PTR_ERR(tmplt_priv); 3013 } 3014 chain->tmplt_ops = ops; 3015 chain->tmplt_priv = tmplt_priv; 3016 return 0; 3017 } 3018 3019 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops, 3020 void *tmplt_priv) 3021 { 3022 /* If template ops are not set, no work to do for us. 
*/ 3023 if (!tmplt_ops) 3024 return; 3025 3026 tmplt_ops->tmplt_destroy(tmplt_priv); 3027 module_put(tmplt_ops->owner); 3028 } 3029 3030 /* Add/delete/get a chain */ 3031 3032 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, 3033 struct netlink_ext_ack *extack) 3034 { 3035 struct net *net = sock_net(skb->sk); 3036 struct nlattr *tca[TCA_MAX + 1]; 3037 struct tcmsg *t; 3038 u32 parent; 3039 u32 chain_index; 3040 struct Qdisc *q; 3041 struct tcf_chain *chain; 3042 struct tcf_block *block; 3043 unsigned long cl; 3044 int err; 3045 3046 replay: 3047 q = NULL; 3048 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 3049 rtm_tca_policy, extack); 3050 if (err < 0) 3051 return err; 3052 3053 t = nlmsg_data(n); 3054 parent = t->tcm_parent; 3055 cl = 0; 3056 3057 block = tcf_block_find(net, &q, &parent, &cl, 3058 t->tcm_ifindex, t->tcm_block_index, extack); 3059 if (IS_ERR(block)) 3060 return PTR_ERR(block); 3061 3062 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 3063 if (chain_index > TC_ACT_EXT_VAL_MASK) { 3064 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 3065 err = -EINVAL; 3066 goto errout_block; 3067 } 3068 3069 mutex_lock(&block->lock); 3070 chain = tcf_chain_lookup(block, chain_index); 3071 if (n->nlmsg_type == RTM_NEWCHAIN) { 3072 if (chain) { 3073 if (tcf_chain_held_by_acts_only(chain)) { 3074 /* The chain exists only because there is 3075 * some action referencing it. 3076 */ 3077 tcf_chain_hold(chain); 3078 } else { 3079 NL_SET_ERR_MSG(extack, "Filter chain already exists"); 3080 err = -EEXIST; 3081 goto errout_block_locked; 3082 } 3083 } else { 3084 if (!(n->nlmsg_flags & NLM_F_CREATE)) { 3085 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain"); 3086 err = -ENOENT; 3087 goto errout_block_locked; 3088 } 3089 chain = tcf_chain_create(block, chain_index); 3090 if (!chain) { 3091 NL_SET_ERR_MSG(extack, "Failed to create filter chain"); 3092 err = -ENOMEM; 3093 goto errout_block_locked; 3094 } 3095 } 3096 } else { 3097 if (!chain || tcf_chain_held_by_acts_only(chain)) { 3098 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 3099 err = -EINVAL; 3100 goto errout_block_locked; 3101 } 3102 tcf_chain_hold(chain); 3103 } 3104 3105 if (n->nlmsg_type == RTM_NEWCHAIN) { 3106 /* Modifying chain requires holding parent block lock. In case 3107 * the chain was successfully added, take a reference to the 3108 * chain. This ensures that an empty chain does not disappear at 3109 * the end of this function. 3110 */ 3111 tcf_chain_hold(chain); 3112 chain->explicitly_created = true; 3113 } 3114 mutex_unlock(&block->lock); 3115 3116 switch (n->nlmsg_type) { 3117 case RTM_NEWCHAIN: 3118 err = tc_chain_tmplt_add(chain, net, tca, extack); 3119 if (err) { 3120 tcf_chain_put_explicitly_created(chain); 3121 goto errout; 3122 } 3123 3124 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, 3125 RTM_NEWCHAIN, false, extack); 3126 break; 3127 case RTM_DELCHAIN: 3128 tfilter_notify_chain(net, skb, block, q, parent, n, 3129 chain, RTM_DELTFILTER, extack); 3130 /* Flush the chain first as the user requested chain removal. */ 3131 tcf_chain_flush(chain, true); 3132 /* In case the chain was successfully deleted, put a reference 3133 * to the chain previously taken during addition. 
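 * This pairs with the reference taken when the chain was created via
 * RTM_NEWCHAIN above.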
3134 */ 3135 tcf_chain_put_explicitly_created(chain); 3136 break; 3137 case RTM_GETCHAIN: 3138 err = tc_chain_notify(chain, skb, n->nlmsg_seq, 3139 n->nlmsg_flags, n->nlmsg_type, true, extack); 3140 if (err < 0) 3141 NL_SET_ERR_MSG(extack, "Failed to send chain notify message"); 3142 break; 3143 default: 3144 err = -EOPNOTSUPP; 3145 NL_SET_ERR_MSG(extack, "Unsupported message type"); 3146 goto errout; 3147 } 3148 3149 errout: 3150 tcf_chain_put(chain); 3151 errout_block: 3152 tcf_block_release(q, block, true); 3153 if (err == -EAGAIN) 3154 /* Replay the request. */ 3155 goto replay; 3156 return err; 3157 3158 errout_block_locked: 3159 mutex_unlock(&block->lock); 3160 goto errout_block; 3161 } 3162 3163 /* called with RTNL */ 3164 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) 3165 { 3166 struct net *net = sock_net(skb->sk); 3167 struct nlattr *tca[TCA_MAX + 1]; 3168 struct Qdisc *q = NULL; 3169 struct tcf_block *block; 3170 struct tcmsg *tcm = nlmsg_data(cb->nlh); 3171 struct tcf_chain *chain; 3172 long index_start; 3173 long index; 3174 int err; 3175 3176 if (nlmsg_len(cb->nlh) < sizeof(*tcm)) 3177 return skb->len; 3178 3179 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, 3180 rtm_tca_policy, cb->extack); 3181 if (err) 3182 return err; 3183 3184 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { 3185 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); 3186 if (!block) 3187 goto out; 3188 } else { 3189 const struct Qdisc_class_ops *cops; 3190 struct net_device *dev; 3191 unsigned long cl = 0; 3192 3193 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 3194 if (!dev) 3195 return skb->len; 3196 3197 if (!tcm->tcm_parent) 3198 q = rtnl_dereference(dev->qdisc); 3199 else 3200 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 3201 3202 if (!q) 3203 goto out; 3204 cops = q->ops->cl_ops; 3205 if (!cops) 3206 goto out; 3207 if (!cops->tcf_block) 3208 goto out; 3209 if (TC_H_MIN(tcm->tcm_parent)) { 3210 cl = cops->find(q, tcm->tcm_parent); 3211 if (cl == 0) 3212 goto out; 3213 } 3214 block = cops->tcf_block(q, cl, NULL); 3215 if (!block) 3216 goto out; 3217 if (tcf_block_shared(block)) 3218 q = NULL; 3219 } 3220 3221 index_start = cb->args[0]; 3222 index = 0; 3223 3224 mutex_lock(&block->lock); 3225 list_for_each_entry(chain, &block->chain_list, list) { 3226 if ((tca[TCA_CHAIN] && 3227 nla_get_u32(tca[TCA_CHAIN]) != chain->index)) 3228 continue; 3229 if (index < index_start) { 3230 index++; 3231 continue; 3232 } 3233 if (tcf_chain_held_by_acts_only(chain)) 3234 continue; 3235 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, 3236 chain->index, net, skb, block, 3237 NETLINK_CB(cb->skb).portid, 3238 cb->nlh->nlmsg_seq, NLM_F_MULTI, 3239 RTM_NEWCHAIN, NULL); 3240 if (err <= 0) 3241 break; 3242 index++; 3243 } 3244 mutex_unlock(&block->lock); 3245 3246 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) 3247 tcf_block_refcnt_put(block, true); 3248 cb->args[0] = index; 3249 3250 out: 3251 /* If we did no progress, the error (EMSGSIZE) is real */ 3252 if (skb->len == 0 && err) 3253 return err; 3254 return skb->len; 3255 } 3256 3257 int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action, 3258 int police, struct tcf_proto *tp, u32 handle, 3259 bool use_action_miss) 3260 { 3261 int err = 0; 3262 3263 #ifdef CONFIG_NET_CLS_ACT 3264 exts->type = 0; 3265 exts->nr_actions = 0; 3266 exts->miss_cookie_node = NULL; 3267 /* Note: we do not own yet a reference on net. 3268 * This reference might be taken later from tcf_exts_get_net(). 
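 * (tcf_exts_destroy() below accordingly leaves ->net untouched.)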
3269 */ 3270 exts->net = net; 3271 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *), 3272 GFP_KERNEL); 3273 if (!exts->actions) 3274 return -ENOMEM; 3275 #endif 3276 3277 exts->action = action; 3278 exts->police = police; 3279 3280 if (!use_action_miss) 3281 return 0; 3282 3283 err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle); 3284 if (err) 3285 goto err_miss_alloc; 3286 3287 return 0; 3288 3289 err_miss_alloc: 3290 tcf_exts_destroy(exts); 3291 #ifdef CONFIG_NET_CLS_ACT 3292 exts->actions = NULL; 3293 #endif 3294 return err; 3295 } 3296 EXPORT_SYMBOL(tcf_exts_init_ex); 3297 3298 void tcf_exts_destroy(struct tcf_exts *exts) 3299 { 3300 tcf_exts_miss_cookie_base_destroy(exts); 3301 3302 #ifdef CONFIG_NET_CLS_ACT 3303 if (exts->actions) { 3304 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND); 3305 kfree(exts->actions); 3306 } 3307 exts->nr_actions = 0; 3308 #endif 3309 } 3310 EXPORT_SYMBOL(tcf_exts_destroy); 3311 3312 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb, 3313 struct nlattr *rate_tlv, struct tcf_exts *exts, 3314 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) 3315 { 3316 #ifdef CONFIG_NET_CLS_ACT 3317 { 3318 int init_res[TCA_ACT_MAX_PRIO] = {}; 3319 struct tc_action *act; 3320 size_t attr_size = 0; 3321 3322 if (exts->police && tb[exts->police]) { 3323 struct tc_action_ops *a_o; 3324 3325 flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND; 3326 a_o = tc_action_load_ops(tb[exts->police], flags, 3327 extack); 3328 if (IS_ERR(a_o)) 3329 return PTR_ERR(a_o); 3330 act = tcf_action_init_1(net, tp, tb[exts->police], 3331 rate_tlv, a_o, init_res, flags, 3332 extack); 3333 module_put(a_o->owner); 3334 if (IS_ERR(act)) 3335 return PTR_ERR(act); 3336 3337 act->type = exts->type = TCA_OLD_COMPAT; 3338 exts->actions[0] = act; 3339 exts->nr_actions = 1; 3340 tcf_idr_insert_many(exts->actions, init_res); 3341 } else if (exts->action && tb[exts->action]) { 3342 int err; 3343 3344 flags |= TCA_ACT_FLAGS_BIND; 3345 err = tcf_action_init(net, tp, tb[exts->action], 3346 rate_tlv, exts->actions, init_res, 3347 &attr_size, flags, fl_flags, 3348 extack); 3349 if (err < 0) 3350 return err; 3351 exts->nr_actions = err; 3352 } 3353 } 3354 #else 3355 if ((exts->action && tb[exts->action]) || 3356 (exts->police && tb[exts->police])) { 3357 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)"); 3358 return -EOPNOTSUPP; 3359 } 3360 #endif 3361 3362 return 0; 3363 } 3364 EXPORT_SYMBOL(tcf_exts_validate_ex); 3365 3366 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, 3367 struct nlattr *rate_tlv, struct tcf_exts *exts, 3368 u32 flags, struct netlink_ext_ack *extack) 3369 { 3370 return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts, 3371 flags, 0, extack); 3372 } 3373 EXPORT_SYMBOL(tcf_exts_validate); 3374 3375 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src) 3376 { 3377 #ifdef CONFIG_NET_CLS_ACT 3378 struct tcf_exts old = *dst; 3379 3380 *dst = *src; 3381 tcf_exts_destroy(&old); 3382 #endif 3383 } 3384 EXPORT_SYMBOL(tcf_exts_change); 3385 3386 #ifdef CONFIG_NET_CLS_ACT 3387 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts) 3388 { 3389 if (exts->nr_actions == 0) 3390 return NULL; 3391 else 3392 return exts->actions[0]; 3393 } 3394 #endif 3395 3396 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) 3397 { 3398 #ifdef CONFIG_NET_CLS_ACT 3399 struct nlattr *nest; 3400 3401 if (exts->action && tcf_exts_has_actions(exts)) { 
3402 /* 3403 * again for backward compatible mode - we want 3404 * to work with both old and new modes of entering 3405 * tc data even if iproute2 was newer - jhs 3406 */ 3407 if (exts->type != TCA_OLD_COMPAT) { 3408 nest = nla_nest_start_noflag(skb, exts->action); 3409 if (nest == NULL) 3410 goto nla_put_failure; 3411 3412 if (tcf_action_dump(skb, exts->actions, 0, 0, false) 3413 < 0) 3414 goto nla_put_failure; 3415 nla_nest_end(skb, nest); 3416 } else if (exts->police) { 3417 struct tc_action *act = tcf_exts_first_act(exts); 3418 nest = nla_nest_start_noflag(skb, exts->police); 3419 if (nest == NULL || !act) 3420 goto nla_put_failure; 3421 if (tcf_action_dump_old(skb, act, 0, 0) < 0) 3422 goto nla_put_failure; 3423 nla_nest_end(skb, nest); 3424 } 3425 } 3426 return 0; 3427 3428 nla_put_failure: 3429 nla_nest_cancel(skb, nest); 3430 return -1; 3431 #else 3432 return 0; 3433 #endif 3434 } 3435 EXPORT_SYMBOL(tcf_exts_dump); 3436 3437 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts) 3438 { 3439 #ifdef CONFIG_NET_CLS_ACT 3440 struct nlattr *nest; 3441 3442 if (!exts->action || !tcf_exts_has_actions(exts)) 3443 return 0; 3444 3445 nest = nla_nest_start_noflag(skb, exts->action); 3446 if (!nest) 3447 goto nla_put_failure; 3448 3449 if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0) 3450 goto nla_put_failure; 3451 nla_nest_end(skb, nest); 3452 return 0; 3453 3454 nla_put_failure: 3455 nla_nest_cancel(skb, nest); 3456 return -1; 3457 #else 3458 return 0; 3459 #endif 3460 } 3461 EXPORT_SYMBOL(tcf_exts_terse_dump); 3462 3463 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts) 3464 { 3465 #ifdef CONFIG_NET_CLS_ACT 3466 struct tc_action *a = tcf_exts_first_act(exts); 3467 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0) 3468 return -1; 3469 #endif 3470 return 0; 3471 } 3472 EXPORT_SYMBOL(tcf_exts_dump_stats); 3473 3474 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) 3475 { 3476 if (*flags & TCA_CLS_FLAGS_IN_HW) 3477 return; 3478 *flags |= TCA_CLS_FLAGS_IN_HW; 3479 atomic_inc(&block->offloadcnt); 3480 } 3481 3482 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) 3483 { 3484 if (!(*flags & TCA_CLS_FLAGS_IN_HW)) 3485 return; 3486 *flags &= ~TCA_CLS_FLAGS_IN_HW; 3487 atomic_dec(&block->offloadcnt); 3488 } 3489 3490 static void tc_cls_offload_cnt_update(struct tcf_block *block, 3491 struct tcf_proto *tp, u32 *cnt, 3492 u32 *flags, u32 diff, bool add) 3493 { 3494 lockdep_assert_held(&block->cb_lock); 3495 3496 spin_lock(&tp->lock); 3497 if (add) { 3498 if (!*cnt) 3499 tcf_block_offload_inc(block, flags); 3500 *cnt += diff; 3501 } else { 3502 *cnt -= diff; 3503 if (!*cnt) 3504 tcf_block_offload_dec(block, flags); 3505 } 3506 spin_unlock(&tp->lock); 3507 } 3508 3509 static void 3510 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp, 3511 u32 *cnt, u32 *flags) 3512 { 3513 lockdep_assert_held(&block->cb_lock); 3514 3515 spin_lock(&tp->lock); 3516 tcf_block_offload_dec(block, flags); 3517 *cnt = 0; 3518 spin_unlock(&tp->lock); 3519 } 3520 3521 static int 3522 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, 3523 void *type_data, bool err_stop) 3524 { 3525 struct flow_block_cb *block_cb; 3526 int ok_count = 0; 3527 int err; 3528 3529 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) { 3530 err = block_cb->cb(type, type_data, block_cb->cb_priv); 3531 if (err) { 3532 if (err_stop) 3533 return err; 3534 } else { 3535 ok_count++; 3536 } 3537 } 3538 return ok_count; 3539 } 
3540 3541 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, 3542 void *type_data, bool err_stop, bool rtnl_held) 3543 { 3544 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3545 int ok_count; 3546 3547 retry: 3548 if (take_rtnl) 3549 rtnl_lock(); 3550 down_read(&block->cb_lock); 3551 /* Need to obtain rtnl lock if block is bound to devs that require it. 3552 * In block bind code cb_lock is obtained while holding rtnl, so we must 3553 * obtain the locks in same order here. 3554 */ 3555 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3556 up_read(&block->cb_lock); 3557 take_rtnl = true; 3558 goto retry; 3559 } 3560 3561 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3562 3563 up_read(&block->cb_lock); 3564 if (take_rtnl) 3565 rtnl_unlock(); 3566 return ok_count; 3567 } 3568 EXPORT_SYMBOL(tc_setup_cb_call); 3569 3570 /* Non-destructive filter add. If filter that wasn't already in hardware is 3571 * successfully offloaded, increment block offloads counter. On failure, 3572 * previously offloaded filter is considered to be intact and offloads counter 3573 * is not decremented. 3574 */ 3575 3576 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp, 3577 enum tc_setup_type type, void *type_data, bool err_stop, 3578 u32 *flags, unsigned int *in_hw_count, bool rtnl_held) 3579 { 3580 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3581 int ok_count; 3582 3583 retry: 3584 if (take_rtnl) 3585 rtnl_lock(); 3586 down_read(&block->cb_lock); 3587 /* Need to obtain rtnl lock if block is bound to devs that require it. 3588 * In block bind code cb_lock is obtained while holding rtnl, so we must 3589 * obtain the locks in same order here. 3590 */ 3591 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3592 up_read(&block->cb_lock); 3593 take_rtnl = true; 3594 goto retry; 3595 } 3596 3597 /* Make sure all netdevs sharing this block are offload-capable. */ 3598 if (block->nooffloaddevcnt && err_stop) { 3599 ok_count = -EOPNOTSUPP; 3600 goto err_unlock; 3601 } 3602 3603 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3604 if (ok_count < 0) 3605 goto err_unlock; 3606 3607 if (tp->ops->hw_add) 3608 tp->ops->hw_add(tp, type_data); 3609 if (ok_count > 0) 3610 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 3611 ok_count, true); 3612 err_unlock: 3613 up_read(&block->cb_lock); 3614 if (take_rtnl) 3615 rtnl_unlock(); 3616 return min(ok_count, 0); 3617 } 3618 EXPORT_SYMBOL(tc_setup_cb_add); 3619 3620 /* Destructive filter replace. If filter that wasn't already in hardware is 3621 * successfully offloaded, increment block offload counter. On failure, 3622 * previously offloaded filter is considered to be destroyed and offload counter 3623 * is decremented. 3624 */ 3625 3626 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp, 3627 enum tc_setup_type type, void *type_data, bool err_stop, 3628 u32 *old_flags, unsigned int *old_in_hw_count, 3629 u32 *new_flags, unsigned int *new_in_hw_count, 3630 bool rtnl_held) 3631 { 3632 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3633 int ok_count; 3634 3635 retry: 3636 if (take_rtnl) 3637 rtnl_lock(); 3638 down_read(&block->cb_lock); 3639 /* Need to obtain rtnl lock if block is bound to devs that require it. 3640 * In block bind code cb_lock is obtained while holding rtnl, so we must 3641 * obtain the locks in same order here. 
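 * If the block gained such a device after take_rtnl was computed, drop
 * cb_lock and retry with rtnl taken first.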
3642 */ 3643 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3644 up_read(&block->cb_lock); 3645 take_rtnl = true; 3646 goto retry; 3647 } 3648 3649 /* Make sure all netdevs sharing this block are offload-capable. */ 3650 if (block->nooffloaddevcnt && err_stop) { 3651 ok_count = -EOPNOTSUPP; 3652 goto err_unlock; 3653 } 3654 3655 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags); 3656 if (tp->ops->hw_del) 3657 tp->ops->hw_del(tp, type_data); 3658 3659 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3660 if (ok_count < 0) 3661 goto err_unlock; 3662 3663 if (tp->ops->hw_add) 3664 tp->ops->hw_add(tp, type_data); 3665 if (ok_count > 0) 3666 tc_cls_offload_cnt_update(block, tp, new_in_hw_count, 3667 new_flags, ok_count, true); 3668 err_unlock: 3669 up_read(&block->cb_lock); 3670 if (take_rtnl) 3671 rtnl_unlock(); 3672 return min(ok_count, 0); 3673 } 3674 EXPORT_SYMBOL(tc_setup_cb_replace); 3675 3676 /* Destroy filter and decrement block offload counter, if filter was previously 3677 * offloaded. 3678 */ 3679 3680 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp, 3681 enum tc_setup_type type, void *type_data, bool err_stop, 3682 u32 *flags, unsigned int *in_hw_count, bool rtnl_held) 3683 { 3684 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3685 int ok_count; 3686 3687 retry: 3688 if (take_rtnl) 3689 rtnl_lock(); 3690 down_read(&block->cb_lock); 3691 /* Need to obtain rtnl lock if block is bound to devs that require it. 3692 * In block bind code cb_lock is obtained while holding rtnl, so we must 3693 * obtain the locks in same order here. 3694 */ 3695 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3696 up_read(&block->cb_lock); 3697 take_rtnl = true; 3698 goto retry; 3699 } 3700 3701 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3702 3703 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags); 3704 if (tp->ops->hw_del) 3705 tp->ops->hw_del(tp, type_data); 3706 3707 up_read(&block->cb_lock); 3708 if (take_rtnl) 3709 rtnl_unlock(); 3710 return min(ok_count, 0); 3711 } 3712 EXPORT_SYMBOL(tc_setup_cb_destroy); 3713 3714 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, 3715 bool add, flow_setup_cb_t *cb, 3716 enum tc_setup_type type, void *type_data, 3717 void *cb_priv, u32 *flags, unsigned int *in_hw_count) 3718 { 3719 int err = cb(type, type_data, cb_priv); 3720 3721 if (err) { 3722 if (add && tc_skip_sw(*flags)) 3723 return err; 3724 } else { 3725 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1, 3726 add); 3727 } 3728 3729 return 0; 3730 } 3731 EXPORT_SYMBOL(tc_setup_cb_reoffload); 3732 3733 static int tcf_act_get_user_cookie(struct flow_action_entry *entry, 3734 const struct tc_action *act) 3735 { 3736 struct tc_cookie *user_cookie; 3737 int err = 0; 3738 3739 rcu_read_lock(); 3740 user_cookie = rcu_dereference(act->user_cookie); 3741 if (user_cookie) { 3742 entry->user_cookie = flow_action_cookie_create(user_cookie->data, 3743 user_cookie->len, 3744 GFP_ATOMIC); 3745 if (!entry->user_cookie) 3746 err = -ENOMEM; 3747 } 3748 rcu_read_unlock(); 3749 return err; 3750 } 3751 3752 static void tcf_act_put_user_cookie(struct flow_action_entry *entry) 3753 { 3754 flow_action_cookie_destroy(entry->user_cookie); 3755 } 3756 3757 void tc_cleanup_offload_action(struct flow_action *flow_action) 3758 { 3759 struct flow_action_entry *entry; 3760 int i; 3761 3762 flow_action_for_each(i, entry, flow_action) { 3763 tcf_act_put_user_cookie(entry); 3764 if (entry->destructor) 3765 
entry->destructor(entry->destructor_priv); 3766 } 3767 } 3768 EXPORT_SYMBOL(tc_cleanup_offload_action); 3769 3770 static int tc_setup_offload_act(struct tc_action *act, 3771 struct flow_action_entry *entry, 3772 u32 *index_inc, 3773 struct netlink_ext_ack *extack) 3774 { 3775 #ifdef CONFIG_NET_CLS_ACT 3776 if (act->ops->offload_act_setup) { 3777 return act->ops->offload_act_setup(act, entry, index_inc, true, 3778 extack); 3779 } else { 3780 NL_SET_ERR_MSG(extack, "Action does not support offload"); 3781 return -EOPNOTSUPP; 3782 } 3783 #else 3784 return 0; 3785 #endif 3786 } 3787 3788 int tc_setup_action(struct flow_action *flow_action, 3789 struct tc_action *actions[], 3790 u32 miss_cookie_base, 3791 struct netlink_ext_ack *extack) 3792 { 3793 int i, j, k, index, err = 0; 3794 struct tc_action *act; 3795 3796 BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY); 3797 BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE); 3798 BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED); 3799 3800 if (!actions) 3801 return 0; 3802 3803 j = 0; 3804 tcf_act_for_each_action(i, act, actions) { 3805 struct flow_action_entry *entry; 3806 3807 entry = &flow_action->entries[j]; 3808 spin_lock_bh(&act->tcfa_lock); 3809 err = tcf_act_get_user_cookie(entry, act); 3810 if (err) 3811 goto err_out_locked; 3812 3813 index = 0; 3814 err = tc_setup_offload_act(act, entry, &index, extack); 3815 if (err) 3816 goto err_out_locked; 3817 3818 for (k = 0; k < index ; k++) { 3819 entry[k].hw_stats = tc_act_hw_stats(act->hw_stats); 3820 entry[k].hw_index = act->tcfa_index; 3821 entry[k].cookie = (unsigned long)act; 3822 entry[k].miss_cookie = 3823 tcf_exts_miss_cookie_get(miss_cookie_base, i); 3824 } 3825 3826 j += index; 3827 3828 spin_unlock_bh(&act->tcfa_lock); 3829 } 3830 3831 err_out: 3832 if (err) 3833 tc_cleanup_offload_action(flow_action); 3834 3835 return err; 3836 err_out_locked: 3837 spin_unlock_bh(&act->tcfa_lock); 3838 goto err_out; 3839 } 3840 3841 int tc_setup_offload_action(struct flow_action *flow_action, 3842 const struct tcf_exts *exts, 3843 struct netlink_ext_ack *extack) 3844 { 3845 #ifdef CONFIG_NET_CLS_ACT 3846 u32 miss_cookie_base; 3847 3848 if (!exts) 3849 return 0; 3850 3851 miss_cookie_base = exts->miss_cookie_node ? 
3852 exts->miss_cookie_node->miss_cookie_base : 0; 3853 return tc_setup_action(flow_action, exts->actions, miss_cookie_base, 3854 extack); 3855 #else 3856 return 0; 3857 #endif 3858 } 3859 EXPORT_SYMBOL(tc_setup_offload_action); 3860 3861 unsigned int tcf_exts_num_actions(struct tcf_exts *exts) 3862 { 3863 unsigned int num_acts = 0; 3864 struct tc_action *act; 3865 int i; 3866 3867 tcf_exts_for_each_action(i, act, exts) { 3868 if (is_tcf_pedit(act)) 3869 num_acts += tcf_pedit_nkeys(act); 3870 else 3871 num_acts++; 3872 } 3873 return num_acts; 3874 } 3875 EXPORT_SYMBOL(tcf_exts_num_actions); 3876 3877 #ifdef CONFIG_NET_CLS_ACT 3878 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr, 3879 u32 *p_block_index, 3880 struct netlink_ext_ack *extack) 3881 { 3882 *p_block_index = nla_get_u32(block_index_attr); 3883 if (!*p_block_index) { 3884 NL_SET_ERR_MSG(extack, "Block number may not be zero"); 3885 return -EINVAL; 3886 } 3887 3888 return 0; 3889 } 3890 3891 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch, 3892 enum flow_block_binder_type binder_type, 3893 struct nlattr *block_index_attr, 3894 struct netlink_ext_ack *extack) 3895 { 3896 u32 block_index; 3897 int err; 3898 3899 if (!block_index_attr) 3900 return 0; 3901 3902 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack); 3903 if (err) 3904 return err; 3905 3906 qe->info.binder_type = binder_type; 3907 qe->info.chain_head_change = tcf_chain_head_change_dflt; 3908 qe->info.chain_head_change_priv = &qe->filter_chain; 3909 qe->info.block_index = block_index; 3910 3911 return tcf_block_get_ext(&qe->block, sch, &qe->info, extack); 3912 } 3913 EXPORT_SYMBOL(tcf_qevent_init); 3914 3915 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch) 3916 { 3917 if (qe->info.block_index) 3918 tcf_block_put_ext(qe->block, sch, &qe->info); 3919 } 3920 EXPORT_SYMBOL(tcf_qevent_destroy); 3921 3922 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr, 3923 struct netlink_ext_ack *extack) 3924 { 3925 u32 block_index; 3926 int err; 3927 3928 if (!block_index_attr) 3929 return 0; 3930 3931 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack); 3932 if (err) 3933 return err; 3934 3935 /* Bounce newly-configured block or change in block. 
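 * A qevent block can only be set up at init time; requests that would
 * change the block index are rejected just below.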
*/ 3936 if (block_index != qe->info.block_index) { 3937 NL_SET_ERR_MSG(extack, "Change of blocks is not supported"); 3938 return -EINVAL; 3939 } 3940 3941 return 0; 3942 } 3943 EXPORT_SYMBOL(tcf_qevent_validate_change); 3944 3945 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, 3946 struct sk_buff **to_free, int *ret) 3947 { 3948 struct tcf_result cl_res; 3949 struct tcf_proto *fl; 3950 3951 if (!qe->info.block_index) 3952 return skb; 3953 3954 fl = rcu_dereference_bh(qe->filter_chain); 3955 3956 switch (tcf_classify(skb, NULL, fl, &cl_res, false)) { 3957 case TC_ACT_SHOT: 3958 qdisc_qstats_drop(sch); 3959 __qdisc_drop(skb, to_free); 3960 *ret = __NET_XMIT_BYPASS; 3961 return NULL; 3962 case TC_ACT_STOLEN: 3963 case TC_ACT_QUEUED: 3964 case TC_ACT_TRAP: 3965 __qdisc_drop(skb, to_free); 3966 *ret = __NET_XMIT_STOLEN; 3967 return NULL; 3968 case TC_ACT_REDIRECT: 3969 skb_do_redirect(skb); 3970 *ret = __NET_XMIT_STOLEN; 3971 return NULL; 3972 } 3973 3974 return skb; 3975 } 3976 EXPORT_SYMBOL(tcf_qevent_handle); 3977 3978 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe) 3979 { 3980 if (!qe->info.block_index) 3981 return 0; 3982 return nla_put_u32(skb, attr_name, qe->info.block_index); 3983 } 3984 EXPORT_SYMBOL(tcf_qevent_dump); 3985 #endif 3986 3987 static __net_init int tcf_net_init(struct net *net) 3988 { 3989 struct tcf_net *tn = net_generic(net, tcf_net_id); 3990 3991 spin_lock_init(&tn->idr_lock); 3992 idr_init(&tn->idr); 3993 return 0; 3994 } 3995 3996 static void __net_exit tcf_net_exit(struct net *net) 3997 { 3998 struct tcf_net *tn = net_generic(net, tcf_net_id); 3999 4000 idr_destroy(&tn->idr); 4001 } 4002 4003 static struct pernet_operations tcf_net_ops = { 4004 .init = tcf_net_init, 4005 .exit = tcf_net_exit, 4006 .id = &tcf_net_id, 4007 .size = sizeof(struct tcf_net), 4008 }; 4009 4010 static int __init tc_filter_init(void) 4011 { 4012 int err; 4013 4014 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0); 4015 if (!tc_filter_wq) 4016 return -ENOMEM; 4017 4018 err = register_pernet_subsys(&tcf_net_ops); 4019 if (err) 4020 goto err_register_pernet_subsys; 4021 4022 xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1); 4023 4024 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 4025 RTNL_FLAG_DOIT_UNLOCKED); 4026 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 4027 RTNL_FLAG_DOIT_UNLOCKED); 4028 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter, 4029 tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED); 4030 rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0); 4031 rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0); 4032 rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain, 4033 tc_dump_chain, 0); 4034 4035 return 0; 4036 4037 err_register_pernet_subsys: 4038 destroy_workqueue(tc_filter_wq); 4039 return err; 4040 } 4041 4042 subsys_initcall(tc_filter_init); 4043
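/* Editorial sketch (illustrative only, not compiled): how a classifier's
 * ->change() implementation typically drives the tcf_exts helpers
 * exported above. "struct my_filter", TCA_MYCLS_ACT and TCA_MYCLS_POLICE
 * are hypothetical placeholders, and tb[]/est come from the classifier's
 * own netlink parsing; see cls_u32.c or cls_flower.c for real callers.
 *
 *	struct my_filter *f;
 *	int err;
 *
 *	err = tcf_exts_init_ex(&f->exts, net, TCA_MYCLS_ACT,
 *			       TCA_MYCLS_POLICE, tp, handle, false);
 *	if (err)
 *		return err;
 *	err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags,
 *				   fl_flags, extack);
 *	if (err) {
 *		tcf_exts_destroy(&f->exts);
 *		return err;
 *	}
 */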