// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static struct xarray tcf_exts_miss_cookies_xa;
struct tcf_exts_miss_cookie_node {
	const struct tcf_chain *chain;
	const struct tcf_proto *tp;
	const struct tcf_exts *exts;
	u32 chain_index;
	u32 tp_prio;
	u32 handle;
	u32 miss_cookie_base;
	struct rcu_head rcu;
};

/* Each tc action entry cookie is composed of a 32-bit miss_cookie_base plus
 * the action index in the exts tc actions array.
 */
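/* Layout sketch: on a little-endian host the 64-bit cookie packs as
 * ((u64)act_index << 32) | miss_cookie_base, so e.g. a miss_cookie of
 * 0x0000000200000007 decodes to miss_cookie_base 7 and act_index 2;
 * the exact byte order follows the host endianness of the union below.
 */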
union tcf_exts_miss_cookie {
	struct {
		u32 miss_cookie_base;
		u32 act_index;
	};
	u64 miss_cookie;
};

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	struct tcf_exts_miss_cookie_node *n;
	static u32 next;
	int err;

	if (WARN_ON(!handle || !tp->ops->get_exts))
		return -EINVAL;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->chain_index = tp->chain->index;
	n->chain = tp->chain;
	n->tp_prio = tp->prio;
	n->tp = tp;
	n->exts = exts;
	n->handle = handle;

	err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
			      n, xa_limit_32b, &next, GFP_KERNEL);
	if (err)
		goto err_xa_alloc;

	exts->miss_cookie_node = n;
	return 0;

err_xa_alloc:
	kfree(n);
	return err;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
	struct tcf_exts_miss_cookie_node *n;

	if (!exts->miss_cookie_node)
		return;

	n = exts->miss_cookie_node;
	xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base);
	kfree_rcu(n, rcu);
}

static struct tcf_exts_miss_cookie_node *
tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index)
{
	union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, };

	*act_index = mc.act_index;
	return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base);
}
#else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */
static int
tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
				u32 handle)
{
	return 0;
}

static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts)
{
}
#endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */

static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index)
{
	union tcf_exts_miss_cookie mc = { .act_index = act_index, };

	if (!miss_cookie_base)
		return 0;

	mc.miss_cookie_base = miss_cookie_base;
	return mc.miss_cookie;
}

#ifdef CONFIG_NET_CLS_ACT
DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc);
EXPORT_SYMBOL(tc_skb_ext_tc);

void tc_skb_ext_tc_enable(void)
{
	static_branch_inc(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_enable);

void tc_skb_ext_tc_disable(void)
{
	static_branch_dec(&tc_skb_ext_tc);
}
EXPORT_SYMBOL(tc_skb_ext_tc_disable);
#endif

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}
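/* A tcf_proto being torn down is parked in the block's proto_destroy
 * hashtable, keyed by the (chain index, prio, protocol) triple, so that
 * a concurrent insert of an identical triple can detect the in-flight
 * destruction and back off with -EAGAIN; see
 * tcf_chain_tp_insert_unique() further below.
 */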
static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

void unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);

	WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc);
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range, managed by kernel. */
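/* Worked example: with an empty chain the default major is used, so the
 * auto-allocated prio is TC_H_MAJ(0xC0000000) = 0xC0000000 (user-visible
 * prio 0xC000). If the current head has prio 0xC0000000, the next auto
 * prio is TC_H_MAJ(0xC0000000 - 1) = 0xBFFF0000, i.e. one step below it.
 */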
static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}
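/* Lifetime note: a tp is found under chain->filter_chain_lock, pinned
 * with tcf_proto_get() and then used with the lock dropped; the
 * tp->deleting flag lets __tcf_get_next_proto() detect that the tp it
 * holds was unlinked while the lock was dropped and restart the walk
 * from the chain head at the next priority.
 */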
#define ASSERT_BLOCK_LOCKED(block)		\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}
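/* Example: a chain instantiated via tcf_chain_get_by_act() starts with
 * refcnt == action_refcnt == 1, so it is held by actions only and stays
 * hidden from dumps until the first non-action (filter or user)
 * reference is taken.
 */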
static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast,
			   struct netlink_ext_ack *extack);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false, NULL);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	unsigned int refcnt, non_act_refcnt;
	bool free_block = false;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
679 */ 680 refcnt = --chain->refcnt; 681 non_act_refcnt = refcnt - chain->action_refcnt; 682 tmplt_ops = chain->tmplt_ops; 683 tmplt_priv = chain->tmplt_priv; 684 685 if (non_act_refcnt == chain->explicitly_created && !by_act) { 686 if (non_act_refcnt == 0) 687 tc_chain_notify_delete(tmplt_ops, tmplt_priv, 688 chain->index, block, NULL, 0, 0); 689 /* Last reference to chain, no need to lock. */ 690 chain->flushing = false; 691 } 692 693 if (refcnt == 0) 694 free_block = tcf_chain_detach(chain); 695 mutex_unlock(&block->lock); 696 697 if (refcnt == 0) { 698 tc_chain_tmplt_del(tmplt_ops, tmplt_priv); 699 tcf_chain_destroy(chain, free_block); 700 } 701 } 702 703 static void tcf_chain_put(struct tcf_chain *chain) 704 { 705 __tcf_chain_put(chain, false, false); 706 } 707 708 void tcf_chain_put_by_act(struct tcf_chain *chain) 709 { 710 __tcf_chain_put(chain, true, false); 711 } 712 EXPORT_SYMBOL(tcf_chain_put_by_act); 713 714 static void tcf_chain_put_explicitly_created(struct tcf_chain *chain) 715 { 716 __tcf_chain_put(chain, false, true); 717 } 718 719 static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held) 720 { 721 struct tcf_proto *tp, *tp_next; 722 723 mutex_lock(&chain->filter_chain_lock); 724 tp = tcf_chain_dereference(chain->filter_chain, chain); 725 while (tp) { 726 tp_next = rcu_dereference_protected(tp->next, 1); 727 tcf_proto_signal_destroying(chain, tp); 728 tp = tp_next; 729 } 730 tp = tcf_chain_dereference(chain->filter_chain, chain); 731 RCU_INIT_POINTER(chain->filter_chain, NULL); 732 tcf_chain0_head_change(chain, NULL); 733 chain->flushing = true; 734 mutex_unlock(&chain->filter_chain_lock); 735 736 while (tp) { 737 tp_next = rcu_dereference_protected(tp->next, 1); 738 tcf_proto_put(tp, rtnl_held, NULL); 739 tp = tp_next; 740 } 741 } 742 743 static int tcf_block_setup(struct tcf_block *block, 744 struct flow_block_offload *bo); 745 746 static void tcf_block_offload_init(struct flow_block_offload *bo, 747 struct net_device *dev, struct Qdisc *sch, 748 enum flow_block_command command, 749 enum flow_block_binder_type binder_type, 750 struct flow_block *flow_block, 751 bool shared, struct netlink_ext_ack *extack) 752 { 753 bo->net = dev_net(dev); 754 bo->command = command; 755 bo->binder_type = binder_type; 756 bo->block = flow_block; 757 bo->block_shared = shared; 758 bo->extack = extack; 759 bo->sch = sch; 760 bo->cb_list_head = &flow_block->cb_list; 761 INIT_LIST_HEAD(&bo->cb_list); 762 } 763 764 static void tcf_block_unbind(struct tcf_block *block, 765 struct flow_block_offload *bo); 766 767 static void tc_block_indr_cleanup(struct flow_block_cb *block_cb) 768 { 769 struct tcf_block *block = block_cb->indr.data; 770 struct net_device *dev = block_cb->indr.dev; 771 struct Qdisc *sch = block_cb->indr.sch; 772 struct netlink_ext_ack extack = {}; 773 struct flow_block_offload bo = {}; 774 775 tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND, 776 block_cb->indr.binder_type, 777 &block->flow_block, tcf_block_shared(block), 778 &extack); 779 rtnl_lock(); 780 down_write(&block->cb_lock); 781 list_del(&block_cb->driver_list); 782 list_move(&block_cb->list, &bo.cb_list); 783 tcf_block_unbind(block, &bo); 784 up_write(&block->cb_lock); 785 rtnl_unlock(); 786 } 787 788 static bool tcf_block_offload_in_use(struct tcf_block *block) 789 { 790 return atomic_read(&block->offloadcnt); 791 } 792 793 static int tcf_block_offload_cmd(struct tcf_block *block, 794 struct net_device *dev, struct Qdisc *sch, 795 struct tcf_block_ext_info *ei, 796 enum flow_block_command command, 
static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}
static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}
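/* This is the usual RCU lookup idiom: find the object under
 * rcu_read_lock(), then try refcount_inc_not_zero() to pin it. A failed
 * increment means the last reference was already dropped and the block
 * is on its way to being freed, so the lookup reports no block.
 */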
static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */
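/* Request routing convention: ifindex == TCM_IFINDEX_MAGIC_BLOCK means
 * the netlink request addresses a shared block directly by block index,
 * so the qdisc/class lookup below is skipped entirely and the block is
 * resolved via tcf_block_refcnt_get() instead.
 */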
static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = rcu_dereference(dev->qdisc);
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. The 'if'
		 * branch of this conditional obtains a reference to the block
		 * by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);
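/* Each qdisc bound to a (possibly shared) block is recorded as an owner
 * item on block->owner_list, which is what lets
 * tcf_block_netif_keep_dst() above replay the keep_dst setting across
 * every device already attached to the block.
 */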
static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->driver_list);
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}
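/* Bind and unbind are symmetric around the same playback helper: on
 * bind, every filter already present on the block is replayed to the
 * new callback via ->reoffload(), and a mid-list failure unwinds the
 * callbacks that were already installed; on unbind the same playback
 * runs with add == false to remove the offloaded entries.
 */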
static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 struct tcf_exts_miss_cookie_node *n,
				 int act_index,
				 u32 *last_executed_chain)
{
	u32 orig_reason = res->drop_reason;
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err = 0;

		if (n) {
			struct tcf_exts *exts;

			if (n->tp_prio != tp->prio)
				continue;

			/* We re-lookup the tp and chain based on index instead
			 * of having hard refs and locks to them, so do a sanity
			 * check if any of tp,chain,exts was replaced by the
			 * time we got here with a cookie from hardware.
			 */
			if (unlikely(n->tp != tp || n->tp->chain != n->chain ||
				     !tp->ops->get_exts)) {
				tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
				return TC_ACT_SHOT;
			}

			exts = tp->ops->get_exts(tp, n->handle);
			if (unlikely(!exts || n->exts != exts)) {
				tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
				return TC_ACT_SHOT;
			}

			n = NULL;
			err = tcf_exts_exec_ex(skb, exts, act_index, res);
		} else {
			if (tp->protocol != protocol &&
			    tp->protocol != htons(ETH_P_ALL))
				continue;

			err = tc_classify(skb, tp, res);
		}
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0) {
			/* Policy drop or drop reason is over-written by
			 * classifiers with a bogus value (0).
			 */
			if (err == TC_ACT_SHOT &&
			    res->drop_reason == SKB_NOT_DROPPED_YET)
				tcf_set_drop_reason(res, orig_reason);
			return err;
		}
	}

	if (unlikely(n)) {
		tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
		return TC_ACT_SHOT;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}
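/* Loop control example: a TC_ACT_RECLASSIFY verdict restarts matching
 * from the original tp, while TC_ACT_GOTO_CHAIN restarts from the head
 * of the target chain; either way, after max_reclassify_loop (16)
 * restarts the packet is dropped to break potential classifier loops.
 */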
int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	struct tcf_exts_miss_cookie_node *n = NULL;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int act_index = 0;
	int ret;

	if (block) {
		ext = skb_ext_find(skb, TC_SKB_EXT);

		if (ext && (ext->chain || ext->act_miss)) {
			struct tcf_chain *fchain;
			u32 chain;

			if (ext->act_miss) {
				n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie,
								&act_index);
				if (!n) {
					tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
					return TC_ACT_SHOT;
				}

				chain = n->chain_index;
			} else {
				chain = ext->chain;
			}

			fchain = tcf_chain_lookup_rcu(block, chain);
			if (!fchain) {
				tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
				return TC_ACT_SHOT;
			}

			/* Consume, so cloned/redirect skbs won't inherit ext */
			skb_ext_del(skb, TC_SKB_EXT);

			tp = rcu_dereference_bh(fchain->filter_chain);
			last_executed_chain = fchain->index;
		}
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index,
			     &last_executed_chain);

	if (tc_skb_ext_tc_enabled()) {
		/* If we missed on some chain */
		if (ret == TC_ACT_UNSPEC && last_executed_chain) {
			struct tc_skb_cb *cb = tc_skb_cb(skb);

			ext = tc_skb_ext_alloc(skb);
			if (WARN_ON_ONCE(!ext)) {
				tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
				return TC_ACT_SHOT;
			}

			ext->chain = last_executed_chain;
			ext->mru = cb->mru;
			ext->post_ct = cb->post_ct;
			ext->post_ct_snat = cb->post_ct_snat;
			ext->post_ct_dnat = cb->post_ct_dnat;
			ext->zone = cb->zone;
		}
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify);

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */
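/* Race note: two concurrent adders may both allocate a tp for the same
 * (chain, prio, protocol) triple; the loser finds the winner's tp under
 * filter_chain_lock, destroys its own tp_new and proceeds with the
 * existing tp, while a triple still parked in the proto_destroy table
 * makes the insert fail with -EAGAIN so the request is replayed.
 */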
static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}
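/* The filter_chain list is kept sorted by ascending prio, so the walk
 * above stops at the first tp with prio >= the requested one: an exact
 * prio match must also match the protocol (unless protocol is 0), and a
 * higher prio means no tp with the requested prio exists.
 */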
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool terse_dump, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else if (terse_dump) {
		if (tp->ops->terse_dump) {
			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
						rtnl_held) < 0)
				goto nla_put_failure;
		} else {
			goto cls_op_not_supp;
		}
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}

	if (extack && extack->_msg &&
	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

out_nlmsg_trim:
nla_put_failure:
cls_op_not_supp:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
		return 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  false, rtnl_held, extack) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	return err;
}
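/* Notification fast path: when the event is not a unicast reply,
 * rtnl_notify_needed() lets the notifiers above and below skip building
 * the skb entirely if no one listens on RTNLGRP_TC and no NLM_F_ECHO
 * reply was requested.
 */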
static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool *last, bool rtnl_held,
			      struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
		return tp->ops->delete(tp, fh, last, rtnl_held, extack);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  false, rtnl_held, extack) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL);
	     tp; tp = tcf_get_next_proto(chain, tp))
		tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
			       event, false, true, extack);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}

static bool is_qdisc_ingress(__u32 classid)
{
	return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
}
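/* Illustrative sketch (not part of the original file): with the clsact qdisc,
 * the ingress hook binds filters at parent ffff:fff2 and the egress hook at
 * ffff:fff3; only the former satisfies is_qdisc_ingress(), which is what
 * later sets TCA_ACT_FLAGS_AT_INGRESS for the filter's actions.
 */
#if 0
static void example_clsact_parents(void)
{
	u32 ingress = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
	u32 egress = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_EGRESS);

	WARN_ON(!is_qdisc_ingress(ingress));
	WARN_ON(is_qdisc_ingress(egress));
}
#endif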
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;
	u32 flags;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;
	q = NULL;
	chain = NULL;
	flags = 0;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

	/* Take the rtnl mutex if any of the following holds: rtnl_held was
	 * set to true on a previous iteration, the block is shared (no qdisc
	 * found), the qdisc is not unlocked, the classifier type is not
	 * specified, or the classifier is not unlocked.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}
	block->classid = parent;

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(name, protocol, prio, chain,
					  rtnl_held, extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	if (!(n->nlmsg_flags & NLM_F_CREATE))
		flags |= TCA_ACT_FLAGS_REPLACE;
	if (!rtnl_held)
		flags |= TCA_ACT_FLAGS_NO_RTNL;
	if (is_qdisc_ingress(parent))
		flags |= TCA_ACT_FLAGS_AT_INGRESS;
	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      flags, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held, extack);
		tfilter_put(tp, fh);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
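/* Illustrative sketch (not part of the original file): when the user omits
 * the priority on an add with NLM_F_CREATE, the request starts from the
 * sentinel TC_H_MAKE(0x80000000U, 0U) and tcf_auto_prio() (defined earlier
 * in this file) then picks a priority just below the preceding tp, keeping
 * the shifted "major half" encoding used by tcm_info. Hypothetical helper:
 */
#if 0
static u32 example_next_auto_prio(struct tcf_proto *prev)
{
	/* prev is the tp that would precede the new one in the chain */
	if (prev)
		return TC_H_MAJ(prev->prio - 1);
	return TC_H_MAJ(TC_H_MAKE(0xC0000000U, 0U));	/* empty chain */
}
#endif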
static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take the rtnl mutex if flushing the whole chain, the block is
	 * shared (no qdisc found), the qdisc is not unlocked, the classifier
	 * type is not specified, or the classifier is not unlocked.
	 */
	if (!prio ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		/* User requested flush on non-existent chain. Nothing to do,
		 * so just return success.
		 */
		if (prio == 0) {
			err = 0;
			goto errout;
		}
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -ENOENT;
		goto errout;
	}

	if (prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, extack);
		tcf_chain_flush(chain, rtnl_held);
		err = 0;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout_locked;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout_locked;
	} else if (t->tcm_handle == 0) {
		tcf_proto_signal_destroying(chain, tp);
		tcf_chain_tp_remove(chain, &chain_info, tp);
		mutex_unlock(&chain->filter_chain_lock);

		tcf_proto_put(tp, rtnl_held, NULL);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_DELTFILTER, false, rtnl_held, extack);
		err = 0;
		goto errout;
	}
	mutex_unlock(&chain->filter_chain_lock);

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		bool last;

		err = tfilter_del_notify(net, skb, n, tp, block, q, parent, fh,
					 &last, rtnl_held, extack);

		if (err)
			goto errout;
		if (last)
			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
	}

errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0) {
		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take the rtnl mutex if the block is shared (no qdisc found), the
	 * qdisc is not unlocked, the classifier type is not specified, or
	 * the classifier is not unlocked.
	 */
	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	mutex_unlock(&chain->filter_chain_lock);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true, rtnl_held, NULL);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
	}

	tfilter_put(tp, fh);
errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;
}
struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct tcf_block *block;
	struct Qdisc *q;
	u32 parent;
	bool terse_dump;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER, a->terse_dump, true, NULL);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index, bool terse)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_proto *tp, *tp_prev;
	struct tcf_dump_args arg;

	for (tp = __tcf_get_next_proto(chain, NULL);
	     tp;
	     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, true, NULL),
		     (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER, false, true, NULL) <= 0)
				goto errout;
			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		arg.w.cookie = cb->args[2];
		arg.terse_dump = terse;
		tp->ops->walk(tp, &arg.w, true);
		cb->args[2] = arg.w.cookie;
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			goto errout;
	}
	return true;

errout:
	tcf_proto_put(tp, true, NULL);
	return false;
}

static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
	[TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
};
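/* Illustrative sketch (not part of the original file): the skip/count/stop
 * protocol a classifier's ->walk() is expected to honour, so that the
 * cb->args[] bookkeeping above can resume an interrupted dump. Hypothetical
 * head/filter names, modelled on the simple list-based classifiers.
 */
#if 0
static void example_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct example_head *head = rtnl_dereference(tp->root);
	struct example_filter *f;

	list_for_each_entry(f, &head->flist, link) {
		if (arg->count < arg->skip) {
			arg->count++;	/* already dumped in a prior pass */
			continue;
		}
		if (arg->fn(tp, f, arg) < 0) {
			arg->stop = 1;	/* skb full; resume here next time */
			break;
		}
		arg->count++;
	}
}
#endif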
/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcf_chain *chain, *chain_prev;
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	bool terse_dump = false;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     tcf_tfilter_dump_policy, cb->extack);
	if (err)
		return err;

	if (tca[TCA_DUMP_FLAGS]) {
		struct nla_bitfield32 flags =
			nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);

		terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with a block index, q is NULL and the parent
		 * value will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, the compiler does
		 * not see that far, so set parent to zero to silence the
		 * warning about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent)
			q = rtnl_dereference(dev->qdisc);
		else
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		parent = block->classid;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index, terse_dump)) {
			tcf_chain_put(chain);
			err = -EMSGSIZE;
			break;
		}
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we made no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}
static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
			      void *tmplt_priv, u32 chain_index,
			      struct net *net, struct sk_buff *skb,
			      struct tcf_block *block,
			      u32 portid, u32 seq, u16 flags, int event,
			      struct netlink_ext_ack *extack)
{
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_proto_ops *ops;
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;
	void *priv;

	ops = tmplt_ops;
	priv = tmplt_priv;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_handle = 0;
	if (block->q) {
		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
		tcm->tcm_parent = block->q->handle;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}

	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
		goto nla_put_failure;

	if (ops) {
		if (nla_put_string(skb, TCA_KIND, ops->kind))
			goto nla_put_failure;
		if (ops->tmplt_dump(skb, net, priv) < 0)
			goto nla_put_failure;
	}

	if (extack && extack->_msg &&
	    nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg))
		goto out_nlmsg_trim;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast,
			   struct netlink_ext_ack *extack)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct tcf_block *block = chain->block;
	struct net *net = block->net;
	struct sk_buff *skb;
	int err = 0;

	if (!unicast && !rtnl_notify_needed(net, flags, RTNLGRP_TC))
		return 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
			       chain->index, net, skb, block, portid,
			       seq, flags, event, extack) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     flags & NLM_F_ECHO);

	return err;
}
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct net *net = block->net;
	struct sk_buff *skb;

	if (!rtnl_notify_needed(net, flags, RTNLGRP_TC))
		return 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
			       block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
}

static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
			      struct nlattr **tca,
			      struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;
	char name[IFNAMSIZ];
	void *tmplt_priv;

	/* If kind is not set, user did not specify template. */
	if (!tca[TCA_KIND])
		return 0;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
		return -EINVAL;
	}

	ops = tcf_proto_lookup_ops(name, true, extack);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
		module_put(ops->owner);
		return -EOPNOTSUPP;
	}

	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
	if (IS_ERR(tmplt_priv)) {
		module_put(ops->owner);
		return PTR_ERR(tmplt_priv);
	}
	chain->tmplt_ops = ops;
	chain->tmplt_priv = tmplt_priv;
	return 0;
}

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv)
{
	/* If template ops are not set, there is no template to destroy. */
	if (!tmplt_ops)
		return;

	tmplt_ops->tmplt_destroy(tmplt_priv);
	module_put(tmplt_ops->owner);
}
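/* Illustrative sketch (not part of the original file): the minimal template
 * callbacks a classifier must provide before tc_chain_tmplt_add() will accept
 * it (tmplt_dump is required as well). Names are hypothetical; flower is the
 * in-tree user of this API.
 */
#if 0
static void *example_tmplt_create(struct net *net, struct tcf_chain *chain,
				  struct nlattr **tca,
				  struct netlink_ext_ack *extack)
{
	struct example_tmplt *tmplt = kzalloc(sizeof(*tmplt), GFP_KERNEL);

	if (!tmplt)
		return ERR_PTR(-ENOMEM);
	/* parse tca[TCA_OPTIONS] into the template here */
	return tmplt;
}

static void example_tmplt_destroy(void *tmplt_priv)
{
	kfree(tmplt_priv);
}
#endif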
/* Add/delete/get a chain */

static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q;
	struct tcf_chain *chain;
	struct tcf_block *block;
	unsigned long cl;
	int err;

replay:
	q = NULL;
	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	parent = t->tcm_parent;
	cl = 0;

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block))
		return PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout_block;
	}

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (n->nlmsg_type == RTM_NEWCHAIN) {
		if (chain) {
			if (tcf_chain_held_by_acts_only(chain)) {
				/* The chain exists only because there is
				 * some action referencing it.
				 */
				tcf_chain_hold(chain);
			} else {
				NL_SET_ERR_MSG(extack, "Filter chain already exists");
				err = -EEXIST;
				goto errout_block_locked;
			}
		} else {
			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
				err = -ENOENT;
				goto errout_block_locked;
			}
			chain = tcf_chain_create(block, chain_index);
			if (!chain) {
				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
				err = -ENOMEM;
				goto errout_block_locked;
			}
		}
	} else {
		if (!chain || tcf_chain_held_by_acts_only(chain)) {
			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
			err = -EINVAL;
			goto errout_block_locked;
		}
		tcf_chain_hold(chain);
	}

	if (n->nlmsg_type == RTM_NEWCHAIN) {
		/* Modifying chain requires holding parent block lock. In case
		 * the chain was successfully added, take a reference to the
		 * chain. This ensures that an empty chain does not disappear
		 * at the end of this function.
		 */
		tcf_chain_hold(chain);
		chain->explicitly_created = true;
	}
	mutex_unlock(&block->lock);

	switch (n->nlmsg_type) {
	case RTM_NEWCHAIN:
		err = tc_chain_tmplt_add(chain, net, tca, extack);
		if (err) {
			tcf_chain_put_explicitly_created(chain);
			goto errout;
		}

		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false, extack);
		break;
	case RTM_DELCHAIN:
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, extack);
		/* Flush the chain first as the user requested chain removal. */
		tcf_chain_flush(chain, true);
		/* In case the chain was successfully deleted, put a reference
		 * to the chain previously taken during addition.
		 */
		tcf_chain_put_explicitly_created(chain);
		break;
	case RTM_GETCHAIN:
		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
				      n->nlmsg_flags, n->nlmsg_type, true, extack);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
		break;
	default:
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(extack, "Unsupported message type");
		goto errout;
	}

errout:
	tcf_chain_put(chain);
errout_block:
	tcf_block_release(q, block, true);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;

errout_block_locked:
	mutex_unlock(&block->lock);
	goto errout_block;
}
/* called with RTNL */
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_chain *chain;
	long index_start;
	long index;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		if (!tcm->tcm_parent)
			q = rtnl_dereference(dev->qdisc);
		else
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));

		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	mutex_lock(&block->lock);
	list_for_each_entry(chain, &block->chain_list, list) {
		if ((tca[TCA_CHAIN] &&
		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
			continue;
		if (index < index_start) {
			index++;
			continue;
		}
		if (tcf_chain_held_by_acts_only(chain))
			continue;
		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
					 chain->index, net, skb, block,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 RTM_NEWCHAIN, NULL);
		if (err <= 0)
			break;
		index++;
	}
	mutex_unlock(&block->lock);

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we made no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}
int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
		     int police, struct tcf_proto *tp, u32 handle,
		     bool use_action_miss)
{
	int err = 0;

#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->miss_cookie_node = NULL;
	/* Note: we do not yet own a reference on net.
	 * This reference might be taken later from tcf_exts_get_net().
	 */
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif

	exts->action = action;
	exts->police = police;

	if (!use_action_miss)
		return 0;

	err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle);
	if (err)
		goto err_miss_alloc;

	return 0;

err_miss_alloc:
	tcf_exts_destroy(exts);
#ifdef CONFIG_NET_CLS_ACT
	exts->actions = NULL;
#endif
	return err;
}
EXPORT_SYMBOL(tcf_exts_init_ex);

void tcf_exts_destroy(struct tcf_exts *exts)
{
	tcf_exts_miss_cookie_base_destroy(exts);

#ifdef CONFIG_NET_CLS_ACT
	if (exts->actions) {
		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
		kfree(exts->actions);
	}
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);
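/* Illustrative sketch (not part of the original file): the usual life cycle
 * of a tcf_exts instance inside a classifier's ->change() path. Names with
 * the "example"/"EXAMPLE" prefix are hypothetical; the pattern follows the
 * simple in-tree classifiers.
 */
#if 0
static int example_set_exts(struct net *net, struct tcf_proto *tp,
			    struct example_filter *f, struct nlattr **tb,
			    struct nlattr *est, u32 flags,
			    struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_init(&f->exts, net, TCA_EXAMPLE_ACT,
			    TCA_EXAMPLE_POLICE);
	if (err < 0)
		return err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
	if (err < 0)
		tcf_exts_destroy(&f->exts);	/* undo init on failure */
	return err;
}
#endif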
int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
			 struct nlattr *rate_tlv, struct tcf_exts *exts,
			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		int init_res[TCA_ACT_MAX_PRIO] = {};
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			struct tc_action_ops *a_o;

			a_o = tc_action_load_ops(tb[exts->police], true,
						 !(flags & TCA_ACT_FLAGS_NO_RTNL),
						 extack);
			if (IS_ERR(a_o))
				return PTR_ERR(a_o);
			flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND;
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, a_o, init_res, flags,
						extack);
			module_put(a_o->owner);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
			tcf_idr_insert_many(exts->actions, init_res);
		} else if (exts->action && tb[exts->action]) {
			int err;

			flags |= TCA_ACT_FLAGS_BIND;
			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, exts->actions, init_res,
					      &attr_size, flags, fl_flags,
					      extack);
			if (err < 0)
				return err;
			exts->nr_actions = err;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate_ex);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts,
		      u32 flags, struct netlink_ext_ack *extack)
{
	return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
				    flags, 0, extack);
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/* Again for backward-compatible mode: we want to work with
		 * both old and new modes of entering tc data even if iproute2
		 * was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start_noflag(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			if (tcf_action_dump(skb, exts->actions, 0, 0, false)
			    < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start_noflag(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);
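/* Illustrative sketch (not part of the original file): how a classifier's
 * ->dump() typically combines tcf_exts_dump() with its own attributes inside
 * a TCA_OPTIONS nest, then appends action statistics. Hypothetical filter
 * type, modelled on the simple in-tree classifiers.
 */
#if 0
static int example_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct example_filter *f = fh;
	struct nlattr *nest;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
#endif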
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (!exts->action || !tcf_exts_has_actions(exts))
		return 0;

	nest = nla_nest_start_noflag(skb, exts->action);
	if (!nest)
		goto nla_put_failure;

	if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_terse_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	atomic_inc(&block->offloadcnt);
}

static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	atomic_dec(&block->offloadcnt);
}

static void tc_cls_offload_cnt_update(struct tcf_block *block,
				      struct tcf_proto *tp, u32 *cnt,
				      u32 *flags, u32 diff, bool add)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	if (add) {
		if (!*cnt)
			tcf_block_offload_inc(block, flags);
		*cnt += diff;
	} else {
		*cnt -= diff;
		if (!*cnt)
			tcf_block_offload_dec(block, flags);
	}
	spin_unlock(&tp->lock);
}

static void
tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
			 u32 *cnt, u32 *flags)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	tcf_block_offload_dec(block, flags);
	*cnt = 0;
	spin_unlock(&tp->lock);
}

static int
__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		   void *type_data, bool err_stop)
{
	struct flow_block_cb *block_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);
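/* Illustrative sketch (not part of the original file): the driver side of the
 * callbacks invoked by __tc_setup_cb_call() above. A driver typically wires
 * its block callback up from ndo_setup_tc(); names prefixed "example_" are
 * hypothetical.
 */
#if 0
static LIST_HEAD(example_block_cb_list);

static int example_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				     void *cb_priv)
{
	struct example_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return example_setup_flower(priv, type_data);
	default:
		return -EOPNOTSUPP;	/* counted as a failure above */
	}
}

static int example_ndo_setup_tc(struct net_device *dev,
				enum tc_setup_type type, void *type_data)
{
	struct example_priv *priv = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &example_block_cb_list,
						  example_setup_tc_block_cb,
						  priv, priv, true);
	default:
		return -EOPNOTSUPP;
	}
}
#endif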
/* Non-destructive filter add. If a filter that wasn't already in hardware is
 * successfully offloaded, increment the block's offload counter. On failure,
 * the previously offloaded filter is considered to be intact and the offload
 * counter is not decremented.
 */

int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
					  ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_add);

/* Destructive filter replace. If a filter that wasn't already in hardware is
 * successfully offloaded, increment the block's offload counter. On failure,
 * the previously offloaded filter is considered to be destroyed and the
 * offload counter is decremented.
 */

int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
					  new_flags, ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_replace);
/* Destroy filter and decrement block offload counter, if filter was previously
 * offloaded.
 */

int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in the same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_destroy);

int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
{
	int err = cb(type, type_data, cb_priv);

	if (err) {
		if (add && tc_skip_sw(*flags))
			return err;
	} else {
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
					  add);
	}

	return 0;
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);

static int tcf_act_get_user_cookie(struct flow_action_entry *entry,
				   const struct tc_action *act)
{
	struct tc_cookie *user_cookie;
	int err = 0;

	rcu_read_lock();
	user_cookie = rcu_dereference(act->user_cookie);
	if (user_cookie) {
		entry->user_cookie = flow_action_cookie_create(user_cookie->data,
							       user_cookie->len,
							       GFP_ATOMIC);
		if (!entry->user_cookie)
			err = -ENOMEM;
	}
	rcu_read_unlock();
	return err;
}

static void tcf_act_put_user_cookie(struct flow_action_entry *entry)
{
	flow_action_cookie_destroy(entry->user_cookie);
}

void tc_cleanup_offload_action(struct flow_action *flow_action)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, flow_action) {
		tcf_act_put_user_cookie(entry);
		if (entry->destructor)
			entry->destructor(entry->destructor_priv);
	}
}
EXPORT_SYMBOL(tc_cleanup_offload_action);

static int tc_setup_offload_act(struct tc_action *act,
				struct flow_action_entry *entry,
				u32 *index_inc,
				struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	if (act->ops->offload_act_setup) {
		return act->ops->offload_act_setup(act, entry, index_inc, true,
						   extack);
	} else {
		NL_SET_ERR_MSG(extack, "Action does not support offload");
		return -EOPNOTSUPP;
	}
#else
	return 0;
#endif
}

int tc_setup_action(struct flow_action *flow_action,
		    struct tc_action *actions[],
		    u32 miss_cookie_base,
		    struct netlink_ext_ack *extack)
{
	int i, j, k, index, err = 0;
	struct tc_action *act;

	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);

	if (!actions)
		return 0;

	j = 0;
	tcf_act_for_each_action(i, act, actions) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		spin_lock_bh(&act->tcfa_lock);
		err = tcf_act_get_user_cookie(entry, act);
		if (err)
			goto err_out_locked;

		index = 0;
		err = tc_setup_offload_act(act, entry, &index, extack);
		if (err)
			goto err_out_locked;

		for (k = 0; k < index; k++) {
			entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
			entry[k].hw_index = act->tcfa_index;
			entry[k].cookie = (unsigned long)act;
			entry[k].miss_cookie =
				tcf_exts_miss_cookie_get(miss_cookie_base, i);
		}

		j += index;

		spin_unlock_bh(&act->tcfa_lock);
	}

err_out:
	if (err)
		tc_cleanup_offload_action(flow_action);

	return err;

err_out_locked:
	spin_unlock_bh(&act->tcfa_lock);
	goto err_out;
}
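/* Illustrative sketch (not part of the original file): the shape of an
 * action's ->offload_act_setup() callback as consumed by tc_setup_action()
 * above. For a simple single-entry action it fills one flow_action_entry and
 * reports one consumed entry through index_inc. Hypothetical "example drop"
 * action.
 */
#if 0
static int example_offload_act_setup(struct tc_action *act, void *entry_data,
				     u32 *index_inc, bool bind,
				     struct netlink_ext_ack *extack)
{
	if (bind) {
		struct flow_action_entry *entry = entry_data;

		entry->id = FLOW_ACTION_DROP;	/* translate to flow API */
		*index_inc = 1;			/* one entry consumed */
	}
	return 0;
}
#endif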
int tc_setup_offload_action(struct flow_action *flow_action,
			    const struct tcf_exts *exts,
			    struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	u32 miss_cookie_base;

	if (!exts)
		return 0;

	miss_cookie_base = exts->miss_cookie_node ?
			   exts->miss_cookie_node->miss_cookie_base : 0;
	return tc_setup_action(flow_action, exts->actions, miss_cookie_base,
			       extack);
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tc_setup_offload_action);

unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);

#ifdef CONFIG_NET_CLS_ACT
static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
					u32 *p_block_index,
					struct netlink_ext_ack *extack)
{
	*p_block_index = nla_get_u32(block_index_attr);
	if (!*p_block_index) {
		NL_SET_ERR_MSG(extack, "Block number may not be zero");
		return -EINVAL;
	}

	return 0;
}

int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	qe->info.binder_type = binder_type;
	qe->info.chain_head_change = tcf_chain_head_change_dflt;
	qe->info.chain_head_change_priv = &qe->filter_chain;
	qe->info.block_index = block_index;

	return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
}
EXPORT_SYMBOL(tcf_qevent_init);

void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
	if (qe->info.block_index)
		tcf_block_put_ext(qe->block, sch, &qe->info);
}
EXPORT_SYMBOL(tcf_qevent_destroy);

int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack)
{
	u32 block_index;
	int err;

	if (!block_index_attr)
		return 0;

	err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
	if (err)
		return err;

	/* Bounce a newly-configured block index that differs from the one
	 * already in use; changing blocks is not supported.
	 */
	if (block_index != qe->info.block_index) {
		NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(tcf_qevent_validate_change);
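/* Illustrative sketch (not part of the original file): how a qdisc consumes
 * the qevent helpers above, following the pattern of the in-tree RED qdisc
 * and its "early_drop" qevent. Names prefixed "example_"/"EXAMPLE" are
 * hypothetical.
 */
#if 0
static int example_qdisc_init(struct Qdisc *sch, struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct example_sched *q = qdisc_priv(sch);

	return tcf_qevent_init(&q->qe_drop, sch,
			       FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
			       tb[TCA_EXAMPLE_EARLY_DROP_BLOCK], extack);
}

static int example_enqueue_congested(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct example_sched *q = qdisc_priv(sch);
	int ret = 0;

	/* Let the qevent block classify/redirect the packet being dropped. */
	skb = tcf_qevent_handle(&q->qe_drop, sch, skb, to_free, &ret);
	if (!skb)
		return NET_XMIT_CN | ret;

	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_DROP;
}
#endif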
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret)
{
	struct tcf_result cl_res;
	struct tcf_proto *fl;

	if (!qe->info.block_index)
		return skb;

	fl = rcu_dereference_bh(qe->filter_chain);

	switch (tcf_classify(skb, NULL, fl, &cl_res, false)) {
	case TC_ACT_SHOT:
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_BYPASS;
		return NULL;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	case TC_ACT_REDIRECT:
		skb_do_redirect(skb);
		*ret = __NET_XMIT_STOLEN;
		return NULL;
	}

	return skb;
}
EXPORT_SYMBOL(tcf_qevent_handle);

int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	if (!qe->info.block_index)
		return 0;
	return nla_put_u32(skb, attr_name, qe->info.block_index);
}
EXPORT_SYMBOL(tcf_qevent_dump);
#endif

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id   = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);