1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * net/sched/cls_api.c Packet classifier API. 4 * 5 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> 6 * 7 * Changes: 8 * 9 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support 10 */ 11 12 #include <linux/module.h> 13 #include <linux/types.h> 14 #include <linux/kernel.h> 15 #include <linux/string.h> 16 #include <linux/errno.h> 17 #include <linux/err.h> 18 #include <linux/skbuff.h> 19 #include <linux/init.h> 20 #include <linux/kmod.h> 21 #include <linux/slab.h> 22 #include <linux/idr.h> 23 #include <linux/jhash.h> 24 #include <linux/rculist.h> 25 #include <linux/rhashtable.h> 26 #include <net/net_namespace.h> 27 #include <net/sock.h> 28 #include <net/netlink.h> 29 #include <net/pkt_sched.h> 30 #include <net/pkt_cls.h> 31 #include <net/tc_act/tc_pedit.h> 32 #include <net/tc_act/tc_mirred.h> 33 #include <net/tc_act/tc_vlan.h> 34 #include <net/tc_act/tc_tunnel_key.h> 35 #include <net/tc_act/tc_csum.h> 36 #include <net/tc_act/tc_gact.h> 37 #include <net/tc_act/tc_police.h> 38 #include <net/tc_act/tc_sample.h> 39 #include <net/tc_act/tc_skbedit.h> 40 #include <net/tc_act/tc_ct.h> 41 #include <net/tc_act/tc_mpls.h> 42 #include <net/tc_act/tc_gate.h> 43 #include <net/flow_offload.h> 44 #include <net/tc_wrapper.h> 45 46 /* The list of all installed classifier types */ 47 static LIST_HEAD(tcf_proto_base); 48 49 /* Protects list of registered TC modules. It is pure SMP lock. */ 50 static DEFINE_RWLOCK(cls_mod_lock); 51 52 static struct xarray tcf_exts_miss_cookies_xa; 53 struct tcf_exts_miss_cookie_node { 54 const struct tcf_chain *chain; 55 const struct tcf_proto *tp; 56 const struct tcf_exts *exts; 57 u32 chain_index; 58 u32 tp_prio; 59 u32 handle; 60 u32 miss_cookie_base; 61 struct rcu_head rcu; 62 }; 63 64 /* Each tc action entry cookie will be comprised of 32bit miss_cookie_base + 65 * action index in the exts tc actions array. 66 */ 67 union tcf_exts_miss_cookie { 68 struct { 69 u32 miss_cookie_base; 70 u32 act_index; 71 }; 72 u64 miss_cookie; 73 }; 74 75 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 76 static int 77 tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp, 78 u32 handle) 79 { 80 struct tcf_exts_miss_cookie_node *n; 81 static u32 next; 82 int err; 83 84 if (WARN_ON(!handle || !tp->ops->get_exts)) 85 return -EINVAL; 86 87 n = kzalloc(sizeof(*n), GFP_KERNEL); 88 if (!n) 89 return -ENOMEM; 90 91 n->chain_index = tp->chain->index; 92 n->chain = tp->chain; 93 n->tp_prio = tp->prio; 94 n->tp = tp; 95 n->exts = exts; 96 n->handle = handle; 97 98 err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base, 99 n, xa_limit_32b, &next, GFP_KERNEL); 100 if (err) 101 goto err_xa_alloc; 102 103 exts->miss_cookie_node = n; 104 return 0; 105 106 err_xa_alloc: 107 kfree(n); 108 return err; 109 } 110 111 static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts) 112 { 113 struct tcf_exts_miss_cookie_node *n; 114 115 if (!exts->miss_cookie_node) 116 return; 117 118 n = exts->miss_cookie_node; 119 xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base); 120 kfree_rcu(n, rcu); 121 } 122 123 static struct tcf_exts_miss_cookie_node * 124 tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index) 125 { 126 union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, }; 127 128 *act_index = mc.act_index; 129 return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base); 130 } 131 #else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */ 132 static int 133 tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp, 134 u32 handle) 135 { 136 return 0; 137 } 138 139 static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts) 140 { 141 } 142 #endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */ 143 144 static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index) 145 { 146 union tcf_exts_miss_cookie mc = { .act_index = act_index, }; 147 148 if (!miss_cookie_base) 149 return 0; 150 151 mc.miss_cookie_base = miss_cookie_base; 152 return mc.miss_cookie; 153 } 154 155 #ifdef CONFIG_NET_CLS_ACT 156 DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc); 157 EXPORT_SYMBOL(tc_skb_ext_tc); 158 159 void tc_skb_ext_tc_enable(void) 160 { 161 static_branch_inc(&tc_skb_ext_tc); 162 } 163 EXPORT_SYMBOL(tc_skb_ext_tc_enable); 164 165 void tc_skb_ext_tc_disable(void) 166 { 167 static_branch_dec(&tc_skb_ext_tc); 168 } 169 EXPORT_SYMBOL(tc_skb_ext_tc_disable); 170 #endif 171 172 static u32 destroy_obj_hashfn(const struct tcf_proto *tp) 173 { 174 return jhash_3words(tp->chain->index, tp->prio, 175 (__force __u32)tp->protocol, 0); 176 } 177 178 static void tcf_proto_signal_destroying(struct tcf_chain *chain, 179 struct tcf_proto *tp) 180 { 181 struct tcf_block *block = chain->block; 182 183 mutex_lock(&block->proto_destroy_lock); 184 hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node, 185 destroy_obj_hashfn(tp)); 186 mutex_unlock(&block->proto_destroy_lock); 187 } 188 189 static bool tcf_proto_cmp(const struct tcf_proto *tp1, 190 const struct tcf_proto *tp2) 191 { 192 return tp1->chain->index == tp2->chain->index && 193 tp1->prio == tp2->prio && 194 tp1->protocol == tp2->protocol; 195 } 196 197 static bool tcf_proto_exists_destroying(struct tcf_chain *chain, 198 struct tcf_proto *tp) 199 { 200 u32 hash = destroy_obj_hashfn(tp); 201 struct tcf_proto *iter; 202 bool found = false; 203 204 rcu_read_lock(); 205 hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter, 206 destroy_ht_node, hash) { 207 if (tcf_proto_cmp(tp, iter)) { 208 found = true; 209 break; 210 } 211 } 212 rcu_read_unlock(); 213 214 return found; 215 } 216 217 static void 218 tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp) 219 { 220 struct tcf_block *block = chain->block; 221 222 mutex_lock(&block->proto_destroy_lock); 223 if (hash_hashed(&tp->destroy_ht_node)) 224 hash_del_rcu(&tp->destroy_ht_node); 225 mutex_unlock(&block->proto_destroy_lock); 226 } 227 228 /* Find classifier type by string name */ 229 230 static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind) 231 { 232 const struct tcf_proto_ops *t, *res = NULL; 233 234 if (kind) { 235 read_lock(&cls_mod_lock); 236 list_for_each_entry(t, &tcf_proto_base, head) { 237 if (strcmp(kind, t->kind) == 0) { 238 if (try_module_get(t->owner)) 239 res = t; 240 break; 241 } 242 } 243 read_unlock(&cls_mod_lock); 244 } 245 return res; 246 } 247 248 static const struct tcf_proto_ops * 249 tcf_proto_lookup_ops(const char *kind, bool rtnl_held, 250 struct netlink_ext_ack *extack) 251 { 252 const struct tcf_proto_ops *ops; 253 254 ops = __tcf_proto_lookup_ops(kind); 255 if (ops) 256 return ops; 257 #ifdef CONFIG_MODULES 258 if (rtnl_held) 259 rtnl_unlock(); 260 request_module("cls_%s", kind); 261 if (rtnl_held) 262 rtnl_lock(); 263 ops = __tcf_proto_lookup_ops(kind); 264 /* We dropped the RTNL semaphore in order to perform 265 * the module load. So, even if we succeeded in loading 266 * the module we have to replay the request. We indicate 267 * this using -EAGAIN. 268 */ 269 if (ops) { 270 module_put(ops->owner); 271 return ERR_PTR(-EAGAIN); 272 } 273 #endif 274 NL_SET_ERR_MSG(extack, "TC classifier not found"); 275 return ERR_PTR(-ENOENT); 276 } 277 278 /* Register(unregister) new classifier type */ 279 280 int register_tcf_proto_ops(struct tcf_proto_ops *ops) 281 { 282 struct tcf_proto_ops *t; 283 int rc = -EEXIST; 284 285 write_lock(&cls_mod_lock); 286 list_for_each_entry(t, &tcf_proto_base, head) 287 if (!strcmp(ops->kind, t->kind)) 288 goto out; 289 290 list_add_tail(&ops->head, &tcf_proto_base); 291 rc = 0; 292 out: 293 write_unlock(&cls_mod_lock); 294 return rc; 295 } 296 EXPORT_SYMBOL(register_tcf_proto_ops); 297 298 static struct workqueue_struct *tc_filter_wq; 299 300 void unregister_tcf_proto_ops(struct tcf_proto_ops *ops) 301 { 302 struct tcf_proto_ops *t; 303 int rc = -ENOENT; 304 305 /* Wait for outstanding call_rcu()s, if any, from a 306 * tcf_proto_ops's destroy() handler. 307 */ 308 rcu_barrier(); 309 flush_workqueue(tc_filter_wq); 310 311 write_lock(&cls_mod_lock); 312 list_for_each_entry(t, &tcf_proto_base, head) { 313 if (t == ops) { 314 list_del(&t->head); 315 rc = 0; 316 break; 317 } 318 } 319 write_unlock(&cls_mod_lock); 320 321 WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc); 322 } 323 EXPORT_SYMBOL(unregister_tcf_proto_ops); 324 325 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func) 326 { 327 INIT_RCU_WORK(rwork, func); 328 return queue_rcu_work(tc_filter_wq, rwork); 329 } 330 EXPORT_SYMBOL(tcf_queue_work); 331 332 /* Select new prio value from the range, managed by kernel. */ 333 334 static inline u32 tcf_auto_prio(struct tcf_proto *tp) 335 { 336 u32 first = TC_H_MAKE(0xC0000000U, 0U); 337 338 if (tp) 339 first = tp->prio - 1; 340 341 return TC_H_MAJ(first); 342 } 343 344 static bool tcf_proto_check_kind(struct nlattr *kind, char *name) 345 { 346 if (kind) 347 return nla_strscpy(name, kind, IFNAMSIZ) < 0; 348 memset(name, 0, IFNAMSIZ); 349 return false; 350 } 351 352 static bool tcf_proto_is_unlocked(const char *kind) 353 { 354 const struct tcf_proto_ops *ops; 355 bool ret; 356 357 if (strlen(kind) == 0) 358 return false; 359 360 ops = tcf_proto_lookup_ops(kind, false, NULL); 361 /* On error return false to take rtnl lock. Proto lookup/create 362 * functions will perform lookup again and properly handle errors. 363 */ 364 if (IS_ERR(ops)) 365 return false; 366 367 ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED); 368 module_put(ops->owner); 369 return ret; 370 } 371 372 static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol, 373 u32 prio, struct tcf_chain *chain, 374 bool rtnl_held, 375 struct netlink_ext_ack *extack) 376 { 377 struct tcf_proto *tp; 378 int err; 379 380 tp = kzalloc(sizeof(*tp), GFP_KERNEL); 381 if (!tp) 382 return ERR_PTR(-ENOBUFS); 383 384 tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack); 385 if (IS_ERR(tp->ops)) { 386 err = PTR_ERR(tp->ops); 387 goto errout; 388 } 389 tp->classify = tp->ops->classify; 390 tp->protocol = protocol; 391 tp->prio = prio; 392 tp->chain = chain; 393 spin_lock_init(&tp->lock); 394 refcount_set(&tp->refcnt, 1); 395 396 err = tp->ops->init(tp); 397 if (err) { 398 module_put(tp->ops->owner); 399 goto errout; 400 } 401 return tp; 402 403 errout: 404 kfree(tp); 405 return ERR_PTR(err); 406 } 407 408 static void tcf_proto_get(struct tcf_proto *tp) 409 { 410 refcount_inc(&tp->refcnt); 411 } 412 413 static void tcf_chain_put(struct tcf_chain *chain); 414 415 static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held, 416 bool sig_destroy, struct netlink_ext_ack *extack) 417 { 418 tp->ops->destroy(tp, rtnl_held, extack); 419 if (sig_destroy) 420 tcf_proto_signal_destroyed(tp->chain, tp); 421 tcf_chain_put(tp->chain); 422 module_put(tp->ops->owner); 423 kfree_rcu(tp, rcu); 424 } 425 426 static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held, 427 struct netlink_ext_ack *extack) 428 { 429 if (refcount_dec_and_test(&tp->refcnt)) 430 tcf_proto_destroy(tp, rtnl_held, true, extack); 431 } 432 433 static bool tcf_proto_check_delete(struct tcf_proto *tp) 434 { 435 if (tp->ops->delete_empty) 436 return tp->ops->delete_empty(tp); 437 438 tp->deleting = true; 439 return tp->deleting; 440 } 441 442 static void tcf_proto_mark_delete(struct tcf_proto *tp) 443 { 444 spin_lock(&tp->lock); 445 tp->deleting = true; 446 spin_unlock(&tp->lock); 447 } 448 449 static bool tcf_proto_is_deleting(struct tcf_proto *tp) 450 { 451 bool deleting; 452 453 spin_lock(&tp->lock); 454 deleting = tp->deleting; 455 spin_unlock(&tp->lock); 456 457 return deleting; 458 } 459 460 #define ASSERT_BLOCK_LOCKED(block) \ 461 lockdep_assert_held(&(block)->lock) 462 463 struct tcf_filter_chain_list_item { 464 struct list_head list; 465 tcf_chain_head_change_t *chain_head_change; 466 void *chain_head_change_priv; 467 }; 468 469 static struct tcf_chain *tcf_chain_create(struct tcf_block *block, 470 u32 chain_index) 471 { 472 struct tcf_chain *chain; 473 474 ASSERT_BLOCK_LOCKED(block); 475 476 chain = kzalloc(sizeof(*chain), GFP_KERNEL); 477 if (!chain) 478 return NULL; 479 list_add_tail_rcu(&chain->list, &block->chain_list); 480 mutex_init(&chain->filter_chain_lock); 481 chain->block = block; 482 chain->index = chain_index; 483 chain->refcnt = 1; 484 if (!chain->index) 485 block->chain0.chain = chain; 486 return chain; 487 } 488 489 static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item, 490 struct tcf_proto *tp_head) 491 { 492 if (item->chain_head_change) 493 item->chain_head_change(tp_head, item->chain_head_change_priv); 494 } 495 496 static void tcf_chain0_head_change(struct tcf_chain *chain, 497 struct tcf_proto *tp_head) 498 { 499 struct tcf_filter_chain_list_item *item; 500 struct tcf_block *block = chain->block; 501 502 if (chain->index) 503 return; 504 505 mutex_lock(&block->lock); 506 list_for_each_entry(item, &block->chain0.filter_chain_list, list) 507 tcf_chain_head_change_item(item, tp_head); 508 mutex_unlock(&block->lock); 509 } 510 511 /* Returns true if block can be safely freed. */ 512 513 static bool tcf_chain_detach(struct tcf_chain *chain) 514 { 515 struct tcf_block *block = chain->block; 516 517 ASSERT_BLOCK_LOCKED(block); 518 519 list_del_rcu(&chain->list); 520 if (!chain->index) 521 block->chain0.chain = NULL; 522 523 if (list_empty(&block->chain_list) && 524 refcount_read(&block->refcnt) == 0) 525 return true; 526 527 return false; 528 } 529 530 static void tcf_block_destroy(struct tcf_block *block) 531 { 532 mutex_destroy(&block->lock); 533 mutex_destroy(&block->proto_destroy_lock); 534 kfree_rcu(block, rcu); 535 } 536 537 static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block) 538 { 539 struct tcf_block *block = chain->block; 540 541 mutex_destroy(&chain->filter_chain_lock); 542 kfree_rcu(chain, rcu); 543 if (free_block) 544 tcf_block_destroy(block); 545 } 546 547 static void tcf_chain_hold(struct tcf_chain *chain) 548 { 549 ASSERT_BLOCK_LOCKED(chain->block); 550 551 ++chain->refcnt; 552 } 553 554 static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain) 555 { 556 ASSERT_BLOCK_LOCKED(chain->block); 557 558 /* In case all the references are action references, this 559 * chain should not be shown to the user. 560 */ 561 return chain->refcnt == chain->action_refcnt; 562 } 563 564 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block, 565 u32 chain_index) 566 { 567 struct tcf_chain *chain; 568 569 ASSERT_BLOCK_LOCKED(block); 570 571 list_for_each_entry(chain, &block->chain_list, list) { 572 if (chain->index == chain_index) 573 return chain; 574 } 575 return NULL; 576 } 577 578 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 579 static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block, 580 u32 chain_index) 581 { 582 struct tcf_chain *chain; 583 584 list_for_each_entry_rcu(chain, &block->chain_list, list) { 585 if (chain->index == chain_index) 586 return chain; 587 } 588 return NULL; 589 } 590 #endif 591 592 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, 593 u32 seq, u16 flags, int event, bool unicast, 594 struct netlink_ext_ack *extack); 595 596 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block, 597 u32 chain_index, bool create, 598 bool by_act) 599 { 600 struct tcf_chain *chain = NULL; 601 bool is_first_reference; 602 603 mutex_lock(&block->lock); 604 chain = tcf_chain_lookup(block, chain_index); 605 if (chain) { 606 tcf_chain_hold(chain); 607 } else { 608 if (!create) 609 goto errout; 610 chain = tcf_chain_create(block, chain_index); 611 if (!chain) 612 goto errout; 613 } 614 615 if (by_act) 616 ++chain->action_refcnt; 617 is_first_reference = chain->refcnt - chain->action_refcnt == 1; 618 mutex_unlock(&block->lock); 619 620 /* Send notification only in case we got the first 621 * non-action reference. Until then, the chain acts only as 622 * a placeholder for actions pointing to it and user ought 623 * not know about them. 624 */ 625 if (is_first_reference && !by_act) 626 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, 627 RTM_NEWCHAIN, false, NULL); 628 629 return chain; 630 631 errout: 632 mutex_unlock(&block->lock); 633 return chain; 634 } 635 636 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, 637 bool create) 638 { 639 return __tcf_chain_get(block, chain_index, create, false); 640 } 641 642 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index) 643 { 644 return __tcf_chain_get(block, chain_index, true, true); 645 } 646 EXPORT_SYMBOL(tcf_chain_get_by_act); 647 648 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops, 649 void *tmplt_priv); 650 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops, 651 void *tmplt_priv, u32 chain_index, 652 struct tcf_block *block, struct sk_buff *oskb, 653 u32 seq, u16 flags, bool unicast); 654 655 static void __tcf_chain_put(struct tcf_chain *chain, bool by_act, 656 bool explicitly_created) 657 { 658 struct tcf_block *block = chain->block; 659 const struct tcf_proto_ops *tmplt_ops; 660 unsigned int refcnt, non_act_refcnt; 661 bool free_block = false; 662 void *tmplt_priv; 663 664 mutex_lock(&block->lock); 665 if (explicitly_created) { 666 if (!chain->explicitly_created) { 667 mutex_unlock(&block->lock); 668 return; 669 } 670 chain->explicitly_created = false; 671 } 672 673 if (by_act) 674 chain->action_refcnt--; 675 676 /* tc_chain_notify_delete can't be called while holding block lock. 677 * However, when block is unlocked chain can be changed concurrently, so 678 * save these to temporary variables. 679 */ 680 refcnt = --chain->refcnt; 681 non_act_refcnt = refcnt - chain->action_refcnt; 682 tmplt_ops = chain->tmplt_ops; 683 tmplt_priv = chain->tmplt_priv; 684 685 if (non_act_refcnt == chain->explicitly_created && !by_act) { 686 if (non_act_refcnt == 0) 687 tc_chain_notify_delete(tmplt_ops, tmplt_priv, 688 chain->index, block, NULL, 0, 0, 689 false); 690 /* Last reference to chain, no need to lock. */ 691 chain->flushing = false; 692 } 693 694 if (refcnt == 0) 695 free_block = tcf_chain_detach(chain); 696 mutex_unlock(&block->lock); 697 698 if (refcnt == 0) { 699 tc_chain_tmplt_del(tmplt_ops, tmplt_priv); 700 tcf_chain_destroy(chain, free_block); 701 } 702 } 703 704 static void tcf_chain_put(struct tcf_chain *chain) 705 { 706 __tcf_chain_put(chain, false, false); 707 } 708 709 void tcf_chain_put_by_act(struct tcf_chain *chain) 710 { 711 __tcf_chain_put(chain, true, false); 712 } 713 EXPORT_SYMBOL(tcf_chain_put_by_act); 714 715 static void tcf_chain_put_explicitly_created(struct tcf_chain *chain) 716 { 717 __tcf_chain_put(chain, false, true); 718 } 719 720 static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held) 721 { 722 struct tcf_proto *tp, *tp_next; 723 724 mutex_lock(&chain->filter_chain_lock); 725 tp = tcf_chain_dereference(chain->filter_chain, chain); 726 while (tp) { 727 tp_next = rcu_dereference_protected(tp->next, 1); 728 tcf_proto_signal_destroying(chain, tp); 729 tp = tp_next; 730 } 731 tp = tcf_chain_dereference(chain->filter_chain, chain); 732 RCU_INIT_POINTER(chain->filter_chain, NULL); 733 tcf_chain0_head_change(chain, NULL); 734 chain->flushing = true; 735 mutex_unlock(&chain->filter_chain_lock); 736 737 while (tp) { 738 tp_next = rcu_dereference_protected(tp->next, 1); 739 tcf_proto_put(tp, rtnl_held, NULL); 740 tp = tp_next; 741 } 742 } 743 744 static int tcf_block_setup(struct tcf_block *block, 745 struct flow_block_offload *bo); 746 747 static void tcf_block_offload_init(struct flow_block_offload *bo, 748 struct net_device *dev, struct Qdisc *sch, 749 enum flow_block_command command, 750 enum flow_block_binder_type binder_type, 751 struct flow_block *flow_block, 752 bool shared, struct netlink_ext_ack *extack) 753 { 754 bo->net = dev_net(dev); 755 bo->command = command; 756 bo->binder_type = binder_type; 757 bo->block = flow_block; 758 bo->block_shared = shared; 759 bo->extack = extack; 760 bo->sch = sch; 761 bo->cb_list_head = &flow_block->cb_list; 762 INIT_LIST_HEAD(&bo->cb_list); 763 } 764 765 static void tcf_block_unbind(struct tcf_block *block, 766 struct flow_block_offload *bo); 767 768 static void tc_block_indr_cleanup(struct flow_block_cb *block_cb) 769 { 770 struct tcf_block *block = block_cb->indr.data; 771 struct net_device *dev = block_cb->indr.dev; 772 struct Qdisc *sch = block_cb->indr.sch; 773 struct netlink_ext_ack extack = {}; 774 struct flow_block_offload bo = {}; 775 776 tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND, 777 block_cb->indr.binder_type, 778 &block->flow_block, tcf_block_shared(block), 779 &extack); 780 rtnl_lock(); 781 down_write(&block->cb_lock); 782 list_del(&block_cb->driver_list); 783 list_move(&block_cb->list, &bo.cb_list); 784 tcf_block_unbind(block, &bo); 785 up_write(&block->cb_lock); 786 rtnl_unlock(); 787 } 788 789 static bool tcf_block_offload_in_use(struct tcf_block *block) 790 { 791 return atomic_read(&block->offloadcnt); 792 } 793 794 static int tcf_block_offload_cmd(struct tcf_block *block, 795 struct net_device *dev, struct Qdisc *sch, 796 struct tcf_block_ext_info *ei, 797 enum flow_block_command command, 798 struct netlink_ext_ack *extack) 799 { 800 struct flow_block_offload bo = {}; 801 802 tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type, 803 &block->flow_block, tcf_block_shared(block), 804 extack); 805 806 if (dev->netdev_ops->ndo_setup_tc) { 807 int err; 808 809 err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo); 810 if (err < 0) { 811 if (err != -EOPNOTSUPP) 812 NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed"); 813 return err; 814 } 815 816 return tcf_block_setup(block, &bo); 817 } 818 819 flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo, 820 tc_block_indr_cleanup); 821 tcf_block_setup(block, &bo); 822 823 return -EOPNOTSUPP; 824 } 825 826 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, 827 struct tcf_block_ext_info *ei, 828 struct netlink_ext_ack *extack) 829 { 830 struct net_device *dev = q->dev_queue->dev; 831 int err; 832 833 down_write(&block->cb_lock); 834 835 /* If tc offload feature is disabled and the block we try to bind 836 * to already has some offloaded filters, forbid to bind. 837 */ 838 if (dev->netdev_ops->ndo_setup_tc && 839 !tc_can_offload(dev) && 840 tcf_block_offload_in_use(block)) { 841 NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled"); 842 err = -EOPNOTSUPP; 843 goto err_unlock; 844 } 845 846 err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack); 847 if (err == -EOPNOTSUPP) 848 goto no_offload_dev_inc; 849 if (err) 850 goto err_unlock; 851 852 up_write(&block->cb_lock); 853 return 0; 854 855 no_offload_dev_inc: 856 if (tcf_block_offload_in_use(block)) 857 goto err_unlock; 858 859 err = 0; 860 block->nooffloaddevcnt++; 861 err_unlock: 862 up_write(&block->cb_lock); 863 return err; 864 } 865 866 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q, 867 struct tcf_block_ext_info *ei) 868 { 869 struct net_device *dev = q->dev_queue->dev; 870 int err; 871 872 down_write(&block->cb_lock); 873 err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL); 874 if (err == -EOPNOTSUPP) 875 goto no_offload_dev_dec; 876 up_write(&block->cb_lock); 877 return; 878 879 no_offload_dev_dec: 880 WARN_ON(block->nooffloaddevcnt-- == 0); 881 up_write(&block->cb_lock); 882 } 883 884 static int 885 tcf_chain0_head_change_cb_add(struct tcf_block *block, 886 struct tcf_block_ext_info *ei, 887 struct netlink_ext_ack *extack) 888 { 889 struct tcf_filter_chain_list_item *item; 890 struct tcf_chain *chain0; 891 892 item = kmalloc(sizeof(*item), GFP_KERNEL); 893 if (!item) { 894 NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed"); 895 return -ENOMEM; 896 } 897 item->chain_head_change = ei->chain_head_change; 898 item->chain_head_change_priv = ei->chain_head_change_priv; 899 900 mutex_lock(&block->lock); 901 chain0 = block->chain0.chain; 902 if (chain0) 903 tcf_chain_hold(chain0); 904 else 905 list_add(&item->list, &block->chain0.filter_chain_list); 906 mutex_unlock(&block->lock); 907 908 if (chain0) { 909 struct tcf_proto *tp_head; 910 911 mutex_lock(&chain0->filter_chain_lock); 912 913 tp_head = tcf_chain_dereference(chain0->filter_chain, chain0); 914 if (tp_head) 915 tcf_chain_head_change_item(item, tp_head); 916 917 mutex_lock(&block->lock); 918 list_add(&item->list, &block->chain0.filter_chain_list); 919 mutex_unlock(&block->lock); 920 921 mutex_unlock(&chain0->filter_chain_lock); 922 tcf_chain_put(chain0); 923 } 924 925 return 0; 926 } 927 928 static void 929 tcf_chain0_head_change_cb_del(struct tcf_block *block, 930 struct tcf_block_ext_info *ei) 931 { 932 struct tcf_filter_chain_list_item *item; 933 934 mutex_lock(&block->lock); 935 list_for_each_entry(item, &block->chain0.filter_chain_list, list) { 936 if ((!ei->chain_head_change && !ei->chain_head_change_priv) || 937 (item->chain_head_change == ei->chain_head_change && 938 item->chain_head_change_priv == ei->chain_head_change_priv)) { 939 if (block->chain0.chain) 940 tcf_chain_head_change_item(item, NULL); 941 list_del(&item->list); 942 mutex_unlock(&block->lock); 943 944 kfree(item); 945 return; 946 } 947 } 948 mutex_unlock(&block->lock); 949 WARN_ON(1); 950 } 951 952 struct tcf_net { 953 spinlock_t idr_lock; /* Protects idr */ 954 struct idr idr; 955 }; 956 957 static unsigned int tcf_net_id; 958 959 static int tcf_block_insert(struct tcf_block *block, struct net *net, 960 struct netlink_ext_ack *extack) 961 { 962 struct tcf_net *tn = net_generic(net, tcf_net_id); 963 int err; 964 965 idr_preload(GFP_KERNEL); 966 spin_lock(&tn->idr_lock); 967 err = idr_alloc_u32(&tn->idr, block, &block->index, block->index, 968 GFP_NOWAIT); 969 spin_unlock(&tn->idr_lock); 970 idr_preload_end(); 971 972 return err; 973 } 974 975 static void tcf_block_remove(struct tcf_block *block, struct net *net) 976 { 977 struct tcf_net *tn = net_generic(net, tcf_net_id); 978 979 spin_lock(&tn->idr_lock); 980 idr_remove(&tn->idr, block->index); 981 spin_unlock(&tn->idr_lock); 982 } 983 984 static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, 985 u32 block_index, 986 struct netlink_ext_ack *extack) 987 { 988 struct tcf_block *block; 989 990 block = kzalloc(sizeof(*block), GFP_KERNEL); 991 if (!block) { 992 NL_SET_ERR_MSG(extack, "Memory allocation for block failed"); 993 return ERR_PTR(-ENOMEM); 994 } 995 mutex_init(&block->lock); 996 mutex_init(&block->proto_destroy_lock); 997 init_rwsem(&block->cb_lock); 998 flow_block_init(&block->flow_block); 999 INIT_LIST_HEAD(&block->chain_list); 1000 INIT_LIST_HEAD(&block->owner_list); 1001 INIT_LIST_HEAD(&block->chain0.filter_chain_list); 1002 1003 refcount_set(&block->refcnt, 1); 1004 block->net = net; 1005 block->index = block_index; 1006 1007 /* Don't store q pointer for blocks which are shared */ 1008 if (!tcf_block_shared(block)) 1009 block->q = q; 1010 return block; 1011 } 1012 1013 static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index) 1014 { 1015 struct tcf_net *tn = net_generic(net, tcf_net_id); 1016 1017 return idr_find(&tn->idr, block_index); 1018 } 1019 1020 static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index) 1021 { 1022 struct tcf_block *block; 1023 1024 rcu_read_lock(); 1025 block = tcf_block_lookup(net, block_index); 1026 if (block && !refcount_inc_not_zero(&block->refcnt)) 1027 block = NULL; 1028 rcu_read_unlock(); 1029 1030 return block; 1031 } 1032 1033 static struct tcf_chain * 1034 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) 1035 { 1036 mutex_lock(&block->lock); 1037 if (chain) 1038 chain = list_is_last(&chain->list, &block->chain_list) ? 1039 NULL : list_next_entry(chain, list); 1040 else 1041 chain = list_first_entry_or_null(&block->chain_list, 1042 struct tcf_chain, list); 1043 1044 /* skip all action-only chains */ 1045 while (chain && tcf_chain_held_by_acts_only(chain)) 1046 chain = list_is_last(&chain->list, &block->chain_list) ? 1047 NULL : list_next_entry(chain, list); 1048 1049 if (chain) 1050 tcf_chain_hold(chain); 1051 mutex_unlock(&block->lock); 1052 1053 return chain; 1054 } 1055 1056 /* Function to be used by all clients that want to iterate over all chains on 1057 * block. It properly obtains block->lock and takes reference to chain before 1058 * returning it. Users of this function must be tolerant to concurrent chain 1059 * insertion/deletion or ensure that no concurrent chain modification is 1060 * possible. Note that all netlink dump callbacks cannot guarantee to provide 1061 * consistent dump because rtnl lock is released each time skb is filled with 1062 * data and sent to user-space. 1063 */ 1064 1065 struct tcf_chain * 1066 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) 1067 { 1068 struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain); 1069 1070 if (chain) 1071 tcf_chain_put(chain); 1072 1073 return chain_next; 1074 } 1075 EXPORT_SYMBOL(tcf_get_next_chain); 1076 1077 static struct tcf_proto * 1078 __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp) 1079 { 1080 u32 prio = 0; 1081 1082 ASSERT_RTNL(); 1083 mutex_lock(&chain->filter_chain_lock); 1084 1085 if (!tp) { 1086 tp = tcf_chain_dereference(chain->filter_chain, chain); 1087 } else if (tcf_proto_is_deleting(tp)) { 1088 /* 'deleting' flag is set and chain->filter_chain_lock was 1089 * unlocked, which means next pointer could be invalid. Restart 1090 * search. 1091 */ 1092 prio = tp->prio + 1; 1093 tp = tcf_chain_dereference(chain->filter_chain, chain); 1094 1095 for (; tp; tp = tcf_chain_dereference(tp->next, chain)) 1096 if (!tp->deleting && tp->prio >= prio) 1097 break; 1098 } else { 1099 tp = tcf_chain_dereference(tp->next, chain); 1100 } 1101 1102 if (tp) 1103 tcf_proto_get(tp); 1104 1105 mutex_unlock(&chain->filter_chain_lock); 1106 1107 return tp; 1108 } 1109 1110 /* Function to be used by all clients that want to iterate over all tp's on 1111 * chain. Users of this function must be tolerant to concurrent tp 1112 * insertion/deletion or ensure that no concurrent chain modification is 1113 * possible. Note that all netlink dump callbacks cannot guarantee to provide 1114 * consistent dump because rtnl lock is released each time skb is filled with 1115 * data and sent to user-space. 1116 */ 1117 1118 struct tcf_proto * 1119 tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp) 1120 { 1121 struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp); 1122 1123 if (tp) 1124 tcf_proto_put(tp, true, NULL); 1125 1126 return tp_next; 1127 } 1128 EXPORT_SYMBOL(tcf_get_next_proto); 1129 1130 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held) 1131 { 1132 struct tcf_chain *chain; 1133 1134 /* Last reference to block. At this point chains cannot be added or 1135 * removed concurrently. 1136 */ 1137 for (chain = tcf_get_next_chain(block, NULL); 1138 chain; 1139 chain = tcf_get_next_chain(block, chain)) { 1140 tcf_chain_put_explicitly_created(chain); 1141 tcf_chain_flush(chain, rtnl_held); 1142 } 1143 } 1144 1145 /* Lookup Qdisc and increments its reference counter. 1146 * Set parent, if necessary. 1147 */ 1148 1149 static int __tcf_qdisc_find(struct net *net, struct Qdisc **q, 1150 u32 *parent, int ifindex, bool rtnl_held, 1151 struct netlink_ext_ack *extack) 1152 { 1153 const struct Qdisc_class_ops *cops; 1154 struct net_device *dev; 1155 int err = 0; 1156 1157 if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) 1158 return 0; 1159 1160 rcu_read_lock(); 1161 1162 /* Find link */ 1163 dev = dev_get_by_index_rcu(net, ifindex); 1164 if (!dev) { 1165 rcu_read_unlock(); 1166 return -ENODEV; 1167 } 1168 1169 /* Find qdisc */ 1170 if (!*parent) { 1171 *q = rcu_dereference(dev->qdisc); 1172 *parent = (*q)->handle; 1173 } else { 1174 *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent)); 1175 if (!*q) { 1176 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists"); 1177 err = -EINVAL; 1178 goto errout_rcu; 1179 } 1180 } 1181 1182 *q = qdisc_refcount_inc_nz(*q); 1183 if (!*q) { 1184 NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists"); 1185 err = -EINVAL; 1186 goto errout_rcu; 1187 } 1188 1189 /* Is it classful? */ 1190 cops = (*q)->ops->cl_ops; 1191 if (!cops) { 1192 NL_SET_ERR_MSG(extack, "Qdisc not classful"); 1193 err = -EINVAL; 1194 goto errout_qdisc; 1195 } 1196 1197 if (!cops->tcf_block) { 1198 NL_SET_ERR_MSG(extack, "Class doesn't support blocks"); 1199 err = -EOPNOTSUPP; 1200 goto errout_qdisc; 1201 } 1202 1203 errout_rcu: 1204 /* At this point we know that qdisc is not noop_qdisc, 1205 * which means that qdisc holds a reference to net_device 1206 * and we hold a reference to qdisc, so it is safe to release 1207 * rcu read lock. 1208 */ 1209 rcu_read_unlock(); 1210 return err; 1211 1212 errout_qdisc: 1213 rcu_read_unlock(); 1214 1215 if (rtnl_held) 1216 qdisc_put(*q); 1217 else 1218 qdisc_put_unlocked(*q); 1219 *q = NULL; 1220 1221 return err; 1222 } 1223 1224 static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl, 1225 int ifindex, struct netlink_ext_ack *extack) 1226 { 1227 if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) 1228 return 0; 1229 1230 /* Do we search for filter, attached to class? */ 1231 if (TC_H_MIN(parent)) { 1232 const struct Qdisc_class_ops *cops = q->ops->cl_ops; 1233 1234 *cl = cops->find(q, parent); 1235 if (*cl == 0) { 1236 NL_SET_ERR_MSG(extack, "Specified class doesn't exist"); 1237 return -ENOENT; 1238 } 1239 } 1240 1241 return 0; 1242 } 1243 1244 static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q, 1245 unsigned long cl, int ifindex, 1246 u32 block_index, 1247 struct netlink_ext_ack *extack) 1248 { 1249 struct tcf_block *block; 1250 1251 if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) { 1252 block = tcf_block_refcnt_get(net, block_index); 1253 if (!block) { 1254 NL_SET_ERR_MSG(extack, "Block of given index was not found"); 1255 return ERR_PTR(-EINVAL); 1256 } 1257 } else { 1258 const struct Qdisc_class_ops *cops = q->ops->cl_ops; 1259 1260 block = cops->tcf_block(q, cl, extack); 1261 if (!block) 1262 return ERR_PTR(-EINVAL); 1263 1264 if (tcf_block_shared(block)) { 1265 NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters"); 1266 return ERR_PTR(-EOPNOTSUPP); 1267 } 1268 1269 /* Always take reference to block in order to support execution 1270 * of rules update path of cls API without rtnl lock. Caller 1271 * must release block when it is finished using it. 'if' block 1272 * of this conditional obtain reference to block by calling 1273 * tcf_block_refcnt_get(). 1274 */ 1275 refcount_inc(&block->refcnt); 1276 } 1277 1278 return block; 1279 } 1280 1281 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q, 1282 struct tcf_block_ext_info *ei, bool rtnl_held) 1283 { 1284 if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) { 1285 /* Flushing/putting all chains will cause the block to be 1286 * deallocated when last chain is freed. However, if chain_list 1287 * is empty, block has to be manually deallocated. After block 1288 * reference counter reached 0, it is no longer possible to 1289 * increment it or add new chains to block. 1290 */ 1291 bool free_block = list_empty(&block->chain_list); 1292 1293 mutex_unlock(&block->lock); 1294 if (tcf_block_shared(block)) 1295 tcf_block_remove(block, block->net); 1296 1297 if (q) 1298 tcf_block_offload_unbind(block, q, ei); 1299 1300 if (free_block) 1301 tcf_block_destroy(block); 1302 else 1303 tcf_block_flush_all_chains(block, rtnl_held); 1304 } else if (q) { 1305 tcf_block_offload_unbind(block, q, ei); 1306 } 1307 } 1308 1309 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held) 1310 { 1311 __tcf_block_put(block, NULL, NULL, rtnl_held); 1312 } 1313 1314 /* Find tcf block. 1315 * Set q, parent, cl when appropriate. 1316 */ 1317 1318 static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q, 1319 u32 *parent, unsigned long *cl, 1320 int ifindex, u32 block_index, 1321 struct netlink_ext_ack *extack) 1322 { 1323 struct tcf_block *block; 1324 int err = 0; 1325 1326 ASSERT_RTNL(); 1327 1328 err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack); 1329 if (err) 1330 goto errout; 1331 1332 err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack); 1333 if (err) 1334 goto errout_qdisc; 1335 1336 block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack); 1337 if (IS_ERR(block)) { 1338 err = PTR_ERR(block); 1339 goto errout_qdisc; 1340 } 1341 1342 return block; 1343 1344 errout_qdisc: 1345 if (*q) 1346 qdisc_put(*q); 1347 errout: 1348 *q = NULL; 1349 return ERR_PTR(err); 1350 } 1351 1352 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block, 1353 bool rtnl_held) 1354 { 1355 if (!IS_ERR_OR_NULL(block)) 1356 tcf_block_refcnt_put(block, rtnl_held); 1357 1358 if (q) { 1359 if (rtnl_held) 1360 qdisc_put(q); 1361 else 1362 qdisc_put_unlocked(q); 1363 } 1364 } 1365 1366 struct tcf_block_owner_item { 1367 struct list_head list; 1368 struct Qdisc *q; 1369 enum flow_block_binder_type binder_type; 1370 }; 1371 1372 static void 1373 tcf_block_owner_netif_keep_dst(struct tcf_block *block, 1374 struct Qdisc *q, 1375 enum flow_block_binder_type binder_type) 1376 { 1377 if (block->keep_dst && 1378 binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && 1379 binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) 1380 netif_keep_dst(qdisc_dev(q)); 1381 } 1382 1383 void tcf_block_netif_keep_dst(struct tcf_block *block) 1384 { 1385 struct tcf_block_owner_item *item; 1386 1387 block->keep_dst = true; 1388 list_for_each_entry(item, &block->owner_list, list) 1389 tcf_block_owner_netif_keep_dst(block, item->q, 1390 item->binder_type); 1391 } 1392 EXPORT_SYMBOL(tcf_block_netif_keep_dst); 1393 1394 static int tcf_block_owner_add(struct tcf_block *block, 1395 struct Qdisc *q, 1396 enum flow_block_binder_type binder_type) 1397 { 1398 struct tcf_block_owner_item *item; 1399 1400 item = kmalloc(sizeof(*item), GFP_KERNEL); 1401 if (!item) 1402 return -ENOMEM; 1403 item->q = q; 1404 item->binder_type = binder_type; 1405 list_add(&item->list, &block->owner_list); 1406 return 0; 1407 } 1408 1409 static void tcf_block_owner_del(struct tcf_block *block, 1410 struct Qdisc *q, 1411 enum flow_block_binder_type binder_type) 1412 { 1413 struct tcf_block_owner_item *item; 1414 1415 list_for_each_entry(item, &block->owner_list, list) { 1416 if (item->q == q && item->binder_type == binder_type) { 1417 list_del(&item->list); 1418 kfree(item); 1419 return; 1420 } 1421 } 1422 WARN_ON(1); 1423 } 1424 1425 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, 1426 struct tcf_block_ext_info *ei, 1427 struct netlink_ext_ack *extack) 1428 { 1429 struct net *net = qdisc_net(q); 1430 struct tcf_block *block = NULL; 1431 int err; 1432 1433 if (ei->block_index) 1434 /* block_index not 0 means the shared block is requested */ 1435 block = tcf_block_refcnt_get(net, ei->block_index); 1436 1437 if (!block) { 1438 block = tcf_block_create(net, q, ei->block_index, extack); 1439 if (IS_ERR(block)) 1440 return PTR_ERR(block); 1441 if (tcf_block_shared(block)) { 1442 err = tcf_block_insert(block, net, extack); 1443 if (err) 1444 goto err_block_insert; 1445 } 1446 } 1447 1448 err = tcf_block_owner_add(block, q, ei->binder_type); 1449 if (err) 1450 goto err_block_owner_add; 1451 1452 tcf_block_owner_netif_keep_dst(block, q, ei->binder_type); 1453 1454 err = tcf_chain0_head_change_cb_add(block, ei, extack); 1455 if (err) 1456 goto err_chain0_head_change_cb_add; 1457 1458 err = tcf_block_offload_bind(block, q, ei, extack); 1459 if (err) 1460 goto err_block_offload_bind; 1461 1462 *p_block = block; 1463 return 0; 1464 1465 err_block_offload_bind: 1466 tcf_chain0_head_change_cb_del(block, ei); 1467 err_chain0_head_change_cb_add: 1468 tcf_block_owner_del(block, q, ei->binder_type); 1469 err_block_owner_add: 1470 err_block_insert: 1471 tcf_block_refcnt_put(block, true); 1472 return err; 1473 } 1474 EXPORT_SYMBOL(tcf_block_get_ext); 1475 1476 static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv) 1477 { 1478 struct tcf_proto __rcu **p_filter_chain = priv; 1479 1480 rcu_assign_pointer(*p_filter_chain, tp_head); 1481 } 1482 1483 int tcf_block_get(struct tcf_block **p_block, 1484 struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q, 1485 struct netlink_ext_ack *extack) 1486 { 1487 struct tcf_block_ext_info ei = { 1488 .chain_head_change = tcf_chain_head_change_dflt, 1489 .chain_head_change_priv = p_filter_chain, 1490 }; 1491 1492 WARN_ON(!p_filter_chain); 1493 return tcf_block_get_ext(p_block, q, &ei, extack); 1494 } 1495 EXPORT_SYMBOL(tcf_block_get); 1496 1497 /* XXX: Standalone actions are not allowed to jump to any chain, and bound 1498 * actions should be all removed after flushing. 1499 */ 1500 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, 1501 struct tcf_block_ext_info *ei) 1502 { 1503 if (!block) 1504 return; 1505 tcf_chain0_head_change_cb_del(block, ei); 1506 tcf_block_owner_del(block, q, ei->binder_type); 1507 1508 __tcf_block_put(block, q, ei, true); 1509 } 1510 EXPORT_SYMBOL(tcf_block_put_ext); 1511 1512 void tcf_block_put(struct tcf_block *block) 1513 { 1514 struct tcf_block_ext_info ei = {0, }; 1515 1516 if (!block) 1517 return; 1518 tcf_block_put_ext(block, block->q, &ei); 1519 } 1520 1521 EXPORT_SYMBOL(tcf_block_put); 1522 1523 static int 1524 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb, 1525 void *cb_priv, bool add, bool offload_in_use, 1526 struct netlink_ext_ack *extack) 1527 { 1528 struct tcf_chain *chain, *chain_prev; 1529 struct tcf_proto *tp, *tp_prev; 1530 int err; 1531 1532 lockdep_assert_held(&block->cb_lock); 1533 1534 for (chain = __tcf_get_next_chain(block, NULL); 1535 chain; 1536 chain_prev = chain, 1537 chain = __tcf_get_next_chain(block, chain), 1538 tcf_chain_put(chain_prev)) { 1539 for (tp = __tcf_get_next_proto(chain, NULL); tp; 1540 tp_prev = tp, 1541 tp = __tcf_get_next_proto(chain, tp), 1542 tcf_proto_put(tp_prev, true, NULL)) { 1543 if (tp->ops->reoffload) { 1544 err = tp->ops->reoffload(tp, add, cb, cb_priv, 1545 extack); 1546 if (err && add) 1547 goto err_playback_remove; 1548 } else if (add && offload_in_use) { 1549 err = -EOPNOTSUPP; 1550 NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support"); 1551 goto err_playback_remove; 1552 } 1553 } 1554 } 1555 1556 return 0; 1557 1558 err_playback_remove: 1559 tcf_proto_put(tp, true, NULL); 1560 tcf_chain_put(chain); 1561 tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use, 1562 extack); 1563 return err; 1564 } 1565 1566 static int tcf_block_bind(struct tcf_block *block, 1567 struct flow_block_offload *bo) 1568 { 1569 struct flow_block_cb *block_cb, *next; 1570 int err, i = 0; 1571 1572 lockdep_assert_held(&block->cb_lock); 1573 1574 list_for_each_entry(block_cb, &bo->cb_list, list) { 1575 err = tcf_block_playback_offloads(block, block_cb->cb, 1576 block_cb->cb_priv, true, 1577 tcf_block_offload_in_use(block), 1578 bo->extack); 1579 if (err) 1580 goto err_unroll; 1581 if (!bo->unlocked_driver_cb) 1582 block->lockeddevcnt++; 1583 1584 i++; 1585 } 1586 list_splice(&bo->cb_list, &block->flow_block.cb_list); 1587 1588 return 0; 1589 1590 err_unroll: 1591 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) { 1592 list_del(&block_cb->driver_list); 1593 if (i-- > 0) { 1594 list_del(&block_cb->list); 1595 tcf_block_playback_offloads(block, block_cb->cb, 1596 block_cb->cb_priv, false, 1597 tcf_block_offload_in_use(block), 1598 NULL); 1599 if (!bo->unlocked_driver_cb) 1600 block->lockeddevcnt--; 1601 } 1602 flow_block_cb_free(block_cb); 1603 } 1604 1605 return err; 1606 } 1607 1608 static void tcf_block_unbind(struct tcf_block *block, 1609 struct flow_block_offload *bo) 1610 { 1611 struct flow_block_cb *block_cb, *next; 1612 1613 lockdep_assert_held(&block->cb_lock); 1614 1615 list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) { 1616 tcf_block_playback_offloads(block, block_cb->cb, 1617 block_cb->cb_priv, false, 1618 tcf_block_offload_in_use(block), 1619 NULL); 1620 list_del(&block_cb->list); 1621 flow_block_cb_free(block_cb); 1622 if (!bo->unlocked_driver_cb) 1623 block->lockeddevcnt--; 1624 } 1625 } 1626 1627 static int tcf_block_setup(struct tcf_block *block, 1628 struct flow_block_offload *bo) 1629 { 1630 int err; 1631 1632 switch (bo->command) { 1633 case FLOW_BLOCK_BIND: 1634 err = tcf_block_bind(block, bo); 1635 break; 1636 case FLOW_BLOCK_UNBIND: 1637 err = 0; 1638 tcf_block_unbind(block, bo); 1639 break; 1640 default: 1641 WARN_ON_ONCE(1); 1642 err = -EOPNOTSUPP; 1643 } 1644 1645 return err; 1646 } 1647 1648 /* Main classifier routine: scans classifier chain attached 1649 * to this qdisc, (optionally) tests for protocol and asks 1650 * specific classifiers. 1651 */ 1652 static inline int __tcf_classify(struct sk_buff *skb, 1653 const struct tcf_proto *tp, 1654 const struct tcf_proto *orig_tp, 1655 struct tcf_result *res, 1656 bool compat_mode, 1657 struct tcf_exts_miss_cookie_node *n, 1658 int act_index, 1659 u32 *last_executed_chain) 1660 { 1661 #ifdef CONFIG_NET_CLS_ACT 1662 const int max_reclassify_loop = 16; 1663 const struct tcf_proto *first_tp; 1664 int limit = 0; 1665 1666 reclassify: 1667 #endif 1668 for (; tp; tp = rcu_dereference_bh(tp->next)) { 1669 __be16 protocol = skb_protocol(skb, false); 1670 int err = 0; 1671 1672 if (n) { 1673 struct tcf_exts *exts; 1674 1675 if (n->tp_prio != tp->prio) 1676 continue; 1677 1678 /* We re-lookup the tp and chain based on index instead 1679 * of having hard refs and locks to them, so do a sanity 1680 * check if any of tp,chain,exts was replaced by the 1681 * time we got here with a cookie from hardware. 1682 */ 1683 if (unlikely(n->tp != tp || n->tp->chain != n->chain || 1684 !tp->ops->get_exts)) 1685 return TC_ACT_SHOT; 1686 1687 exts = tp->ops->get_exts(tp, n->handle); 1688 if (unlikely(!exts || n->exts != exts)) 1689 return TC_ACT_SHOT; 1690 1691 n = NULL; 1692 err = tcf_exts_exec_ex(skb, exts, act_index, res); 1693 } else { 1694 if (tp->protocol != protocol && 1695 tp->protocol != htons(ETH_P_ALL)) 1696 continue; 1697 1698 err = tc_classify(skb, tp, res); 1699 } 1700 #ifdef CONFIG_NET_CLS_ACT 1701 if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) { 1702 first_tp = orig_tp; 1703 *last_executed_chain = first_tp->chain->index; 1704 goto reset; 1705 } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) { 1706 first_tp = res->goto_tp; 1707 *last_executed_chain = err & TC_ACT_EXT_VAL_MASK; 1708 goto reset; 1709 } 1710 #endif 1711 if (err >= 0) 1712 return err; 1713 } 1714 1715 if (unlikely(n)) 1716 return TC_ACT_SHOT; 1717 1718 return TC_ACT_UNSPEC; /* signal: continue lookup */ 1719 #ifdef CONFIG_NET_CLS_ACT 1720 reset: 1721 if (unlikely(limit++ >= max_reclassify_loop)) { 1722 net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n", 1723 tp->chain->block->index, 1724 tp->prio & 0xffff, 1725 ntohs(tp->protocol)); 1726 return TC_ACT_SHOT; 1727 } 1728 1729 tp = first_tp; 1730 goto reclassify; 1731 #endif 1732 } 1733 1734 int tcf_classify(struct sk_buff *skb, 1735 const struct tcf_block *block, 1736 const struct tcf_proto *tp, 1737 struct tcf_result *res, bool compat_mode) 1738 { 1739 #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT) 1740 u32 last_executed_chain = 0; 1741 1742 return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0, 1743 &last_executed_chain); 1744 #else 1745 u32 last_executed_chain = tp ? tp->chain->index : 0; 1746 struct tcf_exts_miss_cookie_node *n = NULL; 1747 const struct tcf_proto *orig_tp = tp; 1748 struct tc_skb_ext *ext; 1749 int act_index = 0; 1750 int ret; 1751 1752 if (block) { 1753 ext = skb_ext_find(skb, TC_SKB_EXT); 1754 1755 if (ext && (ext->chain || ext->act_miss)) { 1756 struct tcf_chain *fchain; 1757 u32 chain; 1758 1759 if (ext->act_miss) { 1760 n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie, 1761 &act_index); 1762 if (!n) 1763 return TC_ACT_SHOT; 1764 1765 chain = n->chain_index; 1766 } else { 1767 chain = ext->chain; 1768 } 1769 1770 fchain = tcf_chain_lookup_rcu(block, chain); 1771 if (!fchain) 1772 return TC_ACT_SHOT; 1773 1774 /* Consume, so cloned/redirect skbs won't inherit ext */ 1775 skb_ext_del(skb, TC_SKB_EXT); 1776 1777 tp = rcu_dereference_bh(fchain->filter_chain); 1778 last_executed_chain = fchain->index; 1779 } 1780 } 1781 1782 ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index, 1783 &last_executed_chain); 1784 1785 if (tc_skb_ext_tc_enabled()) { 1786 /* If we missed on some chain */ 1787 if (ret == TC_ACT_UNSPEC && last_executed_chain) { 1788 struct tc_skb_cb *cb = tc_skb_cb(skb); 1789 1790 ext = tc_skb_ext_alloc(skb); 1791 if (WARN_ON_ONCE(!ext)) 1792 return TC_ACT_SHOT; 1793 ext->chain = last_executed_chain; 1794 ext->mru = cb->mru; 1795 ext->post_ct = cb->post_ct; 1796 ext->post_ct_snat = cb->post_ct_snat; 1797 ext->post_ct_dnat = cb->post_ct_dnat; 1798 ext->zone = cb->zone; 1799 } 1800 } 1801 1802 return ret; 1803 #endif 1804 } 1805 EXPORT_SYMBOL(tcf_classify); 1806 1807 struct tcf_chain_info { 1808 struct tcf_proto __rcu **pprev; 1809 struct tcf_proto __rcu *next; 1810 }; 1811 1812 static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain, 1813 struct tcf_chain_info *chain_info) 1814 { 1815 return tcf_chain_dereference(*chain_info->pprev, chain); 1816 } 1817 1818 static int tcf_chain_tp_insert(struct tcf_chain *chain, 1819 struct tcf_chain_info *chain_info, 1820 struct tcf_proto *tp) 1821 { 1822 if (chain->flushing) 1823 return -EAGAIN; 1824 1825 RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info)); 1826 if (*chain_info->pprev == chain->filter_chain) 1827 tcf_chain0_head_change(chain, tp); 1828 tcf_proto_get(tp); 1829 rcu_assign_pointer(*chain_info->pprev, tp); 1830 1831 return 0; 1832 } 1833 1834 static void tcf_chain_tp_remove(struct tcf_chain *chain, 1835 struct tcf_chain_info *chain_info, 1836 struct tcf_proto *tp) 1837 { 1838 struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain); 1839 1840 tcf_proto_mark_delete(tp); 1841 if (tp == chain->filter_chain) 1842 tcf_chain0_head_change(chain, next); 1843 RCU_INIT_POINTER(*chain_info->pprev, next); 1844 } 1845 1846 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, 1847 struct tcf_chain_info *chain_info, 1848 u32 protocol, u32 prio, 1849 bool prio_allocate); 1850 1851 /* Try to insert new proto. 1852 * If proto with specified priority already exists, free new proto 1853 * and return existing one. 1854 */ 1855 1856 static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain, 1857 struct tcf_proto *tp_new, 1858 u32 protocol, u32 prio, 1859 bool rtnl_held) 1860 { 1861 struct tcf_chain_info chain_info; 1862 struct tcf_proto *tp; 1863 int err = 0; 1864 1865 mutex_lock(&chain->filter_chain_lock); 1866 1867 if (tcf_proto_exists_destroying(chain, tp_new)) { 1868 mutex_unlock(&chain->filter_chain_lock); 1869 tcf_proto_destroy(tp_new, rtnl_held, false, NULL); 1870 return ERR_PTR(-EAGAIN); 1871 } 1872 1873 tp = tcf_chain_tp_find(chain, &chain_info, 1874 protocol, prio, false); 1875 if (!tp) 1876 err = tcf_chain_tp_insert(chain, &chain_info, tp_new); 1877 mutex_unlock(&chain->filter_chain_lock); 1878 1879 if (tp) { 1880 tcf_proto_destroy(tp_new, rtnl_held, false, NULL); 1881 tp_new = tp; 1882 } else if (err) { 1883 tcf_proto_destroy(tp_new, rtnl_held, false, NULL); 1884 tp_new = ERR_PTR(err); 1885 } 1886 1887 return tp_new; 1888 } 1889 1890 static void tcf_chain_tp_delete_empty(struct tcf_chain *chain, 1891 struct tcf_proto *tp, bool rtnl_held, 1892 struct netlink_ext_ack *extack) 1893 { 1894 struct tcf_chain_info chain_info; 1895 struct tcf_proto *tp_iter; 1896 struct tcf_proto **pprev; 1897 struct tcf_proto *next; 1898 1899 mutex_lock(&chain->filter_chain_lock); 1900 1901 /* Atomically find and remove tp from chain. */ 1902 for (pprev = &chain->filter_chain; 1903 (tp_iter = tcf_chain_dereference(*pprev, chain)); 1904 pprev = &tp_iter->next) { 1905 if (tp_iter == tp) { 1906 chain_info.pprev = pprev; 1907 chain_info.next = tp_iter->next; 1908 WARN_ON(tp_iter->deleting); 1909 break; 1910 } 1911 } 1912 /* Verify that tp still exists and no new filters were inserted 1913 * concurrently. 1914 * Mark tp for deletion if it is empty. 1915 */ 1916 if (!tp_iter || !tcf_proto_check_delete(tp)) { 1917 mutex_unlock(&chain->filter_chain_lock); 1918 return; 1919 } 1920 1921 tcf_proto_signal_destroying(chain, tp); 1922 next = tcf_chain_dereference(chain_info.next, chain); 1923 if (tp == chain->filter_chain) 1924 tcf_chain0_head_change(chain, next); 1925 RCU_INIT_POINTER(*chain_info.pprev, next); 1926 mutex_unlock(&chain->filter_chain_lock); 1927 1928 tcf_proto_put(tp, rtnl_held, extack); 1929 } 1930 1931 static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, 1932 struct tcf_chain_info *chain_info, 1933 u32 protocol, u32 prio, 1934 bool prio_allocate) 1935 { 1936 struct tcf_proto **pprev; 1937 struct tcf_proto *tp; 1938 1939 /* Check the chain for existence of proto-tcf with this priority */ 1940 for (pprev = &chain->filter_chain; 1941 (tp = tcf_chain_dereference(*pprev, chain)); 1942 pprev = &tp->next) { 1943 if (tp->prio >= prio) { 1944 if (tp->prio == prio) { 1945 if (prio_allocate || 1946 (tp->protocol != protocol && protocol)) 1947 return ERR_PTR(-EINVAL); 1948 } else { 1949 tp = NULL; 1950 } 1951 break; 1952 } 1953 } 1954 chain_info->pprev = pprev; 1955 if (tp) { 1956 chain_info->next = tp->next; 1957 tcf_proto_get(tp); 1958 } else { 1959 chain_info->next = NULL; 1960 } 1961 return tp; 1962 } 1963 1964 static int tcf_fill_node(struct net *net, struct sk_buff *skb, 1965 struct tcf_proto *tp, struct tcf_block *block, 1966 struct Qdisc *q, u32 parent, void *fh, 1967 u32 portid, u32 seq, u16 flags, int event, 1968 bool terse_dump, bool rtnl_held, 1969 struct netlink_ext_ack *extack) 1970 { 1971 struct tcmsg *tcm; 1972 struct nlmsghdr *nlh; 1973 unsigned char *b = skb_tail_pointer(skb); 1974 1975 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); 1976 if (!nlh) 1977 goto out_nlmsg_trim; 1978 tcm = nlmsg_data(nlh); 1979 tcm->tcm_family = AF_UNSPEC; 1980 tcm->tcm__pad1 = 0; 1981 tcm->tcm__pad2 = 0; 1982 if (q) { 1983 tcm->tcm_ifindex = qdisc_dev(q)->ifindex; 1984 tcm->tcm_parent = parent; 1985 } else { 1986 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; 1987 tcm->tcm_block_index = block->index; 1988 } 1989 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); 1990 if (nla_put_string(skb, TCA_KIND, tp->ops->kind)) 1991 goto nla_put_failure; 1992 if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index)) 1993 goto nla_put_failure; 1994 if (!fh) { 1995 tcm->tcm_handle = 0; 1996 } else if (terse_dump) { 1997 if (tp->ops->terse_dump) { 1998 if (tp->ops->terse_dump(net, tp, fh, skb, tcm, 1999 rtnl_held) < 0) 2000 goto nla_put_failure; 2001 } else { 2002 goto cls_op_not_supp; 2003 } 2004 } else { 2005 if (tp->ops->dump && 2006 tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0) 2007 goto nla_put_failure; 2008 } 2009 2010 if (extack && extack->_msg && 2011 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg)) 2012 goto nla_put_failure; 2013 2014 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 2015 2016 return skb->len; 2017 2018 out_nlmsg_trim: 2019 nla_put_failure: 2020 cls_op_not_supp: 2021 nlmsg_trim(skb, b); 2022 return -1; 2023 } 2024 2025 static int tfilter_notify(struct net *net, struct sk_buff *oskb, 2026 struct nlmsghdr *n, struct tcf_proto *tp, 2027 struct tcf_block *block, struct Qdisc *q, 2028 u32 parent, void *fh, int event, bool unicast, 2029 bool rtnl_held, struct netlink_ext_ack *extack) 2030 { 2031 struct sk_buff *skb; 2032 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 2033 int err = 0; 2034 2035 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2036 if (!skb) 2037 return -ENOBUFS; 2038 2039 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, 2040 n->nlmsg_seq, n->nlmsg_flags, event, 2041 false, rtnl_held, extack) <= 0) { 2042 kfree_skb(skb); 2043 return -EINVAL; 2044 } 2045 2046 if (unicast) 2047 err = rtnl_unicast(skb, net, portid); 2048 else 2049 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, 2050 n->nlmsg_flags & NLM_F_ECHO); 2051 return err; 2052 } 2053 2054 static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, 2055 struct nlmsghdr *n, struct tcf_proto *tp, 2056 struct tcf_block *block, struct Qdisc *q, 2057 u32 parent, void *fh, bool unicast, bool *last, 2058 bool rtnl_held, struct netlink_ext_ack *extack) 2059 { 2060 struct sk_buff *skb; 2061 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 2062 int err; 2063 2064 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2065 if (!skb) 2066 return -ENOBUFS; 2067 2068 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, 2069 n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER, 2070 false, rtnl_held, extack) <= 0) { 2071 NL_SET_ERR_MSG(extack, "Failed to build del event notification"); 2072 kfree_skb(skb); 2073 return -EINVAL; 2074 } 2075 2076 err = tp->ops->delete(tp, fh, last, rtnl_held, extack); 2077 if (err) { 2078 kfree_skb(skb); 2079 return err; 2080 } 2081 2082 if (unicast) 2083 err = rtnl_unicast(skb, net, portid); 2084 else 2085 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, 2086 n->nlmsg_flags & NLM_F_ECHO); 2087 if (err < 0) 2088 NL_SET_ERR_MSG(extack, "Failed to send filter delete notification"); 2089 2090 return err; 2091 } 2092 2093 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb, 2094 struct tcf_block *block, struct Qdisc *q, 2095 u32 parent, struct nlmsghdr *n, 2096 struct tcf_chain *chain, int event, 2097 struct netlink_ext_ack *extack) 2098 { 2099 struct tcf_proto *tp; 2100 2101 for (tp = tcf_get_next_proto(chain, NULL); 2102 tp; tp = tcf_get_next_proto(chain, tp)) 2103 tfilter_notify(net, oskb, n, tp, block, q, parent, NULL, 2104 event, false, true, extack); 2105 } 2106 2107 static void tfilter_put(struct tcf_proto *tp, void *fh) 2108 { 2109 if (tp->ops->put && fh) 2110 tp->ops->put(tp, fh); 2111 } 2112 2113 static bool is_qdisc_ingress(__u32 classid) 2114 { 2115 return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS)); 2116 } 2117 2118 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 2119 struct netlink_ext_ack *extack) 2120 { 2121 struct net *net = sock_net(skb->sk); 2122 struct nlattr *tca[TCA_MAX + 1]; 2123 char name[IFNAMSIZ]; 2124 struct tcmsg *t; 2125 u32 protocol; 2126 u32 prio; 2127 bool prio_allocate; 2128 u32 parent; 2129 u32 chain_index; 2130 struct Qdisc *q; 2131 struct tcf_chain_info chain_info; 2132 struct tcf_chain *chain; 2133 struct tcf_block *block; 2134 struct tcf_proto *tp; 2135 unsigned long cl; 2136 void *fh; 2137 int err; 2138 int tp_created; 2139 bool rtnl_held = false; 2140 u32 flags; 2141 2142 replay: 2143 tp_created = 0; 2144 2145 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2146 rtm_tca_policy, extack); 2147 if (err < 0) 2148 return err; 2149 2150 t = nlmsg_data(n); 2151 protocol = TC_H_MIN(t->tcm_info); 2152 prio = TC_H_MAJ(t->tcm_info); 2153 prio_allocate = false; 2154 parent = t->tcm_parent; 2155 tp = NULL; 2156 cl = 0; 2157 block = NULL; 2158 q = NULL; 2159 chain = NULL; 2160 flags = 0; 2161 2162 if (prio == 0) { 2163 /* If no priority is provided by the user, 2164 * we allocate one. 2165 */ 2166 if (n->nlmsg_flags & NLM_F_CREATE) { 2167 prio = TC_H_MAKE(0x80000000U, 0U); 2168 prio_allocate = true; 2169 } else { 2170 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); 2171 return -ENOENT; 2172 } 2173 } 2174 2175 /* Find head of filter chain. */ 2176 2177 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); 2178 if (err) 2179 return err; 2180 2181 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2182 NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); 2183 err = -EINVAL; 2184 goto errout; 2185 } 2186 2187 /* Take rtnl mutex if rtnl_held was set to true on previous iteration, 2188 * block is shared (no qdisc found), qdisc is not unlocked, classifier 2189 * type is not specified, classifier is not unlocked. 2190 */ 2191 if (rtnl_held || 2192 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || 2193 !tcf_proto_is_unlocked(name)) { 2194 rtnl_held = true; 2195 rtnl_lock(); 2196 } 2197 2198 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); 2199 if (err) 2200 goto errout; 2201 2202 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, 2203 extack); 2204 if (IS_ERR(block)) { 2205 err = PTR_ERR(block); 2206 goto errout; 2207 } 2208 block->classid = parent; 2209 2210 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2211 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2212 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2213 err = -EINVAL; 2214 goto errout; 2215 } 2216 chain = tcf_chain_get(block, chain_index, true); 2217 if (!chain) { 2218 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain"); 2219 err = -ENOMEM; 2220 goto errout; 2221 } 2222 2223 mutex_lock(&chain->filter_chain_lock); 2224 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 2225 prio, prio_allocate); 2226 if (IS_ERR(tp)) { 2227 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); 2228 err = PTR_ERR(tp); 2229 goto errout_locked; 2230 } 2231 2232 if (tp == NULL) { 2233 struct tcf_proto *tp_new = NULL; 2234 2235 if (chain->flushing) { 2236 err = -EAGAIN; 2237 goto errout_locked; 2238 } 2239 2240 /* Proto-tcf does not exist, create new one */ 2241 2242 if (tca[TCA_KIND] == NULL || !protocol) { 2243 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified"); 2244 err = -EINVAL; 2245 goto errout_locked; 2246 } 2247 2248 if (!(n->nlmsg_flags & NLM_F_CREATE)) { 2249 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter"); 2250 err = -ENOENT; 2251 goto errout_locked; 2252 } 2253 2254 if (prio_allocate) 2255 prio = tcf_auto_prio(tcf_chain_tp_prev(chain, 2256 &chain_info)); 2257 2258 mutex_unlock(&chain->filter_chain_lock); 2259 tp_new = tcf_proto_create(name, protocol, prio, chain, 2260 rtnl_held, extack); 2261 if (IS_ERR(tp_new)) { 2262 err = PTR_ERR(tp_new); 2263 goto errout_tp; 2264 } 2265 2266 tp_created = 1; 2267 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio, 2268 rtnl_held); 2269 if (IS_ERR(tp)) { 2270 err = PTR_ERR(tp); 2271 goto errout_tp; 2272 } 2273 } else { 2274 mutex_unlock(&chain->filter_chain_lock); 2275 } 2276 2277 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 2278 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); 2279 err = -EINVAL; 2280 goto errout; 2281 } 2282 2283 fh = tp->ops->get(tp, t->tcm_handle); 2284 2285 if (!fh) { 2286 if (!(n->nlmsg_flags & NLM_F_CREATE)) { 2287 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter"); 2288 err = -ENOENT; 2289 goto errout; 2290 } 2291 } else if (n->nlmsg_flags & NLM_F_EXCL) { 2292 tfilter_put(tp, fh); 2293 NL_SET_ERR_MSG(extack, "Filter already exists"); 2294 err = -EEXIST; 2295 goto errout; 2296 } 2297 2298 if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) { 2299 tfilter_put(tp, fh); 2300 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind"); 2301 err = -EINVAL; 2302 goto errout; 2303 } 2304 2305 if (!(n->nlmsg_flags & NLM_F_CREATE)) 2306 flags |= TCA_ACT_FLAGS_REPLACE; 2307 if (!rtnl_held) 2308 flags |= TCA_ACT_FLAGS_NO_RTNL; 2309 if (is_qdisc_ingress(parent)) 2310 flags |= TCA_ACT_FLAGS_AT_INGRESS; 2311 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh, 2312 flags, extack); 2313 if (err == 0) { 2314 tfilter_notify(net, skb, n, tp, block, q, parent, fh, 2315 RTM_NEWTFILTER, false, rtnl_held, extack); 2316 tfilter_put(tp, fh); 2317 /* q pointer is NULL for shared blocks */ 2318 if (q) 2319 q->flags &= ~TCQ_F_CAN_BYPASS; 2320 } 2321 2322 errout: 2323 if (err && tp_created) 2324 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL); 2325 errout_tp: 2326 if (chain) { 2327 if (tp && !IS_ERR(tp)) 2328 tcf_proto_put(tp, rtnl_held, NULL); 2329 if (!tp_created) 2330 tcf_chain_put(chain); 2331 } 2332 tcf_block_release(q, block, rtnl_held); 2333 2334 if (rtnl_held) 2335 rtnl_unlock(); 2336 2337 if (err == -EAGAIN) { 2338 /* Take rtnl lock in case EAGAIN is caused by concurrent flush 2339 * of target chain. 2340 */ 2341 rtnl_held = true; 2342 /* Replay the request. */ 2343 goto replay; 2344 } 2345 return err; 2346 2347 errout_locked: 2348 mutex_unlock(&chain->filter_chain_lock); 2349 goto errout; 2350 } 2351 2352 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 2353 struct netlink_ext_ack *extack) 2354 { 2355 struct net *net = sock_net(skb->sk); 2356 struct nlattr *tca[TCA_MAX + 1]; 2357 char name[IFNAMSIZ]; 2358 struct tcmsg *t; 2359 u32 protocol; 2360 u32 prio; 2361 u32 parent; 2362 u32 chain_index; 2363 struct Qdisc *q = NULL; 2364 struct tcf_chain_info chain_info; 2365 struct tcf_chain *chain = NULL; 2366 struct tcf_block *block = NULL; 2367 struct tcf_proto *tp = NULL; 2368 unsigned long cl = 0; 2369 void *fh = NULL; 2370 int err; 2371 bool rtnl_held = false; 2372 2373 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2374 rtm_tca_policy, extack); 2375 if (err < 0) 2376 return err; 2377 2378 t = nlmsg_data(n); 2379 protocol = TC_H_MIN(t->tcm_info); 2380 prio = TC_H_MAJ(t->tcm_info); 2381 parent = t->tcm_parent; 2382 2383 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) { 2384 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set"); 2385 return -ENOENT; 2386 } 2387 2388 /* Find head of filter chain. */ 2389 2390 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); 2391 if (err) 2392 return err; 2393 2394 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2395 NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); 2396 err = -EINVAL; 2397 goto errout; 2398 } 2399 /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc 2400 * found), qdisc is not unlocked, classifier type is not specified, 2401 * classifier is not unlocked. 2402 */ 2403 if (!prio || 2404 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || 2405 !tcf_proto_is_unlocked(name)) { 2406 rtnl_held = true; 2407 rtnl_lock(); 2408 } 2409 2410 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); 2411 if (err) 2412 goto errout; 2413 2414 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, 2415 extack); 2416 if (IS_ERR(block)) { 2417 err = PTR_ERR(block); 2418 goto errout; 2419 } 2420 2421 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2422 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2423 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2424 err = -EINVAL; 2425 goto errout; 2426 } 2427 chain = tcf_chain_get(block, chain_index, false); 2428 if (!chain) { 2429 /* User requested flush on non-existent chain. Nothing to do, 2430 * so just return success. 2431 */ 2432 if (prio == 0) { 2433 err = 0; 2434 goto errout; 2435 } 2436 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 2437 err = -ENOENT; 2438 goto errout; 2439 } 2440 2441 if (prio == 0) { 2442 tfilter_notify_chain(net, skb, block, q, parent, n, 2443 chain, RTM_DELTFILTER, extack); 2444 tcf_chain_flush(chain, rtnl_held); 2445 err = 0; 2446 goto errout; 2447 } 2448 2449 mutex_lock(&chain->filter_chain_lock); 2450 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 2451 prio, false); 2452 if (!tp || IS_ERR(tp)) { 2453 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); 2454 err = tp ? PTR_ERR(tp) : -ENOENT; 2455 goto errout_locked; 2456 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 2457 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); 2458 err = -EINVAL; 2459 goto errout_locked; 2460 } else if (t->tcm_handle == 0) { 2461 tcf_proto_signal_destroying(chain, tp); 2462 tcf_chain_tp_remove(chain, &chain_info, tp); 2463 mutex_unlock(&chain->filter_chain_lock); 2464 2465 tcf_proto_put(tp, rtnl_held, NULL); 2466 tfilter_notify(net, skb, n, tp, block, q, parent, fh, 2467 RTM_DELTFILTER, false, rtnl_held, extack); 2468 err = 0; 2469 goto errout; 2470 } 2471 mutex_unlock(&chain->filter_chain_lock); 2472 2473 fh = tp->ops->get(tp, t->tcm_handle); 2474 2475 if (!fh) { 2476 NL_SET_ERR_MSG(extack, "Specified filter handle not found"); 2477 err = -ENOENT; 2478 } else { 2479 bool last; 2480 2481 err = tfilter_del_notify(net, skb, n, tp, block, 2482 q, parent, fh, false, &last, 2483 rtnl_held, extack); 2484 2485 if (err) 2486 goto errout; 2487 if (last) 2488 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack); 2489 } 2490 2491 errout: 2492 if (chain) { 2493 if (tp && !IS_ERR(tp)) 2494 tcf_proto_put(tp, rtnl_held, NULL); 2495 tcf_chain_put(chain); 2496 } 2497 tcf_block_release(q, block, rtnl_held); 2498 2499 if (rtnl_held) 2500 rtnl_unlock(); 2501 2502 return err; 2503 2504 errout_locked: 2505 mutex_unlock(&chain->filter_chain_lock); 2506 goto errout; 2507 } 2508 2509 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 2510 struct netlink_ext_ack *extack) 2511 { 2512 struct net *net = sock_net(skb->sk); 2513 struct nlattr *tca[TCA_MAX + 1]; 2514 char name[IFNAMSIZ]; 2515 struct tcmsg *t; 2516 u32 protocol; 2517 u32 prio; 2518 u32 parent; 2519 u32 chain_index; 2520 struct Qdisc *q = NULL; 2521 struct tcf_chain_info chain_info; 2522 struct tcf_chain *chain = NULL; 2523 struct tcf_block *block = NULL; 2524 struct tcf_proto *tp = NULL; 2525 unsigned long cl = 0; 2526 void *fh = NULL; 2527 int err; 2528 bool rtnl_held = false; 2529 2530 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2531 rtm_tca_policy, extack); 2532 if (err < 0) 2533 return err; 2534 2535 t = nlmsg_data(n); 2536 protocol = TC_H_MIN(t->tcm_info); 2537 prio = TC_H_MAJ(t->tcm_info); 2538 parent = t->tcm_parent; 2539 2540 if (prio == 0) { 2541 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); 2542 return -ENOENT; 2543 } 2544 2545 /* Find head of filter chain. */ 2546 2547 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); 2548 if (err) 2549 return err; 2550 2551 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2552 NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); 2553 err = -EINVAL; 2554 goto errout; 2555 } 2556 /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not 2557 * unlocked, classifier type is not specified, classifier is not 2558 * unlocked. 2559 */ 2560 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || 2561 !tcf_proto_is_unlocked(name)) { 2562 rtnl_held = true; 2563 rtnl_lock(); 2564 } 2565 2566 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); 2567 if (err) 2568 goto errout; 2569 2570 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, 2571 extack); 2572 if (IS_ERR(block)) { 2573 err = PTR_ERR(block); 2574 goto errout; 2575 } 2576 2577 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2578 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2579 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2580 err = -EINVAL; 2581 goto errout; 2582 } 2583 chain = tcf_chain_get(block, chain_index, false); 2584 if (!chain) { 2585 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 2586 err = -EINVAL; 2587 goto errout; 2588 } 2589 2590 mutex_lock(&chain->filter_chain_lock); 2591 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 2592 prio, false); 2593 mutex_unlock(&chain->filter_chain_lock); 2594 if (!tp || IS_ERR(tp)) { 2595 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); 2596 err = tp ? PTR_ERR(tp) : -ENOENT; 2597 goto errout; 2598 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 2599 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); 2600 err = -EINVAL; 2601 goto errout; 2602 } 2603 2604 fh = tp->ops->get(tp, t->tcm_handle); 2605 2606 if (!fh) { 2607 NL_SET_ERR_MSG(extack, "Specified filter handle not found"); 2608 err = -ENOENT; 2609 } else { 2610 err = tfilter_notify(net, skb, n, tp, block, q, parent, 2611 fh, RTM_NEWTFILTER, true, rtnl_held, NULL); 2612 if (err < 0) 2613 NL_SET_ERR_MSG(extack, "Failed to send filter notify message"); 2614 } 2615 2616 tfilter_put(tp, fh); 2617 errout: 2618 if (chain) { 2619 if (tp && !IS_ERR(tp)) 2620 tcf_proto_put(tp, rtnl_held, NULL); 2621 tcf_chain_put(chain); 2622 } 2623 tcf_block_release(q, block, rtnl_held); 2624 2625 if (rtnl_held) 2626 rtnl_unlock(); 2627 2628 return err; 2629 } 2630 2631 struct tcf_dump_args { 2632 struct tcf_walker w; 2633 struct sk_buff *skb; 2634 struct netlink_callback *cb; 2635 struct tcf_block *block; 2636 struct Qdisc *q; 2637 u32 parent; 2638 bool terse_dump; 2639 }; 2640 2641 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg) 2642 { 2643 struct tcf_dump_args *a = (void *)arg; 2644 struct net *net = sock_net(a->skb->sk); 2645 2646 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent, 2647 n, NETLINK_CB(a->cb->skb).portid, 2648 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, 2649 RTM_NEWTFILTER, a->terse_dump, true, NULL); 2650 } 2651 2652 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, 2653 struct sk_buff *skb, struct netlink_callback *cb, 2654 long index_start, long *p_index, bool terse) 2655 { 2656 struct net *net = sock_net(skb->sk); 2657 struct tcf_block *block = chain->block; 2658 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2659 struct tcf_proto *tp, *tp_prev; 2660 struct tcf_dump_args arg; 2661 2662 for (tp = __tcf_get_next_proto(chain, NULL); 2663 tp; 2664 tp_prev = tp, 2665 tp = __tcf_get_next_proto(chain, tp), 2666 tcf_proto_put(tp_prev, true, NULL), 2667 (*p_index)++) { 2668 if (*p_index < index_start) 2669 continue; 2670 if (TC_H_MAJ(tcm->tcm_info) && 2671 TC_H_MAJ(tcm->tcm_info) != tp->prio) 2672 continue; 2673 if (TC_H_MIN(tcm->tcm_info) && 2674 TC_H_MIN(tcm->tcm_info) != tp->protocol) 2675 continue; 2676 if (*p_index > index_start) 2677 memset(&cb->args[1], 0, 2678 sizeof(cb->args) - sizeof(cb->args[0])); 2679 if (cb->args[1] == 0) { 2680 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, 2681 NETLINK_CB(cb->skb).portid, 2682 cb->nlh->nlmsg_seq, NLM_F_MULTI, 2683 RTM_NEWTFILTER, false, true, NULL) <= 0) 2684 goto errout; 2685 cb->args[1] = 1; 2686 } 2687 if (!tp->ops->walk) 2688 continue; 2689 arg.w.fn = tcf_node_dump; 2690 arg.skb = skb; 2691 arg.cb = cb; 2692 arg.block = block; 2693 arg.q = q; 2694 arg.parent = parent; 2695 arg.w.stop = 0; 2696 arg.w.skip = cb->args[1] - 1; 2697 arg.w.count = 0; 2698 arg.w.cookie = cb->args[2]; 2699 arg.terse_dump = terse; 2700 tp->ops->walk(tp, &arg.w, true); 2701 cb->args[2] = arg.w.cookie; 2702 cb->args[1] = arg.w.count + 1; 2703 if (arg.w.stop) 2704 goto errout; 2705 } 2706 return true; 2707 2708 errout: 2709 tcf_proto_put(tp, true, NULL); 2710 return false; 2711 } 2712 2713 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = { 2714 [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE), 2715 }; 2716 2717 /* called with RTNL */ 2718 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) 2719 { 2720 struct tcf_chain *chain, *chain_prev; 2721 struct net *net = sock_net(skb->sk); 2722 struct nlattr *tca[TCA_MAX + 1]; 2723 struct Qdisc *q = NULL; 2724 struct tcf_block *block; 2725 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2726 bool terse_dump = false; 2727 long index_start; 2728 long index; 2729 u32 parent; 2730 int err; 2731 2732 if (nlmsg_len(cb->nlh) < sizeof(*tcm)) 2733 return skb->len; 2734 2735 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, 2736 tcf_tfilter_dump_policy, cb->extack); 2737 if (err) 2738 return err; 2739 2740 if (tca[TCA_DUMP_FLAGS]) { 2741 struct nla_bitfield32 flags = 2742 nla_get_bitfield32(tca[TCA_DUMP_FLAGS]); 2743 2744 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE; 2745 } 2746 2747 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { 2748 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); 2749 if (!block) 2750 goto out; 2751 /* If we work with block index, q is NULL and parent value 2752 * will never be used in the following code. The check 2753 * in tcf_fill_node prevents it. However, compiler does not 2754 * see that far, so set parent to zero to silence the warning 2755 * about parent being uninitialized. 2756 */ 2757 parent = 0; 2758 } else { 2759 const struct Qdisc_class_ops *cops; 2760 struct net_device *dev; 2761 unsigned long cl = 0; 2762 2763 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 2764 if (!dev) 2765 return skb->len; 2766 2767 parent = tcm->tcm_parent; 2768 if (!parent) 2769 q = rtnl_dereference(dev->qdisc); 2770 else 2771 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 2772 if (!q) 2773 goto out; 2774 cops = q->ops->cl_ops; 2775 if (!cops) 2776 goto out; 2777 if (!cops->tcf_block) 2778 goto out; 2779 if (TC_H_MIN(tcm->tcm_parent)) { 2780 cl = cops->find(q, tcm->tcm_parent); 2781 if (cl == 0) 2782 goto out; 2783 } 2784 block = cops->tcf_block(q, cl, NULL); 2785 if (!block) 2786 goto out; 2787 parent = block->classid; 2788 if (tcf_block_shared(block)) 2789 q = NULL; 2790 } 2791 2792 index_start = cb->args[0]; 2793 index = 0; 2794 2795 for (chain = __tcf_get_next_chain(block, NULL); 2796 chain; 2797 chain_prev = chain, 2798 chain = __tcf_get_next_chain(block, chain), 2799 tcf_chain_put(chain_prev)) { 2800 if (tca[TCA_CHAIN] && 2801 nla_get_u32(tca[TCA_CHAIN]) != chain->index) 2802 continue; 2803 if (!tcf_chain_dump(chain, q, parent, skb, cb, 2804 index_start, &index, terse_dump)) { 2805 tcf_chain_put(chain); 2806 err = -EMSGSIZE; 2807 break; 2808 } 2809 } 2810 2811 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) 2812 tcf_block_refcnt_put(block, true); 2813 cb->args[0] = index; 2814 2815 out: 2816 /* If we did no progress, the error (EMSGSIZE) is real */ 2817 if (skb->len == 0 && err) 2818 return err; 2819 return skb->len; 2820 } 2821 2822 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops, 2823 void *tmplt_priv, u32 chain_index, 2824 struct net *net, struct sk_buff *skb, 2825 struct tcf_block *block, 2826 u32 portid, u32 seq, u16 flags, int event, 2827 struct netlink_ext_ack *extack) 2828 { 2829 unsigned char *b = skb_tail_pointer(skb); 2830 const struct tcf_proto_ops *ops; 2831 struct nlmsghdr *nlh; 2832 struct tcmsg *tcm; 2833 void *priv; 2834 2835 ops = tmplt_ops; 2836 priv = tmplt_priv; 2837 2838 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); 2839 if (!nlh) 2840 goto out_nlmsg_trim; 2841 tcm = nlmsg_data(nlh); 2842 tcm->tcm_family = AF_UNSPEC; 2843 tcm->tcm__pad1 = 0; 2844 tcm->tcm__pad2 = 0; 2845 tcm->tcm_handle = 0; 2846 if (block->q) { 2847 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex; 2848 tcm->tcm_parent = block->q->handle; 2849 } else { 2850 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; 2851 tcm->tcm_block_index = block->index; 2852 } 2853 2854 if (nla_put_u32(skb, TCA_CHAIN, chain_index)) 2855 goto nla_put_failure; 2856 2857 if (ops) { 2858 if (nla_put_string(skb, TCA_KIND, ops->kind)) 2859 goto nla_put_failure; 2860 if (ops->tmplt_dump(skb, net, priv) < 0) 2861 goto nla_put_failure; 2862 } 2863 2864 if (extack && extack->_msg && 2865 nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg)) 2866 goto out_nlmsg_trim; 2867 2868 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 2869 2870 return skb->len; 2871 2872 out_nlmsg_trim: 2873 nla_put_failure: 2874 nlmsg_trim(skb, b); 2875 return -EMSGSIZE; 2876 } 2877 2878 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, 2879 u32 seq, u16 flags, int event, bool unicast, 2880 struct netlink_ext_ack *extack) 2881 { 2882 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 2883 struct tcf_block *block = chain->block; 2884 struct net *net = block->net; 2885 struct sk_buff *skb; 2886 int err = 0; 2887 2888 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2889 if (!skb) 2890 return -ENOBUFS; 2891 2892 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, 2893 chain->index, net, skb, block, portid, 2894 seq, flags, event, extack) <= 0) { 2895 kfree_skb(skb); 2896 return -EINVAL; 2897 } 2898 2899 if (unicast) 2900 err = rtnl_unicast(skb, net, portid); 2901 else 2902 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, 2903 flags & NLM_F_ECHO); 2904 2905 return err; 2906 } 2907 2908 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops, 2909 void *tmplt_priv, u32 chain_index, 2910 struct tcf_block *block, struct sk_buff *oskb, 2911 u32 seq, u16 flags, bool unicast) 2912 { 2913 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 2914 struct net *net = block->net; 2915 struct sk_buff *skb; 2916 2917 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2918 if (!skb) 2919 return -ENOBUFS; 2920 2921 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb, 2922 block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) { 2923 kfree_skb(skb); 2924 return -EINVAL; 2925 } 2926 2927 if (unicast) 2928 return rtnl_unicast(skb, net, portid); 2929 2930 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO); 2931 } 2932 2933 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net, 2934 struct nlattr **tca, 2935 struct netlink_ext_ack *extack) 2936 { 2937 const struct tcf_proto_ops *ops; 2938 char name[IFNAMSIZ]; 2939 void *tmplt_priv; 2940 2941 /* If kind is not set, user did not specify template. */ 2942 if (!tca[TCA_KIND]) 2943 return 0; 2944 2945 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2946 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long"); 2947 return -EINVAL; 2948 } 2949 2950 ops = tcf_proto_lookup_ops(name, true, extack); 2951 if (IS_ERR(ops)) 2952 return PTR_ERR(ops); 2953 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) { 2954 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier"); 2955 module_put(ops->owner); 2956 return -EOPNOTSUPP; 2957 } 2958 2959 tmplt_priv = ops->tmplt_create(net, chain, tca, extack); 2960 if (IS_ERR(tmplt_priv)) { 2961 module_put(ops->owner); 2962 return PTR_ERR(tmplt_priv); 2963 } 2964 chain->tmplt_ops = ops; 2965 chain->tmplt_priv = tmplt_priv; 2966 return 0; 2967 } 2968 2969 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops, 2970 void *tmplt_priv) 2971 { 2972 /* If template ops are set, no work to do for us. */ 2973 if (!tmplt_ops) 2974 return; 2975 2976 tmplt_ops->tmplt_destroy(tmplt_priv); 2977 module_put(tmplt_ops->owner); 2978 } 2979 2980 /* Add/delete/get a chain */ 2981 2982 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, 2983 struct netlink_ext_ack *extack) 2984 { 2985 struct net *net = sock_net(skb->sk); 2986 struct nlattr *tca[TCA_MAX + 1]; 2987 struct tcmsg *t; 2988 u32 parent; 2989 u32 chain_index; 2990 struct Qdisc *q; 2991 struct tcf_chain *chain; 2992 struct tcf_block *block; 2993 unsigned long cl; 2994 int err; 2995 2996 replay: 2997 q = NULL; 2998 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2999 rtm_tca_policy, extack); 3000 if (err < 0) 3001 return err; 3002 3003 t = nlmsg_data(n); 3004 parent = t->tcm_parent; 3005 cl = 0; 3006 3007 block = tcf_block_find(net, &q, &parent, &cl, 3008 t->tcm_ifindex, t->tcm_block_index, extack); 3009 if (IS_ERR(block)) 3010 return PTR_ERR(block); 3011 3012 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 3013 if (chain_index > TC_ACT_EXT_VAL_MASK) { 3014 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 3015 err = -EINVAL; 3016 goto errout_block; 3017 } 3018 3019 mutex_lock(&block->lock); 3020 chain = tcf_chain_lookup(block, chain_index); 3021 if (n->nlmsg_type == RTM_NEWCHAIN) { 3022 if (chain) { 3023 if (tcf_chain_held_by_acts_only(chain)) { 3024 /* The chain exists only because there is 3025 * some action referencing it. 3026 */ 3027 tcf_chain_hold(chain); 3028 } else { 3029 NL_SET_ERR_MSG(extack, "Filter chain already exists"); 3030 err = -EEXIST; 3031 goto errout_block_locked; 3032 } 3033 } else { 3034 if (!(n->nlmsg_flags & NLM_F_CREATE)) { 3035 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain"); 3036 err = -ENOENT; 3037 goto errout_block_locked; 3038 } 3039 chain = tcf_chain_create(block, chain_index); 3040 if (!chain) { 3041 NL_SET_ERR_MSG(extack, "Failed to create filter chain"); 3042 err = -ENOMEM; 3043 goto errout_block_locked; 3044 } 3045 } 3046 } else { 3047 if (!chain || tcf_chain_held_by_acts_only(chain)) { 3048 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 3049 err = -EINVAL; 3050 goto errout_block_locked; 3051 } 3052 tcf_chain_hold(chain); 3053 } 3054 3055 if (n->nlmsg_type == RTM_NEWCHAIN) { 3056 /* Modifying chain requires holding parent block lock. In case 3057 * the chain was successfully added, take a reference to the 3058 * chain. This ensures that an empty chain does not disappear at 3059 * the end of this function. 3060 */ 3061 tcf_chain_hold(chain); 3062 chain->explicitly_created = true; 3063 } 3064 mutex_unlock(&block->lock); 3065 3066 switch (n->nlmsg_type) { 3067 case RTM_NEWCHAIN: 3068 err = tc_chain_tmplt_add(chain, net, tca, extack); 3069 if (err) { 3070 tcf_chain_put_explicitly_created(chain); 3071 goto errout; 3072 } 3073 3074 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, 3075 RTM_NEWCHAIN, false, extack); 3076 break; 3077 case RTM_DELCHAIN: 3078 tfilter_notify_chain(net, skb, block, q, parent, n, 3079 chain, RTM_DELTFILTER, extack); 3080 /* Flush the chain first as the user requested chain removal. */ 3081 tcf_chain_flush(chain, true); 3082 /* In case the chain was successfully deleted, put a reference 3083 * to the chain previously taken during addition. 3084 */ 3085 tcf_chain_put_explicitly_created(chain); 3086 break; 3087 case RTM_GETCHAIN: 3088 err = tc_chain_notify(chain, skb, n->nlmsg_seq, 3089 n->nlmsg_flags, n->nlmsg_type, true, extack); 3090 if (err < 0) 3091 NL_SET_ERR_MSG(extack, "Failed to send chain notify message"); 3092 break; 3093 default: 3094 err = -EOPNOTSUPP; 3095 NL_SET_ERR_MSG(extack, "Unsupported message type"); 3096 goto errout; 3097 } 3098 3099 errout: 3100 tcf_chain_put(chain); 3101 errout_block: 3102 tcf_block_release(q, block, true); 3103 if (err == -EAGAIN) 3104 /* Replay the request. */ 3105 goto replay; 3106 return err; 3107 3108 errout_block_locked: 3109 mutex_unlock(&block->lock); 3110 goto errout_block; 3111 } 3112 3113 /* called with RTNL */ 3114 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) 3115 { 3116 struct net *net = sock_net(skb->sk); 3117 struct nlattr *tca[TCA_MAX + 1]; 3118 struct Qdisc *q = NULL; 3119 struct tcf_block *block; 3120 struct tcmsg *tcm = nlmsg_data(cb->nlh); 3121 struct tcf_chain *chain; 3122 long index_start; 3123 long index; 3124 int err; 3125 3126 if (nlmsg_len(cb->nlh) < sizeof(*tcm)) 3127 return skb->len; 3128 3129 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, 3130 rtm_tca_policy, cb->extack); 3131 if (err) 3132 return err; 3133 3134 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { 3135 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); 3136 if (!block) 3137 goto out; 3138 } else { 3139 const struct Qdisc_class_ops *cops; 3140 struct net_device *dev; 3141 unsigned long cl = 0; 3142 3143 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 3144 if (!dev) 3145 return skb->len; 3146 3147 if (!tcm->tcm_parent) 3148 q = rtnl_dereference(dev->qdisc); 3149 else 3150 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 3151 3152 if (!q) 3153 goto out; 3154 cops = q->ops->cl_ops; 3155 if (!cops) 3156 goto out; 3157 if (!cops->tcf_block) 3158 goto out; 3159 if (TC_H_MIN(tcm->tcm_parent)) { 3160 cl = cops->find(q, tcm->tcm_parent); 3161 if (cl == 0) 3162 goto out; 3163 } 3164 block = cops->tcf_block(q, cl, NULL); 3165 if (!block) 3166 goto out; 3167 if (tcf_block_shared(block)) 3168 q = NULL; 3169 } 3170 3171 index_start = cb->args[0]; 3172 index = 0; 3173 3174 mutex_lock(&block->lock); 3175 list_for_each_entry(chain, &block->chain_list, list) { 3176 if ((tca[TCA_CHAIN] && 3177 nla_get_u32(tca[TCA_CHAIN]) != chain->index)) 3178 continue; 3179 if (index < index_start) { 3180 index++; 3181 continue; 3182 } 3183 if (tcf_chain_held_by_acts_only(chain)) 3184 continue; 3185 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, 3186 chain->index, net, skb, block, 3187 NETLINK_CB(cb->skb).portid, 3188 cb->nlh->nlmsg_seq, NLM_F_MULTI, 3189 RTM_NEWCHAIN, NULL); 3190 if (err <= 0) 3191 break; 3192 index++; 3193 } 3194 mutex_unlock(&block->lock); 3195 3196 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) 3197 tcf_block_refcnt_put(block, true); 3198 cb->args[0] = index; 3199 3200 out: 3201 /* If we did no progress, the error (EMSGSIZE) is real */ 3202 if (skb->len == 0 && err) 3203 return err; 3204 return skb->len; 3205 } 3206 3207 int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action, 3208 int police, struct tcf_proto *tp, u32 handle, 3209 bool use_action_miss) 3210 { 3211 int err = 0; 3212 3213 #ifdef CONFIG_NET_CLS_ACT 3214 exts->type = 0; 3215 exts->nr_actions = 0; 3216 exts->miss_cookie_node = NULL; 3217 /* Note: we do not own yet a reference on net. 3218 * This reference might be taken later from tcf_exts_get_net(). 3219 */ 3220 exts->net = net; 3221 exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *), 3222 GFP_KERNEL); 3223 if (!exts->actions) 3224 return -ENOMEM; 3225 #endif 3226 3227 exts->action = action; 3228 exts->police = police; 3229 3230 if (!use_action_miss) 3231 return 0; 3232 3233 err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle); 3234 if (err) 3235 goto err_miss_alloc; 3236 3237 return 0; 3238 3239 err_miss_alloc: 3240 tcf_exts_destroy(exts); 3241 #ifdef CONFIG_NET_CLS_ACT 3242 exts->actions = NULL; 3243 #endif 3244 return err; 3245 } 3246 EXPORT_SYMBOL(tcf_exts_init_ex); 3247 3248 void tcf_exts_destroy(struct tcf_exts *exts) 3249 { 3250 tcf_exts_miss_cookie_base_destroy(exts); 3251 3252 #ifdef CONFIG_NET_CLS_ACT 3253 if (exts->actions) { 3254 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND); 3255 kfree(exts->actions); 3256 } 3257 exts->nr_actions = 0; 3258 #endif 3259 } 3260 EXPORT_SYMBOL(tcf_exts_destroy); 3261 3262 int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb, 3263 struct nlattr *rate_tlv, struct tcf_exts *exts, 3264 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) 3265 { 3266 #ifdef CONFIG_NET_CLS_ACT 3267 { 3268 int init_res[TCA_ACT_MAX_PRIO] = {}; 3269 struct tc_action *act; 3270 size_t attr_size = 0; 3271 3272 if (exts->police && tb[exts->police]) { 3273 struct tc_action_ops *a_o; 3274 3275 a_o = tc_action_load_ops(tb[exts->police], true, 3276 !(flags & TCA_ACT_FLAGS_NO_RTNL), 3277 extack); 3278 if (IS_ERR(a_o)) 3279 return PTR_ERR(a_o); 3280 flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND; 3281 act = tcf_action_init_1(net, tp, tb[exts->police], 3282 rate_tlv, a_o, init_res, flags, 3283 extack); 3284 module_put(a_o->owner); 3285 if (IS_ERR(act)) 3286 return PTR_ERR(act); 3287 3288 act->type = exts->type = TCA_OLD_COMPAT; 3289 exts->actions[0] = act; 3290 exts->nr_actions = 1; 3291 tcf_idr_insert_many(exts->actions); 3292 } else if (exts->action && tb[exts->action]) { 3293 int err; 3294 3295 flags |= TCA_ACT_FLAGS_BIND; 3296 err = tcf_action_init(net, tp, tb[exts->action], 3297 rate_tlv, exts->actions, init_res, 3298 &attr_size, flags, fl_flags, 3299 extack); 3300 if (err < 0) 3301 return err; 3302 exts->nr_actions = err; 3303 } 3304 } 3305 #else 3306 if ((exts->action && tb[exts->action]) || 3307 (exts->police && tb[exts->police])) { 3308 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)"); 3309 return -EOPNOTSUPP; 3310 } 3311 #endif 3312 3313 return 0; 3314 } 3315 EXPORT_SYMBOL(tcf_exts_validate_ex); 3316 3317 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, 3318 struct nlattr *rate_tlv, struct tcf_exts *exts, 3319 u32 flags, struct netlink_ext_ack *extack) 3320 { 3321 return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts, 3322 flags, 0, extack); 3323 } 3324 EXPORT_SYMBOL(tcf_exts_validate); 3325 3326 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src) 3327 { 3328 #ifdef CONFIG_NET_CLS_ACT 3329 struct tcf_exts old = *dst; 3330 3331 *dst = *src; 3332 tcf_exts_destroy(&old); 3333 #endif 3334 } 3335 EXPORT_SYMBOL(tcf_exts_change); 3336 3337 #ifdef CONFIG_NET_CLS_ACT 3338 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts) 3339 { 3340 if (exts->nr_actions == 0) 3341 return NULL; 3342 else 3343 return exts->actions[0]; 3344 } 3345 #endif 3346 3347 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) 3348 { 3349 #ifdef CONFIG_NET_CLS_ACT 3350 struct nlattr *nest; 3351 3352 if (exts->action && tcf_exts_has_actions(exts)) { 3353 /* 3354 * again for backward compatible mode - we want 3355 * to work with both old and new modes of entering 3356 * tc data even if iproute2 was newer - jhs 3357 */ 3358 if (exts->type != TCA_OLD_COMPAT) { 3359 nest = nla_nest_start_noflag(skb, exts->action); 3360 if (nest == NULL) 3361 goto nla_put_failure; 3362 3363 if (tcf_action_dump(skb, exts->actions, 0, 0, false) 3364 < 0) 3365 goto nla_put_failure; 3366 nla_nest_end(skb, nest); 3367 } else if (exts->police) { 3368 struct tc_action *act = tcf_exts_first_act(exts); 3369 nest = nla_nest_start_noflag(skb, exts->police); 3370 if (nest == NULL || !act) 3371 goto nla_put_failure; 3372 if (tcf_action_dump_old(skb, act, 0, 0) < 0) 3373 goto nla_put_failure; 3374 nla_nest_end(skb, nest); 3375 } 3376 } 3377 return 0; 3378 3379 nla_put_failure: 3380 nla_nest_cancel(skb, nest); 3381 return -1; 3382 #else 3383 return 0; 3384 #endif 3385 } 3386 EXPORT_SYMBOL(tcf_exts_dump); 3387 3388 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts) 3389 { 3390 #ifdef CONFIG_NET_CLS_ACT 3391 struct nlattr *nest; 3392 3393 if (!exts->action || !tcf_exts_has_actions(exts)) 3394 return 0; 3395 3396 nest = nla_nest_start_noflag(skb, exts->action); 3397 if (!nest) 3398 goto nla_put_failure; 3399 3400 if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0) 3401 goto nla_put_failure; 3402 nla_nest_end(skb, nest); 3403 return 0; 3404 3405 nla_put_failure: 3406 nla_nest_cancel(skb, nest); 3407 return -1; 3408 #else 3409 return 0; 3410 #endif 3411 } 3412 EXPORT_SYMBOL(tcf_exts_terse_dump); 3413 3414 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts) 3415 { 3416 #ifdef CONFIG_NET_CLS_ACT 3417 struct tc_action *a = tcf_exts_first_act(exts); 3418 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0) 3419 return -1; 3420 #endif 3421 return 0; 3422 } 3423 EXPORT_SYMBOL(tcf_exts_dump_stats); 3424 3425 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) 3426 { 3427 if (*flags & TCA_CLS_FLAGS_IN_HW) 3428 return; 3429 *flags |= TCA_CLS_FLAGS_IN_HW; 3430 atomic_inc(&block->offloadcnt); 3431 } 3432 3433 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) 3434 { 3435 if (!(*flags & TCA_CLS_FLAGS_IN_HW)) 3436 return; 3437 *flags &= ~TCA_CLS_FLAGS_IN_HW; 3438 atomic_dec(&block->offloadcnt); 3439 } 3440 3441 static void tc_cls_offload_cnt_update(struct tcf_block *block, 3442 struct tcf_proto *tp, u32 *cnt, 3443 u32 *flags, u32 diff, bool add) 3444 { 3445 lockdep_assert_held(&block->cb_lock); 3446 3447 spin_lock(&tp->lock); 3448 if (add) { 3449 if (!*cnt) 3450 tcf_block_offload_inc(block, flags); 3451 *cnt += diff; 3452 } else { 3453 *cnt -= diff; 3454 if (!*cnt) 3455 tcf_block_offload_dec(block, flags); 3456 } 3457 spin_unlock(&tp->lock); 3458 } 3459 3460 static void 3461 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp, 3462 u32 *cnt, u32 *flags) 3463 { 3464 lockdep_assert_held(&block->cb_lock); 3465 3466 spin_lock(&tp->lock); 3467 tcf_block_offload_dec(block, flags); 3468 *cnt = 0; 3469 spin_unlock(&tp->lock); 3470 } 3471 3472 static int 3473 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, 3474 void *type_data, bool err_stop) 3475 { 3476 struct flow_block_cb *block_cb; 3477 int ok_count = 0; 3478 int err; 3479 3480 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) { 3481 err = block_cb->cb(type, type_data, block_cb->cb_priv); 3482 if (err) { 3483 if (err_stop) 3484 return err; 3485 } else { 3486 ok_count++; 3487 } 3488 } 3489 return ok_count; 3490 } 3491 3492 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, 3493 void *type_data, bool err_stop, bool rtnl_held) 3494 { 3495 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3496 int ok_count; 3497 3498 retry: 3499 if (take_rtnl) 3500 rtnl_lock(); 3501 down_read(&block->cb_lock); 3502 /* Need to obtain rtnl lock if block is bound to devs that require it. 3503 * In block bind code cb_lock is obtained while holding rtnl, so we must 3504 * obtain the locks in same order here. 3505 */ 3506 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3507 up_read(&block->cb_lock); 3508 take_rtnl = true; 3509 goto retry; 3510 } 3511 3512 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3513 3514 up_read(&block->cb_lock); 3515 if (take_rtnl) 3516 rtnl_unlock(); 3517 return ok_count; 3518 } 3519 EXPORT_SYMBOL(tc_setup_cb_call); 3520 3521 /* Non-destructive filter add. If filter that wasn't already in hardware is 3522 * successfully offloaded, increment block offloads counter. On failure, 3523 * previously offloaded filter is considered to be intact and offloads counter 3524 * is not decremented. 3525 */ 3526 3527 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp, 3528 enum tc_setup_type type, void *type_data, bool err_stop, 3529 u32 *flags, unsigned int *in_hw_count, bool rtnl_held) 3530 { 3531 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3532 int ok_count; 3533 3534 retry: 3535 if (take_rtnl) 3536 rtnl_lock(); 3537 down_read(&block->cb_lock); 3538 /* Need to obtain rtnl lock if block is bound to devs that require it. 3539 * In block bind code cb_lock is obtained while holding rtnl, so we must 3540 * obtain the locks in same order here. 3541 */ 3542 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3543 up_read(&block->cb_lock); 3544 take_rtnl = true; 3545 goto retry; 3546 } 3547 3548 /* Make sure all netdevs sharing this block are offload-capable. */ 3549 if (block->nooffloaddevcnt && err_stop) { 3550 ok_count = -EOPNOTSUPP; 3551 goto err_unlock; 3552 } 3553 3554 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3555 if (ok_count < 0) 3556 goto err_unlock; 3557 3558 if (tp->ops->hw_add) 3559 tp->ops->hw_add(tp, type_data); 3560 if (ok_count > 0) 3561 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 3562 ok_count, true); 3563 err_unlock: 3564 up_read(&block->cb_lock); 3565 if (take_rtnl) 3566 rtnl_unlock(); 3567 return min(ok_count, 0); 3568 } 3569 EXPORT_SYMBOL(tc_setup_cb_add); 3570 3571 /* Destructive filter replace. If filter that wasn't already in hardware is 3572 * successfully offloaded, increment block offload counter. On failure, 3573 * previously offloaded filter is considered to be destroyed and offload counter 3574 * is decremented. 3575 */ 3576 3577 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp, 3578 enum tc_setup_type type, void *type_data, bool err_stop, 3579 u32 *old_flags, unsigned int *old_in_hw_count, 3580 u32 *new_flags, unsigned int *new_in_hw_count, 3581 bool rtnl_held) 3582 { 3583 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3584 int ok_count; 3585 3586 retry: 3587 if (take_rtnl) 3588 rtnl_lock(); 3589 down_read(&block->cb_lock); 3590 /* Need to obtain rtnl lock if block is bound to devs that require it. 3591 * In block bind code cb_lock is obtained while holding rtnl, so we must 3592 * obtain the locks in same order here. 3593 */ 3594 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3595 up_read(&block->cb_lock); 3596 take_rtnl = true; 3597 goto retry; 3598 } 3599 3600 /* Make sure all netdevs sharing this block are offload-capable. */ 3601 if (block->nooffloaddevcnt && err_stop) { 3602 ok_count = -EOPNOTSUPP; 3603 goto err_unlock; 3604 } 3605 3606 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags); 3607 if (tp->ops->hw_del) 3608 tp->ops->hw_del(tp, type_data); 3609 3610 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3611 if (ok_count < 0) 3612 goto err_unlock; 3613 3614 if (tp->ops->hw_add) 3615 tp->ops->hw_add(tp, type_data); 3616 if (ok_count > 0) 3617 tc_cls_offload_cnt_update(block, tp, new_in_hw_count, 3618 new_flags, ok_count, true); 3619 err_unlock: 3620 up_read(&block->cb_lock); 3621 if (take_rtnl) 3622 rtnl_unlock(); 3623 return min(ok_count, 0); 3624 } 3625 EXPORT_SYMBOL(tc_setup_cb_replace); 3626 3627 /* Destroy filter and decrement block offload counter, if filter was previously 3628 * offloaded. 3629 */ 3630 3631 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp, 3632 enum tc_setup_type type, void *type_data, bool err_stop, 3633 u32 *flags, unsigned int *in_hw_count, bool rtnl_held) 3634 { 3635 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3636 int ok_count; 3637 3638 retry: 3639 if (take_rtnl) 3640 rtnl_lock(); 3641 down_read(&block->cb_lock); 3642 /* Need to obtain rtnl lock if block is bound to devs that require it. 3643 * In block bind code cb_lock is obtained while holding rtnl, so we must 3644 * obtain the locks in same order here. 3645 */ 3646 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3647 up_read(&block->cb_lock); 3648 take_rtnl = true; 3649 goto retry; 3650 } 3651 3652 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3653 3654 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags); 3655 if (tp->ops->hw_del) 3656 tp->ops->hw_del(tp, type_data); 3657 3658 up_read(&block->cb_lock); 3659 if (take_rtnl) 3660 rtnl_unlock(); 3661 return min(ok_count, 0); 3662 } 3663 EXPORT_SYMBOL(tc_setup_cb_destroy); 3664 3665 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, 3666 bool add, flow_setup_cb_t *cb, 3667 enum tc_setup_type type, void *type_data, 3668 void *cb_priv, u32 *flags, unsigned int *in_hw_count) 3669 { 3670 int err = cb(type, type_data, cb_priv); 3671 3672 if (err) { 3673 if (add && tc_skip_sw(*flags)) 3674 return err; 3675 } else { 3676 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1, 3677 add); 3678 } 3679 3680 return 0; 3681 } 3682 EXPORT_SYMBOL(tc_setup_cb_reoffload); 3683 3684 static int tcf_act_get_user_cookie(struct flow_action_entry *entry, 3685 const struct tc_action *act) 3686 { 3687 struct tc_cookie *user_cookie; 3688 int err = 0; 3689 3690 rcu_read_lock(); 3691 user_cookie = rcu_dereference(act->user_cookie); 3692 if (user_cookie) { 3693 entry->user_cookie = flow_action_cookie_create(user_cookie->data, 3694 user_cookie->len, 3695 GFP_ATOMIC); 3696 if (!entry->user_cookie) 3697 err = -ENOMEM; 3698 } 3699 rcu_read_unlock(); 3700 return err; 3701 } 3702 3703 static void tcf_act_put_user_cookie(struct flow_action_entry *entry) 3704 { 3705 flow_action_cookie_destroy(entry->user_cookie); 3706 } 3707 3708 void tc_cleanup_offload_action(struct flow_action *flow_action) 3709 { 3710 struct flow_action_entry *entry; 3711 int i; 3712 3713 flow_action_for_each(i, entry, flow_action) { 3714 tcf_act_put_user_cookie(entry); 3715 if (entry->destructor) 3716 entry->destructor(entry->destructor_priv); 3717 } 3718 } 3719 EXPORT_SYMBOL(tc_cleanup_offload_action); 3720 3721 static int tc_setup_offload_act(struct tc_action *act, 3722 struct flow_action_entry *entry, 3723 u32 *index_inc, 3724 struct netlink_ext_ack *extack) 3725 { 3726 #ifdef CONFIG_NET_CLS_ACT 3727 if (act->ops->offload_act_setup) { 3728 return act->ops->offload_act_setup(act, entry, index_inc, true, 3729 extack); 3730 } else { 3731 NL_SET_ERR_MSG(extack, "Action does not support offload"); 3732 return -EOPNOTSUPP; 3733 } 3734 #else 3735 return 0; 3736 #endif 3737 } 3738 3739 int tc_setup_action(struct flow_action *flow_action, 3740 struct tc_action *actions[], 3741 u32 miss_cookie_base, 3742 struct netlink_ext_ack *extack) 3743 { 3744 int i, j, k, index, err = 0; 3745 struct tc_action *act; 3746 3747 BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY); 3748 BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE); 3749 BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED); 3750 3751 if (!actions) 3752 return 0; 3753 3754 j = 0; 3755 tcf_act_for_each_action(i, act, actions) { 3756 struct flow_action_entry *entry; 3757 3758 entry = &flow_action->entries[j]; 3759 spin_lock_bh(&act->tcfa_lock); 3760 err = tcf_act_get_user_cookie(entry, act); 3761 if (err) 3762 goto err_out_locked; 3763 3764 index = 0; 3765 err = tc_setup_offload_act(act, entry, &index, extack); 3766 if (err) 3767 goto err_out_locked; 3768 3769 for (k = 0; k < index ; k++) { 3770 entry[k].hw_stats = tc_act_hw_stats(act->hw_stats); 3771 entry[k].hw_index = act->tcfa_index; 3772 entry[k].cookie = (unsigned long)act; 3773 entry[k].miss_cookie = 3774 tcf_exts_miss_cookie_get(miss_cookie_base, i); 3775 } 3776 3777 j += index; 3778 3779 spin_unlock_bh(&act->tcfa_lock); 3780 } 3781 3782 err_out: 3783 if (err) 3784 tc_cleanup_offload_action(flow_action); 3785 3786 return err; 3787 err_out_locked: 3788 spin_unlock_bh(&act->tcfa_lock); 3789 goto err_out; 3790 } 3791 3792 int tc_setup_offload_action(struct flow_action *flow_action, 3793 const struct tcf_exts *exts, 3794 struct netlink_ext_ack *extack) 3795 { 3796 #ifdef CONFIG_NET_CLS_ACT 3797 u32 miss_cookie_base; 3798 3799 if (!exts) 3800 return 0; 3801 3802 miss_cookie_base = exts->miss_cookie_node ? 3803 exts->miss_cookie_node->miss_cookie_base : 0; 3804 return tc_setup_action(flow_action, exts->actions, miss_cookie_base, 3805 extack); 3806 #else 3807 return 0; 3808 #endif 3809 } 3810 EXPORT_SYMBOL(tc_setup_offload_action); 3811 3812 unsigned int tcf_exts_num_actions(struct tcf_exts *exts) 3813 { 3814 unsigned int num_acts = 0; 3815 struct tc_action *act; 3816 int i; 3817 3818 tcf_exts_for_each_action(i, act, exts) { 3819 if (is_tcf_pedit(act)) 3820 num_acts += tcf_pedit_nkeys(act); 3821 else 3822 num_acts++; 3823 } 3824 return num_acts; 3825 } 3826 EXPORT_SYMBOL(tcf_exts_num_actions); 3827 3828 #ifdef CONFIG_NET_CLS_ACT 3829 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr, 3830 u32 *p_block_index, 3831 struct netlink_ext_ack *extack) 3832 { 3833 *p_block_index = nla_get_u32(block_index_attr); 3834 if (!*p_block_index) { 3835 NL_SET_ERR_MSG(extack, "Block number may not be zero"); 3836 return -EINVAL; 3837 } 3838 3839 return 0; 3840 } 3841 3842 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch, 3843 enum flow_block_binder_type binder_type, 3844 struct nlattr *block_index_attr, 3845 struct netlink_ext_ack *extack) 3846 { 3847 u32 block_index; 3848 int err; 3849 3850 if (!block_index_attr) 3851 return 0; 3852 3853 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack); 3854 if (err) 3855 return err; 3856 3857 qe->info.binder_type = binder_type; 3858 qe->info.chain_head_change = tcf_chain_head_change_dflt; 3859 qe->info.chain_head_change_priv = &qe->filter_chain; 3860 qe->info.block_index = block_index; 3861 3862 return tcf_block_get_ext(&qe->block, sch, &qe->info, extack); 3863 } 3864 EXPORT_SYMBOL(tcf_qevent_init); 3865 3866 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch) 3867 { 3868 if (qe->info.block_index) 3869 tcf_block_put_ext(qe->block, sch, &qe->info); 3870 } 3871 EXPORT_SYMBOL(tcf_qevent_destroy); 3872 3873 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr, 3874 struct netlink_ext_ack *extack) 3875 { 3876 u32 block_index; 3877 int err; 3878 3879 if (!block_index_attr) 3880 return 0; 3881 3882 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack); 3883 if (err) 3884 return err; 3885 3886 /* Bounce newly-configured block or change in block. */ 3887 if (block_index != qe->info.block_index) { 3888 NL_SET_ERR_MSG(extack, "Change of blocks is not supported"); 3889 return -EINVAL; 3890 } 3891 3892 return 0; 3893 } 3894 EXPORT_SYMBOL(tcf_qevent_validate_change); 3895 3896 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, 3897 struct sk_buff **to_free, int *ret) 3898 { 3899 struct tcf_result cl_res; 3900 struct tcf_proto *fl; 3901 3902 if (!qe->info.block_index) 3903 return skb; 3904 3905 fl = rcu_dereference_bh(qe->filter_chain); 3906 3907 switch (tcf_classify(skb, NULL, fl, &cl_res, false)) { 3908 case TC_ACT_SHOT: 3909 qdisc_qstats_drop(sch); 3910 __qdisc_drop(skb, to_free); 3911 *ret = __NET_XMIT_BYPASS; 3912 return NULL; 3913 case TC_ACT_STOLEN: 3914 case TC_ACT_QUEUED: 3915 case TC_ACT_TRAP: 3916 __qdisc_drop(skb, to_free); 3917 *ret = __NET_XMIT_STOLEN; 3918 return NULL; 3919 case TC_ACT_REDIRECT: 3920 skb_do_redirect(skb); 3921 *ret = __NET_XMIT_STOLEN; 3922 return NULL; 3923 } 3924 3925 return skb; 3926 } 3927 EXPORT_SYMBOL(tcf_qevent_handle); 3928 3929 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe) 3930 { 3931 if (!qe->info.block_index) 3932 return 0; 3933 return nla_put_u32(skb, attr_name, qe->info.block_index); 3934 } 3935 EXPORT_SYMBOL(tcf_qevent_dump); 3936 #endif 3937 3938 static __net_init int tcf_net_init(struct net *net) 3939 { 3940 struct tcf_net *tn = net_generic(net, tcf_net_id); 3941 3942 spin_lock_init(&tn->idr_lock); 3943 idr_init(&tn->idr); 3944 return 0; 3945 } 3946 3947 static void __net_exit tcf_net_exit(struct net *net) 3948 { 3949 struct tcf_net *tn = net_generic(net, tcf_net_id); 3950 3951 idr_destroy(&tn->idr); 3952 } 3953 3954 static struct pernet_operations tcf_net_ops = { 3955 .init = tcf_net_init, 3956 .exit = tcf_net_exit, 3957 .id = &tcf_net_id, 3958 .size = sizeof(struct tcf_net), 3959 }; 3960 3961 static int __init tc_filter_init(void) 3962 { 3963 int err; 3964 3965 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0); 3966 if (!tc_filter_wq) 3967 return -ENOMEM; 3968 3969 err = register_pernet_subsys(&tcf_net_ops); 3970 if (err) 3971 goto err_register_pernet_subsys; 3972 3973 xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1); 3974 3975 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 3976 RTNL_FLAG_DOIT_UNLOCKED); 3977 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 3978 RTNL_FLAG_DOIT_UNLOCKED); 3979 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter, 3980 tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED); 3981 rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0); 3982 rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0); 3983 rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain, 3984 tc_dump_chain, 0); 3985 3986 return 0; 3987 3988 err_register_pernet_subsys: 3989 destroy_workqueue(tc_filter_wq); 3990 return err; 3991 } 3992 3993 subsys_initcall(tc_filter_init); 3994