// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}
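/* Look up classifier ops by kind. If the ops are not registered yet, try
 * to load the cls_<kind> module (dropping RTNL around the load when it is
 * held) and, on success, return ERR_PTR(-EAGAIN) so the caller replays
 * the whole request.
 */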
static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register (unregister) a new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
	memset(name, 0, IFNAMSIZ);
	return false;
}
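/* Returns true if the classifier of the given kind supports rule updates
 * without taking the RTNL lock (TCF_PROTO_OPS_DOIT_UNLOCKED).
 */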
static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}
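/* Notify all registered listeners (e.g. the owning qdisc) that the head
 * of filter chain 0 has changed. No-op for any other chain.
 */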
static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);
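/* Get a reference to the chain with the given index, optionally creating
 * it. by_act references are taken by actions targeting the chain and are
 * accounted separately, so action-only chains stay hidden from the user.
 */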
static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);
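/* Release one chain reference. Dropping the last non-action reference
 * sends a RTM_DELCHAIN notification; dropping the very last reference
 * detaches the chain from its block and frees it.
 */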
static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block,
			      flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      enum flow_block_command command, bool ingress)
{
	struct flow_block_offload bo = {
		.command = command,
		.binder_type = ingress ?
			       FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS :
			       FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
		.net = dev_net(dev),
		.block_shared = tcf_block_non_null_shared(block),
	};
	INIT_LIST_HEAD(&bo.cb_list);

	if (!block)
		return;

	bo.block = &block->flow_block;

	down_write(&block->cb_lock);
	cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

	tcf_block_setup(block, &bo);
	up_write(&block->cb_lock);
}
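/* Resolve the block attached to the device's ingress (or clsact egress)
 * queue. Returns NULL when there is no suitable classful qdisc, or when
 * an egress block is requested from a plain ingress qdisc.
 */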
static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress)
{
	const struct Qdisc_class_ops *cops;
	const struct Qdisc_ops *ops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	ops = qdisc->ops;
	if (!ops)
		return NULL;

	if (!ingress && !strcmp("ingress", ops->id))
		return NULL;

	cops = ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc,
			       ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS,
			       NULL);
}

static void tc_indr_block_get_and_cmd(struct net_device *dev,
				      flow_indr_block_bind_cb_t *cb,
				      void *cb_priv,
				      enum flow_block_command command)
{
	struct tcf_block *block;

	block = tc_dev_block(dev, true);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);

	block = tc_dev_block(dev, false);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
}

static void tc_indr_block_call(struct tcf_block *block,
			       struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum flow_block_command command,
			       struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {
		.command = command,
		.binder_type = ei->binder_type,
		.net = dev_net(dev),
		.block = &block->flow_block,
		.block_shared = tcf_block_shared(block),
		.extack = extack,
	};
	INIT_LIST_HEAD(&bo.cb_list);

	flow_indr_block_call(dev, &bo, command, TC_SETUP_BLOCK);
	tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};
	int err;

	bo.net = dev_net(dev);
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = &block->flow_block;
	bo.block_shared = tcf_block_shared(block);
	bo.extack = extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0) {
		if (err != -EOPNOTSUPP)
			NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
		return err;
	}

	return tcf_block_setup(block, &bo);
}
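/* Bind the block to hardware offload of the given device. Devices that
 * cannot offload are tracked in nooffloaddevcnt; binding such a device
 * is refused while the block already has offloaded filters.
 */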
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block)) {
		err = -EOPNOTSUPP;
		goto err_unlock;
	}
	err = 0;
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;
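/* Publish a shared block in the per-netns IDR under its block index.
 * idr_alloc_u32() is called with start == max == block->index, so it
 * either claims exactly that index or fails if it is already taken.
 */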
static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */
struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
		   bool rtnl_held)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, rtnl_held, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. The 'if'
		 * branch of this conditional obtains its reference to block by
		 * calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}
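/* Drop one block reference. The last reference removes a shared block
 * from the IDR, unbinds offloads and either frees the block directly
 * (empty chain_list) or lets the flush of the remaining chains free it.
 */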
static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}
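/* Attach a block to a qdisc. A non-zero ei->block_index requests a
 * shared block, which is created and inserted into the per-netns IDR on
 * first use. On success the caller holds one reference to the block.
 */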
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
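/* Sketch of typical tcf_block_get() usage from a classful qdisc's
 * ->init() callback (the qdisc private field names are illustrative):
 *
 *	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	if (err)
 *		return err;
 *
 * with a matching tcf_block_put(q->block) in ->destroy().
 */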
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}
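/* Apply the result of a driver TC_SETUP_BLOCK callback: on bind, register
 * the returned callbacks and replay the existing filters to them; on
 * unbind, unregister them and remove the offloaded filters.
 */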
static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = tc_skb_protocol(skb);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
}
EXPORT_SYMBOL(tcf_classify);
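/* Ingress classification entry point. With CONFIG_NET_TC_SKB_EXT, the tc
 * skb extension carries the chain index across devices: classification
 * resumes in that chain, and a miss stores the last executed chain in a
 * freshly added extension.
 */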
int tcf_classify_ingress(struct sk_buff *skb,
			 const struct tcf_block *ingress_block,
			 const struct tcf_proto *tp,
			 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int ret;

	ext = skb_ext_find(skb, TC_SKB_EXT);

	if (ext && ext->chain) {
		struct tcf_chain *fchain;

		fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain);
		if (!fchain)
			return TC_ACT_SHOT;

		/* Consume, so cloned/redirect skbs won't inherit ext */
		skb_ext_del(skb, TC_SKB_EXT);

		tp = rcu_dereference_bh(fchain->filter_chain);
		last_executed_chain = fchain->index;
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
			     &last_executed_chain);

	/* If we missed on some chain */
	if (ret == TC_ACT_UNSPEC && last_executed_chain) {
		ext = skb_ext_add(skb, TC_SKB_EXT);
		if (WARN_ON_ONCE(!ext))
			return TC_ACT_SHOT;
		ext->chain = last_executed_chain;
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify_ingress);

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);
/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}
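/* Fill a netlink message describing a single filter. For terse dumps only
 * classifiers implementing ->terse_dump() are reported; all other events
 * use the full ->dump() callback.
 */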
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool terse_dump, bool rtnl_held)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else if (terse_dump) {
		if (tp->ops->terse_dump) {
			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
						rtnl_held) < 0)
				goto nla_put_failure;
		} else {
			goto cls_op_not_supp;
		}
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
cls_op_not_supp:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  false, rtnl_held) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}
static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  false, rtnl_held) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	if (err > 0)
		err = 0;
	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 bool rtnl_held)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
	     tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false, rtnl_held);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}
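/* RTM_NEWTFILTER handler: create or change a filter. Runs without RTNL
 * if qdisc, block and classifier all support unlocked execution. -EAGAIN
 * from the update path causes the whole request to be replayed under RTNL.
 */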
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
                          struct netlink_ext_ack *extack)
{
    struct net *net = sock_net(skb->sk);
    struct nlattr *tca[TCA_MAX + 1];
    char name[IFNAMSIZ];
    struct tcmsg *t;
    u32 protocol;
    u32 prio;
    bool prio_allocate;
    u32 parent;
    u32 chain_index;
    struct Qdisc *q = NULL;
    struct tcf_chain_info chain_info;
    struct tcf_chain *chain = NULL;
    struct tcf_block *block;
    struct tcf_proto *tp;
    unsigned long cl;
    void *fh;
    int err;
    int tp_created;
    bool rtnl_held = false;

    if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
        return -EPERM;

replay:
    tp_created = 0;

    err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
                                 rtm_tca_policy, extack);
    if (err < 0)
        return err;

    t = nlmsg_data(n);
    protocol = TC_H_MIN(t->tcm_info);
    prio = TC_H_MAJ(t->tcm_info);
    prio_allocate = false;
    parent = t->tcm_parent;
    tp = NULL;
    cl = 0;
    block = NULL;

    if (prio == 0) {
        /* If no priority is provided by the user,
         * we allocate one.
         */
        if (n->nlmsg_flags & NLM_F_CREATE) {
            prio = TC_H_MAKE(0x80000000U, 0U);
            prio_allocate = true;
        } else {
            NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
            return -ENOENT;
        }
    }

    /* Find head of filter chain. */

    err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
    if (err)
        return err;

    if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
        NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
        err = -EINVAL;
        goto errout;
    }

    /* Take rtnl mutex if rtnl_held was set to true on previous iteration,
     * block is shared (no qdisc found), qdisc is not unlocked, classifier
     * type is not specified, classifier is not unlocked.
     */
    if (rtnl_held ||
        (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
        !tcf_proto_is_unlocked(name)) {
        rtnl_held = true;
        rtnl_lock();
    }

    err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
    if (err)
        goto errout;

    block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
                             extack);
    if (IS_ERR(block)) {
        err = PTR_ERR(block);
        goto errout;
    }
    block->classid = parent;

    chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
    if (chain_index > TC_ACT_EXT_VAL_MASK) {
        NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
        err = -EINVAL;
        goto errout;
    }
    chain = tcf_chain_get(block, chain_index, true);
    if (!chain) {
        NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
        err = -ENOMEM;
        goto errout;
    }

    mutex_lock(&chain->filter_chain_lock);
    tp = tcf_chain_tp_find(chain, &chain_info, protocol,
                           prio, prio_allocate);
    if (IS_ERR(tp)) {
        NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
        err = PTR_ERR(tp);
        goto errout_locked;
    }

    if (tp == NULL) {
        struct tcf_proto *tp_new = NULL;

        if (chain->flushing) {
            err = -EAGAIN;
            goto errout_locked;
        }

        /* Proto-tcf does not exist, create new one */

        if (tca[TCA_KIND] == NULL || !protocol) {
            NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
            err = -EINVAL;
            goto errout_locked;
        }

        if (!(n->nlmsg_flags & NLM_F_CREATE)) {
            NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
            err = -ENOENT;
            goto errout_locked;
        }

        if (prio_allocate)
            prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
                                                   &chain_info));

        mutex_unlock(&chain->filter_chain_lock);
        tp_new = tcf_proto_create(name, protocol, prio, chain,
                                  rtnl_held, extack);
        if (IS_ERR(tp_new)) {
            err = PTR_ERR(tp_new);
            goto errout_tp;
        }

        tp_created = 1;
        tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
                                        rtnl_held);
        if (IS_ERR(tp)) {
            err = PTR_ERR(tp);
            goto errout_tp;
        }
    } else {
        mutex_unlock(&chain->filter_chain_lock);
    }

    if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
        NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
        err = -EINVAL;
        goto errout;
    }

    fh = tp->ops->get(tp, t->tcm_handle);

    if (!fh) {
        if (!(n->nlmsg_flags & NLM_F_CREATE)) {
            NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
            err = -ENOENT;
            goto errout;
        }
    } else if (n->nlmsg_flags & NLM_F_EXCL) {
        tfilter_put(tp, fh);
        NL_SET_ERR_MSG(extack, "Filter already exists");
        err = -EEXIST;
        goto errout;
    }

    if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
        NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
        err = -EINVAL;
        goto errout;
    }
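    /* At this point the requested create/replace semantics are fully
     * determined by the header flags validated above: NLM_F_CREATE is
     * required to instantiate a missing tp or filter, and NLM_F_EXCL
     * turns an existing filter into -EEXIST. The ternary below simply
     * maps that onto the TCA_ACT_NOREPLACE/TCA_ACT_REPLACE convention
     * that classifiers' ->change() callbacks expect.
     */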
    err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
                          n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE
                                                        : TCA_ACT_REPLACE,
                          rtnl_held, extack);
    if (err == 0) {
        tfilter_notify(net, skb, n, tp, block, q, parent, fh,
                       RTM_NEWTFILTER, false, rtnl_held);
        tfilter_put(tp, fh);
        /* q pointer is NULL for shared blocks */
        if (q)
            q->flags &= ~TCQ_F_CAN_BYPASS;
    }

errout:
    if (err && tp_created)
        tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
    if (chain) {
        if (tp && !IS_ERR(tp))
            tcf_proto_put(tp, rtnl_held, NULL);
        if (!tp_created)
            tcf_chain_put(chain);
    }
    tcf_block_release(q, block, rtnl_held);

    if (rtnl_held)
        rtnl_unlock();

    if (err == -EAGAIN) {
        /* Take rtnl lock in case EAGAIN is caused by concurrent flush
         * of target chain.
         */
        rtnl_held = true;
        /* Replay the request. */
        goto replay;
    }
    return err;

errout_locked:
    mutex_unlock(&chain->filter_chain_lock);
    goto errout;
}

static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
                          struct netlink_ext_ack *extack)
{
    struct net *net = sock_net(skb->sk);
    struct nlattr *tca[TCA_MAX + 1];
    char name[IFNAMSIZ];
    struct tcmsg *t;
    u32 protocol;
    u32 prio;
    u32 parent;
    u32 chain_index;
    struct Qdisc *q = NULL;
    struct tcf_chain_info chain_info;
    struct tcf_chain *chain = NULL;
    struct tcf_block *block = NULL;
    struct tcf_proto *tp = NULL;
    unsigned long cl = 0;
    void *fh = NULL;
    int err;
    bool rtnl_held = false;

    if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
        return -EPERM;

    err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
                                 rtm_tca_policy, extack);
    if (err < 0)
        return err;

    t = nlmsg_data(n);
    protocol = TC_H_MIN(t->tcm_info);
    prio = TC_H_MAJ(t->tcm_info);
    parent = t->tcm_parent;

    if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
        NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
        return -ENOENT;
    }

    /* Find head of filter chain. */

    err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
    if (err)
        return err;

    if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
        NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
        err = -EINVAL;
        goto errout;
    }
    /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
     * found), qdisc is not unlocked, classifier type is not specified,
     * classifier is not unlocked.
     */
    if (!prio ||
        (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
        !tcf_proto_is_unlocked(name)) {
        rtnl_held = true;
        rtnl_lock();
    }

    err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
    if (err)
        goto errout;

    block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
                             extack);
    if (IS_ERR(block)) {
        err = PTR_ERR(block);
        goto errout;
    }

    chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
    if (chain_index > TC_ACT_EXT_VAL_MASK) {
        NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
        err = -EINVAL;
        goto errout;
    }
    chain = tcf_chain_get(block, chain_index, false);
    if (!chain) {
        /* User requested flush on non-existent chain. Nothing to do,
         * so just return success.
         */
        if (prio == 0) {
            err = 0;
            goto errout;
        }
        NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
        err = -ENOENT;
        goto errout;
    }

    if (prio == 0) {
        tfilter_notify_chain(net, skb, block, q, parent, n,
                             chain, RTM_DELTFILTER, rtnl_held);
        tcf_chain_flush(chain, rtnl_held);
        err = 0;
        goto errout;
    }

    mutex_lock(&chain->filter_chain_lock);
    tp = tcf_chain_tp_find(chain, &chain_info, protocol,
                           prio, false);
    if (!tp || IS_ERR(tp)) {
        NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
        err = tp ? PTR_ERR(tp) : -ENOENT;
        goto errout_locked;
    } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
        NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
        err = -EINVAL;
        goto errout_locked;
    } else if (t->tcm_handle == 0) {
        tcf_proto_signal_destroying(chain, tp);
        tcf_chain_tp_remove(chain, &chain_info, tp);
        mutex_unlock(&chain->filter_chain_lock);

        tcf_proto_put(tp, rtnl_held, NULL);
        tfilter_notify(net, skb, n, tp, block, q, parent, fh,
                       RTM_DELTFILTER, false, rtnl_held);
        err = 0;
        goto errout;
    }
    mutex_unlock(&chain->filter_chain_lock);

    fh = tp->ops->get(tp, t->tcm_handle);

    if (!fh) {
        NL_SET_ERR_MSG(extack, "Specified filter handle not found");
        err = -ENOENT;
    } else {
        bool last;

        err = tfilter_del_notify(net, skb, n, tp, block,
                                 q, parent, fh, false, &last,
                                 rtnl_held, extack);

        if (err)
            goto errout;
        if (last)
            tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
    }

errout:
    if (chain) {
        if (tp && !IS_ERR(tp))
            tcf_proto_put(tp, rtnl_held, NULL);
        tcf_chain_put(chain);
    }
    tcf_block_release(q, block, rtnl_held);

    if (rtnl_held)
        rtnl_unlock();

    return err;

errout_locked:
    mutex_unlock(&chain->filter_chain_lock);
    goto errout;
}

static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
                          struct netlink_ext_ack *extack)
{
    struct net *net = sock_net(skb->sk);
    struct nlattr *tca[TCA_MAX + 1];
    char name[IFNAMSIZ];
    struct tcmsg *t;
    u32 protocol;
    u32 prio;
    u32 parent;
    u32 chain_index;
    struct Qdisc *q = NULL;
    struct tcf_chain_info chain_info;
    struct tcf_chain *chain = NULL;
    struct tcf_block *block = NULL;
    struct tcf_proto *tp = NULL;
    unsigned long cl = 0;
    void *fh = NULL;
    int err;
    bool rtnl_held = false;

    err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
                                 rtm_tca_policy, extack);
    if (err < 0)
        return err;

    t = nlmsg_data(n);
    protocol = TC_H_MIN(t->tcm_info);
    prio = TC_H_MAJ(t->tcm_info);
    parent = t->tcm_parent;

    if (prio == 0) {
        NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
        return -ENOENT;
    }

    /* Find head of filter chain. */

    err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
    if (err)
        return err;

    if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
        NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
        err = -EINVAL;
        goto errout;
    }
    /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
     * unlocked, classifier type is not specified, classifier is not
     * unlocked.
     */
    if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
        !tcf_proto_is_unlocked(name)) {
        rtnl_held = true;
        rtnl_lock();
    }

    err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
    if (err)
        goto errout;

    block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
                             extack);
    if (IS_ERR(block)) {
        err = PTR_ERR(block);
        goto errout;
    }

    chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
    if (chain_index > TC_ACT_EXT_VAL_MASK) {
        NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
        err = -EINVAL;
        goto errout;
    }
    chain = tcf_chain_get(block, chain_index, false);
    if (!chain) {
        NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
        err = -EINVAL;
        goto errout;
    }

    mutex_lock(&chain->filter_chain_lock);
    tp = tcf_chain_tp_find(chain, &chain_info, protocol,
                           prio, false);
    mutex_unlock(&chain->filter_chain_lock);
    if (!tp || IS_ERR(tp)) {
        NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
        err = tp ? PTR_ERR(tp) : -ENOENT;
        goto errout;
    } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
        NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
        err = -EINVAL;
        goto errout;
    }

    fh = tp->ops->get(tp, t->tcm_handle);

    if (!fh) {
        NL_SET_ERR_MSG(extack, "Specified filter handle not found");
        err = -ENOENT;
    } else {
        err = tfilter_notify(net, skb, n, tp, block, q, parent,
                             fh, RTM_NEWTFILTER, true, rtnl_held);
        if (err < 0)
            NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
    }

    tfilter_put(tp, fh);
errout:
    if (chain) {
        if (tp && !IS_ERR(tp))
            tcf_proto_put(tp, rtnl_held, NULL);
        tcf_chain_put(chain);
    }
    tcf_block_release(q, block, rtnl_held);

    if (rtnl_held)
        rtnl_unlock();

    return err;
}

struct tcf_dump_args {
    struct tcf_walker w;
    struct sk_buff *skb;
    struct netlink_callback *cb;
    struct tcf_block *block;
    struct Qdisc *q;
    u32 parent;
    bool terse_dump;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
    struct tcf_dump_args *a = (void *)arg;
    struct net *net = sock_net(a->skb->sk);

    return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
                         n, NETLINK_CB(a->cb->skb).portid,
                         a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
                         RTM_NEWTFILTER, a->terse_dump, true);
}
static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
                           struct sk_buff *skb, struct netlink_callback *cb,
                           long index_start, long *p_index, bool terse)
{
    struct net *net = sock_net(skb->sk);
    struct tcf_block *block = chain->block;
    struct tcmsg *tcm = nlmsg_data(cb->nlh);
    struct tcf_proto *tp, *tp_prev;
    struct tcf_dump_args arg;

    for (tp = __tcf_get_next_proto(chain, NULL);
         tp;
         tp_prev = tp,
         tp = __tcf_get_next_proto(chain, tp),
         tcf_proto_put(tp_prev, true, NULL),
         (*p_index)++) {
        if (*p_index < index_start)
            continue;
        if (TC_H_MAJ(tcm->tcm_info) &&
            TC_H_MAJ(tcm->tcm_info) != tp->prio)
            continue;
        if (TC_H_MIN(tcm->tcm_info) &&
            TC_H_MIN(tcm->tcm_info) != tp->protocol)
            continue;
        if (*p_index > index_start)
            memset(&cb->args[1], 0,
                   sizeof(cb->args) - sizeof(cb->args[0]));
        if (cb->args[1] == 0) {
            if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
                              NETLINK_CB(cb->skb).portid,
                              cb->nlh->nlmsg_seq, NLM_F_MULTI,
                              RTM_NEWTFILTER, false, true) <= 0)
                goto errout;
            cb->args[1] = 1;
        }
        if (!tp->ops->walk)
            continue;
        arg.w.fn = tcf_node_dump;
        arg.skb = skb;
        arg.cb = cb;
        arg.block = block;
        arg.q = q;
        arg.parent = parent;
        arg.w.stop = 0;
        arg.w.skip = cb->args[1] - 1;
        arg.w.count = 0;
        arg.w.cookie = cb->args[2];
        arg.terse_dump = terse;
        tp->ops->walk(tp, &arg.w, true);
        cb->args[2] = arg.w.cookie;
        cb->args[1] = arg.w.count + 1;
        if (arg.w.stop)
            goto errout;
    }
    return true;

errout:
    tcf_proto_put(tp, true, NULL);
    return false;
}

static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
    [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
};

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
    struct tcf_chain *chain, *chain_prev;
    struct net *net = sock_net(skb->sk);
    struct nlattr *tca[TCA_MAX + 1];
    struct Qdisc *q = NULL;
    struct tcf_block *block;
    struct tcmsg *tcm = nlmsg_data(cb->nlh);
    bool terse_dump = false;
    long index_start;
    long index;
    u32 parent;
    int err;

    if (nlmsg_len(cb->nlh) < sizeof(*tcm))
        return skb->len;

    err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
                                 tcf_tfilter_dump_policy, cb->extack);
    if (err)
        return err;

    if (tca[TCA_DUMP_FLAGS]) {
        struct nla_bitfield32 flags =
            nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);

        terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
    }

    if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
        block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
        if (!block)
            goto out;
        /* If we work with block index, q is NULL and parent value
         * will never be used in the following code. The check
         * in tcf_fill_node prevents it. However, compiler does not
         * see that far, so set parent to zero to silence the warning
         * about parent being uninitialized.
         */
        parent = 0;
    } else {
        const struct Qdisc_class_ops *cops;
        struct net_device *dev;
        unsigned long cl = 0;

        dev = __dev_get_by_index(net, tcm->tcm_ifindex);
        if (!dev)
            return skb->len;

        parent = tcm->tcm_parent;
        if (!parent)
            q = dev->qdisc;
        else
            q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
        if (!q)
            goto out;
        cops = q->ops->cl_ops;
        if (!cops)
            goto out;
        if (!cops->tcf_block)
            goto out;
        if (TC_H_MIN(tcm->tcm_parent)) {
            cl = cops->find(q, tcm->tcm_parent);
            if (cl == 0)
                goto out;
        }
        block = cops->tcf_block(q, cl, NULL);
        if (!block)
            goto out;
        parent = block->classid;
        if (tcf_block_shared(block))
            q = NULL;
    }

    index_start = cb->args[0];
    index = 0;

    for (chain = __tcf_get_next_chain(block, NULL);
         chain;
         chain_prev = chain,
         chain = __tcf_get_next_chain(block, chain),
         tcf_chain_put(chain_prev)) {
        if (tca[TCA_CHAIN] &&
            nla_get_u32(tca[TCA_CHAIN]) != chain->index)
            continue;
        if (!tcf_chain_dump(chain, q, parent, skb, cb,
                            index_start, &index, terse_dump)) {
            tcf_chain_put(chain);
            err = -EMSGSIZE;
            break;
        }
    }

    if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
        tcf_block_refcnt_put(block, true);
    cb->args[0] = index;

out:
    /* If we did no progress, the error (EMSGSIZE) is real */
    if (skb->len == 0 && err)
        return err;
    return skb->len;
}

static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
                              void *tmplt_priv, u32 chain_index,
                              struct net *net, struct sk_buff *skb,
                              struct tcf_block *block,
                              u32 portid, u32 seq, u16 flags, int event)
{
    unsigned char *b = skb_tail_pointer(skb);
    const struct tcf_proto_ops *ops;
    struct nlmsghdr *nlh;
    struct tcmsg *tcm;
    void *priv;

    ops = tmplt_ops;
    priv = tmplt_priv;

    nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
    if (!nlh)
        goto out_nlmsg_trim;
    tcm = nlmsg_data(nlh);
    tcm->tcm_family = AF_UNSPEC;
    tcm->tcm__pad1 = 0;
    tcm->tcm__pad2 = 0;
    tcm->tcm_handle = 0;
    if (block->q) {
        tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
        tcm->tcm_parent = block->q->handle;
    } else {
        tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
        tcm->tcm_block_index = block->index;
    }

    if (nla_put_u32(skb, TCA_CHAIN, chain_index))
        goto nla_put_failure;

    if (ops) {
        if (nla_put_string(skb, TCA_KIND, ops->kind))
            goto nla_put_failure;
        if (ops->tmplt_dump(skb, net, priv) < 0)
            goto nla_put_failure;
    }

    nlh->nlmsg_len = skb_tail_pointer(skb) - b;
    return skb->len;

out_nlmsg_trim:
nla_put_failure:
    nlmsg_trim(skb, b);
    return -EMSGSIZE;
}
static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
                           u32 seq, u16 flags, int event, bool unicast)
{
    u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
    struct tcf_block *block = chain->block;
    struct net *net = block->net;
    struct sk_buff *skb;
    int err = 0;

    skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
    if (!skb)
        return -ENOBUFS;

    if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
                           chain->index, net, skb, block, portid,
                           seq, flags, event) <= 0) {
        kfree_skb(skb);
        return -EINVAL;
    }

    if (unicast)
        err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
    else
        err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                             flags & NLM_F_ECHO);

    if (err > 0)
        err = 0;
    return err;
}

static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
                                  void *tmplt_priv, u32 chain_index,
                                  struct tcf_block *block, struct sk_buff *oskb,
                                  u32 seq, u16 flags, bool unicast)
{
    u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
    struct net *net = block->net;
    struct sk_buff *skb;

    skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
    if (!skb)
        return -ENOBUFS;

    if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
                           block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
        kfree_skb(skb);
        return -EINVAL;
    }

    if (unicast)
        return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

    return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
}

static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
                              struct nlattr **tca,
                              struct netlink_ext_ack *extack)
{
    const struct tcf_proto_ops *ops;
    char name[IFNAMSIZ];
    void *tmplt_priv;

    /* If kind is not set, user did not specify template. */
    if (!tca[TCA_KIND])
        return 0;

    if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
        NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
        return -EINVAL;
    }

    ops = tcf_proto_lookup_ops(name, true, extack);
    if (IS_ERR(ops))
        return PTR_ERR(ops);
    if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
        NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
        /* Drop the module reference taken by tcf_proto_lookup_ops(). */
        module_put(ops->owner);
        return -EOPNOTSUPP;
    }

    tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
    if (IS_ERR(tmplt_priv)) {
        module_put(ops->owner);
        return PTR_ERR(tmplt_priv);
    }
    chain->tmplt_ops = ops;
    chain->tmplt_priv = tmplt_priv;
    return 0;
}

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
                               void *tmplt_priv)
{
    /* If template ops were never set, there is no work to do. */
    if (!tmplt_ops)
        return;

    tmplt_ops->tmplt_destroy(tmplt_priv);
    module_put(tmplt_ops->owner);
}

/* Add/delete/get a chain */

static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
                        struct netlink_ext_ack *extack)
{
    struct net *net = sock_net(skb->sk);
    struct nlattr *tca[TCA_MAX + 1];
    struct tcmsg *t;
    u32 parent;
    u32 chain_index;
    struct Qdisc *q = NULL;
    struct tcf_chain *chain = NULL;
    struct tcf_block *block;
    unsigned long cl;
    int err;

    if (n->nlmsg_type != RTM_GETCHAIN &&
        !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
        return -EPERM;

replay:
    err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
                                 rtm_tca_policy, extack);
    if (err < 0)
        return err;

    t = nlmsg_data(n);
    parent = t->tcm_parent;
    cl = 0;

    block = tcf_block_find(net, &q, &parent, &cl,
                           t->tcm_ifindex, t->tcm_block_index, extack);
    if (IS_ERR(block))
        return PTR_ERR(block);

    chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
    if (chain_index > TC_ACT_EXT_VAL_MASK) {
        NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
        err = -EINVAL;
        goto errout_block;
    }

    mutex_lock(&block->lock);
    chain = tcf_chain_lookup(block, chain_index);
    if (n->nlmsg_type == RTM_NEWCHAIN) {
        if (chain) {
            if (tcf_chain_held_by_acts_only(chain)) {
                /* The chain exists only because there is
                 * some action referencing it.
                 */
                tcf_chain_hold(chain);
            } else {
                NL_SET_ERR_MSG(extack, "Filter chain already exists");
                err = -EEXIST;
                goto errout_block_locked;
            }
        } else {
            if (!(n->nlmsg_flags & NLM_F_CREATE)) {
                NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
                err = -ENOENT;
                goto errout_block_locked;
            }
            chain = tcf_chain_create(block, chain_index);
            if (!chain) {
                NL_SET_ERR_MSG(extack, "Failed to create filter chain");
                err = -ENOMEM;
                goto errout_block_locked;
            }
        }
    } else {
        if (!chain || tcf_chain_held_by_acts_only(chain)) {
            NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
            err = -EINVAL;
            goto errout_block_locked;
        }
        tcf_chain_hold(chain);
    }

    if (n->nlmsg_type == RTM_NEWCHAIN) {
        /* Modifying chain requires holding parent block lock. In case
         * the chain was successfully added, take a reference to the
         * chain. This ensures that an empty chain does not disappear
         * at the end of this function.
         */
        tcf_chain_hold(chain);
        chain->explicitly_created = true;
    }
    mutex_unlock(&block->lock);

    switch (n->nlmsg_type) {
    case RTM_NEWCHAIN:
        err = tc_chain_tmplt_add(chain, net, tca, extack);
        if (err) {
            tcf_chain_put_explicitly_created(chain);
            goto errout;
        }

        tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
                        RTM_NEWCHAIN, false);
        break;
    case RTM_DELCHAIN:
        tfilter_notify_chain(net, skb, block, q, parent, n,
                             chain, RTM_DELTFILTER, true);
        /* Flush the chain first as the user requested chain removal. */
        tcf_chain_flush(chain, true);
        /* In case the chain was successfully deleted, put a reference
         * to the chain previously taken during addition.
         */
        tcf_chain_put_explicitly_created(chain);
        break;
    case RTM_GETCHAIN:
        err = tc_chain_notify(chain, skb, n->nlmsg_seq,
                              n->nlmsg_seq, n->nlmsg_type, true);
        if (err < 0)
            NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
        break;
    default:
        err = -EOPNOTSUPP;
        NL_SET_ERR_MSG(extack, "Unsupported message type");
        goto errout;
    }

errout:
    tcf_chain_put(chain);
errout_block:
    tcf_block_release(q, block, true);
    if (err == -EAGAIN)
        /* Replay the request. */
        goto replay;
    return err;

errout_block_locked:
    mutex_unlock(&block->lock);
    goto errout_block;
}

/* called with RTNL */
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
{
    struct net *net = sock_net(skb->sk);
    struct nlattr *tca[TCA_MAX + 1];
    struct Qdisc *q = NULL;
    struct tcf_block *block;
    struct tcmsg *tcm = nlmsg_data(cb->nlh);
    struct tcf_chain *chain;
    long index_start;
    long index;
    u32 parent;
    int err;

    if (nlmsg_len(cb->nlh) < sizeof(*tcm))
        return skb->len;

    err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
                                 rtm_tca_policy, cb->extack);
    if (err)
        return err;

    if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
        block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
        if (!block)
            goto out;
        /* If we work with block index, q is NULL and parent value
         * will never be used in the following code. The check
         * in tcf_fill_node prevents it. However, compiler does not
         * see that far, so set parent to zero to silence the warning
         * about parent being uninitialized.
         */
        parent = 0;
    } else {
        const struct Qdisc_class_ops *cops;
        struct net_device *dev;
        unsigned long cl = 0;

        dev = __dev_get_by_index(net, tcm->tcm_ifindex);
        if (!dev)
            return skb->len;

        parent = tcm->tcm_parent;
        if (!parent) {
            q = dev->qdisc;
            parent = q->handle;
        } else {
            q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
        }
        if (!q)
            goto out;
        cops = q->ops->cl_ops;
        if (!cops)
            goto out;
        if (!cops->tcf_block)
            goto out;
        if (TC_H_MIN(tcm->tcm_parent)) {
            cl = cops->find(q, tcm->tcm_parent);
            if (cl == 0)
                goto out;
        }
        block = cops->tcf_block(q, cl, NULL);
        if (!block)
            goto out;
        if (tcf_block_shared(block))
            q = NULL;
    }

    index_start = cb->args[0];
    index = 0;

    mutex_lock(&block->lock);
    list_for_each_entry(chain, &block->chain_list, list) {
        if ((tca[TCA_CHAIN] &&
             nla_get_u32(tca[TCA_CHAIN]) != chain->index))
            continue;
        if (index < index_start) {
            index++;
            continue;
        }
        if (tcf_chain_held_by_acts_only(chain))
            continue;
        err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
                                 chain->index, net, skb, block,
                                 NETLINK_CB(cb->skb).portid,
                                 cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                 RTM_NEWCHAIN);
        if (err <= 0)
            break;
        index++;
    }
    mutex_unlock(&block->lock);

    if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
        tcf_block_refcnt_put(block, true);
    cb->args[0] = index;

out:
    /* If we did no progress, the error (EMSGSIZE) is real */
    if (skb->len == 0 && err)
        return err;
    return skb->len;
}
void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
    if (exts->actions) {
        tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
        kfree(exts->actions);
    }
    exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
                      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
                      bool rtnl_held, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
    {
        struct tc_action *act;
        size_t attr_size = 0;

        if (exts->police && tb[exts->police]) {
            act = tcf_action_init_1(net, tp, tb[exts->police],
                                    rate_tlv, "police", ovr,
                                    TCA_ACT_BIND, rtnl_held,
                                    extack);
            if (IS_ERR(act))
                return PTR_ERR(act);

            act->type = exts->type = TCA_OLD_COMPAT;
            exts->actions[0] = act;
            exts->nr_actions = 1;
        } else if (exts->action && tb[exts->action]) {
            int err;

            err = tcf_action_init(net, tp, tb[exts->action],
                                  rate_tlv, NULL, ovr, TCA_ACT_BIND,
                                  exts->actions, &attr_size,
                                  rtnl_held, extack);
            if (err < 0)
                return err;
            exts->nr_actions = err;
        }
    }
#else
    if ((exts->action && tb[exts->action]) ||
        (exts->police && tb[exts->police])) {
        NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
        return -EOPNOTSUPP;
    }
#endif

    return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
    struct tcf_exts old = *dst;

    *dst = *src;
    tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
    if (exts->nr_actions == 0)
        return NULL;
    else
        return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
    struct nlattr *nest;

    if (exts->action && tcf_exts_has_actions(exts)) {
        /*
         * again for backward compatible mode - we want
         * to work with both old and new modes of entering
         * tc data even if iproute2 was newer - jhs
         */
        if (exts->type != TCA_OLD_COMPAT) {
            nest = nla_nest_start_noflag(skb, exts->action);
            if (nest == NULL)
                goto nla_put_failure;

            if (tcf_action_dump(skb, exts->actions, 0, 0, false) < 0)
                goto nla_put_failure;
            nla_nest_end(skb, nest);
        } else if (exts->police) {
            struct tc_action *act = tcf_exts_first_act(exts);

            nest = nla_nest_start_noflag(skb, exts->police);
            if (nest == NULL || !act)
                goto nla_put_failure;
            if (tcf_action_dump_old(skb, act, 0, 0) < 0)
                goto nla_put_failure;
            nla_nest_end(skb, nest);
        }
    }
    return 0;

nla_put_failure:
    nla_nest_cancel(skb, nest);
    return -1;
#else
    return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
    struct nlattr *nest;

    if (!exts->action || !tcf_exts_has_actions(exts))
        return 0;

    nest = nla_nest_start_noflag(skb, exts->action);
    if (!nest)
        goto nla_put_failure;

    if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
        goto nla_put_failure;
    nla_nest_end(skb, nest);
    return 0;

nla_put_failure:
    nla_nest_cancel(skb, nest);
    return -1;
#else
    return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_terse_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
    struct tc_action *a = tcf_exts_first_act(exts);

    if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
        return -1;
#endif
    return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
    if (*flags & TCA_CLS_FLAGS_IN_HW)
        return;
    *flags |= TCA_CLS_FLAGS_IN_HW;
    atomic_inc(&block->offloadcnt);
}

static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
    if (!(*flags & TCA_CLS_FLAGS_IN_HW))
        return;
    *flags &= ~TCA_CLS_FLAGS_IN_HW;
    atomic_dec(&block->offloadcnt);
}

static void tc_cls_offload_cnt_update(struct tcf_block *block,
                                      struct tcf_proto *tp, u32 *cnt,
                                      u32 *flags, u32 diff, bool add)
{
    lockdep_assert_held(&block->cb_lock);

    spin_lock(&tp->lock);
    if (add) {
        if (!*cnt)
            tcf_block_offload_inc(block, flags);
        *cnt += diff;
    } else {
        *cnt -= diff;
        if (!*cnt)
            tcf_block_offload_dec(block, flags);
    }
    spin_unlock(&tp->lock);
}

static void
tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
                         u32 *cnt, u32 *flags)
{
    lockdep_assert_held(&block->cb_lock);

    spin_lock(&tp->lock);
    tcf_block_offload_dec(block, flags);
    *cnt = 0;
    spin_unlock(&tp->lock);
}

static int
__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
                   void *type_data, bool err_stop)
{
    struct flow_block_cb *block_cb;
    int ok_count = 0;
    int err;

    list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
        err = block_cb->cb(type, type_data, block_cb->cb_priv);
        if (err) {
            if (err_stop)
                return err;
        } else {
            ok_count++;
        }
    }
    return ok_count;
}

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
                     void *type_data, bool err_stop, bool rtnl_held)
{
    bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
    int ok_count;

retry:
    if (take_rtnl)
        rtnl_lock();
    down_read(&block->cb_lock);
    /* Need to obtain rtnl lock if block is bound to devs that require it.
     * In block bind code cb_lock is obtained while holding rtnl, so we must
     * obtain the locks in same order here.
     */
    if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
        up_read(&block->cb_lock);
        take_rtnl = true;
        goto retry;
    }

    ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

    up_read(&block->cb_lock);
    if (take_rtnl)
        rtnl_unlock();
    return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);

/* Non-destructive filter add. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offloads counter. On failure,
 * previously offloaded filter is considered to be intact and offloads counter
 * is not decremented.
 */

int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
                    enum tc_setup_type type, void *type_data, bool err_stop,
                    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
    bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
    int ok_count;

retry:
    if (take_rtnl)
        rtnl_lock();
    down_read(&block->cb_lock);
    /* Need to obtain rtnl lock if block is bound to devs that require it.
     * In block bind code cb_lock is obtained while holding rtnl, so we must
     * obtain the locks in same order here.
     */
    if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
        up_read(&block->cb_lock);
        take_rtnl = true;
        goto retry;
    }

    /* Make sure all netdevs sharing this block are offload-capable. */
    if (block->nooffloaddevcnt && err_stop) {
        ok_count = -EOPNOTSUPP;
        goto err_unlock;
    }

    ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
    if (ok_count < 0)
        goto err_unlock;

    if (tp->ops->hw_add)
        tp->ops->hw_add(tp, type_data);
    if (ok_count > 0)
        tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
                                  ok_count, true);
err_unlock:
    up_read(&block->cb_lock);
    if (take_rtnl)
        rtnl_unlock();
    return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_add);

/* Destructive filter replace. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offload counter. On failure,
 * previously offloaded filter is considered to be destroyed and offload
 * counter is decremented.
 */

int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
                        enum tc_setup_type type, void *type_data, bool err_stop,
                        u32 *old_flags, unsigned int *old_in_hw_count,
                        u32 *new_flags, unsigned int *new_in_hw_count,
                        bool rtnl_held)
{
    bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
    int ok_count;

retry:
    if (take_rtnl)
        rtnl_lock();
    down_read(&block->cb_lock);
    /* Need to obtain rtnl lock if block is bound to devs that require it.
     * In block bind code cb_lock is obtained while holding rtnl, so we must
     * obtain the locks in same order here.
     */
    if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
        up_read(&block->cb_lock);
        take_rtnl = true;
        goto retry;
    }

    /* Make sure all netdevs sharing this block are offload-capable. */
    if (block->nooffloaddevcnt && err_stop) {
        ok_count = -EOPNOTSUPP;
        goto err_unlock;
    }

    tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
    if (tp->ops->hw_del)
        tp->ops->hw_del(tp, type_data);

    ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
    if (ok_count < 0)
        goto err_unlock;

    if (tp->ops->hw_add)
        tp->ops->hw_add(tp, type_data);
    if (ok_count > 0)
        tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
                                  new_flags, ok_count, true);
err_unlock:
    up_read(&block->cb_lock);
    if (take_rtnl)
        rtnl_unlock();
    return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_replace);

/* Destroy filter and decrement block offload counter, if filter was
 * previously offloaded.
 */

int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
                        enum tc_setup_type type, void *type_data, bool err_stop,
                        u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
    bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
    int ok_count;

retry:
    if (take_rtnl)
        rtnl_lock();
    down_read(&block->cb_lock);
    /* Need to obtain rtnl lock if block is bound to devs that require it.
     * In block bind code cb_lock is obtained while holding rtnl, so we must
     * obtain the locks in same order here.
     */
    if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
        up_read(&block->cb_lock);
        take_rtnl = true;
        goto retry;
    }

    ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

    tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
    if (tp->ops->hw_del)
        tp->ops->hw_del(tp, type_data);

    up_read(&block->cb_lock);
    if (take_rtnl)
        rtnl_unlock();
    return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_destroy);

int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
                          bool add, flow_setup_cb_t *cb,
                          enum tc_setup_type type, void *type_data,
                          void *cb_priv, u32 *flags, unsigned int *in_hw_count)
{
    int err = cb(type, type_data, cb_priv);

    if (err) {
        if (add && tc_skip_sw(*flags))
            return err;
    } else {
        tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
                                  add);
    }

    return 0;
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);

static int tcf_act_get_cookie(struct flow_action_entry *entry,
                              const struct tc_action *act)
{
    struct tc_cookie *cookie;
    int err = 0;

    rcu_read_lock();
    cookie = rcu_dereference(act->act_cookie);
    if (cookie) {
        entry->cookie = flow_action_cookie_create(cookie->data,
                                                  cookie->len,
                                                  GFP_ATOMIC);
        if (!entry->cookie)
            err = -ENOMEM;
    }
    rcu_read_unlock();
    return err;
}

static void tcf_act_put_cookie(struct flow_action_entry *entry)
{
    flow_action_cookie_destroy(entry->cookie);
}

void tc_cleanup_flow_action(struct flow_action *flow_action)
{
    struct flow_action_entry *entry;
    int i;

    flow_action_for_each(i, entry, flow_action) {
        tcf_act_put_cookie(entry);
        if (entry->destructor)
            entry->destructor(entry->destructor_priv);
    }
}
EXPORT_SYMBOL(tc_cleanup_flow_action);

static void tcf_mirred_get_dev(struct flow_action_entry *entry,
                               const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
    entry->dev = act->ops->get_dev(act, &entry->destructor);
    if (!entry->dev)
        return;
    entry->destructor_priv = entry->dev;
#endif
}

static void tcf_tunnel_encap_put_tunnel(void *priv)
{
    struct ip_tunnel_info *tunnel = priv;

    kfree(tunnel);
}

static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
                                       const struct tc_action *act)
{
    entry->tunnel = tcf_tunnel_info_copy(act);
    if (!entry->tunnel)
        return -ENOMEM;
    entry->destructor = tcf_tunnel_encap_put_tunnel;
    entry->destructor_priv = entry->tunnel;
    return 0;
}
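/* tcf_tunnel_encap_get_tunnel() above shows the general resource pattern
 * used throughout this translation layer: any helper that attaches an
 * object to a flow_action_entry also sets entry->destructor and
 * entry->destructor_priv, and tc_cleanup_flow_action() invokes those
 * destructors once the caller is done with the table. The helpers that
 * follow (sample group, gate entry list) use the same scheme.
 */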
static void tcf_sample_get_group(struct flow_action_entry *entry,
                                 const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
    entry->sample.psample_group =
        act->ops->get_psample_group(act, &entry->destructor);
    entry->destructor_priv = entry->sample.psample_group;
#endif
}

static void tcf_gate_entry_destructor(void *priv)
{
    struct action_gate_entry *oe = priv;

    kfree(oe);
}

static int tcf_gate_get_entries(struct flow_action_entry *entry,
                                const struct tc_action *act)
{
    entry->gate.entries = tcf_gate_get_list(act);

    if (!entry->gate.entries)
        return -EINVAL;

    entry->destructor = tcf_gate_entry_destructor;
    entry->destructor_priv = entry->gate.entries;

    return 0;
}

static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
{
    if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
        return FLOW_ACTION_HW_STATS_DONT_CARE;
    else if (!hw_stats)
        return FLOW_ACTION_HW_STATS_DISABLED;

    return hw_stats;
}

int tc_setup_flow_action(struct flow_action *flow_action,
                         const struct tcf_exts *exts)
{
    struct tc_action *act;
    int i, j, k, err = 0;

    BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
    BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
    BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);

    if (!exts)
        return 0;

    j = 0;
    tcf_exts_for_each_action(i, act, exts) {
        struct flow_action_entry *entry;

        entry = &flow_action->entries[j];
        spin_lock_bh(&act->tcfa_lock);
        err = tcf_act_get_cookie(entry, act);
        if (err)
            goto err_out_locked;

        entry->hw_stats = tc_act_hw_stats(act->hw_stats);

        if (is_tcf_gact_ok(act)) {
            entry->id = FLOW_ACTION_ACCEPT;
        } else if (is_tcf_gact_shot(act)) {
            entry->id = FLOW_ACTION_DROP;
        } else if (is_tcf_gact_trap(act)) {
            entry->id = FLOW_ACTION_TRAP;
        } else if (is_tcf_gact_goto_chain(act)) {
            entry->id = FLOW_ACTION_GOTO;
            entry->chain_index = tcf_gact_goto_chain_index(act);
        } else if (is_tcf_mirred_egress_redirect(act)) {
            entry->id = FLOW_ACTION_REDIRECT;
            tcf_mirred_get_dev(entry, act);
        } else if (is_tcf_mirred_egress_mirror(act)) {
            entry->id = FLOW_ACTION_MIRRED;
            tcf_mirred_get_dev(entry, act);
        } else if (is_tcf_mirred_ingress_redirect(act)) {
            entry->id = FLOW_ACTION_REDIRECT_INGRESS;
            tcf_mirred_get_dev(entry, act);
        } else if (is_tcf_mirred_ingress_mirror(act)) {
            entry->id = FLOW_ACTION_MIRRED_INGRESS;
            tcf_mirred_get_dev(entry, act);
        } else if (is_tcf_vlan(act)) {
            switch (tcf_vlan_action(act)) {
            case TCA_VLAN_ACT_PUSH:
                entry->id = FLOW_ACTION_VLAN_PUSH;
                entry->vlan.vid = tcf_vlan_push_vid(act);
                entry->vlan.proto = tcf_vlan_push_proto(act);
                entry->vlan.prio = tcf_vlan_push_prio(act);
                break;
            case TCA_VLAN_ACT_POP:
                entry->id = FLOW_ACTION_VLAN_POP;
                break;
            case TCA_VLAN_ACT_MODIFY:
                entry->id = FLOW_ACTION_VLAN_MANGLE;
                entry->vlan.vid = tcf_vlan_push_vid(act);
                entry->vlan.proto = tcf_vlan_push_proto(act);
                entry->vlan.prio = tcf_vlan_push_prio(act);
                break;
            default:
                err = -EOPNOTSUPP;
                goto err_out_locked;
            }
        } else if (is_tcf_tunnel_set(act)) {
            entry->id = FLOW_ACTION_TUNNEL_ENCAP;
            err = tcf_tunnel_encap_get_tunnel(entry, act);
            if (err)
                goto err_out_locked;
        } else if (is_tcf_tunnel_release(act)) {
            entry->id = FLOW_ACTION_TUNNEL_DECAP;
        } else if (is_tcf_pedit(act)) {
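            /* One pedit action fans out into one flow_action entry per
             * key, advancing j inside the loop below; the table must
             * therefore be sized with tcf_exts_num_actions(), which
             * counts pedit keys rather than actions.
             */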
            for (k = 0; k < tcf_pedit_nkeys(act); k++) {
                switch (tcf_pedit_cmd(act, k)) {
                case TCA_PEDIT_KEY_EX_CMD_SET:
                    entry->id = FLOW_ACTION_MANGLE;
                    break;
                case TCA_PEDIT_KEY_EX_CMD_ADD:
                    entry->id = FLOW_ACTION_ADD;
                    break;
                default:
                    err = -EOPNOTSUPP;
                    goto err_out_locked;
                }
                entry->mangle.htype = tcf_pedit_htype(act, k);
                entry->mangle.mask = tcf_pedit_mask(act, k);
                entry->mangle.val = tcf_pedit_val(act, k);
                entry->mangle.offset = tcf_pedit_offset(act, k);
                entry->hw_stats = tc_act_hw_stats(act->hw_stats);
                entry = &flow_action->entries[++j];
            }
        } else if (is_tcf_csum(act)) {
            entry->id = FLOW_ACTION_CSUM;
            entry->csum_flags = tcf_csum_update_flags(act);
        } else if (is_tcf_skbedit_mark(act)) {
            entry->id = FLOW_ACTION_MARK;
            entry->mark = tcf_skbedit_mark(act);
        } else if (is_tcf_sample(act)) {
            entry->id = FLOW_ACTION_SAMPLE;
            entry->sample.trunc_size = tcf_sample_trunc_size(act);
            entry->sample.truncate = tcf_sample_truncate(act);
            entry->sample.rate = tcf_sample_rate(act);
            tcf_sample_get_group(entry, act);
        } else if (is_tcf_police(act)) {
            entry->id = FLOW_ACTION_POLICE;
            entry->police.burst = tcf_police_tcfp_burst(act);
            entry->police.rate_bytes_ps =
                tcf_police_rate_bytes_ps(act);
        } else if (is_tcf_ct(act)) {
            entry->id = FLOW_ACTION_CT;
            entry->ct.action = tcf_ct_action(act);
            entry->ct.zone = tcf_ct_zone(act);
            entry->ct.flow_table = tcf_ct_ft(act);
        } else if (is_tcf_mpls(act)) {
            switch (tcf_mpls_action(act)) {
            case TCA_MPLS_ACT_PUSH:
                entry->id = FLOW_ACTION_MPLS_PUSH;
                entry->mpls_push.proto = tcf_mpls_proto(act);
                entry->mpls_push.label = tcf_mpls_label(act);
                entry->mpls_push.tc = tcf_mpls_tc(act);
                entry->mpls_push.bos = tcf_mpls_bos(act);
                entry->mpls_push.ttl = tcf_mpls_ttl(act);
                break;
            case TCA_MPLS_ACT_POP:
                entry->id = FLOW_ACTION_MPLS_POP;
                entry->mpls_pop.proto = tcf_mpls_proto(act);
                break;
            case TCA_MPLS_ACT_MODIFY:
                entry->id = FLOW_ACTION_MPLS_MANGLE;
                entry->mpls_mangle.label = tcf_mpls_label(act);
                entry->mpls_mangle.tc = tcf_mpls_tc(act);
                entry->mpls_mangle.bos = tcf_mpls_bos(act);
                entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
                break;
            default:
                /* Set err here as well; falling through with err == 0
                 * would report spurious success for an unsupported
                 * MPLS action.
                 */
                err = -EOPNOTSUPP;
                goto err_out_locked;
            }
        } else if (is_tcf_skbedit_ptype(act)) {
            entry->id = FLOW_ACTION_PTYPE;
            entry->ptype = tcf_skbedit_ptype(act);
        } else if (is_tcf_skbedit_priority(act)) {
            entry->id = FLOW_ACTION_PRIORITY;
            entry->priority = tcf_skbedit_priority(act);
        } else if (is_tcf_gate(act)) {
            entry->id = FLOW_ACTION_GATE;
            entry->gate.index = tcf_gate_index(act);
            entry->gate.prio = tcf_gate_prio(act);
            entry->gate.basetime = tcf_gate_basetime(act);
            entry->gate.cycletime = tcf_gate_cycletime(act);
            entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
            entry->gate.num_entries = tcf_gate_num_entries(act);
            err = tcf_gate_get_entries(entry, act);
            if (err)
                goto err_out;
        } else {
            err = -EOPNOTSUPP;
            goto err_out_locked;
        }
        spin_unlock_bh(&act->tcfa_lock);

        if (!is_tcf_pedit(act))
            j++;
    }

err_out:
    if (err)
        tc_cleanup_flow_action(flow_action);

    return err;
err_out_locked:
    spin_unlock_bh(&act->tcfa_lock);
    goto err_out;
}
EXPORT_SYMBOL(tc_setup_flow_action);
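/* Callers size the flow_action table with tcf_exts_num_actions() (just
 * below) before calling tc_setup_flow_action(), since a single pedit
 * action expands to one entry per key. In essence, as cls_flower does:
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
 *	if (!rule)
 *		return -ENOMEM;
 *	err = tc_setup_flow_action(&rule->action, &f->exts);
 */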
unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
    unsigned int num_acts = 0;
    struct tc_action *act;
    int i;

    tcf_exts_for_each_action(i, act, exts) {
        if (is_tcf_pedit(act))
            num_acts += tcf_pedit_nkeys(act);
        else
            num_acts++;
    }
    return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);

static __net_init int tcf_net_init(struct net *net)
{
    struct tcf_net *tn = net_generic(net, tcf_net_id);

    spin_lock_init(&tn->idr_lock);
    idr_init(&tn->idr);
    return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
    struct tcf_net *tn = net_generic(net, tcf_net_id);

    idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
    .init = tcf_net_init,
    .exit = tcf_net_exit,
    .id   = &tcf_net_id,
    .size = sizeof(struct tcf_net),
};

static struct flow_indr_block_entry block_entry = {
    .cb = tc_indr_block_get_and_cmd,
    .list = LIST_HEAD_INIT(block_entry.list),
};

static int __init tc_filter_init(void)
{
    int err;

    tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
    if (!tc_filter_wq)
        return -ENOMEM;

    err = register_pernet_subsys(&tcf_net_ops);
    if (err)
        goto err_register_pernet_subsys;

    flow_indr_add_block_cb(&block_entry);

    rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
                  RTNL_FLAG_DOIT_UNLOCKED);
    rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
                  RTNL_FLAG_DOIT_UNLOCKED);
    rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
                  tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
    rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
    rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
    rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
                  tc_dump_chain, 0);

    return 0;

err_register_pernet_subsys:
    destroy_workqueue(tc_filter_wq);
    return err;
}

subsys_initcall(tc_filter_init);
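/* A classifier module ties into the API implemented above by registering
 * its tcf_proto_ops at module init; minimal sketch for a hypothetical
 * classifier "foo":
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.get		= foo_get,
 *		.change		= foo_change,
 *		.delete		= foo_delete,
 *		.walk		= foo_walk,
 *		.dump		= foo_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *	module_init(cls_foo_init);
 */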