// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}
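/* Note: on CONFIG_MODULES kernels the lookup below may temporarily drop
 * the RTNL lock to load the classifier module, in which case it returns
 * ERR_PTR(-EAGAIN) and the caller must replay the whole request (see the
 * "replay" label in tc_new_tfilter() below).
 */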
static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module, we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register (unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
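/* Worked example: with an empty chain, tcf_auto_prio(NULL) yields
 * TC_H_MAJ(0xC0000000) == 0xC0000000. If the current head was
 * auto-allocated at 0xC0000000, the next filter gets
 * TC_H_MAJ(0xC0000000 - 1) == 0xBFFF0000, i.e. kernel-managed
 * priorities descend one major step per allocation.
 */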
static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take the rtnl lock. Proto lookup/create
	 * functions will perform the lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}
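/* chain0 is special: its current head tp is mirrored to every registered
 * subscriber (see tcf_chain0_head_change() below and
 * tcf_chain_head_change_dflt() further down), which lets a qdisc cache
 * the head of the filter list for the classification fast path without
 * any chain lookup.
 */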
static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);
static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);
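/* Actions that reference chains directly (e.g. the "goto chain" control)
 * use tcf_chain_get_by_act()/tcf_chain_put_by_act() so their references
 * are counted in action_refcnt and, per tcf_chain_held_by_acts_only()
 * above, keep the chain invisible to user space until a filter takes a
 * real reference.
 */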
static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, once the block is unlocked the chain can be changed
	 * concurrently, so save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block,
			      flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      enum flow_block_command command, bool ingress)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ingress ?
				  FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS :
				  FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
		.net		= dev_net(dev),
		.block_shared	= tcf_block_non_null_shared(block),
	};
	INIT_LIST_HEAD(&bo.cb_list);

	if (!block)
		return;

	bo.block = &block->flow_block;

	down_write(&block->cb_lock);
	cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

	tcf_block_setup(block, &bo);
	up_write(&block->cb_lock);
}
static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress)
{
	const struct Qdisc_class_ops *cops;
	const struct Qdisc_ops *ops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	ops = qdisc->ops;
	if (!ops)
		return NULL;

	if (!ingress && !strcmp("ingress", ops->id))
		return NULL;

	cops = ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc,
			       ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS,
			       NULL);
}

static void tc_indr_block_get_and_cmd(struct net_device *dev,
				      flow_indr_block_bind_cb_t *cb,
				      void *cb_priv,
				      enum flow_block_command command)
{
	struct tcf_block *block;

	block = tc_dev_block(dev, true);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);

	block = tc_dev_block(dev, false);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
}

static void tc_indr_block_call(struct tcf_block *block,
			       struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum flow_block_command command,
			       struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ei->binder_type,
		.net		= dev_net(dev),
		.block		= &block->flow_block,
		.block_shared	= tcf_block_shared(block),
		.extack		= extack,
	};
	INIT_LIST_HEAD(&bo.cb_list);

	flow_indr_block_call(dev, &bo, command, TC_SETUP_BLOCK);
	tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};
	int err;

	bo.net = dev_net(dev);
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = &block->flow_block;
	bo.block_shared = tcf_block_shared(block);
	bo.extack = extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0) {
		if (err != -EOPNOTSUPP)
			NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
		return err;
	}

	return tcf_block_setup(block, &bo);
}
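/* Offload binding below is best effort: a device without ndo_setup_tc,
 * or one that returns -EOPNOTSUPP, is accounted in
 * block->nooffloaddevcnt and the block keeps working in software. The
 * bind is only refused when the block already carries offloaded
 * filters.
 */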
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If the tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, refuse the bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block)) {
		err = -EOPNOTSUPP;
		goto err_unlock;
	}
	err = 0;
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;
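/* Shared blocks are indexed per netns: block->index is the key in
 * tcf_net->idr, which is how user space addresses a block directly with
 * tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK and tcm_block_index (see
 * __tcf_block_find() below).
 */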
static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}
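/* refcount_inc_not_zero() makes the lookup above safe against a block
 * that is concurrently executing its final tcf_block_refcnt_put(): a
 * block whose refcount already dropped to zero is reported as not
 * found instead of being resurrected.
 */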
static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
		   bool rtnl_held)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, rtnl_held, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
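/* Informal usage sketch of the two iterators above (the same pattern
 * tcf_block_flush_all_chains() and tfilter_notify_chain() rely on):
 *
 *	for (chain = tcf_get_next_chain(block, NULL); chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		for (tp = tcf_get_next_proto(chain, NULL, rtnl_held); tp;
 *		     tp = tcf_get_next_proto(chain, tp, rtnl_held))
 *			...;
 *
 * Each call releases the reference on the previous element, so breaking
 * out of the loop early requires an explicit tcf_chain_put() or
 * tcf_proto_put() for the element held at that point.
 */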
static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block in order to support
		 * execution of the rules update path of cls API without the
		 * rtnl lock. Caller must release the block when it is
		 * finished using it. The 'if' branch of this conditional
		 * obtains a reference to the block by calling
		 * tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}
static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. Once the
		 * block's reference counter has reached 0, it is no longer
		 * possible to increment it or add new chains to the block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}
static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = tc_skb_protocol(skb);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
}
EXPORT_SYMBOL(tcf_classify);
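/* With CONFIG_NET_TC_SKB_EXT, tcf_classify_ingress() below can resume a
 * classification that was only partially completed elsewhere: an skb
 * carrying a TC_SKB_EXT extension names the chain to continue on (set
 * e.g. by a driver whose hardware executed part of the chain graph),
 * and a software miss (TC_ACT_UNSPEC on a non-zero chain) re-attaches
 * the extension so the next classification point can pick up from the
 * last executed chain.
 */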
int tcf_classify_ingress(struct sk_buff *skb,
			 const struct tcf_block *ingress_block,
			 const struct tcf_proto *tp,
			 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int ret;

	ext = skb_ext_find(skb, TC_SKB_EXT);

	if (ext && ext->chain) {
		struct tcf_chain *fchain;

		fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain);
		if (!fchain)
			return TC_ACT_SHOT;

		/* Consume, so cloned/redirect skbs won't inherit ext */
		skb_ext_del(skb, TC_SKB_EXT);

		tp = rcu_dereference_bh(fchain->filter_chain);
		last_executed_chain = fchain->index;
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
			     &last_executed_chain);

	/* If we missed on some chain */
	if (ret == TC_ACT_UNSPEC && last_executed_chain) {
		ext = skb_ext_add(skb, TC_SKB_EXT);
		if (WARN_ON_ONCE(!ext))
			return TC_ACT_SHOT;
		ext->chain = last_executed_chain;
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify_ingress);

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);
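/* tcf_chain_info captures an insertion point in the singly linked filter
 * list: ->pprev is the link to patch and ->next the element that will
 * follow. tcf_chain_tp_find() fills it in under filter_chain_lock, and
 * tcf_chain_tp_insert()/tcf_chain_tp_remove() splice using exactly
 * these two pointers.
 */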
/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool rtnl_held)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  rtnl_held) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}
static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  rtnl_held) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	if (err > 0)
		err = 0;
	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 bool rtnl_held)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
	     tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false, rtnl_held);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}
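/* The message handlers below (new/del/get) try to run without RTNL:
 * the lock is taken only when the qdisc or the classifier is not marked
 * unlocked, or when no qdisc backs the request (shared block), as
 * spelled out in the comments before each rtnl_lock() call.
 */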
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

	/* Take the rtnl mutex if any of the following holds: rtnl_held was
	 * set to true on a previous iteration, the block is shared (no qdisc
	 * found), the qdisc is not unlocked, the classifier type is not
	 * specified, or the classifier is not unlocked.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(name, protocol, prio, chain,
					  rtnl_held, extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
			      rtnl_held, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held);
		tfilter_put(tp, fh);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
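/* Example of the netlink flag handling above, as set by iproute2's tc:
 * "tc filter add" sends NLM_F_CREATE | NLM_F_EXCL, so an existing handle
 * fails with -EEXIST; "tc filter change" sends neither and replaces in
 * place (failing with -ENOENT if the handle is missing); "tc filter
 * replace" sends only NLM_F_CREATE, creating the filter if the handle
 * does not exist yet.
 */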

static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
		NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
	 * found), qdisc is not unlocked, classifier type is not specified,
	 * classifier is not unlocked.
	 */
	if (!prio ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		/* User requested flush on non-existent chain. Nothing to do,
		 * so just return success.
		 */
		if (prio == 0) {
			err = 0;
			goto errout;
		}
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -ENOENT;
		goto errout;
	}

	if (prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, rtnl_held);
		tcf_chain_flush(chain, rtnl_held);
		err = 0;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout_locked;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout_locked;
	} else if (t->tcm_handle == 0) {
		tcf_proto_signal_destroying(chain, tp);
		tcf_chain_tp_remove(chain, &chain_info, tp);
		mutex_unlock(&chain->filter_chain_lock);

		tcf_proto_put(tp, rtnl_held, NULL);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_DELTFILTER, false, rtnl_held);
		err = 0;
		goto errout;
	}
	mutex_unlock(&chain->filter_chain_lock);

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		bool last;

		err = tfilter_del_notify(net, skb, n, tp, block,
					 q, parent, fh, false, &last,
					 rtnl_held, extack);

		if (err)
			goto errout;
		if (last)
			tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
	}

errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
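
/* tc_get_tfilter() is the doit handler for RTM_GETTFILTER. Unlike the
 * new/del handlers above it does not require CAP_NET_ADMIN, and it
 * answers with a unicast RTM_NEWTFILTER message built by tfilter_notify()
 * with unicast == true.
 */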

static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block = NULL;
	struct tcf_proto *tp = NULL;
	unsigned long cl = 0;
	void *fh = NULL;
	int err;
	bool rtnl_held = false;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	parent = t->tcm_parent;

	if (prio == 0) {
		NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
		return -ENOENT;
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}
	/* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
	 * unlocked, classifier type is not specified, classifier is not
	 * unlocked.
	 */
	if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, false);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = -EINVAL;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, false);
	mutex_unlock(&chain->filter_chain_lock);
	if (!tp || IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = tp ? PTR_ERR(tp) : -ENOENT;
		goto errout;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		NL_SET_ERR_MSG(extack, "Specified filter handle not found");
		err = -ENOENT;
	} else {
		err = tfilter_notify(net, skb, n, tp, block, q, parent,
				     fh, RTM_NEWTFILTER, true, rtnl_held);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
	}

	tfilter_put(tp, fh);
errout:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	return err;
}

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct tcf_block *block;
	struct Qdisc *q;
	u32 parent;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER, true);
}
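
/* Dump state is kept in cb->args[]: args[0] is the flat index of the
 * last fully dumped tcf_proto across all chains of the block, args[1]
 * is 1 + the number of filters already walked in the current proto
 * (0 means its header node has not been emitted yet), and args[2]
 * carries the classifier's private walker cookie between dump calls.
 */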

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_proto *tp, *tp_prev;
	struct tcf_dump_args arg;

	for (tp = __tcf_get_next_proto(chain, NULL);
	     tp;
	     tp_prev = tp,
		     tp = __tcf_get_next_proto(chain, tp),
		     tcf_proto_put(tp_prev, true, NULL),
		     (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER, true) <= 0)
				goto errout;
			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		arg.w.cookie = cb->args[2];
		tp->ops->walk(tp, &arg.w, true);
		cb->args[2] = arg.w.cookie;
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			goto errout;
	}
	return true;

errout:
	tcf_proto_put(tp, true, NULL);
	return false;
}

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcf_chain *chain, *chain_prev;
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     NULL, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index)) {
			tcf_chain_put(chain);
			err = -EMSGSIZE;
			break;
		}
	}

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
			      void *tmplt_priv, u32 chain_index,
			      struct net *net, struct sk_buff *skb,
			      struct tcf_block *block,
			      u32 portid, u32 seq, u16 flags, int event)
{
	unsigned char *b = skb_tail_pointer(skb);
	const struct tcf_proto_ops *ops;
	struct nlmsghdr *nlh;
	struct tcmsg *tcm;
	void *priv;

	ops = tmplt_ops;
	priv = tmplt_priv;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_handle = 0;
	if (block->q) {
		tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
		tcm->tcm_parent = block->q->handle;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}

	if (nla_put_u32(skb, TCA_CHAIN, chain_index))
		goto nla_put_failure;

	if (ops) {
		if (nla_put_string(skb, TCA_KIND, ops->kind))
			goto nla_put_failure;
		if (ops->tmplt_dump(skb, net, priv) < 0)
			goto nla_put_failure;
	}

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct tcf_block *block = chain->block;
	struct net *net = block->net;
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
			       chain->index, net, skb, block, portid,
			       seq, flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}

static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast)
{
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	struct net *net = block->net;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
			       block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
}
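
/* A chain template binds a chain to one classifier kind and lets that
 * classifier pre-validate filters added later. Illustrative iproute2
 * example (device and keys are made up):
 *
 *	tc chain add dev eth0 ingress chain 1 \
 *		flower dst_mac 00:11:22:33:44:55/ff:ff:ff:ff:ff:00
 *
 * Afterwards tc_new_tfilter() rejects filters of a different kind on
 * that chain via the chain->tmplt_ops check, and the classifier itself
 * may reject filters that do not fit the template.
 */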

static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
			      struct nlattr **tca,
			      struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;
	char name[IFNAMSIZ];
	void *tmplt_priv;

	/* If kind is not set, user did not specify template. */
	if (!tca[TCA_KIND])
		return 0;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
		return -EINVAL;
	}

	ops = tcf_proto_lookup_ops(name, true, extack);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
		NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
		/* Drop the module reference taken by the lookup. */
		module_put(ops->owner);
		return -EOPNOTSUPP;
	}

	tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
	if (IS_ERR(tmplt_priv)) {
		module_put(ops->owner);
		return PTR_ERR(tmplt_priv);
	}
	chain->tmplt_ops = ops;
	chain->tmplt_priv = tmplt_priv;
	return 0;
}

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv)
{
	/* If template ops are not set, there is no template to destroy and
	 * no module reference to drop.
	 */
	if (!tmplt_ops)
		return;

	tmplt_ops->tmplt_destroy(tmplt_priv);
	module_put(tmplt_ops->owner);
}

/* Add/delete/get a chain */
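
/* tc_ctl_chain() is the doit handler for RTM_NEWCHAIN, RTM_DELCHAIN and
 * RTM_GETCHAIN. One subtlety below: a chain that exists only because
 * some action references it (tcf_chain_held_by_acts_only()) is treated
 * as non-existent by DEL and GET, while RTM_NEWCHAIN may "take over"
 * such a chain and mark it explicitly created.
 */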

static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	unsigned long cl;
	int err;

	if (n->nlmsg_type != RTM_GETCHAIN &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	parent = t->tcm_parent;
	cl = 0;

	block = tcf_block_find(net, &q, &parent, &cl,
			       t->tcm_ifindex, t->tcm_block_index, extack);
	if (IS_ERR(block))
		return PTR_ERR(block);

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout_block;
	}

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (n->nlmsg_type == RTM_NEWCHAIN) {
		if (chain) {
			if (tcf_chain_held_by_acts_only(chain)) {
				/* The chain exists only because there is
				 * some action referencing it.
				 */
				tcf_chain_hold(chain);
			} else {
				NL_SET_ERR_MSG(extack, "Filter chain already exists");
				err = -EEXIST;
				goto errout_block_locked;
			}
		} else {
			if (!(n->nlmsg_flags & NLM_F_CREATE)) {
				NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
				err = -ENOENT;
				goto errout_block_locked;
			}
			chain = tcf_chain_create(block, chain_index);
			if (!chain) {
				NL_SET_ERR_MSG(extack, "Failed to create filter chain");
				err = -ENOMEM;
				goto errout_block_locked;
			}
		}
	} else {
		if (!chain || tcf_chain_held_by_acts_only(chain)) {
			NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
			err = -EINVAL;
			goto errout_block_locked;
		}
		tcf_chain_hold(chain);
	}

	if (n->nlmsg_type == RTM_NEWCHAIN) {
		/* Modifying chain requires holding parent block lock. In case
		 * the chain was successfully added, take a reference to the
		 * chain. This ensures that an empty chain does not disappear
		 * at the end of this function.
		 */
		tcf_chain_hold(chain);
		chain->explicitly_created = true;
	}
	mutex_unlock(&block->lock);

	switch (n->nlmsg_type) {
	case RTM_NEWCHAIN:
		err = tc_chain_tmplt_add(chain, net, tca, extack);
		if (err) {
			tcf_chain_put_explicitly_created(chain);
			goto errout;
		}

		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);
		break;
	case RTM_DELCHAIN:
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER, true);
		/* Flush the chain first as the user requested chain removal. */
		tcf_chain_flush(chain, true);
		/* In case the chain was successfully deleted, put a reference
		 * to the chain previously taken during addition.
		 */
		tcf_chain_put_explicitly_created(chain);
		break;
	case RTM_GETCHAIN:
		err = tc_chain_notify(chain, skb, n->nlmsg_seq,
				      n->nlmsg_seq, n->nlmsg_type, true);
		if (err < 0)
			NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
		break;
	default:
		err = -EOPNOTSUPP;
		NL_SET_ERR_MSG(extack, "Unsupported message type");
		goto errout;
	}

errout:
	tcf_chain_put(chain);
errout_block:
	tcf_block_release(q, block, true);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;

errout_block_locked:
	mutex_unlock(&block->lock);
	goto errout_block;
}

/* called with RTNL */
static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_chain *chain;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
				     rtm_tca_policy, cb->extack);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	mutex_lock(&block->lock);
	list_for_each_entry(chain, &block->chain_list, list) {
		if ((tca[TCA_CHAIN] &&
		     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
			continue;
		if (index < index_start) {
			index++;
			continue;
		}
		if (tcf_chain_held_by_acts_only(chain))
			continue;
		err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
					 chain->index, net, skb, block,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 RTM_NEWCHAIN);
		if (err <= 0)
			break;
		index++;
	}
	mutex_unlock(&block->lock);

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		tcf_block_refcnt_put(block, true);
	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->actions) {
		tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
		kfree(exts->actions);
	}
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);
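
/* Typical use of the tcf_exts_* helpers from a classifier's ->change()
 * callback (illustrative sketch; "f" is a hypothetical filter struct and
 * TCA_EXAMPLE_ACT/TCA_EXAMPLE_POLICE stand in for the classifier's own
 * attribute ids, e.g. cls_flower passes TCA_FLOWER_ACT and 0):
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, net, TCA_EXAMPLE_ACT, TCA_EXAMPLE_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, ovr, rtnl_held, extack);
 *	if (err < 0)
 *		goto errout;
 *	tcf_exts_change(&f->exts, &e);
 *
 * On success the old actions of "f" are released by tcf_exts_change();
 * on failure the temporary exts must be freed with tcf_exts_destroy().
 */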

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND, rtnl_held,
						extack);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			int err;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      exts->actions, &attr_size,
					      rtnl_held, extack);
			if (err < 0)
				return err;
			exts->nr_actions = err;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			nest = nla_nest_start_noflag(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start_noflag(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
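
/* Offload bookkeeping: block->offloadcnt counts filters on the block
 * that are present in hardware, and each filter's TCA_CLS_FLAGS_IN_HW
 * flag records whether it has been counted. The helpers below keep the
 * per-filter hardware count, the flag and the block counter consistent
 * under tp->lock.
 */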

static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
{
	if (*flags & TCA_CLS_FLAGS_IN_HW)
		return;
	*flags |= TCA_CLS_FLAGS_IN_HW;
	atomic_inc(&block->offloadcnt);
}

static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
{
	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
		return;
	*flags &= ~TCA_CLS_FLAGS_IN_HW;
	atomic_dec(&block->offloadcnt);
}

static void tc_cls_offload_cnt_update(struct tcf_block *block,
				      struct tcf_proto *tp, u32 *cnt,
				      u32 *flags, u32 diff, bool add)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	if (add) {
		if (!*cnt)
			tcf_block_offload_inc(block, flags);
		*cnt += diff;
	} else {
		*cnt -= diff;
		if (!*cnt)
			tcf_block_offload_dec(block, flags);
	}
	spin_unlock(&tp->lock);
}

static void
tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
			 u32 *cnt, u32 *flags)
{
	lockdep_assert_held(&block->cb_lock);

	spin_lock(&tp->lock);
	tcf_block_offload_dec(block, flags);
	*cnt = 0;
	spin_unlock(&tp->lock);
}

static int
__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		   void *type_data, bool err_stop)
{
	struct flow_block_cb *block_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);
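
/* The block_cb->cb() invoked by __tc_setup_cb_call() is the callback a
 * driver registered when binding to the block. Illustrative sketch of
 * such a callback (all "example_*" names are made up):
 *
 *	static int example_block_cb(enum tc_setup_type type, void *type_data,
 *				    void *cb_priv)
 *	{
 *		struct example_priv *priv = cb_priv;
 *
 *		switch (type) {
 *		case TC_SETUP_CLSFLOWER:
 *			return example_setup_flower(priv, type_data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 * A zero return contributes to ok_count; an error either aborts the walk
 * (err_stop) or is skipped while the remaining callbacks still run.
 */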

/* Non-destructive filter add. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offloads counter. On failure,
 * previously offloaded filter is considered to be intact and offloads counter
 * is not decremented.
 */

int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
					  ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_add);

/* Destructive filter replace. If filter that wasn't already in hardware is
 * successfully offloaded, increment block offload counter. On failure,
 * previously offloaded filter is considered to be destroyed and offload counter
 * is decremented.
 */

int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop) {
		ok_count = -EOPNOTSUPP;
		goto err_unlock;
	}

	tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
	if (ok_count < 0)
		goto err_unlock;

	if (tp->ops->hw_add)
		tp->ops->hw_add(tp, type_data);
	if (ok_count > 0)
		tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
					  new_flags, ok_count, true);
err_unlock:
	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_replace);

/* Destroy filter and decrement block offload counter, if filter was previously
 * offloaded.
 */

int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
{
	bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
	int ok_count;

retry:
	if (take_rtnl)
		rtnl_lock();
	down_read(&block->cb_lock);
	/* Need to obtain rtnl lock if block is bound to devs that require it.
	 * In block bind code cb_lock is obtained while holding rtnl, so we must
	 * obtain the locks in same order here.
	 */
	if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
		up_read(&block->cb_lock);
		take_rtnl = true;
		goto retry;
	}

	ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);

	tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
	if (tp->ops->hw_del)
		tp->ops->hw_del(tp, type_data);

	up_read(&block->cb_lock);
	if (take_rtnl)
		rtnl_unlock();
	return ok_count < 0 ? ok_count : 0;
}
EXPORT_SYMBOL(tc_setup_cb_destroy);

int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count)
{
	int err = cb(type, type_data, cb_priv);

	if (err) {
		if (add && tc_skip_sw(*flags))
			return err;
	} else {
		tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
					  add);
	}

	return 0;
}
EXPORT_SYMBOL(tc_setup_cb_reoffload);

static int tcf_act_get_cookie(struct flow_action_entry *entry,
			      const struct tc_action *act)
{
	struct tc_cookie *cookie;
	int err = 0;

	rcu_read_lock();
	cookie = rcu_dereference(act->act_cookie);
	if (cookie) {
		entry->cookie = flow_action_cookie_create(cookie->data,
							  cookie->len,
							  GFP_ATOMIC);
		if (!entry->cookie)
			err = -ENOMEM;
	}
	rcu_read_unlock();
	return err;
}

static void tcf_act_put_cookie(struct flow_action_entry *entry)
{
	flow_action_cookie_destroy(entry->cookie);
}

void tc_cleanup_flow_action(struct flow_action *flow_action)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, flow_action) {
		tcf_act_put_cookie(entry);
		if (entry->destructor)
			entry->destructor(entry->destructor_priv);
	}
}
EXPORT_SYMBOL(tc_cleanup_flow_action);

static void tcf_mirred_get_dev(struct flow_action_entry *entry,
			       const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	entry->dev = act->ops->get_dev(act, &entry->destructor);
	if (!entry->dev)
		return;
	entry->destructor_priv = entry->dev;
#endif
}

static void tcf_tunnel_encap_put_tunnel(void *priv)
{
	struct ip_tunnel_info *tunnel = priv;

	kfree(tunnel);
}

static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
				       const struct tc_action *act)
{
	entry->tunnel = tcf_tunnel_info_copy(act);
	if (!entry->tunnel)
		return -ENOMEM;
	entry->destructor = tcf_tunnel_encap_put_tunnel;
	entry->destructor_priv = entry->tunnel;
	return 0;
}

static void tcf_sample_get_group(struct flow_action_entry *entry,
				 const struct tc_action *act)
{
#ifdef CONFIG_NET_CLS_ACT
	entry->sample.psample_group =
		act->ops->get_psample_group(act, &entry->destructor);
	entry->destructor_priv = entry->sample.psample_group;
#endif
}
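
/* tc_setup_flow_action() translates a filter's tcf_exts into the driver
 * facing flow_action representation. Typical caller pattern (sketch
 * modelled on cls_flower's hardware offload path; error handling
 * trimmed):
 *
 *	struct flow_rule *rule;
 *
 *	rule = flow_rule_alloc(tcf_exts_num_actions(exts));
 *	if (!rule)
 *		return -ENOMEM;
 *	err = tc_setup_flow_action(&rule->action, exts);
 *	...
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &flags, &in_hw_count, rtnl_held);
 *
 * tcf_exts_num_actions() (below) must be used for sizing because one
 * pedit action expands to one flow_action entry per pedit key.
 */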

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts)
{
	struct tc_action *act;
	int i, j, k, err = 0;

	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
	BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);

	if (!exts)
		return 0;

	j = 0;
	tcf_exts_for_each_action(i, act, exts) {
		struct flow_action_entry *entry;

		entry = &flow_action->entries[j];
		spin_lock_bh(&act->tcfa_lock);
		err = tcf_act_get_cookie(entry, act);
		if (err)
			goto err_out_locked;

		entry->hw_stats = act->hw_stats;

		if (is_tcf_gact_ok(act)) {
			entry->id = FLOW_ACTION_ACCEPT;
		} else if (is_tcf_gact_shot(act)) {
			entry->id = FLOW_ACTION_DROP;
		} else if (is_tcf_gact_trap(act)) {
			entry->id = FLOW_ACTION_TRAP;
		} else if (is_tcf_gact_goto_chain(act)) {
			entry->id = FLOW_ACTION_GOTO;
			entry->chain_index = tcf_gact_goto_chain_index(act);
		} else if (is_tcf_mirred_egress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_egress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_redirect(act)) {
			entry->id = FLOW_ACTION_REDIRECT_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_mirred_ingress_mirror(act)) {
			entry->id = FLOW_ACTION_MIRRED_INGRESS;
			tcf_mirred_get_dev(entry, act);
		} else if (is_tcf_vlan(act)) {
			switch (tcf_vlan_action(act)) {
			case TCA_VLAN_ACT_PUSH:
				entry->id = FLOW_ACTION_VLAN_PUSH;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			case TCA_VLAN_ACT_POP:
				entry->id = FLOW_ACTION_VLAN_POP;
				break;
			case TCA_VLAN_ACT_MODIFY:
				entry->id = FLOW_ACTION_VLAN_MANGLE;
				entry->vlan.vid = tcf_vlan_push_vid(act);
				entry->vlan.proto = tcf_vlan_push_proto(act);
				entry->vlan.prio = tcf_vlan_push_prio(act);
				break;
			default:
				err = -EOPNOTSUPP;
				goto err_out_locked;
			}
		} else if (is_tcf_tunnel_set(act)) {
			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
			err = tcf_tunnel_encap_get_tunnel(entry, act);
			if (err)
				goto err_out_locked;
		} else if (is_tcf_tunnel_release(act)) {
			entry->id = FLOW_ACTION_TUNNEL_DECAP;
		} else if (is_tcf_pedit(act)) {
			for (k = 0; k < tcf_pedit_nkeys(act); k++) {
				switch (tcf_pedit_cmd(act, k)) {
				case TCA_PEDIT_KEY_EX_CMD_SET:
					entry->id = FLOW_ACTION_MANGLE;
					break;
				case TCA_PEDIT_KEY_EX_CMD_ADD:
					entry->id = FLOW_ACTION_ADD;
					break;
				default:
					err = -EOPNOTSUPP;
					goto err_out_locked;
				}
				entry->mangle.htype = tcf_pedit_htype(act, k);
				entry->mangle.mask = tcf_pedit_mask(act, k);
				entry->mangle.val = tcf_pedit_val(act, k);
				entry->mangle.offset = tcf_pedit_offset(act, k);
				entry->hw_stats = act->hw_stats;
				entry = &flow_action->entries[++j];
			}
		} else if (is_tcf_csum(act)) {
			entry->id = FLOW_ACTION_CSUM;
			entry->csum_flags = tcf_csum_update_flags(act);
		} else if (is_tcf_skbedit_mark(act)) {
			entry->id = FLOW_ACTION_MARK;
			entry->mark = tcf_skbedit_mark(act);
		} else if (is_tcf_sample(act)) {
			entry->id = FLOW_ACTION_SAMPLE;
			entry->sample.trunc_size = tcf_sample_trunc_size(act);
			entry->sample.truncate = tcf_sample_truncate(act);
			entry->sample.rate = tcf_sample_rate(act);
			tcf_sample_get_group(entry, act);
		} else if (is_tcf_police(act)) {
			entry->id = FLOW_ACTION_POLICE;
			entry->police.burst = tcf_police_tcfp_burst(act);
			entry->police.rate_bytes_ps =
				tcf_police_rate_bytes_ps(act);
		} else if (is_tcf_ct(act)) {
			entry->id = FLOW_ACTION_CT;
			entry->ct.action = tcf_ct_action(act);
			entry->ct.zone = tcf_ct_zone(act);
			entry->ct.flow_table = tcf_ct_ft(act);
		} else if (is_tcf_mpls(act)) {
			switch (tcf_mpls_action(act)) {
			case TCA_MPLS_ACT_PUSH:
				entry->id = FLOW_ACTION_MPLS_PUSH;
				entry->mpls_push.proto = tcf_mpls_proto(act);
				entry->mpls_push.label = tcf_mpls_label(act);
				entry->mpls_push.tc = tcf_mpls_tc(act);
				entry->mpls_push.bos = tcf_mpls_bos(act);
				entry->mpls_push.ttl = tcf_mpls_ttl(act);
				break;
			case TCA_MPLS_ACT_POP:
				entry->id = FLOW_ACTION_MPLS_POP;
				entry->mpls_pop.proto = tcf_mpls_proto(act);
				break;
			case TCA_MPLS_ACT_MODIFY:
				entry->id = FLOW_ACTION_MPLS_MANGLE;
				entry->mpls_mangle.label = tcf_mpls_label(act);
				entry->mpls_mangle.tc = tcf_mpls_tc(act);
				entry->mpls_mangle.bos = tcf_mpls_bos(act);
				entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
				break;
			default:
				/* Unsupported mpls sub-action; do not report
				 * success like a fall-through with err == 0
				 * would.
				 */
				err = -EOPNOTSUPP;
				goto err_out_locked;
			}
		} else if (is_tcf_skbedit_ptype(act)) {
			entry->id = FLOW_ACTION_PTYPE;
			entry->ptype = tcf_skbedit_ptype(act);
		} else if (is_tcf_skbedit_priority(act)) {
			entry->id = FLOW_ACTION_PRIORITY;
			entry->priority = tcf_skbedit_priority(act);
		} else {
			err = -EOPNOTSUPP;
			goto err_out_locked;
		}
		spin_unlock_bh(&act->tcfa_lock);

		if (!is_tcf_pedit(act))
			j++;
	}

err_out:
	if (err)
		tc_cleanup_flow_action(flow_action);

	return err;
err_out_locked:
	spin_unlock_bh(&act->tcfa_lock);
	goto err_out;
}
EXPORT_SYMBOL(tc_setup_flow_action);

unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
	unsigned int num_acts = 0;
	struct tc_action *act;
	int i;

	tcf_exts_for_each_action(i, act, exts) {
		if (is_tcf_pedit(act))
			num_acts += tcf_pedit_nkeys(act);
		else
			num_acts++;
	}
	return num_acts;
}
EXPORT_SYMBOL(tcf_exts_num_actions);

static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock_init(&tn->idr_lock);
	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

static struct flow_indr_block_entry block_entry = {
	.cb = tc_indr_block_get_and_cmd,
	.list = LIST_HEAD_INIT(block_entry.list),
};
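
/* Note that the filter doit handlers are registered with
 * RTNL_FLAG_DOIT_UNLOCKED: they take the RTNL mutex themselves only when
 * needed (see the rtnl_held logic above), while the chain handlers and
 * all dump callbacks still run under RTNL.
 */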

static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	flow_indr_add_block_cb(&block_entry);

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
		      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
	rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
		      tc_dump_chain, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);