// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}
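
/* Note on the three helpers above: a tcf_proto that is being destroyed is
 * kept in block->proto_destroy_ht, keyed by (chain index, prio, protocol),
 * until its destroy() has completed.  tcf_chain_tp_insert_unique() further
 * below consults this table, so a concurrent insert of an identical triple
 * fails with -EAGAIN and the request is replayed instead of racing with the
 * teardown of the old instance.
 */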

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
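
/* Illustrative sketch (not part of this file): the registration pattern a
 * classifier module follows.  The "example" names are hypothetical; see
 * cls_basic.c or cls_flower.c for complete implementations.  The "cls_"
 * module name prefix matters: it is what the request_module() call above
 * relies on for on-demand loading.
 *
 *	static struct tcf_proto_ops cls_example_ops __read_mostly = {
 *		.kind		= "example",
 *		.classify	= example_classify,
 *		.init		= example_init,
 *		.destroy	= example_destroy,
 *		.get		= example_get,
 *		.change		= example_change,
 *		.delete		= example_delete,
 *		.walk		= example_walk,
 *		.dump		= example_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init example_init_module(void)
 *	{
 *		return register_tcf_proto_ops(&cls_example_ops);
 *	}
 *
 *	static void __exit example_exit_module(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_example_ops);
 *	}
 *
 *	module_init(example_init_module);
 *	module_exit(example_exit_module);
 */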

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
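
/* Worked example (illustrative): priorities live in the major half of the
 * 32-bit value.  With an empty chain, tcf_auto_prio(NULL) yields
 * TC_H_MAJ(0xC0000000) == 0xC0000000.  If the current head has prio
 * 0xC0000000, the next auto-allocated prio is
 * TC_H_MAJ(0xC0000000 - 1) == 0xBFFF0000, i.e. each new auto-prio filter
 * is placed in front of the head with a strictly smaller priority.
 */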

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
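
/* Illustrative sketch (not part of this file): how an action takes a chain
 * reference.  act_api does essentially the following when initializing a
 * "goto chain X" action; the variable names here are simplified.
 *
 *	struct tcf_chain *goto_chain;
 *
 *	goto_chain = tcf_chain_get_by_act(block, chain_index);
 *	if (!goto_chain)
 *		return -ENOMEM;		// chain could not be created
 *	...
 *	// and on action teardown:
 *	tcf_chain_put_by_act(goto_chain);
 *
 * Such references bump action_refcnt as well as refcnt, so per
 * tcf_chain_held_by_acts_only() the chain stays invisible to dumps until a
 * filter (non-action) reference appears.
 */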

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}
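
/* Note on tcf_chain_flush() above: the flush is two-phase.  Under
 * filter_chain_lock every tp is first recorded in the destroy hashtable
 * (so concurrent inserts of the same (chain, prio, protocol) triple back
 * off with -EAGAIN) and the chain head is detached; the reference drops
 * that actually destroy the tps happen only after the lock is released,
 * since tcf_proto_put() may sleep in the classifier's destroy path.
 */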

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block,
			      flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      enum flow_block_command command, bool ingress)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ingress ?
				  FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS :
				  FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
		.net		= dev_net(dev),
		.block_shared	= tcf_block_non_null_shared(block),
	};
	INIT_LIST_HEAD(&bo.cb_list);

	if (!block)
		return;

	bo.block = &block->flow_block;

	down_write(&block->cb_lock);
	cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

	tcf_block_setup(block, &bo);
	up_write(&block->cb_lock);
}

static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress)
{
	const struct Qdisc_class_ops *cops;
	const struct Qdisc_ops *ops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	ops = qdisc->ops;
	if (!ops)
		return NULL;

	if (!ingress && !strcmp("ingress", ops->id))
		return NULL;

	cops = ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc,
			       ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS,
			       NULL);
}

static void tc_indr_block_get_and_cmd(struct net_device *dev,
				      flow_indr_block_bind_cb_t *cb,
				      void *cb_priv,
				      enum flow_block_command command)
{
	struct tcf_block *block;

	block = tc_dev_block(dev, true);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);

	block = tc_dev_block(dev, false);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
}

static void tc_indr_block_call(struct tcf_block *block,
			       struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum flow_block_command command,
			       struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ei->binder_type,
		.net		= dev_net(dev),
		.block		= &block->flow_block,
		.block_shared	= tcf_block_shared(block),
		.extack		= extack,
	};
	INIT_LIST_HEAD(&bo.cb_list);

	flow_indr_block_call(dev, &bo, command, TC_SETUP_BLOCK);
	tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};
	int err;

	bo.net = dev_net(dev);
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = &block->flow_block;
	bo.block_shared = tcf_block_shared(block);
	bo.extack = extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0) {
		if (err != -EOPNOTSUPP)
			NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
		return err;
	}

	return tcf_block_setup(block, &bo);
}
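
/* Illustrative sketch (not part of this file): the driver side of the
 * TC_SETUP_BLOCK exchange issued by tcf_block_offload_cmd() above.  A
 * typical driver answers with flow_block_cb_setup_simple(), which queues
 * flow_block_cb entries on bo->cb_list for tcf_block_setup() to bind.
 * All "example_*" names are hypothetical.
 *
 *	static LIST_HEAD(example_block_cb_list);
 *
 *	static int example_setup_tc(struct net_device *dev,
 *				    enum tc_setup_type type, void *type_data)
 *	{
 *		switch (type) {
 *		case TC_SETUP_BLOCK:
 *			return flow_block_cb_setup_simple(type_data,
 *							  &example_block_cb_list,
 *							  example_setup_cb,
 *							  dev, dev, true);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */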

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block)) {
		err = -EOPNOTSUPP;
		goto err_unlock;
	}
	err = 0;
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}
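
/* Illustrative sketch (not part of this file): what a chain_head_change
 * callback looks like.  sch_ingress.c registers essentially this, so the
 * qdisc's cached mini_Qdisc pointer is swapped whenever the head of
 * chain 0 changes:
 *
 *	static void clsact_chain_head_change(struct tcf_proto *tp_head,
 *					     void *priv)
 *	{
 *		struct mini_Qdisc_pair *miniqp = priv;
 *
 *		mini_qdisc_pair_swap(miniqp, tp_head);
 *	}
 *
 * and passes it in via tcf_block_ext_info before calling
 * tcf_block_get_ext():
 *
 *	q->block_info.chain_head_change = clsact_chain_head_change;
 *	q->block_info.chain_head_change_priv = &q->miniqp;
 */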

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}
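
/* Usage note (illustrative): shared blocks are the only blocks entered in
 * the per-netns idr above.  From userspace they are created by giving a
 * nonzero block index when attaching a qdisc, e.g. with iproute2:
 *
 *	tc qdisc add dev eth0 ingress_block 22 ingress
 *	tc qdisc add dev eth1 ingress_block 22 ingress
 *	tc filter add block 22 protocol ip flower ... action drop
 *
 * Both devices then share one filter block, and filters are manipulated
 * through the block index (TCM_IFINDEX_MAGIC_BLOCK) rather than a qdisc.
 */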

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
		   bool rtnl_held)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, rtnl_held, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
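
/* Illustrative sketch (not part of this file): walking every filter on a
 * block with the two iterators above.  The put on the previous element is
 * done by the iterators themselves, so a caller breaking out early must
 * release the current chain/tp explicitly (compare
 * tcf_block_playback_offloads() further below, which uses the __-prefixed
 * variants with explicit puts).
 *
 *	struct tcf_chain *chain;
 *	struct tcf_proto *tp;
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		for (tp = tcf_get_next_proto(chain, NULL, true);
 *		     tp;
 *		     tp = tcf_get_next_proto(chain, tp, true))
 *			inspect(tp);	// hypothetical per-filter hook
 *	}
 */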

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. The 'if'
		 * branch of this conditional obtains its reference via
		 * tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
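
/* Illustrative sketch (not part of this file): how a classful qdisc wires
 * itself up with tcf_block_get() from its ->init().  The "example_sched"
 * names are hypothetical; prio, multiq and others follow this pattern.
 *
 *	struct example_sched_data {
 *		struct tcf_block *block;
 *		struct tcf_proto __rcu *filter_list;
 *		...
 *	};
 *
 *	static int example_init(struct Qdisc *sch, struct nlattr *opt,
 *				struct netlink_ext_ack *extack)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *		int err;
 *
 *		err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *		if (err)
 *			return err;
 *		...
 *	}
 *
 * The matching ->destroy() must call tcf_block_put(q->block).
 */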

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = tc_skb_protocol(skb);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
}
EXPORT_SYMBOL(tcf_classify);
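
/* Illustrative sketch (not part of this file): how a qdisc's classify step
 * typically consumes tcf_classify().  Loosely modeled on prio/multiq style
 * handlers; "q->filter_list" is the qdisc's chain 0 head, kept current by
 * the chain_head_change callback.
 *
 *	struct tcf_result res;
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	int err;
 *
 *	err = tcf_classify(skb, fl, &res, false);
 *	switch (err) {
 *	case TC_ACT_SHOT:
 *	case TC_ACT_STOLEN:
 *	case TC_ACT_QUEUED:
 *	case TC_ACT_TRAP:
 *		// dropped or already consumed; do not enqueue
 *		return NULL;
 *	case TC_ACT_UNSPEC:
 *		// no filter matched: fall back to a default class
 *		break;
 *	default:
 *		// res.classid selects the target class
 *		break;
 *	}
 */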

int tcf_classify_ingress(struct sk_buff *skb,
			 const struct tcf_block *ingress_block,
			 const struct tcf_proto *tp,
			 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int ret;

	ext = skb_ext_find(skb, TC_SKB_EXT);

	if (ext && ext->chain) {
		struct tcf_chain *fchain;

		fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain);
		if (!fchain)
			return TC_ACT_SHOT;

		/* Consume, so cloned/redirect skbs won't inherit ext */
		skb_ext_del(skb, TC_SKB_EXT);

		tp = rcu_dereference_bh(fchain->filter_chain);
		last_executed_chain = fchain->index;
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
			     &last_executed_chain);

	/* If we missed on some chain */
	if (ret == TC_ACT_UNSPEC && last_executed_chain) {
		ext = skb_ext_add(skb, TC_SKB_EXT);
		if (WARN_ON_ONCE(!ext))
			return TC_ACT_SHOT;
		ext->chain = last_executed_chain;
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify_ingress);

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}

static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool rtnl_held)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  rtnl_held) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);

	if (err > 0)
		err = 0;
	return err;
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  rtnl_held) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	if (err > 0)
		err = 0;
	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event,
				 bool rtnl_held)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
	     tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false, rtnl_held);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}
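
/* Usage note (illustrative): tc_new_tfilter() below services RTM_NEWTFILTER
 * requests.  A request such as
 *
 *	tc filter add dev eth0 parent ffff: protocol ip prio 10 \
 *		flower dst_ip 192.0.2.1 action drop
 *
 * arrives with tcm_info carrying prio in the major half and the protocol
 * (ETH_P_IP here) in the minor half, TCA_KIND = "flower" and an optional
 * TCA_CHAIN attribute.  Leaving "prio" off together with NLM_F_CREATE
 * makes the kernel pick one via tcf_auto_prio().
 */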

static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

	/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
	 * block is shared (no qdisc found), qdisc is not unlocked, classifier
	 * type is not specified, classifier is not unlocked.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(name, protocol, prio, chain,
					  rtnl_held, extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
			      rtnl_held, extack);
TCA_ACT_NOREPLACE : TCA_ACT_REPLACE, 2174 rtnl_held, extack); 2175 if (err == 0) { 2176 tfilter_notify(net, skb, n, tp, block, q, parent, fh, 2177 RTM_NEWTFILTER, false, rtnl_held); 2178 tfilter_put(tp, fh); 2179 /* q pointer is NULL for shared blocks */ 2180 if (q) 2181 q->flags &= ~TCQ_F_CAN_BYPASS; 2182 } 2183 2184 errout: 2185 if (err && tp_created) 2186 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL); 2187 errout_tp: 2188 if (chain) { 2189 if (tp && !IS_ERR(tp)) 2190 tcf_proto_put(tp, rtnl_held, NULL); 2191 if (!tp_created) 2192 tcf_chain_put(chain); 2193 } 2194 tcf_block_release(q, block, rtnl_held); 2195 2196 if (rtnl_held) 2197 rtnl_unlock(); 2198 2199 if (err == -EAGAIN) { 2200 /* Take rtnl lock in case EAGAIN is caused by concurrent flush 2201 * of target chain. 2202 */ 2203 rtnl_held = true; 2204 /* Replay the request. */ 2205 goto replay; 2206 } 2207 return err; 2208 2209 errout_locked: 2210 mutex_unlock(&chain->filter_chain_lock); 2211 goto errout; 2212 } 2213 2214 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 2215 struct netlink_ext_ack *extack) 2216 { 2217 struct net *net = sock_net(skb->sk); 2218 struct nlattr *tca[TCA_MAX + 1]; 2219 char name[IFNAMSIZ]; 2220 struct tcmsg *t; 2221 u32 protocol; 2222 u32 prio; 2223 u32 parent; 2224 u32 chain_index; 2225 struct Qdisc *q = NULL; 2226 struct tcf_chain_info chain_info; 2227 struct tcf_chain *chain = NULL; 2228 struct tcf_block *block = NULL; 2229 struct tcf_proto *tp = NULL; 2230 unsigned long cl = 0; 2231 void *fh = NULL; 2232 int err; 2233 bool rtnl_held = false; 2234 2235 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) 2236 return -EPERM; 2237 2238 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2239 rtm_tca_policy, extack); 2240 if (err < 0) 2241 return err; 2242 2243 t = nlmsg_data(n); 2244 protocol = TC_H_MIN(t->tcm_info); 2245 prio = TC_H_MAJ(t->tcm_info); 2246 parent = t->tcm_parent; 2247 2248 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) { 2249 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set"); 2250 return -ENOENT; 2251 } 2252 2253 /* Find head of filter chain. */ 2254 2255 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); 2256 if (err) 2257 return err; 2258 2259 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2260 NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); 2261 err = -EINVAL; 2262 goto errout; 2263 } 2264 /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc 2265 * found), qdisc is not unlocked, classifier type is not specified, 2266 * classifier is not unlocked. 2267 */ 2268 if (!prio || 2269 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || 2270 !tcf_proto_is_unlocked(name)) { 2271 rtnl_held = true; 2272 rtnl_lock(); 2273 } 2274 2275 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); 2276 if (err) 2277 goto errout; 2278 2279 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, 2280 extack); 2281 if (IS_ERR(block)) { 2282 err = PTR_ERR(block); 2283 goto errout; 2284 } 2285 2286 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2287 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2288 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2289 err = -EINVAL; 2290 goto errout; 2291 } 2292 chain = tcf_chain_get(block, chain_index, false); 2293 if (!chain) { 2294 /* User requested flush on non-existent chain. Nothing to do, 2295 * so just return success. 
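 * (A zero prio here can only mean a flush request: zero prio combined
 * with protocol, handle or kind set was rejected earlier in this
 * function.)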
2296 */ 2297 if (prio == 0) { 2298 err = 0; 2299 goto errout; 2300 } 2301 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 2302 err = -ENOENT; 2303 goto errout; 2304 } 2305 2306 if (prio == 0) { 2307 tfilter_notify_chain(net, skb, block, q, parent, n, 2308 chain, RTM_DELTFILTER, rtnl_held); 2309 tcf_chain_flush(chain, rtnl_held); 2310 err = 0; 2311 goto errout; 2312 } 2313 2314 mutex_lock(&chain->filter_chain_lock); 2315 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 2316 prio, false); 2317 if (!tp || IS_ERR(tp)) { 2318 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); 2319 err = tp ? PTR_ERR(tp) : -ENOENT; 2320 goto errout_locked; 2321 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 2322 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); 2323 err = -EINVAL; 2324 goto errout_locked; 2325 } else if (t->tcm_handle == 0) { 2326 tcf_proto_signal_destroying(chain, tp); 2327 tcf_chain_tp_remove(chain, &chain_info, tp); 2328 mutex_unlock(&chain->filter_chain_lock); 2329 2330 tcf_proto_put(tp, rtnl_held, NULL); 2331 tfilter_notify(net, skb, n, tp, block, q, parent, fh, 2332 RTM_DELTFILTER, false, rtnl_held); 2333 err = 0; 2334 goto errout; 2335 } 2336 mutex_unlock(&chain->filter_chain_lock); 2337 2338 fh = tp->ops->get(tp, t->tcm_handle); 2339 2340 if (!fh) { 2341 NL_SET_ERR_MSG(extack, "Specified filter handle not found"); 2342 err = -ENOENT; 2343 } else { 2344 bool last; 2345 2346 err = tfilter_del_notify(net, skb, n, tp, block, 2347 q, parent, fh, false, &last, 2348 rtnl_held, extack); 2349 2350 if (err) 2351 goto errout; 2352 if (last) 2353 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack); 2354 } 2355 2356 errout: 2357 if (chain) { 2358 if (tp && !IS_ERR(tp)) 2359 tcf_proto_put(tp, rtnl_held, NULL); 2360 tcf_chain_put(chain); 2361 } 2362 tcf_block_release(q, block, rtnl_held); 2363 2364 if (rtnl_held) 2365 rtnl_unlock(); 2366 2367 return err; 2368 2369 errout_locked: 2370 mutex_unlock(&chain->filter_chain_lock); 2371 goto errout; 2372 } 2373 2374 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 2375 struct netlink_ext_ack *extack) 2376 { 2377 struct net *net = sock_net(skb->sk); 2378 struct nlattr *tca[TCA_MAX + 1]; 2379 char name[IFNAMSIZ]; 2380 struct tcmsg *t; 2381 u32 protocol; 2382 u32 prio; 2383 u32 parent; 2384 u32 chain_index; 2385 struct Qdisc *q = NULL; 2386 struct tcf_chain_info chain_info; 2387 struct tcf_chain *chain = NULL; 2388 struct tcf_block *block = NULL; 2389 struct tcf_proto *tp = NULL; 2390 unsigned long cl = 0; 2391 void *fh = NULL; 2392 int err; 2393 bool rtnl_held = false; 2394 2395 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2396 rtm_tca_policy, extack); 2397 if (err < 0) 2398 return err; 2399 2400 t = nlmsg_data(n); 2401 protocol = TC_H_MIN(t->tcm_info); 2402 prio = TC_H_MAJ(t->tcm_info); 2403 parent = t->tcm_parent; 2404 2405 if (prio == 0) { 2406 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); 2407 return -ENOENT; 2408 } 2409 2410 /* Find head of filter chain. */ 2411 2412 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); 2413 if (err) 2414 return err; 2415 2416 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2417 NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); 2418 err = -EINVAL; 2419 goto errout; 2420 } 2421 /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not 2422 * unlocked, classifier type is not specified, classifier is not 2423 * unlocked. 
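 * Note that tcf_proto_is_unlocked() conservatively returns false when the
 * classifier type lookup fails, so rtnl is taken in the doubtful cases.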
2424 */ 2425 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || 2426 !tcf_proto_is_unlocked(name)) { 2427 rtnl_held = true; 2428 rtnl_lock(); 2429 } 2430 2431 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); 2432 if (err) 2433 goto errout; 2434 2435 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, 2436 extack); 2437 if (IS_ERR(block)) { 2438 err = PTR_ERR(block); 2439 goto errout; 2440 } 2441 2442 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2443 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2444 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2445 err = -EINVAL; 2446 goto errout; 2447 } 2448 chain = tcf_chain_get(block, chain_index, false); 2449 if (!chain) { 2450 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 2451 err = -EINVAL; 2452 goto errout; 2453 } 2454 2455 mutex_lock(&chain->filter_chain_lock); 2456 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 2457 prio, false); 2458 mutex_unlock(&chain->filter_chain_lock); 2459 if (!tp || IS_ERR(tp)) { 2460 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); 2461 err = tp ? PTR_ERR(tp) : -ENOENT; 2462 goto errout; 2463 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 2464 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); 2465 err = -EINVAL; 2466 goto errout; 2467 } 2468 2469 fh = tp->ops->get(tp, t->tcm_handle); 2470 2471 if (!fh) { 2472 NL_SET_ERR_MSG(extack, "Specified filter handle not found"); 2473 err = -ENOENT; 2474 } else { 2475 err = tfilter_notify(net, skb, n, tp, block, q, parent, 2476 fh, RTM_NEWTFILTER, true, rtnl_held); 2477 if (err < 0) 2478 NL_SET_ERR_MSG(extack, "Failed to send filter notify message"); 2479 } 2480 2481 tfilter_put(tp, fh); 2482 errout: 2483 if (chain) { 2484 if (tp && !IS_ERR(tp)) 2485 tcf_proto_put(tp, rtnl_held, NULL); 2486 tcf_chain_put(chain); 2487 } 2488 tcf_block_release(q, block, rtnl_held); 2489 2490 if (rtnl_held) 2491 rtnl_unlock(); 2492 2493 return err; 2494 } 2495 2496 struct tcf_dump_args { 2497 struct tcf_walker w; 2498 struct sk_buff *skb; 2499 struct netlink_callback *cb; 2500 struct tcf_block *block; 2501 struct Qdisc *q; 2502 u32 parent; 2503 }; 2504 2505 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg) 2506 { 2507 struct tcf_dump_args *a = (void *)arg; 2508 struct net *net = sock_net(a->skb->sk); 2509 2510 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent, 2511 n, NETLINK_CB(a->cb->skb).portid, 2512 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, 2513 RTM_NEWTFILTER, true); 2514 } 2515 2516 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, 2517 struct sk_buff *skb, struct netlink_callback *cb, 2518 long index_start, long *p_index) 2519 { 2520 struct net *net = sock_net(skb->sk); 2521 struct tcf_block *block = chain->block; 2522 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2523 struct tcf_proto *tp, *tp_prev; 2524 struct tcf_dump_args arg; 2525 2526 for (tp = __tcf_get_next_proto(chain, NULL); 2527 tp; 2528 tp_prev = tp, 2529 tp = __tcf_get_next_proto(chain, tp), 2530 tcf_proto_put(tp_prev, true, NULL), 2531 (*p_index)++) { 2532 if (*p_index < index_start) 2533 continue; 2534 if (TC_H_MAJ(tcm->tcm_info) && 2535 TC_H_MAJ(tcm->tcm_info) != tp->prio) 2536 continue; 2537 if (TC_H_MIN(tcm->tcm_info) && 2538 TC_H_MIN(tcm->tcm_info) != tp->protocol) 2539 continue; 2540 if (*p_index > index_start) 2541 memset(&cb->args[1], 0, 2542 sizeof(cb->args) - 
sizeof(cb->args[0])); 2543 if (cb->args[1] == 0) { 2544 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, 2545 NETLINK_CB(cb->skb).portid, 2546 cb->nlh->nlmsg_seq, NLM_F_MULTI, 2547 RTM_NEWTFILTER, true) <= 0) 2548 goto errout; 2549 cb->args[1] = 1; 2550 } 2551 if (!tp->ops->walk) 2552 continue; 2553 arg.w.fn = tcf_node_dump; 2554 arg.skb = skb; 2555 arg.cb = cb; 2556 arg.block = block; 2557 arg.q = q; 2558 arg.parent = parent; 2559 arg.w.stop = 0; 2560 arg.w.skip = cb->args[1] - 1; 2561 arg.w.count = 0; 2562 arg.w.cookie = cb->args[2]; 2563 tp->ops->walk(tp, &arg.w, true); 2564 cb->args[2] = arg.w.cookie; 2565 cb->args[1] = arg.w.count + 1; 2566 if (arg.w.stop) 2567 goto errout; 2568 } 2569 return true; 2570 2571 errout: 2572 tcf_proto_put(tp, true, NULL); 2573 return false; 2574 } 2575 2576 /* called with RTNL */ 2577 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) 2578 { 2579 struct tcf_chain *chain, *chain_prev; 2580 struct net *net = sock_net(skb->sk); 2581 struct nlattr *tca[TCA_MAX + 1]; 2582 struct Qdisc *q = NULL; 2583 struct tcf_block *block; 2584 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2585 long index_start; 2586 long index; 2587 u32 parent; 2588 int err; 2589 2590 if (nlmsg_len(cb->nlh) < sizeof(*tcm)) 2591 return skb->len; 2592 2593 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, 2594 NULL, cb->extack); 2595 if (err) 2596 return err; 2597 2598 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { 2599 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); 2600 if (!block) 2601 goto out; 2602 /* If we work with block index, q is NULL and parent value 2603 * will never be used in the following code. The check 2604 * in tcf_fill_node prevents it. However, compiler does not 2605 * see that far, so set parent to zero to silence the warning 2606 * about parent being uninitialized. 
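 * The block reference taken by tcf_block_refcnt_get() above is dropped
 * again once the chain dump loop below has completed.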
2607 */ 2608 parent = 0; 2609 } else { 2610 const struct Qdisc_class_ops *cops; 2611 struct net_device *dev; 2612 unsigned long cl = 0; 2613 2614 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 2615 if (!dev) 2616 return skb->len; 2617 2618 parent = tcm->tcm_parent; 2619 if (!parent) { 2620 q = dev->qdisc; 2621 parent = q->handle; 2622 } else { 2623 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 2624 } 2625 if (!q) 2626 goto out; 2627 cops = q->ops->cl_ops; 2628 if (!cops) 2629 goto out; 2630 if (!cops->tcf_block) 2631 goto out; 2632 if (TC_H_MIN(tcm->tcm_parent)) { 2633 cl = cops->find(q, tcm->tcm_parent); 2634 if (cl == 0) 2635 goto out; 2636 } 2637 block = cops->tcf_block(q, cl, NULL); 2638 if (!block) 2639 goto out; 2640 if (tcf_block_shared(block)) 2641 q = NULL; 2642 } 2643 2644 index_start = cb->args[0]; 2645 index = 0; 2646 2647 for (chain = __tcf_get_next_chain(block, NULL); 2648 chain; 2649 chain_prev = chain, 2650 chain = __tcf_get_next_chain(block, chain), 2651 tcf_chain_put(chain_prev)) { 2652 if (tca[TCA_CHAIN] && 2653 nla_get_u32(tca[TCA_CHAIN]) != chain->index) 2654 continue; 2655 if (!tcf_chain_dump(chain, q, parent, skb, cb, 2656 index_start, &index)) { 2657 tcf_chain_put(chain); 2658 err = -EMSGSIZE; 2659 break; 2660 } 2661 } 2662 2663 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) 2664 tcf_block_refcnt_put(block, true); 2665 cb->args[0] = index; 2666 2667 out: 2668 /* If we did no progress, the error (EMSGSIZE) is real */ 2669 if (skb->len == 0 && err) 2670 return err; 2671 return skb->len; 2672 } 2673 2674 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops, 2675 void *tmplt_priv, u32 chain_index, 2676 struct net *net, struct sk_buff *skb, 2677 struct tcf_block *block, 2678 u32 portid, u32 seq, u16 flags, int event) 2679 { 2680 unsigned char *b = skb_tail_pointer(skb); 2681 const struct tcf_proto_ops *ops; 2682 struct nlmsghdr *nlh; 2683 struct tcmsg *tcm; 2684 void *priv; 2685 2686 ops = tmplt_ops; 2687 priv = tmplt_priv; 2688 2689 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); 2690 if (!nlh) 2691 goto out_nlmsg_trim; 2692 tcm = nlmsg_data(nlh); 2693 tcm->tcm_family = AF_UNSPEC; 2694 tcm->tcm__pad1 = 0; 2695 tcm->tcm__pad2 = 0; 2696 tcm->tcm_handle = 0; 2697 if (block->q) { 2698 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex; 2699 tcm->tcm_parent = block->q->handle; 2700 } else { 2701 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; 2702 tcm->tcm_block_index = block->index; 2703 } 2704 2705 if (nla_put_u32(skb, TCA_CHAIN, chain_index)) 2706 goto nla_put_failure; 2707 2708 if (ops) { 2709 if (nla_put_string(skb, TCA_KIND, ops->kind)) 2710 goto nla_put_failure; 2711 if (ops->tmplt_dump(skb, net, priv) < 0) 2712 goto nla_put_failure; 2713 } 2714 2715 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 2716 return skb->len; 2717 2718 out_nlmsg_trim: 2719 nla_put_failure: 2720 nlmsg_trim(skb, b); 2721 return -EMSGSIZE; 2722 } 2723 2724 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, 2725 u32 seq, u16 flags, int event, bool unicast) 2726 { 2727 u32 portid = oskb ? 
NETLINK_CB(oskb).portid : 0; 2728 struct tcf_block *block = chain->block;
2729 struct net *net = block->net;
2730 struct sk_buff *skb;
2731 int err = 0;
2732
2733 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2734 if (!skb)
2735 return -ENOBUFS;
2736
2737 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2738 chain->index, net, skb, block, portid,
2739 seq, flags, event) <= 0) {
2740 kfree_skb(skb);
2741 return -EINVAL;
2742 }
2743
2744 if (unicast)
2745 err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2746 else
2747 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2748 flags & NLM_F_ECHO);
2749
2750 if (err > 0)
2751 err = 0;
2752 return err;
2753 }
2754
2755 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2756 void *tmplt_priv, u32 chain_index,
2757 struct tcf_block *block, struct sk_buff *oskb,
2758 u32 seq, u16 flags, bool unicast)
2759 {
2760 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2761 struct net *net = block->net;
2762 struct sk_buff *skb;
2763
2764 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2765 if (!skb)
2766 return -ENOBUFS;
2767
2768 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2769 block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2770 kfree_skb(skb);
2771 return -EINVAL;
2772 }
2773
2774 if (unicast)
2775 return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2776
2777 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2778 }
2779
2780 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2781 struct nlattr **tca,
2782 struct netlink_ext_ack *extack)
2783 {
2784 const struct tcf_proto_ops *ops;
2785 char name[IFNAMSIZ];
2786 void *tmplt_priv;
2787
2788 /* If kind is not set, user did not specify template. */
2789 if (!tca[TCA_KIND])
2790 return 0;
2791
2792 if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2793 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2794 return -EINVAL;
2795 }
2796
2797 ops = tcf_proto_lookup_ops(name, true, extack);
2798 if (IS_ERR(ops))
2799 return PTR_ERR(ops);
2800 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2801 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
/* Drop the module reference taken by tcf_proto_lookup_ops() above,
 * as the tmplt_create() failure path below already does; returning
 * without it would leak a reference on the classifier module.
 */
module_put(ops->owner);
2802 return -EOPNOTSUPP;
2803 }
2804
2805 tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2806 if (IS_ERR(tmplt_priv)) {
2807 module_put(ops->owner);
2808 return PTR_ERR(tmplt_priv);
2809 }
2810 chain->tmplt_ops = ops;
2811 chain->tmplt_priv = tmplt_priv;
2812 return 0;
2813 }
2814
2815 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2816 void *tmplt_priv)
2817 {
2818 /* If template ops are not set, there is no work to do for us.
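 * Otherwise release the template private data and drop the module
 * reference taken in tc_chain_tmplt_add().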
*/ 2819 if (!tmplt_ops) 2820 return; 2821 2822 tmplt_ops->tmplt_destroy(tmplt_priv); 2823 module_put(tmplt_ops->owner); 2824 } 2825 2826 /* Add/delete/get a chain */ 2827 2828 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, 2829 struct netlink_ext_ack *extack) 2830 { 2831 struct net *net = sock_net(skb->sk); 2832 struct nlattr *tca[TCA_MAX + 1]; 2833 struct tcmsg *t; 2834 u32 parent; 2835 u32 chain_index; 2836 struct Qdisc *q = NULL; 2837 struct tcf_chain *chain = NULL; 2838 struct tcf_block *block; 2839 unsigned long cl; 2840 int err; 2841 2842 if (n->nlmsg_type != RTM_GETCHAIN && 2843 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) 2844 return -EPERM; 2845 2846 replay: 2847 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2848 rtm_tca_policy, extack); 2849 if (err < 0) 2850 return err; 2851 2852 t = nlmsg_data(n); 2853 parent = t->tcm_parent; 2854 cl = 0; 2855 2856 block = tcf_block_find(net, &q, &parent, &cl, 2857 t->tcm_ifindex, t->tcm_block_index, extack); 2858 if (IS_ERR(block)) 2859 return PTR_ERR(block); 2860 2861 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2862 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2863 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2864 err = -EINVAL; 2865 goto errout_block; 2866 } 2867 2868 mutex_lock(&block->lock); 2869 chain = tcf_chain_lookup(block, chain_index); 2870 if (n->nlmsg_type == RTM_NEWCHAIN) { 2871 if (chain) { 2872 if (tcf_chain_held_by_acts_only(chain)) { 2873 /* The chain exists only because there is 2874 * some action referencing it. 2875 */ 2876 tcf_chain_hold(chain); 2877 } else { 2878 NL_SET_ERR_MSG(extack, "Filter chain already exists"); 2879 err = -EEXIST; 2880 goto errout_block_locked; 2881 } 2882 } else { 2883 if (!(n->nlmsg_flags & NLM_F_CREATE)) { 2884 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain"); 2885 err = -ENOENT; 2886 goto errout_block_locked; 2887 } 2888 chain = tcf_chain_create(block, chain_index); 2889 if (!chain) { 2890 NL_SET_ERR_MSG(extack, "Failed to create filter chain"); 2891 err = -ENOMEM; 2892 goto errout_block_locked; 2893 } 2894 } 2895 } else { 2896 if (!chain || tcf_chain_held_by_acts_only(chain)) { 2897 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 2898 err = -EINVAL; 2899 goto errout_block_locked; 2900 } 2901 tcf_chain_hold(chain); 2902 } 2903 2904 if (n->nlmsg_type == RTM_NEWCHAIN) { 2905 /* Modifying chain requires holding parent block lock. In case 2906 * the chain was successfully added, take a reference to the 2907 * chain. This ensures that an empty chain does not disappear at 2908 * the end of this function. 2909 */ 2910 tcf_chain_hold(chain); 2911 chain->explicitly_created = true; 2912 } 2913 mutex_unlock(&block->lock); 2914 2915 switch (n->nlmsg_type) { 2916 case RTM_NEWCHAIN: 2917 err = tc_chain_tmplt_add(chain, net, tca, extack); 2918 if (err) { 2919 tcf_chain_put_explicitly_created(chain); 2920 goto errout; 2921 } 2922 2923 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, 2924 RTM_NEWCHAIN, false); 2925 break; 2926 case RTM_DELCHAIN: 2927 tfilter_notify_chain(net, skb, block, q, parent, n, 2928 chain, RTM_DELTFILTER, true); 2929 /* Flush the chain first as the user requested chain removal. */ 2930 tcf_chain_flush(chain, true); 2931 /* In case the chain was successfully deleted, put a reference 2932 * to the chain previously taken during addition. 
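 * Actions or filters may still hold references to the chain at this
 * point; it is freed only once the last of them is gone.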
2933 */ 2934 tcf_chain_put_explicitly_created(chain); 2935 break; 2936 case RTM_GETCHAIN: 2937 err = tc_chain_notify(chain, skb, n->nlmsg_seq, 2938 n->nlmsg_seq, n->nlmsg_type, true); 2939 if (err < 0) 2940 NL_SET_ERR_MSG(extack, "Failed to send chain notify message"); 2941 break; 2942 default: 2943 err = -EOPNOTSUPP; 2944 NL_SET_ERR_MSG(extack, "Unsupported message type"); 2945 goto errout; 2946 } 2947 2948 errout: 2949 tcf_chain_put(chain); 2950 errout_block: 2951 tcf_block_release(q, block, true); 2952 if (err == -EAGAIN) 2953 /* Replay the request. */ 2954 goto replay; 2955 return err; 2956 2957 errout_block_locked: 2958 mutex_unlock(&block->lock); 2959 goto errout_block; 2960 } 2961 2962 /* called with RTNL */ 2963 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) 2964 { 2965 struct net *net = sock_net(skb->sk); 2966 struct nlattr *tca[TCA_MAX + 1]; 2967 struct Qdisc *q = NULL; 2968 struct tcf_block *block; 2969 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2970 struct tcf_chain *chain; 2971 long index_start; 2972 long index; 2973 u32 parent; 2974 int err; 2975 2976 if (nlmsg_len(cb->nlh) < sizeof(*tcm)) 2977 return skb->len; 2978 2979 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, 2980 rtm_tca_policy, cb->extack); 2981 if (err) 2982 return err; 2983 2984 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { 2985 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); 2986 if (!block) 2987 goto out; 2988 /* If we work with block index, q is NULL and parent value 2989 * will never be used in the following code. The check 2990 * in tcf_fill_node prevents it. However, compiler does not 2991 * see that far, so set parent to zero to silence the warning 2992 * about parent being uninitialized. 2993 */ 2994 parent = 0; 2995 } else { 2996 const struct Qdisc_class_ops *cops; 2997 struct net_device *dev; 2998 unsigned long cl = 0; 2999 3000 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 3001 if (!dev) 3002 return skb->len; 3003 3004 parent = tcm->tcm_parent; 3005 if (!parent) { 3006 q = dev->qdisc; 3007 parent = q->handle; 3008 } else { 3009 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 3010 } 3011 if (!q) 3012 goto out; 3013 cops = q->ops->cl_ops; 3014 if (!cops) 3015 goto out; 3016 if (!cops->tcf_block) 3017 goto out; 3018 if (TC_H_MIN(tcm->tcm_parent)) { 3019 cl = cops->find(q, tcm->tcm_parent); 3020 if (cl == 0) 3021 goto out; 3022 } 3023 block = cops->tcf_block(q, cl, NULL); 3024 if (!block) 3025 goto out; 3026 if (tcf_block_shared(block)) 3027 q = NULL; 3028 } 3029 3030 index_start = cb->args[0]; 3031 index = 0; 3032 3033 mutex_lock(&block->lock); 3034 list_for_each_entry(chain, &block->chain_list, list) { 3035 if ((tca[TCA_CHAIN] && 3036 nla_get_u32(tca[TCA_CHAIN]) != chain->index)) 3037 continue; 3038 if (index < index_start) { 3039 index++; 3040 continue; 3041 } 3042 if (tcf_chain_held_by_acts_only(chain)) 3043 continue; 3044 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, 3045 chain->index, net, skb, block, 3046 NETLINK_CB(cb->skb).portid, 3047 cb->nlh->nlmsg_seq, NLM_F_MULTI, 3048 RTM_NEWCHAIN); 3049 if (err <= 0) 3050 break; 3051 index++; 3052 } 3053 mutex_unlock(&block->lock); 3054 3055 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) 3056 tcf_block_refcnt_put(block, true); 3057 cb->args[0] = index; 3058 3059 out: 3060 /* If we did no progress, the error (EMSGSIZE) is real */ 3061 if (skb->len == 0 && err) 3062 return err; 3063 return skb->len; 3064 } 3065 3066 void tcf_exts_destroy(struct tcf_exts *exts) 3067 { 3068 #ifdef 
CONFIG_NET_CLS_ACT 3069 if (exts->actions) { 3070 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND); 3071 kfree(exts->actions); 3072 } 3073 exts->nr_actions = 0; 3074 #endif 3075 } 3076 EXPORT_SYMBOL(tcf_exts_destroy); 3077 3078 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, 3079 struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr, 3080 bool rtnl_held, struct netlink_ext_ack *extack) 3081 { 3082 #ifdef CONFIG_NET_CLS_ACT 3083 { 3084 struct tc_action *act; 3085 size_t attr_size = 0; 3086 3087 if (exts->police && tb[exts->police]) { 3088 act = tcf_action_init_1(net, tp, tb[exts->police], 3089 rate_tlv, "police", ovr, 3090 TCA_ACT_BIND, rtnl_held, 3091 extack); 3092 if (IS_ERR(act)) 3093 return PTR_ERR(act); 3094 3095 act->type = exts->type = TCA_OLD_COMPAT; 3096 exts->actions[0] = act; 3097 exts->nr_actions = 1; 3098 } else if (exts->action && tb[exts->action]) { 3099 int err; 3100 3101 err = tcf_action_init(net, tp, tb[exts->action], 3102 rate_tlv, NULL, ovr, TCA_ACT_BIND, 3103 exts->actions, &attr_size, 3104 rtnl_held, extack); 3105 if (err < 0) 3106 return err; 3107 exts->nr_actions = err; 3108 } 3109 } 3110 #else 3111 if ((exts->action && tb[exts->action]) || 3112 (exts->police && tb[exts->police])) { 3113 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)"); 3114 return -EOPNOTSUPP; 3115 } 3116 #endif 3117 3118 return 0; 3119 } 3120 EXPORT_SYMBOL(tcf_exts_validate); 3121 3122 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src) 3123 { 3124 #ifdef CONFIG_NET_CLS_ACT 3125 struct tcf_exts old = *dst; 3126 3127 *dst = *src; 3128 tcf_exts_destroy(&old); 3129 #endif 3130 } 3131 EXPORT_SYMBOL(tcf_exts_change); 3132 3133 #ifdef CONFIG_NET_CLS_ACT 3134 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts) 3135 { 3136 if (exts->nr_actions == 0) 3137 return NULL; 3138 else 3139 return exts->actions[0]; 3140 } 3141 #endif 3142 3143 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) 3144 { 3145 #ifdef CONFIG_NET_CLS_ACT 3146 struct nlattr *nest; 3147 3148 if (exts->action && tcf_exts_has_actions(exts)) { 3149 /* 3150 * again for backward compatible mode - we want 3151 * to work with both old and new modes of entering 3152 * tc data even if iproute2 was newer - jhs 3153 */ 3154 if (exts->type != TCA_OLD_COMPAT) { 3155 nest = nla_nest_start_noflag(skb, exts->action); 3156 if (nest == NULL) 3157 goto nla_put_failure; 3158 3159 if (tcf_action_dump(skb, exts->actions, 0, 0) < 0) 3160 goto nla_put_failure; 3161 nla_nest_end(skb, nest); 3162 } else if (exts->police) { 3163 struct tc_action *act = tcf_exts_first_act(exts); 3164 nest = nla_nest_start_noflag(skb, exts->police); 3165 if (nest == NULL || !act) 3166 goto nla_put_failure; 3167 if (tcf_action_dump_old(skb, act, 0, 0) < 0) 3168 goto nla_put_failure; 3169 nla_nest_end(skb, nest); 3170 } 3171 } 3172 return 0; 3173 3174 nla_put_failure: 3175 nla_nest_cancel(skb, nest); 3176 return -1; 3177 #else 3178 return 0; 3179 #endif 3180 } 3181 EXPORT_SYMBOL(tcf_exts_dump); 3182 3183 3184 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts) 3185 { 3186 #ifdef CONFIG_NET_CLS_ACT 3187 struct tc_action *a = tcf_exts_first_act(exts); 3188 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0) 3189 return -1; 3190 #endif 3191 return 0; 3192 } 3193 EXPORT_SYMBOL(tcf_exts_dump_stats); 3194 3195 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) 3196 { 3197 if (*flags & TCA_CLS_FLAGS_IN_HW) 3198 return; 3199 
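/* First time this filter is offloaded: set its in-hw flag and account
 * it in the per-block offload counter.
 */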
*flags |= TCA_CLS_FLAGS_IN_HW; 3200 atomic_inc(&block->offloadcnt); 3201 } 3202 3203 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) 3204 { 3205 if (!(*flags & TCA_CLS_FLAGS_IN_HW)) 3206 return; 3207 *flags &= ~TCA_CLS_FLAGS_IN_HW; 3208 atomic_dec(&block->offloadcnt); 3209 } 3210 3211 static void tc_cls_offload_cnt_update(struct tcf_block *block, 3212 struct tcf_proto *tp, u32 *cnt, 3213 u32 *flags, u32 diff, bool add) 3214 { 3215 lockdep_assert_held(&block->cb_lock); 3216 3217 spin_lock(&tp->lock); 3218 if (add) { 3219 if (!*cnt) 3220 tcf_block_offload_inc(block, flags); 3221 *cnt += diff; 3222 } else { 3223 *cnt -= diff; 3224 if (!*cnt) 3225 tcf_block_offload_dec(block, flags); 3226 } 3227 spin_unlock(&tp->lock); 3228 } 3229 3230 static void 3231 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp, 3232 u32 *cnt, u32 *flags) 3233 { 3234 lockdep_assert_held(&block->cb_lock); 3235 3236 spin_lock(&tp->lock); 3237 tcf_block_offload_dec(block, flags); 3238 *cnt = 0; 3239 spin_unlock(&tp->lock); 3240 } 3241 3242 static int 3243 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, 3244 void *type_data, bool err_stop) 3245 { 3246 struct flow_block_cb *block_cb; 3247 int ok_count = 0; 3248 int err; 3249 3250 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) { 3251 err = block_cb->cb(type, type_data, block_cb->cb_priv); 3252 if (err) { 3253 if (err_stop) 3254 return err; 3255 } else { 3256 ok_count++; 3257 } 3258 } 3259 return ok_count; 3260 } 3261 3262 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, 3263 void *type_data, bool err_stop, bool rtnl_held) 3264 { 3265 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3266 int ok_count; 3267 3268 retry: 3269 if (take_rtnl) 3270 rtnl_lock(); 3271 down_read(&block->cb_lock); 3272 /* Need to obtain rtnl lock if block is bound to devs that require it. 3273 * In block bind code cb_lock is obtained while holding rtnl, so we must 3274 * obtain the locks in same order here. 3275 */ 3276 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3277 up_read(&block->cb_lock); 3278 take_rtnl = true; 3279 goto retry; 3280 } 3281 3282 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3283 3284 up_read(&block->cb_lock); 3285 if (take_rtnl) 3286 rtnl_unlock(); 3287 return ok_count; 3288 } 3289 EXPORT_SYMBOL(tc_setup_cb_call); 3290 3291 /* Non-destructive filter add. If filter that wasn't already in hardware is 3292 * successfully offloaded, increment block offloads counter. On failure, 3293 * previously offloaded filter is considered to be intact and offloads counter 3294 * is not decremented. 3295 */ 3296 3297 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp, 3298 enum tc_setup_type type, void *type_data, bool err_stop, 3299 u32 *flags, unsigned int *in_hw_count, bool rtnl_held) 3300 { 3301 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3302 int ok_count; 3303 3304 retry: 3305 if (take_rtnl) 3306 rtnl_lock(); 3307 down_read(&block->cb_lock); 3308 /* Need to obtain rtnl lock if block is bound to devs that require it. 3309 * In block bind code cb_lock is obtained while holding rtnl, so we must 3310 * obtain the locks in same order here. 3311 */ 3312 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3313 up_read(&block->cb_lock); 3314 take_rtnl = true; 3315 goto retry; 3316 } 3317 3318 /* Make sure all netdevs sharing this block are offload-capable. 
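 * err_stop is set for filters that may not fall back to software
 * (skip_sw), so such an add must fail hard if any device bound to the
 * block cannot offload.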
*/ 3319 if (block->nooffloaddevcnt && err_stop) { 3320 ok_count = -EOPNOTSUPP; 3321 goto err_unlock; 3322 } 3323 3324 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3325 if (ok_count < 0) 3326 goto err_unlock; 3327 3328 if (tp->ops->hw_add) 3329 tp->ops->hw_add(tp, type_data); 3330 if (ok_count > 0) 3331 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 3332 ok_count, true); 3333 err_unlock: 3334 up_read(&block->cb_lock); 3335 if (take_rtnl) 3336 rtnl_unlock(); 3337 return ok_count < 0 ? ok_count : 0; 3338 } 3339 EXPORT_SYMBOL(tc_setup_cb_add); 3340 3341 /* Destructive filter replace. If filter that wasn't already in hardware is 3342 * successfully offloaded, increment block offload counter. On failure, 3343 * previously offloaded filter is considered to be destroyed and offload counter 3344 * is decremented. 3345 */ 3346 3347 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp, 3348 enum tc_setup_type type, void *type_data, bool err_stop, 3349 u32 *old_flags, unsigned int *old_in_hw_count, 3350 u32 *new_flags, unsigned int *new_in_hw_count, 3351 bool rtnl_held) 3352 { 3353 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3354 int ok_count; 3355 3356 retry: 3357 if (take_rtnl) 3358 rtnl_lock(); 3359 down_read(&block->cb_lock); 3360 /* Need to obtain rtnl lock if block is bound to devs that require it. 3361 * In block bind code cb_lock is obtained while holding rtnl, so we must 3362 * obtain the locks in same order here. 3363 */ 3364 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3365 up_read(&block->cb_lock); 3366 take_rtnl = true; 3367 goto retry; 3368 } 3369 3370 /* Make sure all netdevs sharing this block are offload-capable. */ 3371 if (block->nooffloaddevcnt && err_stop) { 3372 ok_count = -EOPNOTSUPP; 3373 goto err_unlock; 3374 } 3375 3376 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags); 3377 if (tp->ops->hw_del) 3378 tp->ops->hw_del(tp, type_data); 3379 3380 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3381 if (ok_count < 0) 3382 goto err_unlock; 3383 3384 if (tp->ops->hw_add) 3385 tp->ops->hw_add(tp, type_data); 3386 if (ok_count > 0) 3387 tc_cls_offload_cnt_update(block, tp, new_in_hw_count, 3388 new_flags, ok_count, true); 3389 err_unlock: 3390 up_read(&block->cb_lock); 3391 if (take_rtnl) 3392 rtnl_unlock(); 3393 return ok_count < 0 ? ok_count : 0; 3394 } 3395 EXPORT_SYMBOL(tc_setup_cb_replace); 3396 3397 /* Destroy filter and decrement block offload counter, if filter was previously 3398 * offloaded. 3399 */ 3400 3401 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp, 3402 enum tc_setup_type type, void *type_data, bool err_stop, 3403 u32 *flags, unsigned int *in_hw_count, bool rtnl_held) 3404 { 3405 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3406 int ok_count; 3407 3408 retry: 3409 if (take_rtnl) 3410 rtnl_lock(); 3411 down_read(&block->cb_lock); 3412 /* Need to obtain rtnl lock if block is bound to devs that require it. 3413 * In block bind code cb_lock is obtained while holding rtnl, so we must 3414 * obtain the locks in same order here. 
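 * Re-checking lockeddevcnt after taking cb_lock closes the race with a
 * locked device binding to the block after the initial READ_ONCE().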
3415 */ 3416 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3417 up_read(&block->cb_lock); 3418 take_rtnl = true; 3419 goto retry; 3420 } 3421 3422 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3423 3424 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags); 3425 if (tp->ops->hw_del) 3426 tp->ops->hw_del(tp, type_data); 3427 3428 up_read(&block->cb_lock); 3429 if (take_rtnl) 3430 rtnl_unlock(); 3431 return ok_count < 0 ? ok_count : 0; 3432 } 3433 EXPORT_SYMBOL(tc_setup_cb_destroy); 3434 3435 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, 3436 bool add, flow_setup_cb_t *cb, 3437 enum tc_setup_type type, void *type_data, 3438 void *cb_priv, u32 *flags, unsigned int *in_hw_count) 3439 { 3440 int err = cb(type, type_data, cb_priv); 3441 3442 if (err) { 3443 if (add && tc_skip_sw(*flags)) 3444 return err; 3445 } else { 3446 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1, 3447 add); 3448 } 3449 3450 return 0; 3451 } 3452 EXPORT_SYMBOL(tc_setup_cb_reoffload); 3453 3454 static int tcf_act_get_cookie(struct flow_action_entry *entry, 3455 const struct tc_action *act) 3456 { 3457 struct tc_cookie *cookie; 3458 int err = 0; 3459 3460 rcu_read_lock(); 3461 cookie = rcu_dereference(act->act_cookie); 3462 if (cookie) { 3463 entry->cookie = flow_action_cookie_create(cookie->data, 3464 cookie->len, 3465 GFP_ATOMIC); 3466 if (!entry->cookie) 3467 err = -ENOMEM; 3468 } 3469 rcu_read_unlock(); 3470 return err; 3471 } 3472 3473 static void tcf_act_put_cookie(struct flow_action_entry *entry) 3474 { 3475 flow_action_cookie_destroy(entry->cookie); 3476 } 3477 3478 void tc_cleanup_flow_action(struct flow_action *flow_action) 3479 { 3480 struct flow_action_entry *entry; 3481 int i; 3482 3483 flow_action_for_each(i, entry, flow_action) { 3484 tcf_act_put_cookie(entry); 3485 if (entry->destructor) 3486 entry->destructor(entry->destructor_priv); 3487 } 3488 } 3489 EXPORT_SYMBOL(tc_cleanup_flow_action); 3490 3491 static void tcf_mirred_get_dev(struct flow_action_entry *entry, 3492 const struct tc_action *act) 3493 { 3494 #ifdef CONFIG_NET_CLS_ACT 3495 entry->dev = act->ops->get_dev(act, &entry->destructor); 3496 if (!entry->dev) 3497 return; 3498 entry->destructor_priv = entry->dev; 3499 #endif 3500 } 3501 3502 static void tcf_tunnel_encap_put_tunnel(void *priv) 3503 { 3504 struct ip_tunnel_info *tunnel = priv; 3505 3506 kfree(tunnel); 3507 } 3508 3509 static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry, 3510 const struct tc_action *act) 3511 { 3512 entry->tunnel = tcf_tunnel_info_copy(act); 3513 if (!entry->tunnel) 3514 return -ENOMEM; 3515 entry->destructor = tcf_tunnel_encap_put_tunnel; 3516 entry->destructor_priv = entry->tunnel; 3517 return 0; 3518 } 3519 3520 static void tcf_sample_get_group(struct flow_action_entry *entry, 3521 const struct tc_action *act) 3522 { 3523 #ifdef CONFIG_NET_CLS_ACT 3524 entry->sample.psample_group = 3525 act->ops->get_psample_group(act, &entry->destructor); 3526 entry->destructor_priv = entry->sample.psample_group; 3527 #endif 3528 } 3529 3530 static void tcf_gate_entry_destructor(void *priv) 3531 { 3532 struct action_gate_entry *oe = priv; 3533 3534 kfree(oe); 3535 } 3536 3537 static int tcf_gate_get_entries(struct flow_action_entry *entry, 3538 const struct tc_action *act) 3539 { 3540 entry->gate.entries = tcf_gate_get_list(act); 3541 3542 if (!entry->gate.entries) 3543 return -EINVAL; 3544 3545 entry->destructor = tcf_gate_entry_destructor; 3546 entry->destructor_priv = entry->gate.entries; 3547 
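/* The destructor set up above frees the entry list when
 * tc_cleanup_flow_action() tears the flow action down.
 */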
3548 return 0; 3549 } 3550 3551 int tc_setup_flow_action(struct flow_action *flow_action, 3552 const struct tcf_exts *exts) 3553 { 3554 struct tc_action *act; 3555 int i, j, k, err = 0; 3556 3557 BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY); 3558 BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE); 3559 BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED); 3560 3561 if (!exts) 3562 return 0; 3563 3564 j = 0; 3565 tcf_exts_for_each_action(i, act, exts) { 3566 struct flow_action_entry *entry; 3567 3568 entry = &flow_action->entries[j]; 3569 spin_lock_bh(&act->tcfa_lock); 3570 err = tcf_act_get_cookie(entry, act); 3571 if (err) 3572 goto err_out_locked; 3573 3574 entry->hw_stats = act->hw_stats; 3575 3576 if (is_tcf_gact_ok(act)) { 3577 entry->id = FLOW_ACTION_ACCEPT; 3578 } else if (is_tcf_gact_shot(act)) { 3579 entry->id = FLOW_ACTION_DROP; 3580 } else if (is_tcf_gact_trap(act)) { 3581 entry->id = FLOW_ACTION_TRAP; 3582 } else if (is_tcf_gact_goto_chain(act)) { 3583 entry->id = FLOW_ACTION_GOTO; 3584 entry->chain_index = tcf_gact_goto_chain_index(act); 3585 } else if (is_tcf_mirred_egress_redirect(act)) { 3586 entry->id = FLOW_ACTION_REDIRECT; 3587 tcf_mirred_get_dev(entry, act); 3588 } else if (is_tcf_mirred_egress_mirror(act)) { 3589 entry->id = FLOW_ACTION_MIRRED; 3590 tcf_mirred_get_dev(entry, act); 3591 } else if (is_tcf_mirred_ingress_redirect(act)) { 3592 entry->id = FLOW_ACTION_REDIRECT_INGRESS; 3593 tcf_mirred_get_dev(entry, act); 3594 } else if (is_tcf_mirred_ingress_mirror(act)) { 3595 entry->id = FLOW_ACTION_MIRRED_INGRESS; 3596 tcf_mirred_get_dev(entry, act); 3597 } else if (is_tcf_vlan(act)) { 3598 switch (tcf_vlan_action(act)) { 3599 case TCA_VLAN_ACT_PUSH: 3600 entry->id = FLOW_ACTION_VLAN_PUSH; 3601 entry->vlan.vid = tcf_vlan_push_vid(act); 3602 entry->vlan.proto = tcf_vlan_push_proto(act); 3603 entry->vlan.prio = tcf_vlan_push_prio(act); 3604 break; 3605 case TCA_VLAN_ACT_POP: 3606 entry->id = FLOW_ACTION_VLAN_POP; 3607 break; 3608 case TCA_VLAN_ACT_MODIFY: 3609 entry->id = FLOW_ACTION_VLAN_MANGLE; 3610 entry->vlan.vid = tcf_vlan_push_vid(act); 3611 entry->vlan.proto = tcf_vlan_push_proto(act); 3612 entry->vlan.prio = tcf_vlan_push_prio(act); 3613 break; 3614 default: 3615 err = -EOPNOTSUPP; 3616 goto err_out_locked; 3617 } 3618 } else if (is_tcf_tunnel_set(act)) { 3619 entry->id = FLOW_ACTION_TUNNEL_ENCAP; 3620 err = tcf_tunnel_encap_get_tunnel(entry, act); 3621 if (err) 3622 goto err_out_locked; 3623 } else if (is_tcf_tunnel_release(act)) { 3624 entry->id = FLOW_ACTION_TUNNEL_DECAP; 3625 } else if (is_tcf_pedit(act)) { 3626 for (k = 0; k < tcf_pedit_nkeys(act); k++) { 3627 switch (tcf_pedit_cmd(act, k)) { 3628 case TCA_PEDIT_KEY_EX_CMD_SET: 3629 entry->id = FLOW_ACTION_MANGLE; 3630 break; 3631 case TCA_PEDIT_KEY_EX_CMD_ADD: 3632 entry->id = FLOW_ACTION_ADD; 3633 break; 3634 default: 3635 err = -EOPNOTSUPP; 3636 goto err_out_locked; 3637 } 3638 entry->mangle.htype = tcf_pedit_htype(act, k); 3639 entry->mangle.mask = tcf_pedit_mask(act, k); 3640 entry->mangle.val = tcf_pedit_val(act, k); 3641 entry->mangle.offset = tcf_pedit_offset(act, k); 3642 entry->hw_stats = act->hw_stats; 3643 entry = &flow_action->entries[++j]; 3644 } 3645 } else if (is_tcf_csum(act)) { 3646 entry->id = FLOW_ACTION_CSUM; 3647 entry->csum_flags = tcf_csum_update_flags(act); 3648 } else if (is_tcf_skbedit_mark(act)) { 3649 entry->id = FLOW_ACTION_MARK; 3650 entry->mark = tcf_skbedit_mark(act); 3651 } else if (is_tcf_sample(act)) { 3652 
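/* The psample group is refcounted; tcf_sample_get_group() below also
 * installs the destructor that releases it.
 */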
entry->id = FLOW_ACTION_SAMPLE;
3653 entry->sample.trunc_size = tcf_sample_trunc_size(act);
3654 entry->sample.truncate = tcf_sample_truncate(act);
3655 entry->sample.rate = tcf_sample_rate(act);
3656 tcf_sample_get_group(entry, act);
3657 } else if (is_tcf_police(act)) {
3658 entry->id = FLOW_ACTION_POLICE;
3659 entry->police.burst = tcf_police_tcfp_burst(act);
3660 entry->police.rate_bytes_ps =
3661 tcf_police_rate_bytes_ps(act);
3662 } else if (is_tcf_ct(act)) {
3663 entry->id = FLOW_ACTION_CT;
3664 entry->ct.action = tcf_ct_action(act);
3665 entry->ct.zone = tcf_ct_zone(act);
3666 entry->ct.flow_table = tcf_ct_ft(act);
3667 } else if (is_tcf_mpls(act)) {
3668 switch (tcf_mpls_action(act)) {
3669 case TCA_MPLS_ACT_PUSH:
3670 entry->id = FLOW_ACTION_MPLS_PUSH;
3671 entry->mpls_push.proto = tcf_mpls_proto(act);
3672 entry->mpls_push.label = tcf_mpls_label(act);
3673 entry->mpls_push.tc = tcf_mpls_tc(act);
3674 entry->mpls_push.bos = tcf_mpls_bos(act);
3675 entry->mpls_push.ttl = tcf_mpls_ttl(act);
3676 break;
3677 case TCA_MPLS_ACT_POP:
3678 entry->id = FLOW_ACTION_MPLS_POP;
3679 entry->mpls_pop.proto = tcf_mpls_proto(act);
3680 break;
3681 case TCA_MPLS_ACT_MODIFY:
3682 entry->id = FLOW_ACTION_MPLS_MANGLE;
3683 entry->mpls_mangle.label = tcf_mpls_label(act);
3684 entry->mpls_mangle.tc = tcf_mpls_tc(act);
3685 entry->mpls_mangle.bos = tcf_mpls_bos(act);
3686 entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
3687 break;
3688 default:
/* Report the unsupported MPLS action like the other default cases
 * do; err would otherwise still be zero here and the caller would
 * see success for a half-translated action.
 */
err = -EOPNOTSUPP;
3689 goto err_out_locked;
3690 }
3691 } else if (is_tcf_skbedit_ptype(act)) {
3692 entry->id = FLOW_ACTION_PTYPE;
3693 entry->ptype = tcf_skbedit_ptype(act);
3694 } else if (is_tcf_skbedit_priority(act)) {
3695 entry->id = FLOW_ACTION_PRIORITY;
3696 entry->priority = tcf_skbedit_priority(act);
3697 } else if (is_tcf_gate(act)) {
3698 entry->id = FLOW_ACTION_GATE;
3699 entry->gate.index = tcf_gate_index(act);
3700 entry->gate.prio = tcf_gate_prio(act);
3701 entry->gate.basetime = tcf_gate_basetime(act);
3702 entry->gate.cycletime = tcf_gate_cycletime(act);
3703 entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
3704 entry->gate.num_entries = tcf_gate_num_entries(act);
3705 err = tcf_gate_get_entries(entry, act);
/* tcfa_lock is still held here, so a failure must leave through the
 * locked error path; jumping straight to err_out would leak the lock.
 */
3706 if (err)
3707 goto err_out_locked;
3708 } else {
3709 err = -EOPNOTSUPP;
3710 goto err_out_locked;
3711 }
3712 spin_unlock_bh(&act->tcfa_lock);
3713
3714 if (!is_tcf_pedit(act))
3715 j++;
3716 }
3717
3718 err_out:
3719 if (err)
3720 tc_cleanup_flow_action(flow_action);
3721
3722 return err;
3723 err_out_locked:
3724 spin_unlock_bh(&act->tcfa_lock);
3725 goto err_out;
3726 }
3727 EXPORT_SYMBOL(tc_setup_flow_action);
3728
3729 unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3730 {
3731 unsigned int num_acts = 0;
3732 struct tc_action *act;
3733 int i;
3734
3735 tcf_exts_for_each_action(i, act, exts) {
3736 if (is_tcf_pedit(act))
3737 num_acts += tcf_pedit_nkeys(act);
3738 else
3739 num_acts++;
3740 }
3741 return num_acts;
3742 }
3743 EXPORT_SYMBOL(tcf_exts_num_actions);
3744
3745 static __net_init int tcf_net_init(struct net *net)
3746 {
3747 struct tcf_net *tn = net_generic(net, tcf_net_id);
3748
3749 spin_lock_init(&tn->idr_lock);
3750 idr_init(&tn->idr);
3751 return 0;
3752 }
3753
3754 static void __net_exit tcf_net_exit(struct net *net)
3755 {
3756 struct tcf_net *tn = net_generic(net, tcf_net_id);
3757
3758 idr_destroy(&tn->idr);
3759 }
3760
3761 static struct pernet_operations tcf_net_ops = {
3762 .init = tcf_net_init,
3763 .exit = tcf_net_exit,
3764 .id = &tcf_net_id,
3765 .size = sizeof(struct tcf_net),
3766 };
3767
3768 static struct
flow_indr_block_entry block_entry = { 3769 .cb = tc_indr_block_get_and_cmd, 3770 .list = LIST_HEAD_INIT(block_entry.list), 3771 }; 3772 3773 static int __init tc_filter_init(void) 3774 { 3775 int err; 3776 3777 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0); 3778 if (!tc_filter_wq) 3779 return -ENOMEM; 3780 3781 err = register_pernet_subsys(&tcf_net_ops); 3782 if (err) 3783 goto err_register_pernet_subsys; 3784 3785 flow_indr_add_block_cb(&block_entry); 3786 3787 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 3788 RTNL_FLAG_DOIT_UNLOCKED); 3789 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 3790 RTNL_FLAG_DOIT_UNLOCKED); 3791 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter, 3792 tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED); 3793 rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0); 3794 rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0); 3795 rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain, 3796 tc_dump_chain, 0); 3797 3798 return 0; 3799 3800 err_register_pernet_subsys: 3801 destroy_workqueue(tc_filter_wq); 3802 return err; 3803 } 3804 3805 subsys_initcall(tc_filter_init); 3806