// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
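
/* Typical usage of the registration API above: a classifier module
 * registers its tcf_proto_ops on load and unregisters on unload.  A
 * minimal sketch with a hypothetical "example" classifier (callback
 * bodies elided, names not taken from this file):
 *
 *	static struct tcf_proto_ops cls_example_ops = {
 *		.kind		= "example",
 *		.classify	= example_classify,
 *		.init		= example_init,
 *		.destroy	= example_destroy,
 *		.get		= example_get,
 *		.change		= example_change,
 *		.delete		= example_delete,
 *		.walk		= example_walk,
 *		.dump		= example_dump,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_example_init(void)
 *	{
 *		return register_tcf_proto_ops(&cls_example_ops);
 *	}
 *
 *	static void __exit cls_example_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&cls_example_ops);
 *	}
 *
 * For the request_module("cls_%s", kind) in tcf_proto_lookup_ops() to
 * auto-load such a module, it must be named cls_<kind> (here: cls_example).
 */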

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
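
/* tcf_queue_work() is how classifiers defer freeing of filter state until
 * an RCU grace period has elapsed, so readers still walking the chain
 * under RCU never touch freed memory.  A minimal sketch, assuming a
 * hypothetical filter struct that embeds a struct rcu_work:
 *
 *	static void example_filter_free_work(struct work_struct *work)
 *	{
 *		struct example_filter *f = container_of(to_rcu_work(work),
 *							struct example_filter,
 *							rwork);
 *		kfree(f);	// grace period has passed; safe to free
 *	}
 *
 *	// in the classifier's delete path, instead of kfree(f):
 *	tcf_queue_work(&f->rwork, example_filter_free_work);
 *
 * unregister_tcf_proto_ops() above flushes tc_filter_wq, so a classifier
 * module is not removed while such deferred frees are still queued.
 */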

/* Select a new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
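
/* Worked example of the arithmetic above: tp->prio stores the priority in
 * the upper 16 bits (the TC_H_MAJ position).  On an empty chain the first
 * auto-allocated priority is TC_H_MAJ(0xC0000000) = 0xC0000000, i.e.
 * user-visible prio 0xC000.  If the current head already has prio
 * 0xC0000000, the next allocation is TC_H_MAJ(0xC0000000 - 1) =
 * 0xBFFF0000 (prio 0xBFFF), so each auto-priority filter is inserted
 * ahead of the previous one.
 */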

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strscpy(name, kind, IFNAMSIZ) < 0;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block) \
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and user ought
	 * not know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
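
/* Action references let tc actions (e.g. "goto chain X") pin a chain
 * without making it visible in dumps; such chains are filtered out via
 * tcf_chain_held_by_acts_only().  The expected pairing in an action
 * implementation is a plain get/put, sketched here (error handling
 * elided; tcf_chain_put_by_act() is defined below):
 *
 *	struct tcf_chain *goto_ch;
 *
 *	goto_ch = tcf_chain_get_by_act(block, chain_index);
 *	if (!goto_ch)
 *		return -ENOMEM;		// lookup and create both failed
 *	...
 *	// when the action is freed or re-targeted:
 *	tcf_chain_put_by_act(goto_ch);
 */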

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
				   struct net_device *dev, struct Qdisc *sch,
				   enum flow_block_command command,
				   enum flow_block_binder_type binder_type,
				   struct flow_block *flow_block,
				   bool shared, struct netlink_ext_ack *extack)
{
	bo->net = dev_net(dev);
	bo->command = command;
	bo->binder_type = binder_type;
	bo->block = flow_block;
	bo->block_shared = shared;
	bo->extack = extack;
	bo->sch = sch;
	INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
	struct tcf_block *block = block_cb->indr.data;
	struct net_device *dev = block_cb->indr.dev;
	struct Qdisc *sch = block_cb->indr.sch;
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
			       block_cb->indr.binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       &extack);
	rtnl_lock();
	down_write(&block->cb_lock);
	list_del(&block_cb->driver_list);
	list_move(&block_cb->list, &bo.cb_list);
	tcf_block_unbind(block, &bo);
	up_write(&block->cb_lock);
	rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev, struct Qdisc *sch,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};

	tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
			       &block->flow_block, tcf_block_shared(block),
			       extack);

	if (dev->netdev_ops->ndo_setup_tc) {
		int err;

		err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
		if (err < 0) {
			if (err != -EOPNOTSUPP)
				NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
			return err;
		}

		return tcf_block_setup(block, &bo);
	}

	flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
				    tc_block_indr_cleanup);
	tcf_block_setup(block, &bo);

	return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (dev->netdev_ops->ndo_setup_tc &&
	    !tc_can_offload(dev) &&
	    tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		goto err_unlock;

	err = 0;
	block->nooffloaddevcnt++;
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, true, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
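
/* Putting the two iterators together gives the canonical walk over every
 * filter on a block.  Both helpers acquire and drop references internally,
 * so the caller may sleep or send netlink messages between steps; this is
 * the pattern the dump code in this file relies on (visit() is a
 * placeholder for per-tp work):
 *
 *	struct tcf_chain *chain;
 *	struct tcf_proto *tp;
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain)) {
 *		for (tp = tcf_get_next_proto(chain, NULL);
 *		     tp;
 *		     tp = tcf_get_next_proto(chain, tp))
 *			visit(tp);
 *	}
 *
 * Since the iterators only release the reference on the *previous*
 * element, breaking out of either loop early leaves the caller holding a
 * reference on the last returned chain/tp, which it must drop itself.
 */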

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take reference to block in order to support execution
		 * of rules update path of cls API without rtnl lock. Caller
		 * must release block when it is finished using it. The 'if'
		 * branch of this conditional already obtained its reference
		 * via tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
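
/* tcf_block_get() is what a classful qdisc calls to attach a filter block.
 * A sketch of the usual init/destroy pairing, loosely following the
 * ingress/clsact style (the private struct and function names are
 * illustrative):
 *
 *	struct example_sched_data {
 *		struct tcf_block *block;
 *		struct tcf_proto __rcu *filter_list;
 *	};
 *
 *	static int example_init(struct Qdisc *sch, struct nlattr *opt,
 *				struct netlink_ext_ack *extack)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void example_destroy(struct Qdisc *sch)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);	// defined below
 *	}
 *
 * The chain_head_change callback installed above keeps q->filter_list
 * pointing at the current chain 0 head, so the qdisc's fast path can
 * classify against it with only RCU protection.
 */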

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}

EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
static inline int __tcf_classify(struct sk_buff *skb,
				 const struct tcf_proto *tp,
				 const struct tcf_proto *orig_tp,
				 struct tcf_result *res,
				 bool compat_mode,
				 u32 *last_executed_chain)
{
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 16;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		__be16 protocol = skb_protocol(skb, false);
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			*last_executed_chain = first_tp->chain->index;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			*last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	goto reclassify;
#endif
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 last_executed_chain = 0;

	return __tcf_classify(skb, tp, tp, res, compat_mode,
			      &last_executed_chain);
#else
	u32 last_executed_chain = tp ? tp->chain->index : 0;
	const struct tcf_proto *orig_tp = tp;
	struct tc_skb_ext *ext;
	int ret;

	if (block) {
		ext = skb_ext_find(skb, TC_SKB_EXT);

		if (ext && ext->chain) {
			struct tcf_chain *fchain;

			fchain = tcf_chain_lookup_rcu(block, ext->chain);
			if (!fchain)
				return TC_ACT_SHOT;

			/* Consume, so cloned/redirect skbs won't inherit ext */
			skb_ext_del(skb, TC_SKB_EXT);

			tp = rcu_dereference_bh(fchain->filter_chain);
			last_executed_chain = fchain->index;
		}
	}

	ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
			     &last_executed_chain);

	/* If we missed on some chain */
	if (ret == TC_ACT_UNSPEC && last_executed_chain) {
		ext = tc_skb_ext_alloc(skb);
		if (WARN_ON_ONCE(!ext))
			return TC_ACT_SHOT;
		ext->chain = last_executed_chain;
		ext->mru = qdisc_skb_cb(skb)->mru;
		ext->post_ct = qdisc_skb_cb(skb)->post_ct;
	}

	return ret;
#endif
}
EXPORT_SYMBOL(tcf_classify);
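
/* Worked example of the goto-chain handling in __tcf_classify() above:
 * extended verdicts carry an opcode in the top bits and a value in the
 * low 28 bits (TC_ACT_EXT_VAL_MASK).  An action returning
 * TC_ACT_GOTO_CHAIN | 7 therefore means "restart classification on
 * chain 7": err & TC_ACT_EXT_VAL_MASK extracts 7, res->goto_tp supplies
 * that chain's first tp, and the shared limit counter still applies, so
 * at most max_reclassify_loop chain jumps are taken per packet.  This is
 * also why chain indices are validated against TC_ACT_EXT_VAL_MASK when
 * filters are created (see tc_new_tfilter() below).
 */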

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info)
{
	return tcf_chain_dereference(*chain_info->pprev, chain);
}

static int tcf_chain_tp_insert(struct tcf_chain *chain,
			       struct tcf_chain_info *chain_info,
			       struct tcf_proto *tp)
{
	if (chain->flushing)
		return -EAGAIN;

	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain0_head_change(chain, tp);
	tcf_proto_get(tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);

	return 0;
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);

	tcf_proto_mark_delete(tp);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate);

/* Try to insert new proto.
 * If proto with specified priority already exists, free new proto
 * and return existing one.
 */

static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
						    struct tcf_proto *tp_new,
						    u32 protocol, u32 prio,
						    bool rtnl_held)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp;
	int err = 0;

	mutex_lock(&chain->filter_chain_lock);

	if (tcf_proto_exists_destroying(chain, tp_new)) {
		mutex_unlock(&chain->filter_chain_lock);
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		return ERR_PTR(-EAGAIN);
	}

	tp = tcf_chain_tp_find(chain, &chain_info,
			       protocol, prio, false);
	if (!tp)
		err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
	mutex_unlock(&chain->filter_chain_lock);

	if (tp) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = tp;
	} else if (err) {
		tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
		tp_new = ERR_PTR(err);
	}

	return tp_new;
}

static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
				      struct tcf_proto *tp, bool rtnl_held,
				      struct netlink_ext_ack *extack)
{
	struct tcf_chain_info chain_info;
	struct tcf_proto *tp_iter;
	struct tcf_proto **pprev;
	struct tcf_proto *next;

	mutex_lock(&chain->filter_chain_lock);

	/* Atomically find and remove tp from chain. */
	for (pprev = &chain->filter_chain;
	     (tp_iter = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp_iter->next) {
		if (tp_iter == tp) {
			chain_info.pprev = pprev;
			chain_info.next = tp_iter->next;
			WARN_ON(tp_iter->deleting);
			break;
		}
	}
	/* Verify that tp still exists and no new filters were inserted
	 * concurrently.
	 * Mark tp for deletion if it is empty.
	 */
	if (!tp_iter || !tcf_proto_check_delete(tp)) {
		mutex_unlock(&chain->filter_chain_lock);
		return;
	}

	tcf_proto_signal_destroying(chain, tp);
	next = tcf_chain_dereference(chain_info.next, chain);
	if (tp == chain->filter_chain)
		tcf_chain0_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info.pprev, next);
	mutex_unlock(&chain->filter_chain_lock);

	tcf_proto_put(tp, rtnl_held, extack);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = tcf_chain_dereference(*pprev, chain));
	     pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	if (tp) {
		chain_info->next = tp->next;
		tcf_proto_get(tp);
	} else {
		chain_info->next = NULL;
	}
	return tp;
}
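
/* Example of the lookup rules above, for a chain holding tps with prios
 * 0x10000 and 0x30000 (tps are kept sorted by ascending prio):
 *
 *   - find(proto, prio 0x30000) returns that tp, provided the protocol
 *     matches or the caller passed protocol 0;
 *   - find(proto, prio 0x20000) returns NULL but leaves chain_info->pprev
 *     at the 0x10000 tp's next pointer, i.e. the insertion point a
 *     subsequent tcf_chain_tp_insert() will use;
 *   - find() with prio_allocate set and an exact prio match returns
 *     -EINVAL, since an auto-allocated priority must not collide with an
 *     existing one.
 */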

static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event,
			 bool terse_dump, bool rtnl_held)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else if (terse_dump) {
		if (tp->ops->terse_dump) {
			if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
						rtnl_held) < 0)
				goto nla_put_failure;
		} else {
			goto cls_op_not_supp;
		}
	} else {
		if (tp->ops->dump &&
		    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
cls_op_not_supp:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast,
			  bool rtnl_held)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event,
			  false, rtnl_held) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	return err;
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
			  false, rtnl_held) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		err = rtnl_unicast(skb, net, portid);
	else
		err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");

	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL);
	     tp; tp = tcf_get_next_proto(chain, tp))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, NULL, event, false, true);
}

static void tfilter_put(struct tcf_proto *tp, void *fh)
{
	if (tp->ops->put && fh)
		tp->ops->put(tp, fh);
}

static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	char name[IFNAMSIZ];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;
	bool rtnl_held = false;
	u32 flags;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
				     rtm_tca_policy, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	tp = NULL;
	cl = 0;
	block = NULL;
	flags = 0;

	if (prio == 0) {
		/* If no priority is provided by the user,
		 * we allocate one.
		 */
		if (n->nlmsg_flags & NLM_F_CREATE) {
			prio = TC_H_MAKE(0x80000000U, 0U);
			prio_allocate = true;
		} else {
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
	if (err)
		return err;

	if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
		NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
		err = -EINVAL;
		goto errout;
	}

	/* Take rtnl mutex if rtnl_held was set to true on previous iteration,
	 * block is shared (no qdisc found), qdisc is not unlocked, classifier
	 * type is not specified, classifier is not unlocked.
	 */
	if (rtnl_held ||
	    (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
	    !tcf_proto_is_unlocked(name)) {
		rtnl_held = true;
		rtnl_lock();
	}

	err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
	if (err)
		goto errout;

	block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
				 extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout;
	}
	block->classid = parent;

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index, true);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
		err = -ENOMEM;
		goto errout;
	}

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout_locked;
	}

	if (tp == NULL) {
		struct tcf_proto *tp_new = NULL;

		if (chain->flushing) {
			err = -EAGAIN;
			goto errout_locked;
		}

		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout_locked;
		}

		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout_locked;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
							       &chain_info));

		mutex_unlock(&chain->filter_chain_lock);
		tp_new = tcf_proto_create(name, protocol, prio, chain,
					  rtnl_held, extack);
		if (IS_ERR(tp_new)) {
			err = PTR_ERR(tp_new);
			goto errout_tp;
		}

		tp_created = 1;
		tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
						rtnl_held);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout_tp;
		}
	} else {
		mutex_unlock(&chain->filter_chain_lock);
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (!(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else if (n->nlmsg_flags & NLM_F_EXCL) {
		tfilter_put(tp, fh);
		NL_SET_ERR_MSG(extack, "Filter already exists");
		err = -EEXIST;
		goto errout;
	}

	if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
		NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
		err = -EINVAL;
		goto errout;
	}

	if (!(n->nlmsg_flags & NLM_F_CREATE))
		flags |= TCA_ACT_FLAGS_REPLACE;
	if (!rtnl_held)
		flags |= TCA_ACT_FLAGS_NO_RTNL;
	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      flags, extack);
	if (err == 0) {
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false, rtnl_held);
		tfilter_put(tp, fh);
		/* q pointer is NULL for shared blocks */
		if (q)
			q->flags &= ~TCQ_F_CAN_BYPASS;
	}

errout:
	if (err && tp_created)
		tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
errout_tp:
	if (chain) {
		if (tp && !IS_ERR(tp))
			tcf_proto_put(tp, rtnl_held, NULL);
		if (!tp_created)
			tcf_chain_put(chain);
	}
	tcf_block_release(q, block, rtnl_held);

	if (rtnl_held)
		rtnl_unlock();

	if (err == -EAGAIN) {
		/* Take rtnl lock in case EAGAIN is caused by concurrent flush
		 * of target chain.
		 */
		rtnl_held = true;
		/* Replay the request. */
		goto replay;
	}
	return err;

errout_locked:
	mutex_unlock(&chain->filter_chain_lock);
	goto errout;
}
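
/* For orientation, a typical userspace request handled by tc_new_tfilter()
 * above:
 *
 *	tc filter add dev eth0 ingress prio 1 protocol ip \
 *		flower dst_ip 10.0.0.1 action drop
 *
 * arrives as RTM_NEWTFILTER with NLM_F_CREATE | NLM_F_EXCL set, tcm_info
 * carrying prio 1 in its major half and ETH_P_IP in its minor half,
 * TCA_KIND = "flower", and the classifier/action configuration nested
 * under tca[TCA_OPTIONS].  Omitting "prio" makes prio_allocate true so
 * the kernel picks one via tcf_auto_prio(); "chain N" arrives as
 * TCA_CHAIN.
 */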
2003 */ 2004 if (rtnl_held || 2005 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || 2006 !tcf_proto_is_unlocked(name)) { 2007 rtnl_held = true; 2008 rtnl_lock(); 2009 } 2010 2011 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); 2012 if (err) 2013 goto errout; 2014 2015 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, 2016 extack); 2017 if (IS_ERR(block)) { 2018 err = PTR_ERR(block); 2019 goto errout; 2020 } 2021 block->classid = parent; 2022 2023 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2024 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2025 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2026 err = -EINVAL; 2027 goto errout; 2028 } 2029 chain = tcf_chain_get(block, chain_index, true); 2030 if (!chain) { 2031 NL_SET_ERR_MSG(extack, "Cannot create specified filter chain"); 2032 err = -ENOMEM; 2033 goto errout; 2034 } 2035 2036 mutex_lock(&chain->filter_chain_lock); 2037 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 2038 prio, prio_allocate); 2039 if (IS_ERR(tp)) { 2040 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); 2041 err = PTR_ERR(tp); 2042 goto errout_locked; 2043 } 2044 2045 if (tp == NULL) { 2046 struct tcf_proto *tp_new = NULL; 2047 2048 if (chain->flushing) { 2049 err = -EAGAIN; 2050 goto errout_locked; 2051 } 2052 2053 /* Proto-tcf does not exist, create new one */ 2054 2055 if (tca[TCA_KIND] == NULL || !protocol) { 2056 NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified"); 2057 err = -EINVAL; 2058 goto errout_locked; 2059 } 2060 2061 if (!(n->nlmsg_flags & NLM_F_CREATE)) { 2062 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter"); 2063 err = -ENOENT; 2064 goto errout_locked; 2065 } 2066 2067 if (prio_allocate) 2068 prio = tcf_auto_prio(tcf_chain_tp_prev(chain, 2069 &chain_info)); 2070 2071 mutex_unlock(&chain->filter_chain_lock); 2072 tp_new = tcf_proto_create(name, protocol, prio, chain, 2073 rtnl_held, extack); 2074 if (IS_ERR(tp_new)) { 2075 err = PTR_ERR(tp_new); 2076 goto errout_tp; 2077 } 2078 2079 tp_created = 1; 2080 tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio, 2081 rtnl_held); 2082 if (IS_ERR(tp)) { 2083 err = PTR_ERR(tp); 2084 goto errout_tp; 2085 } 2086 } else { 2087 mutex_unlock(&chain->filter_chain_lock); 2088 } 2089 2090 if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 2091 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); 2092 err = -EINVAL; 2093 goto errout; 2094 } 2095 2096 fh = tp->ops->get(tp, t->tcm_handle); 2097 2098 if (!fh) { 2099 if (!(n->nlmsg_flags & NLM_F_CREATE)) { 2100 NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter"); 2101 err = -ENOENT; 2102 goto errout; 2103 } 2104 } else if (n->nlmsg_flags & NLM_F_EXCL) { 2105 tfilter_put(tp, fh); 2106 NL_SET_ERR_MSG(extack, "Filter already exists"); 2107 err = -EEXIST; 2108 goto errout; 2109 } 2110 2111 if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) { 2112 NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind"); 2113 err = -EINVAL; 2114 goto errout; 2115 } 2116 2117 if (!(n->nlmsg_flags & NLM_F_CREATE)) 2118 flags |= TCA_ACT_FLAGS_REPLACE; 2119 if (!rtnl_held) 2120 flags |= TCA_ACT_FLAGS_NO_RTNL; 2121 err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh, 2122 flags, extack); 2123 if (err == 0) { 2124 tfilter_notify(net, skb, n, tp, block, q, parent, fh, 2125 RTM_NEWTFILTER, false, rtnl_held); 
2126 tfilter_put(tp, fh); 2127 /* q pointer is NULL for shared blocks */ 2128 if (q) 2129 q->flags &= ~TCQ_F_CAN_BYPASS; 2130 } 2131 2132 errout: 2133 if (err && tp_created) 2134 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL); 2135 errout_tp: 2136 if (chain) { 2137 if (tp && !IS_ERR(tp)) 2138 tcf_proto_put(tp, rtnl_held, NULL); 2139 if (!tp_created) 2140 tcf_chain_put(chain); 2141 } 2142 tcf_block_release(q, block, rtnl_held); 2143 2144 if (rtnl_held) 2145 rtnl_unlock(); 2146 2147 if (err == -EAGAIN) { 2148 /* Take rtnl lock in case EAGAIN is caused by concurrent flush 2149 * of target chain. 2150 */ 2151 rtnl_held = true; 2152 /* Replay the request. */ 2153 goto replay; 2154 } 2155 return err; 2156 2157 errout_locked: 2158 mutex_unlock(&chain->filter_chain_lock); 2159 goto errout; 2160 } 2161 2162 static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 2163 struct netlink_ext_ack *extack) 2164 { 2165 struct net *net = sock_net(skb->sk); 2166 struct nlattr *tca[TCA_MAX + 1]; 2167 char name[IFNAMSIZ]; 2168 struct tcmsg *t; 2169 u32 protocol; 2170 u32 prio; 2171 u32 parent; 2172 u32 chain_index; 2173 struct Qdisc *q = NULL; 2174 struct tcf_chain_info chain_info; 2175 struct tcf_chain *chain = NULL; 2176 struct tcf_block *block = NULL; 2177 struct tcf_proto *tp = NULL; 2178 unsigned long cl = 0; 2179 void *fh = NULL; 2180 int err; 2181 bool rtnl_held = false; 2182 2183 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) 2184 return -EPERM; 2185 2186 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2187 rtm_tca_policy, extack); 2188 if (err < 0) 2189 return err; 2190 2191 t = nlmsg_data(n); 2192 protocol = TC_H_MIN(t->tcm_info); 2193 prio = TC_H_MAJ(t->tcm_info); 2194 parent = t->tcm_parent; 2195 2196 if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) { 2197 NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set"); 2198 return -ENOENT; 2199 } 2200 2201 /* Find head of filter chain. */ 2202 2203 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); 2204 if (err) 2205 return err; 2206 2207 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2208 NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); 2209 err = -EINVAL; 2210 goto errout; 2211 } 2212 /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc 2213 * found), qdisc is not unlocked, classifier type is not specified, 2214 * classifier is not unlocked. 2215 */ 2216 if (!prio || 2217 (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || 2218 !tcf_proto_is_unlocked(name)) { 2219 rtnl_held = true; 2220 rtnl_lock(); 2221 } 2222 2223 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); 2224 if (err) 2225 goto errout; 2226 2227 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, 2228 extack); 2229 if (IS_ERR(block)) { 2230 err = PTR_ERR(block); 2231 goto errout; 2232 } 2233 2234 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2235 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2236 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2237 err = -EINVAL; 2238 goto errout; 2239 } 2240 chain = tcf_chain_get(block, chain_index, false); 2241 if (!chain) { 2242 /* User requested flush on non-existent chain. Nothing to do, 2243 * so just return success. 
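	 * E.g. repeating an (illustrative) "tc filter del dev eth0 ingress"
	 * after all filters are already gone should stay successful rather
	 * than fail with ENOENT; the flush is treated as idempotent.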
2244 */ 2245 if (prio == 0) { 2246 err = 0; 2247 goto errout; 2248 } 2249 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 2250 err = -ENOENT; 2251 goto errout; 2252 } 2253 2254 if (prio == 0) { 2255 tfilter_notify_chain(net, skb, block, q, parent, n, 2256 chain, RTM_DELTFILTER); 2257 tcf_chain_flush(chain, rtnl_held); 2258 err = 0; 2259 goto errout; 2260 } 2261 2262 mutex_lock(&chain->filter_chain_lock); 2263 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 2264 prio, false); 2265 if (!tp || IS_ERR(tp)) { 2266 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); 2267 err = tp ? PTR_ERR(tp) : -ENOENT; 2268 goto errout_locked; 2269 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 2270 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); 2271 err = -EINVAL; 2272 goto errout_locked; 2273 } else if (t->tcm_handle == 0) { 2274 tcf_proto_signal_destroying(chain, tp); 2275 tcf_chain_tp_remove(chain, &chain_info, tp); 2276 mutex_unlock(&chain->filter_chain_lock); 2277 2278 tcf_proto_put(tp, rtnl_held, NULL); 2279 tfilter_notify(net, skb, n, tp, block, q, parent, fh, 2280 RTM_DELTFILTER, false, rtnl_held); 2281 err = 0; 2282 goto errout; 2283 } 2284 mutex_unlock(&chain->filter_chain_lock); 2285 2286 fh = tp->ops->get(tp, t->tcm_handle); 2287 2288 if (!fh) { 2289 NL_SET_ERR_MSG(extack, "Specified filter handle not found"); 2290 err = -ENOENT; 2291 } else { 2292 bool last; 2293 2294 err = tfilter_del_notify(net, skb, n, tp, block, 2295 q, parent, fh, false, &last, 2296 rtnl_held, extack); 2297 2298 if (err) 2299 goto errout; 2300 if (last) 2301 tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack); 2302 } 2303 2304 errout: 2305 if (chain) { 2306 if (tp && !IS_ERR(tp)) 2307 tcf_proto_put(tp, rtnl_held, NULL); 2308 tcf_chain_put(chain); 2309 } 2310 tcf_block_release(q, block, rtnl_held); 2311 2312 if (rtnl_held) 2313 rtnl_unlock(); 2314 2315 return err; 2316 2317 errout_locked: 2318 mutex_unlock(&chain->filter_chain_lock); 2319 goto errout; 2320 } 2321 2322 static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n, 2323 struct netlink_ext_ack *extack) 2324 { 2325 struct net *net = sock_net(skb->sk); 2326 struct nlattr *tca[TCA_MAX + 1]; 2327 char name[IFNAMSIZ]; 2328 struct tcmsg *t; 2329 u32 protocol; 2330 u32 prio; 2331 u32 parent; 2332 u32 chain_index; 2333 struct Qdisc *q = NULL; 2334 struct tcf_chain_info chain_info; 2335 struct tcf_chain *chain = NULL; 2336 struct tcf_block *block = NULL; 2337 struct tcf_proto *tp = NULL; 2338 unsigned long cl = 0; 2339 void *fh = NULL; 2340 int err; 2341 bool rtnl_held = false; 2342 2343 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2344 rtm_tca_policy, extack); 2345 if (err < 0) 2346 return err; 2347 2348 t = nlmsg_data(n); 2349 protocol = TC_H_MIN(t->tcm_info); 2350 prio = TC_H_MAJ(t->tcm_info); 2351 parent = t->tcm_parent; 2352 2353 if (prio == 0) { 2354 NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); 2355 return -ENOENT; 2356 } 2357 2358 /* Find head of filter chain. */ 2359 2360 err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); 2361 if (err) 2362 return err; 2363 2364 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2365 NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); 2366 err = -EINVAL; 2367 goto errout; 2368 } 2369 /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not 2370 * unlocked, classifier type is not specified, classifier is not 2371 * unlocked. 
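	 * (Classifiers whose ops carry TCF_PROTO_OPS_DOIT_UNLOCKED do their
	 * own fine-grained locking, which is what allows this path to run
	 * without rtnl when none of the conditions below apply.)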
2372 */ 2373 if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || 2374 !tcf_proto_is_unlocked(name)) { 2375 rtnl_held = true; 2376 rtnl_lock(); 2377 } 2378 2379 err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); 2380 if (err) 2381 goto errout; 2382 2383 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, 2384 extack); 2385 if (IS_ERR(block)) { 2386 err = PTR_ERR(block); 2387 goto errout; 2388 } 2389 2390 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2391 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2392 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2393 err = -EINVAL; 2394 goto errout; 2395 } 2396 chain = tcf_chain_get(block, chain_index, false); 2397 if (!chain) { 2398 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 2399 err = -EINVAL; 2400 goto errout; 2401 } 2402 2403 mutex_lock(&chain->filter_chain_lock); 2404 tp = tcf_chain_tp_find(chain, &chain_info, protocol, 2405 prio, false); 2406 mutex_unlock(&chain->filter_chain_lock); 2407 if (!tp || IS_ERR(tp)) { 2408 NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); 2409 err = tp ? PTR_ERR(tp) : -ENOENT; 2410 goto errout; 2411 } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { 2412 NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); 2413 err = -EINVAL; 2414 goto errout; 2415 } 2416 2417 fh = tp->ops->get(tp, t->tcm_handle); 2418 2419 if (!fh) { 2420 NL_SET_ERR_MSG(extack, "Specified filter handle not found"); 2421 err = -ENOENT; 2422 } else { 2423 err = tfilter_notify(net, skb, n, tp, block, q, parent, 2424 fh, RTM_NEWTFILTER, true, rtnl_held); 2425 if (err < 0) 2426 NL_SET_ERR_MSG(extack, "Failed to send filter notify message"); 2427 } 2428 2429 tfilter_put(tp, fh); 2430 errout: 2431 if (chain) { 2432 if (tp && !IS_ERR(tp)) 2433 tcf_proto_put(tp, rtnl_held, NULL); 2434 tcf_chain_put(chain); 2435 } 2436 tcf_block_release(q, block, rtnl_held); 2437 2438 if (rtnl_held) 2439 rtnl_unlock(); 2440 2441 return err; 2442 } 2443 2444 struct tcf_dump_args { 2445 struct tcf_walker w; 2446 struct sk_buff *skb; 2447 struct netlink_callback *cb; 2448 struct tcf_block *block; 2449 struct Qdisc *q; 2450 u32 parent; 2451 bool terse_dump; 2452 }; 2453 2454 static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg) 2455 { 2456 struct tcf_dump_args *a = (void *)arg; 2457 struct net *net = sock_net(a->skb->sk); 2458 2459 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent, 2460 n, NETLINK_CB(a->cb->skb).portid, 2461 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, 2462 RTM_NEWTFILTER, a->terse_dump, true); 2463 } 2464 2465 static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, 2466 struct sk_buff *skb, struct netlink_callback *cb, 2467 long index_start, long *p_index, bool terse) 2468 { 2469 struct net *net = sock_net(skb->sk); 2470 struct tcf_block *block = chain->block; 2471 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2472 struct tcf_proto *tp, *tp_prev; 2473 struct tcf_dump_args arg; 2474 2475 for (tp = __tcf_get_next_proto(chain, NULL); 2476 tp; 2477 tp_prev = tp, 2478 tp = __tcf_get_next_proto(chain, tp), 2479 tcf_proto_put(tp_prev, true, NULL), 2480 (*p_index)++) { 2481 if (*p_index < index_start) 2482 continue; 2483 if (TC_H_MAJ(tcm->tcm_info) && 2484 TC_H_MAJ(tcm->tcm_info) != tp->prio) 2485 continue; 2486 if (TC_H_MIN(tcm->tcm_info) && 2487 TC_H_MIN(tcm->tcm_info) != tp->protocol) 2488 continue; 2489 if (*p_index > index_start) 2490 
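			/* Moved past the tp where the previous dump stopped:
			 * cb->args[1] and cb->args[2] hold walker state for
			 * that tp only, so clear the stale state here.
			 */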
memset(&cb->args[1], 0, 2491 sizeof(cb->args) - sizeof(cb->args[0])); 2492 if (cb->args[1] == 0) { 2493 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, 2494 NETLINK_CB(cb->skb).portid, 2495 cb->nlh->nlmsg_seq, NLM_F_MULTI, 2496 RTM_NEWTFILTER, false, true) <= 0) 2497 goto errout; 2498 cb->args[1] = 1; 2499 } 2500 if (!tp->ops->walk) 2501 continue; 2502 arg.w.fn = tcf_node_dump; 2503 arg.skb = skb; 2504 arg.cb = cb; 2505 arg.block = block; 2506 arg.q = q; 2507 arg.parent = parent; 2508 arg.w.stop = 0; 2509 arg.w.skip = cb->args[1] - 1; 2510 arg.w.count = 0; 2511 arg.w.cookie = cb->args[2]; 2512 arg.terse_dump = terse; 2513 tp->ops->walk(tp, &arg.w, true); 2514 cb->args[2] = arg.w.cookie; 2515 cb->args[1] = arg.w.count + 1; 2516 if (arg.w.stop) 2517 goto errout; 2518 } 2519 return true; 2520 2521 errout: 2522 tcf_proto_put(tp, true, NULL); 2523 return false; 2524 } 2525 2526 static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = { 2527 [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE), 2528 }; 2529 2530 /* called with RTNL */ 2531 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) 2532 { 2533 struct tcf_chain *chain, *chain_prev; 2534 struct net *net = sock_net(skb->sk); 2535 struct nlattr *tca[TCA_MAX + 1]; 2536 struct Qdisc *q = NULL; 2537 struct tcf_block *block; 2538 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2539 bool terse_dump = false; 2540 long index_start; 2541 long index; 2542 u32 parent; 2543 int err; 2544 2545 if (nlmsg_len(cb->nlh) < sizeof(*tcm)) 2546 return skb->len; 2547 2548 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, 2549 tcf_tfilter_dump_policy, cb->extack); 2550 if (err) 2551 return err; 2552 2553 if (tca[TCA_DUMP_FLAGS]) { 2554 struct nla_bitfield32 flags = 2555 nla_get_bitfield32(tca[TCA_DUMP_FLAGS]); 2556 2557 terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE; 2558 } 2559 2560 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { 2561 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); 2562 if (!block) 2563 goto out; 2564 /* If we work with block index, q is NULL and parent value 2565 * will never be used in the following code. The check 2566 * in tcf_fill_node prevents it. However, compiler does not 2567 * see that far, so set parent to zero to silence the warning 2568 * about parent being uninitialized. 
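	 * (A shared block is not owned by any single qdisc, so no meaningful
	 * parent handle exists for it in the first place.)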
2569 */ 2570 parent = 0; 2571 } else { 2572 const struct Qdisc_class_ops *cops; 2573 struct net_device *dev; 2574 unsigned long cl = 0; 2575 2576 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 2577 if (!dev) 2578 return skb->len; 2579 2580 parent = tcm->tcm_parent; 2581 if (!parent) 2582 q = dev->qdisc; 2583 else 2584 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 2585 if (!q) 2586 goto out; 2587 cops = q->ops->cl_ops; 2588 if (!cops) 2589 goto out; 2590 if (!cops->tcf_block) 2591 goto out; 2592 if (TC_H_MIN(tcm->tcm_parent)) { 2593 cl = cops->find(q, tcm->tcm_parent); 2594 if (cl == 0) 2595 goto out; 2596 } 2597 block = cops->tcf_block(q, cl, NULL); 2598 if (!block) 2599 goto out; 2600 parent = block->classid; 2601 if (tcf_block_shared(block)) 2602 q = NULL; 2603 } 2604 2605 index_start = cb->args[0]; 2606 index = 0; 2607 2608 for (chain = __tcf_get_next_chain(block, NULL); 2609 chain; 2610 chain_prev = chain, 2611 chain = __tcf_get_next_chain(block, chain), 2612 tcf_chain_put(chain_prev)) { 2613 if (tca[TCA_CHAIN] && 2614 nla_get_u32(tca[TCA_CHAIN]) != chain->index) 2615 continue; 2616 if (!tcf_chain_dump(chain, q, parent, skb, cb, 2617 index_start, &index, terse_dump)) { 2618 tcf_chain_put(chain); 2619 err = -EMSGSIZE; 2620 break; 2621 } 2622 } 2623 2624 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) 2625 tcf_block_refcnt_put(block, true); 2626 cb->args[0] = index; 2627 2628 out: 2629 /* If we did no progress, the error (EMSGSIZE) is real */ 2630 if (skb->len == 0 && err) 2631 return err; 2632 return skb->len; 2633 } 2634 2635 static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops, 2636 void *tmplt_priv, u32 chain_index, 2637 struct net *net, struct sk_buff *skb, 2638 struct tcf_block *block, 2639 u32 portid, u32 seq, u16 flags, int event) 2640 { 2641 unsigned char *b = skb_tail_pointer(skb); 2642 const struct tcf_proto_ops *ops; 2643 struct nlmsghdr *nlh; 2644 struct tcmsg *tcm; 2645 void *priv; 2646 2647 ops = tmplt_ops; 2648 priv = tmplt_priv; 2649 2650 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); 2651 if (!nlh) 2652 goto out_nlmsg_trim; 2653 tcm = nlmsg_data(nlh); 2654 tcm->tcm_family = AF_UNSPEC; 2655 tcm->tcm__pad1 = 0; 2656 tcm->tcm__pad2 = 0; 2657 tcm->tcm_handle = 0; 2658 if (block->q) { 2659 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex; 2660 tcm->tcm_parent = block->q->handle; 2661 } else { 2662 tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; 2663 tcm->tcm_block_index = block->index; 2664 } 2665 2666 if (nla_put_u32(skb, TCA_CHAIN, chain_index)) 2667 goto nla_put_failure; 2668 2669 if (ops) { 2670 if (nla_put_string(skb, TCA_KIND, ops->kind)) 2671 goto nla_put_failure; 2672 if (ops->tmplt_dump(skb, net, priv) < 0) 2673 goto nla_put_failure; 2674 } 2675 2676 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 2677 return skb->len; 2678 2679 out_nlmsg_trim: 2680 nla_put_failure: 2681 nlmsg_trim(skb, b); 2682 return -EMSGSIZE; 2683 } 2684 2685 static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, 2686 u32 seq, u16 flags, int event, bool unicast) 2687 { 2688 u32 portid = oskb ? 
NETLINK_CB(oskb).portid : 0; 2689 struct tcf_block *block = chain->block; 2690 struct net *net = block->net; 2691 struct sk_buff *skb; 2692 int err = 0; 2693 2694 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2695 if (!skb) 2696 return -ENOBUFS; 2697 2698 if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, 2699 chain->index, net, skb, block, portid, 2700 seq, flags, event) <= 0) { 2701 kfree_skb(skb); 2702 return -EINVAL; 2703 } 2704 2705 if (unicast) 2706 err = rtnl_unicast(skb, net, portid); 2707 else 2708 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, 2709 flags & NLM_F_ECHO); 2710 2711 return err; 2712 } 2713 2714 static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops, 2715 void *tmplt_priv, u32 chain_index, 2716 struct tcf_block *block, struct sk_buff *oskb, 2717 u32 seq, u16 flags, bool unicast) 2718 { 2719 u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; 2720 struct net *net = block->net; 2721 struct sk_buff *skb; 2722 2723 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2724 if (!skb) 2725 return -ENOBUFS; 2726 2727 if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb, 2728 block, portid, seq, flags, RTM_DELCHAIN) <= 0) { 2729 kfree_skb(skb); 2730 return -EINVAL; 2731 } 2732 2733 if (unicast) 2734 return rtnl_unicast(skb, net, portid); 2735 2736 return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO); 2737 } 2738 2739 static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net, 2740 struct nlattr **tca, 2741 struct netlink_ext_ack *extack) 2742 { 2743 const struct tcf_proto_ops *ops; 2744 char name[IFNAMSIZ]; 2745 void *tmplt_priv; 2746 2747 /* If kind is not set, user did not specify template. */ 2748 if (!tca[TCA_KIND]) 2749 return 0; 2750 2751 if (tcf_proto_check_kind(tca[TCA_KIND], name)) { 2752 NL_SET_ERR_MSG(extack, "Specified TC chain template name too long"); 2753 return -EINVAL; 2754 } 2755 2756 ops = tcf_proto_lookup_ops(name, true, extack); 2757 if (IS_ERR(ops)) 2758 return PTR_ERR(ops); 2759 if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) { 2760 NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier"); module_put(ops->owner); 2761 return -EOPNOTSUPP; 2762 } 2763 2764 tmplt_priv = ops->tmplt_create(net, chain, tca, extack); 2765 if (IS_ERR(tmplt_priv)) { 2766 module_put(ops->owner); 2767 return PTR_ERR(tmplt_priv); 2768 } 2769 chain->tmplt_ops = ops; 2770 chain->tmplt_priv = tmplt_priv; 2771 return 0; 2772 } 2773 2774 static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops, 2775 void *tmplt_priv) 2776 { 2777 /* If template ops are not set, there is no template to destroy and thus no work to do for us.
*/ 2778 if (!tmplt_ops) 2779 return; 2780 2781 tmplt_ops->tmplt_destroy(tmplt_priv); 2782 module_put(tmplt_ops->owner); 2783 } 2784 2785 /* Add/delete/get a chain */ 2786 2787 static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, 2788 struct netlink_ext_ack *extack) 2789 { 2790 struct net *net = sock_net(skb->sk); 2791 struct nlattr *tca[TCA_MAX + 1]; 2792 struct tcmsg *t; 2793 u32 parent; 2794 u32 chain_index; 2795 struct Qdisc *q = NULL; 2796 struct tcf_chain *chain = NULL; 2797 struct tcf_block *block; 2798 unsigned long cl; 2799 int err; 2800 2801 if (n->nlmsg_type != RTM_GETCHAIN && 2802 !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) 2803 return -EPERM; 2804 2805 replay: 2806 err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, 2807 rtm_tca_policy, extack); 2808 if (err < 0) 2809 return err; 2810 2811 t = nlmsg_data(n); 2812 parent = t->tcm_parent; 2813 cl = 0; 2814 2815 block = tcf_block_find(net, &q, &parent, &cl, 2816 t->tcm_ifindex, t->tcm_block_index, extack); 2817 if (IS_ERR(block)) 2818 return PTR_ERR(block); 2819 2820 chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0; 2821 if (chain_index > TC_ACT_EXT_VAL_MASK) { 2822 NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); 2823 err = -EINVAL; 2824 goto errout_block; 2825 } 2826 2827 mutex_lock(&block->lock); 2828 chain = tcf_chain_lookup(block, chain_index); 2829 if (n->nlmsg_type == RTM_NEWCHAIN) { 2830 if (chain) { 2831 if (tcf_chain_held_by_acts_only(chain)) { 2832 /* The chain exists only because there is 2833 * some action referencing it. 2834 */ 2835 tcf_chain_hold(chain); 2836 } else { 2837 NL_SET_ERR_MSG(extack, "Filter chain already exists"); 2838 err = -EEXIST; 2839 goto errout_block_locked; 2840 } 2841 } else { 2842 if (!(n->nlmsg_flags & NLM_F_CREATE)) { 2843 NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain"); 2844 err = -ENOENT; 2845 goto errout_block_locked; 2846 } 2847 chain = tcf_chain_create(block, chain_index); 2848 if (!chain) { 2849 NL_SET_ERR_MSG(extack, "Failed to create filter chain"); 2850 err = -ENOMEM; 2851 goto errout_block_locked; 2852 } 2853 } 2854 } else { 2855 if (!chain || tcf_chain_held_by_acts_only(chain)) { 2856 NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); 2857 err = -EINVAL; 2858 goto errout_block_locked; 2859 } 2860 tcf_chain_hold(chain); 2861 } 2862 2863 if (n->nlmsg_type == RTM_NEWCHAIN) { 2864 /* Modifying chain requires holding parent block lock. In case 2865 * the chain was successfully added, take a reference to the 2866 * chain. This ensures that an empty chain does not disappear at 2867 * the end of this function. 2868 */ 2869 tcf_chain_hold(chain); 2870 chain->explicitly_created = true; 2871 } 2872 mutex_unlock(&block->lock); 2873 2874 switch (n->nlmsg_type) { 2875 case RTM_NEWCHAIN: 2876 err = tc_chain_tmplt_add(chain, net, tca, extack); 2877 if (err) { 2878 tcf_chain_put_explicitly_created(chain); 2879 goto errout; 2880 } 2881 2882 tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, 2883 RTM_NEWCHAIN, false); 2884 break; 2885 case RTM_DELCHAIN: 2886 tfilter_notify_chain(net, skb, block, q, parent, n, 2887 chain, RTM_DELTFILTER); 2888 /* Flush the chain first as the user requested chain removal. */ 2889 tcf_chain_flush(chain, true); 2890 /* In case the chain was successfully deleted, put a reference 2891 * to the chain previously taken during addition. 
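	 * That reference is the explicitly_created hold taken under
	 * block->lock when the chain was added via RTM_NEWCHAIN above.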
2892 */ 2893 tcf_chain_put_explicitly_created(chain); 2894 break; 2895 case RTM_GETCHAIN: 2896 err = tc_chain_notify(chain, skb, n->nlmsg_seq, 2897 n->nlmsg_flags, n->nlmsg_type, true); 2898 if (err < 0) 2899 NL_SET_ERR_MSG(extack, "Failed to send chain notify message"); 2900 break; 2901 default: 2902 err = -EOPNOTSUPP; 2903 NL_SET_ERR_MSG(extack, "Unsupported message type"); 2904 goto errout; 2905 } 2906 2907 errout: 2908 tcf_chain_put(chain); 2909 errout_block: 2910 tcf_block_release(q, block, true); 2911 if (err == -EAGAIN) 2912 /* Replay the request. */ 2913 goto replay; 2914 return err; 2915 2916 errout_block_locked: 2917 mutex_unlock(&block->lock); 2918 goto errout_block; 2919 } 2920 2921 /* called with RTNL */ 2922 static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) 2923 { 2924 struct net *net = sock_net(skb->sk); 2925 struct nlattr *tca[TCA_MAX + 1]; 2926 struct Qdisc *q = NULL; 2927 struct tcf_block *block; 2928 struct tcmsg *tcm = nlmsg_data(cb->nlh); 2929 struct tcf_chain *chain; 2930 long index_start; 2931 long index; 2932 int err; 2933 2934 if (nlmsg_len(cb->nlh) < sizeof(*tcm)) 2935 return skb->len; 2936 2937 err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, 2938 rtm_tca_policy, cb->extack); 2939 if (err) 2940 return err; 2941 2942 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { 2943 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); 2944 if (!block) 2945 goto out; 2946 } else { 2947 const struct Qdisc_class_ops *cops; 2948 struct net_device *dev; 2949 unsigned long cl = 0; 2950 2951 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 2952 if (!dev) 2953 return skb->len; 2954 2955 if (!tcm->tcm_parent) 2956 q = dev->qdisc; 2957 else 2958 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); 2959 2960 if (!q) 2961 goto out; 2962 cops = q->ops->cl_ops; 2963 if (!cops) 2964 goto out; 2965 if (!cops->tcf_block) 2966 goto out; 2967 if (TC_H_MIN(tcm->tcm_parent)) { 2968 cl = cops->find(q, tcm->tcm_parent); 2969 if (cl == 0) 2970 goto out; 2971 } 2972 block = cops->tcf_block(q, cl, NULL); 2973 if (!block) 2974 goto out; 2975 if (tcf_block_shared(block)) 2976 q = NULL; 2977 } 2978 2979 index_start = cb->args[0]; 2980 index = 0; 2981 2982 mutex_lock(&block->lock); 2983 list_for_each_entry(chain, &block->chain_list, list) { 2984 if ((tca[TCA_CHAIN] && 2985 nla_get_u32(tca[TCA_CHAIN]) != chain->index)) 2986 continue; 2987 if (index < index_start) { 2988 index++; 2989 continue; 2990 } 2991 if (tcf_chain_held_by_acts_only(chain)) 2992 continue; 2993 err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, 2994 chain->index, net, skb, block, 2995 NETLINK_CB(cb->skb).portid, 2996 cb->nlh->nlmsg_seq, NLM_F_MULTI, 2997 RTM_NEWCHAIN); 2998 if (err <= 0) 2999 break; 3000 index++; 3001 } 3002 mutex_unlock(&block->lock); 3003 3004 if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) 3005 tcf_block_refcnt_put(block, true); 3006 cb->args[0] = index; 3007 3008 out: 3009 /* If we did no progress, the error (EMSGSIZE) is real */ 3010 if (skb->len == 0 && err) 3011 return err; 3012 return skb->len; 3013 } 3014 3015 void tcf_exts_destroy(struct tcf_exts *exts) 3016 { 3017 #ifdef CONFIG_NET_CLS_ACT 3018 if (exts->actions) { 3019 tcf_action_destroy(exts->actions, TCA_ACT_UNBIND); 3020 kfree(exts->actions); 3021 } 3022 exts->nr_actions = 0; 3023 #endif 3024 } 3025 EXPORT_SYMBOL(tcf_exts_destroy); 3026 3027 int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, 3028 struct nlattr *rate_tlv, struct tcf_exts *exts, 3029 u32 flags, struct 
netlink_ext_ack *extack) 3030 { 3031 #ifdef CONFIG_NET_CLS_ACT 3032 { 3033 int init_res[TCA_ACT_MAX_PRIO] = {}; 3034 struct tc_action *act; 3035 size_t attr_size = 0; 3036 3037 if (exts->police && tb[exts->police]) { 3038 struct tc_action_ops *a_o; 3039 3040 a_o = tc_action_load_ops(tb[exts->police], true, 3041 !(flags & TCA_ACT_FLAGS_NO_RTNL), 3042 extack); 3043 if (IS_ERR(a_o)) 3044 return PTR_ERR(a_o); 3045 flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND; 3046 act = tcf_action_init_1(net, tp, tb[exts->police], 3047 rate_tlv, a_o, init_res, flags, 3048 extack); 3049 module_put(a_o->owner); 3050 if (IS_ERR(act)) 3051 return PTR_ERR(act); 3052 3053 act->type = exts->type = TCA_OLD_COMPAT; 3054 exts->actions[0] = act; 3055 exts->nr_actions = 1; 3056 tcf_idr_insert_many(exts->actions); 3057 } else if (exts->action && tb[exts->action]) { 3058 int err; 3059 3060 flags |= TCA_ACT_FLAGS_BIND; 3061 err = tcf_action_init(net, tp, tb[exts->action], 3062 rate_tlv, exts->actions, init_res, 3063 &attr_size, flags, extack); 3064 if (err < 0) 3065 return err; 3066 exts->nr_actions = err; 3067 } 3068 } 3069 #else 3070 if ((exts->action && tb[exts->action]) || 3071 (exts->police && tb[exts->police])) { 3072 NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)"); 3073 return -EOPNOTSUPP; 3074 } 3075 #endif 3076 3077 return 0; 3078 } 3079 EXPORT_SYMBOL(tcf_exts_validate); 3080 3081 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src) 3082 { 3083 #ifdef CONFIG_NET_CLS_ACT 3084 struct tcf_exts old = *dst; 3085 3086 *dst = *src; 3087 tcf_exts_destroy(&old); 3088 #endif 3089 } 3090 EXPORT_SYMBOL(tcf_exts_change); 3091 3092 #ifdef CONFIG_NET_CLS_ACT 3093 static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts) 3094 { 3095 if (exts->nr_actions == 0) 3096 return NULL; 3097 else 3098 return exts->actions[0]; 3099 } 3100 #endif 3101 3102 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) 3103 { 3104 #ifdef CONFIG_NET_CLS_ACT 3105 struct nlattr *nest; 3106 3107 if (exts->action && tcf_exts_has_actions(exts)) { 3108 /* 3109 * again for backward compatible mode - we want 3110 * to work with both old and new modes of entering 3111 * tc data even if iproute2 was newer - jhs 3112 */ 3113 if (exts->type != TCA_OLD_COMPAT) { 3114 nest = nla_nest_start_noflag(skb, exts->action); 3115 if (nest == NULL) 3116 goto nla_put_failure; 3117 3118 if (tcf_action_dump(skb, exts->actions, 0, 0, false) 3119 < 0) 3120 goto nla_put_failure; 3121 nla_nest_end(skb, nest); 3122 } else if (exts->police) { 3123 struct tc_action *act = tcf_exts_first_act(exts); 3124 nest = nla_nest_start_noflag(skb, exts->police); 3125 if (nest == NULL || !act) 3126 goto nla_put_failure; 3127 if (tcf_action_dump_old(skb, act, 0, 0) < 0) 3128 goto nla_put_failure; 3129 nla_nest_end(skb, nest); 3130 } 3131 } 3132 return 0; 3133 3134 nla_put_failure: 3135 nla_nest_cancel(skb, nest); 3136 return -1; 3137 #else 3138 return 0; 3139 #endif 3140 } 3141 EXPORT_SYMBOL(tcf_exts_dump); 3142 3143 int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts) 3144 { 3145 #ifdef CONFIG_NET_CLS_ACT 3146 struct nlattr *nest; 3147 3148 if (!exts->action || !tcf_exts_has_actions(exts)) 3149 return 0; 3150 3151 nest = nla_nest_start_noflag(skb, exts->action); 3152 if (!nest) 3153 goto nla_put_failure; 3154 3155 if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0) 3156 goto nla_put_failure; 3157 nla_nest_end(skb, nest); 3158 return 0; 3159 3160 nla_put_failure: 3161 nla_nest_cancel(skb, 
nest); 3162 return -1; 3163 #else 3164 return 0; 3165 #endif 3166 } 3167 EXPORT_SYMBOL(tcf_exts_terse_dump); 3168 3169 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts) 3170 { 3171 #ifdef CONFIG_NET_CLS_ACT 3172 struct tc_action *a = tcf_exts_first_act(exts); 3173 if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0) 3174 return -1; 3175 #endif 3176 return 0; 3177 } 3178 EXPORT_SYMBOL(tcf_exts_dump_stats); 3179 3180 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) 3181 { 3182 if (*flags & TCA_CLS_FLAGS_IN_HW) 3183 return; 3184 *flags |= TCA_CLS_FLAGS_IN_HW; 3185 atomic_inc(&block->offloadcnt); 3186 } 3187 3188 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) 3189 { 3190 if (!(*flags & TCA_CLS_FLAGS_IN_HW)) 3191 return; 3192 *flags &= ~TCA_CLS_FLAGS_IN_HW; 3193 atomic_dec(&block->offloadcnt); 3194 } 3195 3196 static void tc_cls_offload_cnt_update(struct tcf_block *block, 3197 struct tcf_proto *tp, u32 *cnt, 3198 u32 *flags, u32 diff, bool add) 3199 { 3200 lockdep_assert_held(&block->cb_lock); 3201 3202 spin_lock(&tp->lock); 3203 if (add) { 3204 if (!*cnt) 3205 tcf_block_offload_inc(block, flags); 3206 *cnt += diff; 3207 } else { 3208 *cnt -= diff; 3209 if (!*cnt) 3210 tcf_block_offload_dec(block, flags); 3211 } 3212 spin_unlock(&tp->lock); 3213 } 3214 3215 static void 3216 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp, 3217 u32 *cnt, u32 *flags) 3218 { 3219 lockdep_assert_held(&block->cb_lock); 3220 3221 spin_lock(&tp->lock); 3222 tcf_block_offload_dec(block, flags); 3223 *cnt = 0; 3224 spin_unlock(&tp->lock); 3225 } 3226 3227 static int 3228 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, 3229 void *type_data, bool err_stop) 3230 { 3231 struct flow_block_cb *block_cb; 3232 int ok_count = 0; 3233 int err; 3234 3235 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) { 3236 err = block_cb->cb(type, type_data, block_cb->cb_priv); 3237 if (err) { 3238 if (err_stop) 3239 return err; 3240 } else { 3241 ok_count++; 3242 } 3243 } 3244 return ok_count; 3245 } 3246 3247 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, 3248 void *type_data, bool err_stop, bool rtnl_held) 3249 { 3250 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3251 int ok_count; 3252 3253 retry: 3254 if (take_rtnl) 3255 rtnl_lock(); 3256 down_read(&block->cb_lock); 3257 /* Need to obtain rtnl lock if block is bound to devs that require it. 3258 * In block bind code cb_lock is obtained while holding rtnl, so we must 3259 * obtain the locks in same order here. 3260 */ 3261 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3262 up_read(&block->cb_lock); 3263 take_rtnl = true; 3264 goto retry; 3265 } 3266 3267 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3268 3269 up_read(&block->cb_lock); 3270 if (take_rtnl) 3271 rtnl_unlock(); 3272 return ok_count; 3273 } 3274 EXPORT_SYMBOL(tc_setup_cb_call); 3275 3276 /* Non-destructive filter add. If filter that wasn't already in hardware is 3277 * successfully offloaded, increment block offloads counter. On failure, 3278 * previously offloaded filter is considered to be intact and offloads counter 3279 * is not decremented. 
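 *
 * Rough usage sketch from a classifier's offload path (loosely modelled
 * on cls_flower; the local variable names here are illustrative only):
 *
 *	cls_flower.command = FLOW_CLS_REPLACE;
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &flags, &in_hw_count, rtnl_held);
 *	if (err && skip_sw)
 *		goto errout;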
3280 */ 3281 3282 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp, 3283 enum tc_setup_type type, void *type_data, bool err_stop, 3284 u32 *flags, unsigned int *in_hw_count, bool rtnl_held) 3285 { 3286 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3287 int ok_count; 3288 3289 retry: 3290 if (take_rtnl) 3291 rtnl_lock(); 3292 down_read(&block->cb_lock); 3293 /* Need to obtain rtnl lock if block is bound to devs that require it. 3294 * In block bind code cb_lock is obtained while holding rtnl, so we must 3295 * obtain the locks in same order here. 3296 */ 3297 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3298 up_read(&block->cb_lock); 3299 take_rtnl = true; 3300 goto retry; 3301 } 3302 3303 /* Make sure all netdevs sharing this block are offload-capable. */ 3304 if (block->nooffloaddevcnt && err_stop) { 3305 ok_count = -EOPNOTSUPP; 3306 goto err_unlock; 3307 } 3308 3309 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3310 if (ok_count < 0) 3311 goto err_unlock; 3312 3313 if (tp->ops->hw_add) 3314 tp->ops->hw_add(tp, type_data); 3315 if (ok_count > 0) 3316 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 3317 ok_count, true); 3318 err_unlock: 3319 up_read(&block->cb_lock); 3320 if (take_rtnl) 3321 rtnl_unlock(); 3322 return ok_count < 0 ? ok_count : 0; 3323 } 3324 EXPORT_SYMBOL(tc_setup_cb_add); 3325 3326 /* Destructive filter replace. If filter that wasn't already in hardware is 3327 * successfully offloaded, increment block offload counter. On failure, 3328 * previously offloaded filter is considered to be destroyed and offload counter 3329 * is decremented. 3330 */ 3331 3332 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp, 3333 enum tc_setup_type type, void *type_data, bool err_stop, 3334 u32 *old_flags, unsigned int *old_in_hw_count, 3335 u32 *new_flags, unsigned int *new_in_hw_count, 3336 bool rtnl_held) 3337 { 3338 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3339 int ok_count; 3340 3341 retry: 3342 if (take_rtnl) 3343 rtnl_lock(); 3344 down_read(&block->cb_lock); 3345 /* Need to obtain rtnl lock if block is bound to devs that require it. 3346 * In block bind code cb_lock is obtained while holding rtnl, so we must 3347 * obtain the locks in same order here. 3348 */ 3349 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3350 up_read(&block->cb_lock); 3351 take_rtnl = true; 3352 goto retry; 3353 } 3354 3355 /* Make sure all netdevs sharing this block are offload-capable. */ 3356 if (block->nooffloaddevcnt && err_stop) { 3357 ok_count = -EOPNOTSUPP; 3358 goto err_unlock; 3359 } 3360 3361 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags); 3362 if (tp->ops->hw_del) 3363 tp->ops->hw_del(tp, type_data); 3364 3365 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3366 if (ok_count < 0) 3367 goto err_unlock; 3368 3369 if (tp->ops->hw_add) 3370 tp->ops->hw_add(tp, type_data); 3371 if (ok_count > 0) 3372 tc_cls_offload_cnt_update(block, tp, new_in_hw_count, 3373 new_flags, ok_count, true); 3374 err_unlock: 3375 up_read(&block->cb_lock); 3376 if (take_rtnl) 3377 rtnl_unlock(); 3378 return ok_count < 0 ? ok_count : 0; 3379 } 3380 EXPORT_SYMBOL(tc_setup_cb_replace); 3381 3382 /* Destroy filter and decrement block offload counter, if filter was previously 3383 * offloaded. 
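 * Unlike the add/replace paths above, the in_hw count is reset and
 * ->hw_del() invoked regardless of the callback results, since the filter
 * is going away either way.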
3384 */ 3385 3386 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp, 3387 enum tc_setup_type type, void *type_data, bool err_stop, 3388 u32 *flags, unsigned int *in_hw_count, bool rtnl_held) 3389 { 3390 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; 3391 int ok_count; 3392 3393 retry: 3394 if (take_rtnl) 3395 rtnl_lock(); 3396 down_read(&block->cb_lock); 3397 /* Need to obtain rtnl lock if block is bound to devs that require it. 3398 * In block bind code cb_lock is obtained while holding rtnl, so we must 3399 * obtain the locks in same order here. 3400 */ 3401 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { 3402 up_read(&block->cb_lock); 3403 take_rtnl = true; 3404 goto retry; 3405 } 3406 3407 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); 3408 3409 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags); 3410 if (tp->ops->hw_del) 3411 tp->ops->hw_del(tp, type_data); 3412 3413 up_read(&block->cb_lock); 3414 if (take_rtnl) 3415 rtnl_unlock(); 3416 return ok_count < 0 ? ok_count : 0; 3417 } 3418 EXPORT_SYMBOL(tc_setup_cb_destroy); 3419 3420 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, 3421 bool add, flow_setup_cb_t *cb, 3422 enum tc_setup_type type, void *type_data, 3423 void *cb_priv, u32 *flags, unsigned int *in_hw_count) 3424 { 3425 int err = cb(type, type_data, cb_priv); 3426 3427 if (err) { 3428 if (add && tc_skip_sw(*flags)) 3429 return err; 3430 } else { 3431 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1, 3432 add); 3433 } 3434 3435 return 0; 3436 } 3437 EXPORT_SYMBOL(tc_setup_cb_reoffload); 3438 3439 static int tcf_act_get_cookie(struct flow_action_entry *entry, 3440 const struct tc_action *act) 3441 { 3442 struct tc_cookie *cookie; 3443 int err = 0; 3444 3445 rcu_read_lock(); 3446 cookie = rcu_dereference(act->act_cookie); 3447 if (cookie) { 3448 entry->cookie = flow_action_cookie_create(cookie->data, 3449 cookie->len, 3450 GFP_ATOMIC); 3451 if (!entry->cookie) 3452 err = -ENOMEM; 3453 } 3454 rcu_read_unlock(); 3455 return err; 3456 } 3457 3458 static void tcf_act_put_cookie(struct flow_action_entry *entry) 3459 { 3460 flow_action_cookie_destroy(entry->cookie); 3461 } 3462 3463 void tc_cleanup_flow_action(struct flow_action *flow_action) 3464 { 3465 struct flow_action_entry *entry; 3466 int i; 3467 3468 flow_action_for_each(i, entry, flow_action) { 3469 tcf_act_put_cookie(entry); 3470 if (entry->destructor) 3471 entry->destructor(entry->destructor_priv); 3472 } 3473 } 3474 EXPORT_SYMBOL(tc_cleanup_flow_action); 3475 3476 static void tcf_mirred_get_dev(struct flow_action_entry *entry, 3477 const struct tc_action *act) 3478 { 3479 #ifdef CONFIG_NET_CLS_ACT 3480 entry->dev = act->ops->get_dev(act, &entry->destructor); 3481 if (!entry->dev) 3482 return; 3483 entry->destructor_priv = entry->dev; 3484 #endif 3485 } 3486 3487 static void tcf_tunnel_encap_put_tunnel(void *priv) 3488 { 3489 struct ip_tunnel_info *tunnel = priv; 3490 3491 kfree(tunnel); 3492 } 3493 3494 static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry, 3495 const struct tc_action *act) 3496 { 3497 entry->tunnel = tcf_tunnel_info_copy(act); 3498 if (!entry->tunnel) 3499 return -ENOMEM; 3500 entry->destructor = tcf_tunnel_encap_put_tunnel; 3501 entry->destructor_priv = entry->tunnel; 3502 return 0; 3503 } 3504 3505 static void tcf_sample_get_group(struct flow_action_entry *entry, 3506 const struct tc_action *act) 3507 { 3508 #ifdef CONFIG_NET_CLS_ACT 3509 entry->sample.psample_group = 3510 
act->ops->get_psample_group(act, &entry->destructor); 3511 entry->destructor_priv = entry->sample.psample_group; 3512 #endif 3513 } 3514 3515 static void tcf_gate_entry_destructor(void *priv) 3516 { 3517 struct action_gate_entry *oe = priv; 3518 3519 kfree(oe); 3520 } 3521 3522 static int tcf_gate_get_entries(struct flow_action_entry *entry, 3523 const struct tc_action *act) 3524 { 3525 entry->gate.entries = tcf_gate_get_list(act); 3526 3527 if (!entry->gate.entries) 3528 return -EINVAL; 3529 3530 entry->destructor = tcf_gate_entry_destructor; 3531 entry->destructor_priv = entry->gate.entries; 3532 3533 return 0; 3534 } 3535 3536 static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats) 3537 { 3538 if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY)) 3539 return FLOW_ACTION_HW_STATS_DONT_CARE; 3540 else if (!hw_stats) 3541 return FLOW_ACTION_HW_STATS_DISABLED; 3542 3543 return hw_stats; 3544 } 3545 3546 int tc_setup_flow_action(struct flow_action *flow_action, 3547 const struct tcf_exts *exts) 3548 { 3549 struct tc_action *act; 3550 int i, j, k, err = 0; 3551 3552 BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY); 3553 BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE); 3554 BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED); 3555 3556 if (!exts) 3557 return 0; 3558 3559 j = 0; 3560 tcf_exts_for_each_action(i, act, exts) { 3561 struct flow_action_entry *entry; 3562 3563 entry = &flow_action->entries[j]; 3564 spin_lock_bh(&act->tcfa_lock); 3565 err = tcf_act_get_cookie(entry, act); 3566 if (err) 3567 goto err_out_locked; 3568 3569 entry->hw_stats = tc_act_hw_stats(act->hw_stats); 3570 3571 if (is_tcf_gact_ok(act)) { 3572 entry->id = FLOW_ACTION_ACCEPT; 3573 } else if (is_tcf_gact_shot(act)) { 3574 entry->id = FLOW_ACTION_DROP; 3575 } else if (is_tcf_gact_trap(act)) { 3576 entry->id = FLOW_ACTION_TRAP; 3577 } else if (is_tcf_gact_goto_chain(act)) { 3578 entry->id = FLOW_ACTION_GOTO; 3579 entry->chain_index = tcf_gact_goto_chain_index(act); 3580 } else if (is_tcf_mirred_egress_redirect(act)) { 3581 entry->id = FLOW_ACTION_REDIRECT; 3582 tcf_mirred_get_dev(entry, act); 3583 } else if (is_tcf_mirred_egress_mirror(act)) { 3584 entry->id = FLOW_ACTION_MIRRED; 3585 tcf_mirred_get_dev(entry, act); 3586 } else if (is_tcf_mirred_ingress_redirect(act)) { 3587 entry->id = FLOW_ACTION_REDIRECT_INGRESS; 3588 tcf_mirred_get_dev(entry, act); 3589 } else if (is_tcf_mirred_ingress_mirror(act)) { 3590 entry->id = FLOW_ACTION_MIRRED_INGRESS; 3591 tcf_mirred_get_dev(entry, act); 3592 } else if (is_tcf_vlan(act)) { 3593 switch (tcf_vlan_action(act)) { 3594 case TCA_VLAN_ACT_PUSH: 3595 entry->id = FLOW_ACTION_VLAN_PUSH; 3596 entry->vlan.vid = tcf_vlan_push_vid(act); 3597 entry->vlan.proto = tcf_vlan_push_proto(act); 3598 entry->vlan.prio = tcf_vlan_push_prio(act); 3599 break; 3600 case TCA_VLAN_ACT_POP: 3601 entry->id = FLOW_ACTION_VLAN_POP; 3602 break; 3603 case TCA_VLAN_ACT_MODIFY: 3604 entry->id = FLOW_ACTION_VLAN_MANGLE; 3605 entry->vlan.vid = tcf_vlan_push_vid(act); 3606 entry->vlan.proto = tcf_vlan_push_proto(act); 3607 entry->vlan.prio = tcf_vlan_push_prio(act); 3608 break; 3609 default: 3610 err = -EOPNOTSUPP; 3611 goto err_out_locked; 3612 } 3613 } else if (is_tcf_tunnel_set(act)) { 3614 entry->id = FLOW_ACTION_TUNNEL_ENCAP; 3615 err = tcf_tunnel_encap_get_tunnel(entry, act); 3616 if (err) 3617 goto err_out_locked; 3618 } else if (is_tcf_tunnel_release(act)) { 3619 entry->id = FLOW_ACTION_TUNNEL_DECAP; 3620 } else if (is_tcf_pedit(act)) { 3621 
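			/* Each pedit key expands into its own flow_action
			 * entry; tcf_exts_num_actions() sized the array per
			 * key, hence the manual ++j while walking the keys.
			 */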
for (k = 0; k < tcf_pedit_nkeys(act); k++) { 3622 switch (tcf_pedit_cmd(act, k)) { 3623 case TCA_PEDIT_KEY_EX_CMD_SET: 3624 entry->id = FLOW_ACTION_MANGLE; 3625 break; 3626 case TCA_PEDIT_KEY_EX_CMD_ADD: 3627 entry->id = FLOW_ACTION_ADD; 3628 break; 3629 default: 3630 err = -EOPNOTSUPP; 3631 goto err_out_locked; 3632 } 3633 entry->mangle.htype = tcf_pedit_htype(act, k); 3634 entry->mangle.mask = tcf_pedit_mask(act, k); 3635 entry->mangle.val = tcf_pedit_val(act, k); 3636 entry->mangle.offset = tcf_pedit_offset(act, k); 3637 entry->hw_stats = tc_act_hw_stats(act->hw_stats); 3638 entry = &flow_action->entries[++j]; 3639 } 3640 } else if (is_tcf_csum(act)) { 3641 entry->id = FLOW_ACTION_CSUM; 3642 entry->csum_flags = tcf_csum_update_flags(act); 3643 } else if (is_tcf_skbedit_mark(act)) { 3644 entry->id = FLOW_ACTION_MARK; 3645 entry->mark = tcf_skbedit_mark(act); 3646 } else if (is_tcf_sample(act)) { 3647 entry->id = FLOW_ACTION_SAMPLE; 3648 entry->sample.trunc_size = tcf_sample_trunc_size(act); 3649 entry->sample.truncate = tcf_sample_truncate(act); 3650 entry->sample.rate = tcf_sample_rate(act); 3651 tcf_sample_get_group(entry, act); 3652 } else if (is_tcf_police(act)) { 3653 entry->id = FLOW_ACTION_POLICE; 3654 entry->police.burst = tcf_police_burst(act); 3655 entry->police.rate_bytes_ps = 3656 tcf_police_rate_bytes_ps(act); 3657 entry->police.burst_pkt = tcf_police_burst_pkt(act); 3658 entry->police.rate_pkt_ps = 3659 tcf_police_rate_pkt_ps(act); 3660 entry->police.mtu = tcf_police_tcfp_mtu(act); 3661 entry->police.index = act->tcfa_index; 3662 } else if (is_tcf_ct(act)) { 3663 entry->id = FLOW_ACTION_CT; 3664 entry->ct.action = tcf_ct_action(act); 3665 entry->ct.zone = tcf_ct_zone(act); 3666 entry->ct.flow_table = tcf_ct_ft(act); 3667 } else if (is_tcf_mpls(act)) { 3668 switch (tcf_mpls_action(act)) { 3669 case TCA_MPLS_ACT_PUSH: 3670 entry->id = FLOW_ACTION_MPLS_PUSH; 3671 entry->mpls_push.proto = tcf_mpls_proto(act); 3672 entry->mpls_push.label = tcf_mpls_label(act); 3673 entry->mpls_push.tc = tcf_mpls_tc(act); 3674 entry->mpls_push.bos = tcf_mpls_bos(act); 3675 entry->mpls_push.ttl = tcf_mpls_ttl(act); 3676 break; 3677 case TCA_MPLS_ACT_POP: 3678 entry->id = FLOW_ACTION_MPLS_POP; 3679 entry->mpls_pop.proto = tcf_mpls_proto(act); 3680 break; 3681 case TCA_MPLS_ACT_MODIFY: 3682 entry->id = FLOW_ACTION_MPLS_MANGLE; 3683 entry->mpls_mangle.label = tcf_mpls_label(act); 3684 entry->mpls_mangle.tc = tcf_mpls_tc(act); 3685 entry->mpls_mangle.bos = tcf_mpls_bos(act); 3686 entry->mpls_mangle.ttl = tcf_mpls_ttl(act); 3687 break; 3688 default: err = -EOPNOTSUPP; 3689 goto err_out_locked; 3690 } 3691 } else if (is_tcf_skbedit_ptype(act)) { 3692 entry->id = FLOW_ACTION_PTYPE; 3693 entry->ptype = tcf_skbedit_ptype(act); 3694 } else if (is_tcf_skbedit_priority(act)) { 3695 entry->id = FLOW_ACTION_PRIORITY; 3696 entry->priority = tcf_skbedit_priority(act); 3697 } else if (is_tcf_gate(act)) { 3698 entry->id = FLOW_ACTION_GATE; 3699 entry->gate.index = tcf_gate_index(act); 3700 entry->gate.prio = tcf_gate_prio(act); 3701 entry->gate.basetime = tcf_gate_basetime(act); 3702 entry->gate.cycletime = tcf_gate_cycletime(act); 3703 entry->gate.cycletimeext = tcf_gate_cycletimeext(act); 3704 entry->gate.num_entries = tcf_gate_num_entries(act); 3705 err = tcf_gate_get_entries(entry, act); 3706 if (err) 3707 goto err_out_locked; 3708 } else { 3709 err = -EOPNOTSUPP; 3710 goto err_out_locked; 3711 } 3712 spin_unlock_bh(&act->tcfa_lock); 3713 3714 if (!is_tcf_pedit(act)) 3715 j++; 3716 } 3717 3718 err_out: 3719 if (err) 3720
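		/* Unwind whatever was built so far: free action cookies and
		 * run the per-entry destructors (tunnel info, mirred netdev
		 * reference, gate entries, ...).
		 */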
tc_cleanup_flow_action(flow_action); 3721 3722 return err; 3723 err_out_locked: 3724 spin_unlock_bh(&act->tcfa_lock); 3725 goto err_out; 3726 } 3727 EXPORT_SYMBOL(tc_setup_flow_action); 3728 3729 unsigned int tcf_exts_num_actions(struct tcf_exts *exts) 3730 { 3731 unsigned int num_acts = 0; 3732 struct tc_action *act; 3733 int i; 3734 3735 tcf_exts_for_each_action(i, act, exts) { 3736 if (is_tcf_pedit(act)) 3737 num_acts += tcf_pedit_nkeys(act); 3738 else 3739 num_acts++; 3740 } 3741 return num_acts; 3742 } 3743 EXPORT_SYMBOL(tcf_exts_num_actions); 3744 3745 #ifdef CONFIG_NET_CLS_ACT 3746 static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr, 3747 u32 *p_block_index, 3748 struct netlink_ext_ack *extack) 3749 { 3750 *p_block_index = nla_get_u32(block_index_attr); 3751 if (!*p_block_index) { 3752 NL_SET_ERR_MSG(extack, "Block number may not be zero"); 3753 return -EINVAL; 3754 } 3755 3756 return 0; 3757 } 3758 3759 int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch, 3760 enum flow_block_binder_type binder_type, 3761 struct nlattr *block_index_attr, 3762 struct netlink_ext_ack *extack) 3763 { 3764 u32 block_index; 3765 int err; 3766 3767 if (!block_index_attr) 3768 return 0; 3769 3770 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack); 3771 if (err) 3772 return err; 3773 3774 if (!block_index) 3775 return 0; 3776 3777 qe->info.binder_type = binder_type; 3778 qe->info.chain_head_change = tcf_chain_head_change_dflt; 3779 qe->info.chain_head_change_priv = &qe->filter_chain; 3780 qe->info.block_index = block_index; 3781 3782 return tcf_block_get_ext(&qe->block, sch, &qe->info, extack); 3783 } 3784 EXPORT_SYMBOL(tcf_qevent_init); 3785 3786 void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch) 3787 { 3788 if (qe->info.block_index) 3789 tcf_block_put_ext(qe->block, sch, &qe->info); 3790 } 3791 EXPORT_SYMBOL(tcf_qevent_destroy); 3792 3793 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr, 3794 struct netlink_ext_ack *extack) 3795 { 3796 u32 block_index; 3797 int err; 3798 3799 if (!block_index_attr) 3800 return 0; 3801 3802 err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack); 3803 if (err) 3804 return err; 3805 3806 /* Bounce newly-configured block or change in block. 
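	 * Once a qevent is bound to a block it cannot be moved to a different
	 * block on the fly; only re-specifying the same block index (or
	 * omitting the attribute entirely) passes validation.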
*/ 3807 if (block_index != qe->info.block_index) { 3808 NL_SET_ERR_MSG(extack, "Change of blocks is not supported"); 3809 return -EINVAL; 3810 } 3811 3812 return 0; 3813 } 3814 EXPORT_SYMBOL(tcf_qevent_validate_change); 3815 3816 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, 3817 struct sk_buff **to_free, int *ret) 3818 { 3819 struct tcf_result cl_res; 3820 struct tcf_proto *fl; 3821 3822 if (!qe->info.block_index) 3823 return skb; 3824 3825 fl = rcu_dereference_bh(qe->filter_chain); 3826 3827 switch (tcf_classify(skb, NULL, fl, &cl_res, false)) { 3828 case TC_ACT_SHOT: 3829 qdisc_qstats_drop(sch); 3830 __qdisc_drop(skb, to_free); 3831 *ret = __NET_XMIT_BYPASS; 3832 return NULL; 3833 case TC_ACT_STOLEN: 3834 case TC_ACT_QUEUED: 3835 case TC_ACT_TRAP: 3836 __qdisc_drop(skb, to_free); 3837 *ret = __NET_XMIT_STOLEN; 3838 return NULL; 3839 case TC_ACT_REDIRECT: 3840 skb_do_redirect(skb); 3841 *ret = __NET_XMIT_STOLEN; 3842 return NULL; 3843 } 3844 3845 return skb; 3846 } 3847 EXPORT_SYMBOL(tcf_qevent_handle); 3848 3849 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe) 3850 { 3851 if (!qe->info.block_index) 3852 return 0; 3853 return nla_put_u32(skb, attr_name, qe->info.block_index); 3854 } 3855 EXPORT_SYMBOL(tcf_qevent_dump); 3856 #endif 3857 3858 static __net_init int tcf_net_init(struct net *net) 3859 { 3860 struct tcf_net *tn = net_generic(net, tcf_net_id); 3861 3862 spin_lock_init(&tn->idr_lock); 3863 idr_init(&tn->idr); 3864 return 0; 3865 } 3866 3867 static void __net_exit tcf_net_exit(struct net *net) 3868 { 3869 struct tcf_net *tn = net_generic(net, tcf_net_id); 3870 3871 idr_destroy(&tn->idr); 3872 } 3873 3874 static struct pernet_operations tcf_net_ops = { 3875 .init = tcf_net_init, 3876 .exit = tcf_net_exit, 3877 .id = &tcf_net_id, 3878 .size = sizeof(struct tcf_net), 3879 }; 3880 3881 static int __init tc_filter_init(void) 3882 { 3883 int err; 3884 3885 tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0); 3886 if (!tc_filter_wq) 3887 return -ENOMEM; 3888 3889 err = register_pernet_subsys(&tcf_net_ops); 3890 if (err) 3891 goto err_register_pernet_subsys; 3892 3893 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL, 3894 RTNL_FLAG_DOIT_UNLOCKED); 3895 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL, 3896 RTNL_FLAG_DOIT_UNLOCKED); 3897 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter, 3898 tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED); 3899 rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0); 3900 rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0); 3901 rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain, 3902 tc_dump_chain, 0); 3903 3904 return 0; 3905 3906 err_register_pernet_subsys: 3907 destroy_workqueue(tc_filter_wq); 3908 return err; 3909 } 3910 3911 subsys_initcall(tc_filter_init); 3912
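/* Usage sketch for the tcf_qevent_* helpers above, loosely modelled on
 * sch_red (member and attribute names are illustrative of that qdisc):
 *
 *	err = tcf_qevent_init(&q->qe_early_drop, sch,
 *			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
 *
 * then, at the point in the datapath where the event fires:
 *
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return ret;
 *
 * with a matching tcf_qevent_destroy(&q->qe_early_drop, sch) in ->destroy().
 */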