// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_api.c	Packet action API.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>

#ifdef CONFIG_INET
DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);
#endif

int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
{
#ifdef CONFIG_INET
        if (static_branch_unlikely(&tcf_frag_xmit_count))
                return sch_frag_xmit_hook(skb, xmit);
#endif

        return xmit(skb);
}
EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);

static void tcf_action_goto_chain_exec(const struct tc_action *a,
                                       struct tcf_result *res)
{
        const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);

        res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}

static void tcf_free_cookie_rcu(struct rcu_head *p)
{
        struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);

        kfree(cookie->data);
        kfree(cookie);
}

static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
                                  struct tc_cookie *new_cookie)
{
        struct tc_cookie *old;

        old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
        if (old)
                call_rcu(&old->rcu, tcf_free_cookie_rcu);
}

int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
                             struct tcf_chain **newchain,
                             struct netlink_ext_ack *extack)
{
        int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
        u32 chain_index;

        if (!opcode)
                ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
        else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
                ret = 0;
        if (ret) {
                NL_SET_ERR_MSG(extack, "invalid control action");
                goto end;
        }

        if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
                chain_index = action & TC_ACT_EXT_VAL_MASK;
                if (!tp || !newchain) {
                        ret = -EINVAL;
                        NL_SET_ERR_MSG(extack,
                                       "can't goto NULL proto/chain");
                        goto end;
                }
                *newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
                if (!*newchain) {
                        ret = -ENOMEM;
                        NL_SET_ERR_MSG(extack,
                                       "can't allocate goto_chain");
                }
        }
end:
        return ret;
}
EXPORT_SYMBOL(tcf_action_check_ctrlact);

struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
                                         struct tcf_chain *goto_chain)
{
        a->tcfa_action = action;
        goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
        return goto_chain;
}
EXPORT_SYMBOL(tcf_action_set_ctrlact);
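/* Sketch of how the two ctrlact helpers above are typically paired inside
 * an action's ->init() callback (mirrors in-tree users such as act_gact;
 * the surrounding names are illustrative):
 *
 *        struct tcf_chain *goto_ch = NULL;
 *
 *        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
 *        if (err < 0)
 *                return err;
 *        ...
 *        spin_lock_bh(&gact->tcf_lock);
 *        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
 *        spin_unlock_bh(&gact->tcf_lock);
 *        if (goto_ch)
 *                tcf_chain_put_by_act(goto_ch);
 */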
/* XXX: For standalone actions, we don't need an RCU grace period either,
 * because actions are always connected to filters and filters are already
 * destroyed in RCU callbacks, so after an RCU grace period actions are
 * already disconnected from filters. Later readers can no longer find us.
 */
static void free_tcf(struct tc_action *p)
{
        struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);

        free_percpu(p->cpu_bstats);
        free_percpu(p->cpu_bstats_hw);
        free_percpu(p->cpu_qstats);

        tcf_set_action_cookie(&p->act_cookie, NULL);
        if (chain)
                tcf_chain_put_by_act(chain);

        kfree(p);
}

static void tcf_action_cleanup(struct tc_action *p)
{
        if (p->ops->cleanup)
                p->ops->cleanup(p);

        gen_kill_estimator(&p->tcfa_rate_est);
        free_tcf(p);
}

static int __tcf_action_put(struct tc_action *p, bool bind)
{
        struct tcf_idrinfo *idrinfo = p->idrinfo;

        if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
                if (bind)
                        atomic_dec(&p->tcfa_bindcnt);
                idr_remove(&idrinfo->action_idr, p->tcfa_index);
                mutex_unlock(&idrinfo->lock);

                tcf_action_cleanup(p);
                return 1;
        }

        if (bind)
                atomic_dec(&p->tcfa_bindcnt);

        return 0;
}

int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
        int ret = 0;

        /* Release with strict==1 and bind==0 is only called through the act
         * API interface (classifiers always bind). The only case when an
         * action with a positive reference count and zero bind count can
         * exist is when it was also created via the act API (unbinding the
         * last classifier will destroy the action if it was created by a
         * classifier). So the only case when the bind count can change after
         * the initial check is when an unbound action is destroyed by the
         * act API while a classifier concurrently binds to an action with
         * the same id. This results either in creation of a new action (same
         * behavior as before) or in reuse of the existing action, if the
         * concurrent process increments the reference count before the
         * action is deleted. Both scenarios are acceptable.
         */
        if (p) {
                if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
                        return -EPERM;

                if (__tcf_action_put(p, bind))
                        ret = ACT_P_DELETED;
        }

        return ret;
}
EXPORT_SYMBOL(__tcf_idr_release);
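/* Upper-bound estimate of the netlink attribute space a single action dump
 * may consume; callers use it to size reply skbs before filling them.
 */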
static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
        struct tc_cookie *act_cookie;
        u32 cookie_len = 0;

        rcu_read_lock();
        act_cookie = rcu_dereference(act->act_cookie);

        if (act_cookie)
                cookie_len = nla_total_size(act_cookie->len);
        rcu_read_unlock();

        return  nla_total_size(0) /* action number nested */
                + nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
                + cookie_len /* TCA_ACT_COOKIE */
                + nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
                + nla_total_size(0) /* TCA_ACT_STATS nested */
                + nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
                /* TCA_STATS_BASIC */
                + nla_total_size_64bit(sizeof(struct gnet_stats_basic))
                /* TCA_STATS_PKT64 */
                + nla_total_size_64bit(sizeof(u64))
                /* TCA_STATS_QUEUE */
                + nla_total_size_64bit(sizeof(struct gnet_stats_queue))
                + nla_total_size(0) /* TCA_OPTIONS nested */
                + nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
}

static size_t tcf_action_full_attrs_size(size_t sz)
{
        return NLMSG_HDRLEN                     /* struct nlmsghdr */
                + sizeof(struct tcamsg)
                + nla_total_size(0)             /* TCA_ACT_TAB nested */
                + sz;
}

static size_t tcf_action_fill_size(const struct tc_action *act)
{
        size_t sz = tcf_action_shared_attrs_size(act);

        if (act->ops->get_fill_size)
                return act->ops->get_fill_size(act) + sz;
        return sz;
}

static int
tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tc_cookie *cookie;

        if (nla_put_string(skb, TCA_KIND, a->ops->kind))
                goto nla_put_failure;
        if (tcf_action_copy_stats(skb, a, 0))
                goto nla_put_failure;
        if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
                goto nla_put_failure;

        rcu_read_lock();
        cookie = rcu_dereference(a->act_cookie);
        if (cookie) {
                if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
                        rcu_read_unlock();
                        goto nla_put_failure;
                }
        }
        rcu_read_unlock();

        return 0;

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}
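/* Dump walker state lives in the netlink callback scratch area:
 * cb->args[0] holds the resume index, cb->args[1] the number of actions
 * dumped so far (reported back via TCA_ROOT_COUNT), cb->args[2] the
 * TCA_ROOT_FLAGS value and cb->args[3] the "used since" cutoff in jiffies.
 */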
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
                           struct netlink_callback *cb)
{
        int err = 0, index = -1, s_i = 0, n_i = 0;
        u32 act_flags = cb->args[2];
        unsigned long jiffy_since = cb->args[3];
        struct nlattr *nest;
        struct idr *idr = &idrinfo->action_idr;
        struct tc_action *p;
        unsigned long id = 1;
        unsigned long tmp;

        mutex_lock(&idrinfo->lock);

        s_i = cb->args[0];

        idr_for_each_entry_ul(idr, p, tmp, id) {
                index++;
                if (index < s_i)
                        continue;
                if (IS_ERR(p))
                        continue;

                if (jiffy_since &&
                    time_after(jiffy_since,
                               (unsigned long)p->tcfa_tm.lastuse))
                        continue;

                nest = nla_nest_start_noflag(skb, n_i);
                if (!nest) {
                        index--;
                        goto nla_put_failure;
                }
                err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
                        tcf_action_dump_terse(skb, p, true) :
                        tcf_action_dump_1(skb, p, 0, 0);
                if (err < 0) {
                        index--;
                        nlmsg_trim(skb, nest);
                        goto done;
                }
                nla_nest_end(skb, nest);
                n_i++;
                if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
                    n_i >= TCA_ACT_MAX_PRIO)
                        goto done;
        }
done:
        if (index >= 0)
                cb->args[0] = index + 1;

        mutex_unlock(&idrinfo->lock);
        if (n_i) {
                if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
                        cb->args[1] = n_i;
        }
        return n_i;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        goto done;
}

static int tcf_idr_release_unsafe(struct tc_action *p)
{
        if (atomic_read(&p->tcfa_bindcnt) > 0)
                return -EPERM;

        if (refcount_dec_and_test(&p->tcfa_refcnt)) {
                idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
                tcf_action_cleanup(p);
                return ACT_P_DELETED;
        }

        return 0;
}

static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
                          const struct tc_action_ops *ops)
{
        struct nlattr *nest;
        int n_i = 0;
        int ret = -EINVAL;
        struct idr *idr = &idrinfo->action_idr;
        struct tc_action *p;
        unsigned long id = 1;
        unsigned long tmp;

        nest = nla_nest_start_noflag(skb, 0);
        if (nest == NULL)
                goto nla_put_failure;
        if (nla_put_string(skb, TCA_KIND, ops->kind))
                goto nla_put_failure;

        mutex_lock(&idrinfo->lock);
        idr_for_each_entry_ul(idr, p, tmp, id) {
                if (IS_ERR(p))
                        continue;
                ret = tcf_idr_release_unsafe(p);
                if (ret == ACT_P_DELETED) {
                        module_put(ops->owner);
                        n_i++;
                } else if (ret < 0) {
                        mutex_unlock(&idrinfo->lock);
                        goto nla_put_failure;
                }
        }
        mutex_unlock(&idrinfo->lock);

        if (nla_put_u32(skb, TCA_FCNT, n_i))
                goto nla_put_failure;
        nla_nest_end(skb, nest);

        return n_i;
nla_put_failure:
        nla_nest_cancel(skb, nest);
        return ret;
}

int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
                       struct netlink_callback *cb, int type,
                       const struct tc_action_ops *ops,
                       struct netlink_ext_ack *extack)
{
        struct tcf_idrinfo *idrinfo = tn->idrinfo;

        if (type == RTM_DELACTION) {
                return tcf_del_walker(idrinfo, skb, ops);
        } else if (type == RTM_GETACTION) {
                return tcf_dump_walker(idrinfo, skb, cb);
        } else {
                WARN(1, "tcf_generic_walker: unknown command %d\n", type);
                NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
                return -EINVAL;
        }
}
EXPORT_SYMBOL(tcf_generic_walker);

int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
        struct tc_action *p;

        mutex_lock(&idrinfo->lock);
        p = idr_find(&idrinfo->action_idr, index);
        if (IS_ERR(p))
                p = NULL;
        else if (p)
                refcount_inc(&p->tcfa_refcnt);
        mutex_unlock(&idrinfo->lock);

        if (p) {
                *a = p;
                return true;
        }
        return false;
}
EXPORT_SYMBOL(tcf_idr_search);
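/* Drop the idr's reference to the action at @index, removing and freeing it
 * immediately when that was the last reference (otherwise the remaining
 * holders complete the removal when they put theirs). Returns -EPERM while
 * classifiers are still bound to the action and -ENOENT if no action exists
 * at @index.
 */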
static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
        struct tc_action *p;
        int ret = 0;

        mutex_lock(&idrinfo->lock);
        p = idr_find(&idrinfo->action_idr, index);
        if (!p) {
                mutex_unlock(&idrinfo->lock);
                return -ENOENT;
        }

        if (!atomic_read(&p->tcfa_bindcnt)) {
                if (refcount_dec_and_test(&p->tcfa_refcnt)) {
                        struct module *owner = p->ops->owner;

                        WARN_ON(p != idr_remove(&idrinfo->action_idr,
                                                p->tcfa_index));
                        mutex_unlock(&idrinfo->lock);

                        tcf_action_cleanup(p);
                        module_put(owner);
                        return 0;
                }
                ret = 0;
        } else {
                ret = -EPERM;
        }

        mutex_unlock(&idrinfo->lock);
        return ret;
}

int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
                   struct tc_action **a, const struct tc_action_ops *ops,
                   int bind, bool cpustats, u32 flags)
{
        struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
        int err = -ENOMEM;

        if (unlikely(!p))
                return -ENOMEM;
        refcount_set(&p->tcfa_refcnt, 1);
        if (bind)
                atomic_set(&p->tcfa_bindcnt, 1);

        if (cpustats) {
                p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
                if (!p->cpu_bstats)
                        goto err1;
                p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
                if (!p->cpu_bstats_hw)
                        goto err2;
                p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
                if (!p->cpu_qstats)
                        goto err3;
        }
        spin_lock_init(&p->tcfa_lock);
        p->tcfa_index = index;
        p->tcfa_tm.install = jiffies;
        p->tcfa_tm.lastuse = jiffies;
        p->tcfa_tm.firstuse = 0;
        p->tcfa_flags = flags;
        if (est) {
                err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
                                        &p->tcfa_rate_est,
                                        &p->tcfa_lock, NULL, est);
                if (err)
                        goto err4;
        }

        p->idrinfo = idrinfo;
        p->ops = ops;
        *a = p;
        return 0;
err4:
        free_percpu(p->cpu_qstats);
err3:
        free_percpu(p->cpu_bstats_hw);
err2:
        free_percpu(p->cpu_bstats);
err1:
        kfree(p);
        return err;
}
EXPORT_SYMBOL(tcf_idr_create);

int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
                              struct nlattr *est, struct tc_action **a,
                              const struct tc_action_ops *ops, int bind,
                              u32 flags)
{
        /* Set cpustats according to the action's flags. */
        return tcf_idr_create(tn, index, est, a, ops, bind,
                              !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
}
EXPORT_SYMBOL(tcf_idr_create_from_flags);

/* Clean up an idr index that was allocated but not initialized. */

void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
{
        struct tcf_idrinfo *idrinfo = tn->idrinfo;

        mutex_lock(&idrinfo->lock);
        /* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
        WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
        mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_cleanup);
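/* Sketch of the usual create-or-bind sequence in an action's ->init(),
 * combining tcf_idr_check_alloc() below with tcf_idr_create() and
 * tcf_idr_cleanup() above (based on in-tree users; error handling trimmed):
 *
 *        err = tcf_idr_check_alloc(tn, &index, a, bind);
 *        if (!err) {                        // index was reserved for us
 *                ret = tcf_idr_create(tn, index, est, a, ops, bind,
 *                                     false, flags);
 *                if (ret) {
 *                        tcf_idr_cleanup(tn, index);
 *                        return ret;
 *                }
 *        } else if (err > 0) {              // existing action found and bound
 *                if (bind)
 *                        return 0;
 *                if (!ovr) {
 *                        tcf_idr_release(*a, bind);
 *                        return -EEXIST;
 *                }
 *        } else {
 *                return err;
 *        }
 */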
/* Check if an action with the specified index exists. If the action is
 * found, increment its reference and bind counters and return 1. Otherwise
 * insert a temporary error pointer (to prevent concurrent users from
 * inserting actions with the same index) and return 0.
 */

int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
                        struct tc_action **a, int bind)
{
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
        struct tc_action *p;
        int ret;

again:
        mutex_lock(&idrinfo->lock);
        if (*index) {
                p = idr_find(&idrinfo->action_idr, *index);
                if (IS_ERR(p)) {
                        /* This means that another process allocated
                         * index but did not assign the pointer yet.
                         */
                        mutex_unlock(&idrinfo->lock);
                        goto again;
                }

                if (p) {
                        refcount_inc(&p->tcfa_refcnt);
                        if (bind)
                                atomic_inc(&p->tcfa_bindcnt);
                        *a = p;
                        ret = 1;
                } else {
                        *a = NULL;
                        ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
                                            *index, GFP_KERNEL);
                        if (!ret)
                                idr_replace(&idrinfo->action_idr,
                                            ERR_PTR(-EBUSY), *index);
                }
        } else {
                *index = 1;
                *a = NULL;
                ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
                                    UINT_MAX, GFP_KERNEL);
                if (!ret)
                        idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
                                    *index);
        }
        mutex_unlock(&idrinfo->lock);
        return ret;
}
EXPORT_SYMBOL(tcf_idr_check_alloc);

void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
                         struct tcf_idrinfo *idrinfo)
{
        struct idr *idr = &idrinfo->action_idr;
        struct tc_action *p;
        int ret;
        unsigned long id = 1;
        unsigned long tmp;

        idr_for_each_entry_ul(idr, p, tmp, id) {
                ret = __tcf_idr_release(p, false, true);
                if (ret == ACT_P_DELETED)
                        module_put(ops->owner);
                else if (ret < 0)
                        return;
        }
        idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act,
                        struct pernet_operations *ops)
{
        struct tc_action_ops *a;
        int ret;

        if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
                return -EINVAL;

        /* We have to register pernet ops before making the action ops visible,
         * otherwise tcf_action_init_1() could get a partially initialized
         * netns.
         */
        ret = register_pernet_subsys(ops);
        if (ret)
                return ret;

        write_lock(&act_mod_lock);
        list_for_each_entry(a, &act_base, head) {
                if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
                        write_unlock(&act_mod_lock);
                        unregister_pernet_subsys(ops);
                        return -EEXIST;
                }
        }
        list_add_tail(&act->head, &act_base);
        write_unlock(&act_mod_lock);

        return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act,
                          struct pernet_operations *ops)
{
        struct tc_action_ops *a;
        int err = -ENOENT;

        write_lock(&act_mod_lock);
        list_for_each_entry(a, &act_base, head) {
                if (a == act) {
                        list_del(&act->head);
                        err = 0;
                        break;
                }
        }
        write_unlock(&act_mod_lock);
        if (!err)
                unregister_pernet_subsys(ops);
        return err;
}
EXPORT_SYMBOL(tcf_unregister_action);
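/* Registration boilerplate for an action module usually looks like this
 * (sketch; "foo" and all tcf_foo_* names are illustrative):
 *
 *        static struct tc_action_ops act_foo_ops = {
 *                .kind   = "foo",
 *                .id     = TCA_ID_FOO,
 *                .owner  = THIS_MODULE,
 *                .act    = tcf_foo_act,
 *                .dump   = tcf_foo_dump,
 *                .init   = tcf_foo_init,
 *                .walk   = tcf_foo_walker,
 *                .lookup = tcf_foo_search,
 *                .size   = sizeof(struct tcf_foo),
 *        };
 *
 *        static int __init foo_init_module(void)
 *        {
 *                return tcf_register_action(&act_foo_ops, &foo_net_ops);
 *        }
 */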
/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
        struct tc_action_ops *a, *res = NULL;

        if (kind) {
                read_lock(&act_mod_lock);
                list_for_each_entry(a, &act_base, head) {
                        if (strcmp(kind, a->kind) == 0) {
                                if (try_module_get(a->owner))
                                        res = a;
                                break;
                        }
                }
                read_unlock(&act_mod_lock);
        }
        return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
        struct tc_action_ops *a, *res = NULL;

        if (kind) {
                read_lock(&act_mod_lock);
                list_for_each_entry(a, &act_base, head) {
                        if (nla_strcmp(kind, a->kind) == 0) {
                                if (try_module_get(a->owner))
                                        res = a;
                                break;
                        }
                }
                read_unlock(&act_mod_lock);
        }
        return res;
}

/* TCA_ACT_MAX_PRIO is 32, so jump offsets count up to 32 */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
                    int nr_actions, struct tcf_result *res)
{
        u32 jmp_prgcnt = 0;
        u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
        int i;
        int ret = TC_ACT_OK;

        if (skb_skip_tc_classify(skb))
                return TC_ACT_OK;

restart_act_graph:
        for (i = 0; i < nr_actions; i++) {
                const struct tc_action *a = actions[i];

                if (jmp_prgcnt > 0) {
                        jmp_prgcnt -= 1;
                        continue;
                }
repeat:
                ret = a->ops->act(skb, a, res);
                if (ret == TC_ACT_REPEAT)
                        goto repeat;    /* we need a ttl - JHS */

                if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
                        jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
                        if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
                                /* faulty opcode, stop pipeline */
                                return TC_ACT_OK;
                        } else {
                                jmp_ttl -= 1;
                                if (jmp_ttl > 0)
                                        goto restart_act_graph;
                                else /* faulty graph, stop pipeline */
                                        return TC_ACT_OK;
                        }
                } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
                        if (unlikely(!rcu_access_pointer(a->goto_chain))) {
                                net_warn_ratelimited("can't go to NULL chain!\n");
                                return TC_ACT_SHOT;
                        }
                        tcf_action_goto_chain_exec(a, res);
                }

                if (ret != TC_ACT_PIPE)
                        break;
        }

        return ret;
}
EXPORT_SYMBOL(tcf_action_exec);
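/* Release one reference (and one bind, if @bind is set) on every action in
 * @actions, clearing each slot as it is released. Stops and returns a
 * negative error if an action is still bound (see __tcf_idr_release()).
 */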
int tcf_action_destroy(struct tc_action *actions[], int bind)
{
        const struct tc_action_ops *ops;
        struct tc_action *a;
        int ret = 0, i;

        for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
                a = actions[i];
                actions[i] = NULL;
                ops = a->ops;
                ret = __tcf_idr_release(a, bind, true);
                if (ret == ACT_P_DELETED)
                        module_put(ops->owner);
                else if (ret < 0)
                        return ret;
        }
        return ret;
}

static int tcf_action_put(struct tc_action *p)
{
        return __tcf_action_put(p, false);
}

/* Put all actions in this array, skipping NULL entries. */
static void tcf_action_put_many(struct tc_action *actions[])
{
        int i;

        for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
                struct tc_action *a = actions[i];
                const struct tc_action_ops *ops;

                if (!a)
                        continue;
                ops = a->ops;
                if (tcf_action_put(a))
                        module_put(ops->owner);
        }
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
        return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
        int err = -EINVAL;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;

        if (tcf_action_dump_terse(skb, a, false))
                goto nla_put_failure;

        if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
            nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
                               a->hw_stats, TCA_ACT_HW_STATS_ANY))
                goto nla_put_failure;

        if (a->used_hw_stats_valid &&
            nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
                               a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
                goto nla_put_failure;

        if (a->tcfa_flags &&
            nla_put_bitfield32(skb, TCA_ACT_FLAGS,
                               a->tcfa_flags, a->tcfa_flags))
                goto nla_put_failure;

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;
        err = tcf_action_dump_old(skb, a, bind, ref);
        if (err > 0) {
                nla_nest_end(skb, nest);
                return err;
        }

nla_put_failure:
        nlmsg_trim(skb, b);
        return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
                    int bind, int ref, bool terse)
{
        struct tc_action *a;
        int err = -EINVAL, i;
        struct nlattr *nest;

        for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
                a = actions[i];
                nest = nla_nest_start_noflag(skb, i + 1);
                if (nest == NULL)
                        goto nla_put_failure;
                err = terse ? tcf_action_dump_terse(skb, a, false) :
                        tcf_action_dump_1(skb, a, bind, ref);
                if (err < 0)
                        goto errout;
                nla_nest_end(skb, nest);
        }

        return 0;

nla_put_failure:
        err = -EINVAL;
errout:
        nla_nest_cancel(skb, nest);
        return err;
}
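/* For reference, the attribute layout produced by the dump helpers above,
 * inside the TCA_ACT_TAB nest that callers such as tca_get_fill() below
 * open around tcf_action_dump() (sketch):
 *
 *        1 (priority)
 *          TCA_ACT_KIND "gact"
 *          TCA_ACT_STATS ...
 *          TCA_OPTIONS ...
 *        2 (priority)
 *          ...
 */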
static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
        struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);

        if (!c)
                return NULL;

        c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
        if (!c->data) {
                kfree(c);
                return NULL;
        }
        c->len = nla_len(tb[TCA_ACT_COOKIE]);

        return c;
}

static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
{
        struct nla_bitfield32 hw_stats_bf;

        /* If the user did not pass the attr, that means they do not care
         * about the type. Return "any" in that case, which is a bitfield
         * with all supported types set.
         */
        if (!hw_stats_attr)
                return TCA_ACT_HW_STATS_ANY;
        hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
        return hw_stats_bf.value;
}

static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
        [TCA_ACT_KIND]          = { .type = NLA_STRING },
        [TCA_ACT_INDEX]         = { .type = NLA_U32 },
        [TCA_ACT_COOKIE]        = { .type = NLA_BINARY,
                                    .len = TC_COOKIE_MAX_SIZE },
        [TCA_ACT_OPTIONS]       = { .type = NLA_NESTED },
        [TCA_ACT_FLAGS]         = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS),
        [TCA_ACT_HW_STATS]      = NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
};

void tcf_idr_insert_many(struct tc_action *actions[])
{
        int i;

        for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
                struct tc_action *a = actions[i];
                struct tcf_idrinfo *idrinfo;

                if (!a)
                        continue;
                idrinfo = a->idrinfo;
                mutex_lock(&idrinfo->lock);
                /* Replace the ERR_PTR(-EBUSY) placeholder allocated by
                 * tcf_idr_check_alloc() if the action was just created;
                 * otherwise this is a no-op.
                 */
                idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
                mutex_unlock(&idrinfo->lock);
        }
}
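/* Resolve the tc_action_ops for an action kind, taking a module reference.
 * If the kind is unknown, try to auto-load the act_<kind> module; because
 * that may require dropping RTNL, a successful load is reported as -EAGAIN
 * so that the caller replays the whole request.
 */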
struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
                                         bool rtnl_held,
                                         struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_ACT_MAX + 1];
        struct tc_action_ops *a_o;
        char act_name[IFNAMSIZ];
        struct nlattr *kind;
        int err;

        if (name == NULL) {
                err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
                                                  tcf_action_policy, extack);
                if (err < 0)
                        return ERR_PTR(err);
                err = -EINVAL;
                kind = tb[TCA_ACT_KIND];
                if (!kind) {
                        NL_SET_ERR_MSG(extack, "TC action kind must be specified");
                        return ERR_PTR(err);
                }
                if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
                        NL_SET_ERR_MSG(extack, "TC action name too long");
                        return ERR_PTR(err);
                }
        } else {
                if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) {
                        NL_SET_ERR_MSG(extack, "TC action name too long");
                        return ERR_PTR(-EINVAL);
                }
        }

        a_o = tc_lookup_action_n(act_name);
        if (a_o == NULL) {
#ifdef CONFIG_MODULES
                if (rtnl_held)
                        rtnl_unlock();
                request_module("act_%s", act_name);
                if (rtnl_held)
                        rtnl_lock();

                a_o = tc_lookup_action_n(act_name);

                /* We dropped the RTNL semaphore in order to
                 * perform the module load. So, even if we
                 * succeeded in loading the module we have to
                 * tell the caller to replay the request. We
                 * indicate this using -EAGAIN.
                 */
                if (a_o != NULL) {
                        module_put(a_o->owner);
                        return ERR_PTR(-EAGAIN);
                }
#endif
                NL_SET_ERR_MSG(extack, "Failed to load TC action module");
                return ERR_PTR(-ENOENT);
        }

        return a_o;
}

struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                                    struct nlattr *nla, struct nlattr *est,
                                    char *name, int ovr, int bind,
                                    struct tc_action_ops *a_o, bool rtnl_held,
                                    struct netlink_ext_ack *extack)
{
        struct nla_bitfield32 flags = { 0, 0 };
        u8 hw_stats = TCA_ACT_HW_STATS_ANY;
        struct nlattr *tb[TCA_ACT_MAX + 1];
        struct tc_cookie *cookie = NULL;
        struct tc_action *a;
        int err;

        /* backward compatibility for policer */
        if (name == NULL) {
                err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
                                                  tcf_action_policy, extack);
                if (err < 0)
                        return ERR_PTR(err);
                if (tb[TCA_ACT_COOKIE]) {
                        cookie = nla_memdup_cookie(tb);
                        if (!cookie) {
                                NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
                                err = -ENOMEM;
                                goto err_out;
                        }
                }
                hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
                if (tb[TCA_ACT_FLAGS])
                        flags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);

                err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
                                rtnl_held, tp, flags.value, extack);
        } else {
                err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
                                tp, flags.value, extack);
        }
        if (err < 0)
                goto err_out;

        if (!name && tb[TCA_ACT_COOKIE])
                tcf_set_action_cookie(&a->act_cookie, cookie);

        if (!name)
                a->hw_stats = hw_stats;

        /* The module reference is bumped only when a brand new action is
         * created. If it already exists and is only bound to in a_o->init(),
         * ACT_P_CREATED is not returned (zero is).
         */
        if (err != ACT_P_CREATED)
                module_put(a_o->owner);

        return a;

err_out:
        if (cookie) {
                kfree(cookie->data);
                kfree(cookie);
        }
        return ERR_PTR(err);
}
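/* Actions are initialized in two passes: first all ops are resolved (which
 * may drop RTNL and restart the request via -EAGAIN), then every action is
 * created, and only then are they all made visible at once via
 * tcf_idr_insert_many(). See the comment before the commit step below.
 */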
/* Returns the number of initialized actions or a negative error. */

int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
                    struct nlattr *est, char *name, int ovr, int bind,
                    struct tc_action *actions[], size_t *attr_size,
                    bool rtnl_held, struct netlink_ext_ack *extack)
{
        struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct tc_action *act;
        size_t sz = 0;
        int err;
        int i;

        err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
                                          extack);
        if (err < 0)
                return err;

        for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
                struct tc_action_ops *a_o;

                a_o = tc_action_load_ops(name, tb[i], rtnl_held, extack);
                if (IS_ERR(a_o)) {
                        err = PTR_ERR(a_o);
                        goto err_mod;
                }
                ops[i - 1] = a_o;
        }

        for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
                act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
                                        ops[i - 1], rtnl_held, extack);
                if (IS_ERR(act)) {
                        err = PTR_ERR(act);
                        goto err;
                }
                sz += tcf_action_fill_size(act);
                /* Start from index 0 */
                actions[i - 1] = act;
        }

        /* We have to commit them all together, because if any error happens
         * in between, we cannot handle the failure gracefully.
         */
        tcf_idr_insert_many(actions);

        *attr_size = tcf_action_full_attrs_size(sz);
        return i - 1;

err:
        tcf_action_destroy(actions, bind);
err_mod:
        for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
                if (ops[i])
                        module_put(ops[i]->owner);
        }
        return err;
}

void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
                             u64 drops, bool hw)
{
        if (a->cpu_bstats) {
                _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);

                this_cpu_ptr(a->cpu_qstats)->drops += drops;

                if (hw)
                        _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
                                           bytes, packets);
                return;
        }

        _bstats_update(&a->tcfa_bstats, bytes, packets);
        a->tcfa_qstats.drops += drops;
        if (hw)
                _bstats_update(&a->tcfa_bstats_hw, bytes, packets);
}
EXPORT_SYMBOL(tcf_action_update_stats);

int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
                          int compat_mode)
{
        int err = 0;
        struct gnet_dump d;

        if (p == NULL)
                goto errout;

        /* compat_mode being true specifies a call that is supposed
         * to add additional backward compatibility statistic TLVs.
         */
        if (compat_mode) {
                if (p->type == TCA_OLD_COMPAT)
                        err = gnet_stats_start_copy_compat(skb, 0,
                                                           TCA_STATS,
                                                           TCA_XSTATS,
                                                           &p->tcfa_lock, &d,
                                                           TCA_PAD);
                else
                        return 0;
        } else
                err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
                                            &p->tcfa_lock, &d, TCA_ACT_PAD);

        if (err < 0)
                goto errout;

        if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
            gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw,
                                     &p->tcfa_bstats_hw) < 0 ||
            gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
            gnet_stats_copy_queue(&d, p->cpu_qstats,
                                  &p->tcfa_qstats,
                                  p->tcfa_qstats.qlen) < 0)
                goto errout;

        if (gnet_stats_finish_copy(&d) < 0)
                goto errout;

        return 0;

errout:
        return -1;
}

static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
                        u32 portid, u32 seq, u16 flags, int event, int bind,
                        int ref)
{
        struct tcamsg *t;
        struct nlmsghdr *nlh;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;

        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
        if (!nlh)
                goto out_nlmsg_trim;
        t = nlmsg_data(nlh);
        t->tca_family = AF_UNSPEC;
        t->tca__pad1 = 0;
        t->tca__pad2 = 0;

        nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
        if (!nest)
                goto out_nlmsg_trim;

        if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
                goto out_nlmsg_trim;

        nla_nest_end(skb, nest);

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        return skb->len;

out_nlmsg_trim:
        nlmsg_trim(skb, b);
        return -1;
}

static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
               struct tc_action *actions[], int event,
               struct netlink_ext_ack *extack)
{
        struct sk_buff *skb;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;
        if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
                         0, 1) <= 0) {
                NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
                kfree_skb(skb);
                return -EINVAL;
        }

        return rtnl_unicast(skb, net, portid);
}
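/* Look up one action from a netlink request by kind and index. On success
 * the action is returned with its reference count raised by ops->lookup().
 */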
static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
                                          struct nlmsghdr *n, u32 portid,
                                          struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_ACT_MAX + 1];
        const struct tc_action_ops *ops;
        struct tc_action *a;
        int index;
        int err;

        err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
                                          tcf_action_policy, extack);
        if (err < 0)
                goto err_out;

        err = -EINVAL;
        if (tb[TCA_ACT_INDEX] == NULL ||
            nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
                NL_SET_ERR_MSG(extack, "Invalid TC action index value");
                goto err_out;
        }
        index = nla_get_u32(tb[TCA_ACT_INDEX]);

        err = -EINVAL;
        ops = tc_lookup_action(tb[TCA_ACT_KIND]);
        if (!ops) { /* could happen in a batch of actions */
                NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
                goto err_out;
        }
        err = -ENOENT;
        if (ops->lookup(net, &a, index) == 0) {
                NL_SET_ERR_MSG(extack, "TC action with specified index not found");
                goto err_mod;
        }

        module_put(ops->owner);
        return a;

err_mod:
        module_put(ops->owner);
err_out:
        return ERR_PTR(err);
}

static int tca_action_flush(struct net *net, struct nlattr *nla,
                            struct nlmsghdr *n, u32 portid,
                            struct netlink_ext_ack *extack)
{
        struct sk_buff *skb;
        unsigned char *b;
        struct nlmsghdr *nlh;
        struct tcamsg *t;
        struct netlink_callback dcb;
        struct nlattr *nest;
        struct nlattr *tb[TCA_ACT_MAX + 1];
        const struct tc_action_ops *ops;
        struct nlattr *kind;
        int err = -ENOMEM;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return err;

        b = skb_tail_pointer(skb);

        err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
                                          tcf_action_policy, extack);
        if (err < 0)
                goto err_out;

        err = -EINVAL;
        kind = tb[TCA_ACT_KIND];
        ops = tc_lookup_action(kind);
        if (!ops) { /* someone is trying to flush an unknown action */
                NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
                goto err_out;
        }

        nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
                        sizeof(*t), 0);
        if (!nlh) {
                NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
                goto out_module_put;
        }
        t = nlmsg_data(nlh);
        t->tca_family = AF_UNSPEC;
        t->tca__pad1 = 0;
        t->tca__pad2 = 0;

        nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
        if (!nest) {
                NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
                goto out_module_put;
        }

        err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
        if (err <= 0) {
                nla_nest_cancel(skb, nest);
                goto out_module_put;
        }

        nla_nest_end(skb, nest);

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        nlh->nlmsg_flags |= NLM_F_ROOT;
        module_put(ops->owner);
        err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                             n->nlmsg_flags & NLM_F_ECHO);
        if (err > 0)
                return 0;
        if (err < 0)
                NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");

        return err;

out_module_put:
        module_put(ops->owner);
err_out:
        kfree_skb(skb);
        return err;
}
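/* Delete the given actions by index. Each caller reference is dropped
 * first; when that was not the last one, the action is looked up again by
 * index and removed through tcf_idr_delete_index().
 */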
static int tcf_action_delete(struct net *net, struct tc_action *actions[])
{
        int i;

        for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
                struct tc_action *a = actions[i];
                const struct tc_action_ops *ops = a->ops;
                /* Actions can be deleted concurrently, so we must save the
                 * action's idrinfo and index to search again after the
                 * reference is released.
                 */
                struct tcf_idrinfo *idrinfo = a->idrinfo;
                u32 act_index = a->tcfa_index;

                actions[i] = NULL;
                if (tcf_action_put(a)) {
                        /* last reference, action was deleted concurrently */
                        module_put(ops->owner);
                } else {
                        int ret;

                        /* now do the delete */
                        ret = tcf_idr_delete_index(idrinfo, act_index);
                        if (ret < 0)
                                return ret;
                }
        }
        return 0;
}

static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
               u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
        int ret;
        struct sk_buff *skb;

        skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
                        GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
                         0, 2) <= 0) {
                NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
                kfree_skb(skb);
                return -EINVAL;
        }

        /* now do the delete */
        ret = tcf_action_delete(net, actions);
        if (ret < 0) {
                NL_SET_ERR_MSG(extack, "Failed to delete TC action");
                kfree_skb(skb);
                return ret;
        }

        ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                             n->nlmsg_flags & NLM_F_ECHO);
        if (ret > 0)
                return 0;
        return ret;
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
              u32 portid, int event, struct netlink_ext_ack *extack)
{
        int i, ret;
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct tc_action *act;
        size_t attr_size = 0;
        struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

        ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
                                          extack);
        if (ret < 0)
                return ret;

        if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
                if (tb[1])
                        return tca_action_flush(net, tb[1], n, portid, extack);

                NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
                return -EINVAL;
        }

        for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
                act = tcf_action_get_1(net, tb[i], n, portid, extack);
                if (IS_ERR(act)) {
                        ret = PTR_ERR(act);
                        goto err;
                }
                attr_size += tcf_action_fill_size(act);
                actions[i - 1] = act;
        }

        attr_size = tcf_action_full_attrs_size(attr_size);

        if (event == RTM_GETACTION)
                ret = tcf_get_notify(net, portid, n, actions, event, extack);
        else { /* delete */
                ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
                if (ret)
                        goto err;
                return 0;
        }
err:
        tcf_action_put_many(actions);
        return ret;
}
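/* Build and send the RTM_NEWACTION notification for a freshly installed
 * batch of actions; the skb is sized from the estimated attribute size so
 * that large batches are not truncated at NLMSG_GOODSIZE.
 */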
static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
               u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
        struct sk_buff *skb;
        int err = 0;

        skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
                        GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
                         RTM_NEWACTION, 0, 0) <= 0) {
                NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
                kfree_skb(skb);
                return -EINVAL;
        }

        err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                             n->nlmsg_flags & NLM_F_ECHO);
        if (err > 0)
                err = 0;
        return err;
}

static int tcf_action_add(struct net *net, struct nlattr *nla,
                          struct nlmsghdr *n, u32 portid, int ovr,
                          struct netlink_ext_ack *extack)
{
        size_t attr_size = 0;
        int loop, ret;
        struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

        for (loop = 0; loop < 10; loop++) {
                ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
                                      actions, &attr_size, true, extack);
                if (ret != -EAGAIN)
                        break;
        }

        if (ret < 0)
                return ret;
        ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
        if (ovr)
                tcf_action_put_many(actions);

        return ret;
}

static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
        [TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
                                                 TCA_ACT_FLAG_TERSE_DUMP),
        [TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
};

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
                         struct netlink_ext_ack *extack)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr *tca[TCA_ROOT_MAX + 1];
        u32 portid = NETLINK_CB(skb).portid;
        int ret = 0, ovr = 0;

        if ((n->nlmsg_type != RTM_GETACTION) &&
            !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;

        ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
                                     TCA_ROOT_MAX, NULL, extack);
        if (ret < 0)
                return ret;

        if (tca[TCA_ACT_TAB] == NULL) {
                NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
                return -EINVAL;
        }

        /* n->nlmsg_flags & NLM_F_CREATE */
        switch (n->nlmsg_type) {
        case RTM_NEWACTION:
                /* We are going to assume all other flags
                 * imply create only if it doesn't exist.
                 * Note that CREATE | EXCL implies that,
                 * but since we want to avoid ambiguity (e.g. when flags
                 * is zero) we just set this.
                 */
                if (n->nlmsg_flags & NLM_F_REPLACE)
                        ovr = 1;
                ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
                                     extack);
                break;
        case RTM_DELACTION:
                ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
                                    portid, RTM_DELACTION, extack);
                break;
        case RTM_GETACTION:
                ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
                                    portid, RTM_GETACTION, extack);
                break;
        default:
                BUG();
        }

        return ret;
}

static struct nlattr *find_dump_kind(struct nlattr **nla)
{
        struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct nlattr *kind;

        tb1 = nla[TCA_ACT_TAB];
        if (tb1 == NULL)
                return NULL;

        if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
                                 NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
                return NULL;

        if (tb[1] == NULL)
                return NULL;
        if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1],
                                        tcf_action_policy, NULL) < 0)
                return NULL;
        kind = tb2[TCA_ACT_KIND];

        return kind;
}
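/* Action dump entry point. A dump covers a single action kind, taken from
 * the first action in the request (see find_dump_kind() above); the
 * TCA_ROOT_FLAGS and TCA_ROOT_TIME_DELTA attributes further shape the walk.
 */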
static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        struct nlmsghdr *nlh;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
        struct tc_action_ops *a_o;
        int ret = 0;
        struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
        struct nlattr *tb[TCA_ROOT_MAX + 1];
        struct nlattr *count_attr = NULL;
        unsigned long jiffy_since = 0;
        struct nlattr *kind = NULL;
        struct nla_bitfield32 bf;
        u32 msecs_since = 0;
        u32 act_count = 0;

        ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
                                     TCA_ROOT_MAX, tcaa_policy, cb->extack);
        if (ret < 0)
                return ret;

        kind = find_dump_kind(tb);
        if (kind == NULL) {
                pr_info("tc_dump_action: action bad kind\n");
                return 0;
        }

        a_o = tc_lookup_action(kind);
        if (a_o == NULL)
                return 0;

        cb->args[2] = 0;
        if (tb[TCA_ROOT_FLAGS]) {
                bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
                cb->args[2] = bf.value;
        }

        if (tb[TCA_ROOT_TIME_DELTA]) {
                msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
        }

        nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                        cb->nlh->nlmsg_type, sizeof(*t), 0);
        if (!nlh)
                goto out_module_put;

        if (msecs_since)
                jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

        t = nlmsg_data(nlh);
        t->tca_family = AF_UNSPEC;
        t->tca__pad1 = 0;
        t->tca__pad2 = 0;
        cb->args[3] = jiffy_since;
        count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
        if (!count_attr)
                goto out_module_put;

        nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
        if (nest == NULL)
                goto out_module_put;

        ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
        if (ret < 0)
                goto out_module_put;

        if (ret > 0) {
                nla_nest_end(skb, nest);
                ret = skb->len;
                act_count = cb->args[1];
                memcpy(nla_data(count_attr), &act_count, sizeof(u32));
                cb->args[1] = 0;
        } else
                nlmsg_trim(skb, b);

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        if (NETLINK_CB(cb->skb).portid && ret)
                nlh->nlmsg_flags |= NLM_F_MULTI;
        module_put(a_o->owner);
        return skb->len;

out_module_put:
        module_put(a_o->owner);
        nlmsg_trim(skb, b);
        return skb->len;
}

static int __init tc_action_init(void)
{
        rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
                      0);

        return 0;
}

subsys_initcall(tc_action_init);