// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_api.c	Packet action API.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/flow_offload.h>

#ifdef CONFIG_INET
DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);
#endif

int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
{
#ifdef CONFIG_INET
	if (static_branch_unlikely(&tcf_frag_xmit_count))
		return sch_frag_xmit_hook(skb, xmit);
#endif

	return xmit(skb);
}
EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);

static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}

static void tcf_free_cookie_rcu(struct rcu_head *p)
{
	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);

	kfree(cookie->data);
	kfree(cookie);
}

static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
				  struct tc_cookie *new_cookie)
{
	struct tc_cookie *old;

	old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
	if (old)
		call_rcu(&old->rcu, tcf_free_cookie_rcu);
}

int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
			     struct tcf_chain **newchain,
			     struct netlink_ext_ack *extack)
{
	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
	u32 chain_index;

	if (!opcode)
		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
		ret = 0;
	if (ret) {
		NL_SET_ERR_MSG(extack, "invalid control action");
		goto end;
	}

	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
		chain_index = action & TC_ACT_EXT_VAL_MASK;
		if (!tp || !newchain) {
			ret = -EINVAL;
			NL_SET_ERR_MSG(extack,
				       "can't goto NULL proto/chain");
			goto end;
		}
		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
		if (!*newchain) {
			ret = -ENOMEM;
			NL_SET_ERR_MSG(extack,
				       "can't allocate goto_chain");
		}
	}
end:
	return ret;
}
EXPORT_SYMBOL(tcf_action_check_ctrlact);

struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
					 struct tcf_chain *goto_chain)
{
	a->tcfa_action = action;
	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
	return goto_chain;
}
EXPORT_SYMBOL(tcf_action_set_ctrlact);

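/* Illustrative sketch (not part of the original file): extended control
 * verdicts pack an opcode into the high bits and a value into the low
 * TC_ACT_EXT_VAL_MASK bits, which is what the two helpers above validate
 * and install. For example, a hypothetical caller wanting to jump to
 * chain 42 would use:
 *
 *	int action = TC_ACT_GOTO_CHAIN | 42;
 *
 *	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN))
 *		chain_index = action & TC_ACT_EXT_VAL_MASK;	// == 42
 */
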
/* XXX: For standalone actions, we don't need an RCU grace period either,
 * because actions are always connected to filters and filters are already
 * destroyed in RCU callbacks, so after an RCU grace period actions are
 * already disconnected from filters. Readers later cannot find us.
 */
static void free_tcf(struct tc_action *p)
{
	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);

	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_bstats_hw);
	free_percpu(p->cpu_qstats);

	tcf_set_action_cookie(&p->act_cookie, NULL);
	if (chain)
		tcf_chain_put_by_act(chain);

	kfree(p);
}

static void offload_action_hw_count_set(struct tc_action *act,
					u32 hw_count)
{
	act->in_hw_count = hw_count;
}

static void offload_action_hw_count_inc(struct tc_action *act,
					u32 hw_count)
{
	act->in_hw_count += hw_count;
}

static void offload_action_hw_count_dec(struct tc_action *act,
					u32 hw_count)
{
	act->in_hw_count = act->in_hw_count > hw_count ?
			   act->in_hw_count - hw_count : 0;
}

static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act)
{
	if (is_tcf_pedit(act))
		return tcf_pedit_nkeys(act);
	else
		return 1;
}

static bool tc_act_skip_hw(u32 flags)
{
	return (flags & TCA_ACT_FLAGS_SKIP_HW) ? true : false;
}

static bool tc_act_skip_sw(u32 flags)
{
	return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false;
}

static bool tc_act_in_hw(struct tc_action *act)
{
	return !!act->in_hw_count;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static bool tc_act_flags_valid(u32 flags)
{
	flags &= TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW;

	return flags ^ (TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW);
}

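/* A quick check of the XOR above (illustrative, not part of the original
 * file): after masking to the two skip bits, the result is zero (invalid)
 * only when both bits are set:
 *
 *	tc_act_flags_valid(0)				-> true
 *	tc_act_flags_valid(TCA_ACT_FLAGS_SKIP_HW)	-> true
 *	tc_act_flags_valid(TCA_ACT_FLAGS_SKIP_SW)	-> true
 *	tc_act_flags_valid(TCA_ACT_FLAGS_SKIP_HW |
 *			   TCA_ACT_FLAGS_SKIP_SW)	-> false
 */
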
static int offload_action_init(struct flow_offload_action *fl_action,
			       struct tc_action *act,
			       enum offload_act_command cmd,
			       struct netlink_ext_ack *extack)
{
	int err;

	fl_action->extack = extack;
	fl_action->command = cmd;
	fl_action->index = act->tcfa_index;

	if (act->ops->offload_act_setup) {
		spin_lock_bh(&act->tcfa_lock);
		err = act->ops->offload_act_setup(act, fl_action, NULL,
						  false, extack);
		spin_unlock_bh(&act->tcfa_lock);
		return err;
	}

	return -EOPNOTSUPP;
}

static int tcf_action_offload_cmd_ex(struct flow_offload_action *fl_act,
				     u32 *hw_count)
{
	int err;

	err = flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT,
					  fl_act, NULL, NULL);
	if (err < 0)
		return err;

	if (hw_count)
		*hw_count = err;

	return 0;
}

static int tcf_action_offload_cmd_cb_ex(struct flow_offload_action *fl_act,
					u32 *hw_count,
					flow_indr_block_bind_cb_t *cb,
					void *cb_priv)
{
	int err;

	err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL);
	if (err < 0)
		return err;

	if (hw_count)
		*hw_count = 1;

	return 0;
}

static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
				  u32 *hw_count,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_priv)
{
	return cb ? tcf_action_offload_cmd_cb_ex(fl_act, hw_count,
						 cb, cb_priv) :
		    tcf_action_offload_cmd_ex(fl_act, hw_count);
}

static int tcf_action_offload_add_ex(struct tc_action *action,
				     struct netlink_ext_ack *extack,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_priv)
{
	bool skip_sw = tc_act_skip_sw(action->tcfa_flags);
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
		[0] = action,
	};
	struct flow_offload_action *fl_action;
	u32 in_hw_count = 0;
	int num, err = 0;

	if (tc_act_skip_hw(action->tcfa_flags))
		return 0;

	num = tcf_offload_act_num_actions_single(action);
	fl_action = offload_action_alloc(num);
	if (!fl_action)
		return -ENOMEM;

	err = offload_action_init(fl_action, action, FLOW_ACT_REPLACE, extack);
	if (err)
		goto fl_err;

	err = tc_setup_action(&fl_action->action, actions, extack);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to setup tc actions for offload");
		goto fl_err;
	}

	err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv);
	if (!err)
		cb ? offload_action_hw_count_inc(action, in_hw_count) :
		     offload_action_hw_count_set(action, in_hw_count);

	if (skip_sw && !tc_act_in_hw(action))
		err = -EINVAL;

	tc_cleanup_offload_action(&fl_action->action);

fl_err:
	kfree(fl_action);

	return err;
}

/* offload the tc action after it is inserted */
static int tcf_action_offload_add(struct tc_action *action,
				  struct netlink_ext_ack *extack)
{
	return tcf_action_offload_add_ex(action, extack, NULL, NULL);
}

int tcf_action_update_hw_stats(struct tc_action *action)
{
	struct flow_offload_action fl_act = {};
	int err;

	if (!tc_act_in_hw(action))
		return -EOPNOTSUPP;

	err = offload_action_init(&fl_act, action, FLOW_ACT_STATS, NULL);
	if (err)
		return err;

	err = tcf_action_offload_cmd(&fl_act, NULL, NULL, NULL);
	if (!err) {
		preempt_disable();
		tcf_action_stats_update(action, fl_act.stats.bytes,
					fl_act.stats.pkts,
					fl_act.stats.drops,
					fl_act.stats.lastused,
					true);
		preempt_enable();
		action->used_hw_stats = fl_act.stats.used_hw_stats;
		action->used_hw_stats_valid = true;
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}
EXPORT_SYMBOL(tcf_action_update_hw_stats);

static int tcf_action_offload_del_ex(struct tc_action *action,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_priv)
{
	struct flow_offload_action fl_act = {};
	u32 in_hw_count = 0;
	int err = 0;

	if (!tc_act_in_hw(action))
		return 0;

	err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL);
	if (err)
		return err;

	err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv);
	if (err < 0)
		return err;

	if (!cb && action->in_hw_count != in_hw_count)
		return -EINVAL;

	/* do not need to update hw state when deleting action */
	if (cb && in_hw_count)
		offload_action_hw_count_dec(action, in_hw_count);

	return 0;
}

static int tcf_action_offload_del(struct tc_action *action)
{
	return tcf_action_offload_del_ex(action, NULL, NULL);
}

static void tcf_action_cleanup(struct tc_action *p)
{
	tcf_action_offload_del(p);
	if (p->ops->cleanup)
		p->ops->cleanup(p);

	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}

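/* Note (added for orientation, based on how the counters are used below):
 * tcfa_refcnt counts user-space/netlink references to an action, while
 * tcfa_bindcnt counts classifiers bound to it. __tcf_action_put() drops
 * one reference and runs tcf_action_cleanup() when the last one goes away.
 */
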
static int __tcf_action_put(struct tc_action *p, bool bind)
{
	struct tcf_idrinfo *idrinfo = p->idrinfo;

	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
		if (bind)
			atomic_dec(&p->tcfa_bindcnt);
		idr_remove(&idrinfo->action_idr, p->tcfa_index);
		mutex_unlock(&idrinfo->lock);

		tcf_action_cleanup(p);
		return 1;
	}

	if (bind)
		atomic_dec(&p->tcfa_bindcnt);

	return 0;
}

static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	/* Release with strict==1 and bind==0 is only called through the act
	 * API interface (classifiers always bind). The only case when an
	 * action with a positive reference count and zero bind count can
	 * exist is when it was also created with the act API (unbinding the
	 * last classifier will destroy the action if it was created by a
	 * classifier). So the only case when the bind count can change after
	 * the initial check is when an unbound action is destroyed by the
	 * act API while a classifier concurrently binds to an action with
	 * the same id. This results either in creation of a new action (same
	 * behavior as before) or in reuse of the existing action, if the
	 * concurrent process increments the reference count before the
	 * action is deleted. Both scenarios are acceptable.
	 */
	if (p) {
		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
			return -EPERM;

		if (__tcf_action_put(p, bind))
			ret = ACT_P_DELETED;
	}

	return ret;
}

int tcf_idr_release(struct tc_action *a, bool bind)
{
	const struct tc_action_ops *ops = a->ops;
	int ret;

	ret = __tcf_idr_release(a, bind, false);
	if (ret == ACT_P_DELETED)
		module_put(ops->owner);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_release);

static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
	struct tc_cookie *act_cookie;
	u32 cookie_len = 0;

	rcu_read_lock();
	act_cookie = rcu_dereference(act->act_cookie);

	if (act_cookie)
		cookie_len = nla_total_size(act_cookie->len);
	rcu_read_unlock();

	return  nla_total_size(0) /* action number nested */
		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
		+ cookie_len /* TCA_ACT_COOKIE */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
		+ nla_total_size(0) /* TCA_ACT_STATS nested */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
		/* TCA_STATS_BASIC */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
		/* TCA_STATS_PKT64 */
		+ nla_total_size_64bit(sizeof(u64))
		/* TCA_STATS_QUEUE */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
		+ nla_total_size(0) /* TCA_OPTIONS nested */
		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
}

static size_t tcf_action_full_attrs_size(size_t sz)
{
	return NLMSG_HDRLEN			/* struct nlmsghdr */
		+ sizeof(struct tcamsg)
		+ nla_total_size(0)		/* TCA_ACT_TAB nested */
		+ sz;
}

static size_t tcf_action_fill_size(const struct tc_action *act)
{
	size_t sz = tcf_action_shared_attrs_size(act);

	if (act->ops->get_fill_size)
		return act->ops->get_fill_size(act) + sz;
	return sz;
}

static int
tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cookie *cookie;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
		goto nla_put_failure;

	rcu_read_lock();
	cookie = rcu_dereference(a->act_cookie);
	if (cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
	}
	rcu_read_unlock();

	return 0;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	mutex_lock(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ul(idr, p, tmp, id) {
		index++;
		if (index < s_i)
			continue;
		if (IS_ERR(p))
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start_noflag(skb, n_i);
		if (!nest) {
			index--;
			goto nla_put_failure;
		}
		err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
			tcf_action_dump_terse(skb, p, true) :
			tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	mutex_unlock(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static int tcf_idr_release_unsafe(struct tc_action *p)
{
	if (atomic_read(&p->tcfa_bindcnt) > 0)
		return -EPERM;

	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
		tcf_action_cleanup(p);
		return ACT_P_DELETED;
	}

	return 0;
}

static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	nest = nla_nest_start_noflag(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	ret = 0;
	mutex_lock(&idrinfo->lock);
	idr_for_each_entry_ul(idr, p, tmp, id) {
		if (IS_ERR(p))
			continue;
		ret = tcf_idr_release_unsafe(p);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			break;
		n_i++;
	}
	mutex_unlock(&idrinfo->lock);
	if (ret < 0) {
		if (n_i)
			NL_SET_ERR_MSG(extack, "Unable to flush all TC actions");
		else
			goto nla_put_failure;
	}

	ret = nla_put_u32(skb, TCA_FCNT, n_i);
	if (ret)
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}

int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops,
		       struct netlink_ext_ack *extack)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops, extack);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);

int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (IS_ERR(p))
		p = NULL;
	else if (p)
		refcount_inc(&p->tcfa_refcnt);
	mutex_unlock(&idrinfo->lock);

	if (p) {
		*a = p;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcf_idr_search);

static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
	struct tc_action *p;
	int ret = 0;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (!p) {
		mutex_unlock(&idrinfo->lock);
		return -ENOENT;
	}

	if (!atomic_read(&p->tcfa_bindcnt)) {
		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
			struct module *owner = p->ops->owner;

			WARN_ON(p != idr_remove(&idrinfo->action_idr,
						p->tcfa_index));
			mutex_unlock(&idrinfo->lock);

			tcf_action_cleanup(p);
			module_put(owner);
			return 0;
		}
		ret = 0;
	} else {
		ret = -EPERM;
	}

	mutex_unlock(&idrinfo->lock);
	return ret;
}

int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats, u32 flags)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	refcount_set(&p->tcfa_refcnt, 1);
	if (bind)
		atomic_set(&p->tcfa_bindcnt, 1);

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
		if (!p->cpu_bstats)
			goto err1;
		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
		if (!p->cpu_bstats_hw)
			goto err2;
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats)
			goto err3;
	}
	gnet_stats_basic_sync_init(&p->tcfa_bstats);
	gnet_stats_basic_sync_init(&p->tcfa_bstats_hw);
	spin_lock_init(&p->tcfa_lock);
	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	p->tcfa_flags = flags;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, false, est);
		if (err)
			goto err4;
	}

	p->idrinfo = idrinfo;
	__module_get(ops->owner);
	p->ops = ops;
	*a = p;
	return 0;
err4:
	free_percpu(p->cpu_qstats);
err3:
	free_percpu(p->cpu_bstats_hw);
err2:
	free_percpu(p->cpu_bstats);
err1:
	kfree(p);
	return err;
}
EXPORT_SYMBOL(tcf_idr_create);

int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
			      struct nlattr *est, struct tc_action **a,
			      const struct tc_action_ops *ops, int bind,
			      u32 flags)
{
	/* Set cpustats according to the action's flags. */
	return tcf_idr_create(tn, index, est, a, ops, bind,
			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
}
EXPORT_SYMBOL(tcf_idr_create_from_flags);

/* Cleanup idr index that was allocated but not initialized. */

void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);
	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
	mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_cleanup);

/* Check if an action with the specified index exists. If it is found,
 * increment its reference and bind counters and return 1. Otherwise insert
 * a temporary error pointer (to prevent concurrent users from inserting
 * actions with the same index) and return 0.
 */

int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
			struct tc_action **a, int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;
	int ret;

again:
	mutex_lock(&idrinfo->lock);
	if (*index) {
		p = idr_find(&idrinfo->action_idr, *index);
		if (IS_ERR(p)) {
			/* This means that another process allocated
			 * index but did not assign the pointer yet.
			 */
			mutex_unlock(&idrinfo->lock);
			goto again;
		}

		if (p) {
			refcount_inc(&p->tcfa_refcnt);
			if (bind)
				atomic_inc(&p->tcfa_bindcnt);
			*a = p;
			ret = 1;
		} else {
			*a = NULL;
			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
					    *index, GFP_KERNEL);
			if (!ret)
				idr_replace(&idrinfo->action_idr,
					    ERR_PTR(-EBUSY), *index);
		}
	} else {
		*index = 1;
		*a = NULL;
		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
				    UINT_MAX, GFP_KERNEL);
		if (!ret)
			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
				    *index);
	}
	mutex_unlock(&idrinfo->lock);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_check_alloc);

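/* Illustrative sketch (an assumption modeled on typical callers, not part
 * of this file): an action's ->init() commonly drives the three outcomes
 * of tcf_idr_check_alloc() like this:
 *
 *	err = tcf_idr_check_alloc(tn, &index, a, bind);
 *	if (err < 0)
 *		return err;
 *	if (err) {			// existing action: update/bind it
 *		...
 *	} else {			// index reserved: create a new one
 *		err = tcf_idr_create_from_flags(tn, index, est, a,
 *						ops, bind, flags);
 *		if (err)
 *			tcf_idr_cleanup(tn, index);
 *	}
 *
 * The reserved slot holds ERR_PTR(-EBUSY) until tcf_idr_insert_many()
 * commits the fully initialized action.
 */
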
void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;
	unsigned long tmp;

	idr_for_each_entry_ul(idr, p, tmp, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);
/* Since the act ops id is stored in the pernet subsystem list, there is no
 * way to walk through only the action subsystems, so we keep the tc action
 * pernet ops ids on a separate list for reoffload to walk through.
 */
static LIST_HEAD(act_pernet_id_list);
static DEFINE_MUTEX(act_id_mutex);
struct tc_act_pernet_id {
	struct list_head list;
	unsigned int id;
};

static int tcf_pernet_add_id_list(unsigned int id)
{
	struct tc_act_pernet_id *id_ptr;
	int ret = 0;

	mutex_lock(&act_id_mutex);
	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
		if (id_ptr->id == id) {
			ret = -EEXIST;
			goto err_out;
		}
	}

	id_ptr = kzalloc(sizeof(*id_ptr), GFP_KERNEL);
	if (!id_ptr) {
		ret = -ENOMEM;
		goto err_out;
	}
	id_ptr->id = id;

	list_add_tail(&id_ptr->list, &act_pernet_id_list);

err_out:
	mutex_unlock(&act_id_mutex);
	return ret;
}

static void tcf_pernet_del_id_list(unsigned int id)
{
	struct tc_act_pernet_id *id_ptr;

	mutex_lock(&act_id_mutex);
	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
		if (id_ptr->id == id) {
			list_del(&id_ptr->list);
			kfree(id_ptr);
			break;
		}
	}
	mutex_unlock(&act_id_mutex);
}

int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * netns.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	if (ops->id) {
		ret = tcf_pernet_add_id_list(*ops->id);
		if (ret)
			goto err_id;
	}

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
			ret = -EEXIST;
			goto err_out;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;

err_out:
	write_unlock(&act_mod_lock);
	if (ops->id)
		tcf_pernet_del_id_list(*ops->id);
err_id:
	unregister_pernet_subsys(ops);
	return ret;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err) {
		unregister_pernet_subsys(ops);
		if (ops->id)
			tcf_pernet_del_id_list(*ops->id);
	}
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

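/* Illustrative sketch (an assumption modeled on existing action modules,
 * not part of this file): a module typically pairs tcf_register_action()
 * with its pernet ops in module init/exit; all "foo" names here are
 * hypothetical:
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init		= foo_init_net,
 *		.exit_batch	= foo_exit_net,
 *		.id		= &foo_net_id,
 *		.size		= sizeof(struct tc_action_net),
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return tcf_register_action(&act_foo_ops, &foo_net_ops);
 *	}
 *
 *	static void __exit foo_cleanup_module(void)
 *	{
 *		tcf_unregister_action(&act_foo_ops, &foo_net_ops);
 *	}
 */
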
/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* TCA_ACT_MAX_PRIO is 32, so jump indexes count up to 32 */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];
		int repeat_ttl;

		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}

		if (tc_act_skip_sw(a->tcfa_flags))
			continue;

		repeat_ttl = 32;
repeat:
		ret = a->ops->act(skb, a, res);
		if (unlikely(ret == TC_ACT_REPEAT)) {
			if (--repeat_ttl != 0)
				goto repeat;
			/* suspicious opcode, stop pipeline */
			net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
			return TC_ACT_OK;
		}
		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			} else {
				jmp_ttl -= 1;
				if (jmp_ttl > 0)
					goto restart_act_graph;
				else /* faulty graph, stop pipeline */
					return TC_ACT_OK;
			}
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
				net_warn_ratelimited("can't go to NULL chain!\n");
				return TC_ACT_SHOT;
			}
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

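/* Illustrative note (not part of the original file): a TC_ACT_JUMP verdict
 * carries its operand in the low bits. For example, if an action returns
 *
 *	TC_ACT_JUMP | 2
 *
 * the loop above restarts from the head of the array with jmp_prgcnt == 2
 * and skips that many entries, so execution resumes at the third action.
 * jmp_ttl bounds how many such jumps one skb may take.
 */
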
int tcf_action_destroy(struct tc_action *actions[], int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int ret = 0, i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		actions[i] = NULL;
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}

static int tcf_action_put(struct tc_action *p)
{
	return __tcf_action_put(p, false);
}

/* Put all actions in this array, skipping NULL entries. */
static void tcf_action_put_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops;

		if (!a)
			continue;
		ops = a->ops;
		if (tcf_action_put(a))
			module_put(ops->owner);
	}
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	u32 flags;

	if (tcf_action_dump_terse(skb, a, false))
		goto nla_put_failure;

	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	if (a->used_hw_stats_valid &&
	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
	if (flags &&
	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
			       flags, flags))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
		goto nla_put_failure;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
		    int bind, int ref, bool terse)
{
	struct tc_action *a;
	int err = -EINVAL, i;
	struct nlattr *nest;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		nest = nla_nest_start_noflag(skb, i + 1);
		if (nest == NULL)
			goto nla_put_failure;
		err = terse ? tcf_action_dump_terse(skb, a, false) :
			tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	if (!c->data) {
		kfree(c);
		return NULL;
	}
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

	return c;
}

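/* Note (illustrative, not part of the original file): TCA_ACT_COOKIE is an
 * opaque blob of up to TC_COOKIE_MAX_SIZE bytes. The kernel never
 * interprets it; user space (e.g. iproute2's "cookie" option on actions)
 * can use it to tag an action and gets the same bytes back in dumps.
 */
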
static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
{
	struct nla_bitfield32 hw_stats_bf;

	/* If the user did not pass the attr, that means they do not care
	 * about the type. Return "any" in that case, which covers all
	 * supported types.
	 */
	if (!hw_stats_attr)
		return TCA_ACT_HW_STATS_ANY;
	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
	return hw_stats_bf.value;
}

static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
	[TCA_ACT_KIND]		= { .type = NLA_STRING },
	[TCA_ACT_INDEX]		= { .type = NLA_U32 },
	[TCA_ACT_COOKIE]	= { .type = NLA_BINARY,
				    .len = TC_COOKIE_MAX_SIZE },
	[TCA_ACT_OPTIONS]	= { .type = NLA_NESTED },
	[TCA_ACT_FLAGS]		= NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS |
							TCA_ACT_FLAGS_SKIP_HW |
							TCA_ACT_FLAGS_SKIP_SW),
	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
};

void tcf_idr_insert_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		struct tcf_idrinfo *idrinfo;

		if (!a)
			continue;
		idrinfo = a->idrinfo;
		mutex_lock(&idrinfo->lock);
		/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if
		 * it was just created, otherwise this is just a nop.
		 */
		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
		mutex_unlock(&idrinfo->lock);
	}
}

struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police,
					 bool rtnl_held,
					 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *kind;
	int err;

	if (!police) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			return ERR_PTR(err);
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (!kind) {
			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
			return ERR_PTR(err);
		}
		if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(err);
		}
	} else {
		if (strlcpy(act_name, "police", IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(-EINVAL);
		}
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		if (rtnl_held)
			rtnl_unlock();
		request_module("act_%s", act_name);
		if (rtnl_held)
			rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			module_put(a_o->owner);
			return ERR_PTR(-EAGAIN);
		}
#endif
		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
		return ERR_PTR(-ENOENT);
	}

	return a_o;
}

struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    struct tc_action_ops *a_o, int *init_res,
				    u32 flags, struct netlink_ext_ack *extack)
{
	bool police = flags & TCA_ACT_FLAGS_POLICE;
	struct nla_bitfield32 userflags = { 0, 0 };
	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_cookie *cookie = NULL;
	struct tc_action *a;
	int err;

	/* backward compatibility for policer */
	if (!police) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			return ERR_PTR(err);
		if (tb[TCA_ACT_COOKIE]) {
			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
				err = -ENOMEM;
				goto err_out;
			}
		}
		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
		if (tb[TCA_ACT_FLAGS]) {
			userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
			if (!tc_act_flags_valid(userflags.value)) {
				err = -EINVAL;
				goto err_out;
			}
		}

		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp,
				userflags.value | flags, extack);
	} else {
		err = a_o->init(net, nla, est, &a, tp, userflags.value | flags,
				extack);
	}
	if (err < 0)
		goto err_out;
	*init_res = err;

	if (!police && tb[TCA_ACT_COOKIE])
		tcf_set_action_cookie(&a->act_cookie, cookie);

	if (!police)
		a->hw_stats = hw_stats;

	return a;

err_out:
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
	return ERR_PTR(err);
}

static bool tc_act_bind(u32 flags)
{
	return !!(flags & TCA_ACT_FLAGS_BIND);
}

/* Returns the number of initialized actions or a negative error. */

int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, struct tc_action *actions[],
		    int init_res[], size_t *attr_size,
		    u32 flags, u32 fl_flags,
		    struct netlink_ext_ack *extack)
{
	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t sz = 0;
	int err;
	int i;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		struct tc_action_ops *a_o;

		a_o = tc_action_load_ops(tb[i], flags & TCA_ACT_FLAGS_POLICE,
					 !(flags & TCA_ACT_FLAGS_NO_RTNL),
					 extack);
		if (IS_ERR(a_o)) {
			err = PTR_ERR(a_o);
			goto err_mod;
		}
		ops[i - 1] = a_o;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, ops[i - 1],
					&init_res[i - 1], flags, extack);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		sz += tcf_action_fill_size(act);
		/* Start from index 0 */
		actions[i - 1] = act;
		if (tc_act_bind(flags)) {
			bool skip_sw = tc_skip_sw(fl_flags);
			bool skip_hw = tc_skip_hw(fl_flags);

			if (tc_act_bind(act->tcfa_flags))
				continue;
			if (skip_sw != tc_act_skip_sw(act->tcfa_flags) ||
			    skip_hw != tc_act_skip_hw(act->tcfa_flags)) {
				NL_SET_ERR_MSG(extack,
					       "Mismatch between action and filter offload flags");
				err = -EINVAL;
				goto err;
			}
		} else {
			err = tcf_action_offload_add(act, extack);
			if (tc_act_skip_sw(act->tcfa_flags) && err)
				goto err;
		}
	}

	/* We have to commit them all together, because if any error happened
	 * in between, we could not handle the failure gracefully.
	 */
	tcf_idr_insert_many(actions);

	*attr_size = tcf_action_full_attrs_size(sz);
	err = i - 1;
	goto err_mod;

err:
	tcf_action_destroy(actions, flags & TCA_ACT_FLAGS_BIND);
err_mod:
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		if (ops[i])
			module_put(ops[i]->owner);
	}
	return err;
}

void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, bool hw)
{
	if (a->cpu_bstats) {
		_bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);

		this_cpu_ptr(a->cpu_qstats)->drops += drops;

		if (hw)
			_bstats_update(this_cpu_ptr(a->cpu_bstats_hw),
				       bytes, packets);
		return;
	}

	_bstats_update(&a->tcfa_bstats, bytes, packets);
	a->tcfa_qstats.drops += drops;
	if (hw)
		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
}
EXPORT_SYMBOL(tcf_action_update_stats);

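/* Note (illustrative, not part of the original file): actions created with
 * per-CPU stats (the default, see tcf_idr_create_from_flags()) take the
 * this_cpu_ptr() path above without taking tcfa_lock, while actions
 * created with TCA_ACT_FLAGS_NO_PERCPU_STATS fall through to the shared
 * counters that tcf_action_copy_stats() below reads under tcfa_lock.
 */
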
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* update hw stats for this action */
	tcf_action_update_hw_stats(p);

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(&d, p->cpu_bstats,
				  &p->tcfa_bstats, false) < 0 ||
	    gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
				     &p->tcfa_bstats_hw, false) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

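/* Sketch of the message layout tca_get_fill() produces (for orientation,
 * derived from the code above and tcf_action_dump()):
 *
 *	struct nlmsghdr
 *	struct tcamsg
 *	TCA_ACT_TAB (nest)
 *		1 (nest, first action)
 *			TCA_ACT_KIND, TCA_ACT_INDEX, TCA_ACT_STATS,
 *			TCA_OPTIONS (nest, kind-specific attributes), ...
 *		2 (nest, second action)
 *			...
 */
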
static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct tc_action *actions[], int event,
	       struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
			 0, 1) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}

static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid,
					  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
		goto err_out;
	}
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) { /* could happen in batch of actions */
		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
		goto err_out;
	}
	err = -ENOENT;
	if (ops->lookup(net, &a, index) == 0) {
		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
		goto err_mod;
	}

	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}

static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid,
			    struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return err;

	b = skb_tail_pointer(skb);

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);
	if (!ops) { /* someone is trying to flush an unknown action */
		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
		goto err_out;
	}

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh) {
		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
		goto out_module_put;
	}
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest) {
		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
		goto out_module_put;
	}

	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
	if (err <= 0) {
		nla_nest_cancel(skb, nest);
		goto out_module_put;
	}

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");

	return err;

out_module_put:
	module_put(ops->owner);
err_out:
	kfree_skb(skb);
	return err;
}

static int tcf_action_delete(struct net *net, struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops = a->ops;
		/* Actions can be deleted concurrently, so we must save their
		 * type and id to search again after the reference is released.
		 */
		struct tcf_idrinfo *idrinfo = a->idrinfo;
		u32 act_index = a->tcfa_index;

		actions[i] = NULL;
		if (tcf_action_put(a)) {
			/* last reference, action was deleted concurrently */
			module_put(ops->owner);
		} else {
			int ret;

			/* now do the delete */
			ret = tcf_idr_delete_index(idrinfo, act_index);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

static int
tcf_reoffload_del_notify(struct net *net, struct tc_action *action)
{
	size_t attr_size = tcf_action_fill_size(action);
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
		[0] = action,
	};
	const struct tc_action_ops *ops = action->ops;
	struct sk_buff *skb;
	int ret;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	ret = tcf_idr_release_unsafe(action);
	if (ret == ACT_P_DELETED) {
		module_put(ops->owner);
		ret = rtnetlink_send(skb, net, 0, RTNLGRP_TC, 0);
	} else {
		kfree_skb(skb);
	}

	return ret;
}

int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
			    void *cb_priv, bool add)
{
	struct tc_act_pernet_id *id_ptr;
	struct tcf_idrinfo *idrinfo;
	struct tc_action_net *tn;
	struct tc_action *p;
	unsigned int act_id;
	unsigned long tmp;
	unsigned long id;
	struct idr *idr;
	struct net *net;
	int ret;

	if (!cb)
		return -EINVAL;

	down_read(&net_rwsem);
	mutex_lock(&act_id_mutex);

	for_each_net(net) {
		list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
			act_id = id_ptr->id;
			tn = net_generic(net, act_id);
			if (!tn)
				continue;
			idrinfo = tn->idrinfo;
			if (!idrinfo)
				continue;

			mutex_lock(&idrinfo->lock);
			idr = &idrinfo->action_idr;
			idr_for_each_entry_ul(idr, p, tmp, id) {
				if (IS_ERR(p) || tc_act_bind(p->tcfa_flags))
					continue;
				if (add) {
					tcf_action_offload_add_ex(p, NULL, cb,
								  cb_priv);
					continue;
				}

				/* cb unregister to update hw count */
				ret = tcf_action_offload_del_ex(p, cb, cb_priv);
				if (ret < 0)
					continue;
				if (tc_act_skip_sw(p->tcfa_flags) &&
				    !tc_act_in_hw(p))
					tcf_reoffload_del_notify(net, p);
			}
			mutex_unlock(&idrinfo->lock);
		}
	}
	mutex_unlock(&act_id_mutex);
	up_read(&net_rwsem);

	return 0;
}

static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 2) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_delete(net, actions);
	if (ret < 0) {
		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	return ret;
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event, struct netlink_ext_ack *extack)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t attr_size = 0;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1])
			return tca_action_flush(net, tb[1], n, portid, extack);

		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
		return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid, extack);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		attr_size += tcf_action_fill_size(act);
		actions[i - 1] = act;
	}

	attr_size = tcf_action_full_attrs_size(attr_size);

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, actions, event, extack);
	else { /* delete */
		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
		if (ret)
			goto err;
		return 0;
	}
err:
	tcf_action_put_many(actions);
	return ret;
}

static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, u32 flags,
			  struct netlink_ext_ack *extack)
{
	size_t attr_size = 0;
	int loop, ret, i;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
	int init_res[TCA_ACT_MAX_PRIO] = {};

	for (loop = 0; loop < 10; loop++) {
		ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res,
				      &attr_size, flags, 0, extack);
		if (ret != -EAGAIN)
			break;
	}

	if (ret < 0)
		return ret;
	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);

	/* only put existing actions */
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
		if (init_res[i] == ACT_P_CREATED)
			actions[i] = NULL;
	tcf_action_put_many(actions);

	return ret;
}

static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
						 TCA_ACT_FLAG_TERSE_DUMP),
	[TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
};

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = NETLINK_CB(skb).portid;
	u32 flags = 0;
	int ret = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
				     TCA_ROOT_MAX, NULL, extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* We are going to assume that all other flags imply
		 * "create only if it doesn't exist". Note that
		 * CREATE | EXCL implies that, but since we want to avoid
		 * ambiguity (e.g. when flags is zero) we just set it here.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			flags = TCA_ACT_FLAGS_REPLACE;
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, flags,
				     extack);
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION, extack);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION, extack);
		break;
	default:
		BUG();
	}

	return ret;
}

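/* Note (illustrative, an assumption about typical user space): with
 * iproute2, "tc actions add ..." sends RTM_NEWACTION without NLM_F_REPLACE,
 * so reusing an existing index fails with EEXIST, while
 * "tc actions replace ..." sets NLM_F_REPLACE and takes the
 * TCA_ACT_FLAGS_REPLACE path above.
 */
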
static struct nlattr *find_dump_kind(struct nlattr **nla)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *kind;

	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
				 NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1],
					tcf_action_policy, NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}

static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	cb->args[2] = 0;
	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA]) {
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else
		nlmsg_trim(skb, b);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);