// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_api.c	Packet action API.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>

#ifdef CONFIG_INET
DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
EXPORT_SYMBOL_GPL(tcf_frag_xmit_count);
#endif

int tcf_dev_queue_xmit(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb))
{
#ifdef CONFIG_INET
	if (static_branch_unlikely(&tcf_frag_xmit_count))
		return sch_frag_xmit_hook(skb, xmit);
#endif

	return xmit(skb);
}
EXPORT_SYMBOL_GPL(tcf_dev_queue_xmit);

static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}

static void tcf_free_cookie_rcu(struct rcu_head *p)
{
	struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);

	kfree(cookie->data);
	kfree(cookie);
}

static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
				  struct tc_cookie *new_cookie)
{
	struct tc_cookie *old;

	old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
	if (old)
		call_rcu(&old->rcu, tcf_free_cookie_rcu);
}

int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
			     struct tcf_chain **newchain,
			     struct netlink_ext_ack *extack)
{
	int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
	u32 chain_index;

	if (!opcode)
		ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
	else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
		ret = 0;
	if (ret) {
		NL_SET_ERR_MSG(extack, "invalid control action");
		goto end;
	}

	if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
		chain_index = action & TC_ACT_EXT_VAL_MASK;
		if (!tp || !newchain) {
			ret = -EINVAL;
			NL_SET_ERR_MSG(extack,
				       "can't goto NULL proto/chain");
			goto end;
		}
		*newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
		if (!*newchain) {
			ret = -ENOMEM;
			NL_SET_ERR_MSG(extack,
				       "can't allocate goto_chain");
		}
	}
end:
	return ret;
}
EXPORT_SYMBOL(tcf_action_check_ctrlact);

struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
					 struct tcf_chain *goto_chain)
{
	a->tcfa_action = action;
	goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
	return goto_chain;
}
EXPORT_SYMBOL(tcf_action_set_ctrlact);

/* XXX: For standalone actions, we don't need a RCU grace period either, because
 * actions are always connected to filters and filters are already destroyed in
 * RCU callbacks, so after a RCU grace period actions are already disconnected
 * from filters. Readers later can not find us.
 */
static void free_tcf(struct tc_action *p)
{
	struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);

	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_bstats_hw);
	free_percpu(p->cpu_qstats);

	tcf_set_action_cookie(&p->act_cookie, NULL);
	if (chain)
		tcf_chain_put_by_act(chain);

	kfree(p);
}

static void offload_action_hw_count_set(struct tc_action *act,
					u32 hw_count)
{
	act->in_hw_count = hw_count;
}

static void offload_action_hw_count_inc(struct tc_action *act,
					u32 hw_count)
{
	act->in_hw_count += hw_count;
}

static void offload_action_hw_count_dec(struct tc_action *act,
					u32 hw_count)
{
	act->in_hw_count = act->in_hw_count > hw_count ?
			   act->in_hw_count - hw_count : 0;
}

static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act)
{
	if (is_tcf_pedit(act))
		return tcf_pedit_nkeys(act);
	else
		return 1;
}

static bool tc_act_skip_hw(u32 flags)
{
	return (flags & TCA_ACT_FLAGS_SKIP_HW) ? true : false;
}

static bool tc_act_skip_sw(u32 flags)
{
	return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false;
}

static bool tc_act_in_hw(struct tc_action *act)
{
	return !!act->in_hw_count;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static bool tc_act_flags_valid(u32 flags)
{
	flags &= TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW;

	return flags ^ (TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW);
}

static int offload_action_init(struct flow_offload_action *fl_action,
			       struct tc_action *act,
			       enum offload_act_command cmd,
			       struct netlink_ext_ack *extack)
{
	int err;

	fl_action->extack = extack;
	fl_action->command = cmd;
	fl_action->index = act->tcfa_index;

	if (act->ops->offload_act_setup) {
		spin_lock_bh(&act->tcfa_lock);
		err = act->ops->offload_act_setup(act, fl_action, NULL,
						  false, extack);
		spin_unlock_bh(&act->tcfa_lock);
		return err;
	}

	return -EOPNOTSUPP;
}

static int tcf_action_offload_cmd_ex(struct flow_offload_action *fl_act,
				     u32 *hw_count)
{
	int err;

	err = flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT,
					  fl_act, NULL, NULL);
	if (err < 0)
		return err;

	if (hw_count)
		*hw_count = err;

	return 0;
}

static int tcf_action_offload_cmd_cb_ex(struct flow_offload_action *fl_act,
					u32 *hw_count,
					flow_indr_block_bind_cb_t *cb,
					void *cb_priv)
{
	int err;

	err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL);
	if (err < 0)
		return err;

	if (hw_count)
		*hw_count = 1;

	return 0;
}

static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
				  u32 *hw_count,
				  flow_indr_block_bind_cb_t *cb,
				  void *cb_priv)
{
	return cb ? tcf_action_offload_cmd_cb_ex(fl_act, hw_count,
						 cb, cb_priv) :
		    tcf_action_offload_cmd_ex(fl_act, hw_count);
}

static int tcf_action_offload_add_ex(struct tc_action *action,
				     struct netlink_ext_ack *extack,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_priv)
{
	bool skip_sw = tc_act_skip_sw(action->tcfa_flags);
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
		[0] = action,
	};
	struct flow_offload_action *fl_action;
	u32 in_hw_count = 0;
	int num, err = 0;

	if (tc_act_skip_hw(action->tcfa_flags))
		return 0;

	num = tcf_offload_act_num_actions_single(action);
	fl_action = offload_action_alloc(num);
	if (!fl_action)
		return -ENOMEM;

	err = offload_action_init(fl_action, action, FLOW_ACT_REPLACE, extack);
	if (err)
		goto fl_err;

	err = tc_setup_action(&fl_action->action, actions, extack);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed to setup tc actions for offload");
		goto fl_err;
	}

	err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv);
	if (!err)
		cb ? offload_action_hw_count_inc(action, in_hw_count) :
		     offload_action_hw_count_set(action, in_hw_count);

	if (skip_sw && !tc_act_in_hw(action))
		err = -EINVAL;

	tc_cleanup_offload_action(&fl_action->action);

fl_err:
	kfree(fl_action);

	return err;
}

/* offload the tc action after it is inserted */
static int tcf_action_offload_add(struct tc_action *action,
				  struct netlink_ext_ack *extack)
{
	return tcf_action_offload_add_ex(action, extack, NULL, NULL);
}

int tcf_action_update_hw_stats(struct tc_action *action)
{
	struct flow_offload_action fl_act = {};
	int err;

	if (!tc_act_in_hw(action))
		return -EOPNOTSUPP;

	err = offload_action_init(&fl_act, action, FLOW_ACT_STATS, NULL);
	if (err)
		return err;

	err = tcf_action_offload_cmd(&fl_act, NULL, NULL, NULL);
	if (!err) {
		preempt_disable();
		tcf_action_stats_update(action, fl_act.stats.bytes,
					fl_act.stats.pkts,
					fl_act.stats.drops,
					fl_act.stats.lastused,
					true);
		preempt_enable();
		action->used_hw_stats = fl_act.stats.used_hw_stats;
		action->used_hw_stats_valid = true;
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}
EXPORT_SYMBOL(tcf_action_update_hw_stats);

static int tcf_action_offload_del_ex(struct tc_action *action,
				     flow_indr_block_bind_cb_t *cb,
				     void *cb_priv)
{
	struct flow_offload_action fl_act = {};
	u32 in_hw_count = 0;
	int err = 0;

	if (!tc_act_in_hw(action))
		return 0;

	err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL);
	if (err)
		return err;

	err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv);
	if (err < 0)
		return err;

	if (!cb && action->in_hw_count != in_hw_count)
		return -EINVAL;

	/* do not need to update hw state when deleting action */
	if (cb && in_hw_count)
		offload_action_hw_count_dec(action, in_hw_count);

	return 0;
}

static int tcf_action_offload_del(struct tc_action *action)
{
	return tcf_action_offload_del_ex(action, NULL, NULL);
}

static void tcf_action_cleanup(struct tc_action *p)
{
	tcf_action_offload_del(p);
	if (p->ops->cleanup)
		p->ops->cleanup(p);

	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}

static int __tcf_action_put(struct tc_action *p, bool bind)
{
	struct tcf_idrinfo *idrinfo = p->idrinfo;

	if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
		if (bind)
			atomic_dec(&p->tcfa_bindcnt);
		idr_remove(&idrinfo->action_idr, p->tcfa_index);
		mutex_unlock(&idrinfo->lock);

		tcf_action_cleanup(p);
		return 1;
	}

	if (bind)
		atomic_dec(&p->tcfa_bindcnt);

	return 0;
}

static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	/* Release with strict==1 and bind==0 is only called through act API
	 * interface (classifiers always bind). The only case when an action
	 * with positive reference count and zero bind count can exist is when
	 * it was also created with act API (unbinding the last classifier will
	 * destroy the action if it was created by a classifier). So the only
	 * case when the bind count can change after the initial check is when
	 * an unbound action is destroyed by act API while a classifier binds
	 * to an action with the same id concurrently. This results either in
	 * creation of a new action (same behavior as before), or in reusing
	 * the existing action if the concurrent process increments the
	 * reference count before the action is deleted. Both scenarios are
	 * acceptable.
	 */
	if (p) {
		if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
			return -EPERM;

		if (__tcf_action_put(p, bind))
			ret = ACT_P_DELETED;
	}

	return ret;
}

int tcf_idr_release(struct tc_action *a, bool bind)
{
	const struct tc_action_ops *ops = a->ops;
	int ret;

	ret = __tcf_idr_release(a, bind, false);
	if (ret == ACT_P_DELETED)
		module_put(ops->owner);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_release);

static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
{
	struct tc_cookie *act_cookie;
	u32 cookie_len = 0;

	rcu_read_lock();
	act_cookie = rcu_dereference(act->act_cookie);

	if (act_cookie)
		cookie_len = nla_total_size(act_cookie->len);
	rcu_read_unlock();

	return  nla_total_size(0) /* action number nested */
		+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
		+ cookie_len /* TCA_ACT_COOKIE */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
		+ nla_total_size(0) /* TCA_ACT_STATS nested */
		+ nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
		/* TCA_STATS_BASIC */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
		/* TCA_STATS_PKT64 */
		+ nla_total_size_64bit(sizeof(u64))
		/* TCA_STATS_QUEUE */
		+ nla_total_size_64bit(sizeof(struct gnet_stats_queue))
		+ nla_total_size(0) /* TCA_OPTIONS nested */
		+ nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
}

static size_t tcf_action_full_attrs_size(size_t sz)
{
	return NLMSG_HDRLEN                     /* struct nlmsghdr */
		+ sizeof(struct tcamsg)
		+ nla_total_size(0)             /* TCA_ACT_TAB nested */
		+ sz;
}

static size_t tcf_action_fill_size(const struct tc_action *act)
{
	size_t sz = tcf_action_shared_attrs_size(act);

	if (act->ops->get_fill_size)
		return act->ops->get_fill_size(act) + sz;
	return sz;
}

static int
tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a, bool from_act)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_cookie *cookie;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	if (from_act && nla_put_u32(skb, TCA_ACT_INDEX, a->tcfa_index))
		goto nla_put_failure;

	rcu_read_lock();
	cookie = rcu_dereference(a->act_cookie);
	if (cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
	}
	rcu_read_unlock();

	return 0;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	mutex_lock(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ul(idr, p, tmp, id) {
		index++;
		if (index < s_i)
			continue;
		if (IS_ERR(p))
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start_noflag(skb, n_i);
		if (!nest) {
			index--;
			goto nla_put_failure;
		}
		err = (act_flags & TCA_ACT_FLAG_TERSE_DUMP) ?
			tcf_action_dump_terse(skb, p, true) :
			tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	mutex_unlock(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_ACT_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static int tcf_idr_release_unsafe(struct tc_action *p)
{
	if (atomic_read(&p->tcfa_bindcnt) > 0)
		return -EPERM;

	if (refcount_dec_and_test(&p->tcfa_refcnt)) {
		idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
		tcf_action_cleanup(p);
		return ACT_P_DELETED;
	}

	return 0;
}

static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;
	unsigned long tmp;

	nest = nla_nest_start_noflag(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	ret = 0;
	mutex_lock(&idrinfo->lock);
	idr_for_each_entry_ul(idr, p, tmp, id) {
		if (IS_ERR(p))
			continue;
		ret = tcf_idr_release_unsafe(p);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			break;
		n_i++;
	}
	mutex_unlock(&idrinfo->lock);
	if (ret < 0) {
		if (n_i)
			NL_SET_ERR_MSG(extack, "Unable to flush all TC actions");
		else
			goto nla_put_failure;
	}

	ret = nla_put_u32(skb, TCA_FCNT, n_i);
	if (ret)
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}

int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops,
		       struct netlink_ext_ack *extack)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops, extack);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown command %d\n", type);
		NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);

int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (IS_ERR(p))
		p = NULL;
	else if (p)
		refcount_inc(&p->tcfa_refcnt);
	mutex_unlock(&idrinfo->lock);

	if (p) {
		*a = p;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcf_idr_search);

static int __tcf_generic_walker(struct net *net, struct sk_buff *skb,
				struct netlink_callback *cb, int type,
				const struct tc_action_ops *ops,
				struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ops->net_id);

	if (unlikely(ops->walk))
		return ops->walk(net, skb, cb, type, ops, extack);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int __tcf_idr_search(struct net *net,
			    const struct tc_action_ops *ops,
			    struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ops->net_id);

	if (unlikely(ops->lookup))
		return ops->lookup(net, a, index);

	return tcf_idr_search(tn, a, index);
}

static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
{
	struct tc_action *p;
	int ret = 0;

	mutex_lock(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	if (!p) {
		mutex_unlock(&idrinfo->lock);
		return -ENOENT;
	}

	if (!atomic_read(&p->tcfa_bindcnt)) {
		if (refcount_dec_and_test(&p->tcfa_refcnt)) {
			struct module *owner = p->ops->owner;

			WARN_ON(p != idr_remove(&idrinfo->action_idr,
						p->tcfa_index));
			mutex_unlock(&idrinfo->lock);

			tcf_action_cleanup(p);
			module_put(owner);
			return 0;
		}
		ret = 0;
	} else {
		ret = -EPERM;
	}

	mutex_unlock(&idrinfo->lock);
	return ret;
}

int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats, u32 flags)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	refcount_set(&p->tcfa_refcnt, 1);
	if (bind)
		atomic_set(&p->tcfa_bindcnt, 1);

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
		if (!p->cpu_bstats)
			goto err1;
		p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
		if (!p->cpu_bstats_hw)
			goto err2;
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats)
			goto err3;
	}
	gnet_stats_basic_sync_init(&p->tcfa_bstats);
	gnet_stats_basic_sync_init(&p->tcfa_bstats_hw);
	spin_lock_init(&p->tcfa_lock);
	p->tcfa_index = index;
	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	p->tcfa_flags = flags;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, false, est);
		if (err)
			goto err4;
	}

	p->idrinfo = idrinfo;
	__module_get(ops->owner);
	p->ops = ops;
	*a = p;
	return 0;
err4:
	free_percpu(p->cpu_qstats);
err3:
	free_percpu(p->cpu_bstats_hw);
err2:
	free_percpu(p->cpu_bstats);
err1:
	kfree(p);
	return err;
}
EXPORT_SYMBOL(tcf_idr_create);

int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
			      struct nlattr *est, struct tc_action **a,
			      const struct tc_action_ops *ops, int bind,
			      u32 flags)
{
	/* Set cpustats according to actions flags. */
	return tcf_idr_create(tn, index, est, a, ops, bind,
			      !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
}
EXPORT_SYMBOL(tcf_idr_create_from_flags);

/* Cleanup idr index that was allocated but not initialized. */

void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	mutex_lock(&idrinfo->lock);
	/* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
	WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
	mutex_unlock(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_cleanup);

/* Check if action with specified index exists. If an action is found,
 * increments its reference and bind counters and returns 1. Otherwise inserts
 * a temporary error pointer (to prevent concurrent users from inserting
 * actions with the same index) and returns 0.
 */

int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
			struct tc_action **a, int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p;
	int ret;

again:
	mutex_lock(&idrinfo->lock);
	if (*index) {
		p = idr_find(&idrinfo->action_idr, *index);
		if (IS_ERR(p)) {
			/* This means that another process allocated
			 * index but did not assign the pointer yet.
			 */
			mutex_unlock(&idrinfo->lock);
			goto again;
		}

		if (p) {
			refcount_inc(&p->tcfa_refcnt);
			if (bind)
				atomic_inc(&p->tcfa_bindcnt);
			*a = p;
			ret = 1;
		} else {
			*a = NULL;
			ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
					    *index, GFP_KERNEL);
			if (!ret)
				idr_replace(&idrinfo->action_idr,
					    ERR_PTR(-EBUSY), *index);
		}
	} else {
		*index = 1;
		*a = NULL;
		ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
				    UINT_MAX, GFP_KERNEL);
		if (!ret)
			idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
				    *index);
	}
	mutex_unlock(&idrinfo->lock);
	return ret;
}
EXPORT_SYMBOL(tcf_idr_check_alloc);

void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;
	unsigned long tmp;

	idr_for_each_entry_ul(idr, p, tmp, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);
/* Since the act ops id is stored in the pernet subsystem list, there is no
 * way to walk through only the action subsystems, so we keep the tc action
 * pernet ops ids for reoffload to walk through.
 */
static LIST_HEAD(act_pernet_id_list);
static DEFINE_MUTEX(act_id_mutex);
struct tc_act_pernet_id {
	struct list_head list;
	unsigned int id;
};

static int tcf_pernet_add_id_list(unsigned int id)
{
	struct tc_act_pernet_id *id_ptr;
	int ret = 0;

	mutex_lock(&act_id_mutex);
	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
		if (id_ptr->id == id) {
			ret = -EEXIST;
			goto err_out;
		}
	}

	id_ptr = kzalloc(sizeof(*id_ptr), GFP_KERNEL);
	if (!id_ptr) {
		ret = -ENOMEM;
		goto err_out;
	}
	id_ptr->id = id;

	list_add_tail(&id_ptr->list, &act_pernet_id_list);

err_out:
	mutex_unlock(&act_id_mutex);
	return ret;
}

static void tcf_pernet_del_id_list(unsigned int id)
{
	struct tc_act_pernet_id *id_ptr;

	mutex_lock(&act_id_mutex);
	list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
		if (id_ptr->id == id) {
			list_del(&id_ptr->list);
			kfree(id_ptr);
			break;
		}
	}
	mutex_unlock(&act_id_mutex);
}

int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * netns.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	if (ops->id) {
		ret = tcf_pernet_add_id_list(*ops->id);
		if (ret)
			goto err_id;
	}

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
			ret = -EEXIST;
			goto err_out;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;

err_out:
	write_unlock(&act_mod_lock);
	if (ops->id)
		tcf_pernet_del_id_list(*ops->id);
err_id:
	unregister_pernet_subsys(ops);
	return ret;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err) {
		unregister_pernet_subsys(ops);
		if (ops->id)
			tcf_pernet_del_id_list(*ops->id);
	}
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* TCA_ACT_MAX_PRIO is 32; the jump count there can be up to 32 */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];
		int repeat_ttl;

		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}

		if (tc_act_skip_sw(a->tcfa_flags))
			continue;

		repeat_ttl = 32;
repeat:
		ret = tc_act(skb, a, res);
		if (unlikely(ret == TC_ACT_REPEAT)) {
			if (--repeat_ttl != 0)
				goto repeat;
			/* suspicious opcode, stop pipeline */
			net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
			return TC_ACT_OK;
		}
		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			} else {
				jmp_ttl -= 1;
				if (jmp_ttl > 0)
					goto restart_act_graph;
				else /* faulty graph, stop pipeline */
					return TC_ACT_OK;
			}
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			if (unlikely(!rcu_access_pointer(a->goto_chain))) {
				net_warn_ratelimited("can't go to NULL chain!\n");
				return TC_ACT_SHOT;
			}
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

int tcf_action_destroy(struct tc_action *actions[], int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int ret = 0, i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		actions[i] = NULL;
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}

static int tcf_action_put(struct tc_action *p)
{
	return __tcf_action_put(p, false);
}

/* Put all actions in this array, skipping NULL entries. */
static void tcf_action_put_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops;

		if (!a)
			continue;
		ops = a->ops;
		if (tcf_action_put(a))
			module_put(ops->owner);
	}
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	u32 flags;

	if (tcf_action_dump_terse(skb, a, false))
		goto nla_put_failure;

	if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
	    nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
			       a->hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	if (a->used_hw_stats_valid &&
	    nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
			       a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
		goto nla_put_failure;

	flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
	if (flags &&
	    nla_put_bitfield32(skb, TCA_ACT_FLAGS,
			       flags, flags))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
		goto nla_put_failure;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
		    int bind, int ref, bool terse)
{
	struct tc_action *a;
	int err = -EINVAL, i;
	struct nlattr *nest;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		a = actions[i];
		nest = nla_nest_start_noflag(skb, i + 1);
		if (nest == NULL)
			goto nla_put_failure;
		err = terse ? tcf_action_dump_terse(skb, a, false) :
			tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return NULL;

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	if (!c->data) {
		kfree(c);
		return NULL;
	}
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

	return c;
}

static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
{
	struct nla_bitfield32 hw_stats_bf;

	/* If the user did not pass the attr, that means they do not care
	 * about the type. Return "any" in that case, which selects all
	 * supported types.
	 */
	if (!hw_stats_attr)
		return TCA_ACT_HW_STATS_ANY;
	hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
	return hw_stats_bf.value;
}

static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
	[TCA_ACT_KIND]		= { .type = NLA_STRING },
	[TCA_ACT_INDEX]		= { .type = NLA_U32 },
	[TCA_ACT_COOKIE]	= { .type = NLA_BINARY,
				    .len = TC_COOKIE_MAX_SIZE },
	[TCA_ACT_OPTIONS]	= { .type = NLA_NESTED },
	[TCA_ACT_FLAGS]		= NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS |
							TCA_ACT_FLAGS_SKIP_HW |
							TCA_ACT_FLAGS_SKIP_SW),
	[TCA_ACT_HW_STATS]	= NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
};

void tcf_idr_insert_many(struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		struct tc_action *a = actions[i];
		struct tcf_idrinfo *idrinfo;

		if (!a)
			continue;
		idrinfo = a->idrinfo;
		mutex_lock(&idrinfo->lock);
		/* Replace ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc if
		 * it is just created, otherwise this is just a nop.
		 */
		idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
		mutex_unlock(&idrinfo->lock);
	}
}

struct tc_action_ops *tc_action_load_ops(struct nlattr *nla, bool police,
					 bool rtnl_held,
					 struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *kind;
	int err;

	if (!police) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			return ERR_PTR(err);
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (!kind) {
			NL_SET_ERR_MSG(extack, "TC action kind must be specified");
			return ERR_PTR(err);
		}
		if (nla_strscpy(act_name, kind, IFNAMSIZ) < 0) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(err);
		}
	} else {
		if (strlcpy(act_name, "police", IFNAMSIZ) >= IFNAMSIZ) {
			NL_SET_ERR_MSG(extack, "TC action name too long");
			return ERR_PTR(-EINVAL);
		}
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		if (rtnl_held)
			rtnl_unlock();
		request_module("act_%s", act_name);
		if (rtnl_held)
			rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load. So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request. We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			module_put(a_o->owner);
			return ERR_PTR(-EAGAIN);
		}
#endif
		NL_SET_ERR_MSG(extack, "Failed to load TC action module");
		return ERR_PTR(-ENOENT);
	}

	return a_o;
}

struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    struct tc_action_ops *a_o, int *init_res,
				    u32 flags, struct netlink_ext_ack *extack)
{
	bool police = flags & TCA_ACT_FLAGS_POLICE;
	struct nla_bitfield32 userflags = { 0, 0 };
	u8 hw_stats = TCA_ACT_HW_STATS_ANY;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_cookie *cookie = NULL;
	struct tc_action *a;
	int err;

	/* backward compatibility for policer */
	if (!police) {
		err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
						  tcf_action_policy, extack);
		if (err < 0)
			return ERR_PTR(err);
		if (tb[TCA_ACT_COOKIE]) {
			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
				err = -ENOMEM;
				goto err_out;
			}
		}
		hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
		if (tb[TCA_ACT_FLAGS]) {
			userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
			if (!tc_act_flags_valid(userflags.value)) {
				err = -EINVAL;
				goto err_out;
			}
		}

		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp,
				userflags.value | flags, extack);
	} else {
		err = a_o->init(net, nla, est, &a, tp, userflags.value | flags,
				extack);
	}
	if (err < 0)
		goto err_out;
	*init_res = err;

	if (!police && tb[TCA_ACT_COOKIE])
		tcf_set_action_cookie(&a->act_cookie, cookie);

	if (!police)
		a->hw_stats = hw_stats;

	return a;

err_out:
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
	return ERR_PTR(err);
}

static bool tc_act_bind(u32 flags)
{
	return !!(flags & TCA_ACT_FLAGS_BIND);
}

/* Returns the number of initialized actions or a negative error. */

int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, struct tc_action *actions[],
		    int init_res[], size_t *attr_size,
		    u32 flags, u32 fl_flags,
		    struct netlink_ext_ack *extack)
{
	struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t sz = 0;
	int err;
	int i;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		struct tc_action_ops *a_o;

		a_o = tc_action_load_ops(tb[i], flags & TCA_ACT_FLAGS_POLICE,
					 !(flags & TCA_ACT_FLAGS_NO_RTNL),
					 extack);
		if (IS_ERR(a_o)) {
			err = PTR_ERR(a_o);
			goto err_mod;
		}
		ops[i - 1] = a_o;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, ops[i - 1],
					&init_res[i - 1], flags, extack);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		sz += tcf_action_fill_size(act);
		/* Start from index 0 */
		actions[i - 1] = act;
		if (tc_act_bind(flags)) {
			bool skip_sw = tc_skip_sw(fl_flags);
			bool skip_hw = tc_skip_hw(fl_flags);

			if (tc_act_bind(act->tcfa_flags))
				continue;
			if (skip_sw != tc_act_skip_sw(act->tcfa_flags) ||
			    skip_hw != tc_act_skip_hw(act->tcfa_flags)) {
				NL_SET_ERR_MSG(extack,
					       "Mismatch between action and filter offload flags");
				err = -EINVAL;
				goto err;
			}
		} else {
			err = tcf_action_offload_add(act, extack);
			if (tc_act_skip_sw(act->tcfa_flags) && err)
				goto err;
		}
	}

	/* We have to commit them all together, because if any error happened in
	 * between, we could not handle the failure gracefully.
	 */
	tcf_idr_insert_many(actions);

	*attr_size = tcf_action_full_attrs_size(sz);
	err = i - 1;
	goto err_mod;

err:
	tcf_action_destroy(actions, flags & TCA_ACT_FLAGS_BIND);
err_mod:
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
		if (ops[i])
			module_put(ops[i]->owner);
	}
	return err;
}

void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, bool hw)
{
	if (a->cpu_bstats) {
		_bstats_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);

		this_cpu_ptr(a->cpu_qstats)->drops += drops;

		if (hw)
			_bstats_update(this_cpu_ptr(a->cpu_bstats_hw),
				       bytes, packets);
		return;
	}

	_bstats_update(&a->tcfa_bstats, bytes, packets);
	a->tcfa_qstats.drops += drops;
	if (hw)
		_bstats_update(&a->tcfa_bstats_hw, bytes, packets);
}
EXPORT_SYMBOL(tcf_action_update_stats);

int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* update hw stats for this action */
	tcf_action_update_hw_stats(p);

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(&d, p->cpu_bstats,
				  &p->tcfa_bstats, false) < 0 ||
	    gnet_stats_copy_basic_hw(&d, p->cpu_bstats_hw,
				     &p->tcfa_bstats_hw, false) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct tc_action *actions[], int event,
	       struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
			 0, 1) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}

static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid,
					  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
		NL_SET_ERR_MSG(extack, "Invalid TC action index value");
		goto err_out;
	}
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) { /* could happen in batch of actions */
		NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
		goto err_out;
	}
	err = -ENOENT;
	if (__tcf_idr_search(net, ops, &a, index) == 0) {
		NL_SET_ERR_MSG(extack, "TC action with specified index not found");
		goto err_mod;
	}

	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}

static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid,
			    struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return err;

	b = skb_tail_pointer(skb);

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);
	if (!ops) { /* someone is trying to flush an unknown action */
		NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
		goto err_out;
	}

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh) {
		NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
		goto out_module_put;
	}
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!nest) {
		NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
		goto out_module_put;
	}

	err = __tcf_generic_walker(net, skb, &dcb, RTM_DELACTION, ops, extack);
	if (err <= 0) {
		nla_nest_cancel(skb, nest);
		goto out_module_put;
	}

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");

	return err;

out_module_put:
	module_put(ops->owner);
err_out:
	kfree_skb(skb);
	return err;
}

static int tcf_action_delete(struct net *net, struct tc_action *actions[])
{
	int i;

	for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
		struct tc_action *a = actions[i];
		const struct tc_action_ops *ops = a->ops;
		/* Actions can be deleted concurrently so we must save their
		 * type and id to search again after reference is released.
		 */
		struct tcf_idrinfo *idrinfo = a->idrinfo;
		u32 act_index = a->tcfa_index;

		actions[i] = NULL;
		if (tcf_action_put(a)) {
			/* last reference, action was deleted concurrently */
			module_put(ops->owner);
		} else {
			int ret;

			/* now do the delete */
			ret = tcf_idr_delete_index(idrinfo, act_index);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

static int
tcf_reoffload_del_notify(struct net *net, struct tc_action *action)
{
	size_t attr_size = tcf_action_fill_size(action);
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
		[0] = action,
	};
	const struct tc_action_ops *ops = action->ops;
	struct sk_buff *skb;
	int ret;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	ret = tcf_idr_release_unsafe(action);
	if (ret == ACT_P_DELETED) {
		module_put(ops->owner);
		ret = rtnetlink_send(skb, net, 0, RTNLGRP_TC, 0);
	} else {
		kfree_skb(skb);
	}

	return ret;
}

int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
			    void *cb_priv, bool add)
{
	struct tc_act_pernet_id *id_ptr;
	struct tcf_idrinfo *idrinfo;
	struct tc_action_net *tn;
	struct tc_action *p;
	unsigned int act_id;
	unsigned long tmp;
	unsigned long id;
	struct idr *idr;
	struct net *net;
	int ret;

	if (!cb)
		return -EINVAL;

	down_read(&net_rwsem);
	mutex_lock(&act_id_mutex);

	for_each_net(net) {
		list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
			act_id = id_ptr->id;
			tn = net_generic(net, act_id);
			if (!tn)
				continue;
			idrinfo = tn->idrinfo;
			if (!idrinfo)
				continue;

			mutex_lock(&idrinfo->lock);
			idr = &idrinfo->action_idr;
			idr_for_each_entry_ul(idr, p, tmp, id) {
				if (IS_ERR(p) || tc_act_bind(p->tcfa_flags))
					continue;
				if (add) {
					tcf_action_offload_add_ex(p, NULL, cb,
								  cb_priv);
					continue;
				}

				/* cb unregister to update hw count */
				ret = tcf_action_offload_del_ex(p, cb, cb_priv);
				if (ret < 0)
					continue;
				if (tc_act_skip_sw(p->tcfa_flags) &&
				    !tc_act_in_hw(p))
					tcf_reoffload_del_notify(net, p);
			}
			mutex_unlock(&idrinfo->lock);
		}
	}
	mutex_unlock(&act_id_mutex);
	up_read(&net_rwsem);

	return 0;
}

static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 2) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_delete(net, actions);
	if (ret < 0) {
		NL_SET_ERR_MSG(extack, "Failed to delete TC action");
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	return ret;
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event, struct netlink_ext_ack *extack)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	size_t attr_size = 0;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
					  extack);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1])
			return tca_action_flush(net, tb[1], n, portid, extack);

		NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
		return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid, extack);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		attr_size += tcf_action_fill_size(act);
		actions[i - 1] = act;
	}

	attr_size = tcf_action_full_attrs_size(attr_size);

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, actions, event, extack);
	else { /* delete */
		ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
		if (ret)
			goto err;
		return 0;
	}
err:
	tcf_action_put_many(actions);
	return ret;
}

static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
	       u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;

	skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
			GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, u32 flags,
			  struct netlink_ext_ack *extack)
{
	size_t attr_size = 0;
	int loop, ret, i;
	struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
	int init_res[TCA_ACT_MAX_PRIO] = {};

	for (loop = 0; loop < 10; loop++) {
		ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res,
				      &attr_size, flags, 0, extack);
		if (ret != -EAGAIN)
			break;
	}

	if (ret < 0)
		return ret;
	ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);

	/* only put existing actions */
	for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
		if (init_res[i] == ACT_P_CREATED)
			actions[i] = NULL;
	tcf_action_put_many(actions);

	return ret;
}

static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAG_LARGE_DUMP_ON |
						 TCA_ACT_FLAG_TERSE_DUMP),
	[TCA_ROOT_TIME_DELTA]      = { .type = NLA_U32 },
};

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = NETLINK_CB(skb).portid;
	u32 flags = 0;
	int ret = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
				     TCA_ROOT_MAX, NULL, extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* We are going to assume that all other flags imply
		 * "create only if it doesn't exist". Note that
		 * CREATE | EXCL implies that, but since we want to avoid
		 * ambiguity (e.g. when flags is zero) we just set this.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			flags = TCA_ACT_FLAGS_REPLACE;
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, flags,
				     extack);
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION, extack);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION, extack);
		break;
	default:
		BUG();
	}

	return ret;
}

static struct nlattr *find_dump_kind(struct nlattr **nla)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *kind;

	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
				 NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1],
					tcf_action_policy, NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}

static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
				     TCA_ROOT_MAX, tcaa_policy, cb->extack);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	cb->args[2] = 0;
	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA]) {
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = __tcf_generic_walker(net, skb, cb, RTM_GETACTION, a_o, NULL);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else
		nlmsg_trim(skb, b);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);