/*
 * net/sched/act_api.c	Packet action API.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Author:	Jamal Hadi Salim
 *
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>

/* Resolve the goto-chain target of an action whose tcfa_action encodes
 * TC_ACT_GOTO_CHAIN: the chain index lives in the TC_ACT_EXT_VAL_MASK bits.
 * Takes a reference on the chain (tcf_chain_get(..., true) creates it if
 * needed); the reference is dropped in tcf_action_goto_chain_fini().
 * Requires a classifier context (tp) to reach the owning block.
 */
static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
{
	u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;

	if (!tp)
		return -EINVAL;
	a->goto_chain = tcf_chain_get(tp->chain->block, chain_index, true);
	if (!a->goto_chain)
		return -ENOMEM;
	return 0;
}

/* Drop the chain reference taken in tcf_action_goto_chain_init(). */
static void tcf_action_goto_chain_fini(struct tc_action *a)
{
	tcf_chain_put(a->goto_chain);
}

/* Point res->goto_tp at the first filter of the target chain so the caller
 * (tcf_action_exec()) can restart classification there.  Runs in the
 * softirq fast path, hence rcu_dereference_bh().
 */
static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = a->goto_chain;

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}

/* XXX: For standalone actions, we don't need a RCU grace period either, because
 * actions are always connected to filters and filters are already destroyed in
 * RCU callbacks, so after a RCU grace period actions are already disconnected
 * from filters. Readers later can not find us.
 */
/* Free an action's memory: per-cpu stats, the user cookie (both the data
 * buffer and the container), the goto-chain reference if one was taken,
 * and finally the action itself.  Callers must guarantee no readers remain
 * (see the comment above).
 */
static void free_tcf(struct tc_action *p)
{
	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_qstats);

	if (p->act_cookie) {
		kfree(p->act_cookie->data);
		kfree(p->act_cookie);
	}
	if (p->goto_chain)
		tcf_action_goto_chain_fini(p);

	kfree(p);
}

/* Unlink an action from its per-netns IDR (under the idrinfo lock), kill
 * its rate estimator, and free it.  Only called once refcnt/bindcnt have
 * dropped to zero in __tcf_idr_release().
 */
static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
{
	spin_lock_bh(&idrinfo->lock);
	idr_remove(&idrinfo->action_idr, p->tcfa_index);
	spin_unlock_bh(&idrinfo->lock);
	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}

/* Drop one reference on an action (a bind reference too when @bind).
 * With @strict, refuse to release an action that is still bound to a
 * filter (-EPERM).  When both counts reach zero, run the per-type
 * cleanup hook and destroy the action, returning ACT_P_DELETED so the
 * caller can drop the module reference.  Counters are plain ints:
 * serialization relies on RTNL, hence the ASSERT_RTNL().
 */
int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	ASSERT_RTNL();

	if (p) {
		if (bind)
			p->tcfa_bindcnt--;
		else if (strict && p->tcfa_bindcnt > 0)
			return -EPERM;

		p->tcfa_refcnt--;
		if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
			if (p->ops->cleanup)
				p->ops->cleanup(p);
			tcf_idr_remove(p->idrinfo, p);
			ret = ACT_P_DELETED;
		}
	}

	return ret;
}
EXPORT_SYMBOL(__tcf_idr_release);

/* Dump actions of one kind into @skb for RTM_GETACTION.
 * cb->args usage: [0] resume index (count of entries already walked),
 * [2] dump flags (TCA_FLAG_LARGE_DUMP_ON), [3] jiffies cutoff - entries
 * not used since that time are skipped.  Without the large-dump flag at
 * most TCA_ACT_MAX_PRIO entries are emitted per message.  Returns the
 * number of actions dumped; [1] carries that count back to the caller
 * in large-dump mode.  Entire walk runs under the idrinfo spinlock.
 */
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;

	spin_lock_bh(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ul(idr, p, id) {
		index++;
		if (index < s_i)
			continue;

		/* Time-delta filter: skip actions not used since cutoff. */
		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start(skb, n_i);
		if (!nest) {
			/* Rewind so this entry is retried next message. */
			index--;
			goto nla_put_failure;
		}
		err = tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	spin_unlock_bh(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

/* Flush (delete) every unbound action of @ops' kind, emitting a summary
 * (kind string + TCA_FCNT deleted-count) into @skb for the RTM_DELACTION
 * reply.  strict release means a still-bound action aborts the flush with
 * an error.  Drops one module reference per deleted action, matching the
 * reference taken at creation.
 */
static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;

	nest = nla_nest_start(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	idr_for_each_entry_ul(idr, p, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED) {
			module_put(ops->owner);
			n_i++;
		} else if (ret < 0) {
			goto nla_put_failure;
		}
	}
	if (nla_put_u32(skb, TCA_FCNT, n_i))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}

/* Generic ops->walk implementation shared by action kinds: dispatch to the
 * dump or delete walker based on the netlink message type.
 */
int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);

/* Look up an action by index in the per-netns IDR.  Returns the pointer
 * without taking any reference; callers (tcf_idr_search/tcf_idr_check)
 * rely on RTNL to keep the action alive afterwards.
 */
static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo)
{
	struct tc_action *p = NULL;

	spin_lock_bh(&idrinfo->lock);
	p = idr_find(&idrinfo->action_idr, index);
	spin_unlock_bh(&idrinfo->lock);

	return p;
}
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index) 234 { 235 struct tcf_idrinfo *idrinfo = tn->idrinfo; 236 struct tc_action *p = tcf_idr_lookup(index, idrinfo); 237 238 if (p) { 239 *a = p; 240 return 1; 241 } 242 return 0; 243 } 244 EXPORT_SYMBOL(tcf_idr_search); 245 246 bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a, 247 int bind) 248 { 249 struct tcf_idrinfo *idrinfo = tn->idrinfo; 250 struct tc_action *p = tcf_idr_lookup(index, idrinfo); 251 252 if (index && p) { 253 if (bind) 254 p->tcfa_bindcnt++; 255 p->tcfa_refcnt++; 256 *a = p; 257 return true; 258 } 259 return false; 260 } 261 EXPORT_SYMBOL(tcf_idr_check); 262 263 void tcf_idr_cleanup(struct tc_action *a, struct nlattr *est) 264 { 265 if (est) 266 gen_kill_estimator(&a->tcfa_rate_est); 267 free_tcf(a); 268 } 269 EXPORT_SYMBOL(tcf_idr_cleanup); 270 271 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est, 272 struct tc_action **a, const struct tc_action_ops *ops, 273 int bind, bool cpustats) 274 { 275 struct tc_action *p = kzalloc(ops->size, GFP_KERNEL); 276 struct tcf_idrinfo *idrinfo = tn->idrinfo; 277 struct idr *idr = &idrinfo->action_idr; 278 int err = -ENOMEM; 279 280 if (unlikely(!p)) 281 return -ENOMEM; 282 p->tcfa_refcnt = 1; 283 if (bind) 284 p->tcfa_bindcnt = 1; 285 286 if (cpustats) { 287 p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); 288 if (!p->cpu_bstats) 289 goto err1; 290 p->cpu_qstats = alloc_percpu(struct gnet_stats_queue); 291 if (!p->cpu_qstats) 292 goto err2; 293 } 294 spin_lock_init(&p->tcfa_lock); 295 idr_preload(GFP_KERNEL); 296 spin_lock_bh(&idrinfo->lock); 297 /* user doesn't specify an index */ 298 if (!index) { 299 index = 1; 300 err = idr_alloc_u32(idr, NULL, &index, UINT_MAX, GFP_ATOMIC); 301 } else { 302 err = idr_alloc_u32(idr, NULL, &index, index, GFP_ATOMIC); 303 } 304 spin_unlock_bh(&idrinfo->lock); 305 idr_preload_end(); 306 if (err) 307 goto err3; 308 309 
p->tcfa_index = index; 310 p->tcfa_tm.install = jiffies; 311 p->tcfa_tm.lastuse = jiffies; 312 p->tcfa_tm.firstuse = 0; 313 if (est) { 314 err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats, 315 &p->tcfa_rate_est, 316 &p->tcfa_lock, NULL, est); 317 if (err) 318 goto err4; 319 } 320 321 p->idrinfo = idrinfo; 322 p->ops = ops; 323 INIT_LIST_HEAD(&p->list); 324 *a = p; 325 return 0; 326 err4: 327 idr_remove(idr, index); 328 err3: 329 free_percpu(p->cpu_qstats); 330 err2: 331 free_percpu(p->cpu_bstats); 332 err1: 333 kfree(p); 334 return err; 335 } 336 EXPORT_SYMBOL(tcf_idr_create); 337 338 void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a) 339 { 340 struct tcf_idrinfo *idrinfo = tn->idrinfo; 341 342 spin_lock_bh(&idrinfo->lock); 343 idr_replace(&idrinfo->action_idr, a, a->tcfa_index); 344 spin_unlock_bh(&idrinfo->lock); 345 } 346 EXPORT_SYMBOL(tcf_idr_insert); 347 348 void tcf_idrinfo_destroy(const struct tc_action_ops *ops, 349 struct tcf_idrinfo *idrinfo) 350 { 351 struct idr *idr = &idrinfo->action_idr; 352 struct tc_action *p; 353 int ret; 354 unsigned long id = 1; 355 356 idr_for_each_entry_ul(idr, p, id) { 357 ret = __tcf_idr_release(p, false, true); 358 if (ret == ACT_P_DELETED) 359 module_put(ops->owner); 360 else if (ret < 0) 361 return; 362 } 363 idr_destroy(&idrinfo->action_idr); 364 } 365 EXPORT_SYMBOL(tcf_idrinfo_destroy); 366 367 static LIST_HEAD(act_base); 368 static DEFINE_RWLOCK(act_mod_lock); 369 370 int tcf_register_action(struct tc_action_ops *act, 371 struct pernet_operations *ops) 372 { 373 struct tc_action_ops *a; 374 int ret; 375 376 if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup) 377 return -EINVAL; 378 379 /* We have to register pernet ops before making the action ops visible, 380 * otherwise tcf_action_init_1() could get a partially initialized 381 * netns. 
382 */ 383 ret = register_pernet_subsys(ops); 384 if (ret) 385 return ret; 386 387 write_lock(&act_mod_lock); 388 list_for_each_entry(a, &act_base, head) { 389 if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) { 390 write_unlock(&act_mod_lock); 391 unregister_pernet_subsys(ops); 392 return -EEXIST; 393 } 394 } 395 list_add_tail(&act->head, &act_base); 396 write_unlock(&act_mod_lock); 397 398 return 0; 399 } 400 EXPORT_SYMBOL(tcf_register_action); 401 402 int tcf_unregister_action(struct tc_action_ops *act, 403 struct pernet_operations *ops) 404 { 405 struct tc_action_ops *a; 406 int err = -ENOENT; 407 408 write_lock(&act_mod_lock); 409 list_for_each_entry(a, &act_base, head) { 410 if (a == act) { 411 list_del(&act->head); 412 err = 0; 413 break; 414 } 415 } 416 write_unlock(&act_mod_lock); 417 if (!err) 418 unregister_pernet_subsys(ops); 419 return err; 420 } 421 EXPORT_SYMBOL(tcf_unregister_action); 422 423 /* lookup by name */ 424 static struct tc_action_ops *tc_lookup_action_n(char *kind) 425 { 426 struct tc_action_ops *a, *res = NULL; 427 428 if (kind) { 429 read_lock(&act_mod_lock); 430 list_for_each_entry(a, &act_base, head) { 431 if (strcmp(kind, a->kind) == 0) { 432 if (try_module_get(a->owner)) 433 res = a; 434 break; 435 } 436 } 437 read_unlock(&act_mod_lock); 438 } 439 return res; 440 } 441 442 /* lookup by nlattr */ 443 static struct tc_action_ops *tc_lookup_action(struct nlattr *kind) 444 { 445 struct tc_action_ops *a, *res = NULL; 446 447 if (kind) { 448 read_lock(&act_mod_lock); 449 list_for_each_entry(a, &act_base, head) { 450 if (nla_strcmp(kind, a->kind) == 0) { 451 if (try_module_get(a->owner)) 452 res = a; 453 break; 454 } 455 } 456 read_unlock(&act_mod_lock); 457 } 458 return res; 459 } 460 461 /*TCA_ACT_MAX_PRIO is 32, there count upto 32 */ 462 #define TCA_ACT_MAX_PRIO_MASK 0x1FF 463 int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, 464 int nr_actions, struct tcf_result *res) 465 { 466 u32 jmp_prgcnt = 0; 467 
u32 jmp_ttl = TCA_ACT_MAX_PRIO; /*matches actions per filter */ 468 int i; 469 int ret = TC_ACT_OK; 470 471 if (skb_skip_tc_classify(skb)) 472 return TC_ACT_OK; 473 474 restart_act_graph: 475 for (i = 0; i < nr_actions; i++) { 476 const struct tc_action *a = actions[i]; 477 478 if (jmp_prgcnt > 0) { 479 jmp_prgcnt -= 1; 480 continue; 481 } 482 repeat: 483 ret = a->ops->act(skb, a, res); 484 if (ret == TC_ACT_REPEAT) 485 goto repeat; /* we need a ttl - JHS */ 486 487 if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) { 488 jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK; 489 if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) { 490 /* faulty opcode, stop pipeline */ 491 return TC_ACT_OK; 492 } else { 493 jmp_ttl -= 1; 494 if (jmp_ttl > 0) 495 goto restart_act_graph; 496 else /* faulty graph, stop pipeline */ 497 return TC_ACT_OK; 498 } 499 } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) { 500 tcf_action_goto_chain_exec(a, res); 501 } 502 503 if (ret != TC_ACT_PIPE) 504 break; 505 } 506 507 return ret; 508 } 509 EXPORT_SYMBOL(tcf_action_exec); 510 511 int tcf_action_destroy(struct list_head *actions, int bind) 512 { 513 const struct tc_action_ops *ops; 514 struct tc_action *a, *tmp; 515 int ret = 0; 516 517 list_for_each_entry_safe(a, tmp, actions, list) { 518 ops = a->ops; 519 ret = __tcf_idr_release(a, bind, true); 520 if (ret == ACT_P_DELETED) 521 module_put(ops->owner); 522 else if (ret < 0) 523 return ret; 524 } 525 return ret; 526 } 527 528 int 529 tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 530 { 531 return a->ops->dump(skb, a, bind, ref); 532 } 533 534 int 535 tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 536 { 537 int err = -EINVAL; 538 unsigned char *b = skb_tail_pointer(skb); 539 struct nlattr *nest; 540 541 if (nla_put_string(skb, TCA_KIND, a->ops->kind)) 542 goto nla_put_failure; 543 if (tcf_action_copy_stats(skb, a, 0)) 544 goto nla_put_failure; 545 if (a->act_cookie) { 546 if (nla_put(skb, TCA_ACT_COOKIE, 
a->act_cookie->len, 547 a->act_cookie->data)) 548 goto nla_put_failure; 549 } 550 551 nest = nla_nest_start(skb, TCA_OPTIONS); 552 if (nest == NULL) 553 goto nla_put_failure; 554 err = tcf_action_dump_old(skb, a, bind, ref); 555 if (err > 0) { 556 nla_nest_end(skb, nest); 557 return err; 558 } 559 560 nla_put_failure: 561 nlmsg_trim(skb, b); 562 return -1; 563 } 564 EXPORT_SYMBOL(tcf_action_dump_1); 565 566 int tcf_action_dump(struct sk_buff *skb, struct list_head *actions, 567 int bind, int ref) 568 { 569 struct tc_action *a; 570 int err = -EINVAL; 571 struct nlattr *nest; 572 573 list_for_each_entry(a, actions, list) { 574 nest = nla_nest_start(skb, a->order); 575 if (nest == NULL) 576 goto nla_put_failure; 577 err = tcf_action_dump_1(skb, a, bind, ref); 578 if (err < 0) 579 goto errout; 580 nla_nest_end(skb, nest); 581 } 582 583 return 0; 584 585 nla_put_failure: 586 err = -EINVAL; 587 errout: 588 nla_nest_cancel(skb, nest); 589 return err; 590 } 591 592 static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb) 593 { 594 struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL); 595 if (!c) 596 return NULL; 597 598 c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL); 599 if (!c->data) { 600 kfree(c); 601 return NULL; 602 } 603 c->len = nla_len(tb[TCA_ACT_COOKIE]); 604 605 return c; 606 } 607 608 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp, 609 struct nlattr *nla, struct nlattr *est, 610 char *name, int ovr, int bind) 611 { 612 struct tc_action *a; 613 struct tc_action_ops *a_o; 614 struct tc_cookie *cookie = NULL; 615 char act_name[IFNAMSIZ]; 616 struct nlattr *tb[TCA_ACT_MAX + 1]; 617 struct nlattr *kind; 618 int err; 619 620 if (name == NULL) { 621 err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL); 622 if (err < 0) 623 goto err_out; 624 err = -EINVAL; 625 kind = tb[TCA_ACT_KIND]; 626 if (kind == NULL) 627 goto err_out; 628 if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) 629 goto err_out; 630 if (tb[TCA_ACT_COOKIE]) 
{ 631 int cklen = nla_len(tb[TCA_ACT_COOKIE]); 632 633 if (cklen > TC_COOKIE_MAX_SIZE) 634 goto err_out; 635 636 cookie = nla_memdup_cookie(tb); 637 if (!cookie) { 638 err = -ENOMEM; 639 goto err_out; 640 } 641 } 642 } else { 643 err = -EINVAL; 644 if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) 645 goto err_out; 646 } 647 648 a_o = tc_lookup_action_n(act_name); 649 if (a_o == NULL) { 650 #ifdef CONFIG_MODULES 651 rtnl_unlock(); 652 request_module("act_%s", act_name); 653 rtnl_lock(); 654 655 a_o = tc_lookup_action_n(act_name); 656 657 /* We dropped the RTNL semaphore in order to 658 * perform the module load. So, even if we 659 * succeeded in loading the module we have to 660 * tell the caller to replay the request. We 661 * indicate this using -EAGAIN. 662 */ 663 if (a_o != NULL) { 664 err = -EAGAIN; 665 goto err_mod; 666 } 667 #endif 668 err = -ENOENT; 669 goto err_out; 670 } 671 672 /* backward compatibility for policer */ 673 if (name == NULL) 674 err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind); 675 else 676 err = a_o->init(net, nla, est, &a, ovr, bind); 677 if (err < 0) 678 goto err_mod; 679 680 if (name == NULL && tb[TCA_ACT_COOKIE]) { 681 if (a->act_cookie) { 682 kfree(a->act_cookie->data); 683 kfree(a->act_cookie); 684 } 685 a->act_cookie = cookie; 686 } 687 688 /* module count goes up only when brand new policy is created 689 * if it exists and is only bound to in a_o->init() then 690 * ACT_P_CREATED is not returned (a zero is). 
691 */ 692 if (err != ACT_P_CREATED) 693 module_put(a_o->owner); 694 695 if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) { 696 err = tcf_action_goto_chain_init(a, tp); 697 if (err) { 698 LIST_HEAD(actions); 699 700 list_add_tail(&a->list, &actions); 701 tcf_action_destroy(&actions, bind); 702 return ERR_PTR(err); 703 } 704 } 705 706 return a; 707 708 err_mod: 709 module_put(a_o->owner); 710 err_out: 711 if (cookie) { 712 kfree(cookie->data); 713 kfree(cookie); 714 } 715 return ERR_PTR(err); 716 } 717 718 static void cleanup_a(struct list_head *actions, int ovr) 719 { 720 struct tc_action *a; 721 722 if (!ovr) 723 return; 724 725 list_for_each_entry(a, actions, list) 726 a->tcfa_refcnt--; 727 } 728 729 int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla, 730 struct nlattr *est, char *name, int ovr, int bind, 731 struct list_head *actions) 732 { 733 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; 734 struct tc_action *act; 735 int err; 736 int i; 737 738 err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, NULL); 739 if (err < 0) 740 return err; 741 742 for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { 743 act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind); 744 if (IS_ERR(act)) { 745 err = PTR_ERR(act); 746 goto err; 747 } 748 act->order = i; 749 if (ovr) 750 act->tcfa_refcnt++; 751 list_add_tail(&act->list, actions); 752 } 753 754 /* Remove the temp refcnt which was necessary to protect against 755 * destroying an existing action which was being replaced 756 */ 757 cleanup_a(actions, ovr); 758 return 0; 759 760 err: 761 tcf_action_destroy(actions, bind); 762 return err; 763 } 764 765 int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p, 766 int compat_mode) 767 { 768 int err = 0; 769 struct gnet_dump d; 770 771 if (p == NULL) 772 goto errout; 773 774 /* compat_mode being true specifies a call that is supposed 775 * to add additional backward compatibility statistic TLVs. 
776 */ 777 if (compat_mode) { 778 if (p->type == TCA_OLD_COMPAT) 779 err = gnet_stats_start_copy_compat(skb, 0, 780 TCA_STATS, 781 TCA_XSTATS, 782 &p->tcfa_lock, &d, 783 TCA_PAD); 784 else 785 return 0; 786 } else 787 err = gnet_stats_start_copy(skb, TCA_ACT_STATS, 788 &p->tcfa_lock, &d, TCA_ACT_PAD); 789 790 if (err < 0) 791 goto errout; 792 793 if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 || 794 gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 || 795 gnet_stats_copy_queue(&d, p->cpu_qstats, 796 &p->tcfa_qstats, 797 p->tcfa_qstats.qlen) < 0) 798 goto errout; 799 800 if (gnet_stats_finish_copy(&d) < 0) 801 goto errout; 802 803 return 0; 804 805 errout: 806 return -1; 807 } 808 809 static int tca_get_fill(struct sk_buff *skb, struct list_head *actions, 810 u32 portid, u32 seq, u16 flags, int event, int bind, 811 int ref) 812 { 813 struct tcamsg *t; 814 struct nlmsghdr *nlh; 815 unsigned char *b = skb_tail_pointer(skb); 816 struct nlattr *nest; 817 818 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags); 819 if (!nlh) 820 goto out_nlmsg_trim; 821 t = nlmsg_data(nlh); 822 t->tca_family = AF_UNSPEC; 823 t->tca__pad1 = 0; 824 t->tca__pad2 = 0; 825 826 nest = nla_nest_start(skb, TCA_ACT_TAB); 827 if (nest == NULL) 828 goto out_nlmsg_trim; 829 830 if (tcf_action_dump(skb, actions, bind, ref) < 0) 831 goto out_nlmsg_trim; 832 833 nla_nest_end(skb, nest); 834 835 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 836 return skb->len; 837 838 out_nlmsg_trim: 839 nlmsg_trim(skb, b); 840 return -1; 841 } 842 843 static int 844 tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n, 845 struct list_head *actions, int event) 846 { 847 struct sk_buff *skb; 848 849 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 850 if (!skb) 851 return -ENOBUFS; 852 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, 853 0, 0) <= 0) { 854 kfree_skb(skb); 855 return -EINVAL; 856 } 857 858 return rtnl_unicast(skb, net, portid); 859 } 860 861 static 
struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla, 862 struct nlmsghdr *n, u32 portid) 863 { 864 struct nlattr *tb[TCA_ACT_MAX + 1]; 865 const struct tc_action_ops *ops; 866 struct tc_action *a; 867 int index; 868 int err; 869 870 err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL); 871 if (err < 0) 872 goto err_out; 873 874 err = -EINVAL; 875 if (tb[TCA_ACT_INDEX] == NULL || 876 nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) 877 goto err_out; 878 index = nla_get_u32(tb[TCA_ACT_INDEX]); 879 880 err = -EINVAL; 881 ops = tc_lookup_action(tb[TCA_ACT_KIND]); 882 if (!ops) /* could happen in batch of actions */ 883 goto err_out; 884 err = -ENOENT; 885 if (ops->lookup(net, &a, index) == 0) 886 goto err_mod; 887 888 module_put(ops->owner); 889 return a; 890 891 err_mod: 892 module_put(ops->owner); 893 err_out: 894 return ERR_PTR(err); 895 } 896 897 static int tca_action_flush(struct net *net, struct nlattr *nla, 898 struct nlmsghdr *n, u32 portid) 899 { 900 struct sk_buff *skb; 901 unsigned char *b; 902 struct nlmsghdr *nlh; 903 struct tcamsg *t; 904 struct netlink_callback dcb; 905 struct nlattr *nest; 906 struct nlattr *tb[TCA_ACT_MAX + 1]; 907 const struct tc_action_ops *ops; 908 struct nlattr *kind; 909 int err = -ENOMEM; 910 911 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 912 if (!skb) { 913 pr_debug("tca_action_flush: failed skb alloc\n"); 914 return err; 915 } 916 917 b = skb_tail_pointer(skb); 918 919 err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL); 920 if (err < 0) 921 goto err_out; 922 923 err = -EINVAL; 924 kind = tb[TCA_ACT_KIND]; 925 ops = tc_lookup_action(kind); 926 if (!ops) /*some idjot trying to flush unknown action */ 927 goto err_out; 928 929 nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, 930 sizeof(*t), 0); 931 if (!nlh) 932 goto out_module_put; 933 t = nlmsg_data(nlh); 934 t->tca_family = AF_UNSPEC; 935 t->tca__pad1 = 0; 936 t->tca__pad2 = 0; 937 938 nest = nla_nest_start(skb, TCA_ACT_TAB); 939 if (nest == 
NULL) 940 goto out_module_put; 941 942 err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops); 943 if (err <= 0) 944 goto out_module_put; 945 946 nla_nest_end(skb, nest); 947 948 nlh->nlmsg_len = skb_tail_pointer(skb) - b; 949 nlh->nlmsg_flags |= NLM_F_ROOT; 950 module_put(ops->owner); 951 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, 952 n->nlmsg_flags & NLM_F_ECHO); 953 if (err > 0) 954 return 0; 955 956 return err; 957 958 out_module_put: 959 module_put(ops->owner); 960 err_out: 961 kfree_skb(skb); 962 return err; 963 } 964 965 static int 966 tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions, 967 u32 portid) 968 { 969 int ret; 970 struct sk_buff *skb; 971 972 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 973 if (!skb) 974 return -ENOBUFS; 975 976 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION, 977 0, 1) <= 0) { 978 kfree_skb(skb); 979 return -EINVAL; 980 } 981 982 /* now do the delete */ 983 ret = tcf_action_destroy(actions, 0); 984 if (ret < 0) { 985 kfree_skb(skb); 986 return ret; 987 } 988 989 ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC, 990 n->nlmsg_flags & NLM_F_ECHO); 991 if (ret > 0) 992 return 0; 993 return ret; 994 } 995 996 static int 997 tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n, 998 u32 portid, int event) 999 { 1000 int i, ret; 1001 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; 1002 struct tc_action *act; 1003 LIST_HEAD(actions); 1004 1005 ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, NULL); 1006 if (ret < 0) 1007 return ret; 1008 1009 if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) { 1010 if (tb[1] != NULL) 1011 return tca_action_flush(net, tb[1], n, portid); 1012 else 1013 return -EINVAL; 1014 } 1015 1016 for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) { 1017 act = tcf_action_get_1(net, tb[i], n, portid); 1018 if (IS_ERR(act)) { 1019 ret = PTR_ERR(act); 1020 goto err; 1021 } 1022 act->order = i; 1023 list_add_tail(&act->list, &actions); 1024 } 
1025 1026 if (event == RTM_GETACTION) 1027 ret = tcf_get_notify(net, portid, n, &actions, event); 1028 else { /* delete */ 1029 ret = tcf_del_notify(net, n, &actions, portid); 1030 if (ret) 1031 goto err; 1032 return ret; 1033 } 1034 err: 1035 if (event != RTM_GETACTION) 1036 tcf_action_destroy(&actions, 0); 1037 return ret; 1038 } 1039 1040 static int 1041 tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions, 1042 u32 portid) 1043 { 1044 struct sk_buff *skb; 1045 int err = 0; 1046 1047 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 1048 if (!skb) 1049 return -ENOBUFS; 1050 1051 if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags, 1052 RTM_NEWACTION, 0, 0) <= 0) { 1053 kfree_skb(skb); 1054 return -EINVAL; 1055 } 1056 1057 err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, 1058 n->nlmsg_flags & NLM_F_ECHO); 1059 if (err > 0) 1060 err = 0; 1061 return err; 1062 } 1063 1064 static int tcf_action_add(struct net *net, struct nlattr *nla, 1065 struct nlmsghdr *n, u32 portid, int ovr) 1066 { 1067 int ret = 0; 1068 LIST_HEAD(actions); 1069 1070 ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions); 1071 if (ret) 1072 return ret; 1073 1074 return tcf_add_notify(net, n, &actions, portid); 1075 } 1076 1077 static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON; 1078 static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = { 1079 [TCA_ROOT_FLAGS] = { .type = NLA_BITFIELD32, 1080 .validation_data = &tcaa_root_flags_allowed }, 1081 [TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 }, 1082 }; 1083 1084 static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, 1085 struct netlink_ext_ack *extack) 1086 { 1087 struct net *net = sock_net(skb->sk); 1088 struct nlattr *tca[TCA_ROOT_MAX + 1]; 1089 u32 portid = skb ? 
NETLINK_CB(skb).portid : 0; 1090 int ret = 0, ovr = 0; 1091 1092 if ((n->nlmsg_type != RTM_GETACTION) && 1093 !netlink_capable(skb, CAP_NET_ADMIN)) 1094 return -EPERM; 1095 1096 ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ROOT_MAX, NULL, 1097 extack); 1098 if (ret < 0) 1099 return ret; 1100 1101 if (tca[TCA_ACT_TAB] == NULL) { 1102 pr_notice("tc_ctl_action: received NO action attribs\n"); 1103 return -EINVAL; 1104 } 1105 1106 /* n->nlmsg_flags & NLM_F_CREATE */ 1107 switch (n->nlmsg_type) { 1108 case RTM_NEWACTION: 1109 /* we are going to assume all other flags 1110 * imply create only if it doesn't exist 1111 * Note that CREATE | EXCL implies that 1112 * but since we want avoid ambiguity (eg when flags 1113 * is zero) then just set this 1114 */ 1115 if (n->nlmsg_flags & NLM_F_REPLACE) 1116 ovr = 1; 1117 replay: 1118 ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr); 1119 if (ret == -EAGAIN) 1120 goto replay; 1121 break; 1122 case RTM_DELACTION: 1123 ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, 1124 portid, RTM_DELACTION); 1125 break; 1126 case RTM_GETACTION: 1127 ret = tca_action_gd(net, tca[TCA_ACT_TAB], n, 1128 portid, RTM_GETACTION); 1129 break; 1130 default: 1131 BUG(); 1132 } 1133 1134 return ret; 1135 } 1136 1137 static struct nlattr *find_dump_kind(struct nlattr **nla) 1138 { 1139 struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1]; 1140 struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; 1141 struct nlattr *kind; 1142 1143 tb1 = nla[TCA_ACT_TAB]; 1144 if (tb1 == NULL) 1145 return NULL; 1146 1147 if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), 1148 NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0) 1149 return NULL; 1150 1151 if (tb[1] == NULL) 1152 return NULL; 1153 if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL, NULL) < 0) 1154 return NULL; 1155 kind = tb2[TCA_ACT_KIND]; 1156 1157 return kind; 1158 } 1159 1160 static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) 1161 { 1162 struct net *net = sock_net(skb->sk); 1163 struct nlmsghdr 
*nlh; 1164 unsigned char *b = skb_tail_pointer(skb); 1165 struct nlattr *nest; 1166 struct tc_action_ops *a_o; 1167 int ret = 0; 1168 struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh); 1169 struct nlattr *tb[TCA_ROOT_MAX + 1]; 1170 struct nlattr *count_attr = NULL; 1171 unsigned long jiffy_since = 0; 1172 struct nlattr *kind = NULL; 1173 struct nla_bitfield32 bf; 1174 u32 msecs_since = 0; 1175 u32 act_count = 0; 1176 1177 ret = nlmsg_parse(cb->nlh, sizeof(struct tcamsg), tb, TCA_ROOT_MAX, 1178 tcaa_policy, NULL); 1179 if (ret < 0) 1180 return ret; 1181 1182 kind = find_dump_kind(tb); 1183 if (kind == NULL) { 1184 pr_info("tc_dump_action: action bad kind\n"); 1185 return 0; 1186 } 1187 1188 a_o = tc_lookup_action(kind); 1189 if (a_o == NULL) 1190 return 0; 1191 1192 cb->args[2] = 0; 1193 if (tb[TCA_ROOT_FLAGS]) { 1194 bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]); 1195 cb->args[2] = bf.value; 1196 } 1197 1198 if (tb[TCA_ROOT_TIME_DELTA]) { 1199 msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]); 1200 } 1201 1202 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 1203 cb->nlh->nlmsg_type, sizeof(*t), 0); 1204 if (!nlh) 1205 goto out_module_put; 1206 1207 if (msecs_since) 1208 jiffy_since = jiffies - msecs_to_jiffies(msecs_since); 1209 1210 t = nlmsg_data(nlh); 1211 t->tca_family = AF_UNSPEC; 1212 t->tca__pad1 = 0; 1213 t->tca__pad2 = 0; 1214 cb->args[3] = jiffy_since; 1215 count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32)); 1216 if (!count_attr) 1217 goto out_module_put; 1218 1219 nest = nla_nest_start(skb, TCA_ACT_TAB); 1220 if (nest == NULL) 1221 goto out_module_put; 1222 1223 ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o); 1224 if (ret < 0) 1225 goto out_module_put; 1226 1227 if (ret > 0) { 1228 nla_nest_end(skb, nest); 1229 ret = skb->len; 1230 act_count = cb->args[1]; 1231 memcpy(nla_data(count_attr), &act_count, sizeof(u32)); 1232 cb->args[1] = 0; 1233 } else 1234 nlmsg_trim(skb, b); 1235 1236 nlh->nlmsg_len = 
skb_tail_pointer(skb) - b; 1237 if (NETLINK_CB(cb->skb).portid && ret) 1238 nlh->nlmsg_flags |= NLM_F_MULTI; 1239 module_put(a_o->owner); 1240 return skb->len; 1241 1242 out_module_put: 1243 module_put(a_o->owner); 1244 nlmsg_trim(skb, b); 1245 return skb->len; 1246 } 1247 1248 struct tcf_action_net { 1249 struct rhashtable egdev_ht; 1250 }; 1251 1252 static unsigned int tcf_action_net_id; 1253 1254 struct tcf_action_egdev_cb { 1255 struct list_head list; 1256 tc_setup_cb_t *cb; 1257 void *cb_priv; 1258 }; 1259 1260 struct tcf_action_egdev { 1261 struct rhash_head ht_node; 1262 const struct net_device *dev; 1263 unsigned int refcnt; 1264 struct list_head cb_list; 1265 }; 1266 1267 static const struct rhashtable_params tcf_action_egdev_ht_params = { 1268 .key_offset = offsetof(struct tcf_action_egdev, dev), 1269 .head_offset = offsetof(struct tcf_action_egdev, ht_node), 1270 .key_len = sizeof(const struct net_device *), 1271 }; 1272 1273 static struct tcf_action_egdev * 1274 tcf_action_egdev_lookup(const struct net_device *dev) 1275 { 1276 struct net *net = dev_net(dev); 1277 struct tcf_action_net *tan = net_generic(net, tcf_action_net_id); 1278 1279 return rhashtable_lookup_fast(&tan->egdev_ht, &dev, 1280 tcf_action_egdev_ht_params); 1281 } 1282 1283 static struct tcf_action_egdev * 1284 tcf_action_egdev_get(const struct net_device *dev) 1285 { 1286 struct tcf_action_egdev *egdev; 1287 struct tcf_action_net *tan; 1288 1289 egdev = tcf_action_egdev_lookup(dev); 1290 if (egdev) 1291 goto inc_ref; 1292 1293 egdev = kzalloc(sizeof(*egdev), GFP_KERNEL); 1294 if (!egdev) 1295 return NULL; 1296 INIT_LIST_HEAD(&egdev->cb_list); 1297 egdev->dev = dev; 1298 tan = net_generic(dev_net(dev), tcf_action_net_id); 1299 rhashtable_insert_fast(&tan->egdev_ht, &egdev->ht_node, 1300 tcf_action_egdev_ht_params); 1301 1302 inc_ref: 1303 egdev->refcnt++; 1304 return egdev; 1305 } 1306 1307 static void tcf_action_egdev_put(struct tcf_action_egdev *egdev) 1308 { 1309 struct tcf_action_net 
*tan; 1310 1311 if (--egdev->refcnt) 1312 return; 1313 tan = net_generic(dev_net(egdev->dev), tcf_action_net_id); 1314 rhashtable_remove_fast(&tan->egdev_ht, &egdev->ht_node, 1315 tcf_action_egdev_ht_params); 1316 kfree(egdev); 1317 } 1318 1319 static struct tcf_action_egdev_cb * 1320 tcf_action_egdev_cb_lookup(struct tcf_action_egdev *egdev, 1321 tc_setup_cb_t *cb, void *cb_priv) 1322 { 1323 struct tcf_action_egdev_cb *egdev_cb; 1324 1325 list_for_each_entry(egdev_cb, &egdev->cb_list, list) 1326 if (egdev_cb->cb == cb && egdev_cb->cb_priv == cb_priv) 1327 return egdev_cb; 1328 return NULL; 1329 } 1330 1331 static int tcf_action_egdev_cb_call(struct tcf_action_egdev *egdev, 1332 enum tc_setup_type type, 1333 void *type_data, bool err_stop) 1334 { 1335 struct tcf_action_egdev_cb *egdev_cb; 1336 int ok_count = 0; 1337 int err; 1338 1339 list_for_each_entry(egdev_cb, &egdev->cb_list, list) { 1340 err = egdev_cb->cb(type, type_data, egdev_cb->cb_priv); 1341 if (err) { 1342 if (err_stop) 1343 return err; 1344 } else { 1345 ok_count++; 1346 } 1347 } 1348 return ok_count; 1349 } 1350 1351 static int tcf_action_egdev_cb_add(struct tcf_action_egdev *egdev, 1352 tc_setup_cb_t *cb, void *cb_priv) 1353 { 1354 struct tcf_action_egdev_cb *egdev_cb; 1355 1356 egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv); 1357 if (WARN_ON(egdev_cb)) 1358 return -EEXIST; 1359 egdev_cb = kzalloc(sizeof(*egdev_cb), GFP_KERNEL); 1360 if (!egdev_cb) 1361 return -ENOMEM; 1362 egdev_cb->cb = cb; 1363 egdev_cb->cb_priv = cb_priv; 1364 list_add(&egdev_cb->list, &egdev->cb_list); 1365 return 0; 1366 } 1367 1368 static void tcf_action_egdev_cb_del(struct tcf_action_egdev *egdev, 1369 tc_setup_cb_t *cb, void *cb_priv) 1370 { 1371 struct tcf_action_egdev_cb *egdev_cb; 1372 1373 egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv); 1374 if (WARN_ON(!egdev_cb)) 1375 return; 1376 list_del(&egdev_cb->list); 1377 kfree(egdev_cb); 1378 } 1379 1380 static int __tc_setup_cb_egdev_register(const 
					struct net_device *dev,
					tc_setup_cb_t *cb, void *cb_priv)
{
	/* Takes a reference on the per-device entry; the reference is
	 * dropped again if adding the callback fails.
	 */
	struct tcf_action_egdev *egdev = tcf_action_egdev_get(dev);
	int err;

	if (!egdev)
		return -ENOMEM;
	err = tcf_action_egdev_cb_add(egdev, cb, cb_priv);
	if (err)
		goto err_cb_add;
	return 0;

err_cb_add:
	tcf_action_egdev_put(egdev);
	return err;
}

/* Public wrapper: register an egress-device callback under RTNL. */
int tc_setup_cb_egdev_register(const struct net_device *dev,
			       tc_setup_cb_t *cb, void *cb_priv)
{
	int err;

	rtnl_lock();
	err = __tc_setup_cb_egdev_register(dev, cb, cb_priv);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_register);

static void __tc_setup_cb_egdev_unregister(const struct net_device *dev,
					   tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);

	/* Unregistering a device that was never registered is a caller bug. */
	if (WARN_ON(!egdev))
		return;
	tcf_action_egdev_cb_del(egdev, cb, cb_priv);
	tcf_action_egdev_put(egdev);
}

/* Public wrapper: unregister an egress-device callback under RTNL. */
void tc_setup_cb_egdev_unregister(const struct net_device *dev,
				  tc_setup_cb_t *cb, void *cb_priv)
{
	rtnl_lock();
	__tc_setup_cb_egdev_unregister(dev, cb, cb_priv);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_unregister);

/* Dispatch @type/@type_data to all callbacks registered for @dev.
 *
 * Returns 0 when no entry exists for @dev; otherwise the result of
 * tcf_action_egdev_cb_call() (ok-count, or first error if @err_stop).
 */
int tc_setup_cb_egdev_call(const struct net_device *dev,
			   enum tc_setup_type type, void *type_data,
			   bool err_stop)
{
	struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);

	if (!egdev)
		return 0;
	return tcf_action_egdev_cb_call(egdev, type, type_data, err_stop);
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_call);

/* Per-netns init: set up the empty egress-device hash table. */
static __net_init int tcf_action_net_init(struct net *net)
{
	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

	return rhashtable_init(&tan->egdev_ht, &tcf_action_egdev_ht_params);
}

static void __net_exit tcf_action_net_exit(struct net *net)
{
	struct tcf_action_net *tan
		= net_generic(net, tcf_action_net_id);

	rhashtable_destroy(&tan->egdev_ht);
}

/* Per-netns state is sized/allocated by the pernet core via .size/.id. */
static struct pernet_operations tcf_action_net_ops = {
	.init = tcf_action_net_init,
	.exit = tcf_action_net_exit,
	.id = &tcf_action_net_id,
	.size = sizeof(struct tcf_action_net),
};

/* Module init: register per-netns state, then hook the RTM_*ACTION
 * netlink message types to the action control (and dump) handlers.
 */
static int __init tc_action_init(void)
{
	int err;

	err = register_pernet_subsys(&tcf_action_net_ops);
	if (err)
		return err;

	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);