/*
 * net/sched/act_api.c	Packet action API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Author:	Jamal Hadi Salim
 *
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/rhashtable.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>

static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
{
	u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;

	if (!tp)
		return -EINVAL;
	a->goto_chain = tcf_chain_get(tp->chain->block, chain_index, true);
	if (!a->goto_chain)
		return -ENOMEM;
	return 0;
}

static void tcf_action_goto_chain_fini(struct tc_action *a)
{
	tcf_chain_put(a->goto_chain);
}

static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = a->goto_chain;

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}
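/* Illustrative note (not from this file): a "goto chain" verdict packs the
 * opcode and the target chain index into the single 32-bit tcfa_action word,
 * per the macros in include/uapi/linux/pkt_cls.h.  A minimal sketch of the
 * encoding that tcf_action_goto_chain_init() above undoes:
 *
 *	a->tcfa_action = TC_ACT_GOTO_CHAIN | 7;			// go to chain 7
 *	chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;	// == 7
 *
 * TC_ACT_EXT_CMP() is what tcf_action_exec() below uses to test just the
 * opcode half of such a combined value.
 */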
/* XXX: For standalone actions, we don't need an RCU grace period either,
 * because actions are always connected to filters and filters are already
 * destroyed in RCU callbacks, so after an RCU grace period actions are
 * already disconnected from filters.  Readers can no longer find them.
 */
static void free_tcf(struct tc_action *p)
{
	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_qstats);

	if (p->act_cookie) {
		kfree(p->act_cookie->data);
		kfree(p->act_cookie);
	}
	if (p->goto_chain)
		tcf_action_goto_chain_fini(p);

	kfree(p);
}

static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
{
	spin_lock_bh(&idrinfo->lock);
	idr_remove_ext(&idrinfo->action_idr, p->tcfa_index);
	spin_unlock_bh(&idrinfo->lock);
	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}

int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	if (p) {
		if (bind)
			p->tcfa_bindcnt--;
		else if (strict && p->tcfa_bindcnt > 0)
			return -EPERM;

		p->tcfa_refcnt--;
		if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
			if (p->ops->cleanup)
				p->ops->cleanup(p, bind);
			tcf_idr_remove(p->idrinfo, p);
			ret = ACT_P_DELETED;
		}
	}

	return ret;
}
EXPORT_SYMBOL(__tcf_idr_release);

static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;

	spin_lock_bh(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ext(idr, p, id) {
		index++;
		if (index < s_i)
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start(skb, n_i);
		if (!nest)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	spin_unlock_bh(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;

	nest = nla_nest_start(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	idr_for_each_entry_ext(idr, p, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED) {
			module_put(ops->owner);
			n_i++;
		} else if (ret < 0) {
			goto nla_put_failure;
		}
	}
	if (nla_put_u32(skb, TCA_FCNT, n_i))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}
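/* tcf_generic_walker() below is the common backend that individual actions
 * use to implement their .walk op.  A minimal sketch of a typical caller
 * (function and ID names hypothetical; real users live in net/sched/act_*.c):
 *
 *	static int tcf_foo_walker(struct net *net, struct sk_buff *skb,
 *				  struct netlink_callback *cb, int type,
 *				  const struct tc_action_ops *ops)
 *	{
 *		struct tc_action_net *tn = net_generic(net, foo_net_id);
 *
 *		return tcf_generic_walker(tn, skb, cb, type, ops);
 *	}
 */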
int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);

static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo)
{
	struct tc_action *p = NULL;

	spin_lock_bh(&idrinfo->lock);
	p = idr_find_ext(&idrinfo->action_idr, index);
	spin_unlock_bh(&idrinfo->lock);

	return p;
}

int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p = tcf_idr_lookup(index, idrinfo);

	if (p) {
		*a = p;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(tcf_idr_search);

bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
		   int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p = tcf_idr_lookup(index, idrinfo);

	if (index && p) {
		if (bind)
			p->tcfa_bindcnt++;
		p->tcfa_refcnt++;
		*a = p;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcf_idr_check);

void tcf_idr_cleanup(struct tc_action *a, struct nlattr *est)
{
	if (est)
		gen_kill_estimator(&a->tcfa_rate_est);
	free_tcf(a);
}
EXPORT_SYMBOL(tcf_idr_cleanup);

int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct idr *idr = &idrinfo->action_idr;
	int err = -ENOMEM;
	unsigned long idr_index;

	if (unlikely(!p))
		return -ENOMEM;
	p->tcfa_refcnt = 1;
	if (bind)
		p->tcfa_bindcnt = 1;

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats) {
err1:
			kfree(p);
			return err;
		}
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats) {
err2:
			free_percpu(p->cpu_bstats);
			goto err1;
		}
	}
	spin_lock_init(&p->tcfa_lock);
	/* user doesn't specify an index */
	if (!index) {
		idr_preload(GFP_KERNEL);
		spin_lock_bh(&idrinfo->lock);
		err = idr_alloc_ext(idr, NULL, &idr_index, 1, 0,
				    GFP_ATOMIC);
		spin_unlock_bh(&idrinfo->lock);
		idr_preload_end();
		if (err) {
err3:
			free_percpu(p->cpu_qstats);
			goto err2;
		}
		p->tcfa_index = idr_index;
	} else {
		idr_preload(GFP_KERNEL);
		spin_lock_bh(&idrinfo->lock);
		err = idr_alloc_ext(idr, NULL, NULL, index, index + 1,
				    GFP_ATOMIC);
		spin_unlock_bh(&idrinfo->lock);
		idr_preload_end();
		if (err)
			goto err3;
		p->tcfa_index = index;
	}

	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, NULL, est);
		if (err)
			goto err3;
	}

	p->idrinfo = idrinfo;
	p->ops = ops;
	INIT_LIST_HEAD(&p->list);
	*a = p;
	return 0;
}
EXPORT_SYMBOL(tcf_idr_create);

void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	spin_lock_bh(&idrinfo->lock);
	idr_replace_ext(&idrinfo->action_idr, a, a->tcfa_index);
	spin_unlock_bh(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_insert);
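/* Note the two-phase setup above: tcf_idr_create() reserves an index by
 * installing a NULL placeholder in the IDR, and tcf_idr_insert() later
 * publishes the fully initialized action by replacing that placeholder.
 * A rough sketch of how an action's .init op uses the pair (names
 * hypothetical):
 *
 *	ret = tcf_idr_create(tn, index, est, a, &act_foo_ops, bind, false);
 *	if (ret)
 *		return ret;
 *	// ... fill in action-specific fields on *a ...
 *	tcf_idr_insert(tn, *a);
 *	return ACT_P_CREATED;
 */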
void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;

	idr_for_each_entry_ext(idr, p, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * netns.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			unregister_pernet_subsys(ops);
			return -EEXIST;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err)
		unregister_pernet_subsys(ops);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}
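/* Sketch of how an action module typically registers itself on the list
 * above (names hypothetical; see net/sched/act_gact.c for a real example).
 * The pernet ops provide the per-namespace tc_action_net that
 * tcf_idr_create() and friends operate on:
 *
 *	static struct pernet_operations foo_net_ops = {
 *		.init = foo_init_net,
 *		.exit = foo_exit_net,
 *		.id   = &foo_net_id,
 *		.size = sizeof(struct tc_action_net),
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return tcf_register_action(&act_foo_ops, &foo_net_ops);
 *	}
 */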
/* TCA_ACT_MAX_PRIO is 32, so a jump can skip at most 32 actions */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */
	int i;
	int ret = TC_ACT_OK;

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];

		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}
repeat:
		ret = a->ops->act(skb, a, res);
		if (ret == TC_ACT_REPEAT)
			goto repeat;	/* we need a ttl - JHS */

		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			} else {
				jmp_ttl -= 1;
				if (jmp_ttl > 0)
					goto restart_act_graph;
				else /* faulty graph, stop pipeline */
					return TC_ACT_OK;
			}
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

int tcf_action_destroy(struct list_head *actions, int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a, *tmp;
	int ret = 0;

	list_for_each_entry_safe(a, tmp, actions, list) {
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	if (a->act_cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, a->act_cookie->len,
			    a->act_cookie->data))
			goto nla_put_failure;
	}

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int tcf_action_dump(struct sk_buff *skb, struct list_head *actions,
		    int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL;
	struct nlattr *nest;

	list_for_each_entry(a, actions, list) {
		nest = nla_nest_start(skb, a->order);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	if (!c->data) {
		kfree(c);
		return NULL;
	}
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

	return c;
}
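/* For reference, tcf_action_dump_1() above emits each action as a nest of
 * the following form (stats only when tcf_action_copy_stats() succeeds,
 * cookie only when one is attached):
 *
 *	[TCA_KIND]	 action kind string, e.g. "gact"
 *	[TCA_ACT_STATS]	 counters from tcf_action_copy_stats()
 *	[TCA_ACT_COOKIE] opaque user-supplied cookie
 *	[TCA_OPTIONS]	 action-specific attributes from ops->dump()
 */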
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	struct tc_cookie *cookie = NULL;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (kind == NULL)
			goto err_out;
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
		if (tb[TCA_ACT_COOKIE]) {
			int cklen = nla_len(tb[TCA_ACT_COOKIE]);

			if (cklen > TC_COOKIE_MAX_SIZE)
				goto err_out;

			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				err = -ENOMEM;
				goto err_out;
			}
		}
	} else {
		err = -EINVAL;
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("act_%s", act_name);
		rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load.  So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request.  We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		err = -ENOENT;
		goto err_out;
	}

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind);
	else
		err = a_o->init(net, nla, est, &a, ovr, bind);
	if (err < 0)
		goto err_mod;

	if (name == NULL && tb[TCA_ACT_COOKIE]) {
		if (a->act_cookie) {
			kfree(a->act_cookie->data);
			kfree(a->act_cookie);
		}
		a->act_cookie = cookie;
	}

	/* The module count goes up only when a brand new policy is created;
	 * if it already exists and is only bound to in a_o->init(), then
	 * ACT_P_CREATED is not returned (zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);

	if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
		err = tcf_action_goto_chain_init(a, tp);
		if (err) {
			LIST_HEAD(actions);

			list_add_tail(&a->list, &actions);
			tcf_action_destroy(&actions, bind);
			return ERR_PTR(err);
		}
	}

	return a;

err_mod:
	module_put(a_o->owner);
err_out:
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
	return ERR_PTR(err);
}

static void cleanup_a(struct list_head *actions, int ovr)
{
	struct tc_action *a;

	if (!ovr)
		return;

	list_for_each_entry(a, actions, list)
		a->tcfa_refcnt--;
}

int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, char *name, int ovr, int bind,
		    struct list_head *actions)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	int err;
	int i;

	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, NULL);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		if (ovr)
			act->tcfa_refcnt++;
		list_add_tail(&act->list, actions);
	}

	/* Remove the temp refcnt which was necessary to protect against
	 * destroying an existing action which was being replaced
	 */
	cleanup_a(actions, ovr);
	return 0;

err:
	tcf_action_destroy(actions, bind);
	return err;
}
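/* Classifiers are the main consumers of tcf_action_init(); a simplified
 * sketch of the call as made from tcf_exts_validate() in net/sched/cls_api.c
 * (details may differ across versions):
 *
 *	LIST_HEAD(actions);
 *
 *	err = tcf_action_init(net, tp, tb[exts->action], rate_tlv, NULL,
 *			      ovr, TCA_ACT_BIND, &actions);
 *	if (err)
 *		return err;
 *
 * On -EAGAIN from a module autoload, the whole request is replayed by the
 * top-level handler (see the replay label in tc_ctl_action() below).
 */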
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int tca_get_fill(struct sk_buff *skb, struct list_head *actions,
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct list_head *actions, int event)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
			 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}

static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					  struct nlmsghdr *n, u32 portid)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
		goto err_out;
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) /* could happen in batch of actions */
		goto err_out;
	err = -ENOENT;
	if (ops->lookup(net, &a, index) == 0)
		goto err_mod;

	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}
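/* Messages built by tca_get_fill() above have the layout below; the nest
 * type of each action is its 1-based order within the request, which is
 * also how tca_action_gd() addresses them on the way back in:
 *
 *	struct tcamsg
 *	[TCA_ACT_TAB]
 *		[1] first action (TCA_KIND, TCA_OPTIONS, ...)
 *		[2] second action
 *		...
 */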
static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		pr_debug("tca_action_flush: failed skb alloc\n");
		return err;
	}

	b = skb_tail_pointer(skb);

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);
	if (!ops) /* flush requested for an unknown action */
		goto err_out;

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops);
	if (err <= 0)
		goto out_module_put;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;

	return err;

out_module_put:
	module_put(ops->owner);
err_out:
	kfree_skb(skb);
	return err;
}

static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 1) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_destroy(actions, 0);
	if (ret < 0) {
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (ret > 0)
		return 0;
	return ret;
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	LIST_HEAD(actions);

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, NULL);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1] != NULL)
			return tca_action_flush(net, tb[1], n, portid);
		else
			return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		list_add_tail(&act->list, &actions);
	}

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, &actions, event);
	else { /* delete */
		ret = tcf_del_notify(net, n, &actions, portid);
		if (ret)
			goto err;
		return ret;
	}
err:
	if (event != RTM_GETACTION)
		tcf_action_destroy(&actions, 0);
	return ret;
}

static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid)
{
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;
}
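/* Deletion comes in two flavors, both handled by tca_action_gd() above:
 * RTM_DELACTION with NLM_F_ROOT set flushes every instance of the named
 * kind via tca_action_flush(), while without the flag each action is
 * looked up by TCA_ACT_INDEX through tcf_action_get_1() and destroyed
 * individually.
 */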
static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, int ovr)
{
	int ret = 0;
	LIST_HEAD(actions);

	ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions);
	if (ret)
		return ret;

	return tcf_add_notify(net, n, &actions, portid);
}

static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON;
static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS]	= { .type = NLA_BITFIELD32,
				    .validation_data = &tcaa_root_flags_allowed },
	[TCA_ROOT_TIME_DELTA]	= { .type = NLA_U32 },
};

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = 0, ovr = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ROOT_MAX, NULL,
			  extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		pr_notice("tc_ctl_action: received NO action attribs\n");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* We are going to assume all other flags imply
		 * create-only-if-it-doesn't-exist.  Note that CREATE | EXCL
		 * implies that, but since we want to avoid ambiguity (e.g.
		 * when flags are zero) we just set this explicitly.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
replay:
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr);
		if (ret == -EAGAIN)
			goto replay;
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION);
		break;
	default:
		BUG();
	}

	return ret;
}

static struct nlattr *find_dump_kind(struct nlattr **nla)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *kind;

	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
		      NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL, NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}
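/* Dump requests may carry two root attributes, validated by tcaa_policy
 * above: TCA_ROOT_FLAGS (a bitfield32 of which only TCA_FLAG_LARGE_DUMP_ON
 * is accepted) and TCA_ROOT_TIME_DELTA (milliseconds; only actions used
 * within that window are dumped).  tc_dump_action() below stashes them in
 * cb->args[2] and cb->args[3] respectively for tcf_dump_walker() to use.
 */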
static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse(cb->nlh, sizeof(struct tcamsg), tb, TCA_ROOT_MAX,
			  tcaa_policy, NULL);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	cb->args[2] = 0;
	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA])
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else
		nlmsg_trim(skb, b);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

struct tcf_action_net {
	struct rhashtable egdev_ht;
};

static unsigned int tcf_action_net_id;

struct tcf_action_egdev_cb {
	struct list_head list;
	tc_setup_cb_t *cb;
	void *cb_priv;
};

struct tcf_action_egdev {
	struct rhash_head ht_node;
	const struct net_device *dev;
	unsigned int refcnt;
	struct list_head cb_list;
};

static const struct rhashtable_params tcf_action_egdev_ht_params = {
	.key_offset = offsetof(struct tcf_action_egdev, dev),
	.head_offset = offsetof(struct tcf_action_egdev, ht_node),
	.key_len = sizeof(const struct net_device *),
};

static struct tcf_action_egdev *
tcf_action_egdev_lookup(const struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

	return rhashtable_lookup_fast(&tan->egdev_ht, &dev,
				      tcf_action_egdev_ht_params);
}

static struct tcf_action_egdev *
tcf_action_egdev_get(const struct net_device *dev)
{
	struct tcf_action_egdev *egdev;
	struct tcf_action_net *tan;

	egdev = tcf_action_egdev_lookup(dev);
	if (egdev)
		goto inc_ref;

	egdev = kzalloc(sizeof(*egdev), GFP_KERNEL);
	if (!egdev)
		return NULL;
	INIT_LIST_HEAD(&egdev->cb_list);
	egdev->dev = dev; /* hash key; must be set before insertion */
	tan = net_generic(dev_net(dev), tcf_action_net_id);
	rhashtable_insert_fast(&tan->egdev_ht, &egdev->ht_node,
			       tcf_action_egdev_ht_params);

inc_ref:
	egdev->refcnt++;
	return egdev;
}

static void tcf_action_egdev_put(struct tcf_action_egdev *egdev)
{
	struct tcf_action_net *tan;

	if (--egdev->refcnt)
		return;
	tan = net_generic(dev_net(egdev->dev), tcf_action_net_id);
	rhashtable_remove_fast(&tan->egdev_ht, &egdev->ht_node,
			       tcf_action_egdev_ht_params);
	kfree(egdev);
}
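/* The egdev ("egress device") registry implemented here lets a driver hear
 * about tc rules whose action targets a device it owns, even when the
 * classifier itself lives on another device.  Sketch of driver-side usage
 * (callback and priv names hypothetical):
 *
 *	err = tc_setup_cb_egdev_register(netdev, foo_setup_tc_cb, priv);
 *	if (err)
 *		return err;
 *	...
 *	tc_setup_cb_egdev_unregister(netdev, foo_setup_tc_cb, priv);
 */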
static struct tcf_action_egdev_cb *
tcf_action_egdev_cb_lookup(struct tcf_action_egdev *egdev,
			   tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev_cb *egdev_cb;

	list_for_each_entry(egdev_cb, &egdev->cb_list, list)
		if (egdev_cb->cb == cb && egdev_cb->cb_priv == cb_priv)
			return egdev_cb;
	return NULL;
}

static int tcf_action_egdev_cb_call(struct tcf_action_egdev *egdev,
				    enum tc_setup_type type,
				    void *type_data, bool err_stop)
{
	struct tcf_action_egdev_cb *egdev_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(egdev_cb, &egdev->cb_list, list) {
		err = egdev_cb->cb(type, type_data, egdev_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

static int tcf_action_egdev_cb_add(struct tcf_action_egdev *egdev,
				   tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev_cb *egdev_cb;

	egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
	if (WARN_ON(egdev_cb))
		return -EEXIST;
	egdev_cb = kzalloc(sizeof(*egdev_cb), GFP_KERNEL);
	if (!egdev_cb)
		return -ENOMEM;
	egdev_cb->cb = cb;
	egdev_cb->cb_priv = cb_priv;
	list_add(&egdev_cb->list, &egdev->cb_list);
	return 0;
}

static void tcf_action_egdev_cb_del(struct tcf_action_egdev *egdev,
				    tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev_cb *egdev_cb;

	egdev_cb = tcf_action_egdev_cb_lookup(egdev, cb, cb_priv);
	if (WARN_ON(!egdev_cb))
		return;
	list_del(&egdev_cb->list);
	kfree(egdev_cb);
}

static int __tc_setup_cb_egdev_register(const struct net_device *dev,
					tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev *egdev = tcf_action_egdev_get(dev);
	int err;

	if (!egdev)
		return -ENOMEM;
	err = tcf_action_egdev_cb_add(egdev, cb, cb_priv);
	if (err)
		goto err_cb_add;
	return 0;

err_cb_add:
	tcf_action_egdev_put(egdev);
	return err;
}

int tc_setup_cb_egdev_register(const struct net_device *dev,
			       tc_setup_cb_t *cb, void *cb_priv)
{
	int err;

	rtnl_lock();
	err = __tc_setup_cb_egdev_register(dev, cb, cb_priv);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_register);

static void __tc_setup_cb_egdev_unregister(const struct net_device *dev,
					   tc_setup_cb_t *cb, void *cb_priv)
{
	struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);

	if (WARN_ON(!egdev))
		return;
	tcf_action_egdev_cb_del(egdev, cb, cb_priv);
	tcf_action_egdev_put(egdev);
}

void tc_setup_cb_egdev_unregister(const struct net_device *dev,
				  tc_setup_cb_t *cb, void *cb_priv)
{
	rtnl_lock();
	__tc_setup_cb_egdev_unregister(dev, cb, cb_priv);
	rtnl_unlock();
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_unregister);

int tc_setup_cb_egdev_call(const struct net_device *dev,
			   enum tc_setup_type type, void *type_data,
			   bool err_stop)
{
	struct tcf_action_egdev *egdev = tcf_action_egdev_lookup(dev);

	if (!egdev)
		return 0;
	return tcf_action_egdev_cb_call(egdev, type, type_data, err_stop);
}
EXPORT_SYMBOL_GPL(tc_setup_cb_egdev_call);
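/* Note the return convention of tc_setup_cb_egdev_call() above: with
 * err_stop false it returns the number of callbacks that succeeded (0 when
 * none are registered for the device); with err_stop true it returns the
 * first error encountered instead.
 */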
static __net_init int tcf_action_net_init(struct net *net)
{
	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

	return rhashtable_init(&tan->egdev_ht, &tcf_action_egdev_ht_params);
}

static void __net_exit tcf_action_net_exit(struct net *net)
{
	struct tcf_action_net *tan = net_generic(net, tcf_action_net_id);

	rhashtable_destroy(&tan->egdev_ht);
}

static struct pernet_operations tcf_action_net_ops = {
	.init = tcf_action_net_init,
	.exit = tcf_action_net_exit,
	.id = &tcf_action_net_id,
	.size = sizeof(struct tcf_action_net),
};

static int __init tc_action_init(void)
{
	int err;

	err = register_pernet_subsys(&tcf_action_net_ops);
	if (err)
		return err;

	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);