/*
 * net/sched/act_api.c	Packet action API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Author:	Jamal Hadi Salim
 *
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/netlink.h>

void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
	struct tcf_common **p1p;

	for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
		if (*p1p == p) {
			write_lock_bh(hinfo->lock);
			*p1p = p->tcfc_next;
			write_unlock_bh(hinfo->lock);
			gen_kill_estimator(&p->tcfc_bstats,
					   &p->tcfc_rate_est);
			kfree(p);
			return;
		}
	}
	WARN_ON(1);
}
EXPORT_SYMBOL(tcf_hash_destroy);

int tcf_hash_release(struct tcf_common *p, int bind,
		     struct tcf_hashinfo *hinfo)
{
	int ret = 0;

	if (p) {
		if (bind)
			p->tcfc_bindcnt--;

		p->tcfc_refcnt--;
		if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
			tcf_hash_destroy(p, hinfo);
			ret = 1;
		}
	}
	return ret;
}
EXPORT_SYMBOL(tcf_hash_release);

static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
			   struct tc_action *a, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct nlattr *nest;

	read_lock_bh(hinfo->lock);

	s_i = cb->args[0];

	for (i = 0; i < (hinfo->hmask + 1); i++) {
		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];

		for (; p; p = p->tcfc_next) {
			index++;
			if (index < s_i)
				continue;
			a->priv = p;
			a->order = n_i;

			nest = nla_nest_start(skb, a->order);
			if (nest == NULL)
				goto nla_put_failure;
			err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				nlmsg_trim(skb, nest);
				goto done;
			}
			nla_nest_end(skb, nest);
			n_i++;
			if (n_i >= TCA_ACT_MAX_PRIO)
				goto done;
		}
	}
done:
	read_unlock_bh(hinfo->lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
			  struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p, *s_p;
	struct nlattr *nest;
	int i = 0, n_i = 0;

	nest = nla_nest_start(skb, a->order);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
	for (i = 0; i < (hinfo->hmask + 1); i++) {
		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];

		while (p != NULL) {
			s_p = p->tcfc_next;
			if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
				module_put(a->ops->owner);
			n_i++;
			p = s_p;
		}
	}
	NLA_PUT_U32(skb, TCA_FCNT, n_i);
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EINVAL;
}

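/* Walk the hash table of one action kind: RTM_DELACTION releases and
 * deletes every entry, RTM_GETACTION dumps the entries into @skb.  Any
 * other message type is rejected with -EINVAL.
 */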
int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
		       int type, struct tc_action *a)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(skb, a, hinfo);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(skb, cb, a, hinfo);
	} else {
		printk("tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);

struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p;

	read_lock_bh(hinfo->lock);
	for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
	     p = p->tcfc_next) {
		if (p->tcfc_index == index)
			break;
	}
	read_unlock_bh(hinfo->lock);

	return p;
}
EXPORT_SYMBOL(tcf_hash_lookup);

u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo)
{
	u32 val = *idx_gen;

	do {
		if (++val == 0)
			val = 1;
	} while (tcf_hash_lookup(val, hinfo));

	return (*idx_gen = val);
}
EXPORT_SYMBOL(tcf_hash_new_index);

int tcf_hash_search(struct tc_action *a, u32 index)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	struct tcf_common *p = tcf_hash_lookup(index, hinfo);

	if (p) {
		a->priv = p;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(tcf_hash_search);

struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
				  struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p = NULL;

	if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
		if (bind)
			p->tcfc_bindcnt++;
		p->tcfc_refcnt++;
		a->priv = p;
	}
	return p;
}
EXPORT_SYMBOL(tcf_hash_check);

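/* Allocate and initialise a new action instance of @size bytes.  The entry
 * starts out with one reference (plus a bind reference when @bind is set),
 * is given @index or a freshly generated index, and optionally gets a rate
 * estimator built from @est.  The result is stored in a->priv but is not
 * hashed yet; the caller inserts it with tcf_hash_insert() once its own
 * initialisation has succeeded.
 */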
struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
				   struct tc_action *a, int size, int bind,
				   u32 *idx_gen, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p = kzalloc(size, GFP_KERNEL);

	if (unlikely(!p))
		return p;
	p->tcfc_refcnt = 1;
	if (bind)
		p->tcfc_bindcnt = 1;

	spin_lock_init(&p->tcfc_lock);
	p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
	p->tcfc_tm.install = jiffies;
	p->tcfc_tm.lastuse = jiffies;
	if (est)
		gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
				  &p->tcfc_lock, est);
	a->priv = (void *) p;
	return p;
}
EXPORT_SYMBOL(tcf_hash_create);

void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);

	write_lock_bh(hinfo->lock);
	p->tcfc_next = hinfo->htab[h];
	hinfo->htab[h] = p;
	write_unlock_bh(hinfo->lock);
}
EXPORT_SYMBOL(tcf_hash_insert);

static struct tc_action_ops *act_base = NULL;
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act)
{
	struct tc_action_ops *a, **ap;

	write_lock(&act_mod_lock);
	for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) {
		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			return -EEXIST;
		}
	}
	act->next = NULL;
	*ap = act;
	write_unlock(&act_mod_lock);
	return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act)
{
	struct tc_action_ops *a, **ap;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	for (ap = &act_base; (a = *ap) != NULL; ap = &a->next)
		if (a == act)
			break;
	if (a) {
		*ap = a->next;
		a->next = NULL;
		err = 0;
	}
	write_unlock(&act_mod_lock);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (strcmp(kind, a->kind) == 0) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}

#if 0
/* lookup by id */
static struct tc_action_ops *tc_lookup_action_id(u32 type)
{
	struct tc_action_ops *a = NULL;

	if (type) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (a->type == type) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}
#endif

int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
		    struct tcf_result *res)
{
	struct tc_action *a;
	int ret = -1;

	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		ret = TC_ACT_OK;
		goto exec_done;
	}
	while ((a = act) != NULL) {
repeat:
		if (a->ops && a->ops->act) {
			ret = a->ops->act(skb, a, res);
			if (TC_MUNGED & skb->tc_verd) {
				/* copied already, allow trampling */
				skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
				skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
			}
			if (ret == TC_ACT_REPEAT)
				goto repeat;	/* we need a ttl - JHS */
			if (ret != TC_ACT_PIPE)
				goto exec_done;
		}
		act = a->next;
	}
exec_done:
	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

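/* Free a whole chain of actions.  Each action's ->cleanup() hook runs
 * first; when it reports ACT_P_DELETED the module reference taken at
 * creation time is dropped as well.
 */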
void tcf_action_destroy(struct tc_action *act, int bind)
{
	struct tc_action *a;

	for (a = act; a; a = act) {
		if (a->ops && a->ops->cleanup) {
			if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
				module_put(a->ops->owner);
			act = act->next;
			kfree(a);
		} else { /*FIXME: Remove later - catch insertion bugs*/
			printk("tcf_action_destroy: BUG? destroying NULL ops\n");
			act = act->next;
			kfree(a);
		}
	}
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;

	if (a->ops == NULL || a->ops->dump == NULL)
		return err;
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (a->ops == NULL || a->ops->dump == NULL)
		return err;

	NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int
tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL;
	struct nlattr *nest;

	while ((a = act) != NULL) {
		act = a->next;
		nest = nla_nest_start(skb, a->order);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

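/* Instantiate a single action from its netlink attributes.  The kind is
 * resolved to a tc_action_ops, auto-loading the "act_<kind>" module when
 * necessary; since RTNL is dropped for the module load, -EAGAIN is
 * returned in that case so the caller replays the request.  On success
 * the action has been initialised via ops->init() and is returned,
 * otherwise an ERR_PTR() is returned.
 */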
struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX+1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (kind == NULL)
			goto err_out;
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	} else {
		err = -EINVAL;
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_KMOD
		rtnl_unlock();
		request_module("act_%s", act_name);
		rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load.  So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request.  We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		err = -ENOENT;
		goto err_out;
	}

	err = -ENOMEM;
	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (a == NULL)
		goto err_mod;

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
	else
		err = a_o->init(nla, est, a, ovr, bind);
	if (err < 0)
		goto err_free;

	/* module count goes up only when brand new policy is created
	 * if it exists and is only bound to in a_o->init() then
	 * ACT_P_CREATED is not returned (a zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);
	a->ops = a_o;

	return a;

err_free:
	kfree(a);
err_mod:
	module_put(a_o->owner);
err_out:
	return ERR_PTR(err);
}

struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
				  char *name, int ovr, int bind)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
	struct tc_action *head = NULL, *act, *act_prev = NULL;
	int err;
	int i;

	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (err < 0)
		return ERR_PTR(err);

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(tb[i], est, name, ovr, bind);
		if (IS_ERR(act))
			goto err;
		act->order = i;

		if (head == NULL)
			head = act;
		else
			act_prev->next = act;
		act_prev = act;
	}
	return head;

err:
	if (head != NULL)
		tcf_action_destroy(head, bind);
	return act;
}

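/* Append the action's statistics (basic, rate estimator and queue stats)
 * to a dump.  With @compat_mode set, the legacy TCA_STATS/TCA_XSTATS
 * layout is used for TCA_OLD_COMPAT actions.  Returns 0 on success and
 * -1 on failure.
 */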
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;
	struct tcf_act_hdr *h = a->priv;

	if (h == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (a->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
				TCA_STATS, TCA_XSTATS, &h->tcf_lock, &d);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &h->tcf_lock, &d);

	if (err < 0)
		goto errout;

	if (a->ops != NULL && a->ops->get_stats != NULL)
		if (a->ops->get_stats(skb, a) < 0)
			goto errout;

	if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int
tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
	     u16 flags, int event, int bind, int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);

	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_action_dump(skb, a, bind, ref) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nla_put_failure:
nlmsg_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, a, pid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, &init_net, pid);
}

static struct tc_action *
tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
{
	struct nlattr *tb[TCA_ACT_MAX+1];
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
		goto err_out;
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -ENOMEM;
	a = kzalloc(sizeof(struct tc_action), GFP_KERNEL);
	if (a == NULL)
		goto err_out;

	err = -EINVAL;
	a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (a->ops == NULL)
		goto err_free;
	if (a->ops->lookup == NULL)
		goto err_mod;
	err = -ENOENT;
	if (a->ops->lookup(a, index) == 0)
		goto err_mod;

	module_put(a->ops->owner);
	return a;

err_mod:
	module_put(a->ops->owner);
err_free:
	kfree(a);
err_out:
	return ERR_PTR(err);
}

static void cleanup_a(struct tc_action *act)
{
	struct tc_action *a;

	for (a = act; a; a = act) {
		act = a->next;
		kfree(a);
	}
}

static struct tc_action *create_a(int i)
{
	struct tc_action *act;

	act = kzalloc(sizeof(*act), GFP_KERNEL);
	if (act == NULL) {
		printk("create_a: failed to alloc!\n");
		return NULL;
	}
	act->order = i;
	return act;
}

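/* Flush all actions of the kind named by TCA_ACT_KIND: the entries are
 * released via ops->walk(..., RTM_DELACTION, ...) while an RTM_DELACTION
 * notification is built, which is then sent to the RTNLGRP_TC group.
 */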
static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX+1];
	struct nlattr *kind;
	struct tc_action *a = create_a(0);
	int err = -EINVAL;

	if (a == NULL) {
		printk("tca_action_flush: couldn't create tc_action\n");
		return err;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		printk("tca_action_flush: failed skb alloc\n");
		kfree(a);
		return -ENOBUFS;
	}

	b = skb_tail_pointer(skb);

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	a->ops = tc_lookup_action(kind);
	if (a->ops == NULL)
		goto err_out;

	nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t));
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
	if (err < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(a->ops->owner);
	kfree(a);
	err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;

	return err;

nla_put_failure:
nlmsg_failure:
	module_put(a->ops->owner);
err_out:
	kfree_skb(skb);
	kfree(a);
	return err;
}

static int
tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
	struct tc_action *head = NULL, *act, *act_prev = NULL;

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[0] != NULL && tb[1] == NULL)
			return tca_action_flush(tb[0], n, pid);
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(tb[i], n, pid);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;

		if (head == NULL)
			head = act;
		else
			act_prev->next = act;
		act_prev = act;
	}

	if (event == RTM_GETACTION)
		ret = act_get_notify(pid, n, head, event);
	else { /* delete */
		struct sk_buff *skb;

		skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
		if (!skb) {
			ret = -ENOBUFS;
			goto err;
		}

		if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event,
				 0, 1) <= 0) {
			kfree_skb(skb);
			ret = -EINVAL;
			goto err;
		}

		/* now do the delete */
		tcf_action_destroy(head, 0);
		ret = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
		if (ret > 0)
			return 0;
		return ret;
	}
err:
	cleanup_a(head);
	return ret;
}

static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
			  u16 flags)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	struct nlattr *nest;
	unsigned char *b;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	b = skb_tail_pointer(skb);

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (tcf_action_dump(skb, a, 0, 0) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	NETLINK_CB(skb).dst_group = RTNLGRP_TC;

	err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
			     flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;

nla_put_failure:
nlmsg_failure:
	kfree_skb(skb);
	return -1;
}


static int
tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr)
{
	int ret = 0;
	struct tc_action *act;
	struct tc_action *a;
	u32 seq = n->nlmsg_seq;

	act = tcf_action_init(nla, NULL, NULL, ovr, 0);
	if (act == NULL)
		goto done;
	if (IS_ERR(act)) {
		ret = PTR_ERR(act);
		goto done;
	}

	/* dump then free all the actions after update; inserted policy
	 * stays intact
	 */
	ret = tcf_add_notify(act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
	for (a = act; a; a = act) {
		act = a->next;
		kfree(a);
	}
done:
	return ret;
}

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ACT_MAX + 1];
	u32 pid = skb ? NETLINK_CB(skb).pid : 0;
	int ret = 0, ovr = 0;

	if (net != &init_net)
		return -EINVAL;

	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		printk("tc_ctl_action: received NO action attribs\n");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE
	 */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* we are going to assume all other flags
		 * imply create only if it doesn't exist
		 * Note that CREATE | EXCL implies that
		 * but since we want to avoid ambiguity (eg when flags
		 * is zero) then just set this
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
replay:
		ret = tcf_action_add(tca[TCA_ACT_TAB], n, pid, ovr);
		if (ret == -EAGAIN)
			goto replay;
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_DELACTION);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_GETACTION);
		break;
	default:
		BUG();
	}

	return ret;
}

static struct nlattr *
find_dump_kind(struct nlmsghdr *n)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX+1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *nla[TCAA_MAX + 1];
	struct nlattr *kind;

	if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0)
		return NULL;
	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
		      NLMSG_ALIGN(nla_len(tb1)), NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
		      nla_len(tb[1]), NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}

static int
tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	struct tc_action a;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
	struct nlattr *kind = find_dump_kind(cb->nlh);

	if (net != &init_net)
		return 0;
	if (kind == NULL) {
		printk("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL) {
		return 0;
	}

	memset(&a, 0, sizeof(struct tc_action));
	a.ops = a_o;

	if (a_o->walk == NULL) {
		printk("tc_dump_action: %s !capable of dumping table\n",
		       a_o->kind);
		goto nla_put_failure;
	}

	nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t));
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	ret = a_o->walk(skb, cb, RTM_GETACTION, &a);
	if (ret < 0)
		goto nla_put_failure;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
	} else
		nla_nest_cancel(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).pid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

nla_put_failure:
nlmsg_failure:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action);

	return 0;
}

subsys_initcall(tc_action_init);