/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */

static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

/* Register (unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
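
/* Typical registration from a classifier module (illustrative sketch;
 * "foo" and its callbacks are hypothetical):
 *
 *	static struct tcf_proto_ops foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.owner		= THIS_MODULE,
 *		.classify	= foo_classify,
 *		...
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return register_tcf_proto_ops(&foo_ops);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		unregister_tcf_proto_ops(&foo_ops);
 *	}
 */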

/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}
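
/* Example: on an empty chain, tcf_auto_prio(NULL) returns
 * TC_H_MAJ(0xC0000000) == 0xC0000000, i.e. user-visible prio 0xC000.
 * If the head of the chain already has prio 0xC0000000, the next
 * auto-allocated value is TC_H_MAJ(0xC0000000 - 1) == 0xBFFF0000,
 * so automatically added filters stack downwards from 0xC000.
 */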

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, u32 parent, struct Qdisc *q,
					  struct tcf_chain *chain)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	err = -ENOENT;
	tp->ops = tcf_proto_lookup_ops(kind);
	if (!tp->ops) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("cls_%s", kind);
		rtnl_lock();
		tp->ops = tcf_proto_lookup_ops(kind);
		/* We dropped the RTNL semaphore in order to perform
		 * the module load. So, even if we succeeded in loading
		 * the module we have to replay the request. We indicate
		 * this using -EAGAIN.
		 */
		if (tp->ops) {
			module_put(tp->ops->owner);
			err = -EAGAIN;
		} else {
			err = -ENOENT;
		}
#endif
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->classid = parent;
	tp->q = q;
	tp->chain = chain;

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	return chain;
}

static void tcf_chain_flush(struct tcf_chain *chain)
{
	struct tcf_proto *tp;

	if (chain->p_filter_chain)
		RCU_INIT_POINTER(*chain->p_filter_chain, NULL);
	while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
		RCU_INIT_POINTER(chain->filter_chain, tp->next);
		tcf_chain_put(chain);
		tcf_proto_destroy(tp);
	}
}

static void tcf_chain_destroy(struct tcf_chain *chain)
{
	list_del(&chain->list);
	kfree(chain);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	++chain->refcnt;
}

struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create)
{
	struct tcf_chain *chain;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index) {
			tcf_chain_hold(chain);
			return chain;
		}
	}

	return create ? tcf_chain_create(block, chain_index) : NULL;
}
EXPORT_SYMBOL(tcf_chain_get);

void tcf_chain_put(struct tcf_chain *chain)
{
	if (--chain->refcnt == 0)
		tcf_chain_destroy(chain);
}
EXPORT_SYMBOL(tcf_chain_put);

static void
tcf_chain_filter_chain_ptr_set(struct tcf_chain *chain,
			       struct tcf_proto __rcu **p_filter_chain)
{
	chain->p_filter_chain = p_filter_chain;
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain)
{
	struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	struct tcf_chain *chain;
	int err;

	if (!block)
		return -ENOMEM;
	INIT_LIST_HEAD(&block->chain_list);
	/* Create chain 0 by default, it has to be always present. */
	chain = tcf_chain_create(block, 0);
	if (!chain) {
		err = -ENOMEM;
		goto err_chain_create;
	}
	tcf_chain_filter_chain_ptr_set(chain, p_filter_chain);
	*p_block = block;
	return 0;

err_chain_create:
	kfree(block);
	return err;
}
EXPORT_SYMBOL(tcf_block_get);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_chain *chain, *tmp;

	if (!block)
		return;

	/* XXX: Standalone actions are not allowed to jump to any chain, and
	 * bound actions should all be removed after flushing. However,
	 * filters are destroyed in RCU callbacks, so we have to hold the
	 * chains first, otherwise we would always race with RCU callbacks
	 * on this list without proper locking.
	 */

	/* Wait for existing RCU callbacks to cool down. */
	rcu_barrier();

	/* Hold a refcnt for all chains, except 0, in case they are gone. */
	list_for_each_entry(chain, &block->chain_list, list)
		if (chain->index)
			tcf_chain_hold(chain);

	/* No race on the list, because no chain could be destroyed. */
	list_for_each_entry(chain, &block->chain_list, list)
		tcf_chain_flush(chain);

	/* Wait for RCU callbacks to release the reference count. */
	rcu_barrier();

	/* At this point, all the chains should have refcnt == 1. */
	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
		tcf_chain_put(chain);
	kfree(block);
}
EXPORT_SYMBOL(tcf_block_put);
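
/* A classful qdisc typically obtains its block in ->init() and releases
 * it in ->destroy() (illustrative sketch; the private struct and its
 * field names are hypothetical):
 *
 *	struct foo_sched_data {
 *		struct tcf_block *block;
 *		struct tcf_proto __rcu *filter_list;
 *	};
 *
 *	In ->init():
 *		err = tcf_block_get(&q->block, &q->filter_list);
 *		if (err)
 *			return err;
 *
 *	In ->destroy():
 *		tcf_block_put(q->block);
 */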

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->q->ops->id, tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	protocol = tc_skb_protocol(skb);
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);
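
/* Typical call site in a qdisc's classification path (illustrative
 * sketch; the private struct, drop handling and class lookup are
 * hypothetical):
 *
 *	struct tcf_result res;
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *
 *	switch (tcf_classify(skb, fl, &res, false)) {
 *	case TC_ACT_SHOT:
 *		... drop the skb ...
 *	case TC_ACT_UNSPEC:
 *		... no filter matched, fall back to a default class ...
 *	default:
 *		... map res.classid to a class ...
 *	}
 */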

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
{
	return rtnl_dereference(*chain_info->pprev);
}

static void tcf_chain_tp_insert(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	if (chain->p_filter_chain &&
	    *chain_info->pprev == chain->filter_chain)
		rcu_assign_pointer(*chain->p_filter_chain, tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);
	tcf_chain_hold(chain);
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = rtnl_dereference(chain_info->next);

	if (chain->p_filter_chain && tp == chain->filter_chain)
		RCU_INIT_POINTER(*chain->p_filter_chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
	tcf_chain_put(chain);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	chain_info->next = tp ? tp->next : NULL;
	return tp;
}
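
/* The per-chain filter list is kept sorted by ascending prio, so the
 * scan above stops at the first tp with tp->prio >= prio: an exact
 * prio match is returned (or -EINVAL on a prio/protocol conflict),
 * while a higher prio means "not found"; either way chain_info records
 * the insertion point for a subsequent tcf_chain_tp_insert().
 */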

static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, void *fh, u32 portid,
			 u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
	tcm->tcm_parent = tp->classid;
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  void *fh, int event, bool unicast)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      void *fh, bool unicast, bool *last)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next))
		tfilter_notify(net, oskb, n, tp, NULL, event, false);
}

/* Add/change/delete/get a filter node */

static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;

	if ((n->nlmsg_type != RTM_GETTFILTER) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		switch (n->nlmsg_type) {
		case RTM_DELTFILTER:
			if (protocol || t->tcm_handle || tca[TCA_KIND])
				return -ENOENT;
			break;
		case RTM_NEWTFILTER:
			/* If no priority is provided by the user,
			 * we allocate one.
			 */
			if (n->nlmsg_flags & NLM_F_CREATE) {
				prio = TC_H_MAKE(0x80000000U, 0U);
				prio_allocate = true;
				break;
			}
			/* fall-through */
		default:
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	/* Find link */
	dev = __dev_get_by_index(net, t->tcm_ifindex);
	if (dev == NULL)
		return -ENODEV;

	/* Find qdisc */
	if (!parent) {
		q = dev->qdisc;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
		if (q == NULL)
			return -EINVAL;
	}

	/* Is it classful? */
	cops = q->ops->cl_ops;
	if (!cops)
		return -EINVAL;

	if (!cops->tcf_block)
		return -EOPNOTSUPP;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		cl = cops->find(q, parent);
		if (cl == 0)
			return -ENOENT;
	}

	/* And the last stroke */
	block = cops->tcf_block(q, cl);
	if (!block) {
		err = -EINVAL;
		goto errout;
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index,
			      n->nlmsg_type == RTM_NEWTFILTER);
	if (!chain) {
		err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
		goto errout;
	}

	if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
		tfilter_notify_chain(net, skb, n, chain, RTM_DELTFILTER);
		tcf_chain_flush(chain);
		err = 0;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		err = PTR_ERR(tp);
		goto errout;
	}

	if (tp == NULL) {
		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			err = -EINVAL;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));

		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
				      protocol, prio, parent, q, chain);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout;
		}
		tp_created = 1;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
			tcf_chain_tp_remove(chain, &chain_info, tp);
			tfilter_notify(net, skb, n, tp, fh,
				       RTM_DELTFILTER, false);
			tcf_proto_destroy(tp);
			err = 0;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}
	} else {
		bool last;

		switch (n->nlmsg_type) {
		case RTM_NEWTFILTER:
			if (n->nlmsg_flags & NLM_F_EXCL) {
				if (tp_created)
					tcf_proto_destroy(tp);
				err = -EEXIST;
				goto errout;
			}
			break;
		case RTM_DELTFILTER:
			err = tfilter_del_notify(net, skb, n, tp, fh, false,
						 &last);
			if (err)
				goto errout;
			if (last) {
				tcf_chain_tp_remove(chain, &chain_info, tp);
				tcf_proto_destroy(tp);
			}
			goto errout;
		case RTM_GETTFILTER:
			err = tfilter_notify(net, skb, n, tp, fh,
					     RTM_NEWTFILTER, true);
			goto errout;
		default:
			err = -EINVAL;
			goto errout;
		}
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE :
							      TCA_ACT_REPLACE);
	if (err == 0) {
		if (tp_created)
			tcf_chain_tp_insert(chain, &chain_info, tp);
		tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false);
	} else {
		if (tp_created)
			tcf_proto_destroy(tp);
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}
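
/* For reference, a userspace request such as (illustrative)
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 10 \
 *		u32 match ip dst 10.0.0.1 flowid 1:1
 *
 * arrives here as an RTM_NEWTFILTER message with NLM_F_CREATE set,
 * tcm_info carrying prio 10 in its major half and the protocol
 * (ETH_P_IP) in its minor half, and a TCA_KIND attribute of "u32".
 */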

struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct sk_buff *skb,
			   struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_dump_args arg;
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				return false;

			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			return false;
	}
	return true;
}

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_block *block;
	struct tcf_chain *chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	unsigned long cl = 0;
	const struct Qdisc_class_ops *cops;
	long index_start;
	long index;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
	if (err)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return skb->len;

	if (!tcm->tcm_parent)
		q = dev->qdisc;
	else
		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
	if (!q)
		goto out;
	cops = q->ops->cl_ops;
	if (!cops)
		goto out;
	if (!cops->tcf_block)
		goto out;
	if (TC_H_MIN(tcm->tcm_parent)) {
		cl = cops->find(q, tcm->tcm_parent);
		if (cl == 0)
			goto out;
	}
	block = cops->tcf_block(q, cl);
	if (!block)
		goto out;

	index_start = cb->args[0];
	index = 0;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, skb, cb, index_start, &index))
			break;
	}

	cb->args[0] = index;

out:
	return skb->len;
}
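
/* Dump resume state, for reference: cb->args[0] holds the index of the
 * tcf_proto at which the previous dump stopped, and cb->args[1] holds
 * one more than the number of nodes already dumped from that proto, so
 * a multi-part dump resumes exactly where the previous skb filled up.
 */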

void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	LIST_HEAD(actions);

	tcf_exts_to_list(exts, &actions);
	tcf_action_destroy(&actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			LIST_HEAD(actions);
			int err, i = 0;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      &actions);
			if (err)
				return err;
			list_for_each_entry(act, &actions, list)
				exts->actions[i++] = act;
			exts->nr_actions = i;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police]))
		return -EOPNOTSUPP;
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			LIST_HEAD(actions);

			nest = nla_nest_start(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			tcf_exts_to_list(exts, &actions);
			if (tcf_action_dump(skb, &actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts,
		     struct net_device **hw_dev)
{
#ifdef CONFIG_NET_CLS_ACT
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (a->ops->get_dev) {
			a->ops->get_dev(a, dev_net(dev), hw_dev);
			break;
		}
	}
	if (*hw_dev)
		return 0;
#endif
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(tcf_exts_get_dev);
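
/* Classifiers typically funnel their action/police attributes through
 * tcf_exts_validate() from ->change() and commit the result with
 * tcf_exts_change() (illustrative sketch; TCA_FOO_* and the filter
 * struct are hypothetical):
 *
 *	struct tcf_exts e;
 *	int err;
 *
 *	err = tcf_exts_init(&e, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
 *	if (err < 0)
 *		goto errout;
 *	tcf_exts_change(&f->exts, &e);
 */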

static int __init tc_filter_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
		      tc_dump_tfilter, 0);

	return 0;
}

subsys_initcall(tc_filter_init);