/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */

static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
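
/* A classifier module typically registers its ops once at module init
 * and unregisters them on exit. Illustrative sketch only; "cls_foo" and
 * its callbacks are made-up names:
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init cls_foo_init_module(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 * register_tcf_proto_ops() makes the "foo" kind visible to
 * tcf_proto_lookup_ops(), and request_module("cls_foo") can then load
 * the module on demand (see tcf_proto_create() below).
 */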

/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, u32 parent, struct Qdisc *q,
					  struct tcf_chain *chain)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	err = -ENOENT;
	tp->ops = tcf_proto_lookup_ops(kind);
	if (!tp->ops) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("cls_%s", kind);
		rtnl_lock();
		tp->ops = tcf_proto_lookup_ops(kind);
		/* We dropped the RTNL semaphore in order to perform
		 * the module load. So, even if we succeeded in loading
		 * the module we have to replay the request. We indicate
		 * this using -EAGAIN.
		 */
		if (tp->ops) {
			module_put(tp->ops->owner);
			err = -EAGAIN;
		} else {
			err = -ENOENT;
		}
#endif
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->classid = parent;
	tp->q = q;
	tp->chain = chain;

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_destroy(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail(&chain->list, &block->chain_list);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	return chain;
}

static void tcf_chain_flush(struct tcf_chain *chain)
{
	struct tcf_proto *tp;

	if (chain->p_filter_chain)
		RCU_INIT_POINTER(*chain->p_filter_chain, NULL);
	while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
		RCU_INIT_POINTER(chain->filter_chain, tp->next);
		tcf_chain_put(chain);
		tcf_proto_destroy(tp);
	}
}

static void tcf_chain_destroy(struct tcf_chain *chain)
{
	list_del(&chain->list);
	kfree(chain);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	++chain->refcnt;
}
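
/* tcf_chain_get() looks a chain up by index under RTNL. On a hit the
 * chain's refcount is bumped; with create == true a missing chain is
 * allocated with an initial refcount of 1. Callers drop the reference
 * with tcf_chain_put(), which frees the chain once the count hits zero.
 */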
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create)
{
	struct tcf_chain *chain;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index) {
			tcf_chain_hold(chain);
			return chain;
		}
	}

	return create ? tcf_chain_create(block, chain_index) : NULL;
}
EXPORT_SYMBOL(tcf_chain_get);

void tcf_chain_put(struct tcf_chain *chain)
{
	if (--chain->refcnt == 0)
		tcf_chain_destroy(chain);
}
EXPORT_SYMBOL(tcf_chain_put);

static void
tcf_chain_filter_chain_ptr_set(struct tcf_chain *chain,
			       struct tcf_proto __rcu **p_filter_chain)
{
	chain->p_filter_chain = p_filter_chain;
}

static void tcf_block_offload_cmd(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  enum tc_block_command command)
{
	struct net_device *dev = q->dev_queue->dev;
	struct tc_block_offload bo = {};

	if (!tc_can_offload(dev))
		return;
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = block;
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
}

static void tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				   struct tcf_block_ext_info *ei)
{
	tcf_block_offload_cmd(block, q, ei, TC_BLOCK_BIND);
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	tcf_block_offload_cmd(block, q, ei, TC_BLOCK_UNBIND);
}

int tcf_block_get_ext(struct tcf_block **p_block,
		      struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		      struct tcf_block_ext_info *ei)
{
	struct tcf_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	struct tcf_chain *chain;
	int err;

	if (!block)
		return -ENOMEM;
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->cb_list);

	/* Create chain 0 by default, it has to be always present. */
	chain = tcf_chain_create(block, 0);
	if (!chain) {
		err = -ENOMEM;
		goto err_chain_create;
	}
	tcf_chain_filter_chain_ptr_set(chain, p_filter_chain);
	block->net = qdisc_net(q);
	block->q = q;
	tcf_block_offload_bind(block, q, ei);
	*p_block = block;
	return 0;

err_chain_create:
	kfree(block);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
{
	struct tcf_block_ext_info ei = {0, };

	return tcf_block_get_ext(p_block, p_filter_chain, q, &ei);
}
EXPORT_SYMBOL(tcf_block_get);
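
/* Classful qdiscs allocate one block per filter attach point, usually
 * from their ->init() or class-change path. A sketch of a typical call
 * site (names are illustrative, not from a specific qdisc):
 *
 *	err = tcf_block_get(&cl->block, &cl->filter_list, sch);
 *	if (err)
 *		goto err_out;
 *
 * The paired tcf_block_put() belongs in the qdisc's teardown path.
 */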

void tcf_block_put_ext(struct tcf_block *block,
		       struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	struct tcf_chain *chain, *tmp;

	if (!block)
		return;

	tcf_block_offload_unbind(block, q, ei);

	/* XXX: Standalone actions are not allowed to jump to any chain, and
	 * bound actions should be all removed after flushing. However,
	 * filters are destroyed in RCU callbacks, we have to hold the chains
	 * first, otherwise we would always race with RCU callbacks on this list
	 * without proper locking.
	 */

	/* Wait for existing RCU callbacks to cool down. */
	rcu_barrier();

	/* Hold a refcnt for all chains, except 0, in case they are gone. */
	list_for_each_entry(chain, &block->chain_list, list)
		if (chain->index)
			tcf_chain_hold(chain);

	/* No race on the list, because no chain could be destroyed. */
	list_for_each_entry(chain, &block->chain_list, list)
		tcf_chain_flush(chain);

	/* Wait for RCU callbacks to release the reference count. */
	rcu_barrier();

	/* At this point, all the chains should have refcnt == 1. */
	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
		tcf_chain_put(chain);
	kfree(block);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, NULL, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

struct tcf_block_cb {
	struct list_head list;
	tc_setup_cb_t *cb;
	void *cb_ident;
	void *cb_priv;
	unsigned int refcnt;
};

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(tcf_block_cb_priv);

struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	struct tcf_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list)
		if (block_cb->cb == cb && block_cb->cb_ident == cb_ident)
			return block_cb;
	return NULL;
}
EXPORT_SYMBOL(tcf_block_cb_lookup);

void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(tcf_block_cb_incref);

unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(tcf_block_cb_decref);

struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv)
{
	struct tcf_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return NULL;
	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	list_add(&block_cb->list, &block->cb_list);
	return block_cb;
}
EXPORT_SYMBOL(__tcf_block_cb_register);

int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv)
{
	struct tcf_block_cb *block_cb;

	block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv);
	return block_cb ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(tcf_block_cb_register);

void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
{
	list_del(&block_cb->list);
	kfree(block_cb);
}
EXPORT_SYMBOL(__tcf_block_cb_unregister);

void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
	struct tcf_block_cb *block_cb;

	block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
	if (!block_cb)
		return;
	__tcf_block_cb_unregister(block_cb);
}
EXPORT_SYMBOL(tcf_block_cb_unregister);

static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
			     void *type_data, bool err_stop)
{
	struct tcf_block_cb *block_cb;
	int ok_count = 0;
	int err;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}
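
/* Drivers attach to a block from their ndo_setup_tc() handler when it
 * is invoked with TC_SETUP_BLOCK (see tcf_block_offload_cmd() above).
 * Illustrative sketch, with made-up foo_* names:
 *
 *	static int foo_setup_tc_block(struct foo_priv *priv,
 *				      struct tc_block_offload *f)
 *	{
 *		switch (f->command) {
 *		case TC_BLOCK_BIND:
 *			return tcf_block_cb_register(f->block, foo_setup_cb,
 *						     priv, priv);
 *		case TC_BLOCK_UNBIND:
 *			tcf_block_cb_unregister(f->block, foo_setup_cb, priv);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 *
 * Each registered callback is then hit by tcf_block_cb_call() whenever
 * a filter on the block is offloaded.
 */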

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->q->ops->id, tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	protocol = tc_skb_protocol(skb);
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);

struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
{
	return rtnl_dereference(*chain_info->pprev);
}

static void tcf_chain_tp_insert(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	if (chain->p_filter_chain &&
	    *chain_info->pprev == chain->filter_chain)
		rcu_assign_pointer(*chain->p_filter_chain, tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);
	tcf_chain_hold(chain);
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = rtnl_dereference(chain_info->next);

	if (chain->p_filter_chain && tp == chain->filter_chain)
		RCU_INIT_POINTER(*chain->p_filter_chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
	tcf_chain_put(chain);
}
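
/* Filters on a chain are kept sorted by ascending prio, so the lookup
 * below can stop at the first tp with tp->prio >= prio. On return,
 * chain_info is primed with the insertion point (pprev/next), which
 * tcf_chain_tp_insert() and tcf_chain_tp_remove() then reuse.
 */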
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	chain_info->next = tp ? tp->next : NULL;
	return tp;
}

static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct Qdisc *q, u32 parent,
			 void *fh, u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = parent;
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct Qdisc *q, u32 parent,
			  void *fh, int event, bool unicast)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, q, parent, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}
static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct Qdisc *q, u32 parent,
			      void *fh, bool unicast, bool *last)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, q, parent, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct Qdisc *q, u32 parent,
				 struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next))
		tfilter_notify(net, oskb, n, tp, q, parent, NULL, event, false);
}

/* Add/change/delete/get a filter node */

static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;

	if ((n->nlmsg_type != RTM_GETTFILTER) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		switch (n->nlmsg_type) {
		case RTM_DELTFILTER:
			if (protocol || t->tcm_handle || tca[TCA_KIND])
				return -ENOENT;
			break;
		case RTM_NEWTFILTER:
			/* If no priority is provided by the user,
			 * we allocate one.
			 */
			if (n->nlmsg_flags & NLM_F_CREATE) {
				prio = TC_H_MAKE(0x80000000U, 0U);
				prio_allocate = true;
				break;
			}
			/* fall-through */
		default:
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	/* Find link */
	dev = __dev_get_by_index(net, t->tcm_ifindex);
	if (dev == NULL)
		return -ENODEV;

	/* Find qdisc */
	if (!parent) {
		q = dev->qdisc;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
		if (q == NULL)
			return -EINVAL;
	}

	/* Is it classful? */
	cops = q->ops->cl_ops;
	if (!cops)
		return -EINVAL;

	if (!cops->tcf_block)
		return -EOPNOTSUPP;

	/* Do we search for filter, attached to class? */
	if (TC_H_MIN(parent)) {
		cl = cops->find(q, parent);
		if (cl == 0)
			return -ENOENT;
	}

	/* And the last stroke */
	block = cops->tcf_block(q, cl);
	if (!block) {
		err = -EINVAL;
		goto errout;
	}
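
	/* Chain 0 is the implicit default; any other index must fit in
	 * the TC_ACT_EXT_VAL_MASK value space, so that TC_ACT_GOTO_CHAIN
	 * actions can encode the target chain index in the verdict.
	 */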
	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index,
			      n->nlmsg_type == RTM_NEWTFILTER);
	if (!chain) {
		err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
		goto errout;
	}

	if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
		tfilter_notify_chain(net, skb, q, parent, n,
				     chain, RTM_DELTFILTER);
		tcf_chain_flush(chain);
		err = 0;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		err = PTR_ERR(tp);
		goto errout;
	}

	if (tp == NULL) {
		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			err = -EINVAL;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));

		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
				      protocol, prio, parent, q, chain);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout;
		}
		tp_created = 1;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
			tcf_chain_tp_remove(chain, &chain_info, tp);
			tfilter_notify(net, skb, n, tp, q, parent, fh,
				       RTM_DELTFILTER, false);
			tcf_proto_destroy(tp);
			err = 0;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			err = -ENOENT;
			goto errout;
		}
	} else {
		bool last;

		switch (n->nlmsg_type) {
		case RTM_NEWTFILTER:
			if (n->nlmsg_flags & NLM_F_EXCL) {
				if (tp_created)
					tcf_proto_destroy(tp);
				err = -EEXIST;
				goto errout;
			}
			break;
		case RTM_DELTFILTER:
			err = tfilter_del_notify(net, skb, n, tp, q, parent,
						 fh, false, &last);
			if (err)
				goto errout;
			if (last) {
				tcf_chain_tp_remove(chain, &chain_info, tp);
				tcf_proto_destroy(tp);
			}
			goto errout;
		case RTM_GETTFILTER:
			err = tfilter_notify(net, skb, n, tp, q, parent, fh,
					     RTM_NEWTFILTER, true);
			goto errout;
		default:
			err = -EINVAL;
			goto errout;
		}
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE :
							      TCA_ACT_REPLACE);
	if (err == 0) {
		if (tp_created)
			tcf_chain_tp_insert(chain, &chain_info, tp);
		tfilter_notify(net, skb, n, tp, q, parent, fh,
			       RTM_NEWTFILTER, false);
	} else {
		if (tp_created)
			tcf_proto_destroy(tp);
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}
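
/* Dump support. tc_dump_tfilter() iterates over all chains of a block
 * and their filters. Netlink callback state: cb->args[0] is the flat
 * filter index already dumped across the whole block (the resume
 * point), while cb->args[1..] hold the per-tp walker position.
 */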
struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct Qdisc *q;
	u32 parent;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_dump_args arg;
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, q, parent, NULL,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				return false;

			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			return false;
	}
	return true;
}

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q;
	struct tcf_block *block;
	struct tcf_chain *chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	unsigned long cl = 0;
	const struct Qdisc_class_ops *cops;
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
	if (err)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return skb->len;

	parent = tcm->tcm_parent;
	if (!parent) {
		q = dev->qdisc;
		parent = q->handle;
	} else {
		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
	}
	if (!q)
		goto out;
	cops = q->ops->cl_ops;
	if (!cops)
		goto out;
	if (!cops->tcf_block)
		goto out;
	if (TC_H_MIN(tcm->tcm_parent)) {
		cl = cops->find(q, tcm->tcm_parent);
		if (cl == 0)
			goto out;
	}
	block = cops->tcf_block(q, cl);
	if (!block)
		goto out;

	index_start = cb->args[0];
	index = 0;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index))
			break;
	}

	cb->args[0] = index;

out:
	return skb->len;
}
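
/* tcf_exts helpers: individual classifiers parse their action attributes
 * with tcf_exts_validate(), atomically swap old actions for new ones
 * with tcf_exts_change(), and release them with tcf_exts_destroy().
 * A rough sketch of the usual flow in a classifier's ->change()
 * implementation (illustrative names, not from a specific classifier):
 *
 *	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
 *	if (err < 0)
 *		return err;
 *	...
 *	tcf_exts_change(&prog->exts, &e);
 */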
void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	LIST_HEAD(actions);

	tcf_exts_to_list(exts, &actions);
	tcf_action_destroy(&actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			LIST_HEAD(actions);
			int err, i = 0;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      &actions);
			if (err)
				return err;
			list_for_each_entry(act, &actions, list)
				exts->actions[i++] = act;
			exts->nr_actions = i;
		}
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police]))
		return -EOPNOTSUPP;
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			LIST_HEAD(actions);

			nest = nla_nest_start(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			tcf_exts_to_list(exts, &actions);
			if (tcf_action_dump(skb, &actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);

			nest = nla_nest_start(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);


int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);

	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);
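
/* Offload plumbing: tc_setup_cb_call() fans a command out to every
 * callback bound to the block and, additionally, to the egress devices
 * of the filter's actions (via the actions' ->get_dev() hook). It
 * returns the number of callbacks that succeeded, or the first error
 * when err_stop is set.
 */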
static int tc_exts_setup_cb_egdev_call(struct tcf_exts *exts,
				       enum tc_setup_type type,
				       void *type_data, bool err_stop)
{
	int ok_count = 0;
#ifdef CONFIG_NET_CLS_ACT
	const struct tc_action *a;
	struct net_device *dev;
	int i, ret;

	if (!tcf_exts_has_actions(exts))
		return 0;

	for (i = 0; i < exts->nr_actions; i++) {
		a = exts->actions[i];
		if (!a->ops->get_dev)
			continue;
		dev = a->ops->get_dev(a);
		if (!dev || !tc_can_offload(dev))
			continue;
		ret = tc_setup_cb_egdev_call(dev, type, type_data, err_stop);
		if (ret < 0)
			return ret;
		ok_count += ret;
	}
#endif
	return ok_count;
}

int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop)
{
	int ok_count;
	int ret;

	ret = tcf_block_cb_call(block, type, type_data, err_stop);
	if (ret < 0)
		return ret;
	ok_count = ret;

	if (!exts)
		return ok_count;
	ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
	if (ret < 0)
		return ret;
	ok_count += ret;

	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);

static int __init tc_filter_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
		      tc_dump_tfilter, 0);

	return 0;
}

subsys_initcall(tc_filter_init);