/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/flow_dissector.h>

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif

struct flow_head {
	struct list_head	filters;
	struct rcu_head		rcu;
};

struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
	struct rcu_head		rcu;
};

static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 src = flow_get_u32_src(flow);

	if (src)
		return ntohl(src);

	return addr_fold(skb->sk);
}

static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 dst = flow_get_u32_dst(flow);

	if (dst)
		return ntohl(dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}

static u32 flow_get_proto(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return flow->basic.ip_proto;
}

static u32 flow_get_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.src);

	return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	return addr_fold(skb->nfct);
#else
	return 0;
#endif
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif

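/*
 * The flow_get_nfct_* helpers below read the connection-tracking tuple
 * via CTTUPLE(). When no conntrack entry is attached to the skb (or
 * conntrack is compiled out entirely), CTTUPLE() jumps to the
 * function-local "fallback" label and the plain flow-dissector value
 * is used instead.
 */
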
static u32 flow_get_nfct_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb, flow);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb, flow);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb, flow);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb, flow);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) {
		kuid_t skuid = skb->sk->sk_socket->file->f_cred->fsuid;

		return from_kuid(&init_user_ns, skuid);
	}
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file) {
		kgid_t skgid = skb->sk->sk_socket->file->f_cred->fsgid;

		return from_kgid(&init_user_ns, skgid);
	}
	return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 uninitialized_var(tag);

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_hash(skb);
}

static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, flow);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, flow);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, flow);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, flow);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, flow);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, flow);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, flow);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, flow);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, flow);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}

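/*
 * Keys whose getters consume the dissected flow_keys. flow_classify()
 * runs skb_flow_dissect_flow_keys() only when at least one of these is
 * requested; all other keys are read directly from skb metadata.
 */
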
#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |		\
			  (1 << FLOW_KEY_DST) |			\
			  (1 << FLOW_KEY_PROTO) |		\
			  (1 << FLOW_KEY_PROTO_SRC) |		\
			  (1 << FLOW_KEY_PROTO_DST) |		\
			  (1 << FLOW_KEY_NFCT_SRC) |		\
			  (1 << FLOW_KEY_NFCT_DST) |		\
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) |	\
			  (1 << FLOW_KEY_NFCT_PROTO_DST))

static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = rcu_dereference_bh(tp->root);
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry_rcu(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;
		if (keymask & FLOW_KEYS_NEEDED)
			skb_flow_dissect_flow_keys(skb, &flow_keys, 0);

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key, &flow_keys);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}

static void flow_perturbation(unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};

static void flow_destroy_filter(struct rcu_head *head)
{
	struct flow_filter *f = container_of(head, struct flow_filter, rcu);

	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	kfree(f);
}

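/*
 * flow_change() builds a fully initialized replacement filter and only
 * then publishes it: a new filter is appended with list_add_tail_rcu(),
 * a changed one swaps places with the old entry via list_replace_rcu(),
 * and the old entry is freed through flow_destroy_filter() after an RCU
 * grace period. flow_classify() therefore never observes a half-updated
 * filter.
 */
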
static int flow_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg, bool ovr)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *fold, *fnew;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	struct tcf_exts e;
	struct tcf_ematch_tree t;
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;

		/* keymask is a bitmask, so test the key *bits* rather
		 * than the raw FLOW_KEY_* enum values.
		 */
		if ((keymask & ((1 << FLOW_KEY_SKUID) | (1 << FLOW_KEY_SKGID))) &&
		    sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
			return -EOPNOTSUPP;
	}

	tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t);
	if (err < 0)
		goto err1;

	err = -ENOBUFS;
	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		goto err2;

	tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);

	fold = (struct flow_filter *)*arg;
	if (fold) {
		err = -EINVAL;
		if (fold->handle != handle && handle)
			goto err2;

		/* Copy fold into fnew */
		fnew->tp = fold->tp;
		fnew->handle = fold->handle;
		fnew->nkeys = fold->nkeys;
		fnew->keymask = fold->keymask;
		fnew->mode = fold->mode;
		fnew->mask = fold->mask;
		fnew->xor = fold->xor;
		fnew->rshift = fold->rshift;
		fnew->addend = fold->addend;
		fnew->divisor = fold->divisor;
		fnew->baseclass = fold->baseclass;
		fnew->hashrnd = fold->hashrnd;

		mode = fold->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = fold->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0)
			baseclass = TC_H_MAKE(tp->q->handle, baseclass);
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		fnew->handle = handle;
		fnew->mask = ~0U;
		fnew->tp = tp;
		get_random_bytes(&fnew->hashrnd, 4);
	}

	fnew->perturb_timer.function = flow_perturbation;
	fnew->perturb_timer.data = (unsigned long)fnew;
	init_timer_deferrable(&fnew->perturb_timer);

	tcf_exts_change(tp, &fnew->exts, &e);
	tcf_em_tree_change(tp, &fnew->ematches, &t);

	netif_keep_dst(qdisc_dev(tp->q));

	if (tb[TCA_FLOW_KEYS]) {
		fnew->keymask = keymask;
		fnew->nkeys = nkeys;
	}

	fnew->mode = mode;

	if (tb[TCA_FLOW_MASK])
		fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		fnew->baseclass = baseclass;

	fnew->perturb_period = perturb_period;
	if (perturb_period)
		mod_timer(&fnew->perturb_timer, jiffies + perturb_period);

	if (*arg == 0)
		list_add_tail_rcu(&fnew->list, &head->filters);
	else
		list_replace_rcu(&fold->list, &fnew->list);

	*arg = (unsigned long)fnew;

	if (fold)
		call_rcu(&fold->rcu, flow_destroy_filter);
	return 0;

err2:
	tcf_em_tree_destroy(&t);
	kfree(fnew);
err1:
	tcf_exts_destroy(&e);
	return err;
}

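/*
 * Deletion and teardown likewise defer the actual free to
 * flow_destroy_filter() via call_rcu()/kfree_rcu(), matching the RCU
 * list traversal in flow_classify().
 */
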
static int flow_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	list_del_rcu(&f->list);
	call_rcu(&f->rcu, flow_destroy_filter);
	return 0;
}

static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	rcu_assign_pointer(tp->root, head);
	return 0;
}

static bool flow_destroy(struct tcf_proto *tp, bool force)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f, *next;

	if (!force && !list_empty(&head->filters))
		return false;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		call_rcu(&f->rcu, flow_destroy_filter);
	}
	RCU_INIT_POINTER(tp->root, NULL);
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long)f;
	return 0;
}

static int flow_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct flow_filter *f = (struct flow_filter *)fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
		goto nla_put_failure;

	if (f->mask != ~0 || f->xor != 0) {
		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
			goto nla_put_failure;
	}
	if (f->rshift &&
	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
		goto nla_put_failure;
	if (f->addend &&
	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
		goto nla_put_failure;

	if (f->divisor &&
	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
		goto nla_put_failure;
	if (f->baseclass &&
	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
		goto nla_put_failure;

	if (f->perturb_period &&
	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long)f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

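/*
 * The control-path entry points above (init/change/delete/get/dump/walk)
 * run under the RTNL lock, hence rtnl_dereference(); only classify()
 * runs in the RCU-protected fast path.
 */
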
static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");
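
/*
 * Example usage (a sketch, assuming the iproute2 "flow" filter syntax,
 * which maps onto the TCA_FLOW_* attributes parsed above; device names
 * and class ids are illustrative only):
 *
 *   # FLOW_MODE_HASH: hash src/dst/proto/ports into 1024 classes under
 *   # qdisc 1:, re-keying hashrnd every 10 seconds (TCA_FLOW_PERTURB):
 *   tc filter add dev eth0 parent 1: handle 1 protocol ip prio 1 \
 *           flow hash keys src,dst,proto,proto-src,proto-dst \
 *           divisor 1024 perturb 10
 *
 *   # FLOW_MODE_MAP: map the low 8 bits of skb->mark straight to a
 *   # class id ("and" sets TCA_FLOW_MASK):
 *   tc filter add dev eth0 parent 1: handle 2 protocol ip prio 2 \
 *           flow map key mark and 0xff baseclass 1:1
 */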