/*
 * net/sched/ife.c	Inter-FE action based on ForCES WG InterFE LFB
 *
 * Refer to:
 *		draft-ietf-forces-interfelfb-03
 *	and
 *		netdev01 paper:
 *		"Distributing Linux Traffic Control Classifier-Action
 *		Subsystem"
 *		Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * copyright Jamal Hadi Salim (2015)
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <uapi/linux/tc_act/tc_ife.h>
#include <net/tc_act/tc_ife.h>
#include <linux/etherdevice.h>
#include <net/ife.h>

#define IFE_TAB_MASK 15

static unsigned int ife_net_id;
static int max_metacnt = IFE_META_MAX + 1;
static struct tc_action_ops act_ife_ops;

static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
	[TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)},
	[TCA_IFE_DMAC] = { .len = ETH_ALEN},
	[TCA_IFE_SMAC] = { .len = ETH_ALEN},
	[TCA_IFE_TYPE] = { .type = NLA_U16},
};

int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
{
	u16 edata = 0;

	if (mi->metaval)
		edata = *(u16 *)mi->metaval;
	else if (metaval)
		edata = metaval;

	if (!edata) /* will not encode */
		return 0;

	edata = htons(edata);
	return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u16);

int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
{
	if (mi->metaval)
		return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
	else
		return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u32);

int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
{
	if (metaval || mi->metaval)
		return 8; /* T+L+V == 2+2+4 */

	return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u32);

int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi)
{
	if (metaval || mi->metaval)
		return 8; /* T+L+(V) == 2+2+(2+2bytepad) */

	return 0;
}
EXPORT_SYMBOL_GPL(ife_check_meta_u16);

int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
{
	u32 edata = metaval;

	if (mi->metaval)
		edata = *(u32 *)mi->metaval;
	else if (metaval)
		edata = metaval;

	if (!edata) /* will not encode */
		return 0;

	edata = htonl(edata);
	return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
}
EXPORT_SYMBOL_GPL(ife_encode_meta_u32);

int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
{
	if (mi->metaval)
		return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
	else
		return nla_put(skb, mi->metaid, 0, NULL);
}
EXPORT_SYMBOL_GPL(ife_get_meta_u16);

int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
	mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
	if (!mi->metaval)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);

int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
{
	mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
	if (!mi->metaval)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);

void ife_release_meta_gen(struct tcf_meta_info *mi)
{
	kfree(mi->metaval);
}
EXPORT_SYMBOL_GPL(ife_release_meta_gen);

int ife_validate_meta_u32(void *val, int len)
{
	if (len == sizeof(u32))
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u32);

int ife_validate_meta_u16(void *val, int len)
{
	/* length will not include padding */
	if (len == sizeof(u16))
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(ife_validate_meta_u16);

static LIST_HEAD(ifeoplist);
static DEFINE_RWLOCK(ife_mod_lock);

static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
{
	struct tcf_meta_ops *o;

	read_lock(&ife_mod_lock);
	list_for_each_entry(o, &ifeoplist, list) {
		if (o->metaid == metaid) {
			if (!try_module_get(o->owner))
				o = NULL;
			read_unlock(&ife_mod_lock);
			return o;
		}
	}
	read_unlock(&ife_mod_lock);

	return NULL;
}

int register_ife_op(struct tcf_meta_ops *mops)
{
	struct tcf_meta_ops *m;

	if (!mops->metaid || !mops->metatype || !mops->name ||
	    !mops->check_presence || !mops->encode || !mops->decode ||
	    !mops->get || !mops->alloc)
		return -EINVAL;

	write_lock(&ife_mod_lock);

	list_for_each_entry(m, &ifeoplist, list) {
		if (m->metaid == mops->metaid ||
		    (strcmp(mops->name, m->name) == 0)) {
			write_unlock(&ife_mod_lock);
			return -EEXIST;
		}
	}

	if (!mops->release)
		mops->release = ife_release_meta_gen;

	list_add_tail(&mops->list, &ifeoplist);
	write_unlock(&ife_mod_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(register_ife_op);

int unregister_ife_op(struct tcf_meta_ops *mops)
{
	struct tcf_meta_ops *m;
	int err = -ENOENT;

	write_lock(&ife_mod_lock);
	list_for_each_entry(m, &ifeoplist, list) {
		if (m->metaid == mops->metaid) {
			list_del(&mops->list);
			err = 0;
			break;
		}
	}
	write_unlock(&ife_mod_lock);

	return err;
}
EXPORT_SYMBOL_GPL(unregister_ife_op);
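
/* Hedged usage sketch, not compiled here: a metadata module is expected to
 * fill a struct tcf_meta_ops and hand it to register_ife_op(), typically
 * wrapping the exported u32/u16 helpers above.  Everything named "example_*"
 * and the metaid EXAMPLE_METAID below are hypothetical placeholders chosen
 * for illustration only (the metadatum shown simply carries skb->mark as a
 * network-order u32).
 *
 *	static int example_check(struct sk_buff *skb, struct tcf_meta_info *e)
 *	{
 *		return ife_check_meta_u32(skb->mark, e);
 *	}
 *
 *	static int example_encode(struct sk_buff *skb, void *skbdata,
 *				  struct tcf_meta_info *e)
 *	{
 *		return ife_encode_meta_u32(skb->mark, skbdata, e);
 *	}
 *
 *	static int example_decode(struct sk_buff *skb, void *data, u16 len)
 *	{
 *		skb->mark = ntohl(*(u32 *)data);
 *		return 0;
 *	}
 *
 *	static struct tcf_meta_ops example_meta_ops = {
 *		.metaid		= EXAMPLE_METAID,
 *		.metatype	= NLA_U32,
 *		.name		= "example",
 *		.check_presence	= example_check,
 *		.encode		= example_encode,
 *		.decode		= example_decode,
 *		.get		= ife_get_meta_u32,
 *		.alloc		= ife_alloc_meta_u32,
 *		.release	= ife_release_meta_gen,
 *		.owner		= THIS_MODULE,
 *	};
 *
 * register_ife_op(&example_meta_ops) would then go in the metadata module's
 * init path and unregister_ife_op(&example_meta_ops) in its exit path.
 */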

static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
{
	int ret = 0;
	/* XXX: unfortunately we can't use nla_policy at this point
	 * because a length of 0 is valid in the case of "allow".
	 * "use" semantics do enforce a proper length; nla_policy could
	 * have covered that, but it is awkward to pull it in just for
	 * this one check.
	 */
	if (ops->validate)
		return ops->validate(val, len);

	if (ops->metatype == NLA_U32)
		ret = ife_validate_meta_u32(val, len);
	else if (ops->metatype == NLA_U16)
		ret = ife_validate_meta_u16(val, len);

	return ret;
}

/* called when adding new meta information
 * under ife->tcf_lock for existing action
 */
static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
				void *val, int len, bool exists)
{
	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
	int ret = 0;

	if (!ops) {
		ret = -ENOENT;
#ifdef CONFIG_MODULES
		if (exists)
			spin_unlock_bh(&ife->tcf_lock);
		rtnl_unlock();
		request_module("ifemeta%u", metaid);
		rtnl_lock();
		if (exists)
			spin_lock_bh(&ife->tcf_lock);
		ops = find_ife_oplist(metaid);
#endif
	}

	if (ops) {
		ret = 0;
		if (len)
			ret = ife_validate_metatype(ops, val, len);

		module_put(ops->owner);
	}

	return ret;
}

/* called when adding new meta information
 * under ife->tcf_lock for existing action
 */
static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
			int len, bool atomic)
{
	struct tcf_meta_info *mi = NULL;
	struct tcf_meta_ops *ops = find_ife_oplist(metaid);
	int ret = 0;

	if (!ops)
		return -ENOENT;

	mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (!mi) {
		/* put back what find_ife_oplist took */
		module_put(ops->owner);
		return -ENOMEM;
	}

	mi->metaid = metaid;
	mi->ops = ops;
	if (len > 0) {
		ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
		if (ret != 0) {
			kfree(mi);
			module_put(ops->owner);
			return ret;
		}
	}

	list_add_tail(&mi->metalist, &ife->metalist);

	return ret;
}

static int use_all_metadata(struct tcf_ife_info *ife)
{
	struct tcf_meta_ops *o;
	int rc = 0;
	int installed = 0;

	read_lock(&ife_mod_lock);
	list_for_each_entry(o, &ifeoplist, list) {
		rc = add_metainfo(ife, o->metaid, NULL, 0, true);
		if (rc == 0)
			installed += 1;
	}
	read_unlock(&ife_mod_lock);

	if (installed)
		return 0;
	else
		return -EINVAL;
}

static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
{
	struct tcf_meta_info *e;
	struct nlattr *nest;
	unsigned char *b = skb_tail_pointer(skb);
	int total_encoded = 0;

	/* can only happen on decode */
	if (list_empty(&ife->metalist))
		return 0;

	nest = nla_nest_start(skb, TCA_IFE_METALST);
	if (!nest)
		goto out_nlmsg_trim;

	list_for_each_entry(e, &ife->metalist, metalist) {
		if (!e->ops->get(skb, e))
			total_encoded += 1;
	}

	if (!total_encoded)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	return 0;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

/* under ife->tcf_lock */
static void _tcf_ife_cleanup(struct tc_action *a, int bind)
{
	struct tcf_ife_info *ife = to_ife(a);
	struct tcf_meta_info *e, *n;

	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
		module_put(e->ops->owner);
		list_del(&e->metalist);
		if (e->metaval) {
			if (e->ops->release)
				e->ops->release(e);
			else
				kfree(e->metaval);
		}
		kfree(e);
	}
}

static void tcf_ife_cleanup(struct tc_action *a, int bind)
{
	struct tcf_ife_info *ife = to_ife(a);

	spin_lock_bh(&ife->tcf_lock);
	_tcf_ife_cleanup(a, bind);
	spin_unlock_bh(&ife->tcf_lock);
}

/* under ife->tcf_lock for existing action */
static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
			     bool exists)
{
	int len = 0;
	int rc = 0;
	int i = 0;
	void *val;

	for (i = 1; i < max_metacnt; i++) {
		if (tb[i]) {
			val = nla_data(tb[i]);
			len = nla_len(tb[i]);

			rc = load_metaops_and_vet(ife, i, val, len, exists);
			if (rc != 0)
				return rc;

			rc = add_metainfo(ife, i, val, len, exists);
			if (rc)
				return rc;
		}
	}

	return rc;
}

static int tcf_ife_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **a,
			int ovr, int bind)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);
	struct nlattr *tb[TCA_IFE_MAX + 1];
	struct nlattr *tb2[IFE_META_MAX + 1];
	struct tcf_ife_info *ife;
	struct tc_ife *parm;
	u16 ife_type = 0;
	u8 *daddr = NULL;
	u8 *saddr = NULL;
	bool exists = false;
	int ret = 0;
	int err;

	err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy);
	if (err < 0)
		return err;

	if (!tb[TCA_IFE_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_IFE_PARMS]);

	exists = tcf_hash_check(tn, parm->index, a, bind);
	if (exists && bind)
		return 0;

	if (parm->flags & IFE_ENCODE) {
		/* Until we get issued the ethertype, we can't have
		 * a default..
		 */
		if (!tb[TCA_IFE_TYPE]) {
			if (exists)
				tcf_hash_release(*a, bind);
			pr_info("You MUST pass an ethertype for encoding\n");
			return -EINVAL;
		}
	}

	if (!exists) {
		ret = tcf_hash_create(tn, parm->index, est, a, &act_ife_ops,
				      bind, false);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		tcf_hash_release(*a, bind);
		if (!ovr)
			return -EEXIST;
	}

	ife = to_ife(*a);
	ife->flags = parm->flags;

	if (parm->flags & IFE_ENCODE) {
		ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
		if (tb[TCA_IFE_DMAC])
			daddr = nla_data(tb[TCA_IFE_DMAC]);
		if (tb[TCA_IFE_SMAC])
			saddr = nla_data(tb[TCA_IFE_SMAC]);
	}

	if (exists)
		spin_lock_bh(&ife->tcf_lock);
	ife->tcf_action = parm->action;

	if (parm->flags & IFE_ENCODE) {
		if (daddr)
			ether_addr_copy(ife->eth_dst, daddr);
		else
			eth_zero_addr(ife->eth_dst);

		if (saddr)
			ether_addr_copy(ife->eth_src, saddr);
		else
			eth_zero_addr(ife->eth_src);

		ife->eth_type = ife_type;
	}

	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&ife->metalist);

	if (tb[TCA_IFE_METALST]) {
		err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST],
				       NULL);
		if (err) {
metadata_parse_err:
			if (exists)
				tcf_hash_release(*a, bind);
			if (ret == ACT_P_CREATED)
				_tcf_ife_cleanup(*a, bind);

			if (exists)
				spin_unlock_bh(&ife->tcf_lock);
			return err;
		}

		err = populate_metalist(ife, tb2, exists);
		if (err)
			goto metadata_parse_err;

	} else {
		/* if no metadata allow list or an allow-all was passed,
		 * add as many of the supported metadata as we can.
		 * At least one must be installed, otherwise we bail out.
		 */
		err = use_all_metadata(ife);
		if (err) {
			if (ret == ACT_P_CREATED)
				_tcf_ife_cleanup(*a, bind);

			if (exists)
				spin_unlock_bh(&ife->tcf_lock);
			return err;
		}
	}

	if (exists)
		spin_unlock_bh(&ife->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(tn, *a);

	return ret;
}

static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ife_info *ife = to_ife(a);
	struct tc_ife opt = {
		.index = ife->tcf_index,
		.refcnt = ife->tcf_refcnt - ref,
		.bindcnt = ife->tcf_bindcnt - bind,
		.action = ife->tcf_action,
		.flags = ife->flags,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &ife->tcf_tm);
	if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
		goto nla_put_failure;

	if (!is_zero_ether_addr(ife->eth_dst)) {
		if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, ife->eth_dst))
			goto nla_put_failure;
	}

	if (!is_zero_ether_addr(ife->eth_src)) {
		if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, ife->eth_src))
			goto nla_put_failure;
	}

	if (nla_put(skb, TCA_IFE_TYPE, 2, &ife->eth_type))
		goto nla_put_failure;

	if (dump_metalist(skb, ife)) {
		/* ignore failure to dump metalist */
		pr_info("Failed to dump metalist\n");
	}

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
		       u16 metaid, u16 mlen, void *mdata)
{
	struct tcf_meta_info *e;

	/* XXX: use hash to speed up */
	list_for_each_entry(e, &ife->metalist, metalist) {
		if (metaid == e->metaid) {
			if (e->ops) {
				/* We check for decode presence already */
				return e->ops->decode(skb, mdata, mlen);
			}
		}
	}

	return 0;
}

static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_ife_info *ife = to_ife(a);
	int action = ife->tcf_action;
	u8 *ifehdr_end;
	u8 *tlv_data;
	u16 metalen;

	spin_lock(&ife->tcf_lock);
	bstats_update(&ife->tcf_bstats, skb);
	tcf_lastuse_update(&ife->tcf_tm);
	spin_unlock(&ife->tcf_lock);

	if (skb_at_tc_ingress(skb))
		skb_push(skb, skb->dev->hard_header_len);

	tlv_data = ife_decode(skb, &metalen);
	if (unlikely(!tlv_data)) {
		spin_lock(&ife->tcf_lock);
		ife->tcf_qstats.drops++;
		spin_unlock(&ife->tcf_lock);
		return TC_ACT_SHOT;
	}

	ifehdr_end = tlv_data + metalen;
	for (; tlv_data < ifehdr_end; tlv_data = ife_tlv_meta_next(tlv_data)) {
		u8 *curr_data;
		u16 mtype;
		u16 dlen;

		curr_data = ife_tlv_meta_decode(tlv_data, &mtype, &dlen, NULL);

		if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
			/* abuse overlimits to count when we receive metadata
			 * but don't have an ops for it
			 */
			pr_info_ratelimited("Unknown metaid %d dlen %d\n",
					    mtype, dlen);
			ife->tcf_qstats.overlimits++;
		}
	}

	if (WARN_ON(tlv_data != ifehdr_end)) {
		spin_lock(&ife->tcf_lock);
		ife->tcf_qstats.drops++;
		spin_unlock(&ife->tcf_lock);
		return TC_ACT_SHOT;
	}

	skb->protocol = eth_type_trans(skb, skb->dev);
	skb_reset_network_header(skb);

	return action;
}

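/* Worked example for the encode side below (illustrative only; the 14-byte
 * hard_header_len assumes a plain Ethernet device):
 *
 *	encoded frame:  OUTERHDR : TOTMETALEN : {TLVHDR:Metadatum}.. : ORIGDATA
 *
 * With a single present u32 metadatum, check_presence() reports 8 bytes
 * (2 type + 2 length + 4 value), so ife_get_sz() returns metalen = 8 and
 * tcf_ife_encode() needs
 *
 *	hdrm = metalen + hard_header_len + IFE_METAHDRLEN
 *	     = 8 + 14 + IFE_METAHDRLEN
 *
 * extra bytes of room; when not running at tc ingress, the packet is
 * dropped if skb->len + hdrm exceeds the device MTU.
 */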
/* XXX: check if we can do this at install time instead of in the current
 * send data path
 */
static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
{
	struct tcf_meta_info *e, *n;
	int tot_run_sz = 0, run_sz = 0;

	list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
		if (e->ops->check_presence) {
			run_sz = e->ops->check_presence(skb, e);
			tot_run_sz += run_sz;
		}
	}

	return tot_run_sz;
}

static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_ife_info *ife = to_ife(a);
	int action = ife->tcf_action;
	struct ethhdr *oethh;	/* outer ether header */
	struct tcf_meta_info *e;
	/* OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
	 * where ORIGDATA = original ethernet header ...
	 */
	u16 metalen = ife_get_sz(skb, ife);
	int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
	unsigned int skboff = 0;
	int new_len = skb->len + hdrm;
	bool exceed_mtu = false;
	void *ife_meta;
	int err = 0;

	if (!skb_at_tc_ingress(skb)) {
		if (new_len > skb->dev->mtu)
			exceed_mtu = true;
	}

	spin_lock(&ife->tcf_lock);
	bstats_update(&ife->tcf_bstats, skb);
	tcf_lastuse_update(&ife->tcf_tm);

	if (!metalen) {		/* no metadata to send */
		/* abuse overlimits to count when we allow packet
		 * with no metadata
		 */
		ife->tcf_qstats.overlimits++;
		spin_unlock(&ife->tcf_lock);
		return action;
	}
	/* could be stupid policy setup or mtu config
	 * so let's be conservative..
	 */
	if ((action == TC_ACT_SHOT) || exceed_mtu) {
		ife->tcf_qstats.drops++;
		spin_unlock(&ife->tcf_lock);
		return TC_ACT_SHOT;
	}

	if (skb_at_tc_ingress(skb))
		skb_push(skb, skb->dev->hard_header_len);

	ife_meta = ife_encode(skb, metalen);

	/* XXX: we don't have a clever way of telling encode to
	 * not repeat some of the computations that are done by
	 * ops->check_presence...
	 */
	list_for_each_entry(e, &ife->metalist, metalist) {
		if (e->ops->encode) {
			err = e->ops->encode(skb, (void *)(ife_meta + skboff),
					     e);
		}
		if (err < 0) {
			/* too corrupt to keep around if overwritten */
			ife->tcf_qstats.drops++;
			spin_unlock(&ife->tcf_lock);
			return TC_ACT_SHOT;
		}
		skboff += err;
	}
	oethh = (struct ethhdr *)skb->data;

	if (!is_zero_ether_addr(ife->eth_src))
		ether_addr_copy(oethh->h_source, ife->eth_src);
	if (!is_zero_ether_addr(ife->eth_dst))
		ether_addr_copy(oethh->h_dest, ife->eth_dst);
	oethh->h_proto = htons(ife->eth_type);

	if (skb_at_tc_ingress(skb))
		skb_pull(skb, skb->dev->hard_header_len);

	spin_unlock(&ife->tcf_lock);

	return action;
}

static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
		       struct tcf_result *res)
{
	struct tcf_ife_info *ife = to_ife(a);

	if (ife->flags & IFE_ENCODE)
		return tcf_ife_encode(skb, a, res);

	if (!(ife->flags & IFE_ENCODE))
		return tcf_ife_decode(skb, a, res);

	pr_info_ratelimited("unknown failure (policy is neither encode nor decode)\n");
	spin_lock(&ife->tcf_lock);
	bstats_update(&ife->tcf_bstats, skb);
	tcf_lastuse_update(&ife->tcf_tm);
	ife->tcf_qstats.drops++;
	spin_unlock(&ife->tcf_lock);

	return TC_ACT_SHOT;
}

static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);

	return tcf_hash_search(tn, a, index);
}

static struct tc_action_ops act_ife_ops = {
	.kind = "ife",
	.type = TCA_ACT_IFE,
	.owner = THIS_MODULE,
	.act = tcf_ife_act,
	.dump = tcf_ife_dump,
	.cleanup = tcf_ife_cleanup,
	.init = tcf_ife_init,
	.walk = tcf_ife_walker,
	.lookup = tcf_ife_search,
	.size = sizeof(struct tcf_ife_info),
};

static __net_init int ife_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);

	return tc_action_net_init(tn, &act_ife_ops, IFE_TAB_MASK);
}

static void __net_exit ife_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, ife_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations ife_net_ops = {
	.init = ife_init_net,
	.exit = ife_exit_net,
	.id   = &ife_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init ife_init_module(void)
{
	return tcf_register_action(&act_ife_ops, &ife_net_ops);
}

static void __exit ife_cleanup_module(void)
{
	tcf_unregister_action(&act_ife_ops, &ife_net_ops);
}

module_init(ife_init_module);
module_exit(ife_cleanup_module);

MODULE_AUTHOR("Jamal Hadi Salim (2015)");
MODULE_DESCRIPTION("Inter-FE LFB action");
MODULE_LICENSE("GPL");
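
/* Illustrative configuration sketch (an assumption about the companion
 * iproute2 "ife" action support, not something this file defines): an
 * encoder on the sending side and a decoder on the receiving side might be
 * set up roughly as
 *
 *	tc filter add dev eth0 parent 1: protocol ip u32 \
 *		match ip dst 192.168.1.2/32 flowid 1:2 \
 *		action ife encode type 0xdead allow mark \
 *		dst 02:15:15:15:15:15
 *
 *	tc qdisc add dev eth0 handle ffff: ingress
 *	tc filter add dev eth0 parent ffff: protocol 0xdead u32 \
 *		match u32 0 0 flowid 1:1 action ife decode reclassify
 *
 * The exact option syntax and the ethertype used for matching are
 * illustrative; consult the tc-ife documentation of your iproute2 version.
 */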