1 /* 2 * net/sched/ife.c Inter-FE action based on ForCES WG InterFE LFB 3 * 4 * Refer to: 5 * draft-ietf-forces-interfelfb-03 6 * and 7 * netdev01 paper: 8 * "Distributing Linux Traffic Control Classifier-Action 9 * Subsystem" 10 * Authors: Jamal Hadi Salim and Damascene M. Joachimpillai 11 * 12 * This program is free software; you can redistribute it and/or 13 * modify it under the terms of the GNU General Public License 14 * as published by the Free Software Foundation; either version 15 * 2 of the License, or (at your option) any later version. 16 * 17 * copyright Jamal Hadi Salim (2015) 18 * 19 */ 20 21 #include <linux/types.h> 22 #include <linux/kernel.h> 23 #include <linux/string.h> 24 #include <linux/errno.h> 25 #include <linux/skbuff.h> 26 #include <linux/rtnetlink.h> 27 #include <linux/module.h> 28 #include <linux/init.h> 29 #include <net/net_namespace.h> 30 #include <net/netlink.h> 31 #include <net/pkt_sched.h> 32 #include <uapi/linux/tc_act/tc_ife.h> 33 #include <net/tc_act/tc_ife.h> 34 #include <linux/etherdevice.h> 35 36 #define IFE_TAB_MASK 15 37 38 static int ife_net_id; 39 static int max_metacnt = IFE_META_MAX + 1; 40 41 static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = { 42 [TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)}, 43 [TCA_IFE_DMAC] = { .len = ETH_ALEN}, 44 [TCA_IFE_SMAC] = { .len = ETH_ALEN}, 45 [TCA_IFE_TYPE] = { .type = NLA_U16}, 46 }; 47 48 /* Caller takes care of presenting data in network order 49 */ 50 int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval) 51 { 52 u32 *tlv = (u32 *)(skbdata); 53 u16 totlen = nla_total_size(dlen); /*alignment + hdr */ 54 char *dptr = (char *)tlv + NLA_HDRLEN; 55 u32 htlv = attrtype << 16 | totlen; 56 57 *tlv = htonl(htlv); 58 memset(dptr, 0, totlen - NLA_HDRLEN); 59 memcpy(dptr, dval, dlen); 60 61 return totlen; 62 } 63 EXPORT_SYMBOL_GPL(ife_tlv_meta_encode); 64 65 int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi) 66 { 67 if (mi->metaval) 68 return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval); 69 else 70 return nla_put(skb, mi->metaid, 0, NULL); 71 } 72 EXPORT_SYMBOL_GPL(ife_get_meta_u32); 73 74 int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi) 75 { 76 if (metaval || mi->metaval) 77 return 8; /* T+L+V == 2+2+4 */ 78 79 return 0; 80 } 81 EXPORT_SYMBOL_GPL(ife_check_meta_u32); 82 83 int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi) 84 { 85 u32 edata = metaval; 86 87 if (mi->metaval) 88 edata = *(u32 *)mi->metaval; 89 else if (metaval) 90 edata = metaval; 91 92 if (!edata) /* will not encode */ 93 return 0; 94 95 edata = htonl(edata); 96 return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata); 97 } 98 EXPORT_SYMBOL_GPL(ife_encode_meta_u32); 99 100 int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi) 101 { 102 if (mi->metaval) 103 return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval); 104 else 105 return nla_put(skb, mi->metaid, 0, NULL); 106 } 107 EXPORT_SYMBOL_GPL(ife_get_meta_u16); 108 109 int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval) 110 { 111 mi->metaval = kmemdup(metaval, sizeof(u32), GFP_KERNEL); 112 if (!mi->metaval) 113 return -ENOMEM; 114 115 return 0; 116 } 117 EXPORT_SYMBOL_GPL(ife_alloc_meta_u32); 118 119 int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval) 120 { 121 mi->metaval = kmemdup(metaval, sizeof(u16), GFP_KERNEL); 122 if (!mi->metaval) 123 return -ENOMEM; 124 125 return 0; 126 } 127 EXPORT_SYMBOL_GPL(ife_alloc_meta_u16); 128 129 void ife_release_meta_gen(struct tcf_meta_info *mi) 130 { 131 kfree(mi->metaval); 132 } 133 EXPORT_SYMBOL_GPL(ife_release_meta_gen); 134 135 int ife_validate_meta_u32(void *val, int len) 136 { 137 if (len == 4) 138 return 0; 139 140 return -EINVAL; 141 } 142 EXPORT_SYMBOL_GPL(ife_validate_meta_u32); 143 144 int ife_validate_meta_u16(void *val, int len) 145 { 146 /* length will include padding */ 147 if (len == NLA_ALIGN(2)) 148 return 0; 149 150 return -EINVAL; 151 } 152 EXPORT_SYMBOL_GPL(ife_validate_meta_u16); 153 154 static LIST_HEAD(ifeoplist); 155 static DEFINE_RWLOCK(ife_mod_lock); 156 157 static struct tcf_meta_ops *find_ife_oplist(u16 metaid) 158 { 159 struct tcf_meta_ops *o; 160 161 read_lock(&ife_mod_lock); 162 list_for_each_entry(o, &ifeoplist, list) { 163 if (o->metaid == metaid) { 164 if (!try_module_get(o->owner)) 165 o = NULL; 166 read_unlock(&ife_mod_lock); 167 return o; 168 } 169 } 170 read_unlock(&ife_mod_lock); 171 172 return NULL; 173 } 174 175 int register_ife_op(struct tcf_meta_ops *mops) 176 { 177 struct tcf_meta_ops *m; 178 179 if (!mops->metaid || !mops->metatype || !mops->name || 180 !mops->check_presence || !mops->encode || !mops->decode || 181 !mops->get || !mops->alloc) 182 return -EINVAL; 183 184 write_lock(&ife_mod_lock); 185 186 list_for_each_entry(m, &ifeoplist, list) { 187 if (m->metaid == mops->metaid || 188 (strcmp(mops->name, m->name) == 0)) { 189 write_unlock(&ife_mod_lock); 190 return -EEXIST; 191 } 192 } 193 194 if (!mops->release) 195 mops->release = ife_release_meta_gen; 196 197 list_add_tail(&mops->list, &ifeoplist); 198 write_unlock(&ife_mod_lock); 199 return 0; 200 } 201 EXPORT_SYMBOL_GPL(unregister_ife_op); 202 203 int unregister_ife_op(struct tcf_meta_ops *mops) 204 { 205 struct tcf_meta_ops *m; 206 int err = -ENOENT; 207 208 write_lock(&ife_mod_lock); 209 list_for_each_entry(m, &ifeoplist, list) { 210 if (m->metaid == mops->metaid) { 211 list_del(&mops->list); 212 err = 0; 213 break; 214 } 215 } 216 write_unlock(&ife_mod_lock); 217 218 return err; 219 } 220 EXPORT_SYMBOL_GPL(register_ife_op); 221 222 static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len) 223 { 224 int ret = 0; 225 /* XXX: unfortunately cant use nla_policy at this point 226 * because a length of 0 is valid in the case of 227 * "allow". "use" semantics do enforce for proper 228 * length and i couldve use nla_policy but it makes it hard 229 * to use it just for that.. 230 */ 231 if (ops->validate) 232 return ops->validate(val, len); 233 234 if (ops->metatype == NLA_U32) 235 ret = ife_validate_meta_u32(val, len); 236 else if (ops->metatype == NLA_U16) 237 ret = ife_validate_meta_u16(val, len); 238 239 return ret; 240 } 241 242 /* called when adding new meta information 243 * under ife->tcf_lock 244 */ 245 static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid, 246 void *val, int len) 247 { 248 struct tcf_meta_ops *ops = find_ife_oplist(metaid); 249 int ret = 0; 250 251 if (!ops) { 252 ret = -ENOENT; 253 #ifdef CONFIG_MODULES 254 spin_unlock_bh(&ife->tcf_lock); 255 rtnl_unlock(); 256 request_module("ifemeta%u", metaid); 257 rtnl_lock(); 258 spin_lock_bh(&ife->tcf_lock); 259 ops = find_ife_oplist(metaid); 260 #endif 261 } 262 263 if (ops) { 264 ret = 0; 265 if (len) 266 ret = ife_validate_metatype(ops, val, len); 267 268 module_put(ops->owner); 269 } 270 271 return ret; 272 } 273 274 /* called when adding new meta information 275 * under ife->tcf_lock 276 */ 277 static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval, 278 int len) 279 { 280 struct tcf_meta_info *mi = NULL; 281 struct tcf_meta_ops *ops = find_ife_oplist(metaid); 282 int ret = 0; 283 284 if (!ops) 285 return -ENOENT; 286 287 mi = kzalloc(sizeof(*mi), GFP_KERNEL); 288 if (!mi) { 289 /*put back what find_ife_oplist took */ 290 module_put(ops->owner); 291 return -ENOMEM; 292 } 293 294 mi->metaid = metaid; 295 mi->ops = ops; 296 if (len > 0) { 297 ret = ops->alloc(mi, metaval); 298 if (ret != 0) { 299 kfree(mi); 300 module_put(ops->owner); 301 return ret; 302 } 303 } 304 305 list_add_tail(&mi->metalist, &ife->metalist); 306 307 return ret; 308 } 309 310 static int use_all_metadata(struct tcf_ife_info *ife) 311 { 312 struct tcf_meta_ops *o; 313 int rc = 0; 314 int installed = 0; 315 316 list_for_each_entry(o, &ifeoplist, list) { 317 rc = add_metainfo(ife, o->metaid, NULL, 0); 318 if (rc == 0) 319 installed += 1; 320 } 321 322 if (installed) 323 return 0; 324 else 325 return -EINVAL; 326 } 327 328 static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife) 329 { 330 struct tcf_meta_info *e; 331 struct nlattr *nest; 332 unsigned char *b = skb_tail_pointer(skb); 333 int total_encoded = 0; 334 335 /*can only happen on decode */ 336 if (list_empty(&ife->metalist)) 337 return 0; 338 339 nest = nla_nest_start(skb, TCA_IFE_METALST); 340 if (!nest) 341 goto out_nlmsg_trim; 342 343 list_for_each_entry(e, &ife->metalist, metalist) { 344 if (!e->ops->get(skb, e)) 345 total_encoded += 1; 346 } 347 348 if (!total_encoded) 349 goto out_nlmsg_trim; 350 351 nla_nest_end(skb, nest); 352 353 return 0; 354 355 out_nlmsg_trim: 356 nlmsg_trim(skb, b); 357 return -1; 358 } 359 360 /* under ife->tcf_lock */ 361 static void _tcf_ife_cleanup(struct tc_action *a, int bind) 362 { 363 struct tcf_ife_info *ife = a->priv; 364 struct tcf_meta_info *e, *n; 365 366 list_for_each_entry_safe(e, n, &ife->metalist, metalist) { 367 module_put(e->ops->owner); 368 list_del(&e->metalist); 369 if (e->metaval) { 370 if (e->ops->release) 371 e->ops->release(e); 372 else 373 kfree(e->metaval); 374 } 375 kfree(e); 376 } 377 } 378 379 static void tcf_ife_cleanup(struct tc_action *a, int bind) 380 { 381 struct tcf_ife_info *ife = a->priv; 382 383 spin_lock_bh(&ife->tcf_lock); 384 _tcf_ife_cleanup(a, bind); 385 spin_unlock_bh(&ife->tcf_lock); 386 } 387 388 /* under ife->tcf_lock */ 389 static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb) 390 { 391 int len = 0; 392 int rc = 0; 393 int i = 0; 394 void *val; 395 396 for (i = 1; i < max_metacnt; i++) { 397 if (tb[i]) { 398 val = nla_data(tb[i]); 399 len = nla_len(tb[i]); 400 401 rc = load_metaops_and_vet(ife, i, val, len); 402 if (rc != 0) 403 return rc; 404 405 rc = add_metainfo(ife, i, val, len); 406 if (rc) 407 return rc; 408 } 409 } 410 411 return rc; 412 } 413 414 static int tcf_ife_init(struct net *net, struct nlattr *nla, 415 struct nlattr *est, struct tc_action *a, 416 int ovr, int bind) 417 { 418 struct tc_action_net *tn = net_generic(net, ife_net_id); 419 struct nlattr *tb[TCA_IFE_MAX + 1]; 420 struct nlattr *tb2[IFE_META_MAX + 1]; 421 struct tcf_ife_info *ife; 422 struct tc_ife *parm; 423 u16 ife_type = 0; 424 u8 *daddr = NULL; 425 u8 *saddr = NULL; 426 int ret = 0, exists = 0; 427 int err; 428 429 err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy); 430 if (err < 0) 431 return err; 432 433 if (!tb[TCA_IFE_PARMS]) 434 return -EINVAL; 435 436 parm = nla_data(tb[TCA_IFE_PARMS]); 437 438 exists = tcf_hash_check(tn, parm->index, a, bind); 439 if (exists && bind) 440 return 0; 441 442 if (parm->flags & IFE_ENCODE) { 443 /* Until we get issued the ethertype, we cant have 444 * a default.. 445 **/ 446 if (!tb[TCA_IFE_TYPE]) { 447 if (exists) 448 tcf_hash_release(a, bind); 449 pr_info("You MUST pass etherype for encoding\n"); 450 return -EINVAL; 451 } 452 } 453 454 if (!exists) { 455 ret = tcf_hash_create(tn, parm->index, est, a, sizeof(*ife), 456 bind, false); 457 if (ret) 458 return ret; 459 ret = ACT_P_CREATED; 460 } else { 461 tcf_hash_release(a, bind); 462 if (!ovr) 463 return -EEXIST; 464 } 465 466 ife = to_ife(a); 467 ife->flags = parm->flags; 468 469 if (parm->flags & IFE_ENCODE) { 470 ife_type = nla_get_u16(tb[TCA_IFE_TYPE]); 471 if (tb[TCA_IFE_DMAC]) 472 daddr = nla_data(tb[TCA_IFE_DMAC]); 473 if (tb[TCA_IFE_SMAC]) 474 saddr = nla_data(tb[TCA_IFE_SMAC]); 475 } 476 477 spin_lock_bh(&ife->tcf_lock); 478 ife->tcf_action = parm->action; 479 480 if (parm->flags & IFE_ENCODE) { 481 if (daddr) 482 ether_addr_copy(ife->eth_dst, daddr); 483 else 484 eth_zero_addr(ife->eth_dst); 485 486 if (saddr) 487 ether_addr_copy(ife->eth_src, saddr); 488 else 489 eth_zero_addr(ife->eth_src); 490 491 ife->eth_type = ife_type; 492 } 493 494 if (ret == ACT_P_CREATED) 495 INIT_LIST_HEAD(&ife->metalist); 496 497 if (tb[TCA_IFE_METALST]) { 498 err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST], 499 NULL); 500 if (err) { 501 metadata_parse_err: 502 if (exists) 503 tcf_hash_release(a, bind); 504 if (ret == ACT_P_CREATED) 505 _tcf_ife_cleanup(a, bind); 506 507 spin_unlock_bh(&ife->tcf_lock); 508 return err; 509 } 510 511 err = populate_metalist(ife, tb2); 512 if (err) 513 goto metadata_parse_err; 514 515 } else { 516 /* if no passed metadata allow list or passed allow-all 517 * then here we process by adding as many supported metadatum 518 * as we can. You better have at least one else we are 519 * going to bail out 520 */ 521 err = use_all_metadata(ife); 522 if (err) { 523 if (ret == ACT_P_CREATED) 524 _tcf_ife_cleanup(a, bind); 525 526 spin_unlock_bh(&ife->tcf_lock); 527 return err; 528 } 529 } 530 531 spin_unlock_bh(&ife->tcf_lock); 532 533 if (ret == ACT_P_CREATED) 534 tcf_hash_insert(tn, a); 535 536 return ret; 537 } 538 539 static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind, 540 int ref) 541 { 542 unsigned char *b = skb_tail_pointer(skb); 543 struct tcf_ife_info *ife = a->priv; 544 struct tc_ife opt = { 545 .index = ife->tcf_index, 546 .refcnt = ife->tcf_refcnt - ref, 547 .bindcnt = ife->tcf_bindcnt - bind, 548 .action = ife->tcf_action, 549 .flags = ife->flags, 550 }; 551 struct tcf_t t; 552 553 if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt)) 554 goto nla_put_failure; 555 556 t.install = jiffies_to_clock_t(jiffies - ife->tcf_tm.install); 557 t.lastuse = jiffies_to_clock_t(jiffies - ife->tcf_tm.lastuse); 558 t.expires = jiffies_to_clock_t(ife->tcf_tm.expires); 559 if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD)) 560 goto nla_put_failure; 561 562 if (!is_zero_ether_addr(ife->eth_dst)) { 563 if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, ife->eth_dst)) 564 goto nla_put_failure; 565 } 566 567 if (!is_zero_ether_addr(ife->eth_src)) { 568 if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, ife->eth_src)) 569 goto nla_put_failure; 570 } 571 572 if (nla_put(skb, TCA_IFE_TYPE, 2, &ife->eth_type)) 573 goto nla_put_failure; 574 575 if (dump_metalist(skb, ife)) { 576 /*ignore failure to dump metalist */ 577 pr_info("Failed to dump metalist\n"); 578 } 579 580 return skb->len; 581 582 nla_put_failure: 583 nlmsg_trim(skb, b); 584 return -1; 585 } 586 587 int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife, 588 u16 metaid, u16 mlen, void *mdata) 589 { 590 struct tcf_meta_info *e; 591 592 /* XXX: use hash to speed up */ 593 list_for_each_entry(e, &ife->metalist, metalist) { 594 if (metaid == e->metaid) { 595 if (e->ops) { 596 /* We check for decode presence already */ 597 return e->ops->decode(skb, mdata, mlen); 598 } 599 } 600 } 601 602 return 0; 603 } 604 605 struct ifeheadr { 606 __be16 metalen; 607 u8 tlv_data[]; 608 }; 609 610 struct meta_tlvhdr { 611 __be16 type; 612 __be16 len; 613 }; 614 615 static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a, 616 struct tcf_result *res) 617 { 618 struct tcf_ife_info *ife = a->priv; 619 int action = ife->tcf_action; 620 struct ifeheadr *ifehdr = (struct ifeheadr *)skb->data; 621 u16 ifehdrln = ifehdr->metalen; 622 struct meta_tlvhdr *tlv = (struct meta_tlvhdr *)(ifehdr->tlv_data); 623 624 spin_lock(&ife->tcf_lock); 625 bstats_update(&ife->tcf_bstats, skb); 626 ife->tcf_tm.lastuse = jiffies; 627 spin_unlock(&ife->tcf_lock); 628 629 ifehdrln = ntohs(ifehdrln); 630 if (unlikely(!pskb_may_pull(skb, ifehdrln))) { 631 spin_lock(&ife->tcf_lock); 632 ife->tcf_qstats.drops++; 633 spin_unlock(&ife->tcf_lock); 634 return TC_ACT_SHOT; 635 } 636 637 skb_set_mac_header(skb, ifehdrln); 638 __skb_pull(skb, ifehdrln); 639 skb->protocol = eth_type_trans(skb, skb->dev); 640 ifehdrln -= IFE_METAHDRLEN; 641 642 while (ifehdrln > 0) { 643 u8 *tlvdata = (u8 *)tlv; 644 u16 mtype = tlv->type; 645 u16 mlen = tlv->len; 646 647 mtype = ntohs(mtype); 648 mlen = ntohs(mlen); 649 650 if (find_decode_metaid(skb, ife, mtype, (mlen - 4), 651 (void *)(tlvdata + 4))) { 652 /* abuse overlimits to count when we receive metadata 653 * but dont have an ops for it 654 */ 655 pr_info_ratelimited("Unknown metaid %d alnlen %d\n", 656 mtype, mlen); 657 ife->tcf_qstats.overlimits++; 658 } 659 660 tlvdata += mlen; 661 ifehdrln -= mlen; 662 tlv = (struct meta_tlvhdr *)tlvdata; 663 } 664 665 skb_reset_network_header(skb); 666 return action; 667 } 668 669 /*XXX: check if we can do this at install time instead of current 670 * send data path 671 **/ 672 static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife) 673 { 674 struct tcf_meta_info *e, *n; 675 int tot_run_sz = 0, run_sz = 0; 676 677 list_for_each_entry_safe(e, n, &ife->metalist, metalist) { 678 if (e->ops->check_presence) { 679 run_sz = e->ops->check_presence(skb, e); 680 tot_run_sz += run_sz; 681 } 682 } 683 684 return tot_run_sz; 685 } 686 687 static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a, 688 struct tcf_result *res) 689 { 690 struct tcf_ife_info *ife = a->priv; 691 int action = ife->tcf_action; 692 struct ethhdr *oethh; /* outer ether header */ 693 struct ethhdr *iethh; /* inner eth header */ 694 struct tcf_meta_info *e; 695 /* 696 OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA 697 where ORIGDATA = original ethernet header ... 698 */ 699 u16 metalen = ife_get_sz(skb, ife); 700 int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN; 701 unsigned int skboff = skb->dev->hard_header_len; 702 u32 at = G_TC_AT(skb->tc_verd); 703 int new_len = skb->len + hdrm; 704 bool exceed_mtu = false; 705 int err; 706 707 if (at & AT_EGRESS) { 708 if (new_len > skb->dev->mtu) 709 exceed_mtu = true; 710 } 711 712 spin_lock(&ife->tcf_lock); 713 bstats_update(&ife->tcf_bstats, skb); 714 ife->tcf_tm.lastuse = jiffies; 715 716 if (!metalen) { /* no metadata to send */ 717 /* abuse overlimits to count when we allow packet 718 * with no metadata 719 */ 720 ife->tcf_qstats.overlimits++; 721 spin_unlock(&ife->tcf_lock); 722 return action; 723 } 724 /* could be stupid policy setup or mtu config 725 * so lets be conservative.. */ 726 if ((action == TC_ACT_SHOT) || exceed_mtu) { 727 ife->tcf_qstats.drops++; 728 spin_unlock(&ife->tcf_lock); 729 return TC_ACT_SHOT; 730 } 731 732 iethh = eth_hdr(skb); 733 734 err = skb_cow_head(skb, hdrm); 735 if (unlikely(err)) { 736 ife->tcf_qstats.drops++; 737 spin_unlock(&ife->tcf_lock); 738 return TC_ACT_SHOT; 739 } 740 741 if (!(at & AT_EGRESS)) 742 skb_push(skb, skb->dev->hard_header_len); 743 744 __skb_push(skb, hdrm); 745 memcpy(skb->data, iethh, skb->mac_len); 746 skb_reset_mac_header(skb); 747 oethh = eth_hdr(skb); 748 749 /*total metadata length */ 750 metalen += IFE_METAHDRLEN; 751 metalen = htons(metalen); 752 memcpy((skb->data + skboff), &metalen, IFE_METAHDRLEN); 753 skboff += IFE_METAHDRLEN; 754 755 /* XXX: we dont have a clever way of telling encode to 756 * not repeat some of the computations that are done by 757 * ops->presence_check... 758 */ 759 list_for_each_entry(e, &ife->metalist, metalist) { 760 if (e->ops->encode) { 761 err = e->ops->encode(skb, (void *)(skb->data + skboff), 762 e); 763 } 764 if (err < 0) { 765 /* too corrupt to keep around if overwritten */ 766 ife->tcf_qstats.drops++; 767 spin_unlock(&ife->tcf_lock); 768 return TC_ACT_SHOT; 769 } 770 skboff += err; 771 } 772 773 if (!is_zero_ether_addr(ife->eth_src)) 774 ether_addr_copy(oethh->h_source, ife->eth_src); 775 else 776 ether_addr_copy(oethh->h_source, iethh->h_source); 777 if (!is_zero_ether_addr(ife->eth_dst)) 778 ether_addr_copy(oethh->h_dest, ife->eth_dst); 779 else 780 ether_addr_copy(oethh->h_dest, iethh->h_dest); 781 oethh->h_proto = htons(ife->eth_type); 782 783 if (!(at & AT_EGRESS)) 784 skb_pull(skb, skb->dev->hard_header_len); 785 786 spin_unlock(&ife->tcf_lock); 787 788 return action; 789 } 790 791 static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a, 792 struct tcf_result *res) 793 { 794 struct tcf_ife_info *ife = a->priv; 795 796 if (ife->flags & IFE_ENCODE) 797 return tcf_ife_encode(skb, a, res); 798 799 if (!(ife->flags & IFE_ENCODE)) 800 return tcf_ife_decode(skb, a, res); 801 802 pr_info_ratelimited("unknown failure(policy neither de/encode\n"); 803 spin_lock(&ife->tcf_lock); 804 bstats_update(&ife->tcf_bstats, skb); 805 ife->tcf_tm.lastuse = jiffies; 806 ife->tcf_qstats.drops++; 807 spin_unlock(&ife->tcf_lock); 808 809 return TC_ACT_SHOT; 810 } 811 812 static int tcf_ife_walker(struct net *net, struct sk_buff *skb, 813 struct netlink_callback *cb, int type, 814 struct tc_action *a) 815 { 816 struct tc_action_net *tn = net_generic(net, ife_net_id); 817 818 return tcf_generic_walker(tn, skb, cb, type, a); 819 } 820 821 static int tcf_ife_search(struct net *net, struct tc_action *a, u32 index) 822 { 823 struct tc_action_net *tn = net_generic(net, ife_net_id); 824 825 return tcf_hash_search(tn, a, index); 826 } 827 828 static struct tc_action_ops act_ife_ops = { 829 .kind = "ife", 830 .type = TCA_ACT_IFE, 831 .owner = THIS_MODULE, 832 .act = tcf_ife_act, 833 .dump = tcf_ife_dump, 834 .cleanup = tcf_ife_cleanup, 835 .init = tcf_ife_init, 836 .walk = tcf_ife_walker, 837 .lookup = tcf_ife_search, 838 }; 839 840 static __net_init int ife_init_net(struct net *net) 841 { 842 struct tc_action_net *tn = net_generic(net, ife_net_id); 843 844 return tc_action_net_init(tn, &act_ife_ops, IFE_TAB_MASK); 845 } 846 847 static void __net_exit ife_exit_net(struct net *net) 848 { 849 struct tc_action_net *tn = net_generic(net, ife_net_id); 850 851 tc_action_net_exit(tn); 852 } 853 854 static struct pernet_operations ife_net_ops = { 855 .init = ife_init_net, 856 .exit = ife_exit_net, 857 .id = &ife_net_id, 858 .size = sizeof(struct tc_action_net), 859 }; 860 861 static int __init ife_init_module(void) 862 { 863 return tcf_register_action(&act_ife_ops, &ife_net_ops); 864 } 865 866 static void __exit ife_cleanup_module(void) 867 { 868 tcf_unregister_action(&act_ife_ops, &ife_net_ops); 869 } 870 871 module_init(ife_init_module); 872 module_exit(ife_cleanup_module); 873 874 MODULE_AUTHOR("Jamal Hadi Salim(2015)"); 875 MODULE_DESCRIPTION("Inter-FE LFB action"); 876 MODULE_LICENSE("GPL"); 877