/*
 * Checksum updating actions
 *
 * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <linux/netlink.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>

#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/igmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>

#include <net/act_api.h>

#include <linux/tc_act/tc_csum.h>
#include <net/tc_act/tc_csum.h>

#define CSUM_TAB_MASK 15

static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
	[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};

static int csum_net_id;
static struct tc_action_ops act_csum_ops;

static int tcf_csum_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action **a, int ovr,
			 int bind)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);
	struct nlattr *tb[TCA_CSUM_MAX + 1];
	struct tc_csum *parm;
	struct tcf_csum *p;
	int ret = 0, err;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CSUM_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_CSUM_PARMS]);

	if (!tcf_hash_check(tn, parm->index, a, bind)) {
		ret = tcf_hash_create(tn, parm->index, est, a,
				      &act_csum_ops, bind, false);
		if (ret)
			return ret;
		ret = ACT_P_CREATED;
	} else {
		if (bind) /* don't override defaults */
			return 0;
		tcf_hash_release(*a, bind);
		if (!ovr)
			return -EEXIST;
	}

	p = to_tcf_csum(*a);
	spin_lock_bh(&p->tcf_lock);
	p->tcf_action = parm->action;
	p->update_flags = parm->update_flags;
	spin_unlock_bh(&p->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(tn, *a);

	return ret;
}

/**
 * tcf_csum_skb_nextlayer - Get next layer pointer
 * @skb: sk_buff to use
 * @ihl: previous summed headers length
 * @ipl: complete packet length
 * @jhl: next header length
 *
 * Check that the expected next-layer header is available in the specified
 * sk_buff. Return a pointer to the next layer if it is, NULL otherwise.
 */
static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
				    unsigned int ihl, unsigned int ipl,
				    unsigned int jhl)
{
	int ntkoff = skb_network_offset(skb);
	int hl = ihl + jhl;

	if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
	    skb_try_make_writable(skb, hl + ntkoff))
		return NULL;
	else
		return (void *)(skb_network_header(skb) + ihl);
}
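
/*
 * The per-protocol helpers below all follow the same pattern: locate the
 * transport header with tcf_csum_skb_nextlayer(), zero the old checksum
 * field, recompute the sum over the transport header plus payload with
 * csum_partial(), and write the folded result back (combined with the
 * pseudo-header for TCP, UDP and ICMPv6). Each returns 1 on success (or
 * when the packet is deliberately left alone) and 0 when the headers
 * cannot be pulled or made writable.
 */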

static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmphdr *icmph;

	icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
	if (icmph == NULL)
		return 0;

	icmph->checksum = 0;
	skb->csum = csum_partial(icmph, ipl - ihl, 0);
	icmph->checksum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
			      unsigned int ihl, unsigned int ipl)
{
	struct igmphdr *igmph;

	igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
	if (igmph == NULL)
		return 0;

	igmph->csum = 0;
	skb->csum = csum_partial(igmph, ipl - ihl, 0);
	igmph->csum = csum_fold(skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_icmp(struct sk_buff *skb, unsigned int ihl,
			      unsigned int ipl)
{
	struct icmp6hdr *icmp6h;
	const struct ipv6hdr *ip6h;

	icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
	if (icmp6h == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	icmp6h->icmp6_cksum = 0;
	skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
	icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					      ipl - ihl, IPPROTO_ICMPV6,
					      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv4_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct iphdr *iph;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	iph = ip_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = tcp_v4_check(ipl - ihl,
				   iph->saddr, iph->daddr, skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}

static int tcf_csum_ipv6_tcp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl)
{
	struct tcphdr *tcph;
	const struct ipv6hdr *ip6h;

	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
	if (tcph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	tcph->check = 0;
	skb->csum = csum_partial(tcph, ipl - ihl, 0);
	tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ipl - ihl, IPPROTO_TCP,
				      skb->csum);

	skb->ip_summed = CHECKSUM_NONE;

	return 1;
}
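
/*
 * A transmitted UDP checksum of zero means "no checksum" (RFC 768), so a
 * recomputed sum that folds to zero must be sent as CSUM_MANGLED_0
 * (0xffff) instead; both UDP helpers below preserve that convention.
 */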

static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct iphdr *iph;
	u16 ul;

	/*
	 * Support both UDP and UDP-Lite checksum algorithms. Don't use
	 * udph->len as the real datagram length without a protocol check:
	 * UDP-Lite reuses that field for the checksum coverage. Use
	 * iph->tot_len, or just ipl, instead.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	iph = ip_hdr(skb);
	ul = ntohs(udph->len);

	if (udplite || udph->check) {

		udph->check = 0;

		if (udplite) {
			if (ul == 0)
				skb->csum = csum_partial(udph, ipl - ihl, 0);
			else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
				skb->csum = csum_partial(udph, ul, 0);
			else
				goto ignore_obscure_skb;
		} else {
			if (ul != ipl - ihl)
				goto ignore_obscure_skb;

			skb->csum = csum_partial(udph, ul, 0);
		}

		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						ul, iph->protocol,
						skb->csum);

		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl,
			     unsigned int ipl, int udplite)
{
	struct udphdr *udph;
	const struct ipv6hdr *ip6h;
	u16 ul;

	/*
	 * Support both UDP and UDP-Lite checksum algorithms. Don't use
	 * udph->len as the real datagram length without a protocol check:
	 * UDP-Lite reuses that field for the checksum coverage. Use
	 * ip6h->payload_len + sizeof(*ip6h) ..., or just ipl, instead.
	 */

	udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
	if (udph == NULL)
		return 0;

	ip6h = ipv6_hdr(skb);
	ul = ntohs(udph->len);

	udph->check = 0;

	if (udplite) {
		if (ul == 0)
			skb->csum = csum_partial(udph, ipl - ihl, 0);

		else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
			skb->csum = csum_partial(udph, ul, 0);

		else
			goto ignore_obscure_skb;
	} else {
		if (ul != ipl - ihl)
			goto ignore_obscure_skb;

		skb->csum = csum_partial(udph, ul, 0);
	}

	udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
				      udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
				      skb->csum);

	if (!udph->check)
		udph->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_NONE;

ignore_obscure_skb:
	return 1;
}

static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	const struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	/* Non-initial fragments carry no transport header: match no case. */
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
			goto fail;

		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}
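
/*
 * A hop-by-hop options header may carry a jumbo payload option that
 * overrides the payload length from the fixed IPv6 header; walk the TLVs
 * so the checksum is computed over the right number of bytes.
 */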
static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl,
				 unsigned int *pl)
{
	int off, len, optlen;
	unsigned char *xh = (void *)ip6xh;

	off = sizeof(*ip6xh);
	len = ixhl - off;

	while (len > 1) {
		switch (xh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		case IPV6_TLV_JUMBO:
			optlen = xh[off + 1] + 2;
			if (optlen != 6 || len < 6 || (off & 3) != 2)
				/* wrong jumbo option length/alignment */
				return 0;
			*pl = ntohl(*(__be32 *)(xh + off + 2));
			goto done;
		default:
			optlen = xh[off + 1] + 2;
			if (optlen > len)
				/* ignore obscure options */
				goto done;
			break;
		}
		off += optlen;
		len -= optlen;
	}

done:
	return 1;
}

static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
{
	struct ipv6hdr *ip6h;
	struct ipv6_opt_hdr *ip6xh;
	unsigned int hl, ixhl;
	unsigned int pl;
	int ntkoff;
	u8 nexthdr;

	ntkoff = skb_network_offset(skb);

	hl = sizeof(*ip6h);

	if (!pskb_may_pull(skb, hl + ntkoff))
		goto fail;

	ip6h = ipv6_hdr(skb);

	pl = ntohs(ip6h->payload_len);
	nexthdr = ip6h->nexthdr;

	do {
		switch (nexthdr) {
		case NEXTHDR_FRAGMENT:
			goto ignore_skb;
		case NEXTHDR_ROUTING:
		case NEXTHDR_HOP:
		case NEXTHDR_DEST:
			if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			ixhl = ipv6_optlen(ip6xh);
			if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
				goto fail;
			ip6xh = (void *)(skb_network_header(skb) + hl);
			if ((nexthdr == NEXTHDR_HOP) &&
			    !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
				goto fail;
			nexthdr = ip6xh->nexthdr;
			hl += ixhl;
			break;
		case IPPROTO_ICMPV6:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
				if (!tcf_csum_ipv6_icmp(skb,
							hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_TCP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
				if (!tcf_csum_ipv6_tcp(skb,
						       hl, pl + sizeof(*ip6h)))
					goto fail;
			goto done;
		case IPPROTO_UDP:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 0))
					goto fail;
			goto done;
		case IPPROTO_UDPLITE:
			if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
				if (!tcf_csum_ipv6_udp(skb, hl,
						       pl + sizeof(*ip6h), 1))
					goto fail;
			goto done;
		default:
			goto ignore_skb;
		}
	} while (pskb_may_pull(skb, hl + 1 + ntkoff));

done:
ignore_skb:
	return 1;

fail:
	return 0;
}
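
/*
 * Packet-path entry point: snapshot the configured verdict and update
 * flags under the action lock, then dispatch on the skb's protocol; any
 * helper failure turns the verdict into TC_ACT_SHOT and counts a drop.
 */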
static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
		    struct tcf_result *res)
{
	struct tcf_csum *p = to_tcf_csum(a);
	int action;
	u32 update_flags;

	spin_lock(&p->tcf_lock);
	tcf_lastuse_update(&p->tcf_tm);
	bstats_update(&p->tcf_bstats, skb);
	action = p->tcf_action;
	update_flags = p->update_flags;
	spin_unlock(&p->tcf_lock);

	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	switch (tc_skb_protocol(skb)) {
	case cpu_to_be16(ETH_P_IP):
		if (!tcf_csum_ipv4(skb, update_flags))
			goto drop;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		if (!tcf_csum_ipv6(skb, update_flags))
			goto drop;
		break;
	}

	return action;

drop:
	spin_lock(&p->tcf_lock);
	p->tcf_qstats.drops++;
	spin_unlock(&p->tcf_lock);
	return TC_ACT_SHOT;
}

static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			 int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_csum *p = to_tcf_csum(a);
	struct tc_csum opt = {
		.update_flags = p->update_flags,
		.index = p->tcf_index,
		.action = p->tcf_action,
		.refcnt = p->tcf_refcnt - ref,
		.bindcnt = p->tcf_bindcnt - bind,
	};
	struct tcf_t t;

	if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &p->tcf_tm);
	if (nla_put_64bit(skb, TCA_CSUM_TM, sizeof(t), &t, TCA_CSUM_PAD))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_csum_walker(struct net *net, struct sk_buff *skb,
			   struct netlink_callback *cb, int type,
			   const struct tc_action_ops *ops)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops);
}

static int tcf_csum_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tcf_hash_search(tn, a, index);
}

static struct tc_action_ops act_csum_ops = {
	.kind = "csum",
	.type = TCA_ACT_CSUM,
	.owner = THIS_MODULE,
	.act = tcf_csum,
	.dump = tcf_csum_dump,
	.init = tcf_csum_init,
	.walk = tcf_csum_walker,
	.lookup = tcf_csum_search,
	.size = sizeof(struct tcf_csum),
};

static __net_init int csum_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	return tc_action_net_init(tn, &act_csum_ops, CSUM_TAB_MASK);
}

static void __net_exit csum_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, csum_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations csum_net_ops = {
	.init = csum_init_net,
	.exit = csum_exit_net,
	.id = &csum_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_DESCRIPTION("Checksum updating actions");
MODULE_LICENSE("GPL");

static int __init csum_init_module(void)
{
	return tcf_register_action(&act_csum_ops, &csum_net_ops);
}

static void __exit csum_cleanup_module(void)
{
	tcf_unregister_action(&act_csum_ops, &csum_net_ops);
}

module_init(csum_init_module);
module_exit(csum_cleanup_module);
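
/*
 * Example usage (a sketch, assuming iproute2's csum action syntax; check
 * tc-csum(8) for the keywords your tc version accepts):
 *
 *   tc qdisc add dev eth0 root handle 1: prio
 *   tc filter add dev eth0 parent 1: protocol ip prio 10 u32 \
 *       match ip protocol 17 0xff \
 *       action csum ip4h and udp
 *
 * This would recompute the IPv4 header and UDP checksums on matching
 * packets, i.e. TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_UDP.
 */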