/*
 * Copyright (c) 2007-2013 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              const struct nlattr *attr, int len);

static int make_writable(struct sk_buff *skb, int write_len)
{
        if (!pskb_may_pull(skb, write_len))
                return -ENOMEM;

        if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
                return 0;

        return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

/* remove VLAN header from packet and update csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
        struct vlan_hdr *vhdr;
        int err;

        err = make_writable(skb, VLAN_ETH_HLEN);
        if (unlikely(err))
                return err;

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum, csum_partial(skb->data
                                        + (2 * ETH_ALEN), VLAN_HLEN, 0));

        vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
        *current_tci = vhdr->h_vlan_TCI;

        memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
        __skb_pull(skb, VLAN_HLEN);

        vlan_set_encap_proto(skb, vhdr);
        skb->mac_header += VLAN_HLEN;
        if (skb_network_offset(skb) < ETH_HLEN)
                skb_set_network_header(skb, ETH_HLEN);
        skb_reset_mac_len(skb);

        return 0;
}

static int pop_vlan(struct sk_buff *skb)
{
        __be16 tci;
        int err;

        if (likely(vlan_tx_tag_present(skb))) {
                skb->vlan_tci = 0;
        } else {
                if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
                             skb->len < VLAN_ETH_HLEN))
                        return 0;

                err = __pop_vlan_tci(skb, &tci);
                if (err)
                        return err;
        }
        /* move next vlan tag to hw accel tag */
        if (likely(skb->protocol != htons(ETH_P_8021Q) ||
                   skb->len < VLAN_ETH_HLEN))
                return 0;

        err = __pop_vlan_tci(skb, &tci);
        if (unlikely(err))
                return err;

        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
        return 0;
}

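/* Push the VLAN tag described by 'vlan' onto 'skb'.  If an accelerated
 * tag is already present, it is first written back into the packet data
 * so that the new tag can take over the hardware-accelerated slot; this
 * is how multiply-tagged (QinQ) packets are built up, with only the
 * outermost tag accelerated.
 */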
static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
        if (unlikely(vlan_tx_tag_present(skb))) {
                u16 current_tag;

                /* push down current VLAN tag */
                current_tag = vlan_tx_tag_get(skb);

                if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
                        return -ENOMEM;

                if (skb->ip_summed == CHECKSUM_COMPLETE)
                        skb->csum = csum_add(skb->csum, csum_partial(skb->data
                                        + (2 * ETH_ALEN), VLAN_HLEN, 0));

        }
        __vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
        return 0;
}

static int set_eth_addr(struct sk_buff *skb,
                        const struct ovs_key_ethernet *eth_key)
{
        int err;
        err = make_writable(skb, ETH_HLEN);
        if (unlikely(err))
                return err;

        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
        ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);

        ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        return 0;
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
                        __be32 *addr, __be32 new_addr)
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
                                                 *addr, new_addr, 1);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace4(&uh->check, skb,
                                                         *addr, new_addr, 1);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }

        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_hash(skb);
        *addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
                                 __be32 addr[4], const __be32 new_addr[4])
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (l4_proto == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
                                                  addr, new_addr, 1);
        } else if (l4_proto == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace16(&uh->check, skb,
                                                          addr, new_addr, 1);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
                          __be32 addr[4], const __be32 new_addr[4],
                          bool recalculate_csum)
{
        if (recalculate_csum)
                update_ipv6_checksum(skb, l4_proto, addr, new_addr);

        skb_clear_hash(skb);
        memcpy(addr, new_addr, sizeof(__be32[4]));
}

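/* The IPv6 traffic class and flow label straddle byte boundaries in the
 * header (version:4 | traffic class:8 | flow label:20): nh->priority
 * holds the top four bits of the traffic class, while flow_lbl[0..2]
 * carries its bottom four bits followed by the 20-bit flow label.  The
 * two helpers below mask and shift accordingly so that neighbouring
 * bits are preserved.
 */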
static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
{
        nh->priority = tc >> 4;
        nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
{
        nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16;
        nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
        nh->flow_lbl[2] = fl & 0x000000FF;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
        csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
        nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
{
        struct iphdr *nh;
        int err;

        err = make_writable(skb, skb_network_offset(skb) +
                                 sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        nh = ip_hdr(skb);

        if (ipv4_key->ipv4_src != nh->saddr)
                set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);

        if (ipv4_key->ipv4_dst != nh->daddr)
                set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);

        if (ipv4_key->ipv4_tos != nh->tos)
                ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);

        if (ipv4_key->ipv4_ttl != nh->ttl)
                set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);

        return 0;
}

static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
{
        struct ipv6hdr *nh;
        int err;
        __be32 *saddr;
        __be32 *daddr;

        err = make_writable(skb, skb_network_offset(skb) +
                                 sizeof(struct ipv6hdr));
        if (unlikely(err))
                return err;

        nh = ipv6_hdr(skb);
        saddr = (__be32 *)&nh->saddr;
        daddr = (__be32 *)&nh->daddr;

        if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src)))
                set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
                              ipv6_key->ipv6_src, true);

        if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
                unsigned int offset = 0;
                int flags = IP6_FH_F_SKIP_RH;
                bool recalc_csum = true;

                if (ipv6_ext_hdr(nh->nexthdr))
                        recalc_csum = ipv6_find_hdr(skb, &offset,
                                                    NEXTHDR_ROUTING, NULL,
                                                    &flags) != NEXTHDR_ROUTING;

                set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
                              ipv6_key->ipv6_dst, recalc_csum);
        }

        set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
        set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
        nh->hop_limit = ipv6_key->ipv6_hlimit;

        return 0;
}

/* Must follow make_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
                        __be16 new_port, __sum16 *check)
{
        inet_proto_csum_replace2(check, skb, *port, new_port, 0);
        *port = new_port;
        skb_clear_hash(skb);
}

static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
        struct udphdr *uh = udp_hdr(skb);

        if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
                set_tp_port(skb, port, new_port, &uh->check);

                if (!uh->check)
                        uh->check = CSUM_MANGLED_0;
        } else {
                *port = new_port;
                skb_clear_hash(skb);
        }
}

static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
{
        struct udphdr *uh;
        int err;

        err = make_writable(skb, skb_transport_offset(skb) +
                                 sizeof(struct udphdr));
        if (unlikely(err))
                return err;

        uh = udp_hdr(skb);
        if (udp_port_key->udp_src != uh->source)
                set_udp_port(skb, &uh->source, udp_port_key->udp_src);

        if (udp_port_key->udp_dst != uh->dest)
                set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);

        return 0;
}

static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
{
        struct tcphdr *th;
        int err;

        err = make_writable(skb, skb_transport_offset(skb) +
                                 sizeof(struct tcphdr));
        if (unlikely(err))
                return err;

        th = tcp_hdr(skb);
        if (tcp_port_key->tcp_src != th->source)
                set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);

        if (tcp_port_key->tcp_dst != th->dest)
                set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);

        return 0;
}

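/* SCTP uses a full CRC32c rather than an Internet checksum, so the port
 * rewrite below recomputes the checksum over the whole packet, before
 * and after the change.  The stored value is then updated as
 * old_csum ^ old_correct_csum ^ new_csum, so a packet that arrived with
 * a bad checksum leaves with an equally bad one instead of being
 * silently repaired.
 */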
static int set_sctp(struct sk_buff *skb,
                    const struct ovs_key_sctp *sctp_port_key)
{
        struct sctphdr *sh;
        int err;
        unsigned int sctphoff = skb_transport_offset(skb);

        err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
        if (unlikely(err))
                return err;

        sh = sctp_hdr(skb);
        if (sctp_port_key->sctp_src != sh->source ||
            sctp_port_key->sctp_dst != sh->dest) {
                __le32 old_correct_csum, new_csum, old_csum;

                old_csum = sh->checksum;
                old_correct_csum = sctp_compute_cksum(skb, sctphoff);

                sh->source = sctp_port_key->sctp_src;
                sh->dest = sctp_port_key->sctp_dst;

                new_csum = sctp_compute_cksum(skb, sctphoff);

                /* Carry any checksum errors through. */
                sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

                skb_clear_hash(skb);
        }

        return 0;
}

static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
        struct vport *vport;

        if (unlikely(!skb))
                return -ENOMEM;

        vport = ovs_vport_rcu(dp, out_port);
        if (unlikely(!vport)) {
                kfree_skb(skb);
                return -ENODEV;
        }

        ovs_vport_send(vport, skb);
        return 0;
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            const struct nlattr *attr)
{
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;

        BUG_ON(!OVS_CB(skb)->pkt_key);

        upcall.cmd = OVS_PACKET_CMD_ACTION;
        upcall.key = OVS_CB(skb)->pkt_key;
        upcall.userdata = NULL;
        upcall.portid = 0;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_USERSPACE_ATTR_USERDATA:
                        upcall.userdata = a;
                        break;

                case OVS_USERSPACE_ATTR_PID:
                        upcall.portid = nla_get_u32(a);
                        break;
                }
        }

        return ovs_dp_upcall(dp, skb, &upcall);
}

static bool last_action(const struct nlattr *a, int rem)
{
        return a->nla_len == rem;
}

static int sample(struct datapath *dp, struct sk_buff *skb,
                  const struct nlattr *attr)
{
        const struct nlattr *acts_list = NULL;
        const struct nlattr *a;
        struct sk_buff *sample_skb;
        int rem;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_SAMPLE_ATTR_PROBABILITY:
                        if (prandom_u32() >= nla_get_u32(a))
                                return 0;
                        break;

                case OVS_SAMPLE_ATTR_ACTIONS:
                        acts_list = a;
                        break;
                }
        }

        rem = nla_len(acts_list);
        a = nla_data(acts_list);

        /* Actions list is either empty or only contains a single user-space
         * action, the latter being a special case as it is the only known
         * usage of the sample action.
         * In these special cases don't clone the skb as there are no
         * side-effects in the nested actions.
         * Otherwise, clone in case the nested actions have side effects.
         */
        if (likely(rem == 0 || (nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
                                last_action(a, rem)))) {
                sample_skb = skb;
                skb_get(skb);
        } else {
                sample_skb = skb_clone(skb, GFP_ATOMIC);
                if (!sample_skb) /* Skip sample action when out of memory. */
                        return 0;
        }

        /* Note that do_execute_actions() never consumes skb.
         * In the case where skb has been cloned above it is the clone that
         * is consumed.  Otherwise the skb_get(skb) call prevents
         * consumption by do_execute_actions().  Thus, it is safe to simply
         * return the error code and let the caller (also
         * do_execute_actions()) free skb on error.
         */
        return do_execute_actions(dp, sample_skb, a, rem);
}

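/* Apply a single OVS_ACTION_ATTR_SET action.  Most cases rewrite packet
 * headers via the helpers above.  OVS_KEY_ATTR_IPV4_TUNNEL only repoints
 * OVS_CB(skb)->tun_key at the netlink attribute payload rather than
 * copying it, so that attribute must outlive the packet's processing.
 */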
495 */ 496 return do_execute_actions(dp, sample_skb, a, rem); 497 } 498 499 static int execute_set_action(struct sk_buff *skb, 500 const struct nlattr *nested_attr) 501 { 502 int err = 0; 503 504 switch (nla_type(nested_attr)) { 505 case OVS_KEY_ATTR_PRIORITY: 506 skb->priority = nla_get_u32(nested_attr); 507 break; 508 509 case OVS_KEY_ATTR_SKB_MARK: 510 skb->mark = nla_get_u32(nested_attr); 511 break; 512 513 case OVS_KEY_ATTR_IPV4_TUNNEL: 514 OVS_CB(skb)->tun_key = nla_data(nested_attr); 515 break; 516 517 case OVS_KEY_ATTR_ETHERNET: 518 err = set_eth_addr(skb, nla_data(nested_attr)); 519 break; 520 521 case OVS_KEY_ATTR_IPV4: 522 err = set_ipv4(skb, nla_data(nested_attr)); 523 break; 524 525 case OVS_KEY_ATTR_IPV6: 526 err = set_ipv6(skb, nla_data(nested_attr)); 527 break; 528 529 case OVS_KEY_ATTR_TCP: 530 err = set_tcp(skb, nla_data(nested_attr)); 531 break; 532 533 case OVS_KEY_ATTR_UDP: 534 err = set_udp(skb, nla_data(nested_attr)); 535 break; 536 537 case OVS_KEY_ATTR_SCTP: 538 err = set_sctp(skb, nla_data(nested_attr)); 539 break; 540 } 541 542 return err; 543 } 544 545 /* Execute a list of actions against 'skb'. */ 546 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, 547 const struct nlattr *attr, int len) 548 { 549 /* Every output action needs a separate clone of 'skb', but the common 550 * case is just a single output action, so that doing a clone and 551 * then freeing the original skbuff is wasteful. So the following code 552 * is slightly obscure just to avoid that. */ 553 int prev_port = -1; 554 const struct nlattr *a; 555 int rem; 556 557 for (a = attr, rem = len; rem > 0; 558 a = nla_next(a, &rem)) { 559 int err = 0; 560 561 if (prev_port != -1) { 562 do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port); 563 prev_port = -1; 564 } 565 566 switch (nla_type(a)) { 567 case OVS_ACTION_ATTR_OUTPUT: 568 prev_port = nla_get_u32(a); 569 break; 570 571 case OVS_ACTION_ATTR_USERSPACE: 572 output_userspace(dp, skb, a); 573 break; 574 575 case OVS_ACTION_ATTR_PUSH_VLAN: 576 err = push_vlan(skb, nla_data(a)); 577 if (unlikely(err)) /* skb already freed. */ 578 return err; 579 break; 580 581 case OVS_ACTION_ATTR_POP_VLAN: 582 err = pop_vlan(skb); 583 break; 584 585 case OVS_ACTION_ATTR_SET: 586 err = execute_set_action(skb, nla_data(a)); 587 break; 588 589 case OVS_ACTION_ATTR_SAMPLE: 590 err = sample(dp, skb, a); 591 if (unlikely(err)) /* skb already freed. */ 592 return err; 593 break; 594 } 595 596 if (unlikely(err)) { 597 kfree_skb(skb); 598 return err; 599 } 600 } 601 602 if (prev_port != -1) 603 do_output(dp, skb, prev_port); 604 else 605 consume_skb(skb); 606 607 return 0; 608 } 609 610 /* Execute a list of actions against 'skb'. */ 611 int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb) 612 { 613 struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); 614 615 OVS_CB(skb)->tun_key = NULL; 616 return do_execute_actions(dp, skb, acts->actions, acts->actions_len); 617 } 618