/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/sctp.h>
#include <linux/smp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/mpls.h>
#include <net/ndisc.h>

#include "conntrack.h"
#include "datapath.h"
#include "flow.h"
#include "flow_netlink.h"
#include "vport.h"

u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}

#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))

void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
			   const struct sk_buff *skb)
{
	struct flow_stats *stats;
	int node = numa_node_id();
	int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);

	stats = rcu_dereference(flow->stats[node]);

	/* Check if we already have node-specific stats. */
	if (likely(stats)) {
		spin_lock(&stats->lock);
		/* Mark if we write on the pre-allocated stats. */
		if (node == 0 && unlikely(flow->stats_last_writer != node))
			flow->stats_last_writer = node;
	} else {
		stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */
		spin_lock(&stats->lock);

		/* If the current NUMA-node is the only writer on the
		 * pre-allocated stats keep using them.
		 */
		if (unlikely(flow->stats_last_writer != node)) {
			/* A previous locker may have already allocated the
			 * stats, so we need to check again.  If node-specific
			 * stats were already allocated, we update the pre-
			 * allocated stats as we have already locked them.
			 */
			if (likely(flow->stats_last_writer != NUMA_NO_NODE)
			    && likely(!rcu_access_pointer(flow->stats[node]))) {
				/* Try to allocate node-specific stats. */
				struct flow_stats *new_stats;

				new_stats =
					kmem_cache_alloc_node(flow_stats_cache,
							      GFP_NOWAIT |
							      __GFP_THISNODE |
							      __GFP_NOWARN |
							      __GFP_NOMEMALLOC,
							      node);
				if (likely(new_stats)) {
					new_stats->used = jiffies;
					new_stats->packet_count = 1;
					new_stats->byte_count = len;
					new_stats->tcp_flags = tcp_flags;
					spin_lock_init(&new_stats->lock);

					rcu_assign_pointer(flow->stats[node],
							   new_stats);
					goto unlock;
				}
			}
			flow->stats_last_writer = node;
		}
	}

	stats->used = jiffies;
	stats->packet_count++;
	stats->byte_count += len;
	stats->tcp_flags |= tcp_flags;
unlock:
	spin_unlock(&stats->lock);
}

/* Must be called with rcu_read_lock or ovs_mutex. */
void ovs_flow_stats_get(const struct sw_flow *flow,
			struct ovs_flow_stats *ovs_stats,
			unsigned long *used, __be16 *tcp_flags)
{
	int node;

	*used = 0;
	*tcp_flags = 0;
	memset(ovs_stats, 0, sizeof(*ovs_stats));

	for_each_node(node) {
		struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[node]);

		if (stats) {
			/* Local CPU may write on non-local stats, so we must
			 * block bottom-halves here.
			 */
			spin_lock_bh(&stats->lock);
			if (!*used || time_after(stats->used, *used))
				*used = stats->used;
			*tcp_flags |= stats->tcp_flags;
			ovs_stats->n_packets += stats->packet_count;
			ovs_stats->n_bytes += stats->byte_count;
			spin_unlock_bh(&stats->lock);
		}
	}
}

/* Called with ovs_mutex. */
void ovs_flow_stats_clear(struct sw_flow *flow)
{
	int node;

	for_each_node(node) {
		struct flow_stats *stats = ovsl_dereference(flow->stats[node]);

		if (stats) {
			spin_lock_bh(&stats->lock);
			stats->used = 0;
			stats->packet_count = 0;
			stats->byte_count = 0;
			stats->tcp_flags = 0;
			spin_unlock_bh(&stats->lock);
		}
	}
}

static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}

static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}

static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}

static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}

static bool sctphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct sctphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}

static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	int payload_ofs;
	struct ipv6hdr *nh;
	uint8_t nexthdr;
	__be16 frag_off;
	int err;

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);

	if (frag_off) {
		if (frag_off & htons(~0x7))
			key->ip.frag = OVS_FRAG_TYPE_LATER;
		else
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
	} else {
		key->ip.frag = OVS_FRAG_TYPE_NONE;
	}

	/* Delayed handling of error in ipv6_skip_exthdr() as it
	 * always sets frag_off to a valid value which may be
	 * used to set key->ip.frag above.
	 */
	if (unlikely(payload_ofs < 0))
		return -EPROTO;

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}

static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}

/**
 * Parse vlan tag from vlan header.
 * Returns -ENOMEM on memory (pull) error.
 * Returns 0 if it encounters a non-vlan or incomplete packet.
 * Returns 1 after successfully parsing vlan tag.
 */
static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh)
{
	struct vlan_head *vh = (struct vlan_head *)skb->data;

	if (likely(!eth_type_vlan(vh->tpid)))
		return 0;

	if (unlikely(skb->len < sizeof(struct vlan_head) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct vlan_head) +
				    sizeof(__be16))))
		return -ENOMEM;

	vh = (struct vlan_head *)skb->data;
	key_vh->tci = vh->tci | htons(VLAN_TAG_PRESENT);
	key_vh->tpid = vh->tpid;

	__skb_pull(skb, sizeof(struct vlan_head));
	return 1;
}

static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int res;

	key->eth.vlan.tci = 0;
	key->eth.vlan.tpid = 0;
	key->eth.cvlan.tci = 0;
	key->eth.cvlan.tpid = 0;

	if (likely(skb_vlan_tag_present(skb))) {
		key->eth.vlan.tci = htons(skb->vlan_tci);
		key->eth.vlan.tpid = skb->vlan_proto;
	} else {
		/* Parse outer vlan tag in the non-accelerated case. */
		res = parse_vlan_tag(skb, &key->eth.vlan);
		if (res <= 0)
			return res;
	}

	/* Parse inner vlan tag. */
	res = parse_vlan_tag(skb, &key->eth.cvlan);
	if (res <= 0)
		return res;

	return 0;
}

static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (eth_proto_is_802_3(proto))
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));

	if (eth_proto_is_802_3(llc->ethertype))
		return llc->ethertype;

	return htons(ETH_P_802_2);
}

static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->tp.src = htons(icmp->icmp6_type);
	key->tp.dst = htons(icmp->icmp6_code);
	memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			return 0;

		if (unlikely(skb_linearize(skb)))
			return -ENOMEM;

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				(struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				return 0;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.sll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				ether_addr_copy(key->ipv6.nd.tll,
						&nd->opt[offset+sizeof(*nd_opt)]);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	return 0;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

	return 0;
}

/**
 * key_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 */
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
	int error;
	struct ethhdr *eth;

	/* Flags are always used as part of stats */
	key->tp.flags = 0;

	skb_reset_mac_header(skb);

	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	ether_addr_copy(key->eth.src, eth->h_source);
	ether_addr_copy(key->eth.dst, eth->h_dest);

	__skb_pull(skb, 2 * ETH_ALEN);
	/* We are going to push all headers that we pull, so no need to
	 * update skb->csum here.
	 */

	if (unlikely(parse_vlan(skb, key)))
		return -ENOMEM;

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;

	skb_reset_network_header(skb);
	skb_reset_mac_len(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
		else
			key->ip.frag = OVS_FRAG_TYPE_NONE;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}

		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order.
				 */
				key->tp.src = htons(icmp->type);
				key->tp.dst = htons(icmp->code);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}

	} else if (key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) {
		struct arp_eth_header *arp;
		bool arp_available = arphdr_ok(skb);

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp_available &&
		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
		    arp->ar_pro == htons(ETH_P_IP) &&
		    arp->ar_hln == ETH_ALEN &&
		    arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			else
				key->ip.proto = 0;

			memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
			ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
			ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
		} else {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
		}
	} else if (eth_p_mpls(key->eth.type)) {
		size_t stack_len = MPLS_HLEN;

		/* In the presence of an MPLS label stack the end of the L2
		 * header and the beginning of the L3 header differ.
		 *
		 * Advance network_header to the beginning of the L3
		 * header. mac_len corresponds to the end of the L2 header.
		 */
		while (1) {
			__be32 lse;

			error = check_header(skb, skb->mac_len + stack_len);
			if (unlikely(error))
				return 0;

			memcpy(&lse, skb_network_header(skb), MPLS_HLEN);

			if (stack_len == MPLS_HLEN)
				memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN);

			skb_set_network_header(skb, skb->mac_len + stack_len);
			if (lse & htonl(MPLS_LS_S_MASK))
				break;

			stack_len += MPLS_HLEN;
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;    /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			switch (nh_len) {
			case -EINVAL:
				memset(&key->ip, 0, sizeof(key->ip));
				memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
				/* fall-through */
			case -EPROTO:
				skb->transport_header = skb->network_header;
				error = 0;
				break;
			default:
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			return 0;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);
				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	}
	return 0;
}

int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
{
	return key_extract(skb, key);
}

int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
			 struct sk_buff *skb, struct sw_flow_key *key)
{
	/* Extract metadata from packet. */
	if (tun_info) {
		key->tun_proto = ip_tunnel_info_af(tun_info);
		memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key));

		if (tun_info->options_len) {
			BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
						   8)) - 1
					> sizeof(key->tun_opts));

			ip_tunnel_info_opts_get(TUN_METADATA_OPTS(key, tun_info->options_len),
						tun_info);
			key->tun_opts_len = tun_info->options_len;
		} else {
			key->tun_opts_len = 0;
		}
	} else {
		key->tun_proto = 0;
		key->tun_opts_len = 0;
		memset(&key->tun_key, 0, sizeof(key->tun_key));
	}

	key->phy.priority = skb->priority;
	key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
	key->phy.skb_mark = skb->mark;
	ovs_ct_fill_key(skb, key);
	key->ovs_flow_hash = 0;
	key->recirc_id = 0;

	return key_extract(skb, key);
}

int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr,
				   struct sk_buff *skb,
				   struct sw_flow_key *key, bool log)
{
	int err;

	memset(key, 0, OVS_SW_FLOW_KEY_METADATA_SIZE);

	/* Extract metadata from netlink attributes. */
	err = ovs_nla_get_flow_metadata(net, attr, key, log);
	if (err)
		return err;

	return key_extract(skb, key);
}