// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 */

#include <net/flow.h>
#include <net/ip.h>

#include "ipvlan.h"

static u32 ipvlan_jhash_secret __read_mostly;

void ipvlan_init_secret(void)
{
	net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}

void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
		     unsigned int len, bool success, bool mcast)
{
	if (likely(success)) {
		struct ipvl_pcpu_stats *pcptr;

		pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
		u64_stats_update_begin(&pcptr->syncp);
		u64_stats_inc(&pcptr->rx_pkts);
		u64_stats_add(&pcptr->rx_bytes, len);
		if (mcast)
			u64_stats_inc(&pcptr->rx_mcast);
		u64_stats_update_end(&pcptr->syncp);
	} else {
		this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
	}
}
EXPORT_SYMBOL_GPL(ipvlan_count_rx);

#if IS_ENABLED(CONFIG_IPV6)
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	const struct in6_addr *ip6_addr = iaddr;

	return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}
#else
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	return 0;
}
#endif

static u8 ipvlan_get_v4_hash(const void *iaddr)
{
	const struct in_addr *ip4_addr = iaddr;

	return jhash_1word((__force u32)ip4_addr->s_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}

static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr)
{
	if (!is_v6 && addr->atype == IPVL_IPV4) {
		struct in_addr *i4addr = (struct in_addr *)iaddr;

		return addr->ip4addr.s_addr == i4addr->s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (is_v6 && addr->atype == IPVL_IPV6) {
		struct in6_addr *i6addr = (struct in6_addr *)iaddr;

		return ipv6_addr_equal(&addr->ip6addr, i6addr);
#endif
	}

	return false;
}

static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
					       const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;
	u8 hash;

	hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
	       ipvlan_get_v4_hash(iaddr);
	hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode)
		if (addr_equal(is_v6, addr, iaddr))
			return addr;
	return NULL;
}

void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
	struct ipvl_port *port = ipvlan->port;
	u8 hash;

	hash = (addr->atype == IPVL_IPV6) ?
	       ipvlan_get_v6_hash(&addr->ip6addr) :
	       ipvlan_get_v4_hash(&addr->ip4addr);
	if (hlist_unhashed(&addr->hlnode))
		hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}

void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
	hlist_del_init_rcu(&addr->hlnode);
}

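/* Look up @iaddr on a single slave device's address list.  The caller
 * must hold the port's addrs_lock; the assertion below checks this.
 */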
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
				   const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;

	assert_spin_locked(&ipvlan->port->addrs_lock);

	list_for_each_entry(addr, &ipvlan->addrs, anode) {
		if (addr_equal(is_v6, addr, iaddr))
			return addr;
	}
	return NULL;
}

bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
	struct ipvl_dev *ipvlan;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
		if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
	void *lyr3h = NULL;

	switch (skb->protocol) {
	case htons(ETH_P_ARP): {
		struct arphdr *arph;

		if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
			return NULL;

		arph = arp_hdr(skb);
		*type = IPVL_ARP;
		lyr3h = arph;
		break;
	}
	case htons(ETH_P_IP): {
		u32 pktlen;
		struct iphdr *ip4h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
			return NULL;

		ip4h = ip_hdr(skb);
		pktlen = skb_ip_totlen(skb);
		if (ip4h->ihl < 5 || ip4h->version != 4)
			return NULL;
		if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
			return NULL;

		*type = IPVL_IPV4;
		lyr3h = ip4h;
		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *ip6h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
			return NULL;

		ip6h = ipv6_hdr(skb);
		if (ip6h->version != 6)
			return NULL;

		*type = IPVL_IPV6;
		lyr3h = ip6h;
		/* Only Neighbour Solicitation pkts need different treatment */
		if (ipv6_addr_any(&ip6h->saddr) &&
		    ip6h->nexthdr == NEXTHDR_ICMP) {
			struct icmp6hdr *icmph;

			if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
				return NULL;

			ip6h = ipv6_hdr(skb);
			icmph = (struct icmp6hdr *)(ip6h + 1);

			if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
				/* Need to access the ipv6 address in body */
				if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
						+ sizeof(struct in6_addr))))
					return NULL;

				ip6h = ipv6_hdr(skb);
				icmph = (struct icmp6hdr *)(ip6h + 1);
			}

			*type = IPVL_ICMPV6;
			lyr3h = icmph;
		}
		break;
	}
#endif
	default:
		return NULL;
	}

	return lyr3h;
}

unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
	u32 hash = jhash_1word(get_unaligned((u32 *)(addr + 2)),
			       ipvlan_jhash_secret);

	return hash & IPVLAN_MAC_FILTER_MASK;
}

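/* Work-queue handler: drain the port's multicast/broadcast backlog and
 * deliver a clone of each frame to every slave whose mac_filters bitmap
 * covers the destination address.  Frames that originated on a slave
 * (tx_pkt) are also transmitted on the master device afterwards.
 */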
void ipvlan_process_multicast(struct work_struct *work)
{
	struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
	struct ethhdr *ethh;
	struct ipvl_dev *ipvlan;
	struct sk_buff *skb, *nskb;
	struct sk_buff_head list;
	unsigned int len;
	unsigned int mac_hash;
	int ret;
	u8 pkt_type;
	bool tx_pkt;

	__skb_queue_head_init(&list);

	spin_lock_bh(&port->backlog.lock);
	skb_queue_splice_tail_init(&port->backlog, &list);
	spin_unlock_bh(&port->backlog.lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		struct net_device *dev = skb->dev;
		bool consumed = false;

		ethh = eth_hdr(skb);
		tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
		mac_hash = ipvlan_mac_hash(ethh->h_dest);

		if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
			pkt_type = PACKET_BROADCAST;
		else
			pkt_type = PACKET_MULTICAST;

		rcu_read_lock();
		list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
			if (tx_pkt && (ipvlan->dev == skb->dev))
				continue;
			if (!test_bit(mac_hash, ipvlan->mac_filters))
				continue;
			if (!(ipvlan->dev->flags & IFF_UP))
				continue;
			ret = NET_RX_DROP;
			len = skb->len + ETH_HLEN;
			nskb = skb_clone(skb, GFP_ATOMIC);
			local_bh_disable();
			if (nskb) {
				consumed = true;
				nskb->pkt_type = pkt_type;
				nskb->dev = ipvlan->dev;
				if (tx_pkt)
					ret = dev_forward_skb(ipvlan->dev, nskb);
				else
					ret = netif_rx(nskb);
			}
			ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
			local_bh_enable();
		}
		rcu_read_unlock();

		if (tx_pkt) {
			/* If the packet originated here, send it out. */
			skb->dev = port->dev;
			skb->pkt_type = pkt_type;
			dev_queue_xmit(skb);
		} else {
			if (consumed)
				consume_skb(skb);
			else
				kfree_skb(skb);
		}
		dev_put(dev);
		cond_resched();
	}
}

static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
	bool xnet = true;

	if (dev)
		xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

	skb_scrub_packet(skb, xnet);
	if (dev)
		skb->dev = dev;
}

static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
			    bool local)
{
	struct ipvl_dev *ipvlan = addr->master;
	struct net_device *dev = ipvlan->dev;
	unsigned int len;
	rx_handler_result_t ret = RX_HANDLER_CONSUMED;
	bool success = false;
	struct sk_buff *skb = *pskb;

	len = skb->len + ETH_HLEN;
	/* Only packets exchanged between two local slaves need the
	 * device-up check as well as the skb-share check.
	 */
	if (local) {
		if (unlikely(!(dev->flags & IFF_UP))) {
			kfree_skb(skb);
			goto out;
		}

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			goto out;

		*pskb = skb;
	}

	if (local) {
		skb->pkt_type = PACKET_HOST;
		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
			success = true;
	} else {
		skb->dev = dev;
		ret = RX_HANDLER_ANOTHER;
		success = true;
	}

out:
	ipvlan_count_rx(ipvlan, len, success, false);
	return ret;
}

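/* Given the L3 header located by ipvlan_get_L3_hdr(), find the slave
 * address entry that owns the packet's source or destination address
 * (selected by @use_dest) in the port's hash table.
 */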
struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
				     int addr_type, bool use_dest)
{
	struct ipvl_addr *addr = NULL;

	switch (addr_type) {
#if IS_ENABLED(CONFIG_IPV6)
	case IPVL_IPV6: {
		struct ipv6hdr *ip6h;
		struct in6_addr *i6addr;

		ip6h = (struct ipv6hdr *)lyr3h;
		i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		break;
	}
	case IPVL_ICMPV6: {
		struct nd_msg *ndmh;
		struct in6_addr *i6addr;

		/* Make sure that Neighbour Solicitation ICMPv6 packets
		 * are handled to avoid DAD issues.
		 */
		ndmh = (struct nd_msg *)lyr3h;
		if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
			i6addr = &ndmh->target;
			addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		}
		break;
	}
#endif
	case IPVL_IPV4: {
		struct iphdr *ip4h;
		__be32 *i4addr;

		ip4h = (struct iphdr *)lyr3h;
		i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i4addr, false);
		break;
	}
	case IPVL_ARP: {
		struct arphdr *arph;
		unsigned char *arp_ptr;
		__be32 dip;

		arph = (struct arphdr *)lyr3h;
		arp_ptr = (unsigned char *)(arph + 1);
		if (use_dest)
			arp_ptr += (2 * port->dev->addr_len) + 4;
		else
			arp_ptr += port->dev->addr_len;

		memcpy(&dip, arp_ptr, 4);
		addr = ipvlan_ht_addr_lookup(port, &dip, false);
		break;
	}
	}

	return addr;
}

static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	int err, ret = NET_XMIT_DROP;
	const struct iphdr *ip4h;
	struct rtable *rt;
	struct flowi4 fl4 = {
		.flowi4_oif = dev->ifindex,
		.flowi4_flags = FLOWI_FLAG_ANYSRC,
		.flowi4_mark = skb->mark,
	};

	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
		goto err;

	ip4h = ip_hdr(skb);
	fl4.daddr = ip4h->daddr;
	fl4.saddr = ip4h->saddr;
	fl4.flowi4_dscp = ip4h_dscp(ip4h);

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}
	skb_dst_set(skb, &rt->dst);

	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	err = ip_local_out(net, NULL, skb);
	if (unlikely(net_xmit_eval(err)))
		DEV_STATS_INC(dev, tx_errors);
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	DEV_STATS_INC(dev, tx_errors);
	kfree_skb(skb);
out:
	return ret;
}

#if IS_ENABLED(CONFIG_IPV6)

static noinline_for_stack int
ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.flowi6_oif = dev->ifindex,
		.daddr = ip6h->daddr,
		.saddr = ip6h->saddr,
		.flowi6_flags = FLOWI_FLAG_ANYSRC,
		.flowlabel = ip6_flowinfo(ip6h),
		.flowi6_mark = skb->mark,
		.flowi6_proto = ip6h->nexthdr,
	};
	struct dst_entry *dst;
	int err;

	dst = ip6_route_output(dev_net(dev), NULL, &fl6);
	err = dst->error;
	if (err) {
		dst_release(dst);
		return err;
	}
	skb_dst_set(skb, dst);
	return 0;
}

static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int err, ret = NET_XMIT_DROP;

	if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) {
		DEV_STATS_INC(dev, tx_errors);
		kfree_skb(skb);
		return ret;
	}

	err = ipvlan_route_v6_outbound(dev, skb);
	if (unlikely(err)) {
		DEV_STATS_INC(dev, tx_errors);
		kfree_skb(skb);
		return err;
	}

	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

	err = ip6_local_out(dev_net(dev), NULL, skb);
	if (unlikely(net_xmit_eval(err)))
		DEV_STATS_INC(dev, tx_errors);
	else
		ret = NET_XMIT_SUCCESS;
	return ret;
}
#else
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	return NET_XMIT_DROP;
}
#endif

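/* Hand an outbound packet to the IPv4/IPv6 output path in the main
 * device's namespace.  Any L2 header is stripped first; multicast
 * frames and unknown protocols are dropped.
 */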
static int ipvlan_process_outbound(struct sk_buff *skb)
{
	int ret = NET_XMIT_DROP;

	/* The ipvlan is a pseudo-L2 device, so the packets that we receive
	 * will have an L2 header, which needs to be discarded before the
	 * packet is processed further in the net-ns of the main device.
	 */
	if (skb_mac_header_was_set(skb)) {
		/* In this mode we don't care about
		 * multicast and broadcast traffic.
		 */
		struct ethhdr *ethh = eth_hdr(skb);

		if (is_multicast_ether_addr(ethh->h_dest)) {
			pr_debug_ratelimited(
				"Dropped {multi|broad}cast of type=[%x]\n",
				ntohs(skb->protocol));
			kfree_skb(skb);
			goto out;
		}

		skb_pull(skb, sizeof(*ethh));
		skb->mac_header = (typeof(skb->mac_header))~0U;
		skb_reset_network_header(skb);
	}

	if (skb->protocol == htons(ETH_P_IPV6))
		ret = ipvlan_process_v6_outbound(skb);
	else if (skb->protocol == htons(ETH_P_IP))
		ret = ipvlan_process_v4_outbound(skb);
	else {
		pr_warn_ratelimited("Dropped outbound packet type=%x\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
	}
out:
	return ret;
}

static void ipvlan_multicast_enqueue(struct ipvl_port *port,
				     struct sk_buff *skb, bool tx_pkt)
{
	if (skb->protocol == htons(ETH_P_PAUSE)) {
		kfree_skb(skb);
		return;
	}

	/* Record whether the deferred packet came from the TX or the RX
	 * path; deducing this from the packet's MAC addresses would lead
	 * to erroneous decisions.  (That would be the case with a
	 * loopback-mode master device or a hair-pin mode switch.)
	 */
	IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;

	spin_lock(&port->backlog.lock);
	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
		dev_hold(skb->dev);
		__skb_queue_tail(&port->backlog, skb);
		spin_unlock(&port->backlog.lock);
		schedule_work(&port->wq);
	} else {
		spin_unlock(&port->backlog.lock);
		dev_core_stats_rx_dropped_inc(skb->dev);
		kfree_skb(skb);
	}
}

static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	void *lyr3h;
	struct ipvl_addr *addr;
	int addr_type;

	lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	if (!ipvlan_is_vepa(ipvlan->port)) {
		addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
		if (addr) {
			if (ipvlan_is_private(ipvlan->port)) {
				consume_skb(skb);
				return NET_XMIT_DROP;
			}
			ipvlan_rcv_frame(addr, &skb, true);
			return NET_XMIT_SUCCESS;
		}
	}
out:
	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return ipvlan_process_outbound(skb);
}

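/* L2-mode transmit path: frames addressed to the shared master MAC
 * (destination equals source, since every slave uses the master's MAC
 * address) are delivered locally or forwarded to the physical device,
 * multicast frames are queued for deferred distribution, and everything
 * else is transmitted on the physical device.
 */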
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ethhdr *eth = skb_eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (!ipvlan_is_vepa(ipvlan->port) &&
	    ether_addr_equal(eth->h_dest, eth->h_source)) {
		lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
		if (lyr3h) {
			addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
			if (addr) {
				if (ipvlan_is_private(ipvlan->port)) {
					consume_skb(skb);
					return NET_XMIT_DROP;
				}
				ipvlan_rcv_frame(addr, &skb, true);
				return NET_XMIT_SUCCESS;
			}
		}
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;

		/* The packet definitely does not belong to any of the
		 * virtual devices, but the destination is local.  So
		 * forward the skb to the main device.  At the RX side we
		 * just return RX_HANDLER_PASS so that it is processed
		 * further up the stack.
		 */
		dev_forward_skb(ipvlan->phy_dev, skb);
		return NET_XMIT_SUCCESS;

	} else if (is_multicast_ether_addr(eth->h_dest)) {
		skb_reset_mac_header(skb);
		ipvlan_skb_crossing_ns(skb, NULL);
		ipvlan_multicast_enqueue(ipvlan->port, skb, true);
		return NET_XMIT_SUCCESS;
	}

	skb->dev = ipvlan->phy_dev;
	return dev_queue_xmit(skb);
}

int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);

	if (!port)
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
		goto out;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_xmit_mode_l2(skb, dev);
	case IPVLAN_MODE_L3:
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
#endif
		return ipvlan_xmit_mode_l3(skb, dev);
	}

	/* Should not reach here */
	WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
out:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
		lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
		if (!lyr3h)
			return true;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
		if (addr)
			return false;
	}

	return true;
}

static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	void *lyr3h;
	int addr_type;
	struct ipvl_addr *addr;
	struct sk_buff *skb = *pskb;
	rx_handler_result_t ret = RX_HANDLER_PASS;

	lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
	if (addr)
		ret = ipvlan_rcv_frame(addr, pskb, false);

out:
	return ret;
}

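/* L2-mode receive handler: multicast frames from an external sender are
 * cloned onto the backlog for per-slave distribution while the original
 * is passed up the stack; non-multicast frames are handled the same way
 * as in L3 mode.
 */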
static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	struct sk_buff *skb = *pskb;
	struct ethhdr *eth = eth_hdr(skb);
	rx_handler_result_t ret = RX_HANDLER_PASS;

	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	if (is_multicast_ether_addr(eth->h_dest)) {
		if (ipvlan_external_frame(skb, port)) {
			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

			/* External frames are queued for device local
			 * distribution, but a copy is given to master
			 * straight away to avoid sending duplicates later
			 * when work-queue processes this frame.  This is
			 * achieved by returning RX_HANDLER_PASS.
			 */
			if (nskb) {
				ipvlan_skb_crossing_ns(nskb, NULL);
				ipvlan_multicast_enqueue(port, nskb, false);
			}
		}
	} else {
		/* Perform like l3 mode for non-multicast packet */
		ret = ipvlan_handle_mode_l3(pskb, port);
	}

	return ret;
}

rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

	if (!port)
		return RX_HANDLER_PASS;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_handle_mode_l2(pskb, port);
	case IPVLAN_MODE_L3:
		return ipvlan_handle_mode_l3(pskb, port);
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
		return RX_HANDLER_PASS;
#endif
	}

	/* Should not reach here */
	WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}