xref: /linux/drivers/net/ipvlan/ipvlan_core.c (revision 1f2367a39f17bd553a75e179a747f9b257bc9478)
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 */

#include "ipvlan.h"

static u32 ipvlan_jhash_secret __read_mostly;

void ipvlan_init_secret(void)
{
	net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}

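/* Update the slave's per-CPU RX counters: bytes, packets and (optionally)
 * the multicast count on success, or the error counter on failure.
 */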
void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
		     unsigned int len, bool success, bool mcast)
{
	if (likely(success)) {
		struct ipvl_pcpu_stats *pcptr;

		pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
		u64_stats_update_begin(&pcptr->syncp);
		pcptr->rx_pkts++;
		pcptr->rx_bytes += len;
		if (mcast)
			pcptr->rx_mcast++;
		u64_stats_update_end(&pcptr->syncp);
	} else {
		this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
	}
}
EXPORT_SYMBOL_GPL(ipvlan_count_rx);

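/* Hash an IPv6/IPv4 address into a bucket index using jhash keyed with
 * the boot-time random secret; the result is masked with IPVLAN_HASH_MASK.
 */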
#if IS_ENABLED(CONFIG_IPV6)
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	const struct in6_addr *ip6_addr = iaddr;

	return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}
#else
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	return 0;
}
#endif

static u8 ipvlan_get_v4_hash(const void *iaddr)
{
	const struct in_addr *ip4_addr = iaddr;

	return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}

static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr)
{
	if (!is_v6 && addr->atype == IPVL_IPV4) {
		struct in_addr *i4addr = (struct in_addr *)iaddr;

		return addr->ip4addr.s_addr == i4addr->s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (is_v6 && addr->atype == IPVL_IPV6) {
		struct in6_addr *i6addr = (struct in6_addr *)iaddr;

		return ipv6_addr_equal(&addr->ip6addr, i6addr);
#endif
	}

	return false;
}

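/* Look up an address in the port-wide hash table. Called from the packet
 * paths under RCU protection.
 */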
static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
					       const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;
	u8 hash;

	hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
	       ipvlan_get_v4_hash(iaddr);
	hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode)
		if (addr_equal(is_v6, addr, iaddr))
			return addr;
	return NULL;
}

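/* Add an address to the port's hash table unless it is already hashed. */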
void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
	struct ipvl_port *port = ipvlan->port;
	u8 hash;

	hash = (addr->atype == IPVL_IPV6) ?
	       ipvlan_get_v6_hash(&addr->ip6addr) :
	       ipvlan_get_v4_hash(&addr->ip4addr);
	if (hlist_unhashed(&addr->hlnode))
		hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}

void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
	hlist_del_init_rcu(&addr->hlnode);
}

struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
				   const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) {
		if (addr_equal(is_v6, addr, iaddr)) {
			ret = addr;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

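/* Return true if the address is already assigned to any slave on this port. */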
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
	struct ipvl_dev *ipvlan;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
		if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

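/* Validate the packet and return a pointer to its L3 header (ARP, IPv4,
 * IPv6, or ICMPv6 for Neighbour Solicitations), setting *type accordingly.
 * Returns NULL if the header cannot be pulled or fails basic sanity checks.
 */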
void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
	void *lyr3h = NULL;

	switch (skb->protocol) {
	case htons(ETH_P_ARP): {
		struct arphdr *arph;

		if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
			return NULL;

		arph = arp_hdr(skb);
		*type = IPVL_ARP;
		lyr3h = arph;
		break;
	}
	case htons(ETH_P_IP): {
		u32 pktlen;
		struct iphdr *ip4h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
			return NULL;

		ip4h = ip_hdr(skb);
		pktlen = ntohs(ip4h->tot_len);
		if (ip4h->ihl < 5 || ip4h->version != 4)
			return NULL;
		if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
			return NULL;

		*type = IPVL_IPV4;
		lyr3h = ip4h;
		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *ip6h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
			return NULL;

		ip6h = ipv6_hdr(skb);
		if (ip6h->version != 6)
			return NULL;

		*type = IPVL_IPV6;
		lyr3h = ip6h;
		/* Only Neighbour Solicitation pkts need different treatment */
		if (ipv6_addr_any(&ip6h->saddr) &&
		    ip6h->nexthdr == NEXTHDR_ICMP) {
			struct icmp6hdr	*icmph;

			if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
				return NULL;

			ip6h = ipv6_hdr(skb);
			icmph = (struct icmp6hdr *)(ip6h + 1);

			if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
				/* Need to access the ipv6 address in body */
				if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
						+ sizeof(struct in6_addr))))
					return NULL;

				ip6h = ipv6_hdr(skb);
				icmph = (struct icmp6hdr *)(ip6h + 1);
			}

			*type = IPVL_ICMPV6;
			lyr3h = icmph;
		}
		break;
	}
#endif
	default:
		return NULL;
	}

	return lyr3h;
}

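/* Hash a MAC address into an index for the per-slave mac_filters bitmap. */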
unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
	u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2),
			       ipvlan_jhash_secret);

	return hash & IPVLAN_MAC_FILTER_MASK;
}

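/* Work-queue handler: drain the port backlog and deliver a clone of each
 * multicast/broadcast packet to every interested slave that is up. Packets
 * that originated on a slave (tx_pkt) are also transmitted out of the
 * master device; packets that arrived externally are freed once distributed.
 */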
void ipvlan_process_multicast(struct work_struct *work)
{
	struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
	struct ethhdr *ethh;
	struct ipvl_dev *ipvlan;
	struct sk_buff *skb, *nskb;
	struct sk_buff_head list;
	unsigned int len;
	unsigned int mac_hash;
	int ret;
	u8 pkt_type;
	bool tx_pkt;

	__skb_queue_head_init(&list);

	spin_lock_bh(&port->backlog.lock);
	skb_queue_splice_tail_init(&port->backlog, &list);
	spin_unlock_bh(&port->backlog.lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		struct net_device *dev = skb->dev;
		bool consumed = false;

		ethh = eth_hdr(skb);
		tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
		mac_hash = ipvlan_mac_hash(ethh->h_dest);

		if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
			pkt_type = PACKET_BROADCAST;
		else
			pkt_type = PACKET_MULTICAST;

		rcu_read_lock();
		list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
			if (tx_pkt && (ipvlan->dev == skb->dev))
				continue;
			if (!test_bit(mac_hash, ipvlan->mac_filters))
				continue;
			if (!(ipvlan->dev->flags & IFF_UP))
				continue;
			ret = NET_RX_DROP;
			len = skb->len + ETH_HLEN;
			nskb = skb_clone(skb, GFP_ATOMIC);
			local_bh_disable();
			if (nskb) {
				consumed = true;
				nskb->pkt_type = pkt_type;
				nskb->dev = ipvlan->dev;
				if (tx_pkt)
					ret = dev_forward_skb(ipvlan->dev, nskb);
				else
					ret = netif_rx(nskb);
			}
			ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
			local_bh_enable();
		}
		rcu_read_unlock();

		if (tx_pkt) {
			/* If the packet originated here, send it out. */
			skb->dev = port->dev;
			skb->pkt_type = pkt_type;
			dev_queue_xmit(skb);
		} else {
			if (consumed)
				consume_skb(skb);
			else
				kfree_skb(skb);
		}
		if (dev)
			dev_put(dev);
	}
}

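/* Scrub the skb when it crosses into the namespace of @dev and retarget it
 * to that device. With a NULL @dev the skb is scrubbed as if it were
 * changing namespaces.
 */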
static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
	bool xnet = true;

	if (dev)
		xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

	skb_scrub_packet(skb, xnet);
	if (dev)
		skb->dev = dev;
}

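/* Deliver an skb to the slave that owns the destination address. For
 * slave-to-slave traffic (local == true) the skb is forwarded with
 * dev_forward_skb(); otherwise the skb is retargeted to the slave and
 * RX_HANDLER_ANOTHER tells the caller to re-run receive processing.
 */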
static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
			    bool local)
{
	struct ipvl_dev *ipvlan = addr->master;
	struct net_device *dev = ipvlan->dev;
	unsigned int len;
	rx_handler_result_t ret = RX_HANDLER_CONSUMED;
	bool success = false;
	struct sk_buff *skb = *pskb;

	len = skb->len + ETH_HLEN;
	/* Only packets exchanged between two local slaves need the
	 * device-up check as well as the skb-share check.
	 */
	if (local) {
		if (unlikely(!(dev->flags & IFF_UP))) {
			kfree_skb(skb);
			goto out;
		}

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			goto out;

		*pskb = skb;
	}

	if (local) {
		skb->pkt_type = PACKET_HOST;
		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
			success = true;
	} else {
		skb->dev = dev;
		ret = RX_HANDLER_ANOTHER;
		success = true;
	}

out:
	ipvlan_count_rx(ipvlan, len, success, false);
	return ret;
}

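/* Find the ipvlan address matching the source or destination (selected by
 * @use_dest) carried in the given L3 header. ARP packets are matched on the
 * sender/target protocol address and Neighbour Solicitations on the target
 * address in the ICMPv6 body.
 */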
struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
				     int addr_type, bool use_dest)
{
	struct ipvl_addr *addr = NULL;

	switch (addr_type) {
#if IS_ENABLED(CONFIG_IPV6)
	case IPVL_IPV6: {
		struct ipv6hdr *ip6h;
		struct in6_addr *i6addr;

		ip6h = (struct ipv6hdr *)lyr3h;
		i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		break;
	}
	case IPVL_ICMPV6: {
		struct nd_msg *ndmh;
		struct in6_addr *i6addr;

		/* Make sure that Neighbour Solicitation ICMPv6 packets
		 * are handled to avoid DAD issues.
		 */
		ndmh = (struct nd_msg *)lyr3h;
		if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
			i6addr = &ndmh->target;
			addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		}
		break;
	}
#endif
	case IPVL_IPV4: {
		struct iphdr *ip4h;
		__be32 *i4addr;

		ip4h = (struct iphdr *)lyr3h;
		i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i4addr, false);
		break;
	}
	case IPVL_ARP: {
		struct arphdr *arph;
		unsigned char *arp_ptr;
		__be32 dip;

		arph = (struct arphdr *)lyr3h;
		arp_ptr = (unsigned char *)(arph + 1);
		if (use_dest)
			arp_ptr += (2 * port->dev->addr_len) + 4;
		else
			arp_ptr += port->dev->addr_len;

		memcpy(&dip, arp_ptr, 4);
		addr = ipvlan_ht_addr_lookup(port, &dip, false);
		break;
	}
	}

	return addr;
}

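/* L3/L3S TX path for IPv4: route the packet in the namespace of the master
 * device and hand it to the local output path.
 */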
static int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
	const struct iphdr *ip4h = ip_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int err, ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		.flowi4_oif = dev->ifindex,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC,
		.flowi4_mark = skb->mark,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}
	skb_dst_set(skb, &rt->dst);
	err = ip_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}

#if IS_ENABLED(CONFIG_IPV6)
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct dst_entry *dst;
	int err, ret = NET_XMIT_DROP;
	struct flowi6 fl6 = {
		.flowi6_oif = dev->ifindex,
		.daddr = ip6h->daddr,
		.saddr = ip6h->saddr,
		.flowi6_flags = FLOWI_FLAG_ANYSRC,
		.flowlabel = ip6_flowinfo(ip6h),
		.flowi6_mark = skb->mark,
		.flowi6_proto = ip6h->nexthdr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		ret = dst->error;
		dst_release(dst);
		goto err;
	}
	skb_dst_set(skb, dst);
	err = ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}
#else
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	return NET_XMIT_DROP;
}
#endif

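/* L3/L3S TX path: drop multicast/broadcast, strip any remaining L2 header,
 * and route the packet as IPv4 or IPv6 in the master's namespace.
 */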
static int ipvlan_process_outbound(struct sk_buff *skb)
{
	struct ethhdr *ethh = eth_hdr(skb);
	int ret = NET_XMIT_DROP;

	/* In this mode we don't care about multicast and broadcast traffic */
	if (is_multicast_ether_addr(ethh->h_dest)) {
		pr_debug_ratelimited("Dropped {multi|broad}cast of type=[%x]\n",
				     ntohs(skb->protocol));
		kfree_skb(skb);
		goto out;
	}

	/* The ipvlan is a pseudo-L2 device, so the packets that we receive
	 * still carry an L2 header; it needs to be discarded and the packet
	 * processed further in the net-ns of the main device.
	 */
	if (skb_mac_header_was_set(skb)) {
		skb_pull(skb, sizeof(*ethh));
		skb->mac_header = (typeof(skb->mac_header))~0U;
		skb_reset_network_header(skb);
	}

	if (skb->protocol == htons(ETH_P_IPV6))
		ret = ipvlan_process_v6_outbound(skb);
	else if (skb->protocol == htons(ETH_P_IP))
		ret = ipvlan_process_v4_outbound(skb);
	else {
		pr_warn_ratelimited("Dropped outbound packet type=%x\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
	}
out:
	return ret;
}

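/* Queue a multicast/broadcast skb on the port backlog for deferred delivery
 * by ipvlan_process_multicast(), dropping it if the backlog is full. PAUSE
 * frames are dropped immediately.
 */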
static void ipvlan_multicast_enqueue(struct ipvl_port *port,
				     struct sk_buff *skb, bool tx_pkt)
{
	if (skb->protocol == htons(ETH_P_PAUSE)) {
		kfree_skb(skb);
		return;
	}

	/* Record whether the deferred packet came from the TX or the RX
	 * path; deciding this by looking at the mac-addresses on the packet
	 * would lead to erroneous decisions. (This would be true for a
	 * loopback-mode on the master device or a hair-pin mode of the
	 * switch.)
	 */
	IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;

	spin_lock(&port->backlog.lock);
	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
		if (skb->dev)
			dev_hold(skb->dev);
		__skb_queue_tail(&port->backlog, skb);
		spin_unlock(&port->backlog.lock);
		schedule_work(&port->wq);
	} else {
		spin_unlock(&port->backlog.lock);
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
	}
}

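/* L3/L3S TX: if the destination belongs to another slave on the same port
 * (and the port is not in VEPA mode), deliver it locally, dropping it in
 * private mode; otherwise hand the packet to the outbound routing path.
 */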
static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	void *lyr3h;
	struct ipvl_addr *addr;
	int addr_type;

	lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	if (!ipvlan_is_vepa(ipvlan->port)) {
		addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
		if (addr) {
			if (ipvlan_is_private(ipvlan->port)) {
				consume_skb(skb);
				return NET_XMIT_DROP;
			}
			return ipvlan_rcv_frame(addr, &skb, true);
		}
	}
out:
	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return ipvlan_process_outbound(skb);
}

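/* L2 TX: frames addressed to the port's own MAC are delivered to the owning
 * slave or forwarded to the master, multicast/broadcast is queued for
 * deferred distribution, and everything else is transmitted through the
 * physical device.
 */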
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (!ipvlan_is_vepa(ipvlan->port) &&
	    ether_addr_equal(eth->h_dest, eth->h_source)) {
		lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
		if (lyr3h) {
			addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
			if (addr) {
				if (ipvlan_is_private(ipvlan->port)) {
					consume_skb(skb);
					return NET_XMIT_DROP;
				}
				return ipvlan_rcv_frame(addr, &skb, true);
			}
		}
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;

		/* The packet definitely does not belong to any of the
		 * virtual devices, but the destination is local. So forward
		 * the skb to the main device. At the RX side we just return
		 * RX_HANDLER_PASS for it to be processed further on the stack.
		 */
		return dev_forward_skb(ipvlan->phy_dev, skb);

	} else if (is_multicast_ether_addr(eth->h_dest)) {
		ipvlan_skb_crossing_ns(skb, NULL);
		ipvlan_multicast_enqueue(ipvlan->port, skb, true);
		return NET_XMIT_SUCCESS;
	}

	skb->dev = ipvlan->phy_dev;
	return dev_queue_xmit(skb);
}

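/* Entry point for transmission from a slave: dispatch to the L2 or L3/L3S
 * TX handler according to the port mode.
 */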
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);

	if (!port)
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
		goto out;

	switch(port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_xmit_mode_l2(skb, dev);
	case IPVLAN_MODE_L3:
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
#endif
		return ipvlan_xmit_mode_l3(skb, dev);
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_queue_xmit() called for mode = [%hx]\n",
			  port->mode);
out:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

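/* Return true if the frame did not originate from one of this port's own
 * slaves, i.e. its source MAC or source IP address is not local.
 */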
static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
		lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
		if (!lyr3h)
			return true;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
		if (addr)
			return false;
	}

	return true;
}

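/* L3 RX: if the destination address belongs to a slave, steer the packet to
 * that slave; otherwise let the master's stack handle it.
 */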
static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	void *lyr3h;
	int addr_type;
	struct ipvl_addr *addr;
	struct sk_buff *skb = *pskb;
	rx_handler_result_t ret = RX_HANDLER_PASS;

	lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
	if (addr)
		ret = ipvlan_rcv_frame(addr, pskb, false);

out:
	return ret;
}

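/* L2 RX: queue external multicast/broadcast frames for distribution to the
 * slaves; unicast frames are handled exactly like L3 mode.
 */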
static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	struct sk_buff *skb = *pskb;
	struct ethhdr *eth = eth_hdr(skb);
	rx_handler_result_t ret = RX_HANDLER_PASS;

	if (is_multicast_ether_addr(eth->h_dest)) {
		if (ipvlan_external_frame(skb, port)) {
			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

			/* External frames are queued for device-local
			 * distribution, but a copy is given to the master
			 * straight away to avoid sending duplicates later
			 * when the work queue processes this frame. This is
			 * achieved by returning RX_HANDLER_PASS.
			 */
			if (nskb) {
				ipvlan_skb_crossing_ns(nskb, NULL);
				ipvlan_multicast_enqueue(port, nskb, false);
			}
		}
	} else {
		/* Handle non-multicast packets just like L3 mode. */
		ret = ipvlan_handle_mode_l3(pskb, port);
	}

	return ret;
}

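/* rx_handler installed on the master device: dispatch incoming frames to the
 * L2 or L3 handler according to the port mode. In L3S mode the frame is
 * simply passed up here; steering happens later in the L3S receive path.
 */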
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

	if (!port)
		return RX_HANDLER_PASS;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_handle_mode_l2(pskb, port);
	case IPVLAN_MODE_L3:
		return ipvlan_handle_mode_l3(pskb, port);
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
		return RX_HANDLER_PASS;
#endif
	}

	/* Should not reach here */
	WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
			  port->mode);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}
759