// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 */

#include <net/flow.h>
#include <net/ip.h>

#include "ipvlan.h"

static u32 ipvlan_jhash_secret __read_mostly;

void ipvlan_init_secret(void)
{
	net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}

void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
		     unsigned int len, bool success, bool mcast)
{
	if (likely(success)) {
		struct ipvl_pcpu_stats *pcptr;

		pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
		u64_stats_update_begin(&pcptr->syncp);
		u64_stats_inc(&pcptr->rx_pkts);
		u64_stats_add(&pcptr->rx_bytes, len);
		if (mcast)
			u64_stats_inc(&pcptr->rx_mcast);
		u64_stats_update_end(&pcptr->syncp);
	} else {
		this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
	}
}
EXPORT_SYMBOL_GPL(ipvlan_count_rx);
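
/* Note: readers of these per-cpu counters must pair with the syncp via
 * the u64_stats retry loop. A minimal sketch of a reader (assuming an
 * ipvl_dev, along the lines of what ipvlan_get_stats64() does in
 * ipvlan_main.c):
 *
 *	for_each_possible_cpu(idx) {
 *		pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx);
 *		do {
 *			strt = u64_stats_fetch_begin(&pcptr->syncp);
 *			rx_pkts = u64_stats_read(&pcptr->rx_pkts);
 *			rx_bytes = u64_stats_read(&pcptr->rx_bytes);
 *		} while (u64_stats_fetch_retry(&pcptr->syncp, strt));
 *		s->rx_packets += rx_pkts;
 *		s->rx_bytes += rx_bytes;
 *	}
 */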

#if IS_ENABLED(CONFIG_IPV6)
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	const struct in6_addr *ip6_addr = iaddr;

	return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}
#else
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	return 0;
}
#endif

static u8 ipvlan_get_v4_hash(__be32 addr)
{
	return jhash_1word((__force u32)addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}
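
/* Note: both hash helpers above return a bucket index into
 * port->hlhead[]; masking with IPVLAN_HASH_MASK (defined in ipvlan.h)
 * keeps the jhash result within the table size, so it fits in a u8.
 */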

static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr)
{
	if (!is_v6 && addr->atype == IPVL_IPV4) {
		struct in_addr *i4addr = (struct in_addr *)iaddr;

		return addr->ip4addr.s_addr == i4addr->s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (is_v6 && addr->atype == IPVL_IPV6) {
		struct in6_addr *i6addr = (struct in6_addr *)iaddr;

		return ipv6_addr_equal(&addr->ip6addr, i6addr);
#endif
	}

	return false;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct ipvl_addr *ipvlan_ht_addr_lookup6(const struct ipvl_port *port,
						const void *iaddr)
{
	struct ipvl_addr *addr;
	u8 hash;

	hash = ipvlan_get_v6_hash(iaddr);
	hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode)
		if (addr_equal(true, addr, iaddr))
			return addr;
	return NULL;
}
#endif

static struct ipvl_addr *ipvlan_ht_addr_lookup4(const struct ipvl_port *port,
						__be32 addr4)
{
	struct ipvl_addr *addr;
	u8 hash;

	hash = ipvlan_get_v4_hash(addr4);
	hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode)
		if (addr->atype == IPVL_IPV4 && addr->ip4addr.s_addr == addr4)
			return addr;
	return NULL;
}

void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
	struct ipvl_port *port = ipvlan->port;
	u8 hash;

	hash = (addr->atype == IPVL_IPV6) ?
	       ipvlan_get_v6_hash(&addr->ip6addr) :
	       ipvlan_get_v4_hash(addr->ip4addr.s_addr);
	if (hlist_unhashed(&addr->hlnode))
		hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}

void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
	hlist_del_init_rcu(&addr->hlnode);
}
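
/* The hash-table lookups above are RCU-protected: additions and
 * removals go through ipvlan_ht_addr_add()/ipvlan_ht_addr_del()
 * (serialized by the control path), while packet-path readers only
 * need rcu_read_lock(). A hedged usage sketch:
 *
 *	rcu_read_lock();
 *	addr = ipvlan_ht_addr_lookup4(port, ip4h->daddr);
 *	if (addr)
 *		...	// addr is only valid inside this RCU section
 *	rcu_read_unlock();
 */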

struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
				   const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;

	assert_spin_locked(&ipvlan->port->addrs_lock);

	list_for_each_entry(addr, &ipvlan->addrs, anode) {
		if (addr_equal(is_v6, addr, iaddr))
			return addr;
	}
	return NULL;
}

bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
	struct ipvl_dev *ipvlan;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
		if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
	void *lyr3h = NULL;

	switch (skb->protocol) {
	case htons(ETH_P_ARP): {
		struct arphdr *arph;

		if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
			return NULL;

		arph = arp_hdr(skb);
		*type = IPVL_ARP;
		lyr3h = arph;
		break;
	}
	case htons(ETH_P_IP): {
		u32 pktlen;
		struct iphdr *ip4h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
			return NULL;

		ip4h = ip_hdr(skb);
		pktlen = skb_ip_totlen(skb);
		if (ip4h->ihl < 5 || ip4h->version != 4)
			return NULL;
		if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
			return NULL;

		*type = IPVL_IPV4;
		lyr3h = ip4h;
		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *ip6h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
			return NULL;

		ip6h = ipv6_hdr(skb);
		if (ip6h->version != 6)
			return NULL;

		*type = IPVL_IPV6;
		lyr3h = ip6h;
		/* Only Neighbour Solicitation pkts need different treatment */
		if (ipv6_addr_any(&ip6h->saddr) &&
		    ip6h->nexthdr == NEXTHDR_ICMP) {
			struct icmp6hdr *icmph;

			if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
				return NULL;

			ip6h = ipv6_hdr(skb);
			icmph = (struct icmp6hdr *)(ip6h + 1);

			if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
				/* Need to access the ipv6 address in body */
				if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
						+ sizeof(struct in6_addr))))
					return NULL;

				ip6h = ipv6_hdr(skb);
				icmph = (struct icmp6hdr *)(ip6h + 1);
			}

			*type = IPVL_ICMPV6;
			lyr3h = icmph;
		}
		break;
	}
#endif
	default:
		return NULL;
	}

	return lyr3h;
}
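
/* A sketch of how call sites in this file consume ipvlan_get_L3_hdr():
 * the returned pointer aliases into the (possibly re-pulled) skb data
 * and *type identifies which header was found:
 *
 *	int addr_type;
 *	void *lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
 *
 *	if (lyr3h)
 *		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
 */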

unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
	u32 hash = jhash_1word(get_unaligned((u32 *)(addr + 2)),
			       ipvlan_jhash_secret);

	return hash & IPVLAN_MAC_FILTER_MASK;
}
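
/* Note: only four bytes starting at addr + 2 feed the hash. For
 * IP-derived multicast MAC addresses (01:00:5e:... / 33:33:...) the
 * leading bytes are fixed, so hashing the tail keeps the mac_filters
 * bitmap distribution even.
 */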

void ipvlan_process_multicast(struct work_struct *work)
{
	struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
	struct ethhdr *ethh;
	struct ipvl_dev *ipvlan;
	struct sk_buff *skb, *nskb;
	struct sk_buff_head list;
	unsigned int len;
	unsigned int mac_hash;
	int ret;
	u8 pkt_type;
	bool tx_pkt;

	__skb_queue_head_init(&list);

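	/* Splice the whole backlog onto a private list under the lock,
	 * then process it lock-free; producers can keep queueing while
	 * this work item runs.
	 */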
	spin_lock_bh(&port->backlog.lock);
	skb_queue_splice_tail_init(&port->backlog, &list);
	spin_unlock_bh(&port->backlog.lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		struct net_device *dev = skb->dev;
		bool consumed = false;

		ethh = eth_hdr(skb);
		tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
		mac_hash = ipvlan_mac_hash(ethh->h_dest);

		if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
			pkt_type = PACKET_BROADCAST;
		else
			pkt_type = PACKET_MULTICAST;

		rcu_read_lock();
		list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
			if (tx_pkt && (ipvlan->dev == skb->dev))
				continue;
			if (!test_bit(mac_hash, ipvlan->mac_filters))
				continue;
			if (!(ipvlan->dev->flags & IFF_UP))
				continue;
			ret = NET_RX_DROP;
			len = skb->len + ETH_HLEN;
			nskb = skb_clone(skb, GFP_ATOMIC);
			local_bh_disable();
			if (nskb) {
				consumed = true;
				nskb->pkt_type = pkt_type;
				nskb->dev = ipvlan->dev;
				if (tx_pkt)
					ret = dev_forward_skb(ipvlan->dev, nskb);
				else
					ret = netif_rx(nskb);
			}
			ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
			local_bh_enable();
		}
		rcu_read_unlock();

		if (tx_pkt) {
			/* If the packet originated here, send it out. */
			skb->dev = port->dev;
			skb->pkt_type = pkt_type;
			dev_queue_xmit(skb);
		} else {
			if (consumed)
				consume_skb(skb);
			else
				kfree_skb(skb);
		}
		dev_put(dev);
		cond_resched();
	}
}

static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
	bool xnet = true;

	if (dev)
		xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

	skb_scrub_packet(skb, xnet);
	if (dev)
		skb->dev = dev;
}

static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
			    bool local)
{
	struct ipvl_dev *ipvlan = addr->master;
	struct net_device *dev = ipvlan->dev;
	unsigned int len;
	rx_handler_result_t ret = RX_HANDLER_CONSUMED;
	bool success = false;
	struct sk_buff *skb = *pskb;

	len = skb->len + ETH_HLEN;
	/* Only packets exchanged between two local slaves need the
	 * device-up check as well as the skb-share check.
	 */
	if (local) {
		if (unlikely(!(dev->flags & IFF_UP))) {
			kfree_skb(skb);
			goto out;
		}

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			goto out;

		*pskb = skb;
	}

	if (local) {
		skb->pkt_type = PACKET_HOST;
		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
			success = true;
	} else {
		skb->dev = dev;
		ret = RX_HANDLER_ANOTHER;
		success = true;
	}

out:
	ipvlan_count_rx(ipvlan, len, success, false);
	return ret;
}

struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
				     int addr_type, bool use_dest)
{
	struct ipvl_addr *addr = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr *i6addr;
#endif
	__be32 addr4;

	switch (addr_type) {
#if IS_ENABLED(CONFIG_IPV6)
	case IPVL_IPV6: {
		struct ipv6hdr *ip6h;

		ip6h = (struct ipv6hdr *)lyr3h;
		i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
lookup6:
		addr = ipvlan_ht_addr_lookup6(port, i6addr);
		break;
	}
	case IPVL_ICMPV6: {
		struct nd_msg *ndmh;

		/* Make sure that Neighbor Solicitation ICMPv6 packets
		 * are handled to avoid a DAD issue.
		 */
		ndmh = (struct nd_msg *)lyr3h;
		if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
			i6addr = &ndmh->target;
			goto lookup6;
		}
		break;
	}
#endif
	case IPVL_IPV4: {
		struct iphdr *ip4h;

		ip4h = (struct iphdr *)lyr3h;
		addr4 = use_dest ? ip4h->daddr : ip4h->saddr;
lookup4:
		addr = ipvlan_ht_addr_lookup4(port, addr4);
		break;
	}
	case IPVL_ARP: {
		struct arphdr *arph;
		unsigned char *arp_ptr;

		arph = (struct arphdr *)lyr3h;
		arp_ptr = (unsigned char *)(arph + 1);
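		/* The ARP payload follows the header as sha|spa|tha|tpa,
		 * so the target IP (tpa) sits 2 * addr_len + 4 bytes in,
		 * while the sender IP (spa) follows the first hardware
		 * address.
		 */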
		if (use_dest)
			arp_ptr += (2 * port->dev->addr_len) + 4;
		else
			arp_ptr += port->dev->addr_len;

		addr4 = get_unaligned((__be32 *)arp_ptr);
		goto lookup4;
	}
	}

	return addr;
}

static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	int err, ret = NET_XMIT_DROP;
	const struct iphdr *ip4h;
	struct rtable *rt;
	struct flowi4 fl4 = {
		.flowi4_oif = dev->ifindex,
		.flowi4_flags = FLOWI_FLAG_ANYSRC,
		.flowi4_mark = skb->mark,
	};

	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
		goto err;

	ip4h = ip_hdr(skb);
	fl4.daddr = ip4h->daddr;
	fl4.saddr = ip4h->saddr;
	fl4.flowi4_dscp = ip4h_dscp(ip4h);

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}
	skb_dst_set(skb, &rt->dst);

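	/* Clear any control-block state a previous layer may have left
	 * behind before handing the skb to the IP output path.
	 */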
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	err = ip_local_out(net, NULL, skb);
	if (unlikely(net_xmit_eval(err)))
		DEV_STATS_INC(dev, tx_errors);
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	DEV_STATS_INC(dev, tx_errors);
	kfree_skb(skb);
out:
	return ret;
}

#if IS_ENABLED(CONFIG_IPV6)

static noinline_for_stack int
ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.flowi6_oif = dev->ifindex,
		.daddr = ip6h->daddr,
		.saddr = ip6h->saddr,
		.flowi6_flags = FLOWI_FLAG_ANYSRC,
		.flowlabel = ip6_flowinfo(ip6h),
		.flowi6_mark = skb->mark,
		.flowi6_proto = ip6h->nexthdr,
	};
	struct dst_entry *dst;
	int err;

	dst = ip6_route_output(dev_net(dev), NULL, &fl6);
	err = dst->error;
	if (err) {
		dst_release(dst);
		return err;
	}
	skb_dst_set(skb, dst);
	return 0;
}

static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int err, ret = NET_XMIT_DROP;

	if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) {
		DEV_STATS_INC(dev, tx_errors);
		kfree_skb(skb);
		return ret;
	}

	err = ipvlan_route_v6_outbound(dev, skb);
	if (unlikely(err)) {
		DEV_STATS_INC(dev, tx_errors);
		kfree_skb(skb);
		return err;
	}

	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

	err = ip6_local_out(dev_net(dev), NULL, skb);
	if (unlikely(net_xmit_eval(err)))
		DEV_STATS_INC(dev, tx_errors);
	else
		ret = NET_XMIT_SUCCESS;
	return ret;
}
#else
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	return NET_XMIT_DROP;
}
#endif

static int ipvlan_process_outbound(struct sk_buff *skb)
{
	int ret = NET_XMIT_DROP;

	/* The ipvlan is a pseudo-L2 device, so the packets that we receive
	 * will have an L2 header, which needs to be discarded before the
	 * packet is processed further in the net-ns of the main device.
	 */
	if (skb_mac_header_was_set(skb)) {
		/* In this mode we don't care about
		 * multicast and broadcast traffic */
		struct ethhdr *ethh = eth_hdr(skb);

		if (is_multicast_ether_addr(ethh->h_dest)) {
			pr_debug_ratelimited(
				"Dropped {multi|broad}cast of type=[%x]\n",
				ntohs(skb->protocol));
			kfree_skb(skb);
			goto out;
		}

		skb_pull(skb, sizeof(*ethh));
		skb->mac_header = (typeof(skb->mac_header))~0U;
		skb_reset_network_header(skb);
	}

	if (skb->protocol == htons(ETH_P_IPV6))
		ret = ipvlan_process_v6_outbound(skb);
	else if (skb->protocol == htons(ETH_P_IP))
		ret = ipvlan_process_v4_outbound(skb);
	else {
		pr_warn_ratelimited("Dropped outbound packet type=%x\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
	}
out:
	return ret;
}

static void ipvlan_multicast_enqueue(struct ipvl_port *port,
				     struct sk_buff *skb, bool tx_pkt)
{
	if (skb->protocol == htons(ETH_P_PAUSE)) {
		kfree_skb(skb);
		return;
	}

	/* Record whether the deferred packet came from the TX or the RX
	 * path; deciding this by looking at the mac-addresses on the
	 * packet would lead to erroneous decisions. (This would be true
	 * for a loopback-mode on the master device or a hair-pin mode of
	 * the switch.)
	 */
	IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;

	spin_lock(&port->backlog.lock);
	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
		dev_hold(skb->dev);
		__skb_queue_tail(&port->backlog, skb);
		spin_unlock(&port->backlog.lock);
		schedule_work(&port->wq);
	} else {
		spin_unlock(&port->backlog.lock);
		dev_core_stats_rx_dropped_inc(skb->dev);
		kfree_skb(skb);
	}
}

static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	void *lyr3h;
	struct ipvl_addr *addr;
	int addr_type;

	lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	if (!ipvlan_is_vepa(ipvlan->port)) {
		addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
		if (addr) {
			if (ipvlan_is_private(ipvlan->port)) {
				consume_skb(skb);
				return NET_XMIT_DROP;
			}
			ipvlan_rcv_frame(addr, &skb, true);
			return NET_XMIT_SUCCESS;
		}
	}
out:
	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return ipvlan_process_outbound(skb);
}

static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ethhdr *eth = skb_eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

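	/* All ipvlan slaves share the master's MAC address, so a frame
	 * whose destination MAC equals its source MAC is (short of VEPA
	 * mode) destined for another slave on this same port and can be
	 * delivered locally.
	 */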
	if (!ipvlan_is_vepa(ipvlan->port) &&
	    ether_addr_equal(eth->h_dest, eth->h_source)) {
		lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
		if (lyr3h) {
			addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
			if (addr) {
				if (ipvlan_is_private(ipvlan->port)) {
					consume_skb(skb);
					return NET_XMIT_DROP;
				}
				ipvlan_rcv_frame(addr, &skb, true);
				return NET_XMIT_SUCCESS;
			}
		}
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;

		/* The packet definitely does not belong to any of the
		 * virtual devices, but the dest is local. So forward
		 * the skb to the main device. On the RX side we just
		 * return RX_HANDLER_PASS so it is processed further
		 * up the stack.
		 */
		dev_forward_skb(ipvlan->phy_dev, skb);
		return NET_XMIT_SUCCESS;

	} else if (is_multicast_ether_addr(eth->h_dest)) {
		skb_reset_mac_header(skb);
		ipvlan_skb_crossing_ns(skb, NULL);
		ipvlan_multicast_enqueue(ipvlan->port, skb, true);
		return NET_XMIT_SUCCESS;
	}

	skb->dev = ipvlan->phy_dev;
	return dev_queue_xmit(skb);
}

int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);

	if (!port)
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
		goto out;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_xmit_mode_l2(skb, dev);
	case IPVLAN_MODE_L3:
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
#endif
		return ipvlan_xmit_mode_l3(skb, dev);
	}

	/* Should not reach here */
	WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
out:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

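/* A frame is considered "internal" only when its source MAC is the
 * master's MAC *and* its source IP belongs to one of the slaves, i.e.
 * it was sent by an ipvlan on this port and reflected back to us
 * (loopback on the master or hair-pin switching); anything else is
 * external.
 */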
static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
		lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
		if (!lyr3h)
			return true;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
		if (addr)
			return false;
	}

	return true;
}

static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	void *lyr3h;
	int addr_type;
	struct ipvl_addr *addr;
	struct sk_buff *skb = *pskb;
	rx_handler_result_t ret = RX_HANDLER_PASS;

	lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
	if (addr)
		ret = ipvlan_rcv_frame(addr, pskb, false);

out:
	return ret;
}

static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	struct sk_buff *skb = *pskb;
	struct ethhdr *eth = eth_hdr(skb);
	rx_handler_result_t ret = RX_HANDLER_PASS;

	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	if (is_multicast_ether_addr(eth->h_dest)) {
		if (ipvlan_external_frame(skb, port)) {
			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

			/* External frames are queued for device local
			 * distribution, but a copy is given to the master
			 * straight away to avoid sending duplicates later
			 * when the work-queue processes this frame. This is
			 * achieved by returning RX_HANDLER_PASS.
			 */
			if (nskb) {
				ipvlan_skb_crossing_ns(nskb, NULL);
				ipvlan_multicast_enqueue(port, nskb, false);
			}
		}
	} else {
		/* Handle as in L3 mode for non-multicast packets */
		ret = ipvlan_handle_mode_l3(pskb, port);
	}

	return ret;
}

rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

	if (!port)
		return RX_HANDLER_PASS;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_handle_mode_l2(pskb, port);
	case IPVLAN_MODE_L3:
		return ipvlan_handle_mode_l3(pskb, port);
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
		return RX_HANDLER_PASS;
#endif
	}

	/* Should not reach here */
	WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}