// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 */

#include <net/inet_dscp.h>

#include "ipvlan.h"

static u32 ipvlan_jhash_secret __read_mostly;

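/* Seed the jhash secret used by the address hash helpers below;
 * net_get_random_once() makes this safe to call more than once.
 */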
void ipvlan_init_secret(void)
{
	net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}

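/* Update a slave's per-CPU RX statistics: on success bump rx_pkts/rx_bytes
 * (and rx_mcast for multicast), otherwise count an rx_err.
 */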
void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
		     unsigned int len, bool success, bool mcast)
{
	if (likely(success)) {
		struct ipvl_pcpu_stats *pcptr;

		pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
		u64_stats_update_begin(&pcptr->syncp);
		u64_stats_inc(&pcptr->rx_pkts);
		u64_stats_add(&pcptr->rx_bytes, len);
		if (mcast)
			u64_stats_inc(&pcptr->rx_mcast);
		u64_stats_update_end(&pcptr->syncp);
	} else {
		this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
	}
}
EXPORT_SYMBOL_GPL(ipvlan_count_rx);

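/* Hash an IPv6/IPv4 address into one of the port's address-hash buckets
 * using the per-boot jhash secret.
 */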
#if IS_ENABLED(CONFIG_IPV6)
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	const struct in6_addr *ip6_addr = iaddr;

	return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}
#else
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	return 0;
}
#endif

static u8 ipvlan_get_v4_hash(const void *iaddr)
{
	const struct in_addr *ip4_addr = iaddr;

	return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}

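/* Compare a hashed ipvlan address entry against a raw IPv4/IPv6 address. */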
static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr)
{
	if (!is_v6 && addr->atype == IPVL_IPV4) {
		struct in_addr *i4addr = (struct in_addr *)iaddr;

		return addr->ip4addr.s_addr == i4addr->s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (is_v6 && addr->atype == IPVL_IPV6) {
		struct in6_addr *i6addr = (struct in6_addr *)iaddr;

		return ipv6_addr_equal(&addr->ip6addr, i6addr);
#endif
	}

	return false;
}

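/* Hash-table lookup of an IP address on a port; returns the matching
 * ipvl_addr entry or NULL.
 */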
static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
					       const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;
	u8 hash;

	hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
	       ipvlan_get_v4_hash(iaddr);
	hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode)
		if (addr_equal(is_v6, addr, iaddr))
			return addr;
	return NULL;
}

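/* Add an address to the port's hash table unless it is already hashed. */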
void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
	struct ipvl_port *port = ipvlan->port;
	u8 hash;

	hash = (addr->atype == IPVL_IPV6) ?
	       ipvlan_get_v6_hash(&addr->ip6addr) :
	       ipvlan_get_v4_hash(&addr->ip4addr);
	if (hlist_unhashed(&addr->hlnode))
		hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}

void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
	hlist_del_init_rcu(&addr->hlnode);
}

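/* Walk a single slave's address list and return the entry matching @iaddr,
 * or NULL if that slave does not own the address.
 */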
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
				   const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) {
		if (addr_equal(is_v6, addr, iaddr)) {
			ret = addr;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

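/* Check whether any slave on the port already owns @iaddr. */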
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
	struct ipvl_dev *ipvlan;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
		if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

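/* Locate the L3 header of an skb and classify it (*type is set to IPVL_ARP,
 * IPVL_IPV4, IPVL_IPV6 or IPVL_ICMPV6). Returns a pointer to that header,
 * or NULL if the packet is malformed or of an unhandled protocol.
 */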
void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
	void *lyr3h = NULL;

	switch (skb->protocol) {
	case htons(ETH_P_ARP): {
		struct arphdr *arph;

		if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
			return NULL;

		arph = arp_hdr(skb);
		*type = IPVL_ARP;
		lyr3h = arph;
		break;
	}
	case htons(ETH_P_IP): {
		u32 pktlen;
		struct iphdr *ip4h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
			return NULL;

		ip4h = ip_hdr(skb);
		pktlen = skb_ip_totlen(skb);
		if (ip4h->ihl < 5 || ip4h->version != 4)
			return NULL;
		if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
			return NULL;

		*type = IPVL_IPV4;
		lyr3h = ip4h;
		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *ip6h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
			return NULL;

		ip6h = ipv6_hdr(skb);
		if (ip6h->version != 6)
			return NULL;

		*type = IPVL_IPV6;
		lyr3h = ip6h;
		/* Only Neighbour Solicitation pkts need different treatment */
		if (ipv6_addr_any(&ip6h->saddr) &&
		    ip6h->nexthdr == NEXTHDR_ICMP) {
			struct icmp6hdr *icmph;

			if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
				return NULL;

			ip6h = ipv6_hdr(skb);
			icmph = (struct icmp6hdr *)(ip6h + 1);

			if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
				/* Need to access the ipv6 address in body */
				if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
						+ sizeof(struct in6_addr))))
					return NULL;

				ip6h = ipv6_hdr(skb);
				icmph = (struct icmp6hdr *)(ip6h + 1);
			}

			*type = IPVL_ICMPV6;
			lyr3h = icmph;
		}
		break;
	}
#endif
	default:
		return NULL;
	}

	return lyr3h;
}

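/* Hash a destination MAC into the multicast filter bitmap index space. */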
unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
	u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2),
			       ipvlan_jhash_secret);

	return hash & IPVLAN_MAC_FILTER_MASK;
}

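/* Work-queue handler that drains the port's multicast backlog: each queued
 * skb is cloned to every interested slave that is up, and the original is
 * then either transmitted on the master (TX path) or freed (RX path).
 */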
void ipvlan_process_multicast(struct work_struct *work)
{
	struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
	struct ethhdr *ethh;
	struct ipvl_dev *ipvlan;
	struct sk_buff *skb, *nskb;
	struct sk_buff_head list;
	unsigned int len;
	unsigned int mac_hash;
	int ret;
	u8 pkt_type;
	bool tx_pkt;

	__skb_queue_head_init(&list);

	spin_lock_bh(&port->backlog.lock);
	skb_queue_splice_tail_init(&port->backlog, &list);
	spin_unlock_bh(&port->backlog.lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		struct net_device *dev = skb->dev;
		bool consumed = false;

		ethh = eth_hdr(skb);
		tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
		mac_hash = ipvlan_mac_hash(ethh->h_dest);

		if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
			pkt_type = PACKET_BROADCAST;
		else
			pkt_type = PACKET_MULTICAST;

		rcu_read_lock();
		list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
			if (tx_pkt && (ipvlan->dev == skb->dev))
				continue;
			if (!test_bit(mac_hash, ipvlan->mac_filters))
				continue;
			if (!(ipvlan->dev->flags & IFF_UP))
				continue;
			ret = NET_RX_DROP;
			len = skb->len + ETH_HLEN;
			nskb = skb_clone(skb, GFP_ATOMIC);
			local_bh_disable();
			if (nskb) {
				consumed = true;
				nskb->pkt_type = pkt_type;
				nskb->dev = ipvlan->dev;
				if (tx_pkt)
					ret = dev_forward_skb(ipvlan->dev, nskb);
				else
					ret = netif_rx(nskb);
			}
			ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
			local_bh_enable();
		}
		rcu_read_unlock();

		if (tx_pkt) {
			/* If the packet originated here, send it out. */
			skb->dev = port->dev;
			skb->pkt_type = pkt_type;
			dev_queue_xmit(skb);
		} else {
			if (consumed)
				consume_skb(skb);
			else
				kfree_skb(skb);
		}
		dev_put(dev);
		cond_resched();
	}
}

static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
	bool xnet = true;

	if (dev)
		xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

	skb_scrub_packet(skb, xnet);
	if (dev)
		skb->dev = dev;
}

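/* Deliver a unicast frame to the slave owning the destination address.
 * Local (slave-to-slave) packets are forwarded with dev_forward_skb();
 * packets from the wire are retargeted to the slave and handed back to the
 * stack with RX_HANDLER_ANOTHER. RX statistics are updated either way.
 */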
static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
			    bool local)
{
	struct ipvl_dev *ipvlan = addr->master;
	struct net_device *dev = ipvlan->dev;
	unsigned int len;
	rx_handler_result_t ret = RX_HANDLER_CONSUMED;
	bool success = false;
	struct sk_buff *skb = *pskb;

	len = skb->len + ETH_HLEN;
	/* Only packets exchanged between two local slaves need to have
	 * device-up check as well as skb-share check.
	 */
	if (local) {
		if (unlikely(!(dev->flags & IFF_UP))) {
			kfree_skb(skb);
			goto out;
		}

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			goto out;

		*pskb = skb;
	}

	if (local) {
		skb->pkt_type = PACKET_HOST;
		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
			success = true;
	} else {
		skb->dev = dev;
		ret = RX_HANDLER_ANOTHER;
		success = true;
	}

out:
	ipvlan_count_rx(ipvlan, len, success, false);
	return ret;
}

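/* Extract the relevant address from the L3 header found by
 * ipvlan_get_L3_hdr() (destination or source, per @use_dest) and look it up
 * in the port's address hash table.
 */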
struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
				     int addr_type, bool use_dest)
{
	struct ipvl_addr *addr = NULL;

	switch (addr_type) {
#if IS_ENABLED(CONFIG_IPV6)
	case IPVL_IPV6: {
		struct ipv6hdr *ip6h;
		struct in6_addr *i6addr;

		ip6h = (struct ipv6hdr *)lyr3h;
		i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		break;
	}
	case IPVL_ICMPV6: {
		struct nd_msg *ndmh;
		struct in6_addr *i6addr;

		/* Make sure that the Neighbour Solicitation ICMPv6 packets
		 * are handled to avoid DAD issues.
		 */
		ndmh = (struct nd_msg *)lyr3h;
		if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
			i6addr = &ndmh->target;
			addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		}
		break;
	}
#endif
	case IPVL_IPV4: {
		struct iphdr *ip4h;
		__be32 *i4addr;

		ip4h = (struct iphdr *)lyr3h;
		i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i4addr, false);
		break;
	}
	case IPVL_ARP: {
		struct arphdr *arph;
		unsigned char *arp_ptr;
		__be32 dip;

		arph = (struct arphdr *)lyr3h;
		arp_ptr = (unsigned char *)(arph + 1);
		if (use_dest)
			arp_ptr += (2 * port->dev->addr_len) + 4;
		else
			arp_ptr += port->dev->addr_len;

		memcpy(&dip, arp_ptr, 4);
		addr = ipvlan_ht_addr_lookup(port, &dip, false);
		break;
	}
	}

	return addr;
}

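/* Route and transmit an IPv4 packet in the master's namespace (L3/L3S TX
 * path). Returns NET_XMIT_SUCCESS or NET_XMIT_DROP, bumping tx_errors on
 * failure.
 */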
static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
	const struct iphdr *ip4h = ip_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int err, ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		.flowi4_oif = dev->ifindex,
		.flowi4_tos = ip4h->tos & INET_DSCP_MASK,
		.flowi4_flags = FLOWI_FLAG_ANYSRC,
		.flowi4_mark = skb->mark,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}
	skb_dst_set(skb, &rt->dst);

	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	err = ip_local_out(net, NULL, skb);
	if (unlikely(net_xmit_eval(err)))
		DEV_STATS_INC(dev, tx_errors);
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	DEV_STATS_INC(dev, tx_errors);
	kfree_skb(skb);
out:
	return ret;
}

#if IS_ENABLED(CONFIG_IPV6)

static noinline_for_stack int
ipvlan_route_v6_outbound(struct net_device *dev, struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.flowi6_oif = dev->ifindex,
		.daddr = ip6h->daddr,
		.saddr = ip6h->saddr,
		.flowi6_flags = FLOWI_FLAG_ANYSRC,
		.flowlabel = ip6_flowinfo(ip6h),
		.flowi6_mark = skb->mark,
		.flowi6_proto = ip6h->nexthdr,
	};
	struct dst_entry *dst;
	int err;

	dst = ip6_route_output(dev_net(dev), NULL, &fl6);
	err = dst->error;
	if (err) {
		dst_release(dst);
		return err;
	}
	skb_dst_set(skb, dst);
	return 0;
}

static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int err, ret = NET_XMIT_DROP;

	err = ipvlan_route_v6_outbound(dev, skb);
	if (unlikely(err)) {
		DEV_STATS_INC(dev, tx_errors);
		kfree_skb(skb);
		return err;
	}

	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

	err = ip6_local_out(dev_net(dev), NULL, skb);
	if (unlikely(net_xmit_eval(err)))
		DEV_STATS_INC(dev, tx_errors);
	else
		ret = NET_XMIT_SUCCESS;
	return ret;
}
#else
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	return NET_XMIT_DROP;
}
#endif

static int ipvlan_process_outbound(struct sk_buff *skb)
{
	int ret = NET_XMIT_DROP;

	/* The ipvlan is a pseudo-L2 device, so the packets that we receive
	 * carry an L2 header which needs to be discarded before the packet
	 * is processed further in the net-ns of the main device.
	 */
	if (skb_mac_header_was_set(skb)) {
		/* In this mode we don't care about
		 * multicast and broadcast traffic.
		 */
		struct ethhdr *ethh = eth_hdr(skb);

		if (is_multicast_ether_addr(ethh->h_dest)) {
			pr_debug_ratelimited(
				"Dropped {multi|broad}cast of type=[%x]\n",
				ntohs(skb->protocol));
			kfree_skb(skb);
			goto out;
		}

		skb_pull(skb, sizeof(*ethh));
		skb->mac_header = (typeof(skb->mac_header))~0U;
		skb_reset_network_header(skb);
	}

	if (skb->protocol == htons(ETH_P_IPV6))
		ret = ipvlan_process_v6_outbound(skb);
	else if (skb->protocol == htons(ETH_P_IP))
		ret = ipvlan_process_v4_outbound(skb);
	else {
		pr_warn_ratelimited("Dropped outbound packet type=%x\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
	}
out:
	return ret;
}

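/* Defer a multicast/broadcast frame to the port work-queue, recording
 * whether it came from the TX or the RX path. The frame is dropped if the
 * backlog is already full.
 */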
static void ipvlan_multicast_enqueue(struct ipvl_port *port,
				     struct sk_buff *skb, bool tx_pkt)
{
	if (skb->protocol == htons(ETH_P_PAUSE)) {
		kfree_skb(skb);
		return;
	}

	/* Record whether the deferred packet is from the TX or RX path;
	 * deciding this by looking at the mac addresses on the packet would
	 * lead to erroneous decisions. (This would be true for a
	 * loopback-mode on the master device or a hair-pin mode of the
	 * switch.)
	 */
	IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;

	spin_lock(&port->backlog.lock);
	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
		dev_hold(skb->dev);
		__skb_queue_tail(&port->backlog, skb);
		spin_unlock(&port->backlog.lock);
		schedule_work(&port->wq);
	} else {
		spin_unlock(&port->backlog.lock);
		dev_core_stats_rx_dropped_inc(skb->dev);
		kfree_skb(skb);
	}
}

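/* TX path for L3/L3S modes: if the destination address belongs to another
 * slave on this port (and the port is not in VEPA or private mode), deliver
 * it locally; otherwise hand the packet to the master's namespace for
 * routing.
 */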
static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	void *lyr3h;
	struct ipvl_addr *addr;
	int addr_type;

	lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	if (!ipvlan_is_vepa(ipvlan->port)) {
		addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
		if (addr) {
			if (ipvlan_is_private(ipvlan->port)) {
				consume_skb(skb);
				return NET_XMIT_DROP;
			}
			ipvlan_rcv_frame(addr, &skb, true);
			return NET_XMIT_SUCCESS;
		}
	}
out:
	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return ipvlan_process_outbound(skb);
}

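/* TX path for L2 mode: frames addressed to the port's own (shared) MAC are
 * delivered to the owning slave or forwarded to the master, multicast
 * frames are queued for the multicast worker, and everything else goes out
 * through the physical device.
 */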
static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ethhdr *eth = skb_eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (!ipvlan_is_vepa(ipvlan->port) &&
	    ether_addr_equal(eth->h_dest, eth->h_source)) {
		lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
		if (lyr3h) {
			addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
			if (addr) {
				if (ipvlan_is_private(ipvlan->port)) {
					consume_skb(skb);
					return NET_XMIT_DROP;
				}
				ipvlan_rcv_frame(addr, &skb, true);
				return NET_XMIT_SUCCESS;
			}
		}
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;

		/* The packet definitely does not belong to any of the
		 * virtual devices, but the destination is local. So forward
		 * the skb to the main device. At the RX side we just return
		 * RX_PASS for it to be processed further on the stack.
		 */
		dev_forward_skb(ipvlan->phy_dev, skb);
		return NET_XMIT_SUCCESS;

	} else if (is_multicast_ether_addr(eth->h_dest)) {
		skb_reset_mac_header(skb);
		ipvlan_skb_crossing_ns(skb, NULL);
		ipvlan_multicast_enqueue(ipvlan->port, skb, true);
		return NET_XMIT_SUCCESS;
	}

	skb->dev = ipvlan->phy_dev;
	return dev_queue_xmit(skb);
}

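/* Dispatch an outgoing frame according to the port's operating mode
 * (L2, L3 or L3S).
 */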
int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);

	if (!port)
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
		goto out;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_xmit_mode_l2(skb, dev);
	case IPVLAN_MODE_L3:
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
#endif
		return ipvlan_xmit_mode_l3(skb, dev);
	}

	/* Should not reach here */
	WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
out:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
		lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
		if (!lyr3h)
			return true;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
		if (addr)
			return false;
	}

	return true;
}

static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	void *lyr3h;
	int addr_type;
	struct ipvl_addr *addr;
	struct sk_buff *skb = *pskb;
	rx_handler_result_t ret = RX_HANDLER_PASS;

	lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
	if (addr)
		ret = ipvlan_rcv_frame(addr, pskb, false);

out:
	return ret;
}

static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	struct sk_buff *skb = *pskb;
	struct ethhdr *eth = eth_hdr(skb);
	rx_handler_result_t ret = RX_HANDLER_PASS;

	if (is_multicast_ether_addr(eth->h_dest)) {
		if (ipvlan_external_frame(skb, port)) {
			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

			/* External frames are queued for device local
			 * distribution, but a copy is given to master
			 * straight away to avoid sending duplicates later
			 * when work-queue processes this frame. This is
			 * achieved by returning RX_HANDLER_PASS.
			 */
			if (nskb) {
				ipvlan_skb_crossing_ns(nskb, NULL);
				ipvlan_multicast_enqueue(port, nskb, false);
			}
		}
	} else {
		/* Perform like l3 mode for non-multicast packet */
		ret = ipvlan_handle_mode_l3(pskb, port);
	}

	return ret;
}

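/* rx_handler installed on the master device: dispatch incoming frames by
 * port mode. L3S traffic is passed up the stack here and handled later in
 * the input path.
 */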
rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

	if (!port)
		return RX_HANDLER_PASS;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_handle_mode_l2(pskb, port);
	case IPVLAN_MODE_L3:
		return ipvlan_handle_mode_l3(pskb, port);
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
		return RX_HANDLER_PASS;
#endif
	}

	/* Should not reach here */
	WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}