xref: /linux/net/ipv6/ip6_input.c (revision fcee7d82f27d6a8b1ddc5bbefda59b4e441e9bc0)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	IPv6 input
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *	Ian P. Morris		<I.P.Morris@soton.ac.uk>
9  *
 *	Based on linux/net/ipv4/ip_input.c
11  */
12 /* Changes
13  *
14  *	Mitsuru KANDA @USAGI and
15  *	YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs().
16  */
17 
18 #include <linux/errno.h>
19 #include <linux/types.h>
20 #include <linux/socket.h>
21 #include <linux/sockios.h>
22 #include <linux/net.h>
23 #include <linux/netdevice.h>
24 #include <linux/in6.h>
25 #include <linux/icmpv6.h>
26 #include <linux/mroute6.h>
27 #include <linux/slab.h>
28 #include <linux/indirect_call_wrapper.h>
29 
30 #include <linux/netfilter.h>
31 #include <linux/netfilter_ipv6.h>
32 
33 #include <net/sock.h>
34 #include <net/snmp.h>
35 #include <net/udp.h>
36 
37 #include <net/ipv6.h>
38 #include <net/protocol.h>
39 #include <net/transp_v6.h>
40 #include <net/rawv6.h>
41 #include <net/ndisc.h>
42 #include <net/ip6_route.h>
43 #include <net/addrconf.h>
44 #include <net/xfrm.h>
45 #include <net/inet_ecn.h>
46 #include <net/dst_metadata.h>
47 #include <net/inet6_hashtables.h>
48 
tcp_v6_early_demux(struct sk_buff * skb)49 static void tcp_v6_early_demux(struct sk_buff *skb)
50 {
51 	struct net *net = dev_net_rcu(skb->dev);
52 	const struct ipv6hdr *hdr;
53 	const struct tcphdr *th;
54 	struct sock *sk;
55 
56 	if (skb->pkt_type != PACKET_HOST)
57 		return;
58 
59 	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
60 				sizeof(struct tcphdr)))
61 		return;
62 
63 	hdr = ipv6_hdr(skb);
64 	th = tcp_hdr(skb);
65 
66 	if (th->doff < sizeof(struct tcphdr) / 4)
67 		return;
68 
69 	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
70 	sk = __inet6_lookup_established(net, &hdr->saddr, th->source,
71 					&hdr->daddr, ntohs(th->dest),
72 					inet6_iif(skb), inet6_sdif(skb));
73 	if (sk) {
74 		skb->sk = sk;
75 		skb->destructor = sock_edemux;
76 		if (sk_fullsock(sk)) {
77 			struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
78 
79 			if (dst)
80 				dst = dst_check(dst, sk->sk_rx_dst_cookie);
81 			if (dst &&
82 			    sk->sk_rx_dst_ifindex == skb->skb_iif)
83 				skb_dst_set_noref(skb, dst);
84 		}
85 	}
86 }
87 
ip6_rcv_finish_core(struct net * net,struct sock * sk,struct sk_buff * skb)88 static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
89 				struct sk_buff *skb)
90 {
91 	if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
92 	    !skb_dst(skb) && !skb->sk) {
93 		switch (ipv6_hdr(skb)->nexthdr) {
94 		case IPPROTO_TCP:
95 			if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux))
96 				tcp_v6_early_demux(skb);
97 			break;
98 		case IPPROTO_UDP:
99 			if (READ_ONCE(net->ipv4.sysctl_udp_early_demux))
100 				udp_v6_early_demux(skb);
101 			break;
102 		}
103 	}
104 
105 	if (!skb_valid_dst(skb))
106 		ip6_route_input(skb);
107 }
108 
/* NF_INET_PRE_ROUTING okfn: route the packet and hand it to dst_input()
 * (local delivery, forwarding or multicast input, per the resolved dst).
 */
int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* if ingress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip6_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;	/* consumed by the l3mdev handler */
	ip6_rcv_finish_core(net, sk, skb);

	return dst_input(skb);
}
121 
ip6_sublist_rcv_finish(struct list_head * head)122 static void ip6_sublist_rcv_finish(struct list_head *head)
123 {
124 	struct sk_buff *skb, *next;
125 
126 	list_for_each_entry_safe(skb, next, head, list) {
127 		skb_list_del_init(skb);
128 		dst_input(skb);
129 	}
130 }
131 
ip6_can_use_hint(const struct sk_buff * skb,const struct sk_buff * hint)132 static bool ip6_can_use_hint(const struct sk_buff *skb,
133 			     const struct sk_buff *hint)
134 {
135 	return hint && !skb_dst(skb) &&
136 	       ipv6_addr_equal(&ipv6_hdr(hint)->daddr, &ipv6_hdr(skb)->daddr);
137 }
138 
ip6_extract_route_hint(const struct net * net,struct sk_buff * skb)139 static struct sk_buff *ip6_extract_route_hint(const struct net *net,
140 					      struct sk_buff *skb)
141 {
142 	if (fib6_routes_require_src(net) || fib6_has_custom_rules(net) ||
143 	    IP6CB(skb)->flags & IP6SKB_MULTIPATH)
144 		return NULL;
145 
146 	return skb;
147 }
148 
/* Finish input processing for a list of skbs that passed PRE_ROUTING:
 * resolve a route for each packet, and batch consecutive packets sharing
 * the same dst into sublists so dst_input() runs cache-hot per route.
 */
static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
				struct list_head *head)
{
	/* hint: most recent packet whose route result may be reused */
	struct sk_buff *skb, *next, *hint = NULL;
	struct dst_entry *curr_dst = NULL;
	LIST_HEAD(sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		struct dst_entry *dst;

		skb_list_del_init(skb);
		/* if ingress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip6_rcv(skb);
		if (!skb)
			continue;

		/* Reuse the hint's dst when the destination matches,
		 * skipping the full route lookup in the core path.
		 */
		if (ip6_can_use_hint(skb, hint))
			skb_dst_copy(skb, hint);
		else
			ip6_rcv_finish_core(net, sk, skb);
		dst = skb_dst(skb);
		if (curr_dst != dst) {
			hint = ip6_extract_route_hint(net, skb);

			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip6_sublist_rcv_finish(&sublist);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dst = dst;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip6_sublist_rcv_finish(&sublist);
}
187 
/* Validate and prepare a freshly received IPv6 packet.
 *
 * Performs the RFC-mandated header sanity checks, updates MIB counters,
 * records the incoming interface in the control block, trims the skb to
 * the declared payload length and parses a leading hop-by-hop options
 * header if present.
 *
 * Returns the (possibly reallocated by skb_share_check()) skb on success,
 * or NULL when the packet was dropped; in the latter case the skb has
 * already been freed.
 */
static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
				    struct net *net)
{
	enum skb_drop_reason reason;
	const struct ipv6hdr *hdr;
	u32 pkt_len;
	struct inet6_dev *idev;

	if (skb->pkt_type == PACKET_OTHERHOST) {
		dev_core_stats_rx_otherhost_dropped_inc(skb->dev);
		kfree_skb_reason(skb, SKB_DROP_REASON_OTHERHOST);
		return NULL;
	}

	rcu_read_lock();

	idev = __in6_dev_get(skb->dev);

	__IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len);

	SKB_DR_SET(reason, NOT_SPECIFIED);
	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
	    !idev || unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
		if (idev && unlikely(READ_ONCE(idev->cnf.disable_ipv6)))
			SKB_DR_SET(reason, IPV6DISABLED);
		goto drop;
	}

	memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));

	/*
	 * Store incoming device index. When the packet will
	 * be queued, we cannot refer to skb->dev anymore.
	 *
	 * BTW, when we send a packet for our own local address on a
	 * non-loopback interface (e.g. ethX), it is being delivered
	 * via the loopback interface (lo) here; skb->dev = loopback_dev.
	 * It, however, should be considered as if it is being
	 * arrived via the sending interface (ethX), because of the
	 * nature of scoping architecture. --yoshfuji
	 */
	IP6CB(skb)->iif = skb_valid_dst(skb) ?
				ip6_dst_idev(skb_dst(skb))->dev->ifindex :
				dev->ifindex;

	if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
		goto err;

	hdr = ipv6_hdr(skb);

	if (hdr->version != 6) {
		SKB_DR_SET(reason, UNHANDLED_PROTO);
		goto err;
	}

	__IP6_ADD_STATS(net, idev,
			IPSTATS_MIB_NOECTPKTS +
				(ipv6_get_dsfield(hdr) & INET_ECN_MASK),
			max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
	/*
	 * RFC4291 2.5.3
	 * The loopback address must not be used as the source address in IPv6
	 * packets that are sent outside of a single node. [..]
	 * A packet received on an interface with a destination address
	 * of loopback must be dropped.
	 */
	if ((ipv6_addr_loopback(&hdr->saddr) ||
	     ipv6_addr_loopback(&hdr->daddr)) &&
	    !(dev->flags & IFF_LOOPBACK) &&
	    !netif_is_l3_master(dev))
		goto err;

	/* RFC4291 Errata ID: 3480
	 * Interface-Local scope spans only a single interface on a
	 * node and is useful only for loopback transmission of
	 * multicast.  Packets with interface-local scope received
	 * from another node must be discarded.
	 */
	if (!(skb->pkt_type == PACKET_LOOPBACK ||
	      dev->flags & IFF_LOOPBACK) &&
	    ipv6_addr_is_multicast(&hdr->daddr) &&
	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
		goto err;

	/* If enabled, drop unicast packets that were encapsulated in link-layer
	 * multicast or broadcast to protect against the so-called "hole-196"
	 * attack in 802.11 wireless.
	 */
	if (!ipv6_addr_is_multicast(&hdr->daddr) &&
	    (skb->pkt_type == PACKET_BROADCAST ||
	     skb->pkt_type == PACKET_MULTICAST) &&
	    READ_ONCE(idev->cnf.drop_unicast_in_l2_multicast)) {
		SKB_DR_SET(reason, UNICAST_IN_L2_MULTICAST);
		goto err;
	}

	/* RFC4291 2.7
	 * Nodes must not originate a packet to a multicast address whose scope
	 * field contains the reserved value 0; if such a packet is received, it
	 * must be silently dropped.
	 */
	if (ipv6_addr_is_multicast(&hdr->daddr) &&
	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0)
		goto err;

	/*
	 * RFC4291 2.7
	 * Multicast addresses must not be used as source addresses in IPv6
	 * packets or appear in any Routing header.
	 */
	if (ipv6_addr_is_multicast(&hdr->saddr))
		goto err;

	skb->transport_header = skb->network_header + sizeof(*hdr);
	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);

	pkt_len = ipv6_payload_len(skb, hdr);

	/* pkt_len may be zero if Jumbo payload option is present */
	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
			__IP6_INC_STATS(net,
					idev, IPSTATS_MIB_INTRUNCATEDPKTS);
			SKB_DR_SET(reason, PKT_TOO_SMALL);
			goto drop;
		}
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
			goto err;
		/* trimming may have reallocated the header */
		hdr = ipv6_hdr(skb);
	}

	if (hdr->nexthdr == NEXTHDR_HOP) {
		/* ipv6_parse_hopopts() frees the skb itself on error */
		if (ipv6_parse_hopopts(skb) < 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
			rcu_read_unlock();
			return NULL;
		}
	}

	rcu_read_unlock();

	/* Must drop socket now because of tproxy. */
	if (!skb_sk_is_prefetched(skb))
		skb_orphan(skb);

	return skb;
err:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
	SKB_DR_OR(reason, IP_INHDR);
drop:
	rcu_read_unlock();
	kfree_skb_reason(skb, reason);
	return NULL;
}
343 
ipv6_rcv(struct sk_buff * skb,struct net_device * dev,struct packet_type * pt,struct net_device * orig_dev)344 int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
345 {
346 	struct net *net = dev_net(skb->dev);
347 
348 	skb = ip6_rcv_core(skb, dev, net);
349 	if (skb == NULL)
350 		return NET_RX_DROP;
351 	return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
352 		       net, NULL, skb, dev, NULL,
353 		       ip6_rcv_finish);
354 }
355 
ip6_sublist_rcv(struct list_head * head,struct net_device * dev,struct net * net)356 static void ip6_sublist_rcv(struct list_head *head, struct net_device *dev,
357 			    struct net *net)
358 {
359 	NF_HOOK_LIST(NFPROTO_IPV6, NF_INET_PRE_ROUTING, net, NULL,
360 		     head, dev, NULL, ip6_rcv_finish);
361 	ip6_list_rcv_finish(net, NULL, head);
362 }
363 
364 /* Receive a list of IPv6 packets */
ipv6_list_rcv(struct list_head * head,struct packet_type * pt,struct net_device * orig_dev)365 void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
366 		   struct net_device *orig_dev)
367 {
368 	struct net_device *curr_dev = NULL;
369 	struct net *curr_net = NULL;
370 	struct sk_buff *skb, *next;
371 	LIST_HEAD(sublist);
372 
373 	list_for_each_entry_safe(skb, next, head, list) {
374 		struct net_device *dev = skb->dev;
375 		struct net *net = dev_net(dev);
376 
377 		skb_list_del_init(skb);
378 		skb = ip6_rcv_core(skb, dev, net);
379 		if (skb == NULL)
380 			continue;
381 
382 		if (curr_dev != dev || curr_net != net) {
383 			/* dispatch old sublist */
384 			if (!list_empty(&sublist))
385 				ip6_sublist_rcv(&sublist, curr_dev, curr_net);
386 			/* start new sublist */
387 			INIT_LIST_HEAD(&sublist);
388 			curr_dev = dev;
389 			curr_net = net;
390 		}
391 		list_add_tail(&skb->list, &sublist);
392 	}
393 	/* dispatch final sublist */
394 	if (!list_empty(&sublist))
395 		ip6_sublist_rcv(&sublist, curr_dev, curr_net);
396 }
397 
398 INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *));
399 
400 /*
401  *	Deliver the packet to the host
402  */
/* Deliver a locally-destined packet to its transport protocol handler.
 *
 * Walks the extension header chain by re-entering at the resubmit labels,
 * gives matching raw sockets their copy, then invokes the registered
 * inet6_protocol handler.  @nexthdr is used directly when @have_final is
 * true; otherwise the next header value is read from the packet at
 * IP6CB(skb)->nhoff.  Uses rcu_dereference(), so the caller must hold
 * rcu_read_lock().
 */
void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
			      bool have_final)
{
	/* extension headers traversed so far; a hop-by-hop header parsed
	 * earlier counts as one.  Bounded by IP6_MAX_EXT_HDRS_CNT below.
	 */
	int exthdr_cnt = IP6CB(skb)->flags & IP6SKB_HOPBYHOP ? 1 : 0;
	const struct inet6_protocol *ipprot;
	struct inet6_dev *idev;
	unsigned int nhoff;
	SKB_DR(reason);
	bool raw;

	/*
	 *	Parse extension headers
	 */

resubmit:
	idev = ip6_dst_idev(skb_dst(skb));
	nhoff = IP6CB(skb)->nhoff;
	if (!have_final) {
		if (!pskb_pull(skb, skb_transport_offset(skb)))
			goto discard;
		nexthdr = skb_network_header(skb)[nhoff];
	}

resubmit_final:
	raw = raw6_local_deliver(skb, nexthdr);
	ipprot = rcu_dereference(inet6_protos[nexthdr]);
	if (ipprot) {
		int ret;

		if (have_final) {
			if (!(ipprot->flags & INET6_PROTO_FINAL)) {
				/* Once we've seen a final protocol don't
				 * allow encapsulation on any non-final
				 * ones. This allows foo in UDP encapsulation
				 * to work.
				 */
				goto discard;
			}
		} else if (ipprot->flags & INET6_PROTO_FINAL) {
			const struct ipv6hdr *hdr;
			int sdif = inet6_sdif(skb);
			struct net_device *dev;

			/* Only do this once for first final protocol */
			have_final = true;


			skb_postpull_rcsum(skb, skb_network_header(skb),
					   skb_network_header_len(skb));
			hdr = ipv6_hdr(skb);

			/* skb->dev passed may be master dev for vrfs. */
			if (sdif) {
				dev = dev_get_by_index_rcu(net, sdif);
				if (!dev)
					goto discard;
			} else {
				dev = skb->dev;
			}

			/* Multicast destinations must be joined on the
			 * receiving device, except for MLD messages.
			 */
			if (ipv6_addr_is_multicast(&hdr->daddr) &&
			    !ipv6_chk_mcast_addr(dev, &hdr->daddr,
						 &hdr->saddr) &&
			    !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb))) {
				SKB_DR_SET(reason, IP_INADDRERRORS);
				goto discard;
			}
		}
		if (!(ipprot->flags & INET6_PROTO_NOPOLICY)) {
			if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				SKB_DR_SET(reason, XFRM_POLICY);
				goto discard;
			}
			nf_reset_ct(skb);
		}

		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v6_rcv, udpv6_rcv,
				      skb);
		if (ret > 0) {
			if (ipprot->flags & INET6_PROTO_FINAL) {
				/* Not an extension header, most likely UDP
				 * encapsulation. Use return value as nexthdr
				 * protocol not nhoff (which presumably is
				 * not set by handler).
				 */
				nexthdr = ret;
				goto resubmit_final;
			} else {
				/* Handler consumed one extension header;
				 * cap the chain length before continuing.
				 */
				if (unlikely(exthdr_cnt++ >= IP6_MAX_EXT_HDRS_CNT)) {
					SKB_DR_SET(reason, IPV6_TOO_MANY_EXTHDRS);
					goto discard;
				}
				goto resubmit;
			}
		} else if (ret == 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
		}
	} else {
		/* No registered handler for this protocol number. */
		if (!raw) {
			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				__IP6_INC_STATS(net, idev,
						IPSTATS_MIB_INUNKNOWNPROTOS);
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_UNK_NEXTHDR, nhoff);
				SKB_DR_SET(reason, IP_NOPROTO);
			} else {
				SKB_DR_SET(reason, XFRM_POLICY);
			}
			kfree_skb_reason(skb, reason);
		} else {
			/* A raw socket took it; count as delivered. */
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
			consume_skb(skb);
		}
	}
	return;

discard:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
	kfree_skb_reason(skb, reason);
}
523 
ip6_input_finish(struct net * net,struct sock * sk,struct sk_buff * skb)524 static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
525 {
526 	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
527 		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
528 				IPSTATS_MIB_INDISCARDS);
529 		kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
530 		return 0;
531 	}
532 
533 	skb_clear_delivery_time(skb);
534 	ip6_protocol_deliver_rcu(net, skb, 0, false);
535 
536 	return 0;
537 }
538 
539 
ip6_input(struct sk_buff * skb)540 int ip6_input(struct sk_buff *skb)
541 {
542 	int res;
543 
544 	rcu_read_lock();
545 	res = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN,
546 		      dev_net_rcu(skb->dev), NULL, skb, skb->dev, NULL,
547 		      ip6_input_finish);
548 	rcu_read_unlock();
549 
550 	return res;
551 }
552 EXPORT_SYMBOL_GPL(ip6_input);
553 
/* dst_input() entry point for multicast packets.
 *
 * Decides whether the packet should be delivered locally (the receiving
 * device has joined the group) and, when multicast forwarding is enabled,
 * whether it should also be handed to the multicast routing code.
 *
 * Returns 0, or -ENODEV when the slave device indicated by sdif is gone.
 */
int ip6_mc_input(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int sdif = inet6_sdif(skb);
	const struct ipv6hdr *hdr;
	bool deliver;

	__IP6_UPD_PO_STATS(skb_dst_dev_net_rcu(skb),
			   __in6_dev_get_safely(dev), IPSTATS_MIB_INMCAST,
			   skb->len);

	/* skb->dev passed may be master dev for vrfs. */
	if (sdif) {
		dev = dev_get_by_index_rcu(dev_net_rcu(dev), sdif);
		if (!dev) {
			kfree_skb(skb);
			return -ENODEV;
		}
	}

	hdr = ipv6_hdr(skb);
	/* deliver locally only if the device joined this group */
	deliver = ipv6_chk_mcast_addr(dev, &hdr->daddr, NULL);

#ifdef CONFIG_IPV6_MROUTE
	/*
	 *      IPv6 multicast router mode is now supported ;)
	 */
	if (atomic_read(&dev_net_rcu(skb->dev)->ipv6.devconf_all->mc_forwarding) &&
	    !(ipv6_addr_type(&hdr->daddr) &
	      (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&
	    likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
		/*
		 * Okay, we try to forward - split and duplicate
		 * packets.
		 */
		struct sk_buff *skb2;
		struct inet6_skb_parm *opt = IP6CB(skb);

		/* Check for MLD */
		if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
			/* Check if this is a mld message */
			u8 nexthdr = hdr->nexthdr;
			__be16 frag_off;
			int offset;

			/* Check if the value of Router Alert
			 * is for MLD (0x0000).
			 */
			if (opt->ra == htons(IPV6_OPT_ROUTERALERT_MLD)) {
				/* MLD-alerted packets are delivered locally
				 * only if they really are MLD messages.
				 */
				deliver = false;

				if (!ipv6_ext_hdr(nexthdr)) {
					/* BUG */
					goto out;
				}
				offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
							  &nexthdr, &frag_off);
				if (offset < 0)
					goto out;

				if (ipv6_is_mld(skb, nexthdr, offset))
					deliver = true;

				goto out;
			}
			/* unknown RA - process it normally */
		}

		/* Clone when both local delivery and forwarding are needed;
		 * otherwise hand the original over and NULL out skb.
		 */
		if (deliver) {
			skb2 = skb_clone(skb, GFP_ATOMIC);
		} else {
			skb2 = skb;
			skb = NULL;
		}

		if (skb2)
			ip6_mr_input(skb2);
	}
out:
#endif
	if (likely(deliver)) {
		ip6_input(skb);
	} else {
		/* discard */
		kfree_skb(skb);
	}

	return 0;
}
643