xref: /linux/net/ipv6/icmp.c (revision 0dd9ac63ce26ec87b080ca9c3e6efed33c23ace6)
1 /*
2  *	Internet Control Message Protocol (ICMPv6)
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *
8  *	Based on net/ipv4/icmp.c
9  *
10  *	RFC 1885
11  *
12  *	This program is free software; you can redistribute it and/or
13  *      modify it under the terms of the GNU General Public License
14  *      as published by the Free Software Foundation; either version
15  *      2 of the License, or (at your option) any later version.
16  */
17 
18 /*
19  *	Changes:
20  *
21  *	Andi Kleen		:	exception handling
22  *	Andi Kleen			add rate limits. never reply to an icmp.
23  *					add more length checks and other fixes.
24  *	yoshfuji		:	ensure to send parameter problem for
25  *					fragments.
26  *	YOSHIFUJI Hideaki @USAGI:	added sysctl for icmp rate limit.
27  *	Randy Dunlap and
28  *	YOSHIFUJI Hideaki @USAGI:	Per-interface statistics support
29  *	Kazunori MIYAZAWA @USAGI:       change output process to use ip6_append_data
30  */
31 
32 #include <linux/module.h>
33 #include <linux/errno.h>
34 #include <linux/types.h>
35 #include <linux/socket.h>
36 #include <linux/in.h>
37 #include <linux/kernel.h>
38 #include <linux/sockios.h>
39 #include <linux/net.h>
40 #include <linux/skbuff.h>
41 #include <linux/init.h>
42 #include <linux/netfilter.h>
43 #include <linux/slab.h>
44 
45 #ifdef CONFIG_SYSCTL
46 #include <linux/sysctl.h>
47 #endif
48 
49 #include <linux/inet.h>
50 #include <linux/netdevice.h>
51 #include <linux/icmpv6.h>
52 
53 #include <net/ip.h>
54 #include <net/sock.h>
55 
56 #include <net/ipv6.h>
57 #include <net/ip6_checksum.h>
58 #include <net/protocol.h>
59 #include <net/raw.h>
60 #include <net/rawv6.h>
61 #include <net/transp_v6.h>
62 #include <net/ip6_route.h>
63 #include <net/addrconf.h>
64 #include <net/icmp.h>
65 #include <net/xfrm.h>
66 #include <net/inet_common.h>
67 
68 #include <asm/uaccess.h>
69 #include <asm/system.h>
70 
71 /*
72  *	The ICMP socket(s). This is the most convenient way to flow control
73  *	our ICMP output as well as maintain a clean interface throughout
74  *	all layers. All Socketless IP sends will soon be gone.
75  *
76  *	On SMP we have one ICMP socket per-cpu.
77  */
78 static inline struct sock *icmpv6_sk(struct net *net)
79 {
80 	return net->ipv6.icmp_sk[smp_processor_id()];
81 }
82 
83 static int icmpv6_rcv(struct sk_buff *skb);
84 
85 static const struct inet6_protocol icmpv6_protocol = {
86 	.handler	=	icmpv6_rcv,
87 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
88 };
89 
90 static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
91 {
92 	struct sock *sk;
93 
94 	local_bh_disable();
95 
96 	sk = icmpv6_sk(net);
97 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
98 		/* This can happen if the output path (e.g. SIT or
99 		 * ip6ip6 tunnel) signals dst_link_failure() for an
100 		 * outgoing ICMP6 packet.
101 		 */
102 		local_bh_enable();
103 		return NULL;
104 	}
105 	return sk;
106 }
107 
108 static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
109 {
110 	spin_unlock_bh(&sk->sk_lock.slock);
111 }
112 
113 /*
114  * Slightly more convenient version of icmpv6_send.
115  */
116 void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
117 {
118 	icmpv6_send(skb, ICMPV6_PARAMPROB, code, pos);
119 	kfree_skb(skb);
120 }
121 
122 /*
123  *	Figure out whether we may reply to this packet with an icmp error.
124  *
125  *	We do not reply if:
126  *	- it was an icmp error message.
127  *	- it is truncated, so that it is known that the protocol is ICMPV6
128  *	  (i.e. in the middle of some exthdr)
129  *
130  *	--ANK (980726)
131  */
132 
133 static int is_ineligible(struct sk_buff *skb)
134 {
135 	int ptr = (u8 *)(ipv6_hdr(skb) + 1) - skb->data;
136 	int len = skb->len - ptr;
137 	__u8 nexthdr = ipv6_hdr(skb)->nexthdr;
138 
139 	if (len < 0)
140 		return 1;
141 
142 	ptr = ipv6_skip_exthdr(skb, ptr, &nexthdr);
143 	if (ptr < 0)
144 		return 0;
145 	if (nexthdr == IPPROTO_ICMPV6) {
146 		u8 _type, *tp;
147 		tp = skb_header_pointer(skb,
148 			ptr+offsetof(struct icmp6hdr, icmp6_type),
149 			sizeof(_type), &_type);
150 		if (tp == NULL ||
151 		    !(*tp & ICMPV6_INFOMSG_MASK))
152 			return 1;
153 	}
154 	return 0;
155 }
156 
157 /*
158  * Check the ICMP output rate limit
159  */
160 static inline int icmpv6_xrlim_allow(struct sock *sk, u8 type,
161 				     struct flowi *fl)
162 {
163 	struct dst_entry *dst;
164 	struct net *net = sock_net(sk);
165 	int res = 0;
166 
167 	/* Informational messages are not limited. */
168 	if (type & ICMPV6_INFOMSG_MASK)
169 		return 1;
170 
171 	/* Do not limit pmtu discovery, it would break it. */
172 	if (type == ICMPV6_PKT_TOOBIG)
173 		return 1;
174 
175 	/*
176 	 * Look up the output route.
177 	 * XXX: perhaps the expire for routing entries cloned by
178 	 * this lookup should be more aggressive (not longer than timeout).
179 	 */
180 	dst = ip6_route_output(net, sk, fl);
181 	if (dst->error) {
182 		IP6_INC_STATS(net, ip6_dst_idev(dst),
183 			      IPSTATS_MIB_OUTNOROUTES);
184 	} else if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) {
185 		res = 1;
186 	} else {
187 		struct rt6_info *rt = (struct rt6_info *)dst;
188 		int tmo = net->ipv6.sysctl.icmpv6_time;
189 
190 		/* Give more bandwidth to wider prefixes. */
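		/* Each full 32 bits by which the prefix falls short of /128
		 * halves the rate-limit interval: e.g. tmo >> 2 for a /64
		 * route, tmo >> 4 for a default route.
		 */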
191 		if (rt->rt6i_dst.plen < 128)
192 			tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
193 
194 		res = xrlim_allow(dst, tmo);
195 	}
196 	dst_release(dst);
197 	return res;
198 }
199 
200 /*
201  *	An inline helper for the "simple" if statement below;
202  *	checks whether a parameter problem report is caused by an
203  *	unrecognized IPv6 option whose Option Type has its
204  *	highest-order two bits set to 10.
205  */
206 
207 static __inline__ int opt_unrec(struct sk_buff *skb, __u32 offset)
208 {
209 	u8 _optval, *op;
210 
211 	offset += skb_network_offset(skb);
212 	op = skb_header_pointer(skb, offset, sizeof(_optval), &_optval);
213 	if (op == NULL)
214 		return 1;
215 	return (*op & 0xC0) == 0x80;
216 }
217 
218 static int icmpv6_push_pending_frames(struct sock *sk, struct flowi *fl, struct icmp6hdr *thdr, int len)
219 {
220 	struct sk_buff *skb;
221 	struct icmp6hdr *icmp6h;
222 	int err = 0;
223 
224 	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
225 		goto out;
226 
227 	icmp6h = icmp6_hdr(skb);
228 	memcpy(icmp6h, thdr, sizeof(struct icmp6hdr));
229 	icmp6h->icmp6_cksum = 0;
230 
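	/* Fill in the ICMPv6 checksum: fold in the payload checksums that
	 * ip6_append_data() accumulated in skb->csum (one skb on the write
	 * queue in the common case, several if the message was fragmented),
	 * add the ICMPv6 header itself, and close with the IPv6
	 * pseudo-header via csum_ipv6_magic().
	 */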
231 	if (skb_queue_len(&sk->sk_write_queue) == 1) {
232 		skb->csum = csum_partial(icmp6h,
233 					sizeof(struct icmp6hdr), skb->csum);
234 		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
235 						      &fl->fl6_dst,
236 						      len, fl->proto,
237 						      skb->csum);
238 	} else {
239 		__wsum tmp_csum = 0;
240 
241 		skb_queue_walk(&sk->sk_write_queue, skb) {
242 			tmp_csum = csum_add(tmp_csum, skb->csum);
243 		}
244 
245 		tmp_csum = csum_partial(icmp6h,
246 					sizeof(struct icmp6hdr), tmp_csum);
247 		icmp6h->icmp6_cksum = csum_ipv6_magic(&fl->fl6_src,
248 						      &fl->fl6_dst,
249 						      len, fl->proto,
250 						      tmp_csum);
251 	}
252 	ip6_push_pending_frames(sk);
253 out:
254 	return err;
255 }
256 
257 struct icmpv6_msg {
258 	struct sk_buff	*skb;
259 	int		offset;
260 	uint8_t		type;
261 };
262 
263 static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
264 {
265 	struct icmpv6_msg *msg = (struct icmpv6_msg *) from;
266 	struct sk_buff *org_skb = msg->skb;
267 	__wsum csum = 0;
268 
269 	csum = skb_copy_and_csum_bits(org_skb, msg->offset + offset,
270 				      to, len, csum);
271 	skb->csum = csum_block_add(skb->csum, csum, odd);
272 	if (!(msg->type & ICMPV6_INFOMSG_MASK))
273 		nf_ct_attach(skb, org_skb);
274 	return 0;
275 }
276 
277 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
278 static void mip6_addr_swap(struct sk_buff *skb)
279 {
280 	struct ipv6hdr *iph = ipv6_hdr(skb);
281 	struct inet6_skb_parm *opt = IP6CB(skb);
282 	struct ipv6_destopt_hao *hao;
283 	struct in6_addr tmp;
284 	int off;
285 
286 	if (opt->dsthao) {
287 		off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
288 		if (likely(off >= 0)) {
289 			hao = (struct ipv6_destopt_hao *)
290 					(skb_network_header(skb) + off);
291 			ipv6_addr_copy(&tmp, &iph->saddr);
292 			ipv6_addr_copy(&iph->saddr, &hao->addr);
293 			ipv6_addr_copy(&hao->addr, &tmp);
294 		}
295 	}
296 }
297 #else
298 static inline void mip6_addr_swap(struct sk_buff *skb) {}
299 #endif
300 
301 /*
302  *	Send an ICMP message in response to a packet in error
303  */
304 void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
305 {
306 	struct net *net = dev_net(skb->dev);
307 	struct inet6_dev *idev = NULL;
308 	struct ipv6hdr *hdr = ipv6_hdr(skb);
309 	struct sock *sk;
310 	struct ipv6_pinfo *np;
311 	struct in6_addr *saddr = NULL;
312 	struct dst_entry *dst;
313 	struct dst_entry *dst2;
314 	struct icmp6hdr tmp_hdr;
315 	struct flowi fl;
316 	struct flowi fl2;
317 	struct icmpv6_msg msg;
318 	int iif = 0;
319 	int addr_type = 0;
320 	int len;
321 	int hlimit;
322 	int err = 0;
323 
324 	if ((u8 *)hdr < skb->head ||
325 	    (skb->network_header + sizeof(*hdr)) > skb->tail)
326 		return;
327 
328 	/*
329 	 *	Make sure we respect the rules,
330 	 *	i.e. RFC 1885 2.4(e).
331 	 *	Rule (e.1) is enforced by not using icmpv6_send
332 	 *	in any code that processes icmp errors.
333 	 */
334 	addr_type = ipv6_addr_type(&hdr->daddr);
335 
336 	if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0))
337 		saddr = &hdr->daddr;
338 
339 	/*
340 	 *	Dest addr check
341 	 */
342 
343 	if ((addr_type & IPV6_ADDR_MULTICAST || skb->pkt_type != PACKET_HOST)) {
344 		if (type != ICMPV6_PKT_TOOBIG &&
345 		    !(type == ICMPV6_PARAMPROB &&
346 		      code == ICMPV6_UNK_OPTION &&
347 		      (opt_unrec(skb, info))))
348 			return;
349 
350 		saddr = NULL;
351 	}
352 
353 	addr_type = ipv6_addr_type(&hdr->saddr);
354 
355 	/*
356 	 *	Source addr check
357 	 */
358 
359 	if (addr_type & IPV6_ADDR_LINKLOCAL)
360 		iif = skb->dev->ifindex;
361 
362 	/*
363 	 *	Must not send error if the source does not uniquely
364 	 *	identify a single node (RFC2463 Section 2.4).
365 	 *	We check unspecified / multicast addresses here,
366 	 *	and anycast addresses will be checked later.
367 	 */
368 	if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) {
369 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: addr_any/mcast source\n");
370 		return;
371 	}
372 
373 	/*
374 	 *	Never answer an ICMP error message.
375 	 */
376 	if (is_ineligible(skb)) {
377 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: no reply to icmp error\n");
378 		return;
379 	}
380 
381 	mip6_addr_swap(skb);
382 
383 	memset(&fl, 0, sizeof(fl));
384 	fl.proto = IPPROTO_ICMPV6;
385 	ipv6_addr_copy(&fl.fl6_dst, &hdr->saddr);
386 	if (saddr)
387 		ipv6_addr_copy(&fl.fl6_src, saddr);
388 	fl.oif = iif;
389 	fl.fl_icmp_type = type;
390 	fl.fl_icmp_code = code;
391 	security_skb_classify_flow(skb, &fl);
392 
393 	sk = icmpv6_xmit_lock(net);
394 	if (sk == NULL)
395 		return;
396 	np = inet6_sk(sk);
397 
398 	if (!icmpv6_xrlim_allow(sk, type, &fl))
399 		goto out;
400 
401 	tmp_hdr.icmp6_type = type;
402 	tmp_hdr.icmp6_code = code;
403 	tmp_hdr.icmp6_cksum = 0;
404 	tmp_hdr.icmp6_pointer = htonl(info);
405 
406 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
407 		fl.oif = np->mcast_oif;
408 
409 	err = ip6_dst_lookup(sk, &dst, &fl);
410 	if (err)
411 		goto out;
412 
413 	/*
414 	 * We won't send an icmp error if the destination is a known
415 	 * anycast address.
416 	 */
417 	if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) {
418 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6_send: acast source\n");
419 		goto out_dst_release;
420 	}
421 
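	/* IPsec policy pass: first run the forward flow through xfrm_lookup().
	 * If that transforms the route (dst != dst2) we can send directly;
	 * if policy rejects it (-EPERM), retry with the reverse-decoded flow
	 * and XFRM_LOOKUP_ICMP so the error can still reach the sender of
	 * the offending packet through the policy that matched it.
	 */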
422 	/* No need to clone since we're just using its address. */
423 	dst2 = dst;
424 
425 	err = xfrm_lookup(net, &dst, &fl, sk, 0);
426 	switch (err) {
427 	case 0:
428 		if (dst != dst2)
429 			goto route_done;
430 		break;
431 	case -EPERM:
432 		dst = NULL;
433 		break;
434 	default:
435 		goto out;
436 	}
437 
438 	if (xfrm_decode_session_reverse(skb, &fl2, AF_INET6))
439 		goto relookup_failed;
440 
441 	if (ip6_dst_lookup(sk, &dst2, &fl2))
442 		goto relookup_failed;
443 
444 	err = xfrm_lookup(net, &dst2, &fl2, sk, XFRM_LOOKUP_ICMP);
445 	switch (err) {
446 	case 0:
447 		dst_release(dst);
448 		dst = dst2;
449 		break;
450 	case -EPERM:
451 		goto out_dst_release;
452 	default:
453 relookup_failed:
454 		if (!dst)
455 			goto out;
456 		break;
457 	}
458 
459 route_done:
460 	if (ipv6_addr_is_multicast(&fl.fl6_dst))
461 		hlimit = np->mcast_hops;
462 	else
463 		hlimit = np->hop_limit;
464 	if (hlimit < 0)
465 		hlimit = ip6_dst_hoplimit(dst);
466 
467 	msg.skb = skb;
468 	msg.offset = skb_network_offset(skb);
469 	msg.type = type;
470 
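	/* Quote as much of the offending packet as fits: the error message,
	 * its IPv6 and ICMPv6 headers included, must not exceed the IPv6
	 * minimum MTU (IPV6_MIN_MTU, 1280 bytes).
	 */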
471 	len = skb->len - msg.offset;
472 	len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr));
473 	if (len < 0) {
474 		LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
475 		goto out_dst_release;
476 	}
477 
478 	idev = in6_dev_get(skb->dev);
479 
480 	err = ip6_append_data(sk, icmpv6_getfrag, &msg,
481 			      len + sizeof(struct icmp6hdr),
482 			      sizeof(struct icmp6hdr), hlimit,
483 			      np->tclass, NULL, &fl, (struct rt6_info*)dst,
484 			      MSG_DONTWAIT, np->dontfrag);
485 	if (err) {
486 		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
487 		ip6_flush_pending_frames(sk);
488 		goto out_put;
489 	}
490 	err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, len + sizeof(struct icmp6hdr));
491 
492 out_put:
493 	if (likely(idev != NULL))
494 		in6_dev_put(idev);
495 out_dst_release:
496 	dst_release(dst);
497 out:
498 	icmpv6_xmit_unlock(sk);
499 }
500 
501 EXPORT_SYMBOL(icmpv6_send);
502 
503 static void icmpv6_echo_reply(struct sk_buff *skb)
504 {
505 	struct net *net = dev_net(skb->dev);
506 	struct sock *sk;
507 	struct inet6_dev *idev;
508 	struct ipv6_pinfo *np;
509 	struct in6_addr *saddr = NULL;
510 	struct icmp6hdr *icmph = icmp6_hdr(skb);
511 	struct icmp6hdr tmp_hdr;
512 	struct flowi fl;
513 	struct icmpv6_msg msg;
514 	struct dst_entry *dst;
515 	int err = 0;
516 	int hlimit;
517 
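	/* Reply from the address the request was sent to, but only when that
	 * destination was unicast; otherwise leave saddr unset and let
	 * source address selection pick one.
	 */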
518 	saddr = &ipv6_hdr(skb)->daddr;
519 
520 	if (!ipv6_unicast_destination(skb))
521 		saddr = NULL;
522 
523 	memcpy(&tmp_hdr, icmph, sizeof(tmp_hdr));
524 	tmp_hdr.icmp6_type = ICMPV6_ECHO_REPLY;
525 
526 	memset(&fl, 0, sizeof(fl));
527 	fl.proto = IPPROTO_ICMPV6;
528 	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
529 	if (saddr)
530 		ipv6_addr_copy(&fl.fl6_src, saddr);
531 	fl.oif = skb->dev->ifindex;
532 	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
533 	security_skb_classify_flow(skb, &fl);
534 
535 	sk = icmpv6_xmit_lock(net);
536 	if (sk == NULL)
537 		return;
538 	np = inet6_sk(sk);
539 
540 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
541 		fl.oif = np->mcast_oif;
542 
543 	err = ip6_dst_lookup(sk, &dst, &fl);
544 	if (err)
545 		goto out;
546 	if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0)
547 		goto out;
548 
549 	if (ipv6_addr_is_multicast(&fl.fl6_dst))
550 		hlimit = np->mcast_hops;
551 	else
552 		hlimit = np->hop_limit;
553 	if (hlimit < 0)
554 		hlimit = ip6_dst_hoplimit(dst);
555 
556 	idev = in6_dev_get(skb->dev);
557 
558 	msg.skb = skb;
559 	msg.offset = 0;
560 	msg.type = ICMPV6_ECHO_REPLY;
561 
562 	err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
563 				sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl,
564 				(struct rt6_info*)dst, MSG_DONTWAIT,
565 				np->dontfrag);
566 
567 	if (err) {
568 		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
569 		ip6_flush_pending_frames(sk);
570 		goto out_put;
571 	}
572 	err = icmpv6_push_pending_frames(sk, &fl, &tmp_hdr, skb->len + sizeof(struct icmp6hdr));
573 
574 out_put:
575 	if (likely(idev != NULL))
576 		in6_dev_put(idev);
577 	dst_release(dst);
578 out:
579 	icmpv6_xmit_unlock(sk);
580 }
581 
582 static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
583 {
584 	const struct inet6_protocol *ipprot;
585 	int inner_offset;
586 	int hash;
587 	u8 nexthdr;
588 
589 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
590 		return;
591 
592 	nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
593 	if (ipv6_ext_hdr(nexthdr)) {
594 		/* now skip over extension headers */
595 		inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
596 		if (inner_offset<0)
597 			return;
598 	} else {
599 		inner_offset = sizeof(struct ipv6hdr);
600 	}
601 
602 	/* Check the header, including 8 bytes of the inner protocol header. */
603 	if (!pskb_may_pull(skb, inner_offset+8))
604 		return;
605 
606 	/* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
607 	   Without this we will not be able, for example, to do source routed
608 	   pmtu discovery.
609 	   Corresponding argument (opt) to notifiers is already added.
610 	   --ANK (980726)
611 	 */
612 
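	/* Hand the error to the upper-layer protocol's err_handler (if one is
	 * registered in inet6_protos[]) and then to any matching raw sockets.
	 */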
613 	hash = nexthdr & (MAX_INET_PROTOS - 1);
614 
615 	rcu_read_lock();
616 	ipprot = rcu_dereference(inet6_protos[hash]);
617 	if (ipprot && ipprot->err_handler)
618 		ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
619 	rcu_read_unlock();
620 
621 	raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
622 }
623 
624 /*
625  *	Handle icmp messages
626  */
627 
628 static int icmpv6_rcv(struct sk_buff *skb)
629 {
630 	struct net_device *dev = skb->dev;
631 	struct inet6_dev *idev = __in6_dev_get(dev);
632 	struct in6_addr *saddr, *daddr;
633 	struct ipv6hdr *orig_hdr;
634 	struct icmp6hdr *hdr;
635 	u8 type;
636 
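	/* If the inbound IPsec policy check fails, still accept the ICMPv6
	 * message when the last xfrm state carries XFRM_STATE_ICMP: point the
	 * network header at the quoted inner packet, redo the policy check on
	 * the reverse flow, then restore the original header offset.
	 */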
637 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
638 		struct sec_path *sp = skb_sec_path(skb);
639 		int nh;
640 
641 		if (!(sp && sp->xvec[sp->len - 1]->props.flags &
642 				 XFRM_STATE_ICMP))
643 			goto drop_no_count;
644 
645 		if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(*orig_hdr)))
646 			goto drop_no_count;
647 
648 		nh = skb_network_offset(skb);
649 		skb_set_network_header(skb, sizeof(*hdr));
650 
651 		if (!xfrm6_policy_check_reverse(NULL, XFRM_POLICY_IN, skb))
652 			goto drop_no_count;
653 
654 		skb_set_network_header(skb, nh);
655 	}
656 
657 	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INMSGS);
658 
659 	saddr = &ipv6_hdr(skb)->saddr;
660 	daddr = &ipv6_hdr(skb)->daddr;
661 
662 	/* Perform checksum. */
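	/* With CHECKSUM_COMPLETE, verify the hardware-computed sum against
	 * the pseudo-header; otherwise (or if that fails) checksum the whole
	 * packet in software via __skb_checksum_complete().
	 */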
663 	switch (skb->ip_summed) {
664 	case CHECKSUM_COMPLETE:
665 		if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
666 				     skb->csum))
667 			break;
668 		/* fall through */
669 	case CHECKSUM_NONE:
670 		skb->csum = ~csum_unfold(csum_ipv6_magic(saddr, daddr, skb->len,
671 					     IPPROTO_ICMPV6, 0));
672 		if (__skb_checksum_complete(skb)) {
673 			LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%pI6 > %pI6]\n",
674 				       saddr, daddr);
675 			goto discard_it;
676 		}
677 	}
678 
679 	if (!pskb_pull(skb, sizeof(*hdr)))
680 		goto discard_it;
681 
682 	hdr = icmp6_hdr(skb);
683 
684 	type = hdr->icmp6_type;
685 
686 	ICMP6MSGIN_INC_STATS_BH(dev_net(dev), idev, type);
687 
688 	switch (type) {
689 	case ICMPV6_ECHO_REQUEST:
690 		icmpv6_echo_reply(skb);
691 		break;
692 
693 	case ICMPV6_ECHO_REPLY:
694 		/* we couldn't care less */
695 		break;
696 
697 	case ICMPV6_PKT_TOOBIG:
698 		/* BUGGG_FUTURE: if packet contains rthdr, we cannot update
699 		   the standard destination cache. It seems only an "advanced"
700 		   destination cache will allow us to solve this problem.
701 		   --ANK (980726)
702 		 */
703 		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
704 			goto discard_it;
705 		hdr = icmp6_hdr(skb);
706 		orig_hdr = (struct ipv6hdr *) (hdr + 1);
707 		rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
708 				   ntohl(hdr->icmp6_mtu));
709 
710 		/*
711 		 *	Drop through to notify
712 		 */
713 
714 	case ICMPV6_DEST_UNREACH:
715 	case ICMPV6_TIME_EXCEED:
716 	case ICMPV6_PARAMPROB:
717 		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
718 		break;
719 
720 	case NDISC_ROUTER_SOLICITATION:
721 	case NDISC_ROUTER_ADVERTISEMENT:
722 	case NDISC_NEIGHBOUR_SOLICITATION:
723 	case NDISC_NEIGHBOUR_ADVERTISEMENT:
724 	case NDISC_REDIRECT:
725 		ndisc_rcv(skb);
726 		break;
727 
728 	case ICMPV6_MGM_QUERY:
729 		igmp6_event_query(skb);
730 		break;
731 
732 	case ICMPV6_MGM_REPORT:
733 		igmp6_event_report(skb);
734 		break;
735 
736 	case ICMPV6_MGM_REDUCTION:
737 	case ICMPV6_NI_QUERY:
738 	case ICMPV6_NI_REPLY:
739 	case ICMPV6_MLD2_REPORT:
740 	case ICMPV6_DHAAD_REQUEST:
741 	case ICMPV6_DHAAD_REPLY:
742 	case ICMPV6_MOBILE_PREFIX_SOL:
743 	case ICMPV6_MOBILE_PREFIX_ADV:
744 		break;
745 
746 	default:
747 		LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");
748 
749 		/* informational */
750 		if (type & ICMPV6_INFOMSG_MASK)
751 			break;
752 
753 		/*
754 		 * error of unknown type.
755 		 * must pass to upper level
756 		 */
757 
758 		icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu);
759 	}
760 
761 	kfree_skb(skb);
762 	return 0;
763 
764 discard_it:
765 	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INERRORS);
766 drop_no_count:
767 	kfree_skb(skb);
768 	return 0;
769 }
770 
771 void icmpv6_flow_init(struct sock *sk, struct flowi *fl,
772 		      u8 type,
773 		      const struct in6_addr *saddr,
774 		      const struct in6_addr *daddr,
775 		      int oif)
776 {
777 	memset(fl, 0, sizeof(*fl));
778 	ipv6_addr_copy(&fl->fl6_src, saddr);
779 	ipv6_addr_copy(&fl->fl6_dst, daddr);
780 	fl->proto	 	= IPPROTO_ICMPV6;
781 	fl->fl_icmp_type	= type;
782 	fl->fl_icmp_code	= 0;
783 	fl->oif			= oif;
784 	security_sk_classify_flow(sk, fl);
785 }
786 
787 /*
788  * Special lock-class for __icmpv6_sk:
789  */
790 static struct lock_class_key icmpv6_socket_sk_dst_lock_key;
791 
792 static int __net_init icmpv6_sk_init(struct net *net)
793 {
794 	struct sock *sk;
795 	int err, i, j;
796 
797 	net->ipv6.icmp_sk =
798 		kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
799 	if (net->ipv6.icmp_sk == NULL)
800 		return -ENOMEM;
801 
802 	for_each_possible_cpu(i) {
803 		err = inet_ctl_sock_create(&sk, PF_INET6,
804 					   SOCK_RAW, IPPROTO_ICMPV6, net);
805 		if (err < 0) {
806 			printk(KERN_ERR
807 			       "Failed to initialize the ICMP6 control socket "
808 			       "(err %d).\n",
809 			       err);
810 			goto fail;
811 		}
812 
813 		net->ipv6.icmp_sk[i] = sk;
814 
815 		/*
816 		 * Split off their lock-class, because sk->sk_dst_lock
817 		 * gets used from softirqs, which is safe for
818 		 * __icmpv6_sk (because those never get directly used
819 		 * via userspace syscalls), but unsafe for normal sockets.
820 		 */
821 		lockdep_set_class(&sk->sk_dst_lock,
822 				  &icmpv6_socket_sk_dst_lock_key);
823 
824 		/* Enough space for 2 64K ICMP packets, including
825 		 * sk_buff struct overhead.
826 		 */
827 		sk->sk_sndbuf =
828 			(2 * ((64 * 1024) + sizeof(struct sk_buff)));
829 	}
830 	return 0;
831 
832  fail:
833 	for (j = 0; j < i; j++)
834 		inet_ctl_sock_destroy(net->ipv6.icmp_sk[j]);
835 	kfree(net->ipv6.icmp_sk);
836 	return err;
837 }
838 
839 static void __net_exit icmpv6_sk_exit(struct net *net)
840 {
841 	int i;
842 
843 	for_each_possible_cpu(i) {
844 		inet_ctl_sock_destroy(net->ipv6.icmp_sk[i]);
845 	}
846 	kfree(net->ipv6.icmp_sk);
847 }
848 
849 static struct pernet_operations icmpv6_sk_ops = {
850        .init = icmpv6_sk_init,
851        .exit = icmpv6_sk_exit,
852 };
853 
854 int __init icmpv6_init(void)
855 {
856 	int err;
857 
858 	err = register_pernet_subsys(&icmpv6_sk_ops);
859 	if (err < 0)
860 		return err;
861 
862 	err = -EAGAIN;
863 	if (inet6_add_protocol(&icmpv6_protocol, IPPROTO_ICMPV6) < 0)
864 		goto fail;
865 	return 0;
866 
867 fail:
868 	printk(KERN_ERR "Failed to register ICMP6 protocol\n");
869 	unregister_pernet_subsys(&icmpv6_sk_ops);
870 	return err;
871 }
872 
873 void icmpv6_cleanup(void)
874 {
875 	unregister_pernet_subsys(&icmpv6_sk_ops);
876 	inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
877 }
878 
879 
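/*
 * Map ICMPV6_DEST_UNREACH codes (0 through ICMPV6_PORT_UNREACH) onto errno
 * values and whether the error should be treated as fatal by the caller.
 */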
880 static const struct icmp6_err {
881 	int err;
882 	int fatal;
883 } tab_unreach[] = {
884 	{	/* NOROUTE */
885 		.err	= ENETUNREACH,
886 		.fatal	= 0,
887 	},
888 	{	/* ADM_PROHIBITED */
889 		.err	= EACCES,
890 		.fatal	= 1,
891 	},
892 	{	/* Was NOT_NEIGHBOUR, now reserved */
893 		.err	= EHOSTUNREACH,
894 		.fatal	= 0,
895 	},
896 	{	/* ADDR_UNREACH	*/
897 		.err	= EHOSTUNREACH,
898 		.fatal	= 0,
899 	},
900 	{	/* PORT_UNREACH	*/
901 		.err	= ECONNREFUSED,
902 		.fatal	= 1,
903 	},
904 };
905 
906 int icmpv6_err_convert(u8 type, u8 code, int *err)
907 {
908 	int fatal = 0;
909 
910 	*err = EPROTO;
911 
912 	switch (type) {
913 	case ICMPV6_DEST_UNREACH:
914 		fatal = 1;
915 		if (code <= ICMPV6_PORT_UNREACH) {
916 			*err  = tab_unreach[code].err;
917 			fatal = tab_unreach[code].fatal;
918 		}
919 		break;
920 
921 	case ICMPV6_PKT_TOOBIG:
922 		*err = EMSGSIZE;
923 		break;
924 
925 	case ICMPV6_PARAMPROB:
926 		*err = EPROTO;
927 		fatal = 1;
928 		break;
929 
930 	case ICMPV6_TIME_EXCEED:
931 		*err = EHOSTUNREACH;
932 		break;
933 	}
934 
935 	return fatal;
936 }
937 
938 EXPORT_SYMBOL(icmpv6_err_convert);
939 
940 #ifdef CONFIG_SYSCTL
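/*
 * "ratelimit" exposes icmpv6_time: the minimum interval between outgoing
 * ICMPv6 error messages per destination. Userspace reads and writes it in
 * milliseconds; proc_dointvec_ms_jiffies stores it internally as jiffies.
 */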
941 ctl_table ipv6_icmp_table_template[] = {
942 	{
943 		.procname	= "ratelimit",
944 		.data		= &init_net.ipv6.sysctl.icmpv6_time,
945 		.maxlen		= sizeof(int),
946 		.mode		= 0644,
947 		.proc_handler	= proc_dointvec_ms_jiffies,
948 	},
949 	{ },
950 };
951 
952 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
953 {
954 	struct ctl_table *table;
955 
956 	table = kmemdup(ipv6_icmp_table_template,
957 			sizeof(ipv6_icmp_table_template),
958 			GFP_KERNEL);
959 
960 	if (table)
961 		table[0].data = &net->ipv6.sysctl.icmpv6_time;
962 
963 	return table;
964 }
965 #endif
966 
967