xref: /linux/net/ipv6/sit.c (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd)
1 /*
2  *	IPv6 over IPv4 tunnel device - Simple Internet Transition (SIT)
3  *	Linux INET6 implementation
4  *
5  *	Authors:
6  *	Pedro Roque		<roque@di.fc.ul.pt>
7  *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
8  *
9  *	This program is free software; you can redistribute it and/or
10  *      modify it under the terms of the GNU General Public License
11  *      as published by the Free Software Foundation; either version
12  *      2 of the License, or (at your option) any later version.
13  *
14  *	Changes:
15  * Roger Venning <r.venning@telstra.com>:	6to4 support
16  * Nate Thompson <nate@thebog.net>:		6to4 support
17  * Fred Templin <fred.l.templin@boeing.com>:	isatap support
18  */
19 
20 #include <linux/module.h>
21 #include <linux/capability.h>
22 #include <linux/errno.h>
23 #include <linux/types.h>
24 #include <linux/socket.h>
25 #include <linux/sockios.h>
26 #include <linux/net.h>
27 #include <linux/in6.h>
28 #include <linux/netdevice.h>
29 #include <linux/if_arp.h>
30 #include <linux/icmp.h>
31 #include <linux/slab.h>
32 #include <asm/uaccess.h>
33 #include <linux/init.h>
34 #include <linux/netfilter_ipv4.h>
35 #include <linux/if_ether.h>
36 
37 #include <net/sock.h>
38 #include <net/snmp.h>
39 
40 #include <net/ipv6.h>
41 #include <net/protocol.h>
42 #include <net/transp_v6.h>
43 #include <net/ip6_fib.h>
44 #include <net/ip6_route.h>
45 #include <net/ndisc.h>
46 #include <net/addrconf.h>
47 #include <net/ip.h>
48 #include <net/udp.h>
49 #include <net/icmp.h>
50 #include <net/ipip.h>
51 #include <net/inet_ecn.h>
52 #include <net/xfrm.h>
53 #include <net/dsfield.h>
54 #include <net/net_namespace.h>
55 #include <net/netns/generic.h>
56 
57 /*
58    This version of net/ipv6/sit.c is cloned from net/ipv4/ip_gre.c
59 
60    For comments look at net/ipv4/ip_gre.c --ANK
61  */
62 
63 #define HASH_SIZE  16
64 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
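/*
 * Illustrative note (not part of the original source): HASH() folds a
 * 32-bit address into a bucket index in [0, HASH_SIZE - 1] by xoring the
 * value with itself shifted right by four bits and keeping the low nibble.
 * For example, for a raw 32-bit value of 0x0a000001:
 *
 *	0x0a000001 ^ (0x0a000001 >> 4) == 0x0a000001 ^ 0x00a00000
 *					== 0x0aa00001
 *	0x0aa00001 & 0xF		== 0x1		(bucket 1)
 */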
65 
66 static int ipip6_tunnel_init(struct net_device *dev);
67 static void ipip6_tunnel_setup(struct net_device *dev);
68 static void ipip6_dev_free(struct net_device *dev);
69 
70 static int sit_net_id __read_mostly;
71 struct sit_net {
72 	struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
73 	struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
74 	struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
75 	struct ip_tunnel __rcu *tunnels_wc[1];
76 	struct ip_tunnel __rcu **tunnels[4];
77 
78 	struct net_device *fb_tunnel_dev;
79 };
80 
81 /*
82  * Locking: hash tables are protected by RCU (readers) and RTNL (writers)
83  */
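/*
 * A minimal sketch of the writer side of that scheme (illustrative only,
 * not part of the driver; new_saddr is a placeholder).  It mirrors the
 * SIOCCHGTUNNEL path in ipip6_tunnel_ioctl() below:
 *
 *	ASSERT_RTNL();
 *	ipip6_tunnel_unlink(sitn, t);	// unhook from its hash chain
 *	synchronize_net();		// wait out current RCU readers
 *	t->parms.iph.saddr = new_saddr;	// safe to rewrite the key now
 *	ipip6_tunnel_link(sitn, t);	// re-publish via rcu_assign_pointer()
 */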
84 
85 #define for_each_ip_tunnel_rcu(start) \
86 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
87 
88 /* frequently modified stats are per-cpu, others are shared (netdev->stats) */
89 struct pcpu_tstats {
90 	unsigned long	rx_packets;
91 	unsigned long	rx_bytes;
92 	unsigned long	tx_packets;
93 	unsigned long	tx_bytes;
94 } __attribute__((aligned(4*sizeof(unsigned long))));
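/*
 * Illustrative note: the hot paths below bump these counters through
 * this_cpu_ptr() with no locking, e.g. (as in ipip6_rcv()):
 *
 *	struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
 *	tstats->rx_packets++;
 *	tstats->rx_bytes += skb->len;
 *
 * ipip6_get_stats() below folds the per-cpu counters back into the
 * shared netdev->stats on demand.
 */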
95 
96 static struct net_device_stats *ipip6_get_stats(struct net_device *dev)
97 {
98 	struct pcpu_tstats sum = { 0 };
99 	int i;
100 
101 	for_each_possible_cpu(i) {
102 		const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
103 
104 		sum.rx_packets += tstats->rx_packets;
105 		sum.rx_bytes   += tstats->rx_bytes;
106 		sum.tx_packets += tstats->tx_packets;
107 		sum.tx_bytes   += tstats->tx_bytes;
108 	}
109 	dev->stats.rx_packets = sum.rx_packets;
110 	dev->stats.rx_bytes   = sum.rx_bytes;
111 	dev->stats.tx_packets = sum.tx_packets;
112 	dev->stats.tx_bytes   = sum.tx_bytes;
113 	return &dev->stats;
114 }
115 /*
116  * Must be called with rcu_read_lock() held (see the caller sketch after the function below)
117  */
118 static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
119 		struct net_device *dev, __be32 remote, __be32 local)
120 {
121 	unsigned int h0 = HASH(remote);
122 	unsigned int h1 = HASH(local);
123 	struct ip_tunnel *t;
124 	struct sit_net *sitn = net_generic(net, sit_net_id);
125 
126 	for_each_ip_tunnel_rcu(sitn->tunnels_r_l[h0 ^ h1]) {
127 		if (local == t->parms.iph.saddr &&
128 		    remote == t->parms.iph.daddr &&
129 		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
130 		    (t->dev->flags & IFF_UP))
131 			return t;
132 	}
133 	for_each_ip_tunnel_rcu(sitn->tunnels_r[h0]) {
134 		if (remote == t->parms.iph.daddr &&
135 		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
136 		    (t->dev->flags & IFF_UP))
137 			return t;
138 	}
139 	for_each_ip_tunnel_rcu(sitn->tunnels_l[h1]) {
140 		if (local == t->parms.iph.saddr &&
141 		    (!dev || !t->parms.link || dev->iflink == t->parms.link) &&
142 		    (t->dev->flags & IFF_UP))
143 			return t;
144 	}
145 	t = rcu_dereference(sitn->tunnels_wc[0]);
146 	if ((t != NULL) && (t->dev->flags & IFF_UP))
147 		return t;
148 	return NULL;
149 }
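/*
 * Caller-side sketch for the lookup above (illustrative; this is the
 * pattern used by ipip6_rcv() below).  The tunnel returned is guaranteed
 * to be IFF_UP, but it may only be used inside the RCU read-side section:
 *
 *	rcu_read_lock();
 *	t = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
 *				iph->saddr, iph->daddr);
 *	if (t != NULL)
 *		... deliver to t->dev ...
 *	rcu_read_unlock();
 */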
150 
151 static struct ip_tunnel __rcu **__ipip6_bucket(struct sit_net *sitn,
152 		struct ip_tunnel_parm *parms)
153 {
154 	__be32 remote = parms->iph.daddr;
155 	__be32 local = parms->iph.saddr;
156 	unsigned int h = 0;
157 	int prio = 0;
158 
159 	if (remote) {
160 		prio |= 2;
161 		h ^= HASH(remote);
162 	}
163 	if (local) {
164 		prio |= 1;
165 		h ^= HASH(local);
166 	}
167 	return &sitn->tunnels[prio][h];
168 }
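/*
 * Illustrative note: the prio bits select one of the four tables wired up
 * in sit_init_net() (0: wildcard, 1: local only, 2: remote only, 3: both
 * endpoints known), so a fully specified tunnel hangs off
 *
 *	sitn->tunnels[3][HASH(remote) ^ HASH(local)]
 *
 * i.e. the tunnels_r_l[] table that ipip6_tunnel_lookup() searches first.
 */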
169 
170 static inline struct ip_tunnel __rcu **ipip6_bucket(struct sit_net *sitn,
171 		struct ip_tunnel *t)
172 {
173 	return __ipip6_bucket(sitn, &t->parms);
174 }
175 
176 static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
177 {
178 	struct ip_tunnel __rcu **tp;
179 	struct ip_tunnel *iter;
180 
181 	for (tp = ipip6_bucket(sitn, t);
182 	     (iter = rtnl_dereference(*tp)) != NULL;
183 	     tp = &iter->next) {
184 		if (t == iter) {
185 			rcu_assign_pointer(*tp, t->next);
186 			break;
187 		}
188 	}
189 }
190 
191 static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
192 {
193 	struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
194 
195 	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
196 	rcu_assign_pointer(*tp, t);
197 }
198 
199 static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
200 {
201 #ifdef CONFIG_IPV6_SIT_6RD
202 	struct ip_tunnel *t = netdev_priv(dev);
203 
204 	if (t->dev == sitn->fb_tunnel_dev) {
205 		ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
206 		t->ip6rd.relay_prefix = 0;
207 		t->ip6rd.prefixlen = 16;
208 		t->ip6rd.relay_prefixlen = 0;
209 	} else {
210 		struct ip_tunnel *t0 = netdev_priv(sitn->fb_tunnel_dev);
211 		memcpy(&t->ip6rd, &t0->ip6rd, sizeof(t->ip6rd));
212 	}
213 #endif
214 }
215 
216 static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
217 		struct ip_tunnel_parm *parms, int create)
218 {
219 	__be32 remote = parms->iph.daddr;
220 	__be32 local = parms->iph.saddr;
221 	struct ip_tunnel *t, *nt;
222 	struct ip_tunnel __rcu **tp;
223 	struct net_device *dev;
224 	char name[IFNAMSIZ];
225 	struct sit_net *sitn = net_generic(net, sit_net_id);
226 
227 	for (tp = __ipip6_bucket(sitn, parms);
228 	    (t = rtnl_dereference(*tp)) != NULL;
229 	     tp = &t->next) {
230 		if (local == t->parms.iph.saddr &&
231 		    remote == t->parms.iph.daddr &&
232 		    parms->link == t->parms.link) {
233 			if (create)
234 				return NULL;
235 			else
236 				return t;
237 		}
238 	}
239 	if (!create)
240 		goto failed;
241 
242 	if (parms->name[0])
243 		strlcpy(name, parms->name, IFNAMSIZ);
244 	else
245 		strcpy(name, "sit%d");
246 
247 	dev = alloc_netdev(sizeof(*t), name, ipip6_tunnel_setup);
248 	if (dev == NULL)
249 		return NULL;
250 
251 	dev_net_set(dev, net);
252 
253 	nt = netdev_priv(dev);
254 
255 	nt->parms = *parms;
256 	if (ipip6_tunnel_init(dev) < 0)
257 		goto failed_free;
258 	ipip6_tunnel_clone_6rd(dev, sitn);
259 
260 	if (parms->i_flags & SIT_ISATAP)
261 		dev->priv_flags |= IFF_ISATAP;
262 
263 	if (register_netdevice(dev) < 0)
264 		goto failed_free;
265 
266 	strcpy(nt->parms.name, dev->name);
267 
268 	dev_hold(dev);
269 
270 	ipip6_tunnel_link(sitn, nt);
271 	return nt;
272 
273 failed_free:
274 	ipip6_dev_free(dev);
275 failed:
276 	return NULL;
277 }
278 
279 #define for_each_prl_rcu(start)			\
280 	for (prl = rcu_dereference(start);	\
281 	     prl;				\
282 	     prl = rcu_dereference(prl->next))
283 
284 static struct ip_tunnel_prl_entry *
285 __ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr)
286 {
287 	struct ip_tunnel_prl_entry *prl;
288 
289 	for_each_prl_rcu(t->prl)
290 		if (prl->addr == addr)
291 			break;
292 	return prl;
293 
294 }
295 
296 static int ipip6_tunnel_get_prl(struct ip_tunnel *t,
297 				struct ip_tunnel_prl __user *a)
298 {
299 	struct ip_tunnel_prl kprl, *kp;
300 	struct ip_tunnel_prl_entry *prl;
301 	unsigned int cmax, c = 0, ca, len;
302 	int ret = 0;
303 
304 	if (copy_from_user(&kprl, a, sizeof(kprl)))
305 		return -EFAULT;
306 	cmax = kprl.datalen / sizeof(kprl);
307 	if (cmax > 1 && kprl.addr != htonl(INADDR_ANY))
308 		cmax = 1;
309 
310 	/* For simple GET or for root users,
311 	 * we try harder to allocate.
312 	 */
313 	kp = (cmax <= 1 || capable(CAP_NET_ADMIN)) ?
314 		kcalloc(cmax, sizeof(*kp), GFP_KERNEL) :
315 		NULL;
316 
317 	rcu_read_lock();
318 
319 	ca = t->prl_count < cmax ? t->prl_count : cmax;
320 
321 	if (!kp) {
322 		/* We don't try hard to allocate much memory for
323 		 * non-root users.
324 		 * For root users, retry allocating enough memory for
325 		 * the answer.
326 		 */
327 		kp = kcalloc(ca, sizeof(*kp), GFP_ATOMIC);
328 		if (!kp) {
329 			ret = -ENOMEM;
330 			goto out;
331 		}
332 	}
333 
334 	c = 0;
335 	for_each_prl_rcu(t->prl) {
336 		if (c >= cmax)
337 			break;
338 		if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr)
339 			continue;
340 		kp[c].addr = prl->addr;
341 		kp[c].flags = prl->flags;
342 		c++;
343 		if (kprl.addr != htonl(INADDR_ANY))
344 			break;
345 	}
346 out:
347 	rcu_read_unlock();
348 
349 	len = sizeof(*kp) * c;
350 	ret = 0;
351 	if ((len && copy_to_user(a + 1, kp, len)) || put_user(len, &a->datalen))
352 		ret = -EFAULT;
353 
354 	kfree(kp);
355 
356 	return ret;
357 }
358 
359 static int
360 ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
361 {
362 	struct ip_tunnel_prl_entry *p;
363 	int err = 0;
364 
365 	if (a->addr == htonl(INADDR_ANY))
366 		return -EINVAL;
367 
368 	ASSERT_RTNL();
369 
370 	for (p = rtnl_dereference(t->prl); p; p = rtnl_dereference(p->next)) {
371 		if (p->addr == a->addr) {
372 			if (chg) {
373 				p->flags = a->flags;
374 				goto out;
375 			}
376 			err = -EEXIST;
377 			goto out;
378 		}
379 	}
380 
381 	if (chg) {
382 		err = -ENXIO;
383 		goto out;
384 	}
385 
386 	p = kzalloc(sizeof(struct ip_tunnel_prl_entry), GFP_KERNEL);
387 	if (!p) {
388 		err = -ENOBUFS;
389 		goto out;
390 	}
391 
392 	p->next = t->prl;
393 	p->addr = a->addr;
394 	p->flags = a->flags;
395 	t->prl_count++;
396 	rcu_assign_pointer(t->prl, p);
397 out:
398 	return err;
399 }
400 
401 static void prl_list_destroy_rcu(struct rcu_head *head)
402 {
403 	struct ip_tunnel_prl_entry *p, *n;
404 
405 	p = container_of(head, struct ip_tunnel_prl_entry, rcu_head);
406 	do {
407 		n = rcu_dereference_protected(p->next, 1);
408 		kfree(p);
409 		p = n;
410 	} while (p);
411 }
412 
413 static int
414 ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
415 {
416 	struct ip_tunnel_prl_entry *x;
417 	struct ip_tunnel_prl_entry __rcu **p;
418 	int err = 0;
419 
420 	ASSERT_RTNL();
421 
422 	if (a && a->addr != htonl(INADDR_ANY)) {
423 		for (p = &t->prl;
424 		     (x = rtnl_dereference(*p)) != NULL;
425 		     p = &x->next) {
426 			if (x->addr == a->addr) {
427 				*p = x->next;
428 				kfree_rcu(x, rcu_head);
429 				t->prl_count--;
430 				goto out;
431 			}
432 		}
433 		err = -ENXIO;
434 	} else {
435 		x = rtnl_dereference(t->prl);
436 		if (x) {
437 			t->prl_count = 0;
438 			call_rcu(&x->rcu_head, prl_list_destroy_rcu);
439 			t->prl = NULL;
440 		}
441 	}
442 out:
443 	return err;
444 }
445 
446 static int
447 isatap_chksrc(struct sk_buff *skb, const struct iphdr *iph, struct ip_tunnel *t)
448 {
449 	struct ip_tunnel_prl_entry *p;
450 	int ok = 1;
451 
452 	rcu_read_lock();
453 	p = __ipip6_tunnel_locate_prl(t, iph->saddr);
454 	if (p) {
455 		if (p->flags & PRL_DEFAULT)
456 			skb->ndisc_nodetype = NDISC_NODETYPE_DEFAULT;
457 		else
458 			skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT;
459 	} else {
460 		const struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr;
461 
462 		if (ipv6_addr_is_isatap(addr6) &&
463 		    (addr6->s6_addr32[3] == iph->saddr) &&
464 		    ipv6_chk_prefix(addr6, t->dev))
465 			skb->ndisc_nodetype = NDISC_NODETYPE_HOST;
466 		else
467 			ok = 0;
468 	}
469 	rcu_read_unlock();
470 	return ok;
471 }
472 
473 static void ipip6_tunnel_uninit(struct net_device *dev)
474 {
475 	struct net *net = dev_net(dev);
476 	struct sit_net *sitn = net_generic(net, sit_net_id);
477 
478 	if (dev == sitn->fb_tunnel_dev) {
479 		RCU_INIT_POINTER(sitn->tunnels_wc[0], NULL);
480 	} else {
481 		ipip6_tunnel_unlink(sitn, netdev_priv(dev));
482 		ipip6_tunnel_del_prl(netdev_priv(dev), NULL);
483 	}
484 	dev_put(dev);
485 }
486 
487 
488 static int ipip6_err(struct sk_buff *skb, u32 info)
489 {
490 
491 /* All the routers (except for Linux) return only
492    8 bytes of packet payload. This means that precise relaying of
493    ICMP in the real Internet is absolutely infeasible.
494  */
495 	const struct iphdr *iph = (const struct iphdr *)skb->data;
496 	const int type = icmp_hdr(skb)->type;
497 	const int code = icmp_hdr(skb)->code;
498 	struct ip_tunnel *t;
499 	int err;
500 
501 	switch (type) {
502 	default:
503 	case ICMP_PARAMETERPROB:
504 		return 0;
505 
506 	case ICMP_DEST_UNREACH:
507 		switch (code) {
508 		case ICMP_SR_FAILED:
509 		case ICMP_PORT_UNREACH:
510 			/* Impossible event. */
511 			return 0;
512 		case ICMP_FRAG_NEEDED:
513 			/* Soft state for pmtu is maintained by IP core. */
514 			return 0;
515 		default:
516 			/* All others are translated to HOST_UNREACH.
517 			   rfc2003 contains "deep thoughts" about NET_UNREACH,
518 			   I believe they are just ether pollution. --ANK
519 			 */
520 			break;
521 		}
522 		break;
523 	case ICMP_TIME_EXCEEDED:
524 		if (code != ICMP_EXC_TTL)
525 			return 0;
526 		break;
527 	}
528 
529 	err = -ENOENT;
530 
531 	rcu_read_lock();
532 	t = ipip6_tunnel_lookup(dev_net(skb->dev),
533 				skb->dev,
534 				iph->daddr,
535 				iph->saddr);
536 	if (t == NULL || t->parms.iph.daddr == 0)
537 		goto out;
538 
539 	err = 0;
540 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
541 		goto out;
542 
543 	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
544 		t->err_count++;
545 	else
546 		t->err_count = 1;
547 	t->err_time = jiffies;
548 out:
549 	rcu_read_unlock();
550 	return err;
551 }
552 
553 static inline void ipip6_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
554 {
555 	if (INET_ECN_is_ce(iph->tos))
556 		IP6_ECN_set_ce(ipv6_hdr(skb));
557 }
558 
559 static int ipip6_rcv(struct sk_buff *skb)
560 {
561 	const struct iphdr *iph;
562 	struct ip_tunnel *tunnel;
563 
564 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
565 		goto out;
566 
567 	iph = ip_hdr(skb);
568 
569 	rcu_read_lock();
570 	tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
571 				     iph->saddr, iph->daddr);
572 	if (tunnel != NULL) {
573 		struct pcpu_tstats *tstats;
574 
575 		secpath_reset(skb);
576 		skb->mac_header = skb->network_header;
577 		skb_reset_network_header(skb);
578 		IPCB(skb)->flags = 0;
579 		skb->protocol = htons(ETH_P_IPV6);
580 		skb->pkt_type = PACKET_HOST;
581 
582 		if ((tunnel->dev->priv_flags & IFF_ISATAP) &&
583 		    !isatap_chksrc(skb, iph, tunnel)) {
584 			tunnel->dev->stats.rx_errors++;
585 			rcu_read_unlock();
586 			kfree_skb(skb);
587 			return 0;
588 		}
589 
590 		tstats = this_cpu_ptr(tunnel->dev->tstats);
591 		tstats->rx_packets++;
592 		tstats->rx_bytes += skb->len;
593 
594 		__skb_tunnel_rx(skb, tunnel->dev);
595 
596 		ipip6_ecn_decapsulate(iph, skb);
597 
598 		netif_rx(skb);
599 
600 		rcu_read_unlock();
601 		return 0;
602 	}
603 
604 	/* no tunnel matched, let upstream know, IPsec may handle it */
605 	rcu_read_unlock();
606 	return 1;
607 out:
608 	kfree_skb(skb);
609 	return 0;
610 }
611 
612 /*
613  * Returns the embedded IPv4 address if the IPv6 address
614  * comes from 6rd / 6to4 (RFC 3056) addr space.
615  */
616 static inline
617 __be32 try_6rd(const struct in6_addr *v6dst, struct ip_tunnel *tunnel)
618 {
619 	__be32 dst = 0;
620 
621 #ifdef CONFIG_IPV6_SIT_6RD
622 	if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix,
623 			      tunnel->ip6rd.prefixlen)) {
624 		unsigned int pbw0, pbi0;
625 		int pbi1;
626 		u32 d;
627 
628 		pbw0 = tunnel->ip6rd.prefixlen >> 5;
629 		pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
630 
631 		d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
632 		    tunnel->ip6rd.relay_prefixlen;
633 
634 		pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
635 		if (pbi1 > 0)
636 			d |= ntohl(v6dst->s6_addr32[pbw0 + 1]) >>
637 			     (32 - pbi1);
638 
639 		dst = tunnel->ip6rd.relay_prefix | htonl(d);
640 	}
641 #else
642 	if (v6dst->s6_addr16[0] == htons(0x2002)) {
643 		/* A 6to4 v6 addr has a 16-bit prefix, a 32-bit v4 addr, a 16-bit SLA ID, ... */
644 		memcpy(&dst, &v6dst->s6_addr16[1], 4);
645 	}
646 #endif
647 	return dst;
648 }
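/*
 * Worked example for the extraction above (illustrative, not from the
 * original source).  6to4 branch: the v4 address occupies bits 16..47 of
 * the destination, so 2002:c000:0201::1 yields dst = 192.0.2.1.
 * 6rd branch: with the defaults installed by ipip6_tunnel_clone_6rd()
 * (prefix 2002::/16, relay_prefixlen 0) the same address again maps to
 * 192.0.2.1; with a hypothetical 6rd prefix of 2001:db8::/32 and
 * relay_prefixlen 0, the v4 address immediately follows the prefix, so
 * 2001:db8:c000:0201::1 also maps to 192.0.2.1.
 */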
649 
650 /*
651  *	This function assumes it is being called from dev_queue_xmit()
652  *	and that skb is filled properly by that function.
653  */
654 
655 static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
656 				     struct net_device *dev)
657 {
658 	struct ip_tunnel *tunnel = netdev_priv(dev);
659 	struct pcpu_tstats *tstats;
660 	const struct iphdr  *tiph = &tunnel->parms.iph;
661 	const struct ipv6hdr *iph6 = ipv6_hdr(skb);
662 	u8     tos = tunnel->parms.iph.tos;
663 	__be16 df = tiph->frag_off;
664 	struct rtable *rt;     			/* Route to the other host */
665 	struct net_device *tdev;		/* Device to other host */
666 	struct iphdr  *iph;			/* Our new IP header */
667 	unsigned int max_headroom;		/* The extra header space needed */
668 	__be32 dst = tiph->daddr;
669 	struct flowi4 fl4;
670 	int    mtu;
671 	const struct in6_addr *addr6;
672 	int addr_type;
673 
674 	if (skb->protocol != htons(ETH_P_IPV6))
675 		goto tx_error;
676 
677 	if (tos == 1)
678 		tos = ipv6_get_dsfield(iph6);
679 
680 	/* ISATAP (RFC4214) - must come before 6to4 */
681 	if (dev->priv_flags & IFF_ISATAP) {
682 		struct neighbour *neigh = NULL;
683 
684 		if (skb_dst(skb))
685 			neigh = dst_get_neighbour_noref(skb_dst(skb));
686 
687 		if (neigh == NULL) {
688 			if (net_ratelimit())
689 				printk(KERN_DEBUG "sit: nexthop == NULL\n");
690 			goto tx_error;
691 		}
692 
693 		addr6 = (const struct in6_addr*)&neigh->primary_key;
694 		addr_type = ipv6_addr_type(addr6);
695 
696 		if ((addr_type & IPV6_ADDR_UNICAST) &&
697 		     ipv6_addr_is_isatap(addr6))
698 			dst = addr6->s6_addr32[3];
699 		else
700 			goto tx_error;
701 	}
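	/*
	 * Illustrative example (not from the original source): an ISATAP
	 * interface identifier embeds the v4 address in the low 64 bits
	 * as [00|02]00:5efe:<v4addr>, so a next hop of
	 * fe80::5efe:c000:0201 (i.e. fe80::5efe:192.0.2.1) results in
	 * dst = 192.0.2.1 here.
	 */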
702 
703 	if (!dst)
704 		dst = try_6rd(&iph6->daddr, tunnel);
705 
706 	if (!dst) {
707 		struct neighbour *neigh = NULL;
708 
709 		if (skb_dst(skb))
710 			neigh = dst_get_neighbour_noref(skb_dst(skb));
711 
712 		if (neigh == NULL) {
713 			if (net_ratelimit())
714 				printk(KERN_DEBUG "sit: nexthop == NULL\n");
715 			goto tx_error;
716 		}
717 
718 		addr6 = (const struct in6_addr*)&neigh->primary_key;
719 		addr_type = ipv6_addr_type(addr6);
720 
721 		if (addr_type == IPV6_ADDR_ANY) {
722 			addr6 = &ipv6_hdr(skb)->daddr;
723 			addr_type = ipv6_addr_type(addr6);
724 		}
725 
726 		if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
727 			goto tx_error_icmp;
728 
729 		dst = addr6->s6_addr32[3];
730 	}
731 
732 	rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
733 				   dst, tiph->saddr,
734 				   0, 0,
735 				   IPPROTO_IPV6, RT_TOS(tos),
736 				   tunnel->parms.link);
737 	if (IS_ERR(rt)) {
738 		dev->stats.tx_carrier_errors++;
739 		goto tx_error_icmp;
740 	}
741 	if (rt->rt_type != RTN_UNICAST) {
742 		ip_rt_put(rt);
743 		dev->stats.tx_carrier_errors++;
744 		goto tx_error_icmp;
745 	}
746 	tdev = rt->dst.dev;
747 
748 	if (tdev == dev) {
749 		ip_rt_put(rt);
750 		dev->stats.collisions++;
751 		goto tx_error;
752 	}
753 
754 	if (df) {
755 		mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
756 
757 		if (mtu < 68) {
758 			dev->stats.collisions++;
759 			ip_rt_put(rt);
760 			goto tx_error;
761 		}
762 
763 		if (mtu < IPV6_MIN_MTU) {
764 			mtu = IPV6_MIN_MTU;
765 			df = 0;
766 		}
767 
768 		if (tunnel->parms.iph.daddr && skb_dst(skb))
769 			skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
770 
771 		if (skb->len > mtu) {
772 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
773 			ip_rt_put(rt);
774 			goto tx_error;
775 		}
776 	}
777 
778 	if (tunnel->err_count > 0) {
779 		if (time_before(jiffies,
780 				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
781 			tunnel->err_count--;
782 			dst_link_failure(skb);
783 		} else
784 			tunnel->err_count = 0;
785 	}
786 
787 	/*
788 	 * Okay, now see if we can stuff it in the buffer as-is.
789 	 */
790 	max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr);
791 
792 	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
793 	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
794 		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
795 		if (!new_skb) {
796 			ip_rt_put(rt);
797 			dev->stats.tx_dropped++;
798 			dev_kfree_skb(skb);
799 			return NETDEV_TX_OK;
800 		}
801 		if (skb->sk)
802 			skb_set_owner_w(new_skb, skb->sk);
803 		dev_kfree_skb(skb);
804 		skb = new_skb;
805 		iph6 = ipv6_hdr(skb);
806 	}
807 
808 	skb->transport_header = skb->network_header;
809 	skb_push(skb, sizeof(struct iphdr));
810 	skb_reset_network_header(skb);
811 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
812 	IPCB(skb)->flags = 0;
813 	skb_dst_drop(skb);
814 	skb_dst_set(skb, &rt->dst);
815 
816 	/*
817 	 *	Push down and install the IPIP header.
818 	 */
819 
820 	iph 			=	ip_hdr(skb);
821 	iph->version		=	4;
822 	iph->ihl		=	sizeof(struct iphdr)>>2;
823 	iph->frag_off		=	df;
824 	iph->protocol		=	IPPROTO_IPV6;
825 	iph->tos		=	INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
826 	iph->daddr		=	fl4.daddr;
827 	iph->saddr		=	fl4.saddr;
828 
829 	if ((iph->ttl = tiph->ttl) == 0)
830 		iph->ttl	=	iph6->hop_limit;
831 
832 	nf_reset(skb);
833 	tstats = this_cpu_ptr(dev->tstats);
834 	__IPTUNNEL_XMIT(tstats, &dev->stats);
835 	return NETDEV_TX_OK;
836 
837 tx_error_icmp:
838 	dst_link_failure(skb);
839 tx_error:
840 	dev->stats.tx_errors++;
841 	dev_kfree_skb(skb);
842 	return NETDEV_TX_OK;
843 }
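/*
 * Illustrative note on the df/MTU handling in ipip6_tunnel_xmit() above
 * (not from the original source): the outer IPv4 header costs
 * sizeof(struct iphdr) = 20 bytes, so a 1500-byte route MTU leaves room
 * for 1480-byte IPv6 packets and anything larger is answered with
 * ICMPV6_PKT_TOOBIG quoting mtu = 1480.  If the path MTU falls below
 * IPV6_MIN_MTU (1280), the code keeps advertising 1280 and clears DF so
 * the encapsulated packets may be fragmented by the IPv4 layer instead.
 */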
844 
845 static void ipip6_tunnel_bind_dev(struct net_device *dev)
846 {
847 	struct net_device *tdev = NULL;
848 	struct ip_tunnel *tunnel;
849 	const struct iphdr *iph;
850 	struct flowi4 fl4;
851 
852 	tunnel = netdev_priv(dev);
853 	iph = &tunnel->parms.iph;
854 
855 	if (iph->daddr) {
856 		struct rtable *rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
857 							  iph->daddr, iph->saddr,
858 							  0, 0,
859 							  IPPROTO_IPV6,
860 							  RT_TOS(iph->tos),
861 							  tunnel->parms.link);
862 
863 		if (!IS_ERR(rt)) {
864 			tdev = rt->dst.dev;
865 			ip_rt_put(rt);
866 		}
867 		dev->flags |= IFF_POINTOPOINT;
868 	}
869 
870 	if (!tdev && tunnel->parms.link)
871 		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
872 
873 	if (tdev) {
874 		dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
875 		dev->mtu = tdev->mtu - sizeof(struct iphdr);
876 		if (dev->mtu < IPV6_MIN_MTU)
877 			dev->mtu = IPV6_MIN_MTU;
878 	}
879 	dev->iflink = tunnel->parms.link;
880 }
881 
882 static int
883 ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
884 {
885 	int err = 0;
886 	struct ip_tunnel_parm p;
887 	struct ip_tunnel_prl prl;
888 	struct ip_tunnel *t;
889 	struct net *net = dev_net(dev);
890 	struct sit_net *sitn = net_generic(net, sit_net_id);
891 #ifdef CONFIG_IPV6_SIT_6RD
892 	struct ip_tunnel_6rd ip6rd;
893 #endif
894 
895 	switch (cmd) {
896 	case SIOCGETTUNNEL:
897 #ifdef CONFIG_IPV6_SIT_6RD
898 	case SIOCGET6RD:
899 #endif
900 		t = NULL;
901 		if (dev == sitn->fb_tunnel_dev) {
902 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
903 				err = -EFAULT;
904 				break;
905 			}
906 			t = ipip6_tunnel_locate(net, &p, 0);
907 		}
908 		if (t == NULL)
909 			t = netdev_priv(dev);
910 
911 		err = -EFAULT;
912 		if (cmd == SIOCGETTUNNEL) {
913 			memcpy(&p, &t->parms, sizeof(p));
914 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p,
915 					 sizeof(p)))
916 				goto done;
917 #ifdef CONFIG_IPV6_SIT_6RD
918 		} else {
919 			ip6rd.prefix = t->ip6rd.prefix;
920 			ip6rd.relay_prefix = t->ip6rd.relay_prefix;
921 			ip6rd.prefixlen = t->ip6rd.prefixlen;
922 			ip6rd.relay_prefixlen = t->ip6rd.relay_prefixlen;
923 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &ip6rd,
924 					 sizeof(ip6rd)))
925 				goto done;
926 #endif
927 		}
928 		err = 0;
929 		break;
930 
931 	case SIOCADDTUNNEL:
932 	case SIOCCHGTUNNEL:
933 		err = -EPERM;
934 		if (!capable(CAP_NET_ADMIN))
935 			goto done;
936 
937 		err = -EFAULT;
938 		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
939 			goto done;
940 
941 		err = -EINVAL;
942 		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPV6 ||
943 		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
944 			goto done;
945 		if (p.iph.ttl)
946 			p.iph.frag_off |= htons(IP_DF);
947 
948 		t = ipip6_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
949 
950 		if (dev != sitn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
951 			if (t != NULL) {
952 				if (t->dev != dev) {
953 					err = -EEXIST;
954 					break;
955 				}
956 			} else {
957 				if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
958 				    (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
959 					err = -EINVAL;
960 					break;
961 				}
962 				t = netdev_priv(dev);
963 				ipip6_tunnel_unlink(sitn, t);
964 				synchronize_net();
965 				t->parms.iph.saddr = p.iph.saddr;
966 				t->parms.iph.daddr = p.iph.daddr;
967 				memcpy(dev->dev_addr, &p.iph.saddr, 4);
968 				memcpy(dev->broadcast, &p.iph.daddr, 4);
969 				ipip6_tunnel_link(sitn, t);
970 				netdev_state_change(dev);
971 			}
972 		}
973 
974 		if (t) {
975 			err = 0;
976 			if (cmd == SIOCCHGTUNNEL) {
977 				t->parms.iph.ttl = p.iph.ttl;
978 				t->parms.iph.tos = p.iph.tos;
979 				if (t->parms.link != p.link) {
980 					t->parms.link = p.link;
981 					ipip6_tunnel_bind_dev(dev);
982 					netdev_state_change(dev);
983 				}
984 			}
985 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
986 				err = -EFAULT;
987 		} else
988 			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
989 		break;
990 
991 	case SIOCDELTUNNEL:
992 		err = -EPERM;
993 		if (!capable(CAP_NET_ADMIN))
994 			goto done;
995 
996 		if (dev == sitn->fb_tunnel_dev) {
997 			err = -EFAULT;
998 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
999 				goto done;
1000 			err = -ENOENT;
1001 			if ((t = ipip6_tunnel_locate(net, &p, 0)) == NULL)
1002 				goto done;
1003 			err = -EPERM;
1004 			if (t == netdev_priv(sitn->fb_tunnel_dev))
1005 				goto done;
1006 			dev = t->dev;
1007 		}
1008 		unregister_netdevice(dev);
1009 		err = 0;
1010 		break;
1011 
1012 	case SIOCGETPRL:
1013 		err = -EINVAL;
1014 		if (dev == sitn->fb_tunnel_dev)
1015 			goto done;
1016 		err = -ENOENT;
1017 		if (!(t = netdev_priv(dev)))
1018 			goto done;
1019 		err = ipip6_tunnel_get_prl(t, ifr->ifr_ifru.ifru_data);
1020 		break;
1021 
1022 	case SIOCADDPRL:
1023 	case SIOCDELPRL:
1024 	case SIOCCHGPRL:
1025 		err = -EPERM;
1026 		if (!capable(CAP_NET_ADMIN))
1027 			goto done;
1028 		err = -EINVAL;
1029 		if (dev == sitn->fb_tunnel_dev)
1030 			goto done;
1031 		err = -EFAULT;
1032 		if (copy_from_user(&prl, ifr->ifr_ifru.ifru_data, sizeof(prl)))
1033 			goto done;
1034 		err = -ENOENT;
1035 		if (!(t = netdev_priv(dev)))
1036 			goto done;
1037 
1038 		switch (cmd) {
1039 		case SIOCDELPRL:
1040 			err = ipip6_tunnel_del_prl(t, &prl);
1041 			break;
1042 		case SIOCADDPRL:
1043 		case SIOCCHGPRL:
1044 			err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL);
1045 			break;
1046 		}
1047 		netdev_state_change(dev);
1048 		break;
1049 
1050 #ifdef CONFIG_IPV6_SIT_6RD
1051 	case SIOCADD6RD:
1052 	case SIOCCHG6RD:
1053 	case SIOCDEL6RD:
1054 		err = -EPERM;
1055 		if (!capable(CAP_NET_ADMIN))
1056 			goto done;
1057 
1058 		err = -EFAULT;
1059 		if (copy_from_user(&ip6rd, ifr->ifr_ifru.ifru_data,
1060 				   sizeof(ip6rd)))
1061 			goto done;
1062 
1063 		t = netdev_priv(dev);
1064 
1065 		if (cmd != SIOCDEL6RD) {
1066 			struct in6_addr prefix;
1067 			__be32 relay_prefix;
1068 
1069 			err = -EINVAL;
1070 			if (ip6rd.relay_prefixlen > 32 ||
1071 			    ip6rd.prefixlen + (32 - ip6rd.relay_prefixlen) > 64)
1072 				goto done;
1073 
1074 			ipv6_addr_prefix(&prefix, &ip6rd.prefix,
1075 					 ip6rd.prefixlen);
1076 			if (!ipv6_addr_equal(&prefix, &ip6rd.prefix))
1077 				goto done;
1078 			if (ip6rd.relay_prefixlen)
1079 				relay_prefix = ip6rd.relay_prefix &
1080 					       htonl(0xffffffffUL <<
1081 						     (32 - ip6rd.relay_prefixlen));
1082 			else
1083 				relay_prefix = 0;
1084 			if (relay_prefix != ip6rd.relay_prefix)
1085 				goto done;
1086 
1087 			t->ip6rd.prefix = prefix;
1088 			t->ip6rd.relay_prefix = relay_prefix;
1089 			t->ip6rd.prefixlen = ip6rd.prefixlen;
1090 			t->ip6rd.relay_prefixlen = ip6rd.relay_prefixlen;
1091 		} else
1092 			ipip6_tunnel_clone_6rd(dev, sitn);
1093 
1094 		err = 0;
1095 		break;
1096 #endif
1097 
1098 	default:
1099 		err = -EINVAL;
1100 	}
1101 
1102 done:
1103 	return err;
1104 }
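/*
 * Userspace sketch of this ioctl interface (illustrative only; the tunnel
 * name and the addresses are made-up examples, error handling is omitted).
 * It mirrors what "ip tunnel add" does: fill a struct ip_tunnel_parm from
 * <linux/if_tunnel.h> and issue SIOCADDTUNNEL on an AF_INET socket,
 * addressed to the fallback device:
 *
 *	struct ip_tunnel_parm p = { .iph = { .version = 4, .ihl = 5,
 *					     .protocol = IPPROTO_IPV6,
 *					     .ttl = 64 } };
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strcpy(p.name, "sit1");			// hypothetical tunnel name
 *	p.iph.saddr = inet_addr("192.0.2.1");	// example local endpoint
 *	p.iph.daddr = inet_addr("198.51.100.1");// example remote endpoint
 *	strcpy(ifr.ifr_name, "sit0");		// talk to the fallback device
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);		// needs CAP_NET_ADMIN
 */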
1105 
1106 static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1107 {
1108 	if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr))
1109 		return -EINVAL;
1110 	dev->mtu = new_mtu;
1111 	return 0;
1112 }
1113 
1114 static const struct net_device_ops ipip6_netdev_ops = {
1115 	.ndo_uninit	= ipip6_tunnel_uninit,
1116 	.ndo_start_xmit	= ipip6_tunnel_xmit,
1117 	.ndo_do_ioctl	= ipip6_tunnel_ioctl,
1118 	.ndo_change_mtu	= ipip6_tunnel_change_mtu,
1119 	.ndo_get_stats	= ipip6_get_stats,
1120 };
1121 
1122 static void ipip6_dev_free(struct net_device *dev)
1123 {
1124 	free_percpu(dev->tstats);
1125 	free_netdev(dev);
1126 }
1127 
1128 static void ipip6_tunnel_setup(struct net_device *dev)
1129 {
1130 	dev->netdev_ops		= &ipip6_netdev_ops;
1131 	dev->destructor 	= ipip6_dev_free;
1132 
1133 	dev->type		= ARPHRD_SIT;
1134 	dev->hard_header_len 	= LL_MAX_HEADER + sizeof(struct iphdr);
1135 	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr);
1136 	dev->flags		= IFF_NOARP;
1137 	dev->priv_flags	       &= ~IFF_XMIT_DST_RELEASE;
1138 	dev->iflink		= 0;
1139 	dev->addr_len		= 4;
1140 	dev->features		|= NETIF_F_NETNS_LOCAL;
1141 	dev->features		|= NETIF_F_LLTX;
1142 }
1143 
1144 static int ipip6_tunnel_init(struct net_device *dev)
1145 {
1146 	struct ip_tunnel *tunnel = netdev_priv(dev);
1147 
1148 	tunnel->dev = dev;
1149 
1150 	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1151 	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
1152 
1153 	ipip6_tunnel_bind_dev(dev);
1154 	dev->tstats = alloc_percpu(struct pcpu_tstats);
1155 	if (!dev->tstats)
1156 		return -ENOMEM;
1157 
1158 	return 0;
1159 }
1160 
1161 static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
1162 {
1163 	struct ip_tunnel *tunnel = netdev_priv(dev);
1164 	struct iphdr *iph = &tunnel->parms.iph;
1165 	struct net *net = dev_net(dev);
1166 	struct sit_net *sitn = net_generic(net, sit_net_id);
1167 
1168 	tunnel->dev = dev;
1169 	strcpy(tunnel->parms.name, dev->name);
1170 
1171 	iph->version		= 4;
1172 	iph->protocol		= IPPROTO_IPV6;
1173 	iph->ihl		= 5;
1174 	iph->ttl		= 64;
1175 
1176 	dev->tstats = alloc_percpu(struct pcpu_tstats);
1177 	if (!dev->tstats)
1178 		return -ENOMEM;
1179 	dev_hold(dev);
1180 	rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
1181 	return 0;
1182 }
1183 
1184 static struct xfrm_tunnel sit_handler __read_mostly = {
1185 	.handler	=	ipip6_rcv,
1186 	.err_handler	=	ipip6_err,
1187 	.priority	=	1,
1188 };
1189 
1190 static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_head *head)
1191 {
1192 	int prio;
1193 
1194 	for (prio = 1; prio < 4; prio++) {
1195 		int h;
1196 		for (h = 0; h < HASH_SIZE; h++) {
1197 			struct ip_tunnel *t;
1198 
1199 			t = rtnl_dereference(sitn->tunnels[prio][h]);
1200 			while (t != NULL) {
1201 				unregister_netdevice_queue(t->dev, head);
1202 				t = rtnl_dereference(t->next);
1203 			}
1204 		}
1205 	}
1206 }
1207 
1208 static int __net_init sit_init_net(struct net *net)
1209 {
1210 	struct sit_net *sitn = net_generic(net, sit_net_id);
1211 	struct ip_tunnel *t;
1212 	int err;
1213 
1214 	sitn->tunnels[0] = sitn->tunnels_wc;
1215 	sitn->tunnels[1] = sitn->tunnels_l;
1216 	sitn->tunnels[2] = sitn->tunnels_r;
1217 	sitn->tunnels[3] = sitn->tunnels_r_l;
1218 
1219 	sitn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "sit0",
1220 					   ipip6_tunnel_setup);
1221 	if (!sitn->fb_tunnel_dev) {
1222 		err = -ENOMEM;
1223 		goto err_alloc_dev;
1224 	}
1225 	dev_net_set(sitn->fb_tunnel_dev, net);
1226 
1227 	err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
1228 	if (err)
1229 		goto err_dev_free;
1230 
1231 	ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
1232 
1233 	if ((err = register_netdev(sitn->fb_tunnel_dev)))
1234 		goto err_reg_dev;
1235 
1236 	t = netdev_priv(sitn->fb_tunnel_dev);
1237 
1238 	strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
1239 	return 0;
1240 
1241 err_reg_dev:
1242 	dev_put(sitn->fb_tunnel_dev);
1243 err_dev_free:
1244 	ipip6_dev_free(sitn->fb_tunnel_dev);
1245 err_alloc_dev:
1246 	return err;
1247 }
1248 
1249 static void __net_exit sit_exit_net(struct net *net)
1250 {
1251 	struct sit_net *sitn = net_generic(net, sit_net_id);
1252 	LIST_HEAD(list);
1253 
1254 	rtnl_lock();
1255 	sit_destroy_tunnels(sitn, &list);
1256 	unregister_netdevice_queue(sitn->fb_tunnel_dev, &list);
1257 	unregister_netdevice_many(&list);
1258 	rtnl_unlock();
1259 }
1260 
1261 static struct pernet_operations sit_net_ops = {
1262 	.init = sit_init_net,
1263 	.exit = sit_exit_net,
1264 	.id   = &sit_net_id,
1265 	.size = sizeof(struct sit_net),
1266 };
1267 
1268 static void __exit sit_cleanup(void)
1269 {
1270 	xfrm4_tunnel_deregister(&sit_handler, AF_INET6);
1271 
1272 	unregister_pernet_device(&sit_net_ops);
1273 	rcu_barrier(); /* Wait for completion of call_rcu() callbacks */
1274 }
1275 
1276 static int __init sit_init(void)
1277 {
1278 	int err;
1279 
1280 	printk(KERN_INFO "IPv6 over IPv4 tunneling driver\n");
1281 
1282 	err = register_pernet_device(&sit_net_ops);
1283 	if (err < 0)
1284 		return err;
1285 	err = xfrm4_tunnel_register(&sit_handler, AF_INET6);
1286 	if (err < 0) {
1287 		unregister_pernet_device(&sit_net_ops);
1288 		printk(KERN_INFO "sit init: Can't add protocol\n");
1289 	}
1290 	return err;
1291 }
1292 
1293 module_init(sit_init);
1294 module_exit(sit_cleanup);
1295 MODULE_LICENSE("GPL");
1296 MODULE_ALIAS_NETDEV("sit0");
1297