/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void	__tcp_v6_send_check(struct sk_buff *skb,
				    const struct in6_addr *saddr,
				    const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

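/* Hash the socket into the TCP lookup tables.  Sockets that ended up
 * v4-mapped (icsk_af_ops == ipv6_mapped) take the IPv4 hashing path;
 * native IPv6 sockets use __inet6_hash() with bottom halves disabled.
 */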
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

static __inline__ __sum16 tcp_v6_check(int len,
				   const struct in6_addr *saddr,
				   const struct in6_addr *daddr,
				   __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

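/* Derive the initial sequence number for a connection from the
 * {daddr, saddr, dport, sport} tuple of the incoming segment.
 */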
static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

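/* Active open.  Validate the destination, resolve flow labels and
 * scope ids, divert v4-mapped destinations to tcp_v4_connect(), route
 * the flow and pick a source address, then hash the connection and
 * send the SYN.
 */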
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &np->daddr);
	ipv6_addr_copy(&fl6.saddr,
		       (saddr ? saddr : &np->saddr));
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		/*
		 * VJ's idea. We save the last timestamp seen from
		 * the destination in the peer table when entering
		 * TIME-WAIT state, and initialize rx_opt.ts_recent
		 * from it when trying a new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

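/* ICMPv6 error handler.  Look up the socket the error refers to,
 * handle PKT_TOOBIG by re-routing and syncing the MSS, and report
 * other errors to the socket or to the matching request_sock.
 */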
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl6.daddr, &np->daddr);
			ipv6_addr_copy(&fl6.saddr, &np->saddr);
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);
	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


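/* Route a reply for the given connection request and transmit the
 * SYN|ACK built by tcp_make_synack().
 */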
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
	ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
	fl6.flowlabel = 0;
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl6, opt);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
	else
#endif
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
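/* Linear scan of the socket's list of configured IPv6 MD5 keys for
 * the one matching @addr; returns NULL if none is configured.
 */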
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

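/* Add an MD5 key for @peer, or replace the key of an existing entry.
 * The keys6 array is grown one entry at a time; on any allocation
 * failure @newkey is freed and -ENOMEM is returned.
 */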
static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

static void tcp_v6_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}

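/* Feed the IPv6 pseudo-header (addresses, segment length, next header)
 * into the MD5 hash, as required for RFC 2385 signatures.
 */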
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

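/* Validate the TCP MD5 option on an inbound segment.  Returns 0 when
 * the segment may be processed (no key and no option, or the hashes
 * match) and 1 when it must be dropped.
 */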
static int tcp_v6_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr, const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}

static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

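/* Build and transmit a bare ACK or RST in reply to @skb, swapping the
 * addresses and ports and carrying optional timestamp and MD5 options.
 * Sent on the per-namespace control socket, outside any connection.
 */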
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup_flow even when the reply is
	 * a RST; the underlying function uses it to retrieve the
	 * network namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}

static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}


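/* For a listening socket, match an incoming segment first against the
 * pending connection requests, then against the established hash; as
 * a last resort validate it as a SYN-cookie ACK.
 */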
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}

/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (!isn) {
		struct inet_peer *peer = NULL;

		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
				    &treq->rmt_addr)) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies the last quarter of the
			 * backlog is reserved for destinations proven
			 * to be alive.  It means that we continue to
			 * communicate with destinations already
			 * remembered at the moment of the synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}

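/* Create the child socket once the final ACK of the handshake arrives.
 * A request that came in over a v4-mapped socket is delegated to
 * tcp_v4_syn_recv_sock() and the child is converted to mapped ops.
 */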
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

1494 
1495 	   Yes, keeping reference count would be much more clever,
1496 	   but we make one more one thing there: reattach optmem
1497 	   to newsk.
1498 	 */
1499 	if (opt) {
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}

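/* Validate or set up the checksum of an inbound segment.  Short
 * packets (<= 76 bytes) are verified immediately; for longer ones the
 * check is deferred until the data is actually consumed.
 */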
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}

/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   may make it not affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   the options received in the last packet enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb->rxhash);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb->rxhash);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb->rxhash);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* You may ask, what is this?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}

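/* Main receive routine, registered for IPPROTO_TCP.  Validates the
 * header and checksum, looks up the owning socket, and delivers the
 * segment under the socket lock, via the prequeue, or to the backlog.
 */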
static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

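/* Return the inet_peer entry for the connection's destination, using
 * the peer bound to the cached route when it is still valid.
 */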
1857 static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1858 {
1859 	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1860 	struct ipv6_pinfo *np = inet6_sk(sk);
1861 	struct inet_peer *peer;
1862 
1863 	if (!rt ||
1864 	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1865 		peer = inet_getpeer_v6(&np->daddr, 1);
1866 		*release_it = true;
1867 	} else {
1868 		if (!rt->rt6i_peer)
1869 			rt6_bind_peer(rt, 1);
1870 		peer = rt->rt6i_peer;
1871 		*release_it = false;
1872 	}
1873 
1874 	return peer;
1875 }
1876 
1877 static void *tcp_v6_tw_get_peer(struct sock *sk)
1878 {
1879 	struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1880 	struct inet_timewait_sock *tw = inet_twsk(sk);
1881 
1882 	if (tw->tw_family == AF_INET)
1883 		return tcp_v4_tw_get_peer(sk);
1884 
1885 	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1886 }
1887 
1888 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1889 	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
1890 	.twsk_unique	= tcp_twsk_unique,
1891 	.twsk_destructor= tcp_twsk_destructor,
1892 	.twsk_getpeer	= tcp_v6_tw_get_peer,
1893 };
1894 
1895 static const struct inet_connection_sock_af_ops ipv6_specific = {
1896 	.queue_xmit	   = inet6_csk_xmit,
1897 	.send_check	   = tcp_v6_send_check,
1898 	.rebuild_header	   = inet6_sk_rebuild_header,
1899 	.conn_request	   = tcp_v6_conn_request,
1900 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1901 	.get_peer	   = tcp_v6_get_peer,
1902 	.net_header_len	   = sizeof(struct ipv6hdr),
1903 	.setsockopt	   = ipv6_setsockopt,
1904 	.getsockopt	   = ipv6_getsockopt,
1905 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1906 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1907 	.bind_conflict	   = inet6_csk_bind_conflict,
1908 #ifdef CONFIG_COMPAT
1909 	.compat_setsockopt = compat_ipv6_setsockopt,
1910 	.compat_getsockopt = compat_ipv6_getsockopt,
1911 #endif
1912 };
1913 
1914 #ifdef CONFIG_TCP_MD5SIG
1915 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1916 	.md5_lookup	=	tcp_v6_md5_lookup,
1917 	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
1918 	.md5_add	=	tcp_v6_md5_add_func,
1919 	.md5_parse	=	tcp_v6_parse_md5_keys,
1920 };
1921 #endif
1922 
1923 /*
1924  *	TCP over IPv4 via INET6 API
1925  */
1926 
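/* Used when an AF_INET6 socket talks to an IPv4-mapped address:
 * tcp_v6_connect() swaps in these ops so transmission goes through the
 * IPv4 stack while the socket keeps its IPv6 sockaddr format.
 */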
1927 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1928 	.queue_xmit	   = ip_queue_xmit,
1929 	.send_check	   = tcp_v4_send_check,
1930 	.rebuild_header	   = inet_sk_rebuild_header,
1931 	.conn_request	   = tcp_v6_conn_request,
1932 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
1933 	.get_peer	   = tcp_v4_get_peer,
1934 	.net_header_len	   = sizeof(struct iphdr),
1935 	.setsockopt	   = ipv6_setsockopt,
1936 	.getsockopt	   = ipv6_getsockopt,
1937 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1938 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1939 	.bind_conflict	   = inet6_csk_bind_conflict,
1940 #ifdef CONFIG_COMPAT
1941 	.compat_setsockopt = compat_ipv6_setsockopt,
1942 	.compat_getsockopt = compat_ipv6_getsockopt,
1943 #endif
1944 };
1945 
1946 #ifdef CONFIG_TCP_MD5SIG
1947 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1948 	.md5_lookup	=	tcp_v4_md5_lookup,
1949 	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
1950 	.md5_add	=	tcp_v6_md5_add_func,
1951 	.md5_parse	=	tcp_v6_parse_md5_keys,
1952 };
1953 #endif
1954 
1955 /* NOTE: Most of the socket is zeroed explicitly by the call to
1956  *       sk_alloc(), so zero-valued fields need not be set here.
1957  */
1958 static int tcp_v6_init_sock(struct sock *sk)
1959 {
1960 	struct inet_connection_sock *icsk = inet_csk(sk);
1961 	struct tcp_sock *tp = tcp_sk(sk);
1962 
1963 	skb_queue_head_init(&tp->out_of_order_queue);
1964 	tcp_init_xmit_timers(sk);
1965 	tcp_prequeue_init(tp);
1966 
1967 	icsk->icsk_rto = TCP_TIMEOUT_INIT;
1968 	tp->mdev = TCP_TIMEOUT_INIT;
1969 
1970 	/* So many TCP implementations out there (incorrectly) count the
1971 	 * initial SYN frame in their delayed-ACK and congestion control
1972 	 * algorithms that we must have the following bandaid to talk
1973 	 * efficiently to them.  -DaveM
1974 	 */
1975 	tp->snd_cwnd = 2;
1976 
1977 	/* See draft-stevens-tcpca-spec-01 for discussion of the
1978 	 * initialization of these values.
1979 	 */
1980 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1981 	tp->snd_cwnd_clamp = ~0;
1982 	tp->mss_cache = TCP_MSS_DEFAULT;
1983 
1984 	tp->reordering = sysctl_tcp_reordering;
1985 
1986 	sk->sk_state = TCP_CLOSE;
1987 
1988 	icsk->icsk_af_ops = &ipv6_specific;
1989 	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1990 	icsk->icsk_sync_mss = tcp_sync_mss;
1991 	sk->sk_write_space = sk_stream_write_space;
1992 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1993 
1994 #ifdef CONFIG_TCP_MD5SIG
1995 	tp->af_specific = &tcp_sock_ipv6_specific;
1996 #endif
1997 
1998 	/* TCP Cookie Transactions */
1999 	if (sysctl_tcp_cookie_size > 0) {
2000 		/* Default: cookies without s_data_payload. */
2001 		tp->cookie_values =
2002 			kzalloc(sizeof(*tp->cookie_values),
2003 				sk->sk_allocation);
2004 		if (tp->cookie_values != NULL)
2005 			kref_init(&tp->cookie_values->kref);
2006 	}
2007 	/* Presumed zeroed, in order of appearance:
2008 	 *	cookie_in_always, cookie_out_never,
2009 	 *	s_data_constant, s_data_in, s_data_out
2010 	 */
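	/* Index 1 of the wmem/rmem sysctl triplets holds the default;
	 * buffer auto-tuning may grow these later.
	 */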
2011 	sk->sk_sndbuf = sysctl_tcp_wmem[1];
2012 	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
2013 
2014 	local_bh_disable();
2015 	percpu_counter_inc(&tcp_sockets_allocated);
2016 	local_bh_enable();
2017 
2018 	return 0;
2019 }
2020 
2021 static void tcp_v6_destroy_sock(struct sock *sk)
2022 {
2023 #ifdef CONFIG_TCP_MD5SIG
2024 	/* Clean up the MD5 key list */
2025 	if (tcp_sk(sk)->md5sig_info)
2026 		tcp_v6_clear_md5_list(sk);
2027 #endif
2028 	tcp_v4_destroy_sock(sk);
2029 	inet6_destroy_sock(sk);
2030 }
2031 
2032 #ifdef CONFIG_PROC_FS
2033 /* Proc filesystem TCPv6 sock list dumping. */
2034 static void get_openreq6(struct seq_file *seq,
2035 			 struct sock *sk, struct request_sock *req, int i, int uid)
2036 {
2037 	int ttd = req->expires - jiffies;
2038 	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
2039 	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
2040 
2041 	if (ttd < 0)
2042 		ttd = 0;
2043 
2044 	seq_printf(seq,
2045 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2046 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2047 		   i,
2048 		   src->s6_addr32[0], src->s6_addr32[1],
2049 		   src->s6_addr32[2], src->s6_addr32[3],
2050 		   ntohs(inet_rsk(req)->loc_port),
2051 		   dest->s6_addr32[0], dest->s6_addr32[1],
2052 		   dest->s6_addr32[2], dest->s6_addr32[3],
2053 		   ntohs(inet_rsk(req)->rmt_port),
2054 		   TCP_SYN_RECV,
2055 		   0, 0, /* could print option size, but that is af-dependent. */
2056 		   1,   /* timers active (only the expire timer) */
2057 		   jiffies_to_clock_t(ttd),
2058 		   req->retrans,
2059 		   uid,
2060 		   0,  /* non standard timer */
2061 		   0, /* open_requests have no inode */
2062 		   0, req);
2063 }
2064 
2065 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2066 {
2067 	const struct in6_addr *dest, *src;
2068 	__u16 destp, srcp;
2069 	int timer_active;
2070 	unsigned long timer_expires;
2071 	struct inet_sock *inet = inet_sk(sp);
2072 	struct tcp_sock *tp = tcp_sk(sp);
2073 	const struct inet_connection_sock *icsk = inet_csk(sp);
2074 	struct ipv6_pinfo *np = inet6_sk(sp);
2075 
2076 	dest  = &np->daddr;
2077 	src   = &np->rcv_saddr;
2078 	destp = ntohs(inet->inet_dport);
2079 	srcp  = ntohs(inet->inet_sport);
2080 
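	/* Encode the pending timer for the "tr" column: 1 retransmit,
	 * 2 another timer pending on sk_timer (e.g. keepalive),
	 * 4 zero-window probe, 0 none.  TIME_WAIT sockets report 3,
	 * see get_timewait6_sock().
	 */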
2081 	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2082 		timer_active	= 1;
2083 		timer_expires	= icsk->icsk_timeout;
2084 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2085 		timer_active	= 4;
2086 		timer_expires	= icsk->icsk_timeout;
2087 	} else if (timer_pending(&sp->sk_timer)) {
2088 		timer_active	= 2;
2089 		timer_expires	= sp->sk_timer.expires;
2090 	} else {
2091 		timer_active	= 0;
2092 		timer_expires	= jiffies;
2093 	}
2094 
2095 	seq_printf(seq,
2096 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2097 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
2098 		   i,
2099 		   src->s6_addr32[0], src->s6_addr32[1],
2100 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2101 		   dest->s6_addr32[0], dest->s6_addr32[1],
2102 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2103 		   sp->sk_state,
2104 		   tp->write_seq - tp->snd_una,
2105 		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
2106 		   timer_active,
2107 		   jiffies_to_clock_t(timer_expires - jiffies),
2108 		   icsk->icsk_retransmits,
2109 		   sock_i_uid(sp),
2110 		   icsk->icsk_probes_out,
2111 		   sock_i_ino(sp),
2112 		   atomic_read(&sp->sk_refcnt), sp,
2113 		   jiffies_to_clock_t(icsk->icsk_rto),
2114 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
2115 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2116 		   tp->snd_cwnd,
2117 		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
2118 		   );
2119 }
2120 
2121 static void get_timewait6_sock(struct seq_file *seq,
2122 			       struct inet_timewait_sock *tw, int i)
2123 {
2124 	const struct in6_addr *dest, *src;
2125 	__u16 destp, srcp;
2126 	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2127 	int ttd = tw->tw_ttd - jiffies;
2128 
2129 	if (ttd < 0)
2130 		ttd = 0;
2131 
2132 	dest = &tw6->tw_v6_daddr;
2133 	src  = &tw6->tw_v6_rcv_saddr;
2134 	destp = ntohs(tw->tw_dport);
2135 	srcp  = ntohs(tw->tw_sport);
2136 
2137 	seq_printf(seq,
2138 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2139 		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2140 		   i,
2141 		   src->s6_addr32[0], src->s6_addr32[1],
2142 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
2143 		   dest->s6_addr32[0], dest->s6_addr32[1],
2144 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
2145 		   tw->tw_substate, 0, 0,
2146 		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2147 		   atomic_read(&tw->tw_refcnt), tw);
2148 }
2149 
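/* seq_file show callback: emit the column header for the start token,
 * then one line per entry, formatted according to the iterator state
 * (listening/established socket, open request, or timewait socket).
 */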
2150 static int tcp6_seq_show(struct seq_file *seq, void *v)
2151 {
2152 	struct tcp_iter_state *st;
2153 
2154 	if (v == SEQ_START_TOKEN) {
2155 		seq_puts(seq,
2156 			 "  sl  "
2157 			 "local_address                         "
2158 			 "remote_address                        "
2159 			 "st tx_queue rx_queue tr tm->when retrnsmt"
2160 			 "   uid  timeout inode\n");
2161 		goto out;
2162 	}
2163 	st = seq->private;
2164 
2165 	switch (st->state) {
2166 	case TCP_SEQ_STATE_LISTENING:
2167 	case TCP_SEQ_STATE_ESTABLISHED:
2168 		get_tcp6_sock(seq, v, st->num);
2169 		break;
2170 	case TCP_SEQ_STATE_OPENREQ:
2171 		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2172 		break;
2173 	case TCP_SEQ_STATE_TIME_WAIT:
2174 		get_timewait6_sock(seq, v, st->num);
2175 		break;
2176 	}
2177 out:
2178 	return 0;
2179 }
2180 
2181 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2182 	.name		= "tcp6",
2183 	.family		= AF_INET6,
2184 	.seq_fops	= {
2185 		.owner		= THIS_MODULE,
2186 	},
2187 	.seq_ops	= {
2188 		.show		= tcp6_seq_show,
2189 	},
2190 };
2191 
2192 int __net_init tcp6_proc_init(struct net *net)
2193 {
2194 	return tcp_proc_register(net, &tcp6_seq_afinfo);
2195 }
2196 
2197 void tcp6_proc_exit(struct net *net)
2198 {
2199 	tcp_proc_unregister(net, &tcp6_seq_afinfo);
2200 }
2201 #endif
2202 
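/* Protocol operations for AF_INET6 stream sockets.  Most handlers are
 * shared with IPv4 TCP; only socket setup/teardown, hashing and the
 * backlog receive path are IPv6-specific.
 */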
2203 struct proto tcpv6_prot = {
2204 	.name			= "TCPv6",
2205 	.owner			= THIS_MODULE,
2206 	.close			= tcp_close,
2207 	.connect		= tcp_v6_connect,
2208 	.disconnect		= tcp_disconnect,
2209 	.accept			= inet_csk_accept,
2210 	.ioctl			= tcp_ioctl,
2211 	.init			= tcp_v6_init_sock,
2212 	.destroy		= tcp_v6_destroy_sock,
2213 	.shutdown		= tcp_shutdown,
2214 	.setsockopt		= tcp_setsockopt,
2215 	.getsockopt		= tcp_getsockopt,
2216 	.recvmsg		= tcp_recvmsg,
2217 	.sendmsg		= tcp_sendmsg,
2218 	.sendpage		= tcp_sendpage,
2219 	.backlog_rcv		= tcp_v6_do_rcv,
2220 	.hash			= tcp_v6_hash,
2221 	.unhash			= inet_unhash,
2222 	.get_port		= inet_csk_get_port,
2223 	.enter_memory_pressure	= tcp_enter_memory_pressure,
2224 	.sockets_allocated	= &tcp_sockets_allocated,
2225 	.memory_allocated	= &tcp_memory_allocated,
2226 	.memory_pressure	= &tcp_memory_pressure,
2227 	.orphan_count		= &tcp_orphan_count,
2228 	.sysctl_mem		= sysctl_tcp_mem,
2229 	.sysctl_wmem		= sysctl_tcp_wmem,
2230 	.sysctl_rmem		= sysctl_tcp_rmem,
2231 	.max_header		= MAX_TCP_HEADER,
2232 	.obj_size		= sizeof(struct tcp6_sock),
2233 	.slab_flags		= SLAB_DESTROY_BY_RCU,
2234 	.twsk_prot		= &tcp6_timewait_sock_ops,
2235 	.rsk_prot		= &tcp6_request_sock_ops,
2236 	.h.hashinfo		= &tcp_hashinfo,
2237 	.no_autobind		= true,
2238 #ifdef CONFIG_COMPAT
2239 	.compat_setsockopt	= compat_tcp_setsockopt,
2240 	.compat_getsockopt	= compat_tcp_getsockopt,
2241 #endif
2242 };
2243 
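/* Hooks for IPPROTO_TCP in the inet6 layer: receive and ICMPv6 error
 * handlers plus GSO/GRO offload callbacks.  NOPOLICY defers xfrm
 * policy checks to TCP itself; FINAL marks TCP as a terminal protocol
 * in extension-header processing.
 */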
2244 static const struct inet6_protocol tcpv6_protocol = {
2245 	.handler	=	tcp_v6_rcv,
2246 	.err_handler	=	tcp_v6_err,
2247 	.gso_send_check	=	tcp_v6_gso_send_check,
2248 	.gso_segment	=	tcp_tso_segment,
2249 	.gro_receive	=	tcp6_gro_receive,
2250 	.gro_complete	=	tcp6_gro_complete,
2251 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2252 };
2253 
2254 static struct inet_protosw tcpv6_protosw = {
2255 	.type		=	SOCK_STREAM,
2256 	.protocol	=	IPPROTO_TCP,
2257 	.prot		=	&tcpv6_prot,
2258 	.ops		=	&inet6_stream_ops,
2259 	.no_check	=	0,
2260 	.flags		=	INET_PROTOSW_PERMANENT |
2261 				INET_PROTOSW_ICSK,
2262 };
2263 
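/* Per-namespace setup: create the control socket used to transmit
 * RSTs and ACKs on behalf of no local socket, destroy it on exit, and
 * purge any remaining TIME_WAIT sockets when namespaces die in batch.
 */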
2264 static int __net_init tcpv6_net_init(struct net *net)
2265 {
2266 	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2267 				    SOCK_RAW, IPPROTO_TCP, net);
2268 }
2269 
2270 static void __net_exit tcpv6_net_exit(struct net *net)
2271 {
2272 	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2273 }
2274 
2275 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2276 {
2277 	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2278 }
2279 
2280 static struct pernet_operations tcpv6_net_ops = {
2281 	.init	    = tcpv6_net_init,
2282 	.exit	    = tcpv6_net_exit,
2283 	.exit_batch = tcpv6_net_exit_batch,
2284 };
2285 
2286 int __init tcpv6_init(void)
2287 {
2288 	int ret;
2289 
2290 	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2291 	if (ret)
2292 		goto out;
2293 
2294 	/* register the TCP protocol switch with the inet6 socket layer */
2295 	ret = inet6_register_protosw(&tcpv6_protosw);
2296 	if (ret)
2297 		goto out_tcpv6_protocol;
2298 
2299 	ret = register_pernet_subsys(&tcpv6_net_ops);
2300 	if (ret)
2301 		goto out_tcpv6_protosw;
2302 out:
2303 	return ret;
2304 
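	/* Unwind in reverse order of registration. */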
2305 out_tcpv6_protosw:
2306 	inet6_unregister_protosw(&tcpv6_protosw);
2307 out_tcpv6_protocol:
2308 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2309 	goto out;
2310 }
2311 
2312 void tcpv6_exit(void)
2313 {
2314 	unregister_pernet_subsys(&tcpv6_net_ops);
2315 	inet6_unregister_protosw(&tcpv6_protosw);
2316 	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2317 }
2318