xref: /linux/net/dccp/ipv6.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	DCCP over IPv6
4  *	Linux INET6 implementation
5  *
6  *	Based on net/ipv6/tcp_ipv6.c
7  *
8  *	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
9  */
10 
11 #include <linux/module.h>
12 #include <linux/random.h>
13 #include <linux/slab.h>
14 #include <linux/xfrm.h>
15 #include <linux/string.h>
16 
17 #include <net/addrconf.h>
18 #include <net/inet_common.h>
19 #include <net/inet_hashtables.h>
20 #include <net/inet_sock.h>
21 #include <net/inet6_connection_sock.h>
22 #include <net/inet6_hashtables.h>
23 #include <net/ip6_route.h>
24 #include <net/ipv6.h>
25 #include <net/protocol.h>
26 #include <net/transp_v6.h>
27 #include <net/ip6_checksum.h>
28 #include <net/xfrm.h>
29 #include <net/secure_seq.h>
30 #include <net/netns/generic.h>
31 #include <net/sock.h>
32 #include <net/rstreason.h>
33 
34 #include "dccp.h"
35 #include "ipv6.h"
36 #include "feat.h"
37 
38 struct dccp_v6_pernet {
39 	struct sock *v6_ctl_sk;
40 };
41 
42 static unsigned int dccp_v6_pernet_id __read_mostly;
43 
44 /* The per-net v6_ctl_sk is used for sending RSTs and ACKs */
45 
46 static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
47 static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
48 
49 /* add pseudo-header to DCCP checksum stored in skb->csum */
50 static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
51 				      const struct in6_addr *saddr,
52 				      const struct in6_addr *daddr)
53 {
54 	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
55 }
56 
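/*
 * Compute the checksum of an outgoing packet on a connected socket:
 * dccp_csum_outgoing() accumulates the covered part of the DCCP packet into
 * skb->csum, and dccp_v6_csum_finish() folds in the IPv6 pseudo-header.
 */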
57 static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
58 {
59 	struct ipv6_pinfo *np = inet6_sk(sk);
60 	struct dccp_hdr *dh = dccp_hdr(skb);
61 
62 	dccp_csum_outgoing(skb);
63 	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &sk->sk_v6_daddr);
64 }
65 
66 static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
67 {
68 	return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
69 					     ipv6_hdr(skb)->saddr.s6_addr32,
70 					     dccp_hdr(skb)->dccph_dport,
71 					     dccp_hdr(skb)->dccph_sport);
72 
73 }
74 
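/*
 * ICMPv6 error handler, invoked via the inet6_protocol err_handler hook when
 * an ICMPv6 error (destination unreachable, packet too big, redirect, ...)
 * quotes a DCCP segment previously sent by this host.
 */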
75 static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
76 			u8 type, u8 code, int offset, __be32 info)
77 {
78 	const struct ipv6hdr *hdr;
79 	const struct dccp_hdr *dh;
80 	struct dccp_sock *dp;
81 	struct ipv6_pinfo *np;
82 	struct sock *sk;
83 	int err;
84 	__u64 seq;
85 	struct net *net = dev_net(skb->dev);
86 
87 	if (!pskb_may_pull(skb, offset + sizeof(*dh)))
88 		return -EINVAL;
89 	dh = (struct dccp_hdr *)(skb->data + offset);
90 	if (!pskb_may_pull(skb, offset + __dccp_basic_hdr_len(dh)))
91 		return -EINVAL;
92 	hdr = (const struct ipv6hdr *)skb->data;
93 	dh = (struct dccp_hdr *)(skb->data + offset);
94 
95 	sk = __inet6_lookup_established(net, &dccp_hashinfo,
96 					&hdr->daddr, dh->dccph_dport,
97 					&hdr->saddr, ntohs(dh->dccph_sport),
98 					inet6_iif(skb), 0);
99 
100 	if (!sk) {
101 		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
102 				  ICMP6_MIB_INERRORS);
103 		return -ENOENT;
104 	}
105 
106 	if (sk->sk_state == DCCP_TIME_WAIT) {
107 		inet_twsk_put(inet_twsk(sk));
108 		return 0;
109 	}
110 	seq = dccp_hdr_seq(dh);
111 	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
112 		dccp_req_err(sk, seq);
113 		return 0;
114 	}
115 
116 	bh_lock_sock(sk);
117 	if (sock_owned_by_user(sk))
118 		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
119 
120 	if (sk->sk_state == DCCP_CLOSED)
121 		goto out;
122 
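	/* Except in the REQUESTING and LISTEN states, only act on the error
	 * if the quoted sequence number lies within the acknowledgement
	 * window (AWL..AWH); anything else is most likely stale or spoofed.
	 */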
123 	dp = dccp_sk(sk);
124 	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
125 	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
126 		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
127 		goto out;
128 	}
129 
130 	np = inet6_sk(sk);
131 
132 	if (type == NDISC_REDIRECT) {
133 		if (!sock_owned_by_user(sk)) {
134 			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
135 
136 			if (dst)
137 				dst->ops->redirect(dst, sk, skb);
138 		}
139 		goto out;
140 	}
141 
142 	if (type == ICMPV6_PKT_TOOBIG) {
143 		struct dst_entry *dst = NULL;
144 
145 		if (!ip6_sk_accept_pmtu(sk))
146 			goto out;
147 
148 		if (sock_owned_by_user(sk))
149 			goto out;
150 		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
151 			goto out;
152 
153 		dst = inet6_csk_update_pmtu(sk, ntohl(info));
154 		if (!dst)
155 			goto out;
156 
157 		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
158 			dccp_sync_mss(sk, dst_mtu(dst));
159 		goto out;
160 	}
161 
162 	icmpv6_err_convert(type, code, &err);
163 
164 	/* Might be for a request_sock */
165 	switch (sk->sk_state) {
166 	case DCCP_REQUESTING:
167 	case DCCP_RESPOND:  /* Cannot happen.
168 			       It can, if SYNs are crossed. --ANK */
169 		if (!sock_owned_by_user(sk)) {
170 			__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
171 			sk->sk_err = err;
172 			/*
173 			 * Wake people up to see the error
174 			 * (see connect in sock.c)
175 			 */
176 			sk_error_report(sk);
177 			dccp_done(sk);
178 		} else {
179 			WRITE_ONCE(sk->sk_err_soft, err);
180 		}
181 		goto out;
182 	}
183 
184 	if (!sock_owned_by_user(sk) && inet6_test_bit(RECVERR6, sk)) {
185 		sk->sk_err = err;
186 		sk_error_report(sk);
187 	} else {
188 		WRITE_ONCE(sk->sk_err_soft, err);
189 	}
190 out:
191 	bh_unlock_sock(sk);
192 	sock_put(sk);
193 	return 0;
194 }
195 
196 
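/*
 * Build and transmit the Response answering a client's Request; also used to
 * retransmit it via the rtx_syn_ack request_sock operation.
 */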
197 static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req)
198 {
199 	struct inet_request_sock *ireq = inet_rsk(req);
200 	struct ipv6_pinfo *np = inet6_sk(sk);
201 	struct sk_buff *skb;
202 	struct in6_addr *final_p, final;
203 	struct flowi6 fl6;
204 	int err = -1;
205 	struct dst_entry *dst;
206 
207 	memset(&fl6, 0, sizeof(fl6));
208 	fl6.flowi6_proto = IPPROTO_DCCP;
209 	fl6.daddr = ireq->ir_v6_rmt_addr;
210 	fl6.saddr = ireq->ir_v6_loc_addr;
211 	fl6.flowlabel = 0;
212 	fl6.flowi6_oif = ireq->ir_iif;
213 	fl6.fl6_dport = ireq->ir_rmt_port;
214 	fl6.fl6_sport = htons(ireq->ir_num);
215 	security_req_classify_flow(req, flowi6_to_flowi_common(&fl6));
216 
217 
218 	rcu_read_lock();
219 	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
220 	rcu_read_unlock();
221 
222 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
223 	if (IS_ERR(dst)) {
224 		err = PTR_ERR(dst);
225 		dst = NULL;
226 		goto done;
227 	}
228 
229 	skb = dccp_make_response(sk, dst, req);
230 	if (skb != NULL) {
231 		struct dccp_hdr *dh = dccp_hdr(skb);
232 		struct ipv6_txoptions *opt;
233 
234 		dh->dccph_checksum = dccp_v6_csum_finish(skb,
235 							 &ireq->ir_v6_loc_addr,
236 							 &ireq->ir_v6_rmt_addr);
237 		fl6.daddr = ireq->ir_v6_rmt_addr;
238 		rcu_read_lock();
239 		opt = ireq->ipv6_opt;
240 		if (!opt)
241 			opt = rcu_dereference(np->opt);
242 		err = ip6_xmit(sk, skb, &fl6, READ_ONCE(sk->sk_mark), opt,
243 			       np->tclass, READ_ONCE(sk->sk_priority));
244 		rcu_read_unlock();
245 		err = net_xmit_eval(err);
246 	}
247 
248 done:
249 	dst_release(dst);
250 	return err;
251 }
252 
253 static void dccp_v6_reqsk_destructor(struct request_sock *req)
254 {
255 	dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
256 	kfree(inet_rsk(req)->ipv6_opt);
257 	kfree_skb(inet_rsk(req)->pktopts);
258 }
259 
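/*
 * Send a Reset for a packet that has no usable socket context, using the
 * per-netns control socket.  A Reset is never sent in reply to a Reset, to
 * avoid reset wars.
 */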
260 static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb,
261 				   enum sk_rst_reason reason)
262 {
263 	const struct ipv6hdr *rxip6h;
264 	struct sk_buff *skb;
265 	struct flowi6 fl6;
266 	struct net *net = dev_net(skb_dst(rxskb)->dev);
267 	struct dccp_v6_pernet *pn;
268 	struct sock *ctl_sk;
269 	struct dst_entry *dst;
270 
271 	if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET)
272 		return;
273 
274 	if (!ipv6_unicast_destination(rxskb))
275 		return;
276 
277 	pn = net_generic(net, dccp_v6_pernet_id);
278 	ctl_sk = pn->v6_ctl_sk;
279 	skb = dccp_ctl_make_reset(ctl_sk, rxskb);
280 	if (skb == NULL)
281 		return;
282 
283 	rxip6h = ipv6_hdr(rxskb);
284 	dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
285 							    &rxip6h->daddr);
286 
287 	memset(&fl6, 0, sizeof(fl6));
288 	fl6.daddr = rxip6h->saddr;
289 	fl6.saddr = rxip6h->daddr;
290 
291 	fl6.flowi6_proto = IPPROTO_DCCP;
292 	fl6.flowi6_oif = inet6_iif(rxskb);
293 	fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
294 	fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
295 	security_skb_classify_flow(rxskb, flowi6_to_flowi_common(&fl6));
296 
297 	/* sk = NULL, but it is safe for now: only the RST control socket is required. */
298 	dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
299 	if (!IS_ERR(dst)) {
300 		skb_dst_set(skb, dst);
301 		ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0, 0);
302 		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
303 		DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
304 		return;
305 	}
306 
307 	kfree_skb(skb);
308 }
309 
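/*
 * request_sock operations used while a connection is still embryonic:
 * Responses are (re)sent with dccp_v6_send_response() and Resets with
 * dccp_v6_ctl_send_reset().
 */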
310 static struct request_sock_ops dccp6_request_sock_ops = {
311 	.family		= AF_INET6,
312 	.obj_size	= sizeof(struct dccp6_request_sock),
313 	.rtx_syn_ack	= dccp_v6_send_response,
314 	.send_ack	= dccp_reqsk_send_ack,
315 	.destructor	= dccp_v6_reqsk_destructor,
316 	.send_reset	= dccp_v6_ctl_send_reset,
317 	.syn_ack_timeout = dccp_syn_ack_timeout,
318 };
319 
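/*
 * Handle a Request arriving on a listening socket: allocate a request_sock,
 * parse the options and record the addressing information, send the Response
 * and add the minisocket to the listener's request queue.
 */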
320 static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
321 {
322 	struct request_sock *req;
323 	struct dccp_request_sock *dreq;
324 	struct inet_request_sock *ireq;
325 	struct ipv6_pinfo *np = inet6_sk(sk);
326 	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
327 	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
328 
329 	if (skb->protocol == htons(ETH_P_IP))
330 		return dccp_v4_conn_request(sk, skb);
331 
332 	if (!ipv6_unicast_destination(skb))
333 		return 0;	/* discard, don't send a reset here */
334 
335 	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
336 		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
337 		return 0;
338 	}
339 
340 	if (dccp_bad_service_code(sk, service)) {
341 		dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
342 		goto drop;
343 	}
344 	/*
345 	 * There are no SYN attacks on IPv6, yet...
346 	 */
347 	dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
348 	if (inet_csk_reqsk_queue_is_full(sk))
349 		goto drop;
350 
351 	if (sk_acceptq_is_full(sk))
352 		goto drop;
353 
354 	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
355 	if (req == NULL)
356 		goto drop;
357 
358 	if (dccp_reqsk_init(req, dccp_sk(sk), skb))
359 		goto drop_and_free;
360 
361 	dreq = dccp_rsk(req);
362 	if (dccp_parse_options(sk, dreq, skb))
363 		goto drop_and_free;
364 
365 	ireq = inet_rsk(req);
366 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
367 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
368 	ireq->ireq_family = AF_INET6;
369 	ireq->ir_mark = inet_request_mark(sk, skb);
370 
371 	if (security_inet_conn_request(sk, skb, req))
372 		goto drop_and_free;
373 
374 	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
375 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
376 	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
377 		refcount_inc(&skb->users);
378 		ireq->pktopts = skb;
379 	}
380 	ireq->ir_iif = READ_ONCE(sk->sk_bound_dev_if);
381 
382 	/* So that link-local addresses have meaning */
383 	if (!ireq->ir_iif &&
384 	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
385 		ireq->ir_iif = inet6_iif(skb);
386 
387 	/*
388 	 * Step 3: Process LISTEN state
389 	 *
390 	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
391 	 *
392 	 * Setting S.SWL/S.SWH is deferred to dccp_create_openreq_child().
393 	 */
394 	dreq->dreq_isr	   = dcb->dccpd_seq;
395 	dreq->dreq_gsr     = dreq->dreq_isr;
396 	dreq->dreq_iss	   = dccp_v6_init_sequence(skb);
397 	dreq->dreq_gss     = dreq->dreq_iss;
398 	dreq->dreq_service = service;
399 
400 	if (dccp_v6_send_response(sk, req))
401 		goto drop_and_free;
402 
403 	if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT)))
404 		reqsk_free(req);
405 	else
406 		reqsk_put(req);
407 
408 	return 0;
409 
410 drop_and_free:
411 	reqsk_free(req);
412 drop:
413 	__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
414 	return -1;
415 }
416 
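/*
 * Create the child socket once the handshake with the peer completes.  The
 * v4-mapped case is delegated to dccp_v4_request_recv_sock(), after which
 * the child is switched over to the dccp_ipv6_mapped af_ops.
 */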
417 static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
418 					      struct sk_buff *skb,
419 					      struct request_sock *req,
420 					      struct dst_entry *dst,
421 					      struct request_sock *req_unhash,
422 					      bool *own_req)
423 {
424 	struct inet_request_sock *ireq = inet_rsk(req);
425 	struct ipv6_pinfo *newnp;
426 	const struct ipv6_pinfo *np = inet6_sk(sk);
427 	struct ipv6_txoptions *opt;
428 	struct inet_sock *newinet;
429 	struct dccp6_sock *newdp6;
430 	struct sock *newsk;
431 
432 	if (skb->protocol == htons(ETH_P_IP)) {
433 		/*
434 		 *	v6 mapped
435 		 */
436 		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst,
437 						  req_unhash, own_req);
438 		if (newsk == NULL)
439 			return NULL;
440 
441 		newdp6 = (struct dccp6_sock *)newsk;
442 		newinet = inet_sk(newsk);
443 		newinet->pinet6 = &newdp6->inet6;
444 		newnp = inet6_sk(newsk);
445 
446 		memcpy(newnp, np, sizeof(struct ipv6_pinfo));
447 
448 		newnp->saddr = newsk->sk_v6_rcv_saddr;
449 
450 		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
451 		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
452 		newnp->pktoptions  = NULL;
453 		newnp->opt	   = NULL;
454 		newnp->ipv6_mc_list = NULL;
455 		newnp->ipv6_ac_list = NULL;
456 		newnp->ipv6_fl_list = NULL;
457 		newnp->mcast_oif   = inet_iif(skb);
458 		newnp->mcast_hops  = ip_hdr(skb)->ttl;
459 
460 		/*
461 		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
462 		 * here, dccp_create_openreq_child now does this for us, see the comment in
463 		 * that function for the gory details. -acme
464 		 */
465 
466 		/* This is a tricky place. Until this moment the IPv4 code
467 		   has been working with the IPv6 icsk.icsk_af_ops.
468 		   Sync the MSS now.
469 		 */
470 		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
471 
472 		return newsk;
473 	}
474 
475 
476 	if (sk_acceptq_is_full(sk))
477 		goto out_overflow;
478 
479 	if (!dst) {
480 		struct flowi6 fl6;
481 
482 		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_DCCP);
483 		if (!dst)
484 			goto out;
485 	}
486 
487 	newsk = dccp_create_openreq_child(sk, req, skb);
488 	if (newsk == NULL)
489 		goto out_nonewsk;
490 
491 	/*
492 	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
493 	 * count here, dccp_create_openreq_child now does this for us, see the
494 	 * comment in that function for the gory details. -acme
495 	 */
496 
497 	ip6_dst_store(newsk, dst, NULL, NULL);
498 	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
499 						      NETIF_F_TSO);
500 	newdp6 = (struct dccp6_sock *)newsk;
501 	newinet = inet_sk(newsk);
502 	newinet->pinet6 = &newdp6->inet6;
503 	newnp = inet6_sk(newsk);
504 
505 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
506 
507 	newsk->sk_v6_daddr	= ireq->ir_v6_rmt_addr;
508 	newnp->saddr		= ireq->ir_v6_loc_addr;
509 	newsk->sk_v6_rcv_saddr	= ireq->ir_v6_loc_addr;
510 	newsk->sk_bound_dev_if	= ireq->ir_iif;
511 
512 	/* Now IPv6 options...
513 
514 	   First: no IPv4 options.
515 	 */
516 	newinet->inet_opt = NULL;
517 
518 	/* Clone RX bits */
519 	newnp->rxopt.all = np->rxopt.all;
520 
521 	newnp->ipv6_mc_list = NULL;
522 	newnp->ipv6_ac_list = NULL;
523 	newnp->ipv6_fl_list = NULL;
524 	newnp->pktoptions = NULL;
525 	newnp->opt	  = NULL;
526 	newnp->mcast_oif  = inet6_iif(skb);
527 	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
528 
529 	/*
530 	 * Clone native IPv6 options from listening socket (if any)
531 	 *
532 	 * Yes, keeping a reference count would be much more clever, but we do
533 	 * one more thing here: reattach optmem to newsk.
534 	 */
535 	opt = ireq->ipv6_opt;
536 	if (!opt)
537 		opt = rcu_dereference(np->opt);
538 	if (opt) {
539 		opt = ipv6_dup_options(newsk, opt);
540 		RCU_INIT_POINTER(newnp->opt, opt);
541 	}
542 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
543 	if (opt)
544 		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
545 						    opt->opt_flen;
546 
547 	dccp_sync_mss(newsk, dst_mtu(dst));
548 
549 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
550 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
551 
552 	if (__inet_inherit_port(sk, newsk) < 0) {
553 		inet_csk_prepare_forced_close(newsk);
554 		dccp_done(newsk);
555 		goto out;
556 	}
557 	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
558 	/* Clone pktoptions received with SYN, if we own the req */
559 	if (*own_req && ireq->pktopts) {
560 		newnp->pktoptions = skb_clone_and_charge_r(ireq->pktopts, newsk);
561 		consume_skb(ireq->pktopts);
562 		ireq->pktopts = NULL;
563 	}
564 
565 	return newsk;
566 
567 out_overflow:
568 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
569 out_nonewsk:
570 	dst_release(dst);
571 out:
572 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
573 	return NULL;
574 }
575 
576 /* The socket must have its spinlock held when we get
577  * here.
578  *
579  * We have a potential double-lock case here, so even when
580  * doing backlog processing we use the BH locking scheme.
581  * This is because we cannot sleep with the original spinlock
582  * held.
583  */
584 static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
585 {
586 	struct ipv6_pinfo *np = inet6_sk(sk);
587 	struct sk_buff *opt_skb = NULL;
588 
589 	/* Imagine: the socket is IPv6 but an IPv4 packet arrives,
590 	   goes to the IPv4 receive handler and is backlogged.
591 	   From the backlog it always ends up here. Kerboom...
592 	   Fortunately, dccp_rcv_established() and rcv_established()
593 	   handle such packets correctly, but that is not the case with
594 	   dccp_v6_hnd_req() and dccp_v6_ctl_send_reset().   --ANK
595 	 */
596 
597 	if (skb->protocol == htons(ETH_P_IP))
598 		return dccp_v4_do_rcv(sk, skb);
599 
600 	if (sk_filter(sk, skb))
601 		goto discard;
602 
603 	/*
604 	 * Socket locking is here for SMP purposes, as the backlog rcv is
605 	 * currently called with BH processing disabled.
606 	 */
607 
608 	/* Do Stevens' IPV6_PKTOPTIONS.
609 
610 	   Yes, guys, this is the only place in our code where we
611 	   can do this without affecting IPv4.
612 	   The rest of the code is protocol independent,
613 	   and I do not like the idea of uglifying IPv4.
614 
615 	   Actually, the whole idea behind IPV6_PKTOPTIONS
616 	   does not look very well thought out. For now we latch
617 	   the options received in the last packet, as enqueued
618 	   by tcp. Feel free to propose a better solution.
619 					       --ANK (980728)
620 	 */
621 	if (np->rxopt.all)
622 		opt_skb = skb_clone_and_charge_r(skb, sk);
623 
624 	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
625 		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
626 			goto reset;
627 		if (opt_skb)
628 			goto ipv6_pktoptions;
629 		return 0;
630 	}
631 
632 	/*
633 	 *  Step 3: Process LISTEN state
634 	 *     If S.state == LISTEN,
635 	 *	 If P.type == Request or P contains a valid Init Cookie option,
636 	 *	      (* Must scan the packet's options to check for Init
637 	 *		 Cookies.  Only Init Cookies are processed here,
638 	 *		 however; other options are processed in Step 8.  This
639 	 *		 scan need only be performed if the endpoint uses Init
640 	 *		 Cookies *)
641 	 *	      (* Generate a new socket and switch to that socket *)
642 	 *	      Set S := new socket for this port pair
643 	 *	      S.state = RESPOND
644 	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
645 	 *	      Initialize S.GAR := S.ISS
646 	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
647 	 *	      Continue with S.state == RESPOND
648 	 *	      (* A Response packet will be generated in Step 11 *)
649 	 *	 Otherwise,
650 	 *	      Generate Reset(No Connection) unless P.type == Reset
651 	 *	      Drop packet and return
652 	 *
653 	 * NOTE: the check for the packet types is done in
654 	 *	 dccp_rcv_state_process
655 	 */
656 
657 	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
658 		goto reset;
659 	if (opt_skb)
660 		goto ipv6_pktoptions;
661 	return 0;
662 
663 reset:
664 	dccp_v6_ctl_send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);
665 discard:
666 	if (opt_skb != NULL)
667 		__kfree_skb(opt_skb);
668 	kfree_skb(skb);
669 	return 0;
670 
671 /* Handle the IPV6_PKTOPTIONS skb in a similar
672  * way to how it is done in net/ipv6/tcp_ipv6.c
673  */
674 ipv6_pktoptions:
675 	if (!((1 << sk->sk_state) & (DCCPF_CLOSED | DCCPF_LISTEN))) {
676 		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
677 			WRITE_ONCE(np->mcast_oif, inet6_iif(opt_skb));
678 		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
679 			WRITE_ONCE(np->mcast_hops, ipv6_hdr(opt_skb)->hop_limit);
680 		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
681 			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
682 		if (inet6_test_bit(REPFLOW, sk))
683 			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
684 		if (ipv6_opt_accepted(sk, opt_skb,
685 				      &DCCP_SKB_CB(opt_skb)->header.h6)) {
686 			memmove(IP6CB(opt_skb),
687 				&DCCP_SKB_CB(opt_skb)->header.h6,
688 				sizeof(struct inet6_skb_parm));
689 			opt_skb = xchg(&np->pktoptions, opt_skb);
690 		} else {
691 			__kfree_skb(opt_skb);
692 			opt_skb = xchg(&np->pktoptions, NULL);
693 		}
694 	}
695 
696 	kfree_skb(opt_skb);
697 	return 0;
698 }
699 
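/*
 * Main receive entry point, registered as the inet6_protocol handler for
 * IPPROTO_DCCP: validate the header and checksum, look up the owning socket
 * and hand the packet to its receive path, or answer with a Reset.
 */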
700 static int dccp_v6_rcv(struct sk_buff *skb)
701 {
702 	const struct dccp_hdr *dh;
703 	bool refcounted;
704 	struct sock *sk;
705 	int min_cov;
706 
707 	/* Step 1: Check header basics */
708 
709 	if (dccp_invalid_packet(skb))
710 		goto discard_it;
711 
712 	/* Step 1: If header checksum is incorrect, drop packet and return. */
713 	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
714 				     &ipv6_hdr(skb)->daddr)) {
715 		DCCP_WARN("dropped packet with invalid checksum\n");
716 		goto discard_it;
717 	}
718 
719 	dh = dccp_hdr(skb);
720 
721 	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(dh);
722 	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;
723 
724 	if (dccp_packet_without_ack(skb))
725 		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
726 	else
727 		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);
728 
729 lookup:
730 	sk = __inet6_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh),
731 			        dh->dccph_sport, dh->dccph_dport,
732 				inet6_iif(skb), 0, &refcounted);
733 	if (!sk) {
734 		dccp_pr_debug("failed to look up flow ID in table and "
735 			      "get corresponding socket\n");
736 		goto no_dccp_socket;
737 	}
738 
739 	/*
740 	 * Step 2:
741 	 *	... or S.state == TIMEWAIT,
742 	 *		Generate Reset(No Connection) unless P.type == Reset
743 	 *		Drop packet and return
744 	 */
745 	if (sk->sk_state == DCCP_TIME_WAIT) {
746 		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
747 		inet_twsk_put(inet_twsk(sk));
748 		goto no_dccp_socket;
749 	}
750 
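	/* The lookup returned a request socket: revalidate the listener and
	 * let dccp_check_req() decide whether to create a child socket, hand
	 * the packet to the listener, or drop it.
	 */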
751 	if (sk->sk_state == DCCP_NEW_SYN_RECV) {
752 		struct request_sock *req = inet_reqsk(sk);
753 		struct sock *nsk;
754 
755 		sk = req->rsk_listener;
756 		if (unlikely(sk->sk_state != DCCP_LISTEN)) {
757 			inet_csk_reqsk_queue_drop_and_put(sk, req);
758 			goto lookup;
759 		}
760 		sock_hold(sk);
761 		refcounted = true;
762 		nsk = dccp_check_req(sk, skb, req);
763 		if (!nsk) {
764 			reqsk_put(req);
765 			goto discard_and_relse;
766 		}
767 		if (nsk == sk) {
768 			reqsk_put(req);
769 		} else if (dccp_child_process(sk, nsk, skb)) {
770 			dccp_v6_ctl_send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);
771 			goto discard_and_relse;
772 		} else {
773 			sock_put(sk);
774 			return 0;
775 		}
776 	}
777 	/*
778 	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
779 	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
780 	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
781 	 */
782 	min_cov = dccp_sk(sk)->dccps_pcrlen;
783 	if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
784 		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
785 			      dh->dccph_cscov, min_cov);
786 		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
787 		goto discard_and_relse;
788 	}
789 
790 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
791 		goto discard_and_relse;
792 	nf_reset_ct(skb);
793 
794 	return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4,
795 				refcounted) ? -1 : 0;
796 
797 no_dccp_socket:
798 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
799 		goto discard_it;
800 	/*
801 	 * Step 2:
802 	 *	If no socket ...
803 	 *		Generate Reset(No Connection) unless P.type == Reset
804 	 *		Drop packet and return
805 	 */
806 	if (dh->dccph_type != DCCP_PKT_RESET) {
807 		DCCP_SKB_CB(skb)->dccpd_reset_code =
808 					DCCP_RESET_CODE_NO_CONNECTION;
809 		dccp_v6_ctl_send_reset(sk, skb, SK_RST_REASON_NOT_SPECIFIED);
810 	}
811 
812 discard_it:
813 	kfree_skb(skb);
814 	return 0;
815 
816 discard_and_relse:
817 	if (refcounted)
818 		sock_put(sk);
819 	goto discard_it;
820 }
821 
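/*
 * Active open: resolve the route and source address, move the socket to
 * DCCP_REQUESTING, pick the initial sequence number and send the Request.
 */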
822 static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
823 			   int addr_len)
824 {
825 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
826 	struct inet_connection_sock *icsk = inet_csk(sk);
827 	struct inet_sock *inet = inet_sk(sk);
828 	struct ipv6_pinfo *np = inet6_sk(sk);
829 	struct dccp_sock *dp = dccp_sk(sk);
830 	struct in6_addr *saddr = NULL, *final_p, final;
831 	struct ipv6_txoptions *opt;
832 	struct flowi6 fl6;
833 	struct dst_entry *dst;
834 	int addr_type;
835 	int err;
836 
837 	dp->dccps_role = DCCP_ROLE_CLIENT;
838 
839 	if (addr_len < SIN6_LEN_RFC2133)
840 		return -EINVAL;
841 
842 	if (usin->sin6_family != AF_INET6)
843 		return -EAFNOSUPPORT;
844 
845 	memset(&fl6, 0, sizeof(fl6));
846 
847 	if (inet6_test_bit(SNDFLOW, sk)) {
848 		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
849 		IP6_ECN_flow_init(fl6.flowlabel);
850 		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
851 			struct ip6_flowlabel *flowlabel;
852 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
853 			if (IS_ERR(flowlabel))
854 				return -EINVAL;
855 			fl6_sock_release(flowlabel);
856 		}
857 	}
858 	/*
859 	 * connect() to INADDR_ANY means loopback (BSD'ism).
860 	 */
861 	if (ipv6_addr_any(&usin->sin6_addr))
862 		usin->sin6_addr.s6_addr[15] = 1;
863 
864 	addr_type = ipv6_addr_type(&usin->sin6_addr);
865 
866 	if (addr_type & IPV6_ADDR_MULTICAST)
867 		return -ENETUNREACH;
868 
869 	if (addr_type & IPV6_ADDR_LINKLOCAL) {
870 		if (addr_len >= sizeof(struct sockaddr_in6) &&
871 		    usin->sin6_scope_id) {
872 			/* If an interface was set while binding, the
873 			 * indices must coincide.
874 			 */
875 			if (sk->sk_bound_dev_if &&
876 			    sk->sk_bound_dev_if != usin->sin6_scope_id)
877 				return -EINVAL;
878 
879 			sk->sk_bound_dev_if = usin->sin6_scope_id;
880 		}
881 
882 		/* Connect to link-local address requires an interface */
883 		if (!sk->sk_bound_dev_if)
884 			return -EINVAL;
885 	}
886 
887 	sk->sk_v6_daddr = usin->sin6_addr;
888 	np->flow_label = fl6.flowlabel;
889 
890 	/*
891 	 * DCCP over IPv4
892 	 */
893 	if (addr_type == IPV6_ADDR_MAPPED) {
894 		u32 exthdrlen = icsk->icsk_ext_hdr_len;
895 		struct sockaddr_in sin;
896 
897 		net_dbg_ratelimited("connect: ipv4 mapped\n");
898 
899 		if (ipv6_only_sock(sk))
900 			return -ENETUNREACH;
901 
902 		sin.sin_family = AF_INET;
903 		sin.sin_port = usin->sin6_port;
904 		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
905 
906 		icsk->icsk_af_ops = &dccp_ipv6_mapped;
907 		sk->sk_backlog_rcv = dccp_v4_do_rcv;
908 
909 		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
910 		if (err) {
911 			icsk->icsk_ext_hdr_len = exthdrlen;
912 			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
913 			sk->sk_backlog_rcv = dccp_v6_do_rcv;
914 			goto failure;
915 		}
916 		np->saddr = sk->sk_v6_rcv_saddr;
917 		return err;
918 	}
919 
920 	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
921 		saddr = &sk->sk_v6_rcv_saddr;
922 
923 	fl6.flowi6_proto = IPPROTO_DCCP;
924 	fl6.daddr = sk->sk_v6_daddr;
925 	fl6.saddr = saddr ? *saddr : np->saddr;
926 	fl6.flowi6_oif = sk->sk_bound_dev_if;
927 	fl6.fl6_dport = usin->sin6_port;
928 	fl6.fl6_sport = inet->inet_sport;
929 	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
930 
931 	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
932 	final_p = fl6_update_dst(&fl6, opt, &final);
933 
934 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
935 	if (IS_ERR(dst)) {
936 		err = PTR_ERR(dst);
937 		goto failure;
938 	}
939 
940 	if (saddr == NULL) {
941 		saddr = &fl6.saddr;
942 
943 		err = inet_bhash2_update_saddr(sk, saddr, AF_INET6);
944 		if (err)
945 			goto failure;
946 	}
947 
948 	/* set the source address */
949 	np->saddr = *saddr;
950 	inet->inet_rcv_saddr = LOOPBACK4_IPV6;
951 
952 	ip6_dst_store(sk, dst, NULL, NULL);
953 
954 	icsk->icsk_ext_hdr_len = 0;
955 	if (opt)
956 		icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
957 
958 	inet->inet_dport = usin->sin6_port;
959 
960 	dccp_set_state(sk, DCCP_REQUESTING);
961 	err = inet6_hash_connect(&dccp_death_row, sk);
962 	if (err)
963 		goto late_failure;
964 
965 	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
966 						      sk->sk_v6_daddr.s6_addr32,
967 						      inet->inet_sport,
968 						      inet->inet_dport);
969 	err = dccp_connect(sk);
970 	if (err)
971 		goto late_failure;
972 
973 	return 0;
974 
975 late_failure:
976 	dccp_set_state(sk, DCCP_CLOSED);
977 	inet_bhash2_reset_saddr(sk);
978 	__sk_dst_reset(sk);
979 failure:
980 	inet->inet_dport = 0;
981 	sk->sk_route_caps = 0;
982 	return err;
983 }
984 
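/*
 * Address-family operations for native IPv6 DCCP sockets; dccp_ipv6_mapped
 * below is substituted when the socket actually talks to a v4-mapped peer.
 */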
985 static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
986 	.queue_xmit	   = inet6_csk_xmit,
987 	.send_check	   = dccp_v6_send_check,
988 	.rebuild_header	   = inet6_sk_rebuild_header,
989 	.conn_request	   = dccp_v6_conn_request,
990 	.syn_recv_sock	   = dccp_v6_request_recv_sock,
991 	.net_header_len	   = sizeof(struct ipv6hdr),
992 	.setsockopt	   = ipv6_setsockopt,
993 	.getsockopt	   = ipv6_getsockopt,
994 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
995 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
996 };
997 
998 /*
999  *	DCCP over IPv4 via INET6 API
1000  */
1001 static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
1002 	.queue_xmit	   = ip_queue_xmit,
1003 	.send_check	   = dccp_v4_send_check,
1004 	.rebuild_header	   = inet_sk_rebuild_header,
1005 	.conn_request	   = dccp_v6_conn_request,
1006 	.syn_recv_sock	   = dccp_v6_request_recv_sock,
1007 	.net_header_len	   = sizeof(struct iphdr),
1008 	.setsockopt	   = ipv6_setsockopt,
1009 	.getsockopt	   = ipv6_getsockopt,
1010 	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
1011 	.sockaddr_len	   = sizeof(struct sockaddr_in6),
1012 };
1013 
1014 static void dccp_v6_sk_destruct(struct sock *sk)
1015 {
1016 	dccp_destruct_common(sk);
1017 	inet6_sock_destruct(sk);
1018 }
1019 
1020 /* NOTE: A lot of things are set to zero explicitly by the call to
1021  *       sk_alloc(), so they need not be done here.
1022  */
1023 static int dccp_v6_init_sock(struct sock *sk)
1024 {
1025 	static __u8 dccp_v6_ctl_sock_initialized;
1026 	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);
1027 
1028 	if (err == 0) {
1029 		if (unlikely(!dccp_v6_ctl_sock_initialized))
1030 			dccp_v6_ctl_sock_initialized = 1;
1031 		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
1032 		sk->sk_destruct = dccp_v6_sk_destruct;
1033 	}
1034 
1035 	return err;
1036 }
1037 
1038 static struct timewait_sock_ops dccp6_timewait_sock_ops = {
1039 	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
1040 };
1041 
1042 static struct proto dccp_v6_prot = {
1043 	.name		   = "DCCPv6",
1044 	.owner		   = THIS_MODULE,
1045 	.close		   = dccp_close,
1046 	.connect	   = dccp_v6_connect,
1047 	.disconnect	   = dccp_disconnect,
1048 	.ioctl		   = dccp_ioctl,
1049 	.init		   = dccp_v6_init_sock,
1050 	.setsockopt	   = dccp_setsockopt,
1051 	.getsockopt	   = dccp_getsockopt,
1052 	.sendmsg	   = dccp_sendmsg,
1053 	.recvmsg	   = dccp_recvmsg,
1054 	.backlog_rcv	   = dccp_v6_do_rcv,
1055 	.hash		   = inet6_hash,
1056 	.unhash		   = inet_unhash,
1057 	.accept		   = inet_csk_accept,
1058 	.get_port	   = inet_csk_get_port,
1059 	.shutdown	   = dccp_shutdown,
1060 	.destroy	   = dccp_destroy_sock,
1061 	.orphan_count	   = &dccp_orphan_count,
1062 	.max_header	   = MAX_DCCP_HEADER,
1063 	.obj_size	   = sizeof(struct dccp6_sock),
1064 	.ipv6_pinfo_offset = offsetof(struct dccp6_sock, inet6),
1065 	.slab_flags	   = SLAB_TYPESAFE_BY_RCU,
1066 	.rsk_prot	   = &dccp6_request_sock_ops,
1067 	.twsk_prot	   = &dccp6_timewait_sock_ops,
1068 	.h.hashinfo	   = &dccp_hashinfo,
1069 };
1070 
1071 static const struct inet6_protocol dccp_v6_protocol = {
1072 	.handler	= dccp_v6_rcv,
1073 	.err_handler	= dccp_v6_err,
1074 	.flags		= INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1075 };
1076 
1077 static const struct proto_ops inet6_dccp_ops = {
1078 	.family		   = PF_INET6,
1079 	.owner		   = THIS_MODULE,
1080 	.release	   = inet6_release,
1081 	.bind		   = inet6_bind,
1082 	.connect	   = inet_stream_connect,
1083 	.socketpair	   = sock_no_socketpair,
1084 	.accept		   = inet_accept,
1085 	.getname	   = inet6_getname,
1086 	.poll		   = dccp_poll,
1087 	.ioctl		   = inet6_ioctl,
1088 	.gettstamp	   = sock_gettstamp,
1089 	.listen		   = inet_dccp_listen,
1090 	.shutdown	   = inet_shutdown,
1091 	.setsockopt	   = sock_common_setsockopt,
1092 	.getsockopt	   = sock_common_getsockopt,
1093 	.sendmsg	   = inet_sendmsg,
1094 	.recvmsg	   = sock_common_recvmsg,
1095 	.mmap		   = sock_no_mmap,
1096 #ifdef CONFIG_COMPAT
1097 	.compat_ioctl	   = inet6_compat_ioctl,
1098 #endif
1099 };
1100 
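/*
 * Userspace reaches this protosw with socket(AF_INET6, SOCK_DCCP,
 * IPPROTO_DCCP).  A hedged sketch of a typical client, not part of this
 * file: set the service code with setsockopt(fd, SOL_DCCP,
 * DCCP_SOCKOPT_SERVICE, ...), then connect(), which ends up in
 * dccp_v6_connect() above.
 */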
1101 static struct inet_protosw dccp_v6_protosw = {
1102 	.type		= SOCK_DCCP,
1103 	.protocol	= IPPROTO_DCCP,
1104 	.prot		= &dccp_v6_prot,
1105 	.ops		= &inet6_dccp_ops,
1106 	.flags		= INET_PROTOSW_ICSK,
1107 };
1108 
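/*
 * Per-netns setup and teardown: each network namespace gets its own control
 * socket for sending Resets.  Setup is refused if the core DCCP hash tables
 * were never allocated.
 */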
1109 static int __net_init dccp_v6_init_net(struct net *net)
1110 {
1111 	struct dccp_v6_pernet *pn = net_generic(net, dccp_v6_pernet_id);
1112 
1113 	if (dccp_hashinfo.bhash == NULL)
1114 		return -ESOCKTNOSUPPORT;
1115 
1116 	return inet_ctl_sock_create(&pn->v6_ctl_sk, PF_INET6,
1117 				    SOCK_DCCP, IPPROTO_DCCP, net);
1118 }
1119 
1120 static void __net_exit dccp_v6_exit_net(struct net *net)
1121 {
1122 	struct dccp_v6_pernet *pn = net_generic(net, dccp_v6_pernet_id);
1123 
1124 	inet_ctl_sock_destroy(pn->v6_ctl_sk);
1125 }
1126 
1127 static struct pernet_operations dccp_v6_ops = {
1128 	.init   = dccp_v6_init_net,
1129 	.exit   = dccp_v6_exit_net,
1130 	.id	= &dccp_v6_pernet_id,
1131 	.size   = sizeof(struct dccp_v6_pernet),
1132 };
1133 
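/*
 * Module init/exit.  The error labels in dccp_v6_init() unwind in reverse
 * registration order; note that out_unregister_proto falls through into
 * out_destroy_ctl_sock.
 */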
1134 static int __init dccp_v6_init(void)
1135 {
1136 	int err = proto_register(&dccp_v6_prot, 1);
1137 
1138 	if (err)
1139 		goto out;
1140 
1141 	inet6_register_protosw(&dccp_v6_protosw);
1142 
1143 	err = register_pernet_subsys(&dccp_v6_ops);
1144 	if (err)
1145 		goto out_destroy_ctl_sock;
1146 
1147 	err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1148 	if (err)
1149 		goto out_unregister_proto;
1150 
1151 out:
1152 	return err;
1153 out_unregister_proto:
1154 	unregister_pernet_subsys(&dccp_v6_ops);
1155 out_destroy_ctl_sock:
1156 	inet6_unregister_protosw(&dccp_v6_protosw);
1157 	proto_unregister(&dccp_v6_prot);
1158 	goto out;
1159 }
1160 
1161 static void __exit dccp_v6_exit(void)
1162 {
1163 	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
1164 	unregister_pernet_subsys(&dccp_v6_ops);
1165 	inet6_unregister_protosw(&dccp_v6_protosw);
1166 	proto_unregister(&dccp_v6_prot);
1167 }
1168 
1169 module_init(dccp_v6_init);
1170 module_exit(dccp_v6_exit);
1171 
1172 /*
1173  * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
1174  * values directly. Also cover the case where the protocol is not specified,
1175  * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP
1176  */
1177 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6);
1178 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6);
1179 MODULE_LICENSE("GPL");
1180 MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
1181 MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");
1182