xref: /linux/net/ipv6/raw.c (revision 40286d6379aacfcc053253ef78dc78b09addffda)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	RAW sockets for IPv6
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *
9  *	Adapted from linux/net/ipv4/raw.c
10  *
11  *	Fixes:
12  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
13  *	YOSHIFUJI,H.@USAGI	:	raw checksum (RFC2292(bis) compliance)
14  *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
15  */
16 
17 #include <linux/errno.h>
18 #include <linux/types.h>
19 #include <linux/socket.h>
20 #include <linux/slab.h>
21 #include <linux/sockios.h>
22 #include <linux/net.h>
23 #include <linux/in6.h>
24 #include <linux/netdevice.h>
25 #include <linux/if_arp.h>
26 #include <linux/icmpv6.h>
27 #include <linux/netfilter.h>
28 #include <linux/netfilter_ipv6.h>
29 #include <linux/skbuff.h>
30 #include <linux/compat.h>
31 #include <linux/uaccess.h>
32 #include <asm/ioctls.h>
33 
34 #include <net/net_namespace.h>
35 #include <net/ip.h>
36 #include <net/sock.h>
37 #include <net/snmp.h>
38 
39 #include <net/ipv6.h>
40 #include <net/ndisc.h>
41 #include <net/protocol.h>
42 #include <net/ip6_route.h>
43 #include <net/ip6_checksum.h>
44 #include <net/addrconf.h>
45 #include <net/transp_v6.h>
46 #include <net/udp.h>
47 #include <net/inet_common.h>
48 #include <net/tcp_states.h>
49 #if IS_ENABLED(CONFIG_IPV6_MIP6)
50 #include <net/mip6.h>
51 #endif
52 #include <linux/mroute6.h>
53 
54 #include <net/raw.h>
55 #include <net/rawv6.h>
56 #include <net/xfrm.h>
57 
58 #include <linux/proc_fs.h>
59 #include <linux/seq_file.h>
60 #include <linux/export.h>
61 
#define	ICMPV6_HDRLEN	4	/* ICMPv6 header, RFC 4443 Section 2.1 */

/* Global hash table of all IPv6 raw sockets, keyed by protocol number. */
struct raw_hashinfo raw_v6_hashinfo;
EXPORT_SYMBOL_GPL(raw_v6_hashinfo);
66 
/* Decide whether raw socket @sk should receive a packet of protocol @num
 * with local address @loc_addr and remote address @rmt_addr, received on
 * interface @dif (L3-slave interface @sdif).
 *
 * The socket is rejected when its protocol, netns, connected peer, or
 * bound device does not match.  It is accepted when it is unbound
 * locally, bound to @loc_addr, or is a member of the multicast group
 * @loc_addr for sender @rmt_addr.
 */
bool raw_v6_match(struct net *net, const struct sock *sk, unsigned short num,
		  const struct in6_addr *loc_addr,
		  const struct in6_addr *rmt_addr, int dif, int sdif)
{
	if (inet_sk(sk)->inet_num != num ||
	    !net_eq(sock_net(sk), net) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !raw_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
				 dif, sdif))
		return false;

	if (ipv6_addr_any(&sk->sk_v6_rcv_saddr) ||
	    ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr) ||
	    (ipv6_addr_is_multicast(loc_addr) &&
	     inet6_mc_check(sk, loc_addr, rmt_addr)))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(raw_v6_match);
88 
89 /*
90  *	0 - deliver
91  *	1 - block
92  */
/*
 *	0 - deliver
 *	1 - block
 *
 *	Apply the per-socket ICMP6_FILTER (RFC 3542) to @skb.  The filter
 *	is a 256-bit bitmap indexed by ICMPv6 type; a set bit means the
 *	type is blocked.  Packets too short to carry even the 4-byte
 *	ICMPv6 header are blocked outright.
 */
static int icmpv6_filter(const struct sock *sk, struct sk_buff *skb)
{
	const struct icmp6hdr *hdr;
	const __u32 *data;
	unsigned int type;

	/* We require only the four bytes of the ICMPv6 header, not any
	 * additional bytes of message body in "struct icmp6hdr".
	 */
	if (!pskb_may_pull(skb, ICMPV6_HDRLEN))
		return 1;

	hdr = (struct icmp6hdr *)skb->data;
	type = hdr->icmp6_type;

	data = &raw6_sk(sk)->filter.data[0];

	/* Select the 32-bit word (type >> 5) and bit (type & 31). */
	return (data[type >> 5] & (1U << (type & 31))) != 0;
}
112 
113 #if IS_ENABLED(CONFIG_IPV6_MIP6)
/* Hook type for an external Mobility Header (MH) validator; returns
 * <0 to stop delivery, 0 to deliver, >0 to skip this socket.
 */
typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);

/* Single global MH filter, read under RCU in ipv6_raw_deliver(). */
static mh_filter_t __rcu *mh_filter __read_mostly;

/* Publish @filter as the global MH validator (RCU-safe). */
int rawv6_mh_filter_register(mh_filter_t filter)
{
	rcu_assign_pointer(mh_filter, filter);
	return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_register);
124 
/* Clear the global MH validator and wait for in-flight RCU readers.
 * @filter is unused: there is only one slot, so it is cleared
 * unconditionally.
 */
int rawv6_mh_filter_unregister(mh_filter_t filter)
{
	RCU_INIT_POINTER(mh_filter, NULL);
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_unregister);
132 
133 #endif
134 
135 /*
136  *	demultiplex raw sockets.
137  *	(should consider queueing the skb in the sock receive_queue
138  *	without calling rawv6.c)
139  *
140  *	Caller owns SKB so we must make clones.
141  */
/*
 *	demultiplex raw sockets.
 *	(should consider queueing the skb in the sock receive_queue
 *	without calling rawv6.c)
 *
 *	Caller owns SKB so we must make clones.
 *
 *	Walk the hash bucket for @nexthdr under RCU and hand a clone of
 *	@skb to every matching raw socket that has receive-buffer space.
 *	ICMPv6 and (optionally) MH packets are run through their type
 *	filters first.  Returns true if at least one socket matched,
 *	even if the clone allocation failed.
 */
static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *ip6h;
	struct hlist_head *hlist;
	bool delivered = false;
	struct sock *sk;
	__u8 hash;

	ip6h = ipv6_hdr(skb);

	hash = raw_hashfunc(net, nexthdr);
	hlist = &raw_v6_hashinfo.ht[hash];
	rcu_read_lock();
	sk_for_each_rcu(sk, hlist) {
		int filtered;

		if (!raw_v6_match(net, sk, nexthdr, &ip6h->daddr, &ip6h->saddr,
				  inet6_iif(skb), inet6_sdif(skb)))
			continue;

		/* Drop (per-socket) when the receive buffer is full. */
		if (atomic_read(&sk->sk_rmem_alloc) >=
		    READ_ONCE(sk->sk_rcvbuf)) {
			sk_drops_inc(sk);
			continue;
		}

		delivered = true;
		switch (nexthdr) {
		case IPPROTO_ICMPV6:
			filtered = icmpv6_filter(sk, skb);
			/* icmpv6_filter() may pull header bytes and
			 * reallocate; refresh the cached header pointer.
			 */
			ip6h = ipv6_hdr(skb);
			break;

#if IS_ENABLED(CONFIG_IPV6_MIP6)
		case IPPROTO_MH:
		{
			/* XXX: To validate MH only once for each packet,
			 * this is placed here. It should be after checking
			 * xfrm policy, however it doesn't. The checking xfrm
			 * policy is placed in rawv6_rcv() because it is
			 * required for each socket.
			 */
			mh_filter_t *filter;

			filter = rcu_dereference(mh_filter);
			filtered = filter ? (*filter)(sk, skb) : 0;
			break;
		}
#endif
		default:
			filtered = 0;
			break;
		}

		/* filtered < 0: abort delivery entirely;
		 * filtered > 0: skip this socket only.
		 */
		if (filtered < 0)
			break;
		if (filtered == 0) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

			/* Not releasing hash table! */
			if (clone)
				rawv6_rcv(sk, clone);
		}
	}
	rcu_read_unlock();
	return delivered;
}
210 
/* Entry point from the IPv6 input path: deliver @skb to any listening
 * raw sockets for protocol @nexthdr.  Returns true if a socket matched.
 */
bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
{
	return ipv6_raw_deliver(skb, nexthdr);
}
215 
216 /* This cleans up af_inet6 a bit. -DaveM */
/* This cleans up af_inet6 a bit. -DaveM */
/* bind() for IPv6 raw sockets: validate the sockaddr, resolve scope,
 * check local-address ownership, and record the bound addresses.
 * Returns 0 on success or a negative errno.
 */
static int rawv6_bind(struct sock *sk, struct sockaddr_unsized *uaddr,
		      int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
	__be32 v4addr = 0;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (addr->sin6_family != AF_INET6)
		return -EINVAL;

	addr_type = ipv6_addr_type(&addr->sin6_addr);

	/* Raw sockets are IPv6 only */
	if (addr_type == IPV6_ADDR_MAPPED)
		return -EADDRNOTAVAIL;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != TCP_CLOSE)
		goto out;

	rcu_read_lock();
	/* Check if the address belongs to the host. */
	if (addr_type != IPV6_ADDR_ANY) {
		struct net_device *dev = NULL;

		if (__ipv6_addr_needs_scope_id(addr_type)) {
			if (addr_len >= sizeof(struct sockaddr_in6) &&
			    addr->sin6_scope_id) {
				/* Override any existing binding, if another
				 * one is supplied by user.
				 */
				sk->sk_bound_dev_if = addr->sin6_scope_id;
			}

			/* Binding to link-local address requires an interface */
			if (!sk->sk_bound_dev_if)
				goto out_unlock;
		}

		if (sk->sk_bound_dev_if) {
			err = -ENODEV;
			dev = dev_get_by_index_rcu(sock_net(sk),
						   sk->sk_bound_dev_if);
			if (!dev)
				goto out_unlock;
		}

		/* ipv4 addr of the socket is invalid.  Only the
		 * unspecified and mapped address have a v4 equivalent.
		 */
		v4addr = LOOPBACK4_IPV6;
		if (!(addr_type & IPV6_ADDR_MULTICAST) &&
		    !ipv6_can_nonlocal_bind(sock_net(sk), inet)) {
			err = -EADDRNOTAVAIL;
			if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
					   dev, 0)) {
				goto out_unlock;
			}
		}
	}

	inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
	sk->sk_v6_rcv_saddr = addr->sin6_addr;
	if (!(addr_type & IPV6_ADDR_MULTICAST))
		np->saddr = addr->sin6_addr;
	err = 0;
out_unlock:
	rcu_read_unlock();
out:
	release_sock(sk);
	return err;
}
297 
/* Propagate an ICMPv6 error (@type/@code at payload @offset, extra data
 * in @info) to raw socket @sk: update PMTU / redirect state, queue the
 * error for IPV6_RECVERR readers, and raise sk_err for hard errors.
 */
static void rawv6_err(struct sock *sk, struct sk_buff *skb,
		      u8 type, u8 code, int offset, __be32 info)
{
	bool recverr = inet6_test_bit(RECVERR6, sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int err;
	int harderr;

	/* Report error on raw socket, if:
	   1. User requested recverr.
	   2. Socket is connected (otherwise the error indication
	      is useless without recverr and error is hard.
	 */
	if (!recverr && sk->sk_state != TCP_ESTABLISHED)
		return;

	harderr = icmpv6_err_convert(type, code, &err);
	if (type == ICMPV6_PKT_TOOBIG) {
		ip6_sk_update_pmtu(skb, sk, info);
		/* PMTU exceeded is only "hard" when the socket insists
		 * on doing its own path-MTU discovery.
		 */
		harderr = (READ_ONCE(np->pmtudisc) == IPV6_PMTUDISC_DO);
	}
	if (type == NDISC_REDIRECT) {
		ip6_sk_redirect(skb, sk);
		return;
	}
	if (recverr) {
		u8 *payload = skb->data;
		/* Without HDRINCL the user never saw the IPv6 header, so
		 * point at the transport payload instead.
		 */
		if (!inet_test_bit(HDRINCL, sk))
			payload += offset;
		ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
	}

	if (recverr || harderr) {
		sk->sk_err = err;
		sk_error_report(sk);
	}
}
335 
/* Fan an ICMPv6 error out to every raw socket bound to @nexthdr whose
 * source/destination match the offending packet (whose IPv6 header sits
 * at skb->data, not at the network-header offset).
 */
void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
		u8 type, u8 code, int inner_offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	struct hlist_head *hlist;
	struct sock *sk;
	int hash;

	hash = raw_hashfunc(net, nexthdr);
	hlist = &raw_v6_hashinfo.ht[hash];
	rcu_read_lock();
	sk_for_each_rcu(sk, hlist) {
		/* Note: ipv6_hdr(skb) != skb->data */
		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;

		/* NOTE(review): the sdif argument receives inet6_iif(),
		 * not inet6_sdif() — looks intentional upstream but worth
		 * confirming against raw_sk_bound_dev_eq() semantics.
		 */
		if (!raw_v6_match(net, sk, nexthdr, &ip6h->saddr, &ip6h->daddr,
				  inet6_iif(skb), inet6_iif(skb)))
			continue;
		rawv6_err(sk, skb, type, code, inner_offset, info);
	}
	rcu_read_unlock();
}
358 
/* Final queueing step: verify the checksum when required (per-socket
 * checksum mode or an attached socket filter), drop the route cache
 * reference, and charge @skb to @sk's receive queue.  Returns 0 or
 * NET_RX_DROP; on drop the skb is consumed.
 */
static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	enum skb_drop_reason reason;

	if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
	    skb_checksum_complete(skb)) {
		sk_drops_inc(sk);
		sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM);
		return NET_RX_DROP;
	}

	/* Charge it to the socket. */
	skb_dst_drop(skb);
	reason = sock_queue_rcv_skb_reason(sk, skb);
	if (reason) {
		sk_skb_reason_drop(sk, skb, reason);
		return NET_RX_DROP;
	}

	return 0;
}
380 
381 /*
382  *	This is next to useless...
383  *	if we demultiplex in network layer we don't need the extra call
384  *	just to queue the skb...
385  *	maybe we could have the network decide upon a hint if it
386  *	should call raw_rcv for demultiplexing
387  */
/*
 *	This is next to useless...
 *	if we demultiplex in network layer we don't need the extra call
 *	just to queue the skb...
 *	maybe we could have the network decide upon a hint if it
 *	should call raw_rcv for demultiplexing
 *
 *	Per-socket receive for one clone: run the XFRM policy check,
 *	normalize the skb checksum state against the pseudo-header, and
 *	queue via rawv6_rcv_skb().  Always returns 0 unless dropped.
 */
int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct raw6_sock *rp = raw6_sk(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		sk_drops_inc(sk);
		sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY);
		return NET_RX_DROP;
	}
	nf_reset_ct(skb);

	/* Socket doesn't want checksumming: pretend HW verified it. */
	if (!rp->checksum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		/* Exclude the network header from the accumulated sum,
		 * then verify against the IPv6 pseudo-header.
		 */
		skb_postpull_rcsum(skb, skb_network_header(skb),
				   skb_network_header_len(skb));
		if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     skb->len, inet->inet_num, skb->csum))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	if (!skb_csum_unnecessary(skb))
		/* Seed skb->csum with the pseudo-header so a later full
		 * checksum_complete() can finish the verification.
		 */
		skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							 &ipv6_hdr(skb)->daddr,
							 skb->len,
							 inet->inet_num, 0));

	if (inet_test_bit(HDRINCL, sk)) {
		if (skb_checksum_complete(skb)) {
			sk_drops_inc(sk);
			sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM);
			return NET_RX_DROP;
		}
	}

	rawv6_rcv_skb(sk, skb);
	return 0;
}
428 
429 
430 /*
431  *	This should be easy, if there is something there
432  *	we return it, otherwise we block.
433  */
434 
/*
 *	This should be easy, if there is something there
 *	we return it, otherwise we block.
 */

/* recvmsg() for IPv6 raw sockets: dequeue one datagram, copy (and if
 * needed checksum-verify) it into @msg, fill in the source address and
 * ancillary data.  Returns bytes copied (or full length with
 * MSG_TRUNC), or a negative errno.
 */
static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct sk_buff *skb;
	size_t copied;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len);

	if (np->rxopt.bits.rxpmtu && READ_ONCE(np->rxpmtu))
		return ipv6_recv_rxpmtu(sk, msg, len);

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	if (skb_csum_unnecessary(skb)) {
		err = skb_copy_datagram_msg(skb, 0, msg, copied);
	} else if (msg->msg_flags&MSG_TRUNC) {
		/* Truncated copy cannot checksum while copying; verify
		 * the full packet first.
		 */
		if (__skb_checksum_complete(skb))
			goto csum_copy_err;
		err = skb_copy_datagram_msg(skb, 0, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, 0, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (err)
		goto out_free;

	/* Copy the address. */
	if (sin6) {
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = 0;
		sin6->sin6_addr = ipv6_hdr(skb)->saddr;
		sin6->sin6_flowinfo = 0;
		sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
							  inet6_iif(skb));
		msg->msg_namelen = sizeof(*sin6);
	}

	sock_recv_cmsgs(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_ctl(sk, msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = skb->len;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	skb_kill_datagram(sk, skb, flags);

	/* Error for blocking case is chosen to masquerade
	   as some normal condition.
	 */
	err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
	goto out;
}
511 
/* Flush the socket's corked write queue.  When IPV6_CHECKSUM is enabled
 * (rp->checksum), first compute the transport checksum over all queued
 * fragments and store it at byte offset rp->offset of the payload, then
 * push the frames out.  Returns 0 or a negative errno (the queue is
 * flushed on error).
 */
static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
				     struct raw6_sock *rp)
{
	struct ipv6_txoptions *opt;
	struct sk_buff *skb;
	int err = 0;
	int offset;
	int len;
	int total_len;
	__wsum tmp_csum;
	__sum16 csum;

	if (!rp->checksum)
		goto send;

	skb = skb_peek(&sk->sk_write_queue);
	if (!skb)
		goto out;

	offset = rp->offset;
	total_len = inet_sk(sk)->cork.base.length;
	opt = inet_sk(sk)->cork.base6.opt;
	/* Destination options after the payload are not part of the
	 * checksummed transport data.
	 */
	total_len -= opt ? opt->opt_flen : 0;

	/* The 2-byte checksum field must lie fully inside the payload. */
	if (offset >= total_len - 1) {
		err = -EINVAL;
		ip6_flush_pending_frames(sk);
		goto out;
	}

	/* should be check HW csum miyazawa */
	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		tmp_csum = skb->csum;
	} else {
		struct sk_buff *csum_skb = NULL;
		tmp_csum = 0;

		/* Sum all fragments and remember which one holds the
		 * checksum field (the one @offset falls into).
		 */
		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);

			if (csum_skb)
				continue;

			len = skb->len - skb_transport_offset(skb);
			if (offset >= len) {
				offset -= len;
				continue;
			}

			csum_skb = skb;
		}

		skb = csum_skb;
	}

	offset += skb_transport_offset(skb);
	err = skb_copy_bits(skb, offset, &csum, 2);
	if (err < 0) {
		ip6_flush_pending_frames(sk);
		goto out;
	}

	/* in case cksum was not initialized */
	if (unlikely(csum))
		tmp_csum = csum_sub(tmp_csum, csum_unfold(csum));

	csum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
			       total_len, fl6->flowi6_proto, tmp_csum);

	/* UDP encodes a zero checksum as all-ones (RFC 2460 §8.1). */
	if (csum == 0 && fl6->flowi6_proto == IPPROTO_UDP)
		csum = CSUM_MANGLED_0;

	BUG_ON(skb_store_bits(skb, offset, &csum, 2));

send:
	err = ip6_push_pending_frames(sk);
out:
	return err;
}
594 
/* Transmit path for IPV6_HDRINCL sockets: the user supplies the full
 * IPv6 header in @msg.  Build a single skb of @length bytes, copy the
 * user data in, and send it through the netfilter LOCAL_OUT hook to
 * dst_output().  On success *dstp ownership moves to the skb.  Returns
 * 0 or a negative errno.
 */
static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
			struct flowi6 *fl6, struct dst_entry **dstp,
			unsigned int flags, const struct sockcm_cookie *sockc)
{
	struct net *net = sock_net(sk);
	struct ipv6hdr *iph;
	struct sk_buff *skb;
	int err;
	struct rt6_info *rt = dst_rt6_info(*dstp);
	int hlen = LL_RESERVED_SPACE(rt->dst.dev);
	int tlen = rt->dst.dev->needed_tailroom;

	/* HDRINCL packets are never fragmented: must fit the MTU. */
	if (length > rt->dst.dev->mtu) {
		ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (length < sizeof(struct ipv6hdr))
		return -EINVAL;
	if (flags&MSG_PROBE)
		goto out;

	skb = sock_alloc_send_skb(sk,
				  length + hlen + tlen + 15,
				  flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto error;
	skb_reserve(skb, hlen);

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = sockc->priority;
	skb->mark = sockc->mark;
	skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, sk->sk_clockid);

	skb_put(skb, length);
	skb_reset_network_header(skb);
	iph = ipv6_hdr(skb);

	skb->ip_summed = CHECKSUM_NONE;

	skb_setup_tx_timestamp(skb, sockc);

	if (flags & MSG_CONFIRM)
		skb_set_dst_pending_confirm(skb, 1);

	skb->transport_header = skb->network_header;
	err = memcpy_from_msg(iph, msg, length);
	if (err) {
		err = -EFAULT;
		kfree_skb(skb);
		goto error;
	}

	skb_dst_set(skb, &rt->dst);
	*dstp = NULL;

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip6_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	/* Acquire rcu_read_lock() in case we need to use rt->rt6i_idev
	 * in the error path. Since skb has been freed, the dst could
	 * have been queued for deletion.
	 */
	rcu_read_lock();
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
		      NULL, rt->dst.dev, dst_output);
	if (err > 0)
		err = net_xmit_errno(err);
	if (err) {
		IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
		rcu_read_unlock();
		goto error_check;
	}
	rcu_read_unlock();
out:
	return 0;

error:
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
error_check:
	/* ENOBUFS is only surfaced when the user asked for RECVERR. */
	if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk))
		err = 0;
	return err;
}
683 
/* Sendmsg iterator state for raw6_getfrag(): @msg is the user data,
 * @c caches the first @hlen bytes (protocol header peeked by
 * rawv6_probe_proto_opt()) that were already consumed from @msg.
 */
struct raw6_frag_vec {
	struct msghdr *msg;
	int hlen;
	char c[4];
};
689 
/* Peek the first bytes of the outgoing message to fill protocol
 * selectors in @fl6 for flow/policy lookup: ICMPv6 type/code (2 bytes)
 * or Mobility Header type (byte 2 of 4).  The peeked bytes are saved in
 * @rfv so raw6_getfrag() can replay them.  Returns 0 or a negative
 * errno from the copy.
 */
static int rawv6_probe_proto_opt(struct raw6_frag_vec *rfv, struct flowi6 *fl6)
{
	int err = 0;
	switch (fl6->flowi6_proto) {
	case IPPROTO_ICMPV6:
		rfv->hlen = 2;
		err = memcpy_from_msg(rfv->c, rfv->msg, rfv->hlen);
		if (!err) {
			fl6->fl6_icmp_type = rfv->c[0];
			fl6->fl6_icmp_code = rfv->c[1];
		}
		break;
	case IPPROTO_MH:
		rfv->hlen = 4;
		err = memcpy_from_msg(rfv->c, rfv->msg, rfv->hlen);
		if (!err)
			fl6->fl6_mh_type = rfv->c[2];
	}
	return err;
}
710 
/* getfrag callback for ip6_append_data(): copy @len bytes starting at
 * logical @offset into @to.  The first rfv->hlen bytes come from the
 * cached header in rfv->c (already consumed from the msghdr by
 * rawv6_probe_proto_opt()); the rest stream from the user iovec.
 * Maintains skb->csum unless the skb is CHECKSUM_PARTIAL.
 */
static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
		       struct sk_buff *skb)
{
	struct raw6_frag_vec *rfv = from;

	if (offset < rfv->hlen) {
		int copy = min(rfv->hlen - offset, len);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			memcpy(to, rfv->c + offset, copy);
		else
			skb->csum = csum_block_add(
				skb->csum,
				csum_partial_copy_nocheck(rfv->c + offset,
							  to, copy),
				odd);

		/* Continue the copy immediately after the cached bytes. */
		odd = 0;
		offset += copy;
		to += copy;
		len -= copy;

		if (!len)
			return 0;
	}

	/* Translate to an offset within the user message. */
	offset -= rfv->hlen;

	return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
}
741 
/* sendmsg() for IPv6 raw sockets.  Resolves the destination (explicit
 * sockaddr or connected peer), processes ancillary data and flow
 * labels, performs the route lookup, and transmits either via
 * rawv6_send_hdrinc() (IPV6_HDRINCL) or the ip6_append_data() corking
 * path with rawv6_push_pending_frames().  Returns @len on success or a
 * negative errno.
 */
static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ipv6_txoptions opt_space;
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct raw6_sock *rp = raw6_sk(sk);
	struct ipv6_txoptions *opt = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct dst_entry *dst = NULL;
	struct raw6_frag_vec rfv;
	struct flowi6 fl6;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	int hdrincl;
	u16 proto;
	int err;

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	 */
	if (len > INT_MAX)
		return -EMSGSIZE;

	/* Mirror BSD error message compatibility */
	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	hdrincl = inet_test_bit(HDRINCL, sk);

	ipcm6_init_sk(&ipc6, sk);

	/*
	 *	Get and verify the address.
	 */
	memset(&fl6, 0, sizeof(fl6));

	fl6.flowi6_mark = ipc6.sockc.mark;
	fl6.flowi6_uid = sk_uid(sk);

	if (sin6) {
		if (addr_len < SIN6_LEN_RFC2133)
			return -EINVAL;

		if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
			return -EAFNOSUPPORT;

		/* port is the proto value [0..255] carried in nexthdr */
		proto = ntohs(sin6->sin6_port);

		if (!proto)
			proto = inet->inet_num;
		else if (proto != inet->inet_num &&
			 inet->inet_num != IPPROTO_RAW)
			return -EINVAL;

		if (proto > 255)
			return -EINVAL;

		daddr = &sin6->sin6_addr;
		if (inet6_test_bit(SNDFLOW, sk)) {
			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6.flowi6_oif = sin6->sin6_scope_id;
	} else {
		/* No address given: must be a connected socket. */
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		proto = inet->inet_num;
		daddr = &sk->sk_v6_daddr;
		fl6.flowlabel = np->flow_label;
	}

	if (fl6.flowi6_oif == 0)
		fl6.flowi6_oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(struct ipv6_txoptions);
		ipc6.opt = opt;

		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
	}
	if (!opt) {
		/* Fall back to the socket's sticky options. */
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);

	fl6.flowi6_proto = proto;
	fl6.flowi6_mark = ipc6.sockc.mark;

	if (!hdrincl) {
		rfv.msg = msg;
		rfv.hlen = 0;
		err = rawv6_probe_proto_opt(&rfv, &fl6);
		if (err)
			goto out;
	}

	if (!ipv6_addr_any(daddr))
		fl6.daddr = *daddr;
	else
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		fl6.saddr = np->saddr;

	final_p = fl6_update_dst(&fl6, opt, &final);

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
		fl6.flowi6_oif = READ_ONCE(np->mcast_oif);
	else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = READ_ONCE(np->ucast_oif);
	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));

	if (hdrincl)
		fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;

	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto out;
	}
	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;

back_from_confirm:
	if (hdrincl)
		err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst,
					msg->msg_flags, &ipc6.sockc);
	else {
		ipc6.opt = opt;
		lock_sock(sk);
		err = ip6_append_data(sk, raw6_getfrag, &rfv,
			len, 0, &ipc6, &fl6, dst_rt6_info(dst),
			msg->msg_flags);

		if (err)
			ip6_flush_pending_frames(sk);
		else if (!(msg->msg_flags & MSG_MORE))
			err = rawv6_push_pending_frames(sk, &fl6, rp);
		release_sock(sk);
	}
done:
	dst_release(dst);
out:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	return err < 0 ? err : len;
do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6.daddr);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto done;
}
937 
938 static int rawv6_seticmpfilter(struct sock *sk, int optname,
939 			       sockptr_t optval, int optlen)
940 {
941 	switch (optname) {
942 	case ICMPV6_FILTER:
943 		if (optlen > sizeof(struct icmp6_filter))
944 			optlen = sizeof(struct icmp6_filter);
945 		if (copy_from_sockptr(&raw6_sk(sk)->filter, optval, optlen))
946 			return -EFAULT;
947 		return 0;
948 	default:
949 		return -ENOPROTOOPT;
950 	}
951 
952 	return 0;
953 }
954 
955 static int rawv6_geticmpfilter(struct sock *sk, int optname,
956 			       char __user *optval, int __user *optlen)
957 {
958 	int len;
959 
960 	switch (optname) {
961 	case ICMPV6_FILTER:
962 		if (get_user(len, optlen))
963 			return -EFAULT;
964 		if (len < 0)
965 			return -EINVAL;
966 		if (len > sizeof(struct icmp6_filter))
967 			len = sizeof(struct icmp6_filter);
968 		if (put_user(len, optlen))
969 			return -EFAULT;
970 		if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
971 			return -EFAULT;
972 		return 0;
973 	default:
974 		return -ENOPROTOOPT;
975 	}
976 
977 	return 0;
978 }
979 
980 
/* Handle the raw-socket-specific setsockopt options: IPV6_HDRINCL and
 * IPV6_CHECKSUM.  For IPV6_CHECKSUM, @val < 0 disables checksumming,
 * otherwise @val is the even byte offset of the checksum field in the
 * outgoing payload.  Returns 0 or a negative errno.
 */
static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
			       sockptr_t optval, unsigned int optlen)
{
	struct raw6_sock *rp = raw6_sk(sk);
	int val;

	if (optlen < sizeof(val))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(val)))
		return -EFAULT;

	switch (optname) {
	case IPV6_HDRINCL:
		if (sk->sk_type != SOCK_RAW)
			return -EINVAL;
		inet_assign_bit(HDRINCL, sk, val);
		return 0;
	case IPV6_CHECKSUM:
		if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 &&
		    level == IPPROTO_IPV6) {
			/*
			 * RFC3542 tells that IPV6_CHECKSUM socket
			 * option in the IPPROTO_IPV6 level is not
			 * allowed on ICMPv6 sockets.
			 * If you want to set it, use IPPROTO_RAW
			 * level IPV6_CHECKSUM socket option
			 * (Linux extension).
			 */
			return -EINVAL;
		}

		/* You may get strange result with a positive odd offset;
		   RFC2292bis agrees with me. */
		if (val > 0 && (val&1))
			return -EINVAL;
		if (val < 0) {
			rp->checksum = 0;
		} else {
			rp->checksum = 1;
			rp->offset = val;
		}

		return 0;

	default:
		return -ENOPROTOOPT;
	}
}
1030 
/* setsockopt() dispatcher: SOL_RAW and the two SOL_IPV6 raw-specific
 * options go to do_rawv6_setsockopt(), SOL_ICMPV6 installs the ICMPv6
 * filter, and everything else falls through to the generic IPv6 code.
 */
static int rawv6_setsockopt(struct sock *sk, int level, int optname,
			    sockptr_t optval, unsigned int optlen)
{
	switch (level) {
	case SOL_RAW:
		break;

	case SOL_ICMPV6:
		if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_seticmpfilter(sk, optname, optval, optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM ||
		    optname == IPV6_HDRINCL)
			break;
		fallthrough;
	default:
		return ipv6_setsockopt(sk, level, optname, optval, optlen);
	}

	return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
}
1053 
/* Handle the raw-socket-specific getsockopt options.  IPV6_CHECKSUM
 * reports -1 when checksumming is disabled, otherwise the configured
 * checksum offset.  Returns 0 or a negative errno.
 */
static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	struct raw6_sock *rp = raw6_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	switch (optname) {
	case IPV6_HDRINCL:
		val = inet_test_bit(HDRINCL, sk);
		break;
	case IPV6_CHECKSUM:
		/*
		 * We allow getsockopt() for IPPROTO_IPV6-level
		 * IPV6_CHECKSUM socket option on ICMPv6 sockets
		 * since RFC3542 is silent about it.
		 */
		if (rp->checksum == 0)
			val = -1;
		else
			val = rp->offset;
		break;

	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, sizeof(int), len);

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
1091 
/* getsockopt() dispatcher, mirroring rawv6_setsockopt(): SOL_RAW and
 * the raw-specific SOL_IPV6 options are handled locally, SOL_ICMPV6
 * reads the ICMPv6 filter, everything else goes to the generic code.
 */
static int rawv6_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	switch (level) {
	case SOL_RAW:
		break;

	case SOL_ICMPV6:
		if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		return rawv6_geticmpfilter(sk, optname, optval, optlen);
	case SOL_IPV6:
		if (optname == IPV6_CHECKSUM ||
		    optname == IPV6_HDRINCL)
			break;
		fallthrough;
	default:
		return ipv6_getsockopt(sk, level, optname, optval, optlen);
	}

	return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
}
1114 
1115 static int rawv6_ioctl(struct sock *sk, int cmd, int *karg)
1116 {
1117 	switch (cmd) {
1118 	case SIOCOUTQ: {
1119 		*karg = sk_wmem_alloc_get(sk);
1120 		return 0;
1121 	}
1122 	case SIOCINQ: {
1123 		struct sk_buff *skb;
1124 
1125 		spin_lock_bh(&sk->sk_receive_queue.lock);
1126 		skb = skb_peek(&sk->sk_receive_queue);
1127 		if (skb)
1128 			*karg = skb->len;
1129 		else
1130 			*karg = 0;
1131 		spin_unlock_bh(&sk->sk_receive_queue.lock);
1132 		return 0;
1133 	}
1134 
1135 	default:
1136 #ifdef CONFIG_IPV6_MROUTE
1137 		return ip6mr_ioctl(sk, cmd, karg);
1138 #else
1139 		return -ENOIOCTLCMD;
1140 #endif
1141 	}
1142 }
1143 
#ifdef CONFIG_COMPAT
/* 32-bit-compat ioctl(): SIOCOUTQ/SIOCINQ are layout-compatible and
 * handled by the generic compat path (hence -ENOIOCTLCMD here); the
 * rest is forwarded to the compat multicast-routing handler.
 */
static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case SIOCOUTQ:
	case SIOCINQ:
		return -ENOIOCTLCMD;
	default:
#ifdef CONFIG_IPV6_MROUTE
		return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg));
#else
		return -ENOIOCTLCMD;
#endif
	}
}
#endif
1160 
/* close() for raw sockets: drop any router-alert registration (only
 * IPPROTO_RAW sockets may hold one), detach from multicast routing,
 * and release the socket.
 */
static void rawv6_close(struct sock *sk, long timeout)
{
	if (inet_sk(sk)->inet_num == IPPROTO_RAW)
		ip6_ra_control(sk, -1);
	ip6mr_sk_done(sk);
	sk_common_release(sk);
}
1168 
/* Socket destructor: discard any frames still corked on the socket. */
static void raw6_destroy(struct sock *sk)
{
	lock_sock(sk);
	ip6_flush_pending_frames(sk);
	release_sock(sk);
}
1175 
/* Per-socket init: wire up the drop counters and, for protocols whose
 * checksum is mandatory (ICMPv6 at offset 2, MH at offset 4 — see
 * RFC 4443 / RFC 6275), enable automatic checksumming by default.
 */
static int rawv6_init_sk(struct sock *sk)
{
	struct raw6_sock *rp = raw6_sk(sk);

	sk->sk_drop_counters = &rp->drop_counters;
	switch (inet_sk(sk)->inet_num) {
	case IPPROTO_ICMPV6:
		rp->checksum = 1;
		rp->offset   = 2;
		break;
	case IPPROTO_MH:
		rp->checksum = 1;
		rp->offset   = 4;
		break;
	default:
		break;
	}
	return 0;
}
1195 
/* Protocol operations for IPv6 raw sockets (AF_INET6/SOCK_RAW). */
struct proto rawv6_prot = {
	.name		   = "RAWv6",
	.owner		   = THIS_MODULE,
	.close		   = rawv6_close,
	.destroy	   = raw6_destroy,
	.connect	   = ip6_datagram_connect_v6_only,
	.disconnect	   = __udp_disconnect,
	.ioctl		   = rawv6_ioctl,
	.init		   = rawv6_init_sk,
	.setsockopt	   = rawv6_setsockopt,
	.getsockopt	   = rawv6_getsockopt,
	.sendmsg	   = rawv6_sendmsg,
	.recvmsg	   = rawv6_recvmsg,
	.bind		   = rawv6_bind,
	.backlog_rcv	   = rawv6_rcv_skb,
	.hash		   = raw_hash_sk,
	.unhash		   = raw_unhash_sk,
	.obj_size	   = sizeof(struct raw6_sock),
	.ipv6_pinfo_offset = offsetof(struct raw6_sock, inet6),
	.useroffset	   = offsetof(struct raw6_sock, filter),
	.usersize	   = sizeof_field(struct raw6_sock, filter),
	.h.raw_hash	   = &raw_v6_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = compat_rawv6_ioctl,
#endif
	.diag_destroy	   = raw_abort,
};
1223 
1224 #ifdef CONFIG_PROC_FS
1225 static int raw6_seq_show(struct seq_file *seq, void *v)
1226 {
1227 	if (v == SEQ_START_TOKEN) {
1228 		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1229 	} else {
1230 		struct sock *sp = v;
1231 		__u16 srcp  = inet_sk(sp)->inet_num;
1232 		ip6_dgram_sock_seq_show(seq, v, srcp, 0,
1233 					raw_seq_private(seq)->bucket);
1234 	}
1235 	return 0;
1236 }
1237 
/* seq_file iterator for /proc/net/raw6 (iteration shared with IPv4 raw). */
static const struct seq_operations raw6_seq_ops = {
	.start =	raw_seq_start,
	.next =		raw_seq_next,
	.stop =		raw_seq_stop,
	.show =		raw6_seq_show,
};
1244 
/* Per-netns init: create /proc/net/raw6 backed by raw_v6_hashinfo. */
static int __net_init raw6_init_net(struct net *net)
{
	if (!proc_create_net_data("raw6", 0444, net->proc_net, &raw6_seq_ops,
			sizeof(struct raw_iter_state), &raw_v6_hashinfo))
		return -ENOMEM;

	return 0;
}
1253 
/* Per-netns teardown: remove /proc/net/raw6. */
static void __net_exit raw6_exit_net(struct net *net)
{
	remove_proc_entry("raw6", net->proc_net);
}
1258 
/* Pernet hooks for the /proc/net/raw6 entry. */
static struct pernet_operations raw6_net_ops = {
	.init = raw6_init_net,
	.exit = raw6_exit_net,
};
1263 
/* Register the per-netns /proc/net/raw6 machinery at boot. */
int __init raw6_proc_init(void)
{
	return register_pernet_subsys(&raw6_net_ops);
}
1268 
/* Unregister the per-netns /proc/net/raw6 machinery. */
void raw6_proc_exit(void)
{
	unregister_pernet_subsys(&raw6_net_ops);
}
1273 #endif	/* CONFIG_PROC_FS */
1274 
1275 /* Same as inet6_dgram_ops, sans udp_poll.  */
/* Same as inet6_dgram_ops, sans udp_poll.  */
const struct proto_ops inet6_sockraw_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_dgram_connect,	/* ok		*/
	.socketpair	   = sock_no_socketpair,	/* a do nothing	*/
	.accept		   = sock_no_accept,		/* a do nothing	*/
	.getname	   = inet6_getname,
	.poll		   = datagram_poll,		/* ok		*/
	.ioctl		   = inet6_ioctl,		/* must change  */
	.gettstamp	   = sock_gettstamp,
	.listen		   = sock_no_listen,		/* ok		*/
	.shutdown	   = inet_shutdown,		/* ok		*/
	.setsockopt	   = sock_common_setsockopt,	/* ok		*/
	.getsockopt	   = sock_common_getsockopt,	/* ok		*/
	.sendmsg	   = inet_sendmsg,		/* ok		*/
	.recvmsg	   = sock_common_recvmsg,	/* ok		*/
	.mmap		   = sock_no_mmap,
#ifdef CONFIG_COMPAT
	.compat_ioctl	   = inet6_compat_ioctl,
#endif
};
1299 
/* protosw entry matching any protocol number for SOCK_RAW sockets. */
static struct inet_protosw rawv6_protosw = {
	.type		= SOCK_RAW,
	.protocol	= IPPROTO_IP,	/* wild card */
	.prot		= &rawv6_prot,
	.ops		= &inet6_sockraw_ops,
	.flags		= INET_PROTOSW_REUSE,
};
1307 
/* Register the raw-socket protosw with the IPv6 stack at boot. */
int __init rawv6_init(void)
{
	return inet6_register_protosw(&rawv6_protosw);
}
1312 
/* Unregister the raw-socket protosw from the IPv6 stack. */
void rawv6_exit(void)
{
	inet6_unregister_protosw(&rawv6_protosw);
}
1317