xref: /freebsd/sys/netinet/raw_ip.c (revision 38f0b757fd84d17d0fc24739a7cda160c4516d81)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1993
3  *	The Regents of the University of California.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 4. Neither the name of the University nor the names of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_ipsec.h"
39 
40 #include <sys/param.h>
41 #include <sys/jail.h>
42 #include <sys/kernel.h>
43 #include <sys/lock.h>
44 #include <sys/malloc.h>
45 #include <sys/mbuf.h>
46 #include <sys/priv.h>
47 #include <sys/proc.h>
48 #include <sys/protosw.h>
49 #include <sys/rwlock.h>
50 #include <sys/signalvar.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/sx.h>
54 #include <sys/sysctl.h>
55 #include <sys/systm.h>
56 
57 #include <vm/uma.h>
58 
59 #include <net/if.h>
60 #include <net/if_var.h>
61 #include <net/route.h>
62 #include <net/vnet.h>
63 
64 #include <netinet/in.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/in_pcb.h>
67 #include <netinet/in_var.h>
68 #include <netinet/if_ether.h>
69 #include <netinet/ip.h>
70 #include <netinet/ip_var.h>
71 #include <netinet/ip_mroute.h>
72 
73 #ifdef IPSEC
74 #include <netipsec/ipsec.h>
75 #endif /*IPSEC*/
76 
77 #include <security/mac/mac_framework.h>
78 
79 VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
80 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW,
81     &VNET_NAME(ip_defttl), 0,
82     "Maximum TTL on IP packets");
83 
84 VNET_DEFINE(struct inpcbhead, ripcb);
85 VNET_DEFINE(struct inpcbinfo, ripcbinfo);
86 
87 #define	V_ripcb			VNET(ripcb)
88 #define	V_ripcbinfo		VNET(ripcbinfo)
89 
90 /*
91  * Control and data hooks for ipfw, dummynet, divert and so on.
92  * The data hooks are not used here but it is convenient
93  * to keep them all in one place.
94  */
95 VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
96 VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;
97 
98 int	(*ip_dn_ctl_ptr)(struct sockopt *);
99 int	(*ip_dn_io_ptr)(struct mbuf **, int, struct ip_fw_args *);
100 void	(*ip_divert_ptr)(struct mbuf *, int);
101 int	(*ng_ipfw_input_p)(struct mbuf **, int,
102 			struct ip_fw_args *, int);
103 
104 #ifdef INET
105 /*
106  * Hooks for multicast routing. They all default to NULL, so leave them
107  * uninitialized and rely on BSS being zeroed.
108  */
109 
110 /*
111  * The socket used to communicate with the multicast routing daemon.
112  */
113 VNET_DEFINE(struct socket *, ip_mrouter);
114 
115 /*
116  * The various mrouter and rsvp functions.
117  */
118 int (*ip_mrouter_set)(struct socket *, struct sockopt *);
119 int (*ip_mrouter_get)(struct socket *, struct sockopt *);
120 int (*ip_mrouter_done)(void);
121 int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
122 		   struct ip_moptions *);
123 int (*mrt_ioctl)(u_long, caddr_t, int);
124 int (*legal_vif_num)(int);
125 u_long (*ip_mcast_src)(int);
126 
127 void (*rsvp_input_p)(struct mbuf *m, int off);
128 int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
129 void (*ip_rsvp_force_done)(struct socket *);
130 #endif /* INET */
131 
132 u_long	rip_sendspace = 9216;
133 SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
134     &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
135 
136 u_long	rip_recvspace = 9216;
137 SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
138     &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");
139 
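/*
 * Illustrative userland sketch (never compiled here): the two knobs above
 * are exported as net.inet.raw.maxdgram and net.inet.raw.recvspace and can
 * be read with sysctlbyname(3); the program below is only an example of
 * querying the first one.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	u_long maxdgram;
	size_t len = sizeof(maxdgram);

	/* Read the current limit on outgoing raw IP datagram size. */
	if (sysctlbyname("net.inet.raw.maxdgram", &maxdgram, &len,
	    NULL, 0) == 0)
		printf("net.inet.raw.maxdgram: %lu\n", maxdgram);
	return (0);
}
#endif
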
140 /*
141  * Hash functions
142  */
143 
144 #define INP_PCBHASH_RAW_SIZE	256
145 #define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
146         (((proto) + (laddr) + (faddr)) % (mask) + 1)
147 
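/*
 * Illustrative sketch of the bucketing above (never compiled here): a raw
 * PCB with a non-zero protocol, a bound local address and a connected
 * foreign address hashes into slots 1..mask, while slot 0 is reserved for
 * wildcard PCBs, which rip_input() always scans.  The addresses are the
 * usual documentation examples and EX_HASH merely mirrors INP_PCBHASH_RAW
 * for experimenting in userland.
 */
#if 0
#include <netinet/in.h>
#include <arpa/inet.h>
#include <stdio.h>

#define EX_HASH(proto, laddr, faddr, mask) \
	(((proto) + (laddr) + (faddr)) % (mask) + 1)

int
main(void)
{
	in_addr_t laddr = inet_addr("192.0.2.1");	/* local address */
	in_addr_t faddr = inet_addr("198.51.100.2");	/* foreign address */

	printf("slot %u\n",
	    (unsigned)EX_HASH(IPPROTO_ICMP, laddr, faddr, 256 - 1));
	return (0);
}
#endif
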
148 #ifdef INET
149 static void
150 rip_inshash(struct inpcb *inp)
151 {
152 	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
153 	struct inpcbhead *pcbhash;
154 	int hash;
155 
156 	INP_INFO_WLOCK_ASSERT(pcbinfo);
157 	INP_WLOCK_ASSERT(inp);
158 
159 	if (inp->inp_ip_p != 0 &&
160 	    inp->inp_laddr.s_addr != INADDR_ANY &&
161 	    inp->inp_faddr.s_addr != INADDR_ANY) {
162 		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
163 		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
164 	} else
165 		hash = 0;
166 	pcbhash = &pcbinfo->ipi_hashbase[hash];
167 	LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
168 }
169 
170 static void
171 rip_delhash(struct inpcb *inp)
172 {
173 
174 	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
175 	INP_WLOCK_ASSERT(inp);
176 
177 	LIST_REMOVE(inp, inp_hash);
178 }
179 #endif /* INET */
180 
181 /*
182  * Raw interface to IP protocol.
183  */
184 
185 /*
186  * Initialize the raw connection block queue.
187  */
188 static void
189 rip_zone_change(void *tag)
190 {
191 
192 	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
193 }
194 
195 static int
196 rip_inpcb_init(void *mem, int size, int flags)
197 {
198 	struct inpcb *inp = mem;
199 
200 	INP_LOCK_INIT(inp, "inp", "rawinp");
201 	return (0);
202 }
203 
204 void
205 rip_init(void)
206 {
207 
208 	in_pcbinfo_init(&V_ripcbinfo, "rip", &V_ripcb, INP_PCBHASH_RAW_SIZE,
209 	    1, "ripcb", rip_inpcb_init, NULL, UMA_ZONE_NOFREE,
210 	    IPI_HASHFIELDS_NONE);
211 	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
212 	    EVENTHANDLER_PRI_ANY);
213 }
214 
215 #ifdef VIMAGE
216 void
217 rip_destroy(void)
218 {
219 
220 	in_pcbinfo_destroy(&V_ripcbinfo);
221 }
222 #endif
223 
224 #ifdef INET
225 static int
226 rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
227     struct sockaddr_in *ripsrc)
228 {
229 	int policyfail = 0;
230 
231 	INP_LOCK_ASSERT(last);
232 
233 #ifdef IPSEC
234 	/* check AH/ESP integrity. */
235 	if (ipsec4_in_reject(n, last)) {
236 		policyfail = 1;
237 	}
238 #endif /* IPSEC */
239 #ifdef MAC
240 	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
241 		policyfail = 1;
242 #endif
243 	/* Check the minimum TTL for socket. */
244 	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
245 		policyfail = 1;
246 	if (!policyfail) {
247 		struct mbuf *opts = NULL;
248 		struct socket *so;
249 
250 		so = last->inp_socket;
251 		if ((last->inp_flags & INP_CONTROLOPTS) ||
252 		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
253 			ip_savecontrol(last, &opts, ip, n);
254 		SOCKBUF_LOCK(&so->so_rcv);
255 		if (sbappendaddr_locked(&so->so_rcv,
256 		    (struct sockaddr *)ripsrc, n, opts) == 0) {
257 			/* should notify about lost packet */
258 			m_freem(n);
259 			if (opts)
260 				m_freem(opts);
261 			SOCKBUF_UNLOCK(&so->so_rcv);
262 		} else
263 			sorwakeup_locked(so);
264 	} else
265 		m_freem(n);
266 	return (policyfail);
267 }
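
/*
 * Illustrative userland counterpart (never compiled here; the helper name
 * is arbitrary): with SO_TIMESTAMP set on the receiving socket, the
 * ip_savecontrol() call in rip_append() attaches an SCM_TIMESTAMP control
 * message to each delivered datagram, which recvmsg(2) exposes.
 */
#if 0
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

static void
read_with_timestamp(int s)
{
	char buf[2048], cbuf[CMSG_SPACE(sizeof(struct timeval))];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg;
	struct cmsghdr *cm;
	int on = 1;

	(void)setsockopt(s, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	if (recvmsg(s, &msg, 0) < 0)
		return;
	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == SOL_SOCKET &&
		    cm->cmsg_type == SCM_TIMESTAMP)
			printf("timestamp control message received\n");
}
#endif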
268 
269 /*
270  * Setup generic address and protocol structures for raw_input routine, then
271  * pass them along with mbuf chain.
272  */
273 void
274 rip_input(struct mbuf *m, int off)
275 {
276 	struct ifnet *ifp;
277 	struct ip *ip = mtod(m, struct ip *);
278 	int proto = ip->ip_p;
279 	struct inpcb *inp, *last;
280 	struct sockaddr_in ripsrc;
281 	int hash;
282 
283 	bzero(&ripsrc, sizeof(ripsrc));
284 	ripsrc.sin_len = sizeof(ripsrc);
285 	ripsrc.sin_family = AF_INET;
286 	ripsrc.sin_addr = ip->ip_src;
287 	last = NULL;
288 
289 	ifp = m->m_pkthdr.rcvif;
290 	/*
291 	 * Applications on raw sockets expect host byte order.
292 	 */
293 	ip->ip_len = ntohs(ip->ip_len);
294 	ip->ip_off = ntohs(ip->ip_off);
295 
296 	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
297 	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
298 	INP_INFO_RLOCK(&V_ripcbinfo);
299 	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
300 		if (inp->inp_ip_p != proto)
301 			continue;
302 #ifdef INET6
303 		/* XXX inp locking */
304 		if ((inp->inp_vflag & INP_IPV4) == 0)
305 			continue;
306 #endif
307 		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
308 			continue;
309 		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
310 			continue;
311 		if (jailed_without_vnet(inp->inp_cred)) {
312 			/*
313 			 * XXX: If faddr was bound to a multicast group,
314 			 * a jailed raw socket will drop the datagram.
315 			 */
316 			if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
317 				continue;
318 		}
319 		if (last != NULL) {
320 			struct mbuf *n;
321 
322 			n = m_copy(m, 0, (int)M_COPYALL);
323 			if (n != NULL)
324 				(void) rip_append(last, ip, n, &ripsrc);
325 			/* XXX count dropped packet */
326 			INP_RUNLOCK(last);
327 		}
328 		INP_RLOCK(inp);
329 		last = inp;
330 	}
331 	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
332 		if (inp->inp_ip_p && inp->inp_ip_p != proto)
333 			continue;
334 #ifdef INET6
335 		/* XXX inp locking */
336 		if ((inp->inp_vflag & INP_IPV4) == 0)
337 			continue;
338 #endif
339 		if (!in_nullhost(inp->inp_laddr) &&
340 		    !in_hosteq(inp->inp_laddr, ip->ip_dst))
341 			continue;
342 		if (!in_nullhost(inp->inp_faddr) &&
343 		    !in_hosteq(inp->inp_faddr, ip->ip_src))
344 			continue;
345 		if (jailed_without_vnet(inp->inp_cred)) {
346 			/*
347 			 * Allow raw socket in jail to receive multicast;
348 			 * assume process had PRIV_NETINET_RAW at attach,
349 			 * and fall through into normal filter path if so.
350 			 */
351 			if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
352 			    prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
353 				continue;
354 		}
355 		/*
356 		 * If this raw socket has multicast state, and we
357 		 * have received a multicast, check if this socket
358 		 * should receive it, as multicast filtering is now
359 		 * the responsibility of the transport layer.
360 		 */
361 		if (inp->inp_moptions != NULL &&
362 		    IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
363 			/*
364 			 * If the incoming datagram is for IGMP, allow it
365 			 * through unconditionally to the raw socket.
366 			 *
367 			 * In the case of IGMPv2, we may not have explicitly
368 			 * joined the group, and may have set IFF_ALLMULTI
369 			 * on the interface. imo_multi_filter() may discard
370 			 * control traffic we actually need to see.
371 			 *
372 			 * Userland multicast routing daemons should continue
373 			 * to filter the control traffic appropriately.
374 			 */
375 			int blocked;
376 
377 			blocked = MCAST_PASS;
378 			if (proto != IPPROTO_IGMP) {
379 				struct sockaddr_in group;
380 
381 				bzero(&group, sizeof(struct sockaddr_in));
382 				group.sin_len = sizeof(struct sockaddr_in);
383 				group.sin_family = AF_INET;
384 				group.sin_addr = ip->ip_dst;
385 
386 				blocked = imo_multi_filter(inp->inp_moptions,
387 				    ifp,
388 				    (struct sockaddr *)&group,
389 				    (struct sockaddr *)&ripsrc);
390 			}
391 
392 			if (blocked != MCAST_PASS) {
393 				IPSTAT_INC(ips_notmember);
394 				continue;
395 			}
396 		}
397 		if (last != NULL) {
398 			struct mbuf *n;
399 
400 			n = m_copy(m, 0, (int)M_COPYALL);
401 			if (n != NULL)
402 				(void) rip_append(last, ip, n, &ripsrc);
403 			/* XXX count dropped packet */
404 			INP_RUNLOCK(last);
405 		}
406 		INP_RLOCK(inp);
407 		last = inp;
408 	}
409 	INP_INFO_RUNLOCK(&V_ripcbinfo);
410 	if (last != NULL) {
411 		if (rip_append(last, ip, m, &ripsrc) != 0)
412 			IPSTAT_INC(ips_delivered);
413 		INP_RUNLOCK(last);
414 	} else {
415 		m_freem(m);
416 		IPSTAT_INC(ips_noproto);
417 		IPSTAT_DEC(ips_delivered);
418 	}
419 }
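
/*
 * Illustrative userland sketch (never compiled here): a privileged process
 * reading from a raw ICMP socket receives whole IP packets through the
 * delivery loops in rip_input() above, with ip_len and ip_off already
 * converted to host byte order as noted there.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char buf[2048];
	struct ip *ip;
	ssize_t n;
	int s;

	s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
	if (s < 0)
		return (1);
	n = recv(s, buf, sizeof(buf), 0);	/* IP header included */
	if (n >= (ssize_t)sizeof(struct ip)) {
		ip = (struct ip *)buf;
		printf("proto %d, total length %d (host byte order)\n",
		    ip->ip_p, ip->ip_len);
	}
	close(s);
	return (0);
}
#endif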
420 
421 /*
422  * Generate IP header and pass packet to ip_output.  Tack on options user may
423  * have setup with control call.
424  */
425 int
426 rip_output(struct mbuf *m, struct socket *so, u_long dst)
427 {
428 	struct ip *ip;
429 	int error;
430 	struct inpcb *inp = sotoinpcb(so);
431 	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
432 	    IP_ALLOWBROADCAST;
433 
434 	/*
435 	 * If the user handed us a complete IP packet, use it.  Otherwise,
436 	 * allocate an mbuf for a header and fill it in.
437 	 */
438 	if ((inp->inp_flags & INP_HDRINCL) == 0) {
439 		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
440 			m_freem(m);
441 			return (EMSGSIZE);
442 		}
443 		M_PREPEND(m, sizeof(struct ip), M_NOWAIT);
444 		if (m == NULL)
445 			return (ENOBUFS);
446 
447 		INP_RLOCK(inp);
448 		ip = mtod(m, struct ip *);
449 		ip->ip_tos = inp->inp_ip_tos;
450 		if (inp->inp_flags & INP_DONTFRAG)
451 			ip->ip_off = htons(IP_DF);
452 		else
453 			ip->ip_off = htons(0);
454 		ip->ip_p = inp->inp_ip_p;
455 		ip->ip_len = htons(m->m_pkthdr.len);
456 		ip->ip_src = inp->inp_laddr;
457 		if (jailed(inp->inp_cred)) {
458 			/*
459 			 * prison_local_ip4() would be good enough but would
460 			 * let a source of INADDR_ANY pass, which we do not
461 			 * want to see from jails. We do not go through the
462 			 * pain of in_pcbladdr() for raw sockets.
463 			 */
464 			if (ip->ip_src.s_addr == INADDR_ANY)
465 				error = prison_get_ip4(inp->inp_cred,
466 				    &ip->ip_src);
467 			else
468 				error = prison_local_ip4(inp->inp_cred,
469 				    &ip->ip_src);
470 			if (error != 0) {
471 				INP_RUNLOCK(inp);
472 				m_freem(m);
473 				return (error);
474 			}
475 		}
476 		ip->ip_dst.s_addr = dst;
477 		ip->ip_ttl = inp->inp_ip_ttl;
478 	} else {
479 		if (m->m_pkthdr.len > IP_MAXPACKET) {
480 			m_freem(m);
481 			return (EMSGSIZE);
482 		}
483 		INP_RLOCK(inp);
484 		ip = mtod(m, struct ip *);
485 		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
486 		if (error != 0) {
487 			INP_RUNLOCK(inp);
488 			m_freem(m);
489 			return (error);
490 		}
491 
492 		/*
493 		 * Don't allow both user-specified and setsockopt options,
494 		 * and don't allow packet length sizes that will crash.
495 		 */
496 		if (((ip->ip_hl != (sizeof (*ip) >> 2)) && inp->inp_options)
497 		    || (ip->ip_len > m->m_pkthdr.len)
498 		    || (ip->ip_len < (ip->ip_hl << 2))) {
499 			INP_RUNLOCK(inp);
500 			m_freem(m);
501 			return (EINVAL);
502 		}
503 		if (ip->ip_id == 0)
504 			ip->ip_id = ip_newid();
505 
506 		/*
507 		 * Applications on raw sockets pass us packets
508 		 * in host byte order.
509 		 */
510 		ip->ip_len = htons(ip->ip_len);
511 		ip->ip_off = htons(ip->ip_off);
512 
513 		/*
514 		 * XXX prevent ip_output from overwriting header fields.
515 		 */
516 		flags |= IP_RAWOUTPUT;
517 		IPSTAT_INC(ips_rawout);
518 	}
519 
520 	if (inp->inp_flags & INP_ONESBCAST)
521 		flags |= IP_SENDONES;
522 
523 #ifdef MAC
524 	mac_inpcb_create_mbuf(inp, m);
525 #endif
526 
527 	error = ip_output(m, inp->inp_options, NULL, flags,
528 	    inp->inp_moptions, inp);
529 	INP_RUNLOCK(inp);
530 	return (error);
531 }
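
/*
 * Illustrative userland sketch (never compiled here; the protocol number
 * and address are examples): without IP_HDRINCL, the first branch of
 * rip_output() above prepends and fills in the IP header, so the caller
 * only supplies the payload and the destination.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct sockaddr_in dst;
	const char payload[] = "raw payload";
	int s;

	/* Protocol 253 is reserved for experimentation (RFC 3692). */
	s = socket(AF_INET, SOCK_RAW, 253);
	if (s < 0)
		return (1);
	memset(&dst, 0, sizeof(dst));
	dst.sin_len = sizeof(dst);
	dst.sin_family = AF_INET;
	dst.sin_addr.s_addr = inet_addr("198.51.100.2");
	(void)sendto(s, payload, sizeof(payload), 0,
	    (struct sockaddr *)&dst, sizeof(dst));
	close(s);
	return (0);
}
#endif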
532 
533 /*
534  * Raw IP socket option processing.
535  *
536  * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
537  * only be created by a privileged process, and as such, socket option
538  * operations to manage system properties on any raw socket were allowed to
539  * take place without explicit additional access control checks.  However,
540  * raw sockets can now also be created in jail(), and therefore explicit
541  * checks are now required.  Likewise, raw sockets can be used by a process
542  * after it gives up privilege, so some caution is required.  For options
543  * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
544  * performed in ip_ctloutput() and therefore no check occurs here.
545  * Unilaterally checking priv_check() here breaks normal IP socket option
546  * operations on raw sockets.
547  *
548  * When adding new socket options here, make sure to add access control
549  * checks as necessary.
550  *
551  * XXX-BZ inp locking?
552  */
553 int
554 rip_ctloutput(struct socket *so, struct sockopt *sopt)
555 {
556 	struct	inpcb *inp = sotoinpcb(so);
557 	int	error, optval;
558 
559 	if (sopt->sopt_level != IPPROTO_IP) {
560 		if ((sopt->sopt_level == SOL_SOCKET) &&
561 		    (sopt->sopt_name == SO_SETFIB)) {
562 			inp->inp_inc.inc_fibnum = so->so_fibnum;
563 			return (0);
564 		}
565 		return (EINVAL);
566 	}
567 
568 	error = 0;
569 	switch (sopt->sopt_dir) {
570 	case SOPT_GET:
571 		switch (sopt->sopt_name) {
572 		case IP_HDRINCL:
573 			optval = inp->inp_flags & INP_HDRINCL;
574 			error = sooptcopyout(sopt, &optval, sizeof optval);
575 			break;
576 
577 		case IP_FW3:	/* generic ipfw v.3 functions */
578 		case IP_FW_ADD:	/* ADD actually returns the body... */
579 		case IP_FW_GET:
580 		case IP_FW_TABLE_GETSIZE:
581 		case IP_FW_TABLE_LIST:
582 		case IP_FW_NAT_GET_CONFIG:
583 		case IP_FW_NAT_GET_LOG:
584 			if (V_ip_fw_ctl_ptr != NULL)
585 				error = V_ip_fw_ctl_ptr(sopt);
586 			else
587 				error = ENOPROTOOPT;
588 			break;
589 
590 		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
591 		case IP_DUMMYNET_GET:
592 			if (ip_dn_ctl_ptr != NULL)
593 				error = ip_dn_ctl_ptr(sopt);
594 			else
595 				error = ENOPROTOOPT;
596 			break;
597 
598 		case MRT_INIT:
599 		case MRT_DONE:
600 		case MRT_ADD_VIF:
601 		case MRT_DEL_VIF:
602 		case MRT_ADD_MFC:
603 		case MRT_DEL_MFC:
604 		case MRT_VERSION:
605 		case MRT_ASSERT:
606 		case MRT_API_SUPPORT:
607 		case MRT_API_CONFIG:
608 		case MRT_ADD_BW_UPCALL:
609 		case MRT_DEL_BW_UPCALL:
610 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
611 			if (error != 0)
612 				return (error);
613 			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
614 				EOPNOTSUPP;
615 			break;
616 
617 		default:
618 			error = ip_ctloutput(so, sopt);
619 			break;
620 		}
621 		break;
622 
623 	case SOPT_SET:
624 		switch (sopt->sopt_name) {
625 		case IP_HDRINCL:
626 			error = sooptcopyin(sopt, &optval, sizeof optval,
627 					    sizeof optval);
628 			if (error)
629 				break;
630 			if (optval)
631 				inp->inp_flags |= INP_HDRINCL;
632 			else
633 				inp->inp_flags &= ~INP_HDRINCL;
634 			break;
635 
636 		case IP_FW3:	/* generic ipfw v.3 functions */
637 		case IP_FW_ADD:
638 		case IP_FW_DEL:
639 		case IP_FW_FLUSH:
640 		case IP_FW_ZERO:
641 		case IP_FW_RESETLOG:
642 		case IP_FW_TABLE_ADD:
643 		case IP_FW_TABLE_DEL:
644 		case IP_FW_TABLE_FLUSH:
645 		case IP_FW_NAT_CFG:
646 		case IP_FW_NAT_DEL:
647 			if (V_ip_fw_ctl_ptr != NULL)
648 				error = V_ip_fw_ctl_ptr(sopt);
649 			else
650 				error = ENOPROTOOPT;
651 			break;
652 
653 		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
654 		case IP_DUMMYNET_CONFIGURE:
655 		case IP_DUMMYNET_DEL:
656 		case IP_DUMMYNET_FLUSH:
657 			if (ip_dn_ctl_ptr != NULL)
658 				error = ip_dn_ctl_ptr(sopt);
659 			else
660 				error = ENOPROTOOPT;
661 			break;
662 
663 		case IP_RSVP_ON:
664 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
665 			if (error != 0)
666 				return (error);
667 			error = ip_rsvp_init(so);
668 			break;
669 
670 		case IP_RSVP_OFF:
671 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
672 			if (error != 0)
673 				return (error);
674 			error = ip_rsvp_done();
675 			break;
676 
677 		case IP_RSVP_VIF_ON:
678 		case IP_RSVP_VIF_OFF:
679 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
680 			if (error != 0)
681 				return (error);
682 			error = ip_rsvp_vif ?
683 				ip_rsvp_vif(so, sopt) : EINVAL;
684 			break;
685 
686 		case MRT_INIT:
687 		case MRT_DONE:
688 		case MRT_ADD_VIF:
689 		case MRT_DEL_VIF:
690 		case MRT_ADD_MFC:
691 		case MRT_DEL_MFC:
692 		case MRT_VERSION:
693 		case MRT_ASSERT:
694 		case MRT_API_SUPPORT:
695 		case MRT_API_CONFIG:
696 		case MRT_ADD_BW_UPCALL:
697 		case MRT_DEL_BW_UPCALL:
698 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
699 			if (error != 0)
700 				return (error);
701 			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
702 					EOPNOTSUPP;
703 			break;
704 
705 		default:
706 			error = ip_ctloutput(so, sopt);
707 			break;
708 		}
709 		break;
710 	}
711 
712 	return (error);
713 }
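
/*
 * Illustrative userland helper (never compiled here; the helper name is
 * arbitrary): IP_HDRINCL, handled in the SOPT_SET case above, makes
 * rip_output() take the caller-supplied IP header verbatim instead of
 * constructing one.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>

static int
set_hdrincl(int s, int on)
{

	return (setsockopt(s, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on)));
}
#endif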
714 
715 /*
716  * This function exists solely to receive the PRC_IFDOWN messages which are
717  * sent by if_down().  It looks for an ifaddr whose ifa_addr is sa, and calls
718  * in_ifadown() to remove all routes corresponding to that address.  It also
719  * receives the PRC_IFUP messages from if_up() and reinstalls the interface
720  * routes.
721  */
722 void
723 rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
724 {
725 	struct in_ifaddr *ia;
726 	struct ifnet *ifp;
727 	int err;
728 	int flags;
729 
730 	switch (cmd) {
731 	case PRC_IFDOWN:
732 		IN_IFADDR_RLOCK();
733 		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
734 			if (ia->ia_ifa.ifa_addr == sa
735 			    && (ia->ia_flags & IFA_ROUTE)) {
736 				ifa_ref(&ia->ia_ifa);
737 				IN_IFADDR_RUNLOCK();
738 				/*
739 				 * in_scrubprefix() kills the interface route.
740 				 */
741 				in_scrubprefix(ia, 0);
742 				/*
743 				 * in_ifadown gets rid of all the rest of the
744 				 * routes.  This is not quite the right thing
745 				 * to do, but at least if we are running a
746 				 * routing process they will come back.
747 				 */
748 				in_ifadown(&ia->ia_ifa, 0);
749 				ifa_free(&ia->ia_ifa);
750 				break;
751 			}
752 		}
753 		if (ia == NULL)		/* If ia matched, already unlocked. */
754 			IN_IFADDR_RUNLOCK();
755 		break;
756 
757 	case PRC_IFUP:
758 		IN_IFADDR_RLOCK();
759 		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
760 			if (ia->ia_ifa.ifa_addr == sa)
761 				break;
762 		}
763 		if (ia == NULL || (ia->ia_flags & IFA_ROUTE)) {
764 			IN_IFADDR_RUNLOCK();
765 			return;
766 		}
767 		ifa_ref(&ia->ia_ifa);
768 		IN_IFADDR_RUNLOCK();
769 		flags = RTF_UP;
770 		ifp = ia->ia_ifa.ifa_ifp;
771 
772 		if ((ifp->if_flags & IFF_LOOPBACK)
773 		    || (ifp->if_flags & IFF_POINTOPOINT))
774 			flags |= RTF_HOST;
775 
776 		err = ifa_del_loopback_route((struct ifaddr *)ia, sa);
777 
778 		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
779 		if (err == 0)
780 			ia->ia_flags |= IFA_ROUTE;
781 
782 		err = ifa_add_loopback_route((struct ifaddr *)ia, sa);
783 
784 		ifa_free(&ia->ia_ifa);
785 		break;
786 	}
787 }
788 
789 static int
790 rip_attach(struct socket *so, int proto, struct thread *td)
791 {
792 	struct inpcb *inp;
793 	int error;
794 
795 	inp = sotoinpcb(so);
796 	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));
797 
798 	error = priv_check(td, PRIV_NETINET_RAW);
799 	if (error)
800 		return (error);
801 	if (proto >= IPPROTO_MAX || proto < 0)
802 		return (EPROTONOSUPPORT);
803 	error = soreserve(so, rip_sendspace, rip_recvspace);
804 	if (error)
805 		return (error);
806 	INP_INFO_WLOCK(&V_ripcbinfo);
807 	error = in_pcballoc(so, &V_ripcbinfo);
808 	if (error) {
809 		INP_INFO_WUNLOCK(&V_ripcbinfo);
810 		return (error);
811 	}
812 	inp = (struct inpcb *)so->so_pcb;
813 	inp->inp_vflag |= INP_IPV4;
814 	inp->inp_ip_p = proto;
815 	inp->inp_ip_ttl = V_ip_defttl;
816 	rip_inshash(inp);
817 	INP_INFO_WUNLOCK(&V_ripcbinfo);
818 	INP_WUNLOCK(inp);
819 	return (0);
820 }
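
/*
 * Illustrative userland sketch (never compiled here): the priv_check() in
 * rip_attach() rejects unprivileged callers, which socket(2) reports as
 * EPERM; jailed processes additionally need allow.raw_sockets.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <errno.h>
#include <stdio.h>

int
main(void)
{
	int s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);

	if (s < 0 && errno == EPERM)
		printf("raw sockets require PRIV_NETINET_RAW\n");
	return (0);
}
#endif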
821 
822 static void
823 rip_detach(struct socket *so)
824 {
825 	struct inpcb *inp;
826 
827 	inp = sotoinpcb(so);
828 	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
829 	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
830 	    ("rip_detach: not closed"));
831 
832 	INP_INFO_WLOCK(&V_ripcbinfo);
833 	INP_WLOCK(inp);
834 	rip_delhash(inp);
835 	if (so == V_ip_mrouter && ip_mrouter_done)
836 		ip_mrouter_done();
837 	if (ip_rsvp_force_done)
838 		ip_rsvp_force_done(so);
839 	if (so == V_ip_rsvpd)
840 		ip_rsvp_done();
841 	in_pcbdetach(inp);
842 	in_pcbfree(inp);
843 	INP_INFO_WUNLOCK(&V_ripcbinfo);
844 }
845 
846 static void
847 rip_dodisconnect(struct socket *so, struct inpcb *inp)
848 {
849 	struct inpcbinfo *pcbinfo;
850 
851 	pcbinfo = inp->inp_pcbinfo;
852 	INP_INFO_WLOCK(pcbinfo);
853 	INP_WLOCK(inp);
854 	rip_delhash(inp);
855 	inp->inp_faddr.s_addr = INADDR_ANY;
856 	rip_inshash(inp);
857 	SOCK_LOCK(so);
858 	so->so_state &= ~SS_ISCONNECTED;
859 	SOCK_UNLOCK(so);
860 	INP_WUNLOCK(inp);
861 	INP_INFO_WUNLOCK(pcbinfo);
862 }
863 
864 static void
865 rip_abort(struct socket *so)
866 {
867 	struct inpcb *inp;
868 
869 	inp = sotoinpcb(so);
870 	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));
871 
872 	rip_dodisconnect(so, inp);
873 }
874 
875 static void
876 rip_close(struct socket *so)
877 {
878 	struct inpcb *inp;
879 
880 	inp = sotoinpcb(so);
881 	KASSERT(inp != NULL, ("rip_close: inp == NULL"));
882 
883 	rip_dodisconnect(so, inp);
884 }
885 
886 static int
887 rip_disconnect(struct socket *so)
888 {
889 	struct inpcb *inp;
890 
891 	if ((so->so_state & SS_ISCONNECTED) == 0)
892 		return (ENOTCONN);
893 
894 	inp = sotoinpcb(so);
895 	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));
896 
897 	rip_dodisconnect(so, inp);
898 	return (0);
899 }
900 
901 static int
902 rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
903 {
904 	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
905 	struct inpcb *inp;
906 	int error;
907 
908 	if (nam->sa_len != sizeof(*addr))
909 		return (EINVAL);
910 
911 	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
912 	if (error != 0)
913 		return (error);
914 
915 	inp = sotoinpcb(so);
916 	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));
917 
918 	if (TAILQ_EMPTY(&V_ifnet) ||
919 	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
920 	    (addr->sin_addr.s_addr &&
921 	     (inp->inp_flags & INP_BINDANY) == 0 &&
922 	     ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
923 		return (EADDRNOTAVAIL);
924 
925 	INP_INFO_WLOCK(&V_ripcbinfo);
926 	INP_WLOCK(inp);
927 	rip_delhash(inp);
928 	inp->inp_laddr = addr->sin_addr;
929 	rip_inshash(inp);
930 	INP_WUNLOCK(inp);
931 	INP_INFO_WUNLOCK(&V_ripcbinfo);
932 	return (0);
933 }
934 
935 static int
936 rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
937 {
938 	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
939 	struct inpcb *inp;
940 
941 	if (nam->sa_len != sizeof(*addr))
942 		return (EINVAL);
943 	if (TAILQ_EMPTY(&V_ifnet))
944 		return (EADDRNOTAVAIL);
945 	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
946 		return (EAFNOSUPPORT);
947 
948 	inp = sotoinpcb(so);
949 	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));
950 
951 	INP_INFO_WLOCK(&V_ripcbinfo);
952 	INP_WLOCK(inp);
953 	rip_delhash(inp);
954 	inp->inp_faddr = addr->sin_addr;
955 	rip_inshash(inp);
956 	soisconnected(so);
957 	INP_WUNLOCK(inp);
958 	INP_INFO_WUNLOCK(&V_ripcbinfo);
959 	return (0);
960 }
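
/*
 * Illustrative userland helper (never compiled here; the helper name and
 * addresses come from the caller): bind(2) and connect(2) on a raw socket
 * set inp_laddr and inp_faddr, which rip_bind()/rip_connect() above
 * rehash; once the protocol, local and foreign address are all set, the
 * PCB leaves the wildcard slot and only datagrams matching the 3-tuple
 * are delivered.
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>

static int
narrow_raw_socket(int s, const char *local, const char *peer)
{
	struct sockaddr_in sin;

	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = inet_addr(local);
	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) != 0)
		return (-1);
	sin.sin_addr.s_addr = inet_addr(peer);
	return (connect(s, (struct sockaddr *)&sin, sizeof(sin)));
}
#endif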
961 
962 static int
963 rip_shutdown(struct socket *so)
964 {
965 	struct inpcb *inp;
966 
967 	inp = sotoinpcb(so);
968 	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));
969 
970 	INP_WLOCK(inp);
971 	socantsendmore(so);
972 	INP_WUNLOCK(inp);
973 	return (0);
974 }
975 
976 static int
977 rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
978     struct mbuf *control, struct thread *td)
979 {
980 	struct inpcb *inp;
981 	u_long dst;
982 
983 	inp = sotoinpcb(so);
984 	KASSERT(inp != NULL, ("rip_send: inp == NULL"));
985 
986 	/*
987 	 * Note: 'dst' reads below are unlocked.
988 	 */
989 	if (so->so_state & SS_ISCONNECTED) {
990 		if (nam) {
991 			m_freem(m);
992 			return (EISCONN);
993 		}
994 		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
995 	} else {
996 		if (nam == NULL) {
997 			m_freem(m);
998 			return (ENOTCONN);
999 		}
1000 		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
1001 	}
1002 	return (rip_output(m, so, dst));
1003 }
1004 #endif /* INET */
1005 
1006 static int
1007 rip_pcblist(SYSCTL_HANDLER_ARGS)
1008 {
1009 	int error, i, n;
1010 	struct inpcb *inp, **inp_list;
1011 	inp_gen_t gencnt;
1012 	struct xinpgen xig;
1013 
1014 	/*
1015 	 * The process of preparing the PCB list is too time-consuming and
1016 	 * resource-intensive to repeat twice on every request.
1017 	 */
1018 	if (req->oldptr == 0) {
1019 		n = V_ripcbinfo.ipi_count;
1020 		n += imax(n / 8, 10);
1021 		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
1022 		return (0);
1023 	}
1024 
1025 	if (req->newptr != 0)
1026 		return (EPERM);
1027 
1028 	/*
1029 	 * OK, now we're committed to doing something.
1030 	 */
1031 	INP_INFO_RLOCK(&V_ripcbinfo);
1032 	gencnt = V_ripcbinfo.ipi_gencnt;
1033 	n = V_ripcbinfo.ipi_count;
1034 	INP_INFO_RUNLOCK(&V_ripcbinfo);
1035 
1036 	xig.xig_len = sizeof xig;
1037 	xig.xig_count = n;
1038 	xig.xig_gen = gencnt;
1039 	xig.xig_sogen = so_gencnt;
1040 	error = SYSCTL_OUT(req, &xig, sizeof xig);
1041 	if (error)
1042 		return (error);
1043 
1044 	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
1045 	if (inp_list == 0)
1046 		return (ENOMEM);
1047 
1048 	INP_INFO_RLOCK(&V_ripcbinfo);
1049 	for (inp = LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
1050 	     inp = LIST_NEXT(inp, inp_list)) {
1051 		INP_WLOCK(inp);
1052 		if (inp->inp_gencnt <= gencnt &&
1053 		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
1054 			in_pcbref(inp);
1055 			inp_list[i++] = inp;
1056 		}
1057 		INP_WUNLOCK(inp);
1058 	}
1059 	INP_INFO_RUNLOCK(&V_ripcbinfo);
1060 	n = i;
1061 
1062 	error = 0;
1063 	for (i = 0; i < n; i++) {
1064 		inp = inp_list[i];
1065 		INP_RLOCK(inp);
1066 		if (inp->inp_gencnt <= gencnt) {
1067 			struct xinpcb xi;
1068 
1069 			bzero(&xi, sizeof(xi));
1070 			xi.xi_len = sizeof xi;
1071 			/* XXX should avoid extra copy */
1072 			bcopy(inp, &xi.xi_inp, sizeof *inp);
1073 			if (inp->inp_socket)
1074 				sotoxsocket(inp->inp_socket, &xi.xi_socket);
1075 			INP_RUNLOCK(inp);
1076 			error = SYSCTL_OUT(req, &xi, sizeof xi);
1077 		} else
1078 			INP_RUNLOCK(inp);
1079 	}
1080 	INP_INFO_WLOCK(&V_ripcbinfo);
1081 	for (i = 0; i < n; i++) {
1082 		inp = inp_list[i];
1083 		INP_RLOCK(inp);
1084 		if (!in_pcbrele_rlocked(inp))
1085 			INP_RUNLOCK(inp);
1086 	}
1087 	INP_INFO_WUNLOCK(&V_ripcbinfo);
1088 
1089 	if (!error) {
1090 		/*
1091 		 * Give the user an updated idea of our state.  If the
1092 		 * generation differs from what we told her before, she knows
1093 		 * that something happened while we were processing this
1094 		 * request, and it might be necessary to retry.
1095 		 */
1096 		INP_INFO_RLOCK(&V_ripcbinfo);
1097 		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
1098 		xig.xig_sogen = so_gencnt;
1099 		xig.xig_count = V_ripcbinfo.ipi_count;
1100 		INP_INFO_RUNLOCK(&V_ripcbinfo);
1101 		error = SYSCTL_OUT(req, &xig, sizeof xig);
1102 	}
1103 	free(inp_list, M_TEMP);
1104 	return (error);
1105 }
1106 
1107 SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
1108     CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
1109     rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
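
/*
 * Illustrative userland sketch (never compiled here): the list exported
 * above can be fetched like the other pcblist sysctls; the leading
 * struct xinpgen carries the PCB count, followed by one struct xinpcb
 * per raw socket.
 */
#if 0
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct xinpgen *xig;
	char *buf;
	size_t len = 0;

	if (sysctlbyname("net.inet.raw.pcblist", NULL, &len, NULL, 0) != 0)
		return (1);
	if ((buf = malloc(len)) == NULL)
		return (1);
	if (sysctlbyname("net.inet.raw.pcblist", buf, &len, NULL, 0) != 0)
		return (1);
	xig = (struct xinpgen *)buf;
	printf("%u raw IP PCBs\n", (unsigned)xig->xig_count);
	free(buf);
	return (0);
}
#endif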
1110 
1111 #ifdef INET
1112 struct pr_usrreqs rip_usrreqs = {
1113 	.pru_abort =		rip_abort,
1114 	.pru_attach =		rip_attach,
1115 	.pru_bind =		rip_bind,
1116 	.pru_connect =		rip_connect,
1117 	.pru_control =		in_control,
1118 	.pru_detach =		rip_detach,
1119 	.pru_disconnect =	rip_disconnect,
1120 	.pru_peeraddr =		in_getpeeraddr,
1121 	.pru_send =		rip_send,
1122 	.pru_shutdown =		rip_shutdown,
1123 	.pru_sockaddr =		in_getsockaddr,
1124 	.pru_sosetlabel =	in_pcbsosetlabel,
1125 	.pru_close =		rip_close,
1126 };
1127 #endif /* INET */
1128