xref: /freebsd/sys/netinet/raw_ip.c (revision 7750ad47a9a7dbc83f87158464170c8640723293)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1993
3  *	The Regents of the University of California.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 4. Neither the name of the University nor the names of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_ipsec.h"
39 
40 #include <sys/param.h>
41 #include <sys/jail.h>
42 #include <sys/kernel.h>
43 #include <sys/lock.h>
44 #include <sys/malloc.h>
45 #include <sys/mbuf.h>
46 #include <sys/priv.h>
47 #include <sys/proc.h>
48 #include <sys/protosw.h>
49 #include <sys/rwlock.h>
50 #include <sys/signalvar.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/sx.h>
54 #include <sys/sysctl.h>
55 #include <sys/systm.h>
56 
57 #include <vm/uma.h>
58 
59 #include <net/if.h>
60 #include <net/route.h>
61 #include <net/vnet.h>
62 
63 #include <netinet/in.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/in_pcb.h>
66 #include <netinet/in_var.h>
67 #include <netinet/if_ether.h>
68 #include <netinet/ip.h>
69 #include <netinet/ip_var.h>
70 #include <netinet/ip_mroute.h>
71 
72 #ifdef IPSEC
73 #include <netipsec/ipsec.h>
74 #endif /*IPSEC*/
75 
76 #include <security/mac/mac_framework.h>
77 
78 VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
79 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW,
80     &VNET_NAME(ip_defttl), 0,
81     "Default TTL on outgoing IP packets");
82 
83 VNET_DEFINE(struct inpcbhead, ripcb);
84 VNET_DEFINE(struct inpcbinfo, ripcbinfo);
85 
86 #define	V_ripcb			VNET(ripcb)
87 #define	V_ripcbinfo		VNET(ripcbinfo)
88 
89 /*
90  * Control and data hooks for ipfw, dummynet, divert and so on.
91  * The data hooks are not used here but it is convenient
92  * to keep them all in one place.
93  */
94 VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
95 VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;
96 
97 int	(*ip_dn_ctl_ptr)(struct sockopt *);
98 int	(*ip_dn_io_ptr)(struct mbuf **, int, struct ip_fw_args *);
99 void	(*ip_divert_ptr)(struct mbuf *, int);
100 int	(*ng_ipfw_input_p)(struct mbuf **, int,
101 			struct ip_fw_args *, int);
102 
103 /* Hook for telling pf that the destination address changed */
104 void	(*m_addr_chg_pf_p)(struct mbuf *m);
105 
106 #ifdef INET
107 /*
108  * Hooks for multicast routing. They all default to NULL, so leave them not
109  * initialized and rely on BSS being set to 0.
110  */
111 
112 /*
113  * The socket used to communicate with the multicast routing daemon.
114  */
115 VNET_DEFINE(struct socket *, ip_mrouter);
116 
117 /*
118  * The various mrouter and rsvp functions.
119  */
120 int (*ip_mrouter_set)(struct socket *, struct sockopt *);
121 int (*ip_mrouter_get)(struct socket *, struct sockopt *);
122 int (*ip_mrouter_done)(void);
123 int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
124 		   struct ip_moptions *);
125 int (*mrt_ioctl)(u_long, caddr_t, int);
126 int (*legal_vif_num)(int);
127 u_long (*ip_mcast_src)(int);
128 
129 void (*rsvp_input_p)(struct mbuf *m, int off);
130 int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
131 void (*ip_rsvp_force_done)(struct socket *);
132 #endif /* INET */
133 
134 u_long	rip_sendspace = 9216;
135 SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
136     &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
137 
138 u_long	rip_recvspace = 9216;
139 SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
140     &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");
141 
142 /*
143  * Hash functions
144  */
145 
146 #define INP_PCBHASH_RAW_SIZE	256
147 #define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
148         (((proto) + (laddr) + (faddr)) % (mask) + 1)
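/*
 * A PCB with a non-zero protocol and fully specified local and foreign
 * addresses hashes into buckets 1..mask; bucket 0 is reserved for
 * wildcard PCBs (see rip_inshash()).  rip_input() below searches the
 * exact-match bucket first and then falls back to bucket 0.
 */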
149 
150 #ifdef INET
151 static void
152 rip_inshash(struct inpcb *inp)
153 {
154 	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
155 	struct inpcbhead *pcbhash;
156 	int hash;
157 
158 	INP_INFO_WLOCK_ASSERT(pcbinfo);
159 	INP_WLOCK_ASSERT(inp);
160 
161 	if (inp->inp_ip_p != 0 &&
162 	    inp->inp_laddr.s_addr != INADDR_ANY &&
163 	    inp->inp_faddr.s_addr != INADDR_ANY) {
164 		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
165 		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
166 	} else
167 		hash = 0;
168 	pcbhash = &pcbinfo->ipi_hashbase[hash];
169 	LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
170 }
171 
172 static void
173 rip_delhash(struct inpcb *inp)
174 {
175 
176 	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
177 	INP_WLOCK_ASSERT(inp);
178 
179 	LIST_REMOVE(inp, inp_hash);
180 }
181 #endif /* INET */
182 
183 /*
184  * Raw interface to IP protocol.
185  */
186 
187 /*
188  * Adjust the raw PCB zone's item limit when maxsockets changes.
189  */
190 static void
191 rip_zone_change(void *tag)
192 {
193 
194 	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
195 }
196 
197 static int
198 rip_inpcb_init(void *mem, int size, int flags)
199 {
200 	struct inpcb *inp = mem;
201 
202 	INP_LOCK_INIT(inp, "inp", "rawinp");
203 	return (0);
204 }
205 
206 void
207 rip_init(void)
208 {
209 
210 	in_pcbinfo_init(&V_ripcbinfo, "rip", &V_ripcb, INP_PCBHASH_RAW_SIZE,
211 	    1, "ripcb", rip_inpcb_init, NULL, UMA_ZONE_NOFREE,
212 	    IPI_HASHFIELDS_NONE);
213 	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
214 	    EVENTHANDLER_PRI_ANY);
215 }
216 
217 #ifdef VIMAGE
218 void
219 rip_destroy(void)
220 {
221 
222 	in_pcbinfo_destroy(&V_ripcbinfo);
223 }
224 #endif
225 
226 #ifdef INET
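/*
 * Deliver a copy of an inbound datagram to one raw PCB: run the IPsec,
 * MAC and minimum-TTL policy checks and, if they pass, append the mbuf
 * (plus any requested control data) to the socket's receive buffer.
 * The mbuf chain 'n' is always consumed.  Returns non-zero if the
 * packet was rejected by policy, zero otherwise.
 */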
227 static int
228 rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
229     struct sockaddr_in *ripsrc)
230 {
231 	int policyfail = 0;
232 
233 	INP_LOCK_ASSERT(last);
234 
235 #ifdef IPSEC
236 	/* check AH/ESP integrity. */
237 	if (ipsec4_in_reject(n, last)) {
238 		policyfail = 1;
239 	}
240 #endif /* IPSEC */
241 #ifdef MAC
242 	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
243 		policyfail = 1;
244 #endif
245 	/* Check the minimum TTL for socket. */
246 	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
247 		policyfail = 1;
248 	if (!policyfail) {
249 		struct mbuf *opts = NULL;
250 		struct socket *so;
251 
252 		so = last->inp_socket;
253 		if ((last->inp_flags & INP_CONTROLOPTS) ||
254 		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
255 			ip_savecontrol(last, &opts, ip, n);
256 		SOCKBUF_LOCK(&so->so_rcv);
257 		if (sbappendaddr_locked(&so->so_rcv,
258 		    (struct sockaddr *)ripsrc, n, opts) == 0) {
259 			/* should notify about lost packet */
260 			m_freem(n);
261 			if (opts)
262 				m_freem(opts);
263 			SOCKBUF_UNLOCK(&so->so_rcv);
264 		} else
265 			sorwakeup_locked(so);
266 	} else
267 		m_freem(n);
268 	return (policyfail);
269 }
270 
271 /*
272  * Setup generic address and protocol structures for raw_input routine, then
273  * pass them along with mbuf chain.
274  */
275 void
276 rip_input(struct mbuf *m, int off)
277 {
278 	struct ifnet *ifp;
279 	struct ip *ip = mtod(m, struct ip *);
280 	int proto = ip->ip_p;
281 	struct inpcb *inp, *last;
282 	struct sockaddr_in ripsrc;
283 	int hash;
284 
285 	bzero(&ripsrc, sizeof(ripsrc));
286 	ripsrc.sin_len = sizeof(ripsrc);
287 	ripsrc.sin_family = AF_INET;
288 	ripsrc.sin_addr = ip->ip_src;
289 	last = NULL;
290 
291 	ifp = m->m_pkthdr.rcvif;
292 	/*
293 	 * Add back the IP header length which was
294 	 * removed by ip_input().  Raw sockets do
295 	 * not modify the packet except for some
296 	 * byte order swaps.
297 	 */
298 	ip->ip_len += off;
299 
300 	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
301 	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
302 	INP_INFO_RLOCK(&V_ripcbinfo);
303 	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
304 		if (inp->inp_ip_p != proto)
305 			continue;
306 #ifdef INET6
307 		/* XXX inp locking */
308 		if ((inp->inp_vflag & INP_IPV4) == 0)
309 			continue;
310 #endif
311 		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
312 			continue;
313 		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
314 			continue;
315 		if (jailed_without_vnet(inp->inp_cred)) {
316 			/*
317 			 * XXX: If faddr was bound to multicast group,
318 			 * jailed raw socket will drop datagram.
319 			 */
320 			if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
321 				continue;
322 		}
323 		if (last != NULL) {
324 			struct mbuf *n;
325 
326 			n = m_copy(m, 0, (int)M_COPYALL);
327 			if (n != NULL)
328 				(void) rip_append(last, ip, n, &ripsrc);
329 			/* XXX count dropped packet */
330 			INP_RUNLOCK(last);
331 		}
332 		INP_RLOCK(inp);
333 		last = inp;
334 	}
335 	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
336 		if (inp->inp_ip_p && inp->inp_ip_p != proto)
337 			continue;
338 #ifdef INET6
339 		/* XXX inp locking */
340 		if ((inp->inp_vflag & INP_IPV4) == 0)
341 			continue;
342 #endif
343 		if (!in_nullhost(inp->inp_laddr) &&
344 		    !in_hosteq(inp->inp_laddr, ip->ip_dst))
345 			continue;
346 		if (!in_nullhost(inp->inp_faddr) &&
347 		    !in_hosteq(inp->inp_faddr, ip->ip_src))
348 			continue;
349 		if (jailed_without_vnet(inp->inp_cred)) {
350 			/*
351 			 * Allow raw socket in jail to receive multicast;
352 			 * assume process had PRIV_NETINET_RAW at attach,
353 			 * and fall through into normal filter path if so.
354 			 */
355 			if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
356 			    prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
357 				continue;
358 		}
359 		/*
360 		 * If this raw socket has multicast state, and we
361 		 * have received a multicast, check if this socket
362 		 * should receive it, as multicast filtering is now
363 		 * the responsibility of the transport layer.
364 		 */
365 		if (inp->inp_moptions != NULL &&
366 		    IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
367 			/*
368 			 * If the incoming datagram is for IGMP, allow it
369 			 * through unconditionally to the raw socket.
370 			 *
371 			 * In the case of IGMPv2, we may not have explicitly
372 			 * joined the group, and may have set IFF_ALLMULTI
373 			 * on the interface. imo_multi_filter() may discard
374 			 * control traffic we actually need to see.
375 			 *
376 			 * Userland multicast routing daemons should continue
377 			 * to filter the control traffic appropriately.
378 			 */
379 			int blocked;
380 
381 			blocked = MCAST_PASS;
382 			if (proto != IPPROTO_IGMP) {
383 				struct sockaddr_in group;
384 
385 				bzero(&group, sizeof(struct sockaddr_in));
386 				group.sin_len = sizeof(struct sockaddr_in);
387 				group.sin_family = AF_INET;
388 				group.sin_addr = ip->ip_dst;
389 
390 				blocked = imo_multi_filter(inp->inp_moptions,
391 				    ifp,
392 				    (struct sockaddr *)&group,
393 				    (struct sockaddr *)&ripsrc);
394 			}
395 
396 			if (blocked != MCAST_PASS) {
397 				IPSTAT_INC(ips_notmember);
398 				continue;
399 			}
400 		}
401 		if (last != NULL) {
402 			struct mbuf *n;
403 
404 			n = m_copy(m, 0, (int)M_COPYALL);
405 			if (n != NULL)
406 				(void) rip_append(last, ip, n, &ripsrc);
407 			/* XXX count dropped packet */
408 			INP_RUNLOCK(last);
409 		}
410 		INP_RLOCK(inp);
411 		last = inp;
412 	}
413 	INP_INFO_RUNLOCK(&V_ripcbinfo);
414 	if (last != NULL) {
415 		if (rip_append(last, ip, m, &ripsrc) != 0)
416 			IPSTAT_INC(ips_delivered);
417 		INP_RUNLOCK(last);
418 	} else {
419 		m_freem(m);
420 		IPSTAT_INC(ips_noproto);
421 		IPSTAT_DEC(ips_delivered);
422 	}
423 }
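/*
 * Illustrative sketch (not part of this file): a privileged userland
 * consumer of the delivery path above would typically open a raw socket
 * and read whole datagrams, IP header included, e.g.:
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 *	char buf[IP_MAXPACKET];
 *	ssize_t n = recv(s, buf, sizeof(buf), 0);
 *
 * Variable names are illustrative only; which datagrams arrive is
 * decided by the hash and wildcard matching performed in rip_input().
 */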
424 
425 /*
426  * Generate IP header and pass packet to ip_output.  Tack on options user may
427  * have setup with control call.
428  */
429 int
430 rip_output(struct mbuf *m, struct socket *so, u_long dst)
431 {
432 	struct ip *ip;
433 	int error;
434 	struct inpcb *inp = sotoinpcb(so);
435 	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
436 	    IP_ALLOWBROADCAST;
437 
438 	/*
439 	 * If the user handed us a complete IP packet, use it.  Otherwise,
440 	 * allocate an mbuf for a header and fill it in.
441 	 */
442 	if ((inp->inp_flags & INP_HDRINCL) == 0) {
443 		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
444 			m_freem(m);
445 			return(EMSGSIZE);
446 		}
447 		M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
448 		if (m == NULL)
449 			return(ENOBUFS);
450 
451 		INP_RLOCK(inp);
452 		ip = mtod(m, struct ip *);
453 		ip->ip_tos = inp->inp_ip_tos;
454 		if (inp->inp_flags & INP_DONTFRAG)
455 			ip->ip_off = IP_DF;
456 		else
457 			ip->ip_off = 0;
458 		ip->ip_p = inp->inp_ip_p;
459 		ip->ip_len = m->m_pkthdr.len;
460 		ip->ip_src = inp->inp_laddr;
461 		if (jailed(inp->inp_cred)) {
462 			/*
463 			 * prison_local_ip4() would be good enough but would
464 			 * let a source of INADDR_ANY pass, which we do not
465 			 * want to see from jails. We do not go through the
466 			 * pain of in_pcbladdr() for raw sockets.
467 			 */
468 			if (ip->ip_src.s_addr == INADDR_ANY)
469 				error = prison_get_ip4(inp->inp_cred,
470 				    &ip->ip_src);
471 			else
472 				error = prison_local_ip4(inp->inp_cred,
473 				    &ip->ip_src);
474 			if (error != 0) {
475 				INP_RUNLOCK(inp);
476 				m_freem(m);
477 				return (error);
478 			}
479 		}
480 		ip->ip_dst.s_addr = dst;
481 		ip->ip_ttl = inp->inp_ip_ttl;
482 	} else {
483 		if (m->m_pkthdr.len > IP_MAXPACKET) {
484 			m_freem(m);
485 			return(EMSGSIZE);
486 		}
487 		INP_RLOCK(inp);
488 		ip = mtod(m, struct ip *);
489 		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
490 		if (error != 0) {
491 			INP_RUNLOCK(inp);
492 			m_freem(m);
493 			return (error);
494 		}
495 
496 		/*
497 		 * Don't allow both user specified and setsockopt options,
498 		 * and don't allow packet length sizes that will crash.
499 		 */
500 		if (((ip->ip_hl != (sizeof (*ip) >> 2)) && inp->inp_options)
501 		    || (ip->ip_len > m->m_pkthdr.len)
502 		    || (ip->ip_len < (ip->ip_hl << 2))) {
503 			INP_RUNLOCK(inp);
504 			m_freem(m);
505 			return (EINVAL);
506 		}
507 		if (ip->ip_id == 0)
508 			ip->ip_id = ip_newid();
509 
510 		/*
511 		 * XXX prevent ip_output from overwriting header fields.
512 		 */
513 		flags |= IP_RAWOUTPUT;
514 		IPSTAT_INC(ips_rawout);
515 	}
516 
517 	if (inp->inp_flags & INP_ONESBCAST)
518 		flags |= IP_SENDONES;
519 
520 #ifdef MAC
521 	mac_inpcb_create_mbuf(inp, m);
522 #endif
523 
524 	error = ip_output(m, inp->inp_options, NULL, flags,
525 	    inp->inp_moptions, inp);
526 	INP_RUNLOCK(inp);
527 	return (error);
528 }
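/*
 * Illustrative sketch (not part of this file): the INP_HDRINCL branch
 * above corresponds to a userland sender that builds its own IP header,
 * roughly:
 *
 *	int one = 1;
 *	setsockopt(s, IPPROTO_IP, IP_HDRINCL, &one, sizeof(one));
 *	sendto(s, pkt, pktlen, 0, (struct sockaddr *)&sin, sizeof(sin));
 *
 * where 'pkt' begins with a struct ip (names here are illustrative).
 * As the checks above show, the supplied header must be consistent with
 * the actual packet length, and an ip_id of zero is replaced with a
 * freshly allocated identifier.
 */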
529 
530 /*
531  * Raw IP socket option processing.
532  *
533  * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
534  * only be created by a privileged process, and as such, socket option
535  * operations to manage system properties on any raw socket were allowed to
536  * take place without explicit additional access control checks.  However,
537  * raw sockets can now also be created in jail(), and therefore explicit
538  * checks are now required.  Likewise, raw sockets can be used by a process
539  * after it gives up privilege, so some caution is required.  For options
540  * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
541  * performed in ip_ctloutput() and therefore no check occurs here.
542  * Unilaterally checking priv_check() here breaks normal IP socket option
543  * operations on raw sockets.
544  *
545  * When adding new socket options here, make sure to add access control
546  * checks here as necessary.
547  *
548  * XXX-BZ inp locking?
549  */
550 int
551 rip_ctloutput(struct socket *so, struct sockopt *sopt)
552 {
553 	struct	inpcb *inp = sotoinpcb(so);
554 	int	error, optval;
555 
556 	if (sopt->sopt_level != IPPROTO_IP) {
557 		if ((sopt->sopt_level == SOL_SOCKET) &&
558 		    (sopt->sopt_name == SO_SETFIB)) {
559 			inp->inp_inc.inc_fibnum = so->so_fibnum;
560 			return (0);
561 		}
562 		return (EINVAL);
563 	}
564 
565 	error = 0;
566 	switch (sopt->sopt_dir) {
567 	case SOPT_GET:
568 		switch (sopt->sopt_name) {
569 		case IP_HDRINCL:
570 			optval = inp->inp_flags & INP_HDRINCL;
571 			error = sooptcopyout(sopt, &optval, sizeof optval);
572 			break;
573 
574 		case IP_FW3:	/* generic ipfw v.3 functions */
575 		case IP_FW_ADD:	/* ADD actually returns the body... */
576 		case IP_FW_GET:
577 		case IP_FW_TABLE_GETSIZE:
578 		case IP_FW_TABLE_LIST:
579 		case IP_FW_NAT_GET_CONFIG:
580 		case IP_FW_NAT_GET_LOG:
581 			if (V_ip_fw_ctl_ptr != NULL)
582 				error = V_ip_fw_ctl_ptr(sopt);
583 			else
584 				error = ENOPROTOOPT;
585 			break;
586 
587 		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
588 		case IP_DUMMYNET_GET:
589 			if (ip_dn_ctl_ptr != NULL)
590 				error = ip_dn_ctl_ptr(sopt);
591 			else
592 				error = ENOPROTOOPT;
593 			break;
594 
595 		case MRT_INIT:
596 		case MRT_DONE:
597 		case MRT_ADD_VIF:
598 		case MRT_DEL_VIF:
599 		case MRT_ADD_MFC:
600 		case MRT_DEL_MFC:
601 		case MRT_VERSION:
602 		case MRT_ASSERT:
603 		case MRT_API_SUPPORT:
604 		case MRT_API_CONFIG:
605 		case MRT_ADD_BW_UPCALL:
606 		case MRT_DEL_BW_UPCALL:
607 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
608 			if (error != 0)
609 				return (error);
610 			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
611 				EOPNOTSUPP;
612 			break;
613 
614 		default:
615 			error = ip_ctloutput(so, sopt);
616 			break;
617 		}
618 		break;
619 
620 	case SOPT_SET:
621 		switch (sopt->sopt_name) {
622 		case IP_HDRINCL:
623 			error = sooptcopyin(sopt, &optval, sizeof optval,
624 					    sizeof optval);
625 			if (error)
626 				break;
627 			if (optval)
628 				inp->inp_flags |= INP_HDRINCL;
629 			else
630 				inp->inp_flags &= ~INP_HDRINCL;
631 			break;
632 
633 		case IP_FW3:	/* generic ipfw v.3 functions */
634 		case IP_FW_ADD:
635 		case IP_FW_DEL:
636 		case IP_FW_FLUSH:
637 		case IP_FW_ZERO:
638 		case IP_FW_RESETLOG:
639 		case IP_FW_TABLE_ADD:
640 		case IP_FW_TABLE_DEL:
641 		case IP_FW_TABLE_FLUSH:
642 		case IP_FW_NAT_CFG:
643 		case IP_FW_NAT_DEL:
644 			if (V_ip_fw_ctl_ptr != NULL)
645 				error = V_ip_fw_ctl_ptr(sopt);
646 			else
647 				error = ENOPROTOOPT;
648 			break;
649 
650 		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
651 		case IP_DUMMYNET_CONFIGURE:
652 		case IP_DUMMYNET_DEL:
653 		case IP_DUMMYNET_FLUSH:
654 			if (ip_dn_ctl_ptr != NULL)
655 				error = ip_dn_ctl_ptr(sopt);
656 			else
657 				error = ENOPROTOOPT;
658 			break;
659 
660 		case IP_RSVP_ON:
661 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
662 			if (error != 0)
663 				return (error);
664 			error = ip_rsvp_init(so);
665 			break;
666 
667 		case IP_RSVP_OFF:
668 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
669 			if (error != 0)
670 				return (error);
671 			error = ip_rsvp_done();
672 			break;
673 
674 		case IP_RSVP_VIF_ON:
675 		case IP_RSVP_VIF_OFF:
676 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
677 			if (error != 0)
678 				return (error);
679 			error = ip_rsvp_vif ?
680 				ip_rsvp_vif(so, sopt) : EINVAL;
681 			break;
682 
683 		case MRT_INIT:
684 		case MRT_DONE:
685 		case MRT_ADD_VIF:
686 		case MRT_DEL_VIF:
687 		case MRT_ADD_MFC:
688 		case MRT_DEL_MFC:
689 		case MRT_VERSION:
690 		case MRT_ASSERT:
691 		case MRT_API_SUPPORT:
692 		case MRT_API_CONFIG:
693 		case MRT_ADD_BW_UPCALL:
694 		case MRT_DEL_BW_UPCALL:
695 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
696 			if (error != 0)
697 				return (error);
698 			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
699 					EOPNOTSUPP;
700 			break;
701 
702 		default:
703 			error = ip_ctloutput(so, sopt);
704 			break;
705 		}
706 		break;
707 	}
708 
709 	return (error);
710 }
711 
712 /*
713  * This function exists solely to receive the PRC_IFDOWN messages which are
714  * sent by if_down().  It looks for an ifaddr whose ifa_addr is sa, and calls
715  * in_ifadown() to remove all routes corresponding to that address.  It also
716  * receives the PRC_IFUP messages from if_up() and reinstalls the interface
717  * routes.
718  */
719 void
720 rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
721 {
722 	struct in_ifaddr *ia;
723 	struct ifnet *ifp;
724 	int err;
725 	int flags;
726 
727 	switch (cmd) {
728 	case PRC_IFDOWN:
729 		IN_IFADDR_RLOCK();
730 		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
731 			if (ia->ia_ifa.ifa_addr == sa
732 			    && (ia->ia_flags & IFA_ROUTE)) {
733 				ifa_ref(&ia->ia_ifa);
734 				IN_IFADDR_RUNLOCK();
735 				/*
736 				 * in_ifscrub kills the interface route.
737 				 */
738 				in_ifscrub(ia->ia_ifp, ia, 0);
739 				/*
740 				 * in_ifadown gets rid of all the rest of the
741 				 * routes.  This is not quite the right thing
742 				 * to do, but at least if we are running a
743 				 * routing process they will come back.
744 				 */
745 				in_ifadown(&ia->ia_ifa, 0);
746 				ifa_free(&ia->ia_ifa);
747 				break;
748 			}
749 		}
750 		if (ia == NULL)		/* If ia matched, already unlocked. */
751 			IN_IFADDR_RUNLOCK();
752 		break;
753 
754 	case PRC_IFUP:
755 		IN_IFADDR_RLOCK();
756 		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
757 			if (ia->ia_ifa.ifa_addr == sa)
758 				break;
759 		}
760 		if (ia == NULL || (ia->ia_flags & IFA_ROUTE)) {
761 			IN_IFADDR_RUNLOCK();
762 			return;
763 		}
764 		ifa_ref(&ia->ia_ifa);
765 		IN_IFADDR_RUNLOCK();
766 		flags = RTF_UP;
767 		ifp = ia->ia_ifa.ifa_ifp;
768 
769 		if ((ifp->if_flags & IFF_LOOPBACK)
770 		    || (ifp->if_flags & IFF_POINTOPOINT))
771 			flags |= RTF_HOST;
772 
773 		err = ifa_del_loopback_route((struct ifaddr *)ia, sa);
774 		if (err == 0)
775 			ia->ia_flags &= ~IFA_RTSELF;
776 
777 		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
778 		if (err == 0)
779 			ia->ia_flags |= IFA_ROUTE;
780 
781 		err = ifa_add_loopback_route((struct ifaddr *)ia, sa);
782 		if (err == 0)
783 			ia->ia_flags |= IFA_RTSELF;
784 
785 		ifa_free(&ia->ia_ifa);
786 		break;
787 	}
788 }
789 
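/*
 * Attach a raw PCB to a newly created socket.  Creation requires
 * PRIV_NETINET_RAW; the requested protocol is recorded in inp_ip_p, and
 * a protocol of zero acts as a wildcard that matches datagrams of any
 * protocol in rip_input().
 */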
790 static int
791 rip_attach(struct socket *so, int proto, struct thread *td)
792 {
793 	struct inpcb *inp;
794 	int error;
795 
796 	inp = sotoinpcb(so);
797 	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));
798 
799 	error = priv_check(td, PRIV_NETINET_RAW);
800 	if (error)
801 		return (error);
802 	if (proto >= IPPROTO_MAX || proto < 0)
803 		return EPROTONOSUPPORT;
804 	error = soreserve(so, rip_sendspace, rip_recvspace);
805 	if (error)
806 		return (error);
807 	INP_INFO_WLOCK(&V_ripcbinfo);
808 	error = in_pcballoc(so, &V_ripcbinfo);
809 	if (error) {
810 		INP_INFO_WUNLOCK(&V_ripcbinfo);
811 		return (error);
812 	}
813 	inp = (struct inpcb *)so->so_pcb;
814 	inp->inp_vflag |= INP_IPV4;
815 	inp->inp_ip_p = proto;
816 	inp->inp_ip_ttl = V_ip_defttl;
817 	rip_inshash(inp);
818 	INP_INFO_WUNLOCK(&V_ripcbinfo);
819 	INP_WUNLOCK(inp);
820 	return (0);
821 }
822 
823 static void
824 rip_detach(struct socket *so)
825 {
826 	struct inpcb *inp;
827 
828 	inp = sotoinpcb(so);
829 	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
830 	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
831 	    ("rip_detach: not closed"));
832 
833 	INP_INFO_WLOCK(&V_ripcbinfo);
834 	INP_WLOCK(inp);
835 	rip_delhash(inp);
836 	if (so == V_ip_mrouter && ip_mrouter_done)
837 		ip_mrouter_done();
838 	if (ip_rsvp_force_done)
839 		ip_rsvp_force_done(so);
840 	if (so == V_ip_rsvpd)
841 		ip_rsvp_done();
842 	in_pcbdetach(inp);
843 	in_pcbfree(inp);
844 	INP_INFO_WUNLOCK(&V_ripcbinfo);
845 }
846 
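/*
 * Common disconnect handling for abort, close and disconnect: clear the
 * foreign address and rehash the PCB so that it moves back to the
 * wildcard bucket and is no longer treated as connected by rip_input().
 */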
847 static void
848 rip_dodisconnect(struct socket *so, struct inpcb *inp)
849 {
850 	struct inpcbinfo *pcbinfo;
851 
852 	pcbinfo = inp->inp_pcbinfo;
853 	INP_INFO_WLOCK(pcbinfo);
854 	INP_WLOCK(inp);
855 	rip_delhash(inp);
856 	inp->inp_faddr.s_addr = INADDR_ANY;
857 	rip_inshash(inp);
858 	SOCK_LOCK(so);
859 	so->so_state &= ~SS_ISCONNECTED;
860 	SOCK_UNLOCK(so);
861 	INP_WUNLOCK(inp);
862 	INP_INFO_WUNLOCK(pcbinfo);
863 }
864 
865 static void
866 rip_abort(struct socket *so)
867 {
868 	struct inpcb *inp;
869 
870 	inp = sotoinpcb(so);
871 	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));
872 
873 	rip_dodisconnect(so, inp);
874 }
875 
876 static void
877 rip_close(struct socket *so)
878 {
879 	struct inpcb *inp;
880 
881 	inp = sotoinpcb(so);
882 	KASSERT(inp != NULL, ("rip_close: inp == NULL"));
883 
884 	rip_dodisconnect(so, inp);
885 }
886 
887 static int
888 rip_disconnect(struct socket *so)
889 {
890 	struct inpcb *inp;
891 
892 	if ((so->so_state & SS_ISCONNECTED) == 0)
893 		return (ENOTCONN);
894 
895 	inp = sotoinpcb(so);
896 	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));
897 
898 	rip_dodisconnect(so, inp);
899 	return (0);
900 }
901 
902 static int
903 rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
904 {
905 	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
906 	struct inpcb *inp;
907 	int error;
908 
909 	if (nam->sa_len != sizeof(*addr))
910 		return (EINVAL);
911 
912 	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
913 	if (error != 0)
914 		return (error);
915 
916 	inp = sotoinpcb(so);
917 	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));
918 
919 	if (TAILQ_EMPTY(&V_ifnet) ||
920 	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
921 	    (addr->sin_addr.s_addr &&
922 	     (inp->inp_flags & INP_BINDANY) == 0 &&
923 	     ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
924 		return (EADDRNOTAVAIL);
925 
926 	INP_INFO_WLOCK(&V_ripcbinfo);
927 	INP_WLOCK(inp);
928 	rip_delhash(inp);
929 	inp->inp_laddr = addr->sin_addr;
930 	rip_inshash(inp);
931 	INP_WUNLOCK(inp);
932 	INP_INFO_WUNLOCK(&V_ripcbinfo);
933 	return (0);
934 }
935 
936 static int
937 rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
938 {
939 	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
940 	struct inpcb *inp;
941 
942 	if (nam->sa_len != sizeof(*addr))
943 		return (EINVAL);
944 	if (TAILQ_EMPTY(&V_ifnet))
945 		return (EADDRNOTAVAIL);
946 	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
947 		return (EAFNOSUPPORT);
948 
949 	inp = sotoinpcb(so);
950 	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));
951 
952 	INP_INFO_WLOCK(&V_ripcbinfo);
953 	INP_WLOCK(inp);
954 	rip_delhash(inp);
955 	inp->inp_faddr = addr->sin_addr;
956 	rip_inshash(inp);
957 	soisconnected(so);
958 	INP_WUNLOCK(inp);
959 	INP_INFO_WUNLOCK(&V_ripcbinfo);
960 	return (0);
961 }
962 
963 static int
964 rip_shutdown(struct socket *so)
965 {
966 	struct inpcb *inp;
967 
968 	inp = sotoinpcb(so);
969 	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));
970 
971 	INP_WLOCK(inp);
972 	socantsendmore(so);
973 	INP_WUNLOCK(inp);
974 	return (0);
975 }
976 
977 static int
978 rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
979     struct mbuf *control, struct thread *td)
980 {
981 	struct inpcb *inp;
982 	u_long dst;
983 
984 	inp = sotoinpcb(so);
985 	KASSERT(inp != NULL, ("rip_send: inp == NULL"));
986 
987 	/*
988 	 * Note: 'dst' reads below are unlocked.
989 	 */
990 	if (so->so_state & SS_ISCONNECTED) {
991 		if (nam) {
992 			m_freem(m);
993 			return (EISCONN);
994 		}
995 		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
996 	} else {
997 		if (nam == NULL) {
998 			m_freem(m);
999 			return (ENOTCONN);
1000 		}
1001 		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
1002 	}
1003 	return (rip_output(m, so, dst));
1004 }
1005 #endif /* INET */
1006 
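/*
 * Export the list of raw PCBs via sysctl.  The size-only pass
 * (req->oldptr == NULL) returns a padded estimate; the real pass
 * snapshots the generation count, copies out an xinpgen header and the
 * xinpcb entries visible to the caller, and appends a trailing xinpgen
 * so the caller can detect concurrent changes.
 */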
1007 static int
1008 rip_pcblist(SYSCTL_HANDLER_ARGS)
1009 {
1010 	int error, i, n;
1011 	struct inpcb *inp, **inp_list;
1012 	inp_gen_t gencnt;
1013 	struct xinpgen xig;
1014 
1015 	/*
1016 	 * The process of preparing the PCB list is too time-consuming and
1017 	 * resource-intensive to repeat twice on every request.
1018 	 */
1019 	if (req->oldptr == 0) {
1020 		n = V_ripcbinfo.ipi_count;
1021 		n += imax(n / 8, 10);
1022 		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
1023 		return (0);
1024 	}
1025 
1026 	if (req->newptr != 0)
1027 		return (EPERM);
1028 
1029 	/*
1030 	 * OK, now we're committed to doing something.
1031 	 */
1032 	INP_INFO_RLOCK(&V_ripcbinfo);
1033 	gencnt = V_ripcbinfo.ipi_gencnt;
1034 	n = V_ripcbinfo.ipi_count;
1035 	INP_INFO_RUNLOCK(&V_ripcbinfo);
1036 
1037 	xig.xig_len = sizeof xig;
1038 	xig.xig_count = n;
1039 	xig.xig_gen = gencnt;
1040 	xig.xig_sogen = so_gencnt;
1041 	error = SYSCTL_OUT(req, &xig, sizeof xig);
1042 	if (error)
1043 		return (error);
1044 
1045 	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
1046 	if (inp_list == 0)
1047 		return (ENOMEM);
1048 
1049 	INP_INFO_RLOCK(&V_ripcbinfo);
1050 	for (inp = LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
1051 	     inp = LIST_NEXT(inp, inp_list)) {
1052 		INP_WLOCK(inp);
1053 		if (inp->inp_gencnt <= gencnt &&
1054 		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
1055 			in_pcbref(inp);
1056 			inp_list[i++] = inp;
1057 		}
1058 		INP_WUNLOCK(inp);
1059 	}
1060 	INP_INFO_RUNLOCK(&V_ripcbinfo);
1061 	n = i;
1062 
1063 	error = 0;
1064 	for (i = 0; i < n; i++) {
1065 		inp = inp_list[i];
1066 		INP_RLOCK(inp);
1067 		if (inp->inp_gencnt <= gencnt) {
1068 			struct xinpcb xi;
1069 
1070 			bzero(&xi, sizeof(xi));
1071 			xi.xi_len = sizeof xi;
1072 			/* XXX should avoid extra copy */
1073 			bcopy(inp, &xi.xi_inp, sizeof *inp);
1074 			if (inp->inp_socket)
1075 				sotoxsocket(inp->inp_socket, &xi.xi_socket);
1076 			INP_RUNLOCK(inp);
1077 			error = SYSCTL_OUT(req, &xi, sizeof xi);
1078 		} else
1079 			INP_RUNLOCK(inp);
1080 	}
1081 	INP_INFO_WLOCK(&V_ripcbinfo);
1082 	for (i = 0; i < n; i++) {
1083 		inp = inp_list[i];
1084 		INP_RLOCK(inp);
1085 		if (!in_pcbrele_rlocked(inp))
1086 			INP_RUNLOCK(inp);
1087 	}
1088 	INP_INFO_WUNLOCK(&V_ripcbinfo);
1089 
1090 	if (!error) {
1091 		/*
1092 		 * Give the user an updated idea of our state.  If the
1093 		 * generation differs from what we told her before, she knows
1094 		 * that something happened while we were processing this
1095 		 * request, and it might be necessary to retry.
1096 		 */
1097 		INP_INFO_RLOCK(&V_ripcbinfo);
1098 		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
1099 		xig.xig_sogen = so_gencnt;
1100 		xig.xig_count = V_ripcbinfo.ipi_count;
1101 		INP_INFO_RUNLOCK(&V_ripcbinfo);
1102 		error = SYSCTL_OUT(req, &xig, sizeof xig);
1103 	}
1104 	free(inp_list, M_TEMP);
1105 	return (error);
1106 }
1107 
1108 SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
1109     CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
1110     rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
1111 
1112 #ifdef INET
1113 struct pr_usrreqs rip_usrreqs = {
1114 	.pru_abort =		rip_abort,
1115 	.pru_attach =		rip_attach,
1116 	.pru_bind =		rip_bind,
1117 	.pru_connect =		rip_connect,
1118 	.pru_control =		in_control,
1119 	.pru_detach =		rip_detach,
1120 	.pru_disconnect =	rip_disconnect,
1121 	.pru_peeraddr =		in_getpeeraddr,
1122 	.pru_send =		rip_send,
1123 	.pru_shutdown =		rip_shutdown,
1124 	.pru_sockaddr =		in_getsockaddr,
1125 	.pru_sosetlabel =	in_pcbsosetlabel,
1126 	.pru_close =		rip_close,
1127 };
1128 #endif /* INET */
1129