xref: /freebsd/sys/netinet/raw_ip.c (revision 5608fd23c27fa1e8ee595d7b678cbfd35d657fbe)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1993
3  *	The Regents of the University of California.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 4. Neither the name of the University nor the names of its contributors
15  *    may be used to endorse or promote products derived from this software
16  *    without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  *
30  *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include "opt_inet.h"
37 #include "opt_inet6.h"
38 #include "opt_ipsec.h"
39 
40 #include <sys/param.h>
41 #include <sys/jail.h>
42 #include <sys/kernel.h>
43 #include <sys/lock.h>
44 #include <sys/malloc.h>
45 #include <sys/mbuf.h>
46 #include <sys/priv.h>
47 #include <sys/proc.h>
48 #include <sys/protosw.h>
49 #include <sys/rwlock.h>
50 #include <sys/signalvar.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/sx.h>
54 #include <sys/sysctl.h>
55 #include <sys/systm.h>
56 
57 #include <vm/uma.h>
58 
59 #include <net/if.h>
60 #include <net/if_var.h>
61 #include <net/route.h>
62 #include <net/vnet.h>
63 
64 #include <netinet/in.h>
65 #include <netinet/in_systm.h>
66 #include <netinet/in_pcb.h>
67 #include <netinet/in_var.h>
68 #include <netinet/if_ether.h>
69 #include <netinet/ip.h>
70 #include <netinet/ip_var.h>
71 #include <netinet/ip_mroute.h>
72 
73 #ifdef IPSEC
74 #include <netipsec/ipsec.h>
75 #endif /*IPSEC*/
76 
77 #include <machine/stdarg.h>
78 #include <security/mac/mac_framework.h>
79 
80 VNET_DEFINE(int, ip_defttl) = IPDEFTTL;
81 SYSCTL_VNET_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW,
82     &VNET_NAME(ip_defttl), 0,
83     "Default TTL on outgoing IP packets");
84 
85 VNET_DEFINE(struct inpcbhead, ripcb);
86 VNET_DEFINE(struct inpcbinfo, ripcbinfo);
87 
88 #define	V_ripcb			VNET(ripcb)
89 #define	V_ripcbinfo		VNET(ripcbinfo)
90 
91 /*
92  * Control and data hooks for ipfw, dummynet, divert and so on.
93  * The data hooks are not used here but it is convenient
94  * to keep them all in one place.
95  */
96 VNET_DEFINE(ip_fw_chk_ptr_t, ip_fw_chk_ptr) = NULL;
97 VNET_DEFINE(ip_fw_ctl_ptr_t, ip_fw_ctl_ptr) = NULL;
98 
99 int	(*ip_dn_ctl_ptr)(struct sockopt *);
100 int	(*ip_dn_io_ptr)(struct mbuf **, int, struct ip_fw_args *);
101 void	(*ip_divert_ptr)(struct mbuf *, int);
102 int	(*ng_ipfw_input_p)(struct mbuf **, int,
103 			struct ip_fw_args *, int);
104 
105 #ifdef INET
106 /*
107  * Hooks for multicast routing.  They all default to NULL, so we leave them
108  * uninitialized and rely on the BSS being zeroed.
109  */
110 
111 /*
112  * The socket used to communicate with the multicast routing daemon.
113  */
114 VNET_DEFINE(struct socket *, ip_mrouter);
115 
116 /*
117  * The various mrouter and rsvp functions.
118  */
119 int (*ip_mrouter_set)(struct socket *, struct sockopt *);
120 int (*ip_mrouter_get)(struct socket *, struct sockopt *);
121 int (*ip_mrouter_done)(void);
122 int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
123 		   struct ip_moptions *);
124 int (*mrt_ioctl)(u_long, caddr_t, int);
125 int (*legal_vif_num)(int);
126 u_long (*ip_mcast_src)(int);
127 
128 int (*rsvp_input_p)(struct mbuf **, int *, int);
129 int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
130 void (*ip_rsvp_force_done)(struct socket *);
131 #endif /* INET */
132 
133 u_long	rip_sendspace = 9216;
134 SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
135     &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
136 
137 u_long	rip_recvspace = 9216;
138 SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
139     &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");
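
/*
 * An illustrative userland sketch (not built as part of the kernel):
 * rip_attach() below reserves rip_sendspace and rip_recvspace for every raw
 * socket via soreserve(), so the two sysctls above bound the largest datagram
 * a raw socket may send and its initial receive-buffer reservation.  A
 * program could read and, with privilege, raise the send limit roughly like
 * this; the new value of 65535 is only an example:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		u_long cur, max = 65535;
 *		size_t len = sizeof(cur);
 *
 *		// Read the current limit on outgoing raw IP datagrams.
 *		sysctlbyname("net.inet.raw.maxdgram", &cur, &len, NULL, 0);
 *		printf("net.inet.raw.maxdgram: %lu\n", cur);
 *
 *		// Raise it; fails with EPERM for unprivileged users.
 *		sysctlbyname("net.inet.raw.maxdgram", NULL, NULL,
 *		    &max, sizeof(max));
 *		return (0);
 *	}
 */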
140 
141 /*
142  * Hash functions
143  */
144 
145 #define INP_PCBHASH_RAW_SIZE	256
146 #define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
147         (((proto) + (laddr) + (faddr)) % (mask) + 1)
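
/*
 * A worked example of the macro above (all values hypothetical): the table
 * is created with INP_PCBHASH_RAW_SIZE (256) buckets, so the hash mask ends
 * up being 255.  Because the sum is reduced modulo the mask itself (255, not
 * 256) and then incremented, results always fall in buckets 1..255; bucket 0
 * is reserved by rip_inshash() below for sockets that do not have protocol,
 * local address and foreign address all set.  For instance, with
 * proto = IPPROTO_ICMP (1), laddr = 0x0100007f and faddr = 0x0200007f
 * (127.0.0.1 and 127.0.0.2 as read on a little-endian host):
 *
 *	hash = ((1 + 0x0100007f + 0x0200007f) % 255) + 1 = 4
 */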
148 
149 #ifdef INET
150 static void
151 rip_inshash(struct inpcb *inp)
152 {
153 	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
154 	struct inpcbhead *pcbhash;
155 	int hash;
156 
157 	INP_INFO_WLOCK_ASSERT(pcbinfo);
158 	INP_WLOCK_ASSERT(inp);
159 
160 	if (inp->inp_ip_p != 0 &&
161 	    inp->inp_laddr.s_addr != INADDR_ANY &&
162 	    inp->inp_faddr.s_addr != INADDR_ANY) {
163 		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
164 		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
165 	} else
166 		hash = 0;
167 	pcbhash = &pcbinfo->ipi_hashbase[hash];
168 	LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
169 }
170 
171 static void
172 rip_delhash(struct inpcb *inp)
173 {
174 
175 	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
176 	INP_WLOCK_ASSERT(inp);
177 
178 	LIST_REMOVE(inp, inp_hash);
179 }
180 #endif /* INET */
181 
182 /*
183  * Raw interface to IP protocol.
184  */
185 
186 /*
187  * Initialize raw connection block q.
188  */
189 static void
190 rip_zone_change(void *tag)
191 {
192 
193 	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
194 }
195 
196 static int
197 rip_inpcb_init(void *mem, int size, int flags)
198 {
199 	struct inpcb *inp = mem;
200 
201 	INP_LOCK_INIT(inp, "inp", "rawinp");
202 	return (0);
203 }
204 
205 void
206 rip_init(void)
207 {
208 
209 	in_pcbinfo_init(&V_ripcbinfo, "rip", &V_ripcb, INP_PCBHASH_RAW_SIZE,
210 	    1, "ripcb", rip_inpcb_init, NULL, UMA_ZONE_NOFREE,
211 	    IPI_HASHFIELDS_NONE);
212 	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
213 	    EVENTHANDLER_PRI_ANY);
214 }
215 
216 #ifdef VIMAGE
217 void
218 rip_destroy(void)
219 {
220 
221 	in_pcbinfo_destroy(&V_ripcbinfo);
222 }
223 #endif
224 
225 #ifdef INET
226 static int
227 rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
228     struct sockaddr_in *ripsrc)
229 {
230 	int policyfail = 0;
231 
232 	INP_LOCK_ASSERT(last);
233 
234 #ifdef IPSEC
235 	/* check AH/ESP integrity. */
236 	if (ipsec4_in_reject(n, last)) {
237 		policyfail = 1;
238 	}
239 #endif /* IPSEC */
240 #ifdef MAC
241 	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
242 		policyfail = 1;
243 #endif
244 	/* Check the minimum TTL for the socket. */
245 	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
246 		policyfail = 1;
247 	if (!policyfail) {
248 		struct mbuf *opts = NULL;
249 		struct socket *so;
250 
251 		so = last->inp_socket;
252 		if ((last->inp_flags & INP_CONTROLOPTS) ||
253 		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
254 			ip_savecontrol(last, &opts, ip, n);
255 		SOCKBUF_LOCK(&so->so_rcv);
256 		if (sbappendaddr_locked(&so->so_rcv,
257 		    (struct sockaddr *)ripsrc, n, opts) == 0) {
258 			/* should notify about lost packet */
259 			m_freem(n);
260 			if (opts)
261 				m_freem(opts);
262 			SOCKBUF_UNLOCK(&so->so_rcv);
263 		} else
264 			sorwakeup_locked(so);
265 	} else
266 		m_freem(n);
267 	return (policyfail);
268 }
269 
270 /*
271  * Set up generic address and protocol structures for the raw_input routine,
272  * then pass them along with the mbuf chain.
273  */
274 int
275 rip_input(struct mbuf **mp, int *offp, int proto)
276 {
277 	struct ifnet *ifp;
278 	struct mbuf *m = *mp;
279 	struct ip *ip = mtod(m, struct ip *);
280 	struct inpcb *inp, *last;
281 	struct sockaddr_in ripsrc;
282 	int hash;
283 
284 	*mp = NULL;
285 
286 	bzero(&ripsrc, sizeof(ripsrc));
287 	ripsrc.sin_len = sizeof(ripsrc);
288 	ripsrc.sin_family = AF_INET;
289 	ripsrc.sin_addr = ip->ip_src;
290 	last = NULL;
291 
292 	ifp = m->m_pkthdr.rcvif;
293 	/*
294 	 * Applications on raw sockets expect host byte order.
295 	 */
296 	ip->ip_len = ntohs(ip->ip_len);
297 	ip->ip_off = ntohs(ip->ip_off);
298 
299 	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
300 	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
301 	INP_INFO_RLOCK(&V_ripcbinfo);
302 	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
303 		if (inp->inp_ip_p != proto)
304 			continue;
305 #ifdef INET6
306 		/* XXX inp locking */
307 		if ((inp->inp_vflag & INP_IPV4) == 0)
308 			continue;
309 #endif
310 		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
311 			continue;
312 		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
313 			continue;
314 		if (jailed_without_vnet(inp->inp_cred)) {
315 			/*
316 			 * XXX: If faddr was bound to multicast group,
317 			 * jailed raw socket will drop datagram.
318 			 */
319 			if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
320 				continue;
321 		}
322 		if (last != NULL) {
323 			struct mbuf *n;
324 
325 			n = m_copy(m, 0, (int)M_COPYALL);
326 			if (n != NULL)
327 				(void) rip_append(last, ip, n, &ripsrc);
328 			/* XXX count dropped packet */
329 			INP_RUNLOCK(last);
330 		}
331 		INP_RLOCK(inp);
332 		last = inp;
333 	}
334 	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
335 		if (inp->inp_ip_p && inp->inp_ip_p != proto)
336 			continue;
337 #ifdef INET6
338 		/* XXX inp locking */
339 		if ((inp->inp_vflag & INP_IPV4) == 0)
340 			continue;
341 #endif
342 		if (!in_nullhost(inp->inp_laddr) &&
343 		    !in_hosteq(inp->inp_laddr, ip->ip_dst))
344 			continue;
345 		if (!in_nullhost(inp->inp_faddr) &&
346 		    !in_hosteq(inp->inp_faddr, ip->ip_src))
347 			continue;
348 		if (jailed_without_vnet(inp->inp_cred)) {
349 			/*
350 			 * Allow raw socket in jail to receive multicast;
351 			 * assume process had PRIV_NETINET_RAW at attach,
352 			 * and fall through into normal filter path if so.
353 			 */
354 			if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
355 			    prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
356 				continue;
357 		}
358 		/*
359 		 * If this raw socket has multicast state, and we
360 		 * have received a multicast, check if this socket
361 		 * should receive it, as multicast filtering is now
362 		 * the responsibility of the transport layer.
363 		 */
364 		if (inp->inp_moptions != NULL &&
365 		    IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
366 			/*
367 			 * If the incoming datagram is for IGMP, allow it
368 			 * through unconditionally to the raw socket.
369 			 *
370 			 * In the case of IGMPv2, we may not have explicitly
371 			 * joined the group, and may have set IFF_ALLMULTI
372 			 * on the interface. imo_multi_filter() may discard
373 			 * control traffic we actually need to see.
374 			 *
375  * Userland multicast routing daemons should continue to
376  * filter the control traffic appropriately.
377 			 */
378 			int blocked;
379 
380 			blocked = MCAST_PASS;
381 			if (proto != IPPROTO_IGMP) {
382 				struct sockaddr_in group;
383 
384 				bzero(&group, sizeof(struct sockaddr_in));
385 				group.sin_len = sizeof(struct sockaddr_in);
386 				group.sin_family = AF_INET;
387 				group.sin_addr = ip->ip_dst;
388 
389 				blocked = imo_multi_filter(inp->inp_moptions,
390 				    ifp,
391 				    (struct sockaddr *)&group,
392 				    (struct sockaddr *)&ripsrc);
393 			}
394 
395 			if (blocked != MCAST_PASS) {
396 				IPSTAT_INC(ips_notmember);
397 				continue;
398 			}
399 		}
400 		if (last != NULL) {
401 			struct mbuf *n;
402 
403 			n = m_copy(m, 0, (int)M_COPYALL);
404 			if (n != NULL)
405 				(void) rip_append(last, ip, n, &ripsrc);
406 			/* XXX count dropped packet */
407 			INP_RUNLOCK(last);
408 		}
409 		INP_RLOCK(inp);
410 		last = inp;
411 	}
412 	INP_INFO_RUNLOCK(&V_ripcbinfo);
413 	if (last != NULL) {
414 		if (rip_append(last, ip, m, &ripsrc) != 0)
415 			IPSTAT_INC(ips_delivered);
416 		INP_RUNLOCK(last);
417 	} else {
418 		m_freem(m);
419 		IPSTAT_INC(ips_noproto);
420 		IPSTAT_DEC(ips_delivered);
421 	}
422 	return (IPPROTO_DONE);
423 }
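
/*
 * A minimal userland sketch of the receive side served by rip_input() above,
 * assuming an ICMP raw socket (creation requires privilege, see rip_attach()
 * below).  The process receives the full IP header, with ip_len and ip_off
 * already converted to host byte order as done above; error handling is
 * trimmed:
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/in_systm.h>
 *	#include <netinet/ip.h>
 *	#include <arpa/inet.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		char buf[65535];
 *		struct sockaddr_in from;
 *		socklen_t fromlen = sizeof(from);
 *		struct ip *ip;
 *		ssize_t n;
 *		int s;
 *
 *		s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 *		n = recvfrom(s, buf, sizeof(buf), 0,
 *		    (struct sockaddr *)&from, &fromlen);
 *		if (n >= (ssize_t)sizeof(struct ip)) {
 *			ip = (struct ip *)buf;
 *			// ip_len and ip_off arrive in host byte order.
 *			printf("%zd bytes from %s, ip_len %u, proto %u\n",
 *			    n, inet_ntoa(from.sin_addr),
 *			    (unsigned)ip->ip_len, (unsigned)ip->ip_p);
 *		}
 *		close(s);
 *		return (0);
 *	}
 */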
424 
425 /*
426  * Generate an IP header and pass the packet to ip_output.  Tack on options
427  * the user may have set up with a control call.
428  */
429 int
430 rip_output(struct mbuf *m, struct socket *so, ...)
431 {
432 	struct ip *ip;
433 	int error;
434 	struct inpcb *inp = sotoinpcb(so);
435 	va_list ap;
436 	u_long dst;
437 	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
438 	    IP_ALLOWBROADCAST;
439 
440 	va_start(ap, so);
441 	dst = va_arg(ap, u_long);
442 	va_end(ap);
443 
444 	/*
445 	 * If the user handed us a complete IP packet, use it.  Otherwise,
446 	 * allocate an mbuf for a header and fill it in.
447 	 */
448 	if ((inp->inp_flags & INP_HDRINCL) == 0) {
449 		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
450 			m_freem(m);
451 			return (EMSGSIZE);
452 		}
453 		M_PREPEND(m, sizeof(struct ip), M_NOWAIT);
454 		if (m == NULL)
455 			return (ENOBUFS);
456 
457 		INP_RLOCK(inp);
458 		ip = mtod(m, struct ip *);
459 		ip->ip_tos = inp->inp_ip_tos;
460 		if (inp->inp_flags & INP_DONTFRAG)
461 			ip->ip_off = htons(IP_DF);
462 		else
463 			ip->ip_off = htons(0);
464 		ip->ip_p = inp->inp_ip_p;
465 		ip->ip_len = htons(m->m_pkthdr.len);
466 		ip->ip_src = inp->inp_laddr;
467 		ip->ip_dst.s_addr = dst;
468 		if (jailed(inp->inp_cred)) {
469 			/*
470 			 * prison_local_ip4() would be good enough but would
471 			 * let a source of INADDR_ANY pass, which we do not
472 			 * want to see from jails.
473 			 */
474 			if (ip->ip_src.s_addr == INADDR_ANY) {
475 				error = in_pcbladdr(inp, &ip->ip_dst, &ip->ip_src,
476 				    inp->inp_cred);
477 			} else {
478 				error = prison_local_ip4(inp->inp_cred,
479 				    &ip->ip_src);
480 			}
481 			if (error != 0) {
482 				INP_RUNLOCK(inp);
483 				m_freem(m);
484 				return (error);
485 			}
486 		}
487 		ip->ip_ttl = inp->inp_ip_ttl;
488 	} else {
489 		if (m->m_pkthdr.len > IP_MAXPACKET) {
490 			m_freem(m);
491 			return (EMSGSIZE);
492 		}
493 		INP_RLOCK(inp);
494 		ip = mtod(m, struct ip *);
495 		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
496 		if (error != 0) {
497 			INP_RUNLOCK(inp);
498 			m_freem(m);
499 			return (error);
500 		}
501 
502 		/*
503 		 * Don't allow both user specified and setsockopt options,
504 		 * and don't allow packet length sizes that will crash.
505 		 */
506 		if (((ip->ip_hl != (sizeof (*ip) >> 2)) && inp->inp_options)
507 		    || (ip->ip_len > m->m_pkthdr.len)
508 		    || (ip->ip_len < (ip->ip_hl << 2))) {
509 			INP_RUNLOCK(inp);
510 			m_freem(m);
511 			return (EINVAL);
512 		}
513 		if (ip->ip_id == 0)
514 			ip->ip_id = ip_newid();
515 
516 		/*
517 		 * Applications on raw sockets pass us packets
518 		 * in host byte order.
519 		 */
520 		ip->ip_len = htons(ip->ip_len);
521 		ip->ip_off = htons(ip->ip_off);
522 
523 		/*
524 		 * XXX prevent ip_output from overwriting header fields.
525 		 */
526 		flags |= IP_RAWOUTPUT;
527 		IPSTAT_INC(ips_rawout);
528 	}
529 
530 	if (inp->inp_flags & INP_ONESBCAST)
531 		flags |= IP_SENDONES;
532 
533 #ifdef MAC
534 	mac_inpcb_create_mbuf(inp, m);
535 #endif
536 
537 	error = ip_output(m, inp->inp_options, NULL, flags,
538 	    inp->inp_moptions, inp);
539 	INP_RUNLOCK(inp);
540 	return (error);
541 }
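
/*
 * A hedged userland sketch of the INP_HDRINCL branch of rip_output() above:
 * the process builds the whole IP header itself and, as noted in the
 * function, supplies ip_len and ip_off in host byte order; ip_id may be left
 * zero for the kernel to fill in, and ip_sum is computed by the stack.
 * Addresses and the protocol number are illustrative only (253 is reserved
 * for experimentation by RFC 3692):
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/in_systm.h>
 *	#include <netinet/ip.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct ip hdr;
 *		struct sockaddr_in dst;
 *		int s, on = 1;
 *
 *		s = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
 *		setsockopt(s, IPPROTO_IP, IP_HDRINCL, &on, sizeof(on));
 *
 *		memset(&hdr, 0, sizeof(hdr));
 *		hdr.ip_v = IPVERSION;
 *		hdr.ip_hl = sizeof(hdr) >> 2;
 *		hdr.ip_ttl = 64;
 *		hdr.ip_p = 253;
 *		hdr.ip_len = sizeof(hdr);	// host byte order
 *		hdr.ip_off = 0;			// host byte order
 *		hdr.ip_src.s_addr = htonl(INADDR_LOOPBACK);
 *		hdr.ip_dst.s_addr = htonl(INADDR_LOOPBACK);
 *
 *		memset(&dst, 0, sizeof(dst));
 *		dst.sin_len = sizeof(dst);
 *		dst.sin_family = AF_INET;
 *		dst.sin_addr = hdr.ip_dst;
 *
 *		sendto(s, &hdr, sizeof(hdr), 0,
 *		    (struct sockaddr *)&dst, sizeof(dst));
 *		close(s);
 *		return (0);
 *	}
 */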
542 
543 /*
544  * Raw IP socket option processing.
545  *
546  * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
547  * only be created by a privileged process, and as such, socket option
548  * operations to manage system properties on any raw socket were allowed to
549  * take place without explicit additional access control checks.  However,
550  * raw sockets can now also be created in jail(), and therefore explicit
551  * checks are now required.  Likewise, raw sockets can be used by a process
552  * after it gives up privilege, so some caution is required.  For options
553  * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
554  * performed in ip_ctloutput() and therefore no check occurs here.
555  * Unilaterally checking priv_check() here breaks normal IP socket option
556  * operations on raw sockets.
557  *
558  * When adding new socket options here, make sure to add access control
559  * checks here as necessary.
560  *
561  * XXX-BZ inp locking?
562  */
563 int
564 rip_ctloutput(struct socket *so, struct sockopt *sopt)
565 {
566 	struct	inpcb *inp = sotoinpcb(so);
567 	int	error, optval;
568 
569 	if (sopt->sopt_level != IPPROTO_IP) {
570 		if ((sopt->sopt_level == SOL_SOCKET) &&
571 		    (sopt->sopt_name == SO_SETFIB)) {
572 			inp->inp_inc.inc_fibnum = so->so_fibnum;
573 			return (0);
574 		}
575 		return (EINVAL);
576 	}
577 
578 	error = 0;
579 	switch (sopt->sopt_dir) {
580 	case SOPT_GET:
581 		switch (sopt->sopt_name) {
582 		case IP_HDRINCL:
583 			optval = inp->inp_flags & INP_HDRINCL;
584 			error = sooptcopyout(sopt, &optval, sizeof optval);
585 			break;
586 
587 		case IP_FW3:	/* generic ipfw v.3 functions */
588 		case IP_FW_ADD:	/* ADD actually returns the body... */
589 		case IP_FW_GET:
590 		case IP_FW_TABLE_GETSIZE:
591 		case IP_FW_TABLE_LIST:
592 		case IP_FW_NAT_GET_CONFIG:
593 		case IP_FW_NAT_GET_LOG:
594 			if (V_ip_fw_ctl_ptr != NULL)
595 				error = V_ip_fw_ctl_ptr(sopt);
596 			else
597 				error = ENOPROTOOPT;
598 			break;
599 
600 		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
601 		case IP_DUMMYNET_GET:
602 			if (ip_dn_ctl_ptr != NULL)
603 				error = ip_dn_ctl_ptr(sopt);
604 			else
605 				error = ENOPROTOOPT;
606 			break;
607 
608 		case MRT_INIT:
609 		case MRT_DONE:
610 		case MRT_ADD_VIF:
611 		case MRT_DEL_VIF:
612 		case MRT_ADD_MFC:
613 		case MRT_DEL_MFC:
614 		case MRT_VERSION:
615 		case MRT_ASSERT:
616 		case MRT_API_SUPPORT:
617 		case MRT_API_CONFIG:
618 		case MRT_ADD_BW_UPCALL:
619 		case MRT_DEL_BW_UPCALL:
620 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
621 			if (error != 0)
622 				return (error);
623 			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
624 				EOPNOTSUPP;
625 			break;
626 
627 		default:
628 			error = ip_ctloutput(so, sopt);
629 			break;
630 		}
631 		break;
632 
633 	case SOPT_SET:
634 		switch (sopt->sopt_name) {
635 		case IP_HDRINCL:
636 			error = sooptcopyin(sopt, &optval, sizeof optval,
637 					    sizeof optval);
638 			if (error)
639 				break;
640 			if (optval)
641 				inp->inp_flags |= INP_HDRINCL;
642 			else
643 				inp->inp_flags &= ~INP_HDRINCL;
644 			break;
645 
646 		case IP_FW3:	/* generic ipfw v.3 functions */
647 		case IP_FW_ADD:
648 		case IP_FW_DEL:
649 		case IP_FW_FLUSH:
650 		case IP_FW_ZERO:
651 		case IP_FW_RESETLOG:
652 		case IP_FW_TABLE_ADD:
653 		case IP_FW_TABLE_DEL:
654 		case IP_FW_TABLE_FLUSH:
655 		case IP_FW_NAT_CFG:
656 		case IP_FW_NAT_DEL:
657 			if (V_ip_fw_ctl_ptr != NULL)
658 				error = V_ip_fw_ctl_ptr(sopt);
659 			else
660 				error = ENOPROTOOPT;
661 			break;
662 
663 		case IP_DUMMYNET3:	/* generic dummynet v.3 functions */
664 		case IP_DUMMYNET_CONFIGURE:
665 		case IP_DUMMYNET_DEL:
666 		case IP_DUMMYNET_FLUSH:
667 			if (ip_dn_ctl_ptr != NULL)
668 				error = ip_dn_ctl_ptr(sopt);
669 			else
670 				error = ENOPROTOOPT;
671 			break;
672 
673 		case IP_RSVP_ON:
674 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
675 			if (error != 0)
676 				return (error);
677 			error = ip_rsvp_init(so);
678 			break;
679 
680 		case IP_RSVP_OFF:
681 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
682 			if (error != 0)
683 				return (error);
684 			error = ip_rsvp_done();
685 			break;
686 
687 		case IP_RSVP_VIF_ON:
688 		case IP_RSVP_VIF_OFF:
689 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
690 			if (error != 0)
691 				return (error);
692 			error = ip_rsvp_vif ?
693 				ip_rsvp_vif(so, sopt) : EINVAL;
694 			break;
695 
696 		case MRT_INIT:
697 		case MRT_DONE:
698 		case MRT_ADD_VIF:
699 		case MRT_DEL_VIF:
700 		case MRT_ADD_MFC:
701 		case MRT_DEL_MFC:
702 		case MRT_VERSION:
703 		case MRT_ASSERT:
704 		case MRT_API_SUPPORT:
705 		case MRT_API_CONFIG:
706 		case MRT_ADD_BW_UPCALL:
707 		case MRT_DEL_BW_UPCALL:
708 			error = priv_check(curthread, PRIV_NETINET_MROUTE);
709 			if (error != 0)
710 				return (error);
711 			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
712 					EOPNOTSUPP;
713 			break;
714 
715 		default:
716 			error = ip_ctloutput(so, sopt);
717 			break;
718 		}
719 		break;
720 	}
721 
722 	return (error);
723 }
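
/*
 * Illustrative sketch of the SOL_SOCKET/SO_SETFIB special case handled at
 * the top of rip_ctloutput() above: sosetopt() records the FIB number in the
 * socket and then hands the option down here so that the inpcb's inc_fibnum
 * follows it.  The FIB number is an arbitrary example and must be below
 * net.fibs:
 *
 *	int s, fib = 1;
 *
 *	s = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);
 *	setsockopt(s, SOL_SOCKET, SO_SETFIB, &fib, sizeof(fib));
 */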
724 
725 /*
726  * This function exists solely to receive the PRC_IFDOWN messages which are
727  * sent by if_down().  It looks for an ifaddr whose ifa_addr is sa, and calls
728  * in_ifadown() to remove all routes corresponding to that address.  It also
729  * receives the PRC_IFUP messages from if_up() and reinstalls the interface
730  * routes.
731  */
732 void
733 rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
734 {
735 	struct in_ifaddr *ia;
736 	struct ifnet *ifp;
737 	int err;
738 	int flags;
739 
740 	switch (cmd) {
741 	case PRC_IFDOWN:
742 		IN_IFADDR_RLOCK();
743 		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
744 			if (ia->ia_ifa.ifa_addr == sa &&
745 			    (ia->ia_flags & IFA_ROUTE)) {
746 				ifa_ref(&ia->ia_ifa);
747 				IN_IFADDR_RUNLOCK();
748 				/*
749 				 * in_scrubprefix() kills the interface route.
750 				 */
751 				in_scrubprefix(ia, 0);
752 				/*
753 				 * in_ifadown gets rid of all the rest of the
754 				 * routes.  This is not quite the right thing
755 				 * to do, but at least if we are running a
756 				 * routing process they will come back.
757 				 */
758 				in_ifadown(&ia->ia_ifa, 0);
759 				ifa_free(&ia->ia_ifa);
760 				break;
761 			}
762 		}
763 		if (ia == NULL)		/* If ia matched, already unlocked. */
764 			IN_IFADDR_RUNLOCK();
765 		break;
766 
767 	case PRC_IFUP:
768 		IN_IFADDR_RLOCK();
769 		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
770 			if (ia->ia_ifa.ifa_addr == sa)
771 				break;
772 		}
773 		if (ia == NULL || (ia->ia_flags & IFA_ROUTE)) {
774 			IN_IFADDR_RUNLOCK();
775 			return;
776 		}
777 		ifa_ref(&ia->ia_ifa);
778 		IN_IFADDR_RUNLOCK();
779 		flags = RTF_UP;
780 		ifp = ia->ia_ifa.ifa_ifp;
781 
782 		if ((ifp->if_flags & IFF_LOOPBACK) ||
783 		    (ifp->if_flags & IFF_POINTOPOINT))
784 			flags |= RTF_HOST;
785 
786 		err = ifa_del_loopback_route((struct ifaddr *)ia, sa);
787 
788 		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
789 		if (err == 0)
790 			ia->ia_flags |= IFA_ROUTE;
791 
792 		err = ifa_add_loopback_route((struct ifaddr *)ia, sa);
793 
794 		ifa_free(&ia->ia_ifa);
795 		break;
796 	}
797 }
798 
799 static int
800 rip_attach(struct socket *so, int proto, struct thread *td)
801 {
802 	struct inpcb *inp;
803 	int error;
804 
805 	inp = sotoinpcb(so);
806 	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));
807 
808 	error = priv_check(td, PRIV_NETINET_RAW);
809 	if (error)
810 		return (error);
811 	if (proto >= IPPROTO_MAX || proto < 0)
812 		return (EPROTONOSUPPORT);
813 	error = soreserve(so, rip_sendspace, rip_recvspace);
814 	if (error)
815 		return (error);
816 	INP_INFO_WLOCK(&V_ripcbinfo);
817 	error = in_pcballoc(so, &V_ripcbinfo);
818 	if (error) {
819 		INP_INFO_WUNLOCK(&V_ripcbinfo);
820 		return (error);
821 	}
822 	inp = (struct inpcb *)so->so_pcb;
823 	inp->inp_vflag |= INP_IPV4;
824 	inp->inp_ip_p = proto;
825 	inp->inp_ip_ttl = V_ip_defttl;
826 	rip_inshash(inp);
827 	INP_INFO_WUNLOCK(&V_ripcbinfo);
828 	INP_WUNLOCK(inp);
829 	return (0);
830 }
831 
832 static void
833 rip_detach(struct socket *so)
834 {
835 	struct inpcb *inp;
836 
837 	inp = sotoinpcb(so);
838 	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
839 	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
840 	    ("rip_detach: not closed"));
841 
842 	INP_INFO_WLOCK(&V_ripcbinfo);
843 	INP_WLOCK(inp);
844 	rip_delhash(inp);
845 	if (so == V_ip_mrouter && ip_mrouter_done)
846 		ip_mrouter_done();
847 	if (ip_rsvp_force_done)
848 		ip_rsvp_force_done(so);
849 	if (so == V_ip_rsvpd)
850 		ip_rsvp_done();
851 	in_pcbdetach(inp);
852 	in_pcbfree(inp);
853 	INP_INFO_WUNLOCK(&V_ripcbinfo);
854 }
855 
856 static void
857 rip_dodisconnect(struct socket *so, struct inpcb *inp)
858 {
859 	struct inpcbinfo *pcbinfo;
860 
861 	pcbinfo = inp->inp_pcbinfo;
862 	INP_INFO_WLOCK(pcbinfo);
863 	INP_WLOCK(inp);
864 	rip_delhash(inp);
865 	inp->inp_faddr.s_addr = INADDR_ANY;
866 	rip_inshash(inp);
867 	SOCK_LOCK(so);
868 	so->so_state &= ~SS_ISCONNECTED;
869 	SOCK_UNLOCK(so);
870 	INP_WUNLOCK(inp);
871 	INP_INFO_WUNLOCK(pcbinfo);
872 }
873 
874 static void
875 rip_abort(struct socket *so)
876 {
877 	struct inpcb *inp;
878 
879 	inp = sotoinpcb(so);
880 	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));
881 
882 	rip_dodisconnect(so, inp);
883 }
884 
885 static void
886 rip_close(struct socket *so)
887 {
888 	struct inpcb *inp;
889 
890 	inp = sotoinpcb(so);
891 	KASSERT(inp != NULL, ("rip_close: inp == NULL"));
892 
893 	rip_dodisconnect(so, inp);
894 }
895 
896 static int
897 rip_disconnect(struct socket *so)
898 {
899 	struct inpcb *inp;
900 
901 	if ((so->so_state & SS_ISCONNECTED) == 0)
902 		return (ENOTCONN);
903 
904 	inp = sotoinpcb(so);
905 	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));
906 
907 	rip_dodisconnect(so, inp);
908 	return (0);
909 }
910 
911 static int
912 rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
913 {
914 	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
915 	struct inpcb *inp;
916 	int error;
917 
918 	if (nam->sa_len != sizeof(*addr))
919 		return (EINVAL);
920 
921 	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
922 	if (error != 0)
923 		return (error);
924 
925 	inp = sotoinpcb(so);
926 	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));
927 
928 	if (TAILQ_EMPTY(&V_ifnet) ||
929 	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
930 	    (addr->sin_addr.s_addr &&
931 	     (inp->inp_flags & INP_BINDANY) == 0 &&
932 	     ifa_ifwithaddr_check((struct sockaddr *)addr) == 0))
933 		return (EADDRNOTAVAIL);
934 
935 	INP_INFO_WLOCK(&V_ripcbinfo);
936 	INP_WLOCK(inp);
937 	rip_delhash(inp);
938 	inp->inp_laddr = addr->sin_addr;
939 	rip_inshash(inp);
940 	INP_WUNLOCK(inp);
941 	INP_INFO_WUNLOCK(&V_ripcbinfo);
942 	return (0);
943 }
944 
945 static int
946 rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
947 {
948 	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
949 	struct inpcb *inp;
950 
951 	if (nam->sa_len != sizeof(*addr))
952 		return (EINVAL);
953 	if (TAILQ_EMPTY(&V_ifnet))
954 		return (EADDRNOTAVAIL);
955 	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
956 		return (EAFNOSUPPORT);
957 
958 	inp = sotoinpcb(so);
959 	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));
960 
961 	INP_INFO_WLOCK(&V_ripcbinfo);
962 	INP_WLOCK(inp);
963 	rip_delhash(inp);
964 	inp->inp_faddr = addr->sin_addr;
965 	rip_inshash(inp);
966 	soisconnected(so);
967 	INP_WUNLOCK(inp);
968 	INP_INFO_WUNLOCK(&V_ripcbinfo);
969 	return (0);
970 }
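
/*
 * Userland counterpart sketch for rip_bind() and rip_connect() above:
 * bind(2) pins inp_laddr and connect(2) pins inp_faddr.  Once protocol,
 * local address and foreign address are all set (assuming the socket was
 * created with a non-zero protocol), rip_inshash() moves the socket out of
 * wildcard bucket 0 into the fully-specified bucket that the first loop in
 * rip_input() searches.  Addresses are arbitrary examples and the usual
 * socket headers are assumed:
 *
 *	struct sockaddr_in sin;
 *
 *	memset(&sin, 0, sizeof(sin));
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_family = AF_INET;
 *	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *	bind(s, (struct sockaddr *)&sin, sizeof(sin));
 *
 *	sin.sin_addr.s_addr = inet_addr("127.0.0.2");
 *	connect(s, (struct sockaddr *)&sin, sizeof(sin));
 */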
971 
972 static int
973 rip_shutdown(struct socket *so)
974 {
975 	struct inpcb *inp;
976 
977 	inp = sotoinpcb(so);
978 	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));
979 
980 	INP_WLOCK(inp);
981 	socantsendmore(so);
982 	INP_WUNLOCK(inp);
983 	return (0);
984 }
985 
986 static int
987 rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
988     struct mbuf *control, struct thread *td)
989 {
990 	struct inpcb *inp;
991 	u_long dst;
992 
993 	inp = sotoinpcb(so);
994 	KASSERT(inp != NULL, ("rip_send: inp == NULL"));
995 
996 	/*
997 	 * Note: 'dst' reads below are unlocked.
998 	 */
999 	if (so->so_state & SS_ISCONNECTED) {
1000 		if (nam) {
1001 			m_freem(m);
1002 			return (EISCONN);
1003 		}
1004 		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
1005 	} else {
1006 		if (nam == NULL) {
1007 			m_freem(m);
1008 			return (ENOTCONN);
1009 		}
1010 		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
1011 	}
1012 	return (rip_output(m, so, dst));
1013 }
1014 #endif /* INET */
1015 
1016 static int
1017 rip_pcblist(SYSCTL_HANDLER_ARGS)
1018 {
1019 	int error, i, n;
1020 	struct inpcb *inp, **inp_list;
1021 	inp_gen_t gencnt;
1022 	struct xinpgen xig;
1023 
1024 	/*
1025 	 * The process of preparing the PCB list is too time-consuming and
1026 	 * resource-intensive to repeat twice on every request.
1027 	 */
1028 	if (req->oldptr == NULL) {
1029 		n = V_ripcbinfo.ipi_count;
1030 		n += imax(n / 8, 10);
1031 		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
1032 		return (0);
1033 	}
1034 
1035 	if (req->newptr != NULL)
1036 		return (EPERM);
1037 
1038 	/*
1039 	 * OK, now we're committed to doing something.
1040 	 */
1041 	INP_INFO_RLOCK(&V_ripcbinfo);
1042 	gencnt = V_ripcbinfo.ipi_gencnt;
1043 	n = V_ripcbinfo.ipi_count;
1044 	INP_INFO_RUNLOCK(&V_ripcbinfo);
1045 
1046 	xig.xig_len = sizeof xig;
1047 	xig.xig_count = n;
1048 	xig.xig_gen = gencnt;
1049 	xig.xig_sogen = so_gencnt;
1050 	error = SYSCTL_OUT(req, &xig, sizeof xig);
1051 	if (error)
1052 		return (error);
1053 
1054 	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
1055 	if (inp_list == NULL)
1056 		return (ENOMEM);
1057 
1058 	INP_INFO_RLOCK(&V_ripcbinfo);
1059 	for (inp = LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
1060 	     inp = LIST_NEXT(inp, inp_list)) {
1061 		INP_WLOCK(inp);
1062 		if (inp->inp_gencnt <= gencnt &&
1063 		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
1064 			in_pcbref(inp);
1065 			inp_list[i++] = inp;
1066 		}
1067 		INP_WUNLOCK(inp);
1068 	}
1069 	INP_INFO_RUNLOCK(&V_ripcbinfo);
1070 	n = i;
1071 
1072 	error = 0;
1073 	for (i = 0; i < n; i++) {
1074 		inp = inp_list[i];
1075 		INP_RLOCK(inp);
1076 		if (inp->inp_gencnt <= gencnt) {
1077 			struct xinpcb xi;
1078 
1079 			bzero(&xi, sizeof(xi));
1080 			xi.xi_len = sizeof xi;
1081 			/* XXX should avoid extra copy */
1082 			bcopy(inp, &xi.xi_inp, sizeof *inp);
1083 			if (inp->inp_socket)
1084 				sotoxsocket(inp->inp_socket, &xi.xi_socket);
1085 			INP_RUNLOCK(inp);
1086 			error = SYSCTL_OUT(req, &xi, sizeof xi);
1087 		} else
1088 			INP_RUNLOCK(inp);
1089 	}
1090 	INP_INFO_WLOCK(&V_ripcbinfo);
1091 	for (i = 0; i < n; i++) {
1092 		inp = inp_list[i];
1093 		INP_RLOCK(inp);
1094 		if (!in_pcbrele_rlocked(inp))
1095 			INP_RUNLOCK(inp);
1096 	}
1097 	INP_INFO_WUNLOCK(&V_ripcbinfo);
1098 
1099 	if (!error) {
1100 		/*
1101 		 * Give the user an updated idea of our state.  If the
1102 		 * generation differs from what we told her before, she knows
1103 		 * that something happened while we were processing this
1104 		 * request, and it might be necessary to retry.
1105 		 */
1106 		INP_INFO_RLOCK(&V_ripcbinfo);
1107 		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
1108 		xig.xig_sogen = so_gencnt;
1109 		xig.xig_count = V_ripcbinfo.ipi_count;
1110 		INP_INFO_RUNLOCK(&V_ripcbinfo);
1111 		error = SYSCTL_OUT(req, &xig, sizeof xig);
1112 	}
1113 	free(inp_list, M_TEMP);
1114 	return (error);
1115 }
1116 
1117 SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist,
1118     CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
1119     rip_pcblist, "S,xinpcb", "List of active raw IP sockets");
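
/*
 * Consumer-side sketch for the sysctl above; this is essentially what
 * netstat(1) does.  The exported buffer starts with a struct xinpgen,
 * carries one struct xinpcb per raw socket, and ends with a second
 * struct xinpgen whose generation count lets the reader detect changes made
 * while the list was being built.  Error handling and the retry logic are
 * omitted:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int
 *	main(void)
 *	{
 *		char *buf;
 *		size_t len = 0;
 *
 *		// First call sizes the buffer, second fetches the records.
 *		sysctlbyname("net.inet.raw.pcblist", NULL, &len, NULL, 0);
 *		buf = malloc(len);
 *		sysctlbyname("net.inet.raw.pcblist", buf, &len, NULL, 0);
 *		printf("net.inet.raw.pcblist: %zu bytes\n", len);
 *		free(buf);
 *		return (0);
 *	}
 */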
1120 
1121 #ifdef INET
1122 struct pr_usrreqs rip_usrreqs = {
1123 	.pru_abort =		rip_abort,
1124 	.pru_attach =		rip_attach,
1125 	.pru_bind =		rip_bind,
1126 	.pru_connect =		rip_connect,
1127 	.pru_control =		in_control,
1128 	.pru_detach =		rip_detach,
1129 	.pru_disconnect =	rip_disconnect,
1130 	.pru_peeraddr =		in_getpeeraddr,
1131 	.pru_send =		rip_send,
1132 	.pru_shutdown =		rip_shutdown,
1133 	.pru_sockaddr =		in_getsockaddr,
1134 	.pru_sosetlabel =	in_pcbsosetlabel,
1135 	.pru_close =		rip_close,
1136 };
1137 #endif /* INET */
1138