/* xref: /freebsd/sys/netinet/raw_ip.c (revision 195ebc7e9e4b129de810833791a19dfb4349d6a9) */
/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_route.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vimage.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>

#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>
#include <netinet/vinet.h>

#ifdef IPSEC
#include <netipsec/ipsec.h>
#endif /*IPSEC*/

#include <security/mac/mac_framework.h>

#ifdef VIMAGE_GLOBALS
struct	inpcbhead ripcb;
struct	inpcbinfo ripcbinfo;
#endif

/* control hooks for ipfw and dummynet */
ip_fw_ctl_t *ip_fw_ctl_ptr = NULL;
ip_dn_ctl_t *ip_dn_ctl_ptr = NULL;

/*
 * Hooks for multicast routing. They all default to NULL, so leave them not
 * initialized and rely on BSS being set to 0.
 */

/*
 * The socket used to communicate with the multicast routing daemon.
 */
#ifdef VIMAGE_GLOBALS
struct socket  *ip_mrouter;
#endif

/*
 * The various mrouter and rsvp functions.
 */
int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
		   struct ip_moptions *);
int (*mrt_ioctl)(int, caddr_t, int);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);

void (*rsvp_input_p)(struct mbuf *m, int off);
int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
void (*ip_rsvp_force_done)(struct socket *);

/*
 * Hash functions
 */

#define INP_PCBHASH_RAW_SIZE	256
#define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
        (((proto) + (laddr) + (faddr)) % (mask) + 1)

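/*
 * Insert an inpcb into the raw IP hash table.  Fully specified sockets
 * (non-zero protocol, bound local and foreign addresses) are hashed into a
 * regular bucket; all others go to bucket 0, which rip_input() always scans
 * as the wildcard chain.
 */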
static void
rip_inshash(struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	struct inpcbhead *pcbhash;
	int hash;

	INP_INFO_WLOCK_ASSERT(pcbinfo);
	INP_WLOCK_ASSERT(inp);

	if (inp->inp_ip_p != 0 &&
	    inp->inp_laddr.s_addr != INADDR_ANY &&
	    inp->inp_faddr.s_addr != INADDR_ANY) {
		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
	} else
		hash = 0;
	pcbhash = &pcbinfo->ipi_hashbase[hash];
	LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
}

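/*
 * Remove an inpcb from the hash chain it was placed on by rip_inshash().
 */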
static void
rip_delhash(struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
	INP_WLOCK_ASSERT(inp);

	LIST_REMOVE(inp, inp_hash);
}

/*
 * Raw interface to IP protocol.
 */

/*
 * Adjust the raw inpcb zone limit when the maxsockets limit changes.
 */
static void
rip_zone_change(void *tag)
{
	INIT_VNET_INET(curvnet);

	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
}

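/*
 * UMA zone init routine for raw inpcbs; sets up the per-inpcb lock.
 */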
static int
rip_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "rawinp");
	return (0);
}

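/*
 * Initialize the raw connection block queue, hash tables and inpcb zone.
 */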
void
rip_init(void)
{
	INIT_VNET_INET(curvnet);

	INP_INFO_LOCK_INIT(&V_ripcbinfo, "rip");
	LIST_INIT(&V_ripcb);
#ifdef VIMAGE
	V_ripcbinfo.ipi_vnet = curvnet;
#endif
	V_ripcbinfo.ipi_listhead = &V_ripcb;
	V_ripcbinfo.ipi_hashbase =
	    hashinit(INP_PCBHASH_RAW_SIZE, M_PCB, &V_ripcbinfo.ipi_hashmask);
	V_ripcbinfo.ipi_porthashbase =
	    hashinit(1, M_PCB, &V_ripcbinfo.ipi_porthashmask);
	V_ripcbinfo.ipi_zone = uma_zcreate("ripcb", sizeof(struct inpcb),
	    NULL, NULL, rip_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}

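/*
 * Deliver an inbound datagram to the raw socket bound to 'last', after
 * applying IPsec, MAC and minimum-TTL policy checks; the mbuf is consumed
 * in all cases.  Returns non-zero if the packet was rejected by policy.
 */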
static int
rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
    struct sockaddr_in *ripsrc)
{
	int policyfail = 0;

	INP_RLOCK_ASSERT(last);

#ifdef IPSEC
	/* check AH/ESP integrity. */
	if (ipsec4_in_reject(n, last)) {
		policyfail = 1;
	}
#endif /* IPSEC */
#ifdef MAC
	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
		policyfail = 1;
#endif
	/* Check the minimum TTL for socket. */
	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
		policyfail = 1;
	if (!policyfail) {
		struct mbuf *opts = NULL;
		struct socket *so;

		so = last->inp_socket;
		if ((last->inp_flags & INP_CONTROLOPTS) ||
		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
			ip_savecontrol(last, &opts, ip, n);
		SOCKBUF_LOCK(&so->so_rcv);
		if (sbappendaddr_locked(&so->so_rcv,
		    (struct sockaddr *)ripsrc, n, opts) == 0) {
			/* should notify about lost packet */
			m_freem(n);
			if (opts)
				m_freem(opts);
			SOCKBUF_UNLOCK(&so->so_rcv);
		} else
			sorwakeup_locked(so);
	} else
		m_freem(n);
	return (policyfail);
}

/*
 * Setup generic address and protocol structures for raw_input routine, then
 * pass them along with mbuf chain.
 */
void
rip_input(struct mbuf *m, int off)
{
	INIT_VNET_INET(curvnet);
	struct ifnet *ifp;
	struct ip *ip = mtod(m, struct ip *);
	int proto = ip->ip_p;
	struct inpcb *inp, *last;
	struct sockaddr_in ripsrc;
	int hash;

	bzero(&ripsrc, sizeof(ripsrc));
	ripsrc.sin_len = sizeof(ripsrc);
	ripsrc.sin_family = AF_INET;
	ripsrc.sin_addr = ip->ip_src;
	last = NULL;

	ifp = m->m_pkthdr.rcvif;

	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
	INP_INFO_RLOCK(&V_ripcbinfo);
	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
		if (inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (jailed(inp->inp_cred)) {
			/*
			 * XXX: If faddr was bound to multicast group,
			 * jailed raw socket will drop datagram.
			 */
			if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				continue;
		}
		if (last != NULL) {
			struct mbuf *n;

			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
		}
		INP_RLOCK(inp);
		last = inp;
	}
	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
		if (inp->inp_ip_p && inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (!in_nullhost(inp->inp_laddr) &&
		    !in_hosteq(inp->inp_laddr, ip->ip_dst))
			continue;
		if (!in_nullhost(inp->inp_faddr) &&
		    !in_hosteq(inp->inp_faddr, ip->ip_src))
			continue;
		if (jailed(inp->inp_cred)) {
			/*
			 * Allow raw socket in jail to receive multicast;
			 * assume process had PRIV_NETINET_RAW at attach,
			 * and fall through into normal filter path if so.
			 */
			if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) &&
			    prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
				continue;
		}
		/*
		 * If this raw socket has multicast state, and we
		 * have received a multicast, check if this socket
		 * should receive it, as multicast filtering is now
		 * the responsibility of the transport layer.
		 */
		if (inp->inp_moptions != NULL &&
		    IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
			struct sockaddr_in group;
			int blocked;

			bzero(&group, sizeof(struct sockaddr_in));
			group.sin_len = sizeof(struct sockaddr_in);
			group.sin_family = AF_INET;
			group.sin_addr = ip->ip_dst;

			blocked = imo_multi_filter(inp->inp_moptions, ifp,
			    (struct sockaddr *)&group,
			    (struct sockaddr *)&ripsrc);
			if (blocked != MCAST_PASS) {
				IPSTAT_INC(ips_notmember);
				continue;
			}
		}
		if (last != NULL) {
			struct mbuf *n;

			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
		}
		INP_RLOCK(inp);
		last = inp;
	}
	INP_INFO_RUNLOCK(&V_ripcbinfo);
	if (last != NULL) {
		if (rip_append(last, ip, m, &ripsrc) != 0)
			IPSTAT_INC(ips_delivered);
		INP_RUNLOCK(last);
	} else {
		m_freem(m);
		IPSTAT_INC(ips_noproto);
		IPSTAT_DEC(ips_delivered);
	}
}

/*
 * Generate IP header and pass packet to ip_output.  Tack on options user may
 * have setup with control call.
 */
int
rip_output(struct mbuf *m, struct socket *so, u_long dst)
{
	INIT_VNET_INET(so->so_vnet);
	struct ip *ip;
	int error;
	struct inpcb *inp = sotoinpcb(so);
	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
	    IP_ALLOWBROADCAST;

	/*
	 * If the user handed us a complete IP packet, use it.  Otherwise,
	 * allocate an mbuf for a header and fill it in.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);

		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		if (inp->inp_flags & INP_DONTFRAG)
			ip->ip_off = IP_DF;
		else
			ip->ip_off = 0;
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = m->m_pkthdr.len;
		ip->ip_src = inp->inp_laddr;
		error = prison_get_ip4(inp->inp_cred, &ip->ip_src);
		if (error != 0) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (error);
		}
		ip->ip_dst.s_addr = dst;
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
		if (error != 0) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (error);
		}

		/*
		 * Don't allow both user specified and setsockopt options,
		 * and don't allow packet length sizes that will crash.
		 */
		if (((ip->ip_hl != (sizeof (*ip) >> 2)) && inp->inp_options)
		    || (ip->ip_len > m->m_pkthdr.len)
		    || (ip->ip_len < (ip->ip_hl << 2))) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (EINVAL);
		}
		if (ip->ip_id == 0)
			ip->ip_id = ip_newid();

		/*
		 * XXX prevent ip_output from overwriting header fields.
		 */
		flags |= IP_RAWOUTPUT;
		IPSTAT_INC(ips_rawout);
	}

	if (inp->inp_flags & INP_ONESBCAST)
		flags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	error = ip_output(m, inp->inp_options, NULL, flags,
	    inp->inp_moptions, inp);
	INP_RUNLOCK(inp);
	return (error);
}

/*
 * Raw IP socket option processing.
 *
 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
 * only be created by a privileged process, and as such, socket option
 * operations to manage system properties on any raw socket were allowed to
 * take place without explicit additional access control checks.  However,
 * raw sockets can now also be created in jail(), and therefore explicit
 * checks are now required.  Likewise, raw sockets can be used by a process
 * after it gives up privilege, so some caution is required.  For options
 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
 * performed in ip_ctloutput() and therefore no check occurs here.
 * Unilaterally checking priv_check() here breaks normal IP socket option
 * operations on raw sockets.
 *
 * When adding new socket options here, make sure to add access control
 * checks here as necessary.
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct	inpcb *inp = sotoinpcb(so);
	int	error, optval;

	if (sopt->sopt_level != IPPROTO_IP) {
		if ((sopt->sopt_level == SOL_SOCKET) &&
		    (sopt->sopt_name == SO_SETFIB)) {
			inp->inp_inc.inc_fibnum = so->so_fibnum;
			return (0);
		}
		return (EINVAL);
	}

	error = 0;
	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_FW_ADD:	/* ADD actually returns the body... */
		case IP_FW_GET:
		case IP_FW_TABLE_GETSIZE:
		case IP_FW_TABLE_LIST:
		case IP_FW_NAT_GET_CONFIG:
		case IP_FW_NAT_GET_LOG:
			if (ip_fw_ctl_ptr != NULL)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET_GET:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
				EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_FW_TABLE_ADD:
		case IP_FW_TABLE_DEL:
		case IP_FW_TABLE_FLUSH:
		case IP_FW_NAT_CFG:
		case IP_FW_NAT_DEL:
			if (ip_fw_ctl_ptr != NULL)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_RSVP_ON:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_done();
			break;

		case IP_RSVP_VIF_ON:
		case IP_RSVP_VIF_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_vif ?
				ip_rsvp_vif(so, sopt) : EINVAL;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
					EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}

/*
 * This function exists solely to receive the PRC_IFDOWN messages which are
 * sent by if_down().  It looks for an ifaddr whose ifa_addr is sa, and calls
 * in_ifadown() to remove all routes corresponding to that address.  It also
 * receives the PRC_IFUP messages from if_up() and reinstalls the interface
 * routes.
 */
void
rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	INIT_VNET_INET(curvnet);
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				/*
				 * in_ifscrub kills the interface route.
				 */
				in_ifscrub(ia->ia_ifp, ia);
				/*
				 * in_ifadown gets rid of all the rest of the
				 * routes.  This is not quite the right thing
				 * to do, but at least if we are running a
				 * routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 0);
				break;
			}
		}
		break;

	case PRC_IFUP:
		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		if (ia == 0 || (ia->ia_flags & IFA_ROUTE))
			return;
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;
		break;
	}
}

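/*
 * Default send and receive socket buffer sizes for raw IP sockets, exported
 * as the net.inet.raw.maxdgram and net.inet.raw.recvspace sysctls.
 */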
u_long	rip_sendspace = 9216;
u_long	rip_recvspace = 9216;

SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");

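/*
 * Attach a new raw IP socket, e.g. socket(AF_INET, SOCK_RAW, proto).
 * The calling thread must hold the PRIV_NETINET_RAW privilege.
 */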
static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
	INIT_VNET_INET(so->so_vnet);
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));

	error = priv_check(td, PRIV_NETINET_RAW);
	if (error)
		return (error);
	if (proto >= IPPROTO_MAX || proto < 0)
		return (EPROTONOSUPPORT);
	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(&V_ripcbinfo);
	error = in_pcballoc(so, &V_ripcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(&V_ripcbinfo);
		return (error);
	}
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = V_ip_defttl;
	rip_inshash(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	INP_WUNLOCK(inp);
	return (0);
}

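/*
 * Detach a raw IP socket, shutting down any multicast routing or RSVP state
 * that was bound to it.
 */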
static void
rip_detach(struct socket *so)
{
	INIT_VNET_INET(so->so_vnet);
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
	    ("rip_detach: not closed"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	if (so == V_ip_mrouter && ip_mrouter_done)
		ip_mrouter_done();
	if (ip_rsvp_force_done)
		ip_rsvp_force_done(so);
	if (so == V_ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
}

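/*
 * Common disconnect path: clear the foreign address, rehash the inpcb and
 * mark the socket as no longer connected.
 */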
static void
rip_dodisconnect(struct socket *so, struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
	INP_WLOCK_ASSERT(inp);

	rip_delhash(inp);
	inp->inp_faddr.s_addr = INADDR_ANY;
	rip_inshash(inp);
	SOCK_LOCK(so);
	so->so_state &= ~SS_ISCONNECTED;
	SOCK_UNLOCK(so);
}

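/*
 * rip_abort(), rip_close() and rip_disconnect() all reduce to the common
 * disconnect path above.
 */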
static void
rip_abort(struct socket *so)
{
	INIT_VNET_INET(so->so_vnet);
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_dodisconnect(so, inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
}

static void
rip_close(struct socket *so)
{
	INIT_VNET_INET(so->so_vnet);
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_close: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_dodisconnect(so, inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
}

static int
rip_disconnect(struct socket *so)
{
	INIT_VNET_INET(so->so_vnet);
	struct inpcb *inp;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_dodisconnect(so, inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

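/*
 * Bind a raw socket to a local address.  Unless INP_BINDANY is set, the
 * address must be configured on a local interface.
 */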
static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	INIT_VNET_NET(so->so_vnet);
	INIT_VNET_INET(so->so_vnet);
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;
	int error;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);

	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
	if (error != 0)
		return (error);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));

	if (TAILQ_EMPTY(&V_ifnet) ||
	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
	    (addr->sin_addr.s_addr &&
	     (inp->inp_flags & INP_BINDANY) == 0 &&
	     ifa_ifwithaddr((struct sockaddr *)addr) == NULL))
		return (EADDRNOTAVAIL);

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_laddr = addr->sin_addr;
	rip_inshash(inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

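/*
 * Connect a raw socket to a foreign address; subsequent sends use that
 * address as the destination.
 */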
static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	INIT_VNET_NET(so->so_vnet);
	INIT_VNET_INET(so->so_vnet);
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);
	if (TAILQ_EMPTY(&V_ifnet))
		return (EADDRNOTAVAIL);
	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
		return (EAFNOSUPPORT);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr = addr->sin_addr;
	rip_inshash(inp);
	soisconnected(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

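/*
 * Mark the socket as unable to send any more data.
 */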
static int
rip_shutdown(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));

	INP_WLOCK(inp);
	socantsendmore(so);
	INP_WUNLOCK(inp);
	return (0);
}

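/*
 * Send a datagram on a raw socket: pick the destination from the socket if
 * connected, otherwise from the supplied address, and hand the mbuf to
 * rip_output().
 */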
static int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct thread *td)
{
	struct inpcb *inp;
	u_long dst;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_send: inp == NULL"));

	/*
	 * Note: 'dst' reads below are unlocked.
	 */
	if (so->so_state & SS_ISCONNECTED) {
		if (nam) {
			m_freem(m);
			return (EISCONN);
		}
		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
	} else {
		if (nam == NULL) {
			m_freem(m);
			return (ENOTCONN);
		}
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	return (rip_output(m, so, dst));
}

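/*
 * Export the list of raw IP inpcbs via the net.inet.raw.pcblist sysctl.
 */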
static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
	INIT_VNET_INET(curvnet);
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = V_ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n/8) * sizeof(struct xinpcb);
		return (0);
	}

	if (req->newptr != 0)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_RLOCK(&V_ripcbinfo);
	gencnt = V_ripcbinfo.ipi_gencnt;
	n = V_ripcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&V_ripcbinfo);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return (ENOMEM);

	INP_INFO_RLOCK(&V_ripcbinfo);
	for (inp = LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
			/* XXX held references? */
			inp_list[i++] = inp;
		}
		INP_RUNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&V_ripcbinfo);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			bcopy(inp, &xi.xi_inp, sizeof *inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		} else
			INP_RUNLOCK(inp);
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.  If the
		 * generation differs from what we told her before, she knows
		 * that something happened while we were processing this
		 * request, and it might be necessary to retry.
		 */
		INP_INFO_RLOCK(&V_ripcbinfo);
		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_ripcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&V_ripcbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD, 0, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");

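/*
 * User request handlers for raw IP sockets.
 */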
struct pr_usrreqs rip_usrreqs = {
	.pru_abort =		rip_abort,
	.pru_attach =		rip_attach,
	.pru_bind =		rip_bind,
	.pru_connect =		rip_connect,
	.pru_control =		in_control,
	.pru_detach =		rip_detach,
	.pru_disconnect =	rip_disconnect,
	.pru_peeraddr =		in_getpeeraddr,
	.pru_send =		rip_send,
	.pru_shutdown =		rip_shutdown,
	.pru_sockaddr =		in_getsockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel,
	.pru_close =		rip_close,
};
1059