xref: /freebsd/sys/netinet/raw_ip.c (revision b3aaa0cc21c63d388230c7ef2a80abd631ff20d5)
/*-
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)raw_ip.c	8.7 (Berkeley) 5/15/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vimage.h>

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_mroute.h>

#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>
#include <netinet/vinet.h>

#ifdef IPSEC
#include <netipsec/ipsec.h>
#endif /*IPSEC*/

#include <security/mac/mac_framework.h>

#ifdef VIMAGE_GLOBALS
struct	inpcbhead ripcb;
struct	inpcbinfo ripcbinfo;
#endif

/* Control hooks for ipfw and dummynet. */
ip_fw_ctl_t *ip_fw_ctl_ptr = NULL;
ip_dn_ctl_t *ip_dn_ctl_ptr = NULL;

/*
 * Hooks for multicast routing.  They all default to NULL, so leave them
 * uninitialized and rely on BSS being zeroed.
 */

/*
 * The socket used to communicate with the multicast routing daemon.
 */
#ifdef VIMAGE_GLOBALS
struct socket  *ip_mrouter;
#endif

/*
 * The various mrouter and rsvp functions.
 */
int (*ip_mrouter_set)(struct socket *, struct sockopt *);
int (*ip_mrouter_get)(struct socket *, struct sockopt *);
int (*ip_mrouter_done)(void);
int (*ip_mforward)(struct ip *, struct ifnet *, struct mbuf *,
		   struct ip_moptions *);
int (*mrt_ioctl)(int, caddr_t, int);
int (*legal_vif_num)(int);
u_long (*ip_mcast_src)(int);

void (*rsvp_input_p)(struct mbuf *m, int off);
int (*ip_rsvp_vif)(struct socket *, struct sockopt *);
void (*ip_rsvp_force_done)(struct socket *);

/*
 * Hash functions
 */

#define INP_PCBHASH_RAW_SIZE	256
#define INP_PCBHASH_RAW(proto, laddr, faddr, mask) \
        (((proto) + (laddr) + (faddr)) % (mask) + 1)
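
/*
 * Fully specified PCBs (a non-zero protocol plus both a local and a foreign
 * address) hash into chains 1..mask; chain 0 is reserved for PCBs that leave
 * any of those wild.  See rip_inshash() and rip_input().
 */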

static void
rip_inshash(struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	struct inpcbhead *pcbhash;
	int hash;

	INP_INFO_WLOCK_ASSERT(pcbinfo);
	INP_WLOCK_ASSERT(inp);

	if (inp->inp_ip_p != 0 &&
	    inp->inp_laddr.s_addr != INADDR_ANY &&
	    inp->inp_faddr.s_addr != INADDR_ANY) {
		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
	} else
		hash = 0;
	pcbhash = &pcbinfo->ipi_hashbase[hash];
	LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
}

static void
rip_delhash(struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
	INP_WLOCK_ASSERT(inp);

	LIST_REMOVE(inp, inp_hash);
}

/*
 * Raw interface to IP protocol.
 */

/*
 * Initialize raw connection block q.
 */
static void
rip_zone_change(void *tag)
{
	INIT_VNET_INET(curvnet);

	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
}

static int
rip_inpcb_init(void *mem, int size, int flags)
{
	struct inpcb *inp = mem;

	INP_LOCK_INIT(inp, "inp", "rawinp");
	return (0);
}

void
rip_init(void)
{
	INIT_VNET_INET(curvnet);

	INP_INFO_LOCK_INIT(&V_ripcbinfo, "rip");
	LIST_INIT(&V_ripcb);
	V_ripcbinfo.ipi_listhead = &V_ripcb;
	V_ripcbinfo.ipi_hashbase =
	    hashinit(INP_PCBHASH_RAW_SIZE, M_PCB, &V_ripcbinfo.ipi_hashmask);
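	/*
	 * Raw IP has no notion of ports, so a single-bucket port hash is all
	 * the generic inpcb code needs here.
	 */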
	V_ripcbinfo.ipi_porthashbase =
	    hashinit(1, M_PCB, &V_ripcbinfo.ipi_porthashmask);
	V_ripcbinfo.ipi_zone = uma_zcreate("ripcb", sizeof(struct inpcb),
	    NULL, NULL, rip_inpcb_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(V_ripcbinfo.ipi_zone, maxsockets);
	EVENTHANDLER_REGISTER(maxsockets_change, rip_zone_change, NULL,
	    EVENTHANDLER_PRI_ANY);
}

static int
rip_append(struct inpcb *last, struct ip *ip, struct mbuf *n,
    struct sockaddr_in *ripsrc)
{
	int policyfail = 0;

	INP_RLOCK_ASSERT(last);

#ifdef IPSEC
	/* check AH/ESP integrity. */
	if (ipsec4_in_reject(n, last)) {
		policyfail = 1;
	}
#endif /* IPSEC */
#ifdef MAC
	if (!policyfail && mac_inpcb_check_deliver(last, n) != 0)
		policyfail = 1;
#endif
	/* Check the minimum TTL for the socket. */
	if (last->inp_ip_minttl && last->inp_ip_minttl > ip->ip_ttl)
		policyfail = 1;
	if (!policyfail) {
		struct mbuf *opts = NULL;
		struct socket *so;

		so = last->inp_socket;
		if ((last->inp_flags & INP_CONTROLOPTS) ||
		    (so->so_options & (SO_TIMESTAMP | SO_BINTIME)))
			ip_savecontrol(last, &opts, ip, n);
		SOCKBUF_LOCK(&so->so_rcv);
		if (sbappendaddr_locked(&so->so_rcv,
		    (struct sockaddr *)ripsrc, n, opts) == 0) {
			/* should notify about lost packet */
			m_freem(n);
			if (opts)
				m_freem(opts);
			SOCKBUF_UNLOCK(&so->so_rcv);
		} else
			sorwakeup_locked(so);
	} else
		m_freem(n);
	return (policyfail);
}

/*
 * Set up generic address and protocol structures for the raw_input routine,
 * then pass them along with the mbuf chain.
 */
void
rip_input(struct mbuf *m, int off)
{
	INIT_VNET_INET(curvnet);
	struct ip *ip = mtod(m, struct ip *);
	int proto = ip->ip_p;
	struct inpcb *inp, *last;
	struct sockaddr_in ripsrc;
	int hash;

	bzero(&ripsrc, sizeof(ripsrc));
	ripsrc.sin_len = sizeof(ripsrc);
	ripsrc.sin_family = AF_INET;
	ripsrc.sin_addr = ip->ip_src;
	last = NULL;
	hash = INP_PCBHASH_RAW(proto, ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, V_ripcbinfo.ipi_hashmask);
	INP_INFO_RLOCK(&V_ripcbinfo);
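	/*
	 * First, deliver to PCBs that match the protocol and both addresses
	 * exactly; those live on the hash chain computed above.
	 */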
	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[hash], inp_hash) {
		if (inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
			continue;
		if (last != NULL) {
			struct mbuf *n;

			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
		}
		INP_RLOCK(inp);
		last = inp;
	}
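	/*
	 * Then check chain 0, which holds PCBs that left the protocol or
	 * either address unspecified and so may also match this packet.
	 */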
	LIST_FOREACH(inp, &V_ripcbinfo.ipi_hashbase[0], inp_hash) {
		if (inp->inp_ip_p && inp->inp_ip_p != proto)
			continue;
#ifdef INET6
		/* XXX inp locking */
		if ((inp->inp_vflag & INP_IPV4) == 0)
			continue;
#endif
		if (inp->inp_laddr.s_addr &&
		    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
			continue;
		if (inp->inp_faddr.s_addr &&
		    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
			continue;
		if (prison_check_ip4(inp->inp_cred, &ip->ip_dst) != 0)
			continue;
		if (last != NULL) {
			struct mbuf *n;

			n = m_copy(m, 0, (int)M_COPYALL);
			if (n != NULL)
				(void) rip_append(last, ip, n, &ripsrc);
			/* XXX count dropped packet */
			INP_RUNLOCK(last);
		}
		INP_RLOCK(inp);
		last = inp;
	}
	INP_INFO_RUNLOCK(&V_ripcbinfo);
	if (last != NULL) {
		if (rip_append(last, ip, m, &ripsrc) != 0)
			V_ipstat.ips_delivered--;
		INP_RUNLOCK(last);
	} else {
		m_freem(m);
		V_ipstat.ips_noproto++;
		V_ipstat.ips_delivered--;
	}
}

/*
 * Generate an IP header and pass the packet to ip_output().  Tack on options
 * the user may have set up with a control call.
 */
int
rip_output(struct mbuf *m, struct socket *so, u_long dst)
{
	INIT_VNET_INET(so->so_vnet);
	struct ip *ip;
	int error;
	struct inpcb *inp = sotoinpcb(so);
	int flags = ((so->so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0) |
	    IP_ALLOWBROADCAST;

	/*
	 * If the user did not hand us a complete IP packet (INP_HDRINCL is
	 * clear), allocate an mbuf for a header and fill it in; otherwise
	 * use the header as supplied.
	 */
	if ((inp->inp_flags & INP_HDRINCL) == 0) {
		if (m->m_pkthdr.len + sizeof(struct ip) > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		M_PREPEND(m, sizeof(struct ip), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);

		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		ip->ip_tos = inp->inp_ip_tos;
		if (inp->inp_flags & INP_DONTFRAG)
			ip->ip_off = IP_DF;
		else
			ip->ip_off = 0;
		ip->ip_p = inp->inp_ip_p;
		ip->ip_len = m->m_pkthdr.len;
		ip->ip_src = inp->inp_laddr;
		error = prison_get_ip4(inp->inp_cred, &ip->ip_src);
		if (error != 0) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (error);
		}
		ip->ip_dst.s_addr = dst;
		ip->ip_ttl = inp->inp_ip_ttl;
	} else {
		if (m->m_pkthdr.len > IP_MAXPACKET) {
			m_freem(m);
			return (EMSGSIZE);
		}
		INP_RLOCK(inp);
		ip = mtod(m, struct ip *);
		error = prison_check_ip4(inp->inp_cred, &ip->ip_src);
		if (error != 0) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (error);
		}

		/*
		 * Don't allow both user-supplied header options and options
		 * set with setsockopt(), and don't allow packet lengths that
		 * would crash.
		 */
		if (((ip->ip_hl != (sizeof (*ip) >> 2)) && inp->inp_options)
		    || (ip->ip_len > m->m_pkthdr.len)
		    || (ip->ip_len < (ip->ip_hl << 2))) {
			INP_RUNLOCK(inp);
			m_freem(m);
			return (EINVAL);
		}
		if (ip->ip_id == 0)
			ip->ip_id = ip_newid();

		/*
		 * XXX prevent ip_output from overwriting header fields.
		 */
		flags |= IP_RAWOUTPUT;
		V_ipstat.ips_rawout++;
	}

	if (inp->inp_flags & INP_ONESBCAST)
		flags |= IP_SENDONES;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	error = ip_output(m, inp->inp_options, NULL, flags,
	    inp->inp_moptions, inp);
	INP_RUNLOCK(inp);
	return (error);
}

/*
 * Raw IP socket option processing.
 *
 * IMPORTANT NOTE regarding access control: Traditionally, raw sockets could
 * only be created by a privileged process, and as such, socket option
 * operations to manage system properties on any raw socket were allowed to
 * take place without explicit additional access control checks.  However,
 * raw sockets can now also be created within a jail, and therefore explicit
 * checks are now required.  Likewise, raw sockets can be used by a process
 * after it gives up privilege, so some caution is required.  For options
 * passed down to the IP layer via ip_ctloutput(), checks are assumed to be
 * performed in ip_ctloutput() and therefore no check occurs here.
 * Unilaterally calling priv_check() here would break normal IP socket
 * option operations on raw sockets.
 *
 * When adding new socket options, make sure to add any necessary access
 * control checks here.
 */
int
rip_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct	inpcb *inp = sotoinpcb(so);
	int	error, optval;

	if (sopt->sopt_level != IPPROTO_IP) {
		if ((sopt->sopt_level == SOL_SOCKET) &&
		    (sopt->sopt_name == SO_SETFIB)) {
			inp->inp_inc.inc_fibnum = so->so_fibnum;
			return (0);
		}
		return (EINVAL);
	}

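	/* Everything below is an IPPROTO_IP-level option. */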
	error = 0;
	switch (sopt->sopt_dir) {
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			optval = inp->inp_flags & INP_HDRINCL;
			error = sooptcopyout(sopt, &optval, sizeof optval);
			break;

		case IP_FW_ADD:	/* ADD actually returns the body... */
		case IP_FW_GET:
		case IP_FW_TABLE_GETSIZE:
		case IP_FW_TABLE_LIST:
		case IP_FW_NAT_GET_CONFIG:
		case IP_FW_NAT_GET_LOG:
			if (ip_fw_ctl_ptr != NULL)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET_GET:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_get ? ip_mrouter_get(so, sopt) :
				EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;

	case SOPT_SET:
		switch (sopt->sopt_name) {
		case IP_HDRINCL:
			error = sooptcopyin(sopt, &optval, sizeof optval,
					    sizeof optval);
			if (error)
				break;
			if (optval)
				inp->inp_flags |= INP_HDRINCL;
			else
				inp->inp_flags &= ~INP_HDRINCL;
			break;

		case IP_FW_ADD:
		case IP_FW_DEL:
		case IP_FW_FLUSH:
		case IP_FW_ZERO:
		case IP_FW_RESETLOG:
		case IP_FW_TABLE_ADD:
		case IP_FW_TABLE_DEL:
		case IP_FW_TABLE_FLUSH:
		case IP_FW_NAT_CFG:
		case IP_FW_NAT_DEL:
			if (ip_fw_ctl_ptr != NULL)
				error = ip_fw_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_DUMMYNET_CONFIGURE:
		case IP_DUMMYNET_DEL:
		case IP_DUMMYNET_FLUSH:
			if (ip_dn_ctl_ptr != NULL)
				error = ip_dn_ctl_ptr(sopt);
			else
				error = ENOPROTOOPT;
			break;

		case IP_RSVP_ON:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_init(so);
			break;

		case IP_RSVP_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_done();
			break;

		case IP_RSVP_VIF_ON:
		case IP_RSVP_VIF_OFF:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_rsvp_vif ?
				ip_rsvp_vif(so, sopt) : EINVAL;
			break;

		case MRT_INIT:
		case MRT_DONE:
		case MRT_ADD_VIF:
		case MRT_DEL_VIF:
		case MRT_ADD_MFC:
		case MRT_DEL_MFC:
		case MRT_VERSION:
		case MRT_ASSERT:
		case MRT_API_SUPPORT:
		case MRT_API_CONFIG:
		case MRT_ADD_BW_UPCALL:
		case MRT_DEL_BW_UPCALL:
			error = priv_check(curthread, PRIV_NETINET_MROUTE);
			if (error != 0)
				return (error);
			error = ip_mrouter_set ? ip_mrouter_set(so, sopt) :
					EOPNOTSUPP;
			break;

		default:
			error = ip_ctloutput(so, sopt);
			break;
		}
		break;
	}

	return (error);
}

/*
 * This function exists solely to receive the PRC_IFDOWN messages which are
 * sent by if_down().  It looks for an ifaddr whose ifa_addr is sa, and calls
 * in_ifadown() to remove all routes corresponding to that address.  It also
 * receives the PRC_IFUP messages from if_up() and reinstalls the interface
 * routes.
 */
void
rip_ctlinput(int cmd, struct sockaddr *sa, void *vip)
{
	INIT_VNET_INET(curvnet);
	struct in_ifaddr *ia;
	struct ifnet *ifp;
	int err;
	int flags;

	switch (cmd) {
	case PRC_IFDOWN:
		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa
			    && (ia->ia_flags & IFA_ROUTE)) {
				/*
				 * in_ifscrub kills the interface route.
				 */
				in_ifscrub(ia->ia_ifp, ia);
				/*
				 * in_ifadown gets rid of all the rest of the
				 * routes.  This is not quite the right thing
				 * to do, but at least if we are running a
				 * routing process they will come back.
				 */
				in_ifadown(&ia->ia_ifa, 0);
				break;
			}
		}
		break;

	case PRC_IFUP:
		TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) {
			if (ia->ia_ifa.ifa_addr == sa)
				break;
		}
		if (ia == NULL || (ia->ia_flags & IFA_ROUTE))
			return;
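		/*
		 * Re-add the interface route, as a host route for loopback
		 * and point-to-point interfaces.
		 */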
		flags = RTF_UP;
		ifp = ia->ia_ifa.ifa_ifp;

		if ((ifp->if_flags & IFF_LOOPBACK)
		    || (ifp->if_flags & IFF_POINTOPOINT))
			flags |= RTF_HOST;

		err = rtinit(&ia->ia_ifa, RTM_ADD, flags);
		if (err == 0)
			ia->ia_flags |= IFA_ROUTE;
		break;
	}
}

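/*
 * Default send and receive buffer reservations for raw IP sockets (see
 * rip_attach()); run-time adjustable via the net.inet.raw.maxdgram and
 * net.inet.raw.recvspace sysctls below.
 */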
u_long	rip_sendspace = 9216;
u_long	rip_recvspace = 9216;

SYSCTL_ULONG(_net_inet_raw, OID_AUTO, maxdgram, CTLFLAG_RW,
    &rip_sendspace, 0, "Maximum outgoing raw IP datagram size");
SYSCTL_ULONG(_net_inet_raw, OID_AUTO, recvspace, CTLFLAG_RW,
    &rip_recvspace, 0, "Maximum space for incoming raw IP datagrams");

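/*
 * Allocate and initialize the PCB for a new raw socket.  Creating a raw
 * socket requires the PRIV_NETINET_RAW privilege; the requested protocol is
 * recorded in the PCB so rip_input() can match incoming packets against it.
 */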
static int
rip_attach(struct socket *so, int proto, struct thread *td)
{
	INIT_VNET_INET(so->so_vnet);
	struct inpcb *inp;
	int error;

	inp = sotoinpcb(so);
	KASSERT(inp == NULL, ("rip_attach: inp != NULL"));

	error = priv_check(td, PRIV_NETINET_RAW);
	if (error)
		return (error);
	if (proto >= IPPROTO_MAX || proto < 0)
		return (EPROTONOSUPPORT);
	error = soreserve(so, rip_sendspace, rip_recvspace);
	if (error)
		return (error);
	INP_INFO_WLOCK(&V_ripcbinfo);
	error = in_pcballoc(so, &V_ripcbinfo);
	if (error) {
		INP_INFO_WUNLOCK(&V_ripcbinfo);
		return (error);
	}
	inp = (struct inpcb *)so->so_pcb;
	inp->inp_vflag |= INP_IPV4;
	inp->inp_ip_p = proto;
	inp->inp_ip_ttl = V_ip_defttl;
	rip_inshash(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	INP_WUNLOCK(inp);
	return (0);
}

static void
rip_detach(struct socket *so)
{
	INIT_VNET_INET(so->so_vnet);
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_detach: inp == NULL"));
	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
	    ("rip_detach: not closed"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	if (so == V_ip_mrouter && ip_mrouter_done)
		ip_mrouter_done();
	if (ip_rsvp_force_done)
		ip_rsvp_force_done(so);
	if (so == V_ip_rsvpd)
		ip_rsvp_done();
	in_pcbdetach(inp);
	in_pcbfree(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
}

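/*
 * Common disconnect path: clear the foreign address, move the PCB back to
 * the appropriate hash chain, and mark the socket as no longer connected.
 */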
static void
rip_dodisconnect(struct socket *so, struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
	INP_WLOCK_ASSERT(inp);

	rip_delhash(inp);
	inp->inp_faddr.s_addr = INADDR_ANY;
	rip_inshash(inp);
	SOCK_LOCK(so);
	so->so_state &= ~SS_ISCONNECTED;
	SOCK_UNLOCK(so);
}

static void
rip_abort(struct socket *so)
{
	INIT_VNET_INET(so->so_vnet);
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_abort: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_dodisconnect(so, inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
}

static void
rip_close(struct socket *so)
{
	INIT_VNET_INET(so->so_vnet);
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_close: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_dodisconnect(so, inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
}

static int
rip_disconnect(struct socket *so)
{
	INIT_VNET_INET(so->so_vnet);
	struct inpcb *inp;

	if ((so->so_state & SS_ISCONNECTED) == 0)
		return (ENOTCONN);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_disconnect: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_dodisconnect(so, inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	INIT_VNET_NET(so->so_vnet);
	INIT_VNET_INET(so->so_vnet);
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;
	int error;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);

	error = prison_check_ip4(td->td_ucred, &addr->sin_addr);
	if (error != 0)
		return (error);

	if (TAILQ_EMPTY(&V_ifnet) ||
	    (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK) ||
	    (addr->sin_addr.s_addr &&
	     ifa_ifwithaddr((struct sockaddr *)addr) == NULL))
		return (EADDRNOTAVAIL);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_bind: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_laddr = addr->sin_addr;
	rip_inshash(inp);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	INIT_VNET_NET(so->so_vnet);
	INIT_VNET_INET(so->so_vnet);
	struct sockaddr_in *addr = (struct sockaddr_in *)nam;
	struct inpcb *inp;

	if (nam->sa_len != sizeof(*addr))
		return (EINVAL);
	if (TAILQ_EMPTY(&V_ifnet))
		return (EADDRNOTAVAIL);
	if (addr->sin_family != AF_INET && addr->sin_family != AF_IMPLINK)
		return (EAFNOSUPPORT);

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_connect: inp == NULL"));

	INP_INFO_WLOCK(&V_ripcbinfo);
	INP_WLOCK(inp);
	rip_delhash(inp);
	inp->inp_faddr = addr->sin_addr;
	rip_inshash(inp);
	soisconnected(so);
	INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_ripcbinfo);
	return (0);
}

static int
rip_shutdown(struct socket *so)
{
	struct inpcb *inp;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_shutdown: inp == NULL"));

	INP_WLOCK(inp);
	socantsendmore(so);
	INP_WUNLOCK(inp);
	return (0);
}

static int
rip_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
    struct mbuf *control, struct thread *td)
{
	struct inpcb *inp;
	u_long dst;

	inp = sotoinpcb(so);
	KASSERT(inp != NULL, ("rip_send: inp == NULL"));

	/*
	 * Note: 'dst' reads below are unlocked.
	 */
	if (so->so_state & SS_ISCONNECTED) {
		if (nam) {
			m_freem(m);
			return (EISCONN);
		}
		dst = inp->inp_faddr.s_addr;	/* Unlocked read. */
	} else {
		if (nam == NULL) {
			m_freem(m);
			return (ENOTCONN);
		}
		dst = ((struct sockaddr_in *)nam)->sin_addr.s_addr;
	}
	return (rip_output(m, so, dst));
}

static int
rip_pcblist(SYSCTL_HANDLER_ARGS)
{
	INIT_VNET_INET(curvnet);
	int error, i, n;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the PCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = V_ripcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n/8) * sizeof(struct xinpcb);
		return (0);
	}

	if (req->newptr != 0)
		return (EPERM);

	/*
	 * OK, now we're committed to doing something.
	 */
	INP_INFO_RLOCK(&V_ripcbinfo);
	gencnt = V_ripcbinfo.ipi_gencnt;
	n = V_ripcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&V_ripcbinfo);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return (error);

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return (ENOMEM);

	INP_INFO_RLOCK(&V_ripcbinfo);
	for (inp = LIST_FIRST(V_ripcbinfo.ipi_listhead), i = 0; inp && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt &&
		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
			/* XXX held references? */
			inp_list[i++] = inp;
		}
		INP_RUNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&V_ripcbinfo);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		INP_RLOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			struct xinpcb xi;

			bzero(&xi, sizeof(xi));
			xi.xi_len = sizeof xi;
			/* XXX should avoid extra copy */
			bcopy(inp, &xi.xi_inp, sizeof *inp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xi.xi_socket);
			INP_RUNLOCK(inp);
			error = SYSCTL_OUT(req, &xi, sizeof xi);
		} else
			INP_RUNLOCK(inp);
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.  If the
		 * generation differs from what we told her before, she knows
		 * that something happened while we were processing this
		 * request, and it might be necessary to retry.
		 */
		INP_INFO_RLOCK(&V_ripcbinfo);
		xig.xig_gen = V_ripcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = V_ripcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&V_ripcbinfo);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return (error);
}

SYSCTL_PROC(_net_inet_raw, OID_AUTO/*XXX*/, pcblist, CTLFLAG_RD, 0, 0,
    rip_pcblist, "S,xinpcb", "List of active raw IP sockets");

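/*
 * User-request handlers for raw IP sockets.
 */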
struct pr_usrreqs rip_usrreqs = {
	.pru_abort =		rip_abort,
	.pru_attach =		rip_attach,
	.pru_bind =		rip_bind,
	.pru_connect =		rip_connect,
	.pru_control =		in_control,
	.pru_detach =		rip_detach,
	.pru_disconnect =	rip_disconnect,
	.pru_peeraddr =		in_getpeeraddr,
	.pru_send =		rip_send,
	.pru_shutdown =		rip_shutdown,
	.pru_sockaddr =		in_getsockaddr,
	.pru_sosetlabel =	in_pcbsosetlabel,
	.pru_close =		rip_close,
};