xref: /freebsd/sys/netinet/udp_usrreq.c (revision 1c6d60de932c8553af44629218cb9697bc0f2ef1)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3  *	The Regents of the University of California.
4  * Copyright (c) 2008 Robert N. M. Watson
5  * Copyright (c) 2010-2011 Juniper Networks, Inc.
6  * All rights reserved.
7  *
8  * Portions of this software were developed by Robert N. M. Watson under
9  * contract to Juniper Networks, Inc.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *	@(#)udp_usrreq.c	8.6 (Berkeley) 5/23/95
36  */
37 
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40 
41 #include "opt_ipfw.h"
42 #include "opt_inet.h"
43 #include "opt_inet6.h"
44 #include "opt_ipsec.h"
45 #include "opt_kdtrace.h"
46 
47 #include <sys/param.h>
48 #include <sys/domain.h>
49 #include <sys/eventhandler.h>
50 #include <sys/jail.h>
51 #include <sys/kernel.h>
52 #include <sys/lock.h>
53 #include <sys/malloc.h>
54 #include <sys/mbuf.h>
55 #include <sys/priv.h>
56 #include <sys/proc.h>
57 #include <sys/protosw.h>
58 #include <sys/sdt.h>
59 #include <sys/signalvar.h>
60 #include <sys/socket.h>
61 #include <sys/socketvar.h>
62 #include <sys/sx.h>
63 #include <sys/sysctl.h>
64 #include <sys/syslog.h>
65 #include <sys/systm.h>
66 
67 #include <vm/uma.h>
68 
69 #include <net/if.h>
70 #include <net/if_var.h>
71 #include <net/route.h>
72 
73 #include <netinet/in.h>
74 #include <netinet/in_kdtrace.h>
75 #include <netinet/in_pcb.h>
76 #include <netinet/in_systm.h>
77 #include <netinet/in_var.h>
78 #include <netinet/ip.h>
79 #ifdef INET6
80 #include <netinet/ip6.h>
81 #endif
82 #include <netinet/ip_icmp.h>
83 #include <netinet/icmp_var.h>
84 #include <netinet/ip_var.h>
85 #include <netinet/ip_options.h>
86 #ifdef INET6
87 #include <netinet6/ip6_var.h>
88 #endif
89 #include <netinet/udp.h>
90 #include <netinet/udp_var.h>
91 
92 #ifdef IPSEC
93 #include <netipsec/ipsec.h>
94 #include <netipsec/esp.h>
95 #endif
96 
97 #include <machine/in_cksum.h>
98 
99 #include <security/mac/mac_framework.h>
100 
101 /*
102  * UDP protocol implementation.
103  * Per RFC 768, August, 1980.
104  */
105 
106 /*
107  * BSD 4.2 defaulted the UDP checksum to be off.  Turning off UDP checksums
108  * removes the only data-integrity mechanism for datagrams; malformed
109  * packets that would otherwise be discarded due to bad checksums may then
110  * cause problems (especially for NFS data blocks).
111  */
112 VNET_DEFINE(int, udp_cksum) = 1;
113 SYSCTL_VNET_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW,
114     &VNET_NAME(udp_cksum), 0, "compute udp checksum");
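/*
 * The knob is exposed as net.inet.udp.checksum; for example,
 * "sysctl net.inet.udp.checksum=1" (the default) keeps transmit checksums
 * enabled, while setting it to 0 disables them and is discouraged for the
 * reasons above.
 */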
115 
116 int	udp_log_in_vain = 0;
117 SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
118     &udp_log_in_vain, 0, "Log all incoming UDP packets");
119 
120 VNET_DEFINE(int, udp_blackhole) = 0;
121 SYSCTL_VNET_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW,
122     &VNET_NAME(udp_blackhole), 0,
123     "Do not send port unreachables for refused connects");
124 
125 u_long	udp_sendspace = 9216;		/* really max datagram size */
127 SYSCTL_ULONG(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
128     &udp_sendspace, 0, "Maximum outgoing UDP datagram size");
129 
130 u_long	udp_recvspace = 40 * (1024 +	/* 40 1K datagrams */
131 #ifdef INET6
132 				      sizeof(struct sockaddr_in6)
133 #else
134 				      sizeof(struct sockaddr_in)
135 #endif
136 				      );
137 
138 SYSCTL_ULONG(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
139     &udp_recvspace, 0, "Maximum space for incoming UDP datagrams");
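/*
 * With INET6 the default therefore works out to 40 * (1024 + 28) = 42080
 * bytes (assuming the usual 28-byte struct sockaddr_in6): room for forty
 * 1K datagrams plus their recorded source addresses.
 */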
140 
141 VNET_DEFINE(struct inpcbhead, udb);		/* from udp_var.h */
142 VNET_DEFINE(struct inpcbinfo, udbinfo);
143 static VNET_DEFINE(uma_zone_t, udpcb_zone);
144 #define	V_udpcb_zone			VNET(udpcb_zone)
145 
146 #ifndef UDBHASHSIZE
147 #define	UDBHASHSIZE	128
148 #endif
149 
150 VNET_PCPUSTAT_DEFINE(struct udpstat, udpstat);		/* from udp_var.h */
151 VNET_PCPUSTAT_SYSINIT(udpstat);
152 SYSCTL_VNET_PCPUSTAT(_net_inet_udp, UDPCTL_STATS, stats, struct udpstat,
153     udpstat, "UDP statistics (struct udpstat, netinet/udp_var.h)");
154 
155 #ifdef VIMAGE
156 VNET_PCPUSTAT_SYSUNINIT(udpstat);
157 #endif /* VIMAGE */
158 #ifdef INET
159 static void	udp_detach(struct socket *so);
160 static int	udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
161 		    struct mbuf *, struct thread *);
162 #endif
163 
164 #ifdef IPSEC
165 #ifdef IPSEC_NAT_T
166 #define	UF_ESPINUDP_ALL	(UF_ESPINUDP_NON_IKE|UF_ESPINUDP)
167 #ifdef INET
168 static struct mbuf *udp4_espdecap(struct inpcb *, struct mbuf *, int);
169 #endif
170 #endif /* IPSEC_NAT_T */
171 #endif /* IPSEC */
172 
173 static void
174 udp_zone_change(void *tag)
175 {
176 
177 	uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
178 	uma_zone_set_max(V_udpcb_zone, maxsockets);
179 }
180 
181 static int
182 udp_inpcb_init(void *mem, int size, int flags)
183 {
184 	struct inpcb *inp;
185 
186 	inp = mem;
187 	INP_LOCK_INIT(inp, "inp", "udpinp");
188 	return (0);
189 }
190 
191 void
192 udp_init(void)
193 {
194 
195 	in_pcbinfo_init(&V_udbinfo, "udp", &V_udb, UDBHASHSIZE, UDBHASHSIZE,
196 	    "udp_inpcb", udp_inpcb_init, NULL, UMA_ZONE_NOFREE,
197 	    IPI_HASHFIELDS_2TUPLE);
198 	V_udpcb_zone = uma_zcreate("udpcb", sizeof(struct udpcb),
199 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
200 	uma_zone_set_max(V_udpcb_zone, maxsockets);
201 	uma_zone_set_warning(V_udpcb_zone, "kern.ipc.maxsockets limit reached");
202 	EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
203 	    EVENTHANDLER_PRI_ANY);
204 }
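/*
 * udp_init() sizes both the connection and port hash tables to
 * UDBHASHSIZE buckets and caps the udpcb zone at maxsockets;
 * udp_zone_change() above re-applies both zone caps whenever the
 * kern.ipc.maxsockets limit is raised.
 */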
205 
206 /*
207  * Kernel module interface for updating udpstat.  The argument is an index
208  * into udpstat treated as an array of u_long.  While this encodes the
209  * general layout of udpstat into the caller, it doesn't encode its location,
210  * so that future changes to add, for example, per-CPU stats support won't
211  * cause binary compatibility problems for kernel modules.
212  */
213 void
214 kmod_udpstat_inc(int statnum)
215 {
216 
217 	counter_u64_add(VNET(udpstat)[statnum], 1);
218 }
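/*
 * For example, a module that wants to bump the output-packet counter
 * could pass the index of udps_opackets within struct udpstat (a sketch,
 * mirroring what the KMOD_UDPSTAT_INC() macro in netinet/udp_var.h is
 * expected to do):
 *
 *	kmod_udpstat_inc(offsetof(struct udpstat, udps_opackets) /
 *	    sizeof(uint64_t));
 */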
219 
220 int
221 udp_newudpcb(struct inpcb *inp)
222 {
223 	struct udpcb *up;
224 
225 	up = uma_zalloc(V_udpcb_zone, M_NOWAIT | M_ZERO);
226 	if (up == NULL)
227 		return (ENOBUFS);
228 	inp->inp_ppcb = up;
229 	return (0);
230 }
231 
232 void
233 udp_discardcb(struct udpcb *up)
234 {
235 
236 	uma_zfree(V_udpcb_zone, up);
237 }
238 
239 #ifdef VIMAGE
240 void
241 udp_destroy(void)
242 {
243 
244 	in_pcbinfo_destroy(&V_udbinfo);
245 	uma_zdestroy(V_udpcb_zone);
246 }
247 #endif
248 
249 #ifdef INET
250 /*
251  * Subroutine of udp_input(), which appends the provided mbuf chain to the
252  * passed pcb/socket.  The caller must provide a sockaddr_in via udp_in that
253  * contains the source address.  If the socket ends up being an IPv6 socket,
254  * udp_append() will convert to a sockaddr_in6 before passing the address
255  * into the socket code.
256  */
257 static void
258 udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
259     struct sockaddr_in *udp_in)
260 {
261 	struct sockaddr *append_sa;
262 	struct socket *so;
263 	struct mbuf *opts = NULL;
264 #ifdef INET6
265 	struct sockaddr_in6 udp_in6;
266 #endif
267 	struct udpcb *up;
268 
269 	INP_LOCK_ASSERT(inp);
270 
271 	/*
272 	 * Engage the tunneling protocol.
273 	 */
274 	up = intoudpcb(inp);
275 	if (up->u_tun_func != NULL) {
276 		(*up->u_tun_func)(n, off, inp);
277 		return;
278 	}
279 
280 	if (n == NULL)
281 		return;
282 
283 	off += sizeof(struct udphdr);
284 
285 #ifdef IPSEC
286 	/* Check AH/ESP integrity. */
287 	if (ipsec4_in_reject(n, inp)) {
288 		m_freem(n);
289 		IPSECSTAT_INC(ips_in_polvio);
290 		return;
291 	}
292 #ifdef IPSEC_NAT_T
293 	up = intoudpcb(inp);
294 	KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
295 	if (up->u_flags & UF_ESPINUDP_ALL) {	/* IPSec UDP encaps. */
296 		n = udp4_espdecap(inp, n, off);
297 		if (n == NULL)				/* Consumed. */
298 			return;
299 	}
300 #endif /* IPSEC_NAT_T */
301 #endif /* IPSEC */
302 #ifdef MAC
303 	if (mac_inpcb_check_deliver(inp, n) != 0) {
304 		m_freem(n);
305 		return;
306 	}
307 #endif /* MAC */
308 	if (inp->inp_flags & INP_CONTROLOPTS ||
309 	    inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) {
310 #ifdef INET6
311 		if (inp->inp_vflag & INP_IPV6)
312 			(void)ip6_savecontrol_v4(inp, n, &opts, NULL);
313 		else
314 #endif /* INET6 */
315 			ip_savecontrol(inp, &opts, ip, n);
316 	}
317 #ifdef INET6
318 	if (inp->inp_vflag & INP_IPV6) {
319 		bzero(&udp_in6, sizeof(udp_in6));
320 		udp_in6.sin6_len = sizeof(udp_in6);
321 		udp_in6.sin6_family = AF_INET6;
322 		in6_sin_2_v4mapsin6(udp_in, &udp_in6);
323 		append_sa = (struct sockaddr *)&udp_in6;
324 	} else
325 #endif /* INET6 */
326 		append_sa = (struct sockaddr *)udp_in;
327 	m_adj(n, off);
328 
329 	so = inp->inp_socket;
330 	SOCKBUF_LOCK(&so->so_rcv);
331 	if (sbappendaddr_locked(&so->so_rcv, append_sa, n, opts) == 0) {
332 		SOCKBUF_UNLOCK(&so->so_rcv);
333 		m_freem(n);
334 		if (opts)
335 			m_freem(opts);
336 		UDPSTAT_INC(udps_fullsock);
337 	} else
338 		sorwakeup_locked(so);
339 }
340 
341 void
342 udp_input(struct mbuf *m, int off)
343 {
344 	int iphlen = off;
345 	struct ip *ip;
346 	struct udphdr *uh;
347 	struct ifnet *ifp;
348 	struct inpcb *inp;
349 	uint16_t len, ip_len;
350 	struct ip save_ip;
351 	struct sockaddr_in udp_in;
352 	struct m_tag *fwd_tag;
353 
354 	ifp = m->m_pkthdr.rcvif;
355 	UDPSTAT_INC(udps_ipackets);
356 
357 	/*
358 	 * Strip IP options, if any.  Ideally we would skip this, make the
359 	 * options available to the user, and reuse them on returned packets,
360 	 * but we can't yet verify the checksum with options still present.
361 	 */
362 	if (iphlen > sizeof (struct ip)) {
363 		ip_stripoptions(m);
364 		iphlen = sizeof(struct ip);
365 	}
366 
367 	/*
368 	 * Get IP and UDP header together in first mbuf.
369 	 */
370 	ip = mtod(m, struct ip *);
371 	if (m->m_len < iphlen + sizeof(struct udphdr)) {
372 		if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == NULL) {
373 			UDPSTAT_INC(udps_hdrops);
374 			return;
375 		}
376 		ip = mtod(m, struct ip *);
377 	}
378 	uh = (struct udphdr *)((caddr_t)ip + iphlen);
379 
380 	/*
381 	 * Destination port of 0 is illegal, based on RFC768.
382 	 */
383 	if (uh->uh_dport == 0)
384 		goto badunlocked;
385 
386 	/*
387 	 * Construct sockaddr format source address.  Stuff source address
388 	 * and datagram in user buffer.
389 	 */
390 	bzero(&udp_in, sizeof(udp_in));
391 	udp_in.sin_len = sizeof(udp_in);
392 	udp_in.sin_family = AF_INET;
393 	udp_in.sin_port = uh->uh_sport;
394 	udp_in.sin_addr = ip->ip_src;
395 
396 	/*
397 	 * Make mbuf data length reflect UDP length.  If not enough data to
398 	 * reflect UDP length, drop.
399 	 */
400 	len = ntohs((u_short)uh->uh_ulen);
401 	ip_len = ntohs(ip->ip_len) - iphlen;
402 	if (ip_len != len) {
403 		if (len > ip_len || len < sizeof(struct udphdr)) {
404 			UDPSTAT_INC(udps_badlen);
405 			goto badunlocked;
406 		}
407 		m_adj(m, len - ip_len);
408 	}
409 
410 	/*
411 	 * Save a copy of the IP header in case we want to restore it for
412 	 * sending an ICMP error message in response.
413 	 */
414 	if (!V_udp_blackhole)
415 		save_ip = *ip;
416 	else
417 		memset(&save_ip, 0, sizeof(save_ip));
418 
419 	/*
420 	 * Checksum extended UDP header and data.
421 	 */
422 	if (uh->uh_sum) {
423 		u_short uh_sum;
424 
425 		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
426 			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
427 				uh_sum = m->m_pkthdr.csum_data;
428 			else
429 				uh_sum = in_pseudo(ip->ip_src.s_addr,
430 				    ip->ip_dst.s_addr, htonl((u_short)len +
431 				    m->m_pkthdr.csum_data + IPPROTO_UDP));
432 			uh_sum ^= 0xffff;
433 		} else {
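			/*
			 * No hardware assist: temporarily overlay the IP
			 * header with a struct ipovly to build the pseudo-
			 * header in place.  Save the bytes about to be
			 * clobbered (ih_x1), zero them, substitute the UDP
			 * length, run in_cksum() over pseudo-header plus UDP
			 * header and data, then restore the saved bytes.
			 */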
434 			char b[9];
435 
436 			bcopy(((struct ipovly *)ip)->ih_x1, b, 9);
437 			bzero(((struct ipovly *)ip)->ih_x1, 9);
438 			((struct ipovly *)ip)->ih_len = uh->uh_ulen;
439 			uh_sum = in_cksum(m, len + sizeof (struct ip));
440 			bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
441 		}
442 		if (uh_sum) {
443 			UDPSTAT_INC(udps_badsum);
444 			m_freem(m);
445 			return;
446 		}
447 	} else
448 		UDPSTAT_INC(udps_nosum);
449 
450 	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
451 	    in_broadcast(ip->ip_dst, ifp)) {
452 		struct inpcb *last;
453 		struct ip_moptions *imo;
454 
455 		INP_INFO_RLOCK(&V_udbinfo);
456 		last = NULL;
457 		LIST_FOREACH(inp, &V_udb, inp_list) {
458 			if (inp->inp_lport != uh->uh_dport)
459 				continue;
460 #ifdef INET6
461 			if ((inp->inp_vflag & INP_IPV4) == 0)
462 				continue;
463 #endif
464 			if (inp->inp_laddr.s_addr != INADDR_ANY &&
465 			    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
466 				continue;
467 			if (inp->inp_faddr.s_addr != INADDR_ANY &&
468 			    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
469 				continue;
470 			if (inp->inp_fport != 0 &&
471 			    inp->inp_fport != uh->uh_sport)
472 				continue;
473 
474 			INP_RLOCK(inp);
475 
476 			/*
477 			 * XXXRW: Because we weren't holding either the inpcb
478 			 * or the hash lock when we checked for a match
479 			 * before, we should probably recheck now that the
480 			 * inpcb lock is held.
481 			 */
482 
483 			/*
484 			 * Handle socket delivery policy for any-source
485 			 * and source-specific multicast. [RFC3678]
486 			 */
487 			imo = inp->inp_moptions;
488 			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
489 				struct sockaddr_in	 group;
490 				int			 blocked;
491 				if (imo == NULL) {
492 					INP_RUNLOCK(inp);
493 					continue;
494 				}
495 				bzero(&group, sizeof(struct sockaddr_in));
496 				group.sin_len = sizeof(struct sockaddr_in);
497 				group.sin_family = AF_INET;
498 				group.sin_addr = ip->ip_dst;
499 
500 				blocked = imo_multi_filter(imo, ifp,
501 					(struct sockaddr *)&group,
502 					(struct sockaddr *)&udp_in);
503 				if (blocked != MCAST_PASS) {
504 					if (blocked == MCAST_NOTGMEMBER)
505 						IPSTAT_INC(ips_notmember);
506 					if (blocked == MCAST_NOTSMEMBER ||
507 					    blocked == MCAST_MUTED)
508 						UDPSTAT_INC(udps_filtermcast);
509 					INP_RUNLOCK(inp);
510 					continue;
511 				}
512 			}
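			/*
			 * Fan out: each earlier match gets a copy of the
			 * datagram; the original mbuf is reserved for the
			 * final match and delivered after the loop.
			 */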
513 			if (last != NULL) {
514 				struct mbuf *n;
515 
516 				n = m_copy(m, 0, M_COPYALL);
517 				udp_append(last, ip, n, iphlen, &udp_in);
518 				INP_RUNLOCK(last);
519 			}
520 			last = inp;
521 			/*
522 			 * Don't look for additional matches if this one does
523 			 * not have either the SO_REUSEPORT or SO_REUSEADDR
524 			 * socket options set.  This heuristic avoids
525 			 * searching through all pcbs in the common case of a
526 			 * non-shared port.  It assumes that an application
527 			 * will never clear these options after setting them.
528 			 */
529 			if ((last->inp_socket->so_options &
530 			    (SO_REUSEPORT|SO_REUSEADDR)) == 0)
531 				break;
532 		}
533 
534 		if (last == NULL) {
535 			/*
536 			 * No matching pcb found; discard datagram.  (No need
537 			 * to send an ICMP Port Unreachable for a broadcast
538 			 * or multicast datagram.)
539 			 */
540 			UDPSTAT_INC(udps_noportbcast);
541 			if (inp)
542 				INP_RUNLOCK(inp);
543 			INP_INFO_RUNLOCK(&V_udbinfo);
544 			goto badunlocked;
545 		}
546 		udp_append(last, ip, m, iphlen, &udp_in);
547 		INP_RUNLOCK(last);
548 		INP_INFO_RUNLOCK(&V_udbinfo);
549 		return;
550 	}
551 
552 	/*
553 	 * Locate pcb for datagram.
554 	 */
555 
556 	/*
557 	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
558 	 */
559 	if ((m->m_flags & M_IP_NEXTHOP) &&
560 	    (fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL)) != NULL) {
561 		struct sockaddr_in *next_hop;
562 
563 		next_hop = (struct sockaddr_in *)(fwd_tag + 1);
564 
565 		/*
566 		 * Transparently forwarded. Pretend to be the destination.
567 		 * Already got one like this?
568 		 */
569 		inp = in_pcblookup_mbuf(&V_udbinfo, ip->ip_src, uh->uh_sport,
570 		    ip->ip_dst, uh->uh_dport, INPLOOKUP_RLOCKPCB, ifp, m);
571 		if (!inp) {
572 			/*
573 			 * It's new.  Try to find the ambushing socket.
574 			 * Because we've rewritten the destination address,
575 			 * any hardware-generated hash is ignored.
576 			 */
577 			inp = in_pcblookup(&V_udbinfo, ip->ip_src,
578 			    uh->uh_sport, next_hop->sin_addr,
579 			    next_hop->sin_port ? htons(next_hop->sin_port) :
580 			    uh->uh_dport, INPLOOKUP_WILDCARD |
581 			    INPLOOKUP_RLOCKPCB, ifp);
582 		}
583 		/* Remove the tag from the packet. We don't need it anymore. */
584 		m_tag_delete(m, fwd_tag);
585 		m->m_flags &= ~M_IP_NEXTHOP;
586 	} else
587 		inp = in_pcblookup_mbuf(&V_udbinfo, ip->ip_src, uh->uh_sport,
588 		    ip->ip_dst, uh->uh_dport, INPLOOKUP_WILDCARD |
589 		    INPLOOKUP_RLOCKPCB, ifp, m);
590 	if (inp == NULL) {
591 		if (udp_log_in_vain) {
592 			char buf[4*sizeof "123"];
593 
594 			strcpy(buf, inet_ntoa(ip->ip_dst));
595 			log(LOG_INFO,
596 			    "Connection attempt to UDP %s:%d from %s:%d\n",
597 			    buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src),
598 			    ntohs(uh->uh_sport));
599 		}
600 		UDPSTAT_INC(udps_noport);
601 		if (m->m_flags & (M_BCAST | M_MCAST)) {
602 			UDPSTAT_INC(udps_noportbcast);
603 			goto badunlocked;
604 		}
605 		if (V_udp_blackhole)
606 			goto badunlocked;
607 		if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
608 			goto badunlocked;
609 		*ip = save_ip;
610 		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
611 		return;
612 	}
613 
614 	/*
615 	 * Check the minimum TTL for socket.
616 	 */
617 	INP_RLOCK_ASSERT(inp);
618 	if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) {
619 		INP_RUNLOCK(inp);
620 		m_freem(m);
621 		return;
622 	}
623 
624 	UDP_PROBE(receive, NULL, inp, ip, inp, uh);
625 	udp_append(inp, ip, m, iphlen, &udp_in);
626 	INP_RUNLOCK(inp);
627 	return;
628 
629 badunlocked:
630 	m_freem(m);
631 }
632 #endif /* INET */
633 
634 /*
635  * Notify a udp user of an asynchronous error; just wake up so that they can
636  * collect error status.
637  */
638 struct inpcb *
639 udp_notify(struct inpcb *inp, int errno)
640 {
641 
642 	/*
643 	 * While udp_ctlinput() always calls udp_notify() with a read lock
644 	 * when invoking it directly, in_pcbnotifyall() currently uses write
645 	 * locks due to sharing code with TCP.  For now, accept either a read
646 	 * or a write lock, but a read lock is sufficient.
647 	 */
648 	INP_LOCK_ASSERT(inp);
649 
650 	inp->inp_socket->so_error = errno;
651 	sorwakeup(inp->inp_socket);
652 	sowwakeup(inp->inp_socket);
653 	return (inp);
654 }
655 
656 #ifdef INET
657 void
658 udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
659 {
660 	struct ip *ip = vip;
661 	struct udphdr *uh;
662 	struct in_addr faddr;
663 	struct inpcb *inp;
664 
665 	faddr = ((struct sockaddr_in *)sa)->sin_addr;
666 	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
667 		return;
668 
669 	/*
670 	 * Redirects don't need to be handled up here.
671 	 */
672 	if (PRC_IS_REDIRECT(cmd))
673 		return;
674 
675 	/*
676 	 * Hostdead is ugly because it goes linearly through all PCBs.
677 	 *
678 	 * XXX: We never get this from ICMP, otherwise it makes an excellent
679 	 * DoS attack on machines with many connections.
680 	 */
681 	if (cmd == PRC_HOSTDEAD)
682 		ip = NULL;
683 	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
684 		return;
685 	if (ip != NULL) {
686 		uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
687 		inp = in_pcblookup(&V_udbinfo, faddr, uh->uh_dport,
688 		    ip->ip_src, uh->uh_sport, INPLOOKUP_RLOCKPCB, NULL);
689 		if (inp != NULL) {
690 			INP_RLOCK_ASSERT(inp);
691 			if (inp->inp_socket != NULL) {
692 				udp_notify(inp, inetctlerrmap[cmd]);
693 			}
694 			INP_RUNLOCK(inp);
695 		}
696 	} else
697 		in_pcbnotifyall(&V_udbinfo, faddr, inetctlerrmap[cmd],
698 		    udp_notify);
699 }
700 #endif /* INET */
701 
702 static int
703 udp_pcblist(SYSCTL_HANDLER_ARGS)
704 {
705 	int error, i, n;
706 	struct inpcb *inp, **inp_list;
707 	inp_gen_t gencnt;
708 	struct xinpgen xig;
709 
710 	/*
711 	 * The process of preparing the PCB list is too time-consuming and
712 	 * resource-intensive to repeat twice on every request.
713 	 */
714 	if (req->oldptr == 0) {
715 		n = V_udbinfo.ipi_count;
716 		n += imax(n / 8, 10);
717 		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
718 		return (0);
719 	}
720 
721 	if (req->newptr != 0)
722 		return (EPERM);
723 
724 	/*
725 	 * OK, now we're committed to doing something.
726 	 */
727 	INP_INFO_RLOCK(&V_udbinfo);
728 	gencnt = V_udbinfo.ipi_gencnt;
729 	n = V_udbinfo.ipi_count;
730 	INP_INFO_RUNLOCK(&V_udbinfo);
731 
732 	error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
733 		+ n * sizeof(struct xinpcb));
734 	if (error != 0)
735 		return (error);
736 
737 	xig.xig_len = sizeof xig;
738 	xig.xig_count = n;
739 	xig.xig_gen = gencnt;
740 	xig.xig_sogen = so_gencnt;
741 	error = SYSCTL_OUT(req, &xig, sizeof xig);
742 	if (error)
743 		return (error);
744 
745 	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
746 	if (inp_list == NULL)
747 		return (ENOMEM);
748 
749 	INP_INFO_RLOCK(&V_udbinfo);
750 	for (inp = LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n;
751 	     inp = LIST_NEXT(inp, inp_list)) {
752 		INP_WLOCK(inp);
753 		if (inp->inp_gencnt <= gencnt &&
754 		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
755 			in_pcbref(inp);
756 			inp_list[i++] = inp;
757 		}
758 		INP_WUNLOCK(inp);
759 	}
760 	INP_INFO_RUNLOCK(&V_udbinfo);
761 	n = i;
762 
763 	error = 0;
764 	for (i = 0; i < n; i++) {
765 		inp = inp_list[i];
766 		INP_RLOCK(inp);
767 		if (inp->inp_gencnt <= gencnt) {
768 			struct xinpcb xi;
769 
770 			bzero(&xi, sizeof(xi));
771 			xi.xi_len = sizeof xi;
772 			/* XXX should avoid extra copy */
773 			bcopy(inp, &xi.xi_inp, sizeof *inp);
774 			if (inp->inp_socket)
775 				sotoxsocket(inp->inp_socket, &xi.xi_socket);
776 			xi.xi_inp.inp_gencnt = inp->inp_gencnt;
777 			INP_RUNLOCK(inp);
778 			error = SYSCTL_OUT(req, &xi, sizeof xi);
779 		} else
780 			INP_RUNLOCK(inp);
781 	}
782 	INP_INFO_WLOCK(&V_udbinfo);
783 	for (i = 0; i < n; i++) {
784 		inp = inp_list[i];
785 		INP_RLOCK(inp);
786 		if (!in_pcbrele_rlocked(inp))
787 			INP_RUNLOCK(inp);
788 	}
789 	INP_INFO_WUNLOCK(&V_udbinfo);
790 
791 	if (!error) {
792 		/*
793 		 * Give the user an updated idea of our state.  If the
794 		 * generation differs from what we told her before, she knows
795 		 * that something happened while we were processing this
796 		 * request, and it might be necessary to retry.
797 		 */
798 		INP_INFO_RLOCK(&V_udbinfo);
799 		xig.xig_gen = V_udbinfo.ipi_gencnt;
800 		xig.xig_sogen = so_gencnt;
801 		xig.xig_count = V_udbinfo.ipi_count;
802 		INP_INFO_RUNLOCK(&V_udbinfo);
803 		error = SYSCTL_OUT(req, &xig, sizeof xig);
804 	}
805 	free(inp_list, M_TEMP);
806 	return (error);
807 }
808 
809 SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist,
810     CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
811     udp_pcblist, "S,xinpcb", "List of active UDP sockets");
812 
813 #ifdef INET
814 static int
815 udp_getcred(SYSCTL_HANDLER_ARGS)
816 {
817 	struct xucred xuc;
818 	struct sockaddr_in addrs[2];
819 	struct inpcb *inp;
820 	int error;
821 
822 	error = priv_check(req->td, PRIV_NETINET_GETCRED);
823 	if (error)
824 		return (error);
825 	error = SYSCTL_IN(req, addrs, sizeof(addrs));
826 	if (error)
827 		return (error);
828 	inp = in_pcblookup(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
829 	    addrs[0].sin_addr, addrs[0].sin_port,
830 	    INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, NULL);
831 	if (inp != NULL) {
832 		INP_RLOCK_ASSERT(inp);
833 		if (inp->inp_socket == NULL)
834 			error = ENOENT;
835 		if (error == 0)
836 			error = cr_canseeinpcb(req->td->td_ucred, inp);
837 		if (error == 0)
838 			cru2x(inp->inp_cred, &xuc);
839 		INP_RUNLOCK(inp);
840 	} else
841 		error = ENOENT;
842 	if (error == 0)
843 		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
844 	return (error);
845 }
846 
847 SYSCTL_PROC(_net_inet_udp, OID_AUTO, getcred,
848     CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
849     udp_getcred, "S,xucred", "Get the xucred of a UDP connection");
850 #endif /* INET */
851 
852 int
853 udp_ctloutput(struct socket *so, struct sockopt *sopt)
854 {
855 	int error = 0, optval;
856 	struct inpcb *inp;
857 #ifdef IPSEC_NAT_T
858 	struct udpcb *up;
859 #endif
860 
861 	inp = sotoinpcb(so);
862 	KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
863 	INP_WLOCK(inp);
864 	if (sopt->sopt_level != IPPROTO_UDP) {
865 #ifdef INET6
866 		if (INP_CHECK_SOCKAF(so, AF_INET6)) {
867 			INP_WUNLOCK(inp);
868 			error = ip6_ctloutput(so, sopt);
869 		}
870 #endif
871 #if defined(INET) && defined(INET6)
872 		else
873 #endif
874 #ifdef INET
875 		{
876 			INP_WUNLOCK(inp);
877 			error = ip_ctloutput(so, sopt);
878 		}
879 #endif
880 		return (error);
881 	}
882 
883 	switch (sopt->sopt_dir) {
884 	case SOPT_SET:
885 		switch (sopt->sopt_name) {
886 		case UDP_ENCAP:
887 			INP_WUNLOCK(inp);
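			/*
			 * The inpcb lock is dropped here because
			 * sooptcopyin() may sleep while copying the option
			 * value in from user space; it is re-acquired below
			 * before the flags are updated.
			 */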
888 			error = sooptcopyin(sopt, &optval, sizeof optval,
889 					    sizeof optval);
890 			if (error)
891 				break;
892 			inp = sotoinpcb(so);
893 			KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
894 			INP_WLOCK(inp);
895 #ifdef IPSEC_NAT_T
896 			up = intoudpcb(inp);
897 			KASSERT(up != NULL, ("%s: up == NULL", __func__));
898 #endif
899 			switch (optval) {
900 			case 0:
901 				/* Clear all UDP encap. */
902 #ifdef IPSEC_NAT_T
903 				up->u_flags &= ~UF_ESPINUDP_ALL;
904 #endif
905 				break;
906 #ifdef IPSEC_NAT_T
907 			case UDP_ENCAP_ESPINUDP:
908 			case UDP_ENCAP_ESPINUDP_NON_IKE:
909 				up->u_flags &= ~UF_ESPINUDP_ALL;
910 				if (optval == UDP_ENCAP_ESPINUDP)
911 					up->u_flags |= UF_ESPINUDP;
912 				else if (optval == UDP_ENCAP_ESPINUDP_NON_IKE)
913 					up->u_flags |= UF_ESPINUDP_NON_IKE;
914 				break;
915 #endif
916 			default:
917 				error = EINVAL;
918 				break;
919 			}
920 			INP_WUNLOCK(inp);
921 			break;
922 		default:
923 			INP_WUNLOCK(inp);
924 			error = ENOPROTOOPT;
925 			break;
926 		}
927 		break;
928 	case SOPT_GET:
929 		switch (sopt->sopt_name) {
930 #ifdef IPSEC_NAT_T
931 		case UDP_ENCAP:
932 			up = intoudpcb(inp);
933 			KASSERT(up != NULL, ("%s: up == NULL", __func__));
934 			optval = up->u_flags & UF_ESPINUDP_ALL;
935 			INP_WUNLOCK(inp);
936 			error = sooptcopyout(sopt, &optval, sizeof optval);
937 			break;
938 #endif
939 		default:
940 			INP_WUNLOCK(inp);
941 			error = ENOPROTOOPT;
942 			break;
943 		}
944 		break;
945 	}
946 	return (error);
947 }
948 
949 #ifdef INET
950 #define	UH_WLOCKED	2
951 #define	UH_RLOCKED	1
952 #define	UH_UNLOCKED	0
953 static int
954 udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
955     struct mbuf *control, struct thread *td)
956 {
957 	struct udpiphdr *ui;
958 	int len = m->m_pkthdr.len;
959 	struct in_addr faddr, laddr;
960 	struct cmsghdr *cm;
961 	struct sockaddr_in *sin, src;
962 	int error = 0;
963 	int ipflags;
964 	u_short fport, lport;
965 	int unlock_udbinfo;
966 	u_char tos;
967 
968 	/*
969 	 * udp_output() may need to temporarily bind or connect the current
970 	 * inpcb.  As such, we don't know up front whether we will need the
971 	 * pcbinfo lock or not.  Do any work to decide what is needed up
972 	 * front before acquiring any locks.
973 	 */
974 	if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
975 		if (control)
976 			m_freem(control);
977 		m_freem(m);
978 		return (EMSGSIZE);
979 	}
980 
981 	src.sin_family = 0;
982 	INP_RLOCK(inp);
983 	tos = inp->inp_ip_tos;
984 	if (control != NULL) {
985 		/*
986 		 * XXX: Currently, we assume all the optional information is
987 		 * stored in a single mbuf.
988 		 */
989 		if (control->m_next) {
990 			INP_RUNLOCK(inp);
991 			m_freem(control);
992 			m_freem(m);
993 			return (EINVAL);
994 		}
995 		for (; control->m_len > 0;
996 		    control->m_data += CMSG_ALIGN(cm->cmsg_len),
997 		    control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
998 			cm = mtod(control, struct cmsghdr *);
999 			if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0
1000 			    || cm->cmsg_len > control->m_len) {
1001 				error = EINVAL;
1002 				break;
1003 			}
1004 			if (cm->cmsg_level != IPPROTO_IP)
1005 				continue;
1006 
1007 			switch (cm->cmsg_type) {
1008 			case IP_SENDSRCADDR:
1009 				if (cm->cmsg_len !=
1010 				    CMSG_LEN(sizeof(struct in_addr))) {
1011 					error = EINVAL;
1012 					break;
1013 				}
1014 				bzero(&src, sizeof(src));
1015 				src.sin_family = AF_INET;
1016 				src.sin_len = sizeof(src);
1017 				src.sin_port = inp->inp_lport;
1018 				src.sin_addr =
1019 				    *(struct in_addr *)CMSG_DATA(cm);
1020 				break;
1021 
1022 			case IP_TOS:
1023 				if (cm->cmsg_len != CMSG_LEN(sizeof(u_char))) {
1024 					error = EINVAL;
1025 					break;
1026 				}
1027 				tos = *(u_char *)CMSG_DATA(cm);
1028 				break;
1029 
1030 			default:
1031 				error = ENOPROTOOPT;
1032 				break;
1033 			}
1034 			if (error)
1035 				break;
1036 		}
1037 		m_freem(control);
1038 	}
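	/*
	 * The loop above honors two IPPROTO_IP control messages:
	 * IP_SENDSRCADDR, which overrides the source address for this one
	 * datagram, and IP_TOS, which overrides the type-of-service byte.
	 * A userland sender attaches either one via sendmsg(2) with a
	 * cmsghdr of level IPPROTO_IP and the matching cmsg_type.
	 */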
1039 	if (error) {
1040 		INP_RUNLOCK(inp);
1041 		m_freem(m);
1042 		return (error);
1043 	}
1044 
1045 	/*
1046 	 * Depending on whether or not the application has bound or connected
1047 	 * the socket, we may have to do varying levels of work.  The optimal
1048 	 * case is for a connected UDP socket, as a global lock isn't
1049 	 * required at all.
1050 	 *
1051 	 * In order to decide which we need, we require stability of the
1052 	 * inpcb binding, which we ensure by acquiring a read lock on the
1053 	 * inpcb.  This doesn't strictly follow the lock order, so we play
1054 	 * the trylock and retry game; note that we may end up with more
1055 	 * conservative locks than required the second time around, so later
1056 	 * assertions have to accept that.  Further analysis of the number of
1057 	 * misses under contention is required.
1058 	 *
1059 	 * XXXRW: Check that hash locking update here is correct.
1060 	 */
1061 	sin = (struct sockaddr_in *)addr;
1062 	if (sin != NULL &&
1063 	    (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
1064 		INP_RUNLOCK(inp);
1065 		INP_WLOCK(inp);
1066 		INP_HASH_WLOCK(&V_udbinfo);
1067 		unlock_udbinfo = UH_WLOCKED;
1068 	} else if ((sin != NULL && (
1069 	    (sin->sin_addr.s_addr == INADDR_ANY) ||
1070 	    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
1071 	    (inp->inp_laddr.s_addr == INADDR_ANY) ||
1072 	    (inp->inp_lport == 0))) ||
1073 	    (src.sin_family == AF_INET)) {
1074 		INP_HASH_RLOCK(&V_udbinfo);
1075 		unlock_udbinfo = UH_RLOCKED;
1076 	} else
1077 		unlock_udbinfo = UH_UNLOCKED;
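	/*
	 * unlock_udbinfo now records what is held: UH_WLOCKED (inpcb write
	 * lock plus pcbinfo hash write lock) when an implicit bind may be
	 * needed, UH_RLOCKED (read locks) when the hash may merely be
	 * consulted, and UH_UNLOCKED (inpcb read lock only) for the common
	 * connected-socket case.
	 */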
1078 
1079 	/*
1080 	 * If the IP_SENDSRCADDR control message was specified, override the
1081 	 * source address for this datagram.  Its use is invalidated if the
1082 	 * address thus specified is incomplete or clobbers other inpcbs.
1083 	 */
1084 	laddr = inp->inp_laddr;
1085 	lport = inp->inp_lport;
1086 	if (src.sin_family == AF_INET) {
1087 		INP_HASH_LOCK_ASSERT(&V_udbinfo);
1088 		if ((lport == 0) ||
1089 		    (laddr.s_addr == INADDR_ANY &&
1090 		     src.sin_addr.s_addr == INADDR_ANY)) {
1091 			error = EINVAL;
1092 			goto release;
1093 		}
1094 		error = in_pcbbind_setup(inp, (struct sockaddr *)&src,
1095 		    &laddr.s_addr, &lport, td->td_ucred);
1096 		if (error)
1097 			goto release;
1098 	}
1099 
1100 	/*
1101 	 * If a UDP socket has been connected, then a local address/port will
1102 	 * have been selected and bound.
1103 	 *
1104 	 * If a UDP socket has not been connected to, then an explicit
1105 	 * destination address must be used, in which case a local
1106 	 * address/port may not have been selected and bound.
1107 	 */
1108 	if (sin != NULL) {
1109 		INP_LOCK_ASSERT(inp);
1110 		if (inp->inp_faddr.s_addr != INADDR_ANY) {
1111 			error = EISCONN;
1112 			goto release;
1113 		}
1114 
1115 		/*
1116 		 * Jail may rewrite the destination address, so let it do
1117 		 * that before we use it.
1118 		 */
1119 		error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1120 		if (error)
1121 			goto release;
1122 
1123 		/*
1124 		 * If a local address or port hasn't yet been selected, or if
1125 		 * the destination address needs to be rewritten due to using
1126 		 * a special INADDR_ constant, invoke in_pcbconnect_setup()
1127 		 * to do the heavy lifting.  Once a port is selected, we
1128 		 * commit the binding back to the socket; we also commit the
1129 		 * binding of the address if in jail.
1130 		 *
1131 		 * If we already have a valid binding and we're not
1132 		 * requesting a destination address rewrite, use a fast path.
1133 		 */
1134 		if (inp->inp_laddr.s_addr == INADDR_ANY ||
1135 		    inp->inp_lport == 0 ||
1136 		    sin->sin_addr.s_addr == INADDR_ANY ||
1137 		    sin->sin_addr.s_addr == INADDR_BROADCAST) {
1138 			INP_HASH_LOCK_ASSERT(&V_udbinfo);
1139 			error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
1140 			    &lport, &faddr.s_addr, &fport, NULL,
1141 			    td->td_ucred);
1142 			if (error)
1143 				goto release;
1144 
1145 			/*
1146 			 * XXXRW: Why not commit the port if the address is
1147 			 * !INADDR_ANY?
1148 			 */
1149 			/* Commit the local port if newly assigned. */
1150 			if (inp->inp_laddr.s_addr == INADDR_ANY &&
1151 			    inp->inp_lport == 0) {
1152 				INP_WLOCK_ASSERT(inp);
1153 				INP_HASH_WLOCK_ASSERT(&V_udbinfo);
1154 				/*
1155 				 * Remember addr if jailed, to prevent
1156 				 * rebinding.
1157 				 */
1158 				if (prison_flag(td->td_ucred, PR_IP4))
1159 					inp->inp_laddr = laddr;
1160 				inp->inp_lport = lport;
1161 				if (in_pcbinshash(inp) != 0) {
1162 					inp->inp_lport = 0;
1163 					error = EAGAIN;
1164 					goto release;
1165 				}
1166 				inp->inp_flags |= INP_ANONPORT;
1167 			}
1168 		} else {
1169 			faddr = sin->sin_addr;
1170 			fport = sin->sin_port;
1171 		}
1172 	} else {
1173 		INP_LOCK_ASSERT(inp);
1174 		faddr = inp->inp_faddr;
1175 		fport = inp->inp_fport;
1176 		if (faddr.s_addr == INADDR_ANY) {
1177 			error = ENOTCONN;
1178 			goto release;
1179 		}
1180 	}
1181 
1182 	/*
1183 	 * Calculate data length and get a mbuf for UDP, IP, and possible
1184 	 * link-layer headers.  Immediately slide the data pointer forward
1185 	 * past the link-layer portion, which we won't use at this layer.
1186 	 */
1187 	M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_NOWAIT);
1188 	if (m == NULL) {
1189 		error = ENOBUFS;
1190 		goto release;
1191 	}
1192 	m->m_data += max_linkhdr;
1193 	m->m_len -= max_linkhdr;
1194 	m->m_pkthdr.len -= max_linkhdr;
1195 
1196 	/*
1197 	 * Fill in the mbuf with an extended UDP header, with addresses and
1198 	 * length in network byte order.
1199 	 */
1200 	ui = mtod(m, struct udpiphdr *);
1201 	bzero(ui->ui_x1, sizeof(ui->ui_x1));	/* XXX still needed? */
1202 	ui->ui_v = IPVERSION << 4;
1203 	ui->ui_pr = IPPROTO_UDP;
1204 	ui->ui_src = laddr;
1205 	ui->ui_dst = faddr;
1206 	ui->ui_sport = lport;
1207 	ui->ui_dport = fport;
1208 	ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
1209 
1210 	/*
1211 	 * Set the Don't Fragment bit in the IP header.
1212 	 */
1213 	if (inp->inp_flags & INP_DONTFRAG) {
1214 		struct ip *ip;
1215 
1216 		ip = (struct ip *)&ui->ui_i;
1217 		ip->ip_off |= htons(IP_DF);
1218 	}
1219 
1220 	ipflags = 0;
1221 	if (inp->inp_socket->so_options & SO_DONTROUTE)
1222 		ipflags |= IP_ROUTETOIF;
1223 	if (inp->inp_socket->so_options & SO_BROADCAST)
1224 		ipflags |= IP_ALLOWBROADCAST;
1225 	if (inp->inp_flags & INP_ONESBCAST)
1226 		ipflags |= IP_SENDONES;
1227 
1228 #ifdef MAC
1229 	mac_inpcb_create_mbuf(inp, m);
1230 #endif
1231 
1232 	/*
1233 	 * Set up checksum and output datagram.
1234 	 */
1235 	if (V_udp_cksum) {
1236 		if (inp->inp_flags & INP_ONESBCAST)
1237 			faddr.s_addr = INADDR_BROADCAST;
1238 		ui->ui_sum = in_pseudo(ui->ui_src.s_addr, faddr.s_addr,
1239 		    htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
1240 		m->m_pkthdr.csum_flags = CSUM_UDP;
1241 		m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
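		/*
		 * Only the pseudo-header sum is stored in uh_sum here; the
		 * remainder of the UDP checksum is filled in by the NIC, or
		 * by in_delayed_cksum() on the way through ip_output() when
		 * the outgoing interface lacks UDP checksum offload.
		 */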
1242 	} else
1243 		ui->ui_sum = 0;
1244 	((struct ip *)ui)->ip_len = htons(sizeof(struct udpiphdr) + len);
1245 	((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl;	/* XXX */
1246 	((struct ip *)ui)->ip_tos = tos;		/* XXX */
1247 	UDPSTAT_INC(udps_opackets);
1248 
1249 	if (unlock_udbinfo == UH_WLOCKED)
1250 		INP_HASH_WUNLOCK(&V_udbinfo);
1251 	else if (unlock_udbinfo == UH_RLOCKED)
1252 		INP_HASH_RUNLOCK(&V_udbinfo);
1253 	UDP_PROBE(send, NULL, inp, &ui->ui_i, inp, &ui->ui_u);
1254 	error = ip_output(m, inp->inp_options, NULL, ipflags,
1255 	    inp->inp_moptions, inp);
1256 	if (unlock_udbinfo == UH_WLOCKED)
1257 		INP_WUNLOCK(inp);
1258 	else
1259 		INP_RUNLOCK(inp);
1260 	return (error);
1261 
1262 release:
1263 	if (unlock_udbinfo == UH_WLOCKED) {
1264 		INP_HASH_WUNLOCK(&V_udbinfo);
1265 		INP_WUNLOCK(inp);
1266 	} else if (unlock_udbinfo == UH_RLOCKED) {
1267 		INP_HASH_RUNLOCK(&V_udbinfo);
1268 		INP_RUNLOCK(inp);
1269 	} else
1270 		INP_RUNLOCK(inp);
1271 	m_freem(m);
1272 	return (error);
1273 }
1274 
1275 
1276 #if defined(IPSEC) && defined(IPSEC_NAT_T)
1277 /*
1278  * Potentially decap ESP in UDP frame.  Check for an ESP header
1279  * and optional marker; if present, strip the UDP header and
1280  * push the result through IPSec.
1281  *
1282  * Returns mbuf to be processed (potentially re-allocated) or
1283  * NULL if consumed and/or processed.
1284  */
1285 static struct mbuf *
1286 udp4_espdecap(struct inpcb *inp, struct mbuf *m, int off)
1287 {
1288 	size_t minlen, payload, skip, iphlen;
1289 	caddr_t data;
1290 	struct udpcb *up;
1291 	struct m_tag *tag;
1292 	struct udphdr *udphdr;
1293 	struct ip *ip;
1294 
1295 	INP_RLOCK_ASSERT(inp);
1296 
1297 	/*
1298 	 * Pull up data so the longest case is contiguous:
1299 	 *    IP/UDP hdr + non ESP marker + ESP hdr.
1300 	 */
1301 	minlen = off + sizeof(uint64_t) + sizeof(struct esp);
1302 	if (minlen > m->m_pkthdr.len)
1303 		minlen = m->m_pkthdr.len;
1304 	if ((m = m_pullup(m, minlen)) == NULL) {
1305 		IPSECSTAT_INC(ips_in_inval);
1306 		return (NULL);		/* Bypass caller processing. */
1307 	}
1308 	data = mtod(m, caddr_t);	/* Points to ip header. */
1309 	payload = m->m_len - off;	/* Size of payload. */
1310 
1311 	if (payload == 1 && data[off] == '\xff')
1312 		return (m);		/* NB: keepalive packet, no decap. */
1313 
1314 	up = intoudpcb(inp);
1315 	KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
1316 	KASSERT((up->u_flags & UF_ESPINUDP_ALL) != 0,
1317 	    ("u_flags 0x%x", up->u_flags));
1318 
1319 	/*
1320 	 * Check that the payload is large enough to hold an
1321 	 * ESP header and compute the amount of data to remove.
1322 	 *
1323 	 * NB: the caller has already done a pullup for us.
1324 	 * XXX can we assume alignment and eliminate bcopys?
1325 	 */
1326 	if (up->u_flags & UF_ESPINUDP_NON_IKE) {
1327 		/*
1328 		 * draft-ietf-ipsec-nat-t-ike-0[01].txt and
1329 		 * draft-ietf-ipsec-udp-encaps-(00/)01.txt, ignoring
1330 		 * possible AH mode non-IKE marker+non-ESP marker
1331 		 * from draft-ietf-ipsec-udp-encaps-00.txt.
1332 		 */
1333 		uint64_t marker;
1334 
1335 		if (payload <= sizeof(uint64_t) + sizeof(struct esp))
1336 			return (m);	/* NB: no decap. */
1337 		bcopy(data + off, &marker, sizeof(uint64_t));
1338 		if (marker != 0)	/* Non-IKE marker. */
1339 			return (m);	/* NB: no decap. */
1340 		skip = sizeof(uint64_t) + sizeof(struct udphdr);
1341 	} else {
1342 		uint32_t spi;
1343 
1344 		if (payload <= sizeof(struct esp)) {
1345 			IPSECSTAT_INC(ips_in_inval);
1346 			m_freem(m);
1347 			return (NULL);	/* Discard. */
1348 		}
1349 		bcopy(data + off, &spi, sizeof(uint32_t));
1350 		if (spi == 0)		/* Non-ESP marker. */
1351 			return (m);	/* NB: no decap. */
1352 		skip = sizeof(struct udphdr);
1353 	}
1354 
1355 	/*
1356 	 * Setup a PACKET_TAG_IPSEC_NAT_T_PORT tag to remember
1357 	 * the UDP ports. This is required if we want to select
1358 	 * the right SPD for multiple hosts behind same NAT.
1359 	 *
1360 	 * NB: ports are maintained in network byte order everywhere
1361 	 *     in the NAT-T code.
1362 	 */
1363 	tag = m_tag_get(PACKET_TAG_IPSEC_NAT_T_PORTS,
1364 		2 * sizeof(uint16_t), M_NOWAIT);
1365 	if (tag == NULL) {
1366 		IPSECSTAT_INC(ips_in_nomem);
1367 		m_freem(m);
1368 		return (NULL);		/* Discard. */
1369 	}
1370 	iphlen = off - sizeof(struct udphdr);
1371 	udphdr = (struct udphdr *)(data + iphlen);
1372 	((uint16_t *)(tag + 1))[0] = udphdr->uh_sport;
1373 	((uint16_t *)(tag + 1))[1] = udphdr->uh_dport;
1374 	m_tag_prepend(m, tag);
1375 
1376 	/*
1377 	 * Remove the UDP header (and possibly the non-ESP marker);
1378 	 * the IP header length is iphlen.
1379 	 * Before:
1380 	 *   <--- off --->
1381 	 *   +----+------+-----+
1382 	 *   | IP |  UDP | ESP |
1383 	 *   +----+------+-----+
1384 	 *        <-skip->
1385 	 * After:
1386 	 *          +----+-----+
1387 	 *          | IP | ESP |
1388 	 *          +----+-----+
1389 	 *   <-skip->
1390 	 */
1391 	ovbcopy(data, data + skip, iphlen);
1392 	m_adj(m, skip);
1393 
1394 	ip = mtod(m, struct ip *);
1395 	ip->ip_len = htons(ntohs(ip->ip_len) - skip);
1396 	ip->ip_p = IPPROTO_ESP;
1397 
1398 	/*
1399 	 * We cannot yet update the cksums so clear any
1400 	 * h/w cksum flags as they are no longer valid.
1401 	 */
1402 	if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)
1403 		m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
1404 
1405 	(void) ipsec4_common_input(m, iphlen, ip->ip_p);
1406 	return (NULL);			/* NB: consumed, bypass processing. */
1407 }
1408 #endif /* defined(IPSEC) && defined(IPSEC_NAT_T) */
1409 
1410 static void
1411 udp_abort(struct socket *so)
1412 {
1413 	struct inpcb *inp;
1414 
1415 	inp = sotoinpcb(so);
1416 	KASSERT(inp != NULL, ("udp_abort: inp == NULL"));
1417 	INP_WLOCK(inp);
1418 	if (inp->inp_faddr.s_addr != INADDR_ANY) {
1419 		INP_HASH_WLOCK(&V_udbinfo);
1420 		in_pcbdisconnect(inp);
1421 		inp->inp_laddr.s_addr = INADDR_ANY;
1422 		INP_HASH_WUNLOCK(&V_udbinfo);
1423 		soisdisconnected(so);
1424 	}
1425 	INP_WUNLOCK(inp);
1426 }
1427 
1428 static int
1429 udp_attach(struct socket *so, int proto, struct thread *td)
1430 {
1431 	struct inpcb *inp;
1432 	int error;
1433 
1434 	inp = sotoinpcb(so);
1435 	KASSERT(inp == NULL, ("udp_attach: inp != NULL"));
1436 	error = soreserve(so, udp_sendspace, udp_recvspace);
1437 	if (error)
1438 		return (error);
1439 	INP_INFO_WLOCK(&V_udbinfo);
1440 	error = in_pcballoc(so, &V_udbinfo);
1441 	if (error) {
1442 		INP_INFO_WUNLOCK(&V_udbinfo);
1443 		return (error);
1444 	}
1445 
1446 	inp = sotoinpcb(so);
1447 	inp->inp_vflag |= INP_IPV4;
1448 	inp->inp_ip_ttl = V_ip_defttl;
1449 
1450 	error = udp_newudpcb(inp);
1451 	if (error) {
1452 		in_pcbdetach(inp);
1453 		in_pcbfree(inp);
1454 		INP_INFO_WUNLOCK(&V_udbinfo);
1455 		return (error);
1456 	}
1457 
1458 	INP_WUNLOCK(inp);
1459 	INP_INFO_WUNLOCK(&V_udbinfo);
1460 	return (0);
1461 }
1462 #endif /* INET */
1463 
1464 int
1465 udp_set_kernel_tunneling(struct socket *so, udp_tun_func_t f)
1466 {
1467 	struct inpcb *inp;
1468 	struct udpcb *up;
1469 
1470 	KASSERT(so->so_type == SOCK_DGRAM,
1471 	    ("udp_set_kernel_tunneling: !dgram"));
1472 	inp = sotoinpcb(so);
1473 	KASSERT(inp != NULL, ("udp_set_kernel_tunneling: inp == NULL"));
1474 	INP_WLOCK(inp);
1475 	up = intoudpcb(inp);
1476 	if (up->u_tun_func != NULL) {
1477 		INP_WUNLOCK(inp);
1478 		return (EBUSY);
1479 	}
1480 	up->u_tun_func = f;
1481 	INP_WUNLOCK(inp);
1482 	return (0);
1483 }
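/*
 * A typical in-kernel consumer creates a UDP socket, binds it, and then
 * installs its input handler, e.g. (a sketch only; "my_tunnel_input" is a
 * placeholder for a function matching udp_tun_func_t):
 *
 *	error = udp_set_kernel_tunneling(so, my_tunnel_input);
 *
 * Once a handler is installed, udp_append() hands every datagram destined
 * for the socket to it instead of queueing the data on the socket buffer.
 */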
1484 
1485 #ifdef INET
1486 static int
1487 udp_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
1488 {
1489 	struct inpcb *inp;
1490 	int error;
1491 
1492 	inp = sotoinpcb(so);
1493 	KASSERT(inp != NULL, ("udp_bind: inp == NULL"));
1494 	INP_WLOCK(inp);
1495 	INP_HASH_WLOCK(&V_udbinfo);
1496 	error = in_pcbbind(inp, nam, td->td_ucred);
1497 	INP_HASH_WUNLOCK(&V_udbinfo);
1498 	INP_WUNLOCK(inp);
1499 	return (error);
1500 }
1501 
1502 static void
1503 udp_close(struct socket *so)
1504 {
1505 	struct inpcb *inp;
1506 
1507 	inp = sotoinpcb(so);
1508 	KASSERT(inp != NULL, ("udp_close: inp == NULL"));
1509 	INP_WLOCK(inp);
1510 	if (inp->inp_faddr.s_addr != INADDR_ANY) {
1511 		INP_HASH_WLOCK(&V_udbinfo);
1512 		in_pcbdisconnect(inp);
1513 		inp->inp_laddr.s_addr = INADDR_ANY;
1514 		INP_HASH_WUNLOCK(&V_udbinfo);
1515 		soisdisconnected(so);
1516 	}
1517 	INP_WUNLOCK(inp);
1518 }
1519 
1520 static int
1521 udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1522 {
1523 	struct inpcb *inp;
1524 	int error;
1525 	struct sockaddr_in *sin;
1526 
1527 	inp = sotoinpcb(so);
1528 	KASSERT(inp != NULL, ("udp_connect: inp == NULL"));
1529 	INP_WLOCK(inp);
1530 	if (inp->inp_faddr.s_addr != INADDR_ANY) {
1531 		INP_WUNLOCK(inp);
1532 		return (EISCONN);
1533 	}
1534 	sin = (struct sockaddr_in *)nam;
1535 	error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1536 	if (error != 0) {
1537 		INP_WUNLOCK(inp);
1538 		return (error);
1539 	}
1540 	INP_HASH_WLOCK(&V_udbinfo);
1541 	error = in_pcbconnect(inp, nam, td->td_ucred);
1542 	INP_HASH_WUNLOCK(&V_udbinfo);
1543 	if (error == 0)
1544 		soisconnected(so);
1545 	INP_WUNLOCK(inp);
1546 	return (error);
1547 }
1548 
1549 static void
1550 udp_detach(struct socket *so)
1551 {
1552 	struct inpcb *inp;
1553 	struct udpcb *up;
1554 
1555 	inp = sotoinpcb(so);
1556 	KASSERT(inp != NULL, ("udp_detach: inp == NULL"));
1557 	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
1558 	    ("udp_detach: not disconnected"));
1559 	INP_INFO_WLOCK(&V_udbinfo);
1560 	INP_WLOCK(inp);
1561 	up = intoudpcb(inp);
1562 	KASSERT(up != NULL, ("%s: up == NULL", __func__));
1563 	inp->inp_ppcb = NULL;
1564 	in_pcbdetach(inp);
1565 	in_pcbfree(inp);
1566 	INP_INFO_WUNLOCK(&V_udbinfo);
1567 	udp_discardcb(up);
1568 }
1569 
1570 static int
1571 udp_disconnect(struct socket *so)
1572 {
1573 	struct inpcb *inp;
1574 
1575 	inp = sotoinpcb(so);
1576 	KASSERT(inp != NULL, ("udp_disconnect: inp == NULL"));
1577 	INP_WLOCK(inp);
1578 	if (inp->inp_faddr.s_addr == INADDR_ANY) {
1579 		INP_WUNLOCK(inp);
1580 		return (ENOTCONN);
1581 	}
1582 	INP_HASH_WLOCK(&V_udbinfo);
1583 	in_pcbdisconnect(inp);
1584 	inp->inp_laddr.s_addr = INADDR_ANY;
1585 	INP_HASH_WUNLOCK(&V_udbinfo);
1586 	SOCK_LOCK(so);
1587 	so->so_state &= ~SS_ISCONNECTED;		/* XXX */
1588 	SOCK_UNLOCK(so);
1589 	INP_WUNLOCK(inp);
1590 	return (0);
1591 }
1592 
1593 static int
1594 udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
1595     struct mbuf *control, struct thread *td)
1596 {
1597 	struct inpcb *inp;
1598 
1599 	inp = sotoinpcb(so);
1600 	KASSERT(inp != NULL, ("udp_send: inp == NULL"));
1601 	return (udp_output(inp, m, addr, control, td));
1602 }
1603 #endif /* INET */
1604 
1605 int
1606 udp_shutdown(struct socket *so)
1607 {
1608 	struct inpcb *inp;
1609 
1610 	inp = sotoinpcb(so);
1611 	KASSERT(inp != NULL, ("udp_shutdown: inp == NULL"));
1612 	INP_WLOCK(inp);
1613 	socantsendmore(so);
1614 	INP_WUNLOCK(inp);
1615 	return (0);
1616 }
1617 
1618 #ifdef INET
1619 struct pr_usrreqs udp_usrreqs = {
1620 	.pru_abort =		udp_abort,
1621 	.pru_attach =		udp_attach,
1622 	.pru_bind =		udp_bind,
1623 	.pru_connect =		udp_connect,
1624 	.pru_control =		in_control,
1625 	.pru_detach =		udp_detach,
1626 	.pru_disconnect =	udp_disconnect,
1627 	.pru_peeraddr =		in_getpeeraddr,
1628 	.pru_send =		udp_send,
1629 	.pru_soreceive =	soreceive_dgram,
1630 	.pru_sosend =		sosend_dgram,
1631 	.pru_shutdown =		udp_shutdown,
1632 	.pru_sockaddr =		in_getsockaddr,
1633 	.pru_sosetlabel =	in_pcbsosetlabel,
1634 	.pru_close =		udp_close,
1635 };
1636 #endif /* INET */
1637