xref: /freebsd/sys/netinet/udp_usrreq.c (revision 3f0164abf32b9b761e0a2cb4bdca3a8b84f156d4)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3  *	The Regents of the University of California.
4  * Copyright (c) 2008 Robert N. M. Watson
5  * Copyright (c) 2010-2011 Juniper Networks, Inc.
6  * All rights reserved.
7  *
8  * Portions of this software were developed by Robert N. M. Watson under
9  * contract to Juniper Networks, Inc.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *	@(#)udp_usrreq.c	8.6 (Berkeley) 5/23/95
36  */
37 
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40 
41 #include "opt_ipfw.h"
42 #include "opt_inet.h"
43 #include "opt_inet6.h"
44 #include "opt_ipsec.h"
45 
46 #include <sys/param.h>
47 #include <sys/domain.h>
48 #include <sys/eventhandler.h>
49 #include <sys/jail.h>
50 #include <sys/kernel.h>
51 #include <sys/lock.h>
52 #include <sys/malloc.h>
53 #include <sys/mbuf.h>
54 #include <sys/priv.h>
55 #include <sys/proc.h>
56 #include <sys/protosw.h>
57 #include <sys/signalvar.h>
58 #include <sys/socket.h>
59 #include <sys/socketvar.h>
60 #include <sys/sx.h>
61 #include <sys/sysctl.h>
62 #include <sys/syslog.h>
63 #include <sys/systm.h>
64 
65 #include <vm/uma.h>
66 
67 #include <net/if.h>
68 #include <net/route.h>
69 
70 #include <netinet/in.h>
71 #include <netinet/in_pcb.h>
72 #include <netinet/in_systm.h>
73 #include <netinet/in_var.h>
74 #include <netinet/ip.h>
75 #ifdef INET6
76 #include <netinet/ip6.h>
77 #endif
78 #include <netinet/ip_icmp.h>
79 #include <netinet/icmp_var.h>
80 #include <netinet/ip_var.h>
81 #include <netinet/ip_options.h>
82 #ifdef INET6
83 #include <netinet6/ip6_var.h>
84 #endif
85 #include <netinet/udp.h>
86 #include <netinet/udp_var.h>
87 
88 #ifdef IPSEC
89 #include <netipsec/ipsec.h>
90 #include <netipsec/esp.h>
91 #endif
92 
93 #include <machine/in_cksum.h>
94 
95 #include <security/mac/mac_framework.h>
96 
97 /*
98  * UDP protocol implementation.
99  * Per RFC 768, August, 1980.
100  */
101 
102 /*
103  * BSD 4.2 defaulted the UDP checksum to off.  Turning off UDP checksums
104  * removes the only data-integrity mechanism for these packets; malformed
105  * packets that would otherwise be discarded due to bad checksums may then
106  * cause problems (especially for NFS data blocks).
107  */
108 VNET_DEFINE(int, udp_cksum) = 1;
109 SYSCTL_VNET_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW,
110     &VNET_NAME(udp_cksum), 0, "compute udp checksum");
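/*
 * From userland this appears as the net.inet.udp.checksum sysctl; for
 * example, "sysctl net.inet.udp.checksum=1" (the default) re-enables
 * checksum generation if it has been turned off.
 */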
111 
112 int	udp_log_in_vain = 0;
113 SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
114     &udp_log_in_vain, 0, "Log all incoming UDP packets");
115 
116 VNET_DEFINE(int, udp_blackhole) = 0;
117 SYSCTL_VNET_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW,
118     &VNET_NAME(udp_blackhole), 0,
119     "Do not send port unreachables for refused connects");
120 
121 u_long	udp_sendspace = 9216;		/* really max datagram size */
123 SYSCTL_ULONG(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
124     &udp_sendspace, 0, "Maximum outgoing UDP datagram size");
125 
126 u_long	udp_recvspace = 40 * (1024 +
127 #ifdef INET6
128 				      sizeof(struct sockaddr_in6)
129 #else
130 				      sizeof(struct sockaddr_in)
131 #endif
132 				      );	/* 40 1K datagrams */
133 
134 SYSCTL_ULONG(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
135     &udp_recvspace, 0, "Maximum space for incoming UDP datagrams");
136 
137 VNET_DEFINE(struct inpcbhead, udb);		/* from udp_var.h */
138 VNET_DEFINE(struct inpcbinfo, udbinfo);
139 static VNET_DEFINE(uma_zone_t, udpcb_zone);
140 #define	V_udpcb_zone			VNET(udpcb_zone)
141 
142 #ifndef UDBHASHSIZE
143 #define	UDBHASHSIZE	128
144 #endif
145 
146 VNET_PCPUSTAT_DEFINE(struct udpstat, udpstat);		/* from udp_var.h */
147 VNET_PCPUSTAT_SYSINIT(udpstat);
148 SYSCTL_VNET_PCPUSTAT(_net_inet_udp, UDPCTL_STATS, stats, struct udpstat,
149     udpstat, "UDP statistics (struct udpstat, netinet/udp_var.h)");
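/*
 * These counters are what "netstat -s -p udp" reports from userland
 * (sysctl net.inet.udp.stats).
 */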
150 
151 #ifdef VIMAGE
152 VNET_PCPUSTAT_SYSUNINIT(udpstat);
153 #endif /* VIMAGE */
154 #ifdef INET
155 static void	udp_detach(struct socket *so);
156 static int	udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
157 		    struct mbuf *, struct thread *);
158 #endif
159 
160 #ifdef IPSEC
161 #ifdef IPSEC_NAT_T
162 #define	UF_ESPINUDP_ALL	(UF_ESPINUDP_NON_IKE|UF_ESPINUDP)
163 #ifdef INET
164 static struct mbuf *udp4_espdecap(struct inpcb *, struct mbuf *, int);
165 #endif
166 #endif /* IPSEC_NAT_T */
167 #endif /* IPSEC */
168 
169 static void
170 udp_zone_change(void *tag)
171 {
172 
173 	uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
174 	uma_zone_set_max(V_udpcb_zone, maxsockets);
175 }
176 
177 static int
178 udp_inpcb_init(void *mem, int size, int flags)
179 {
180 	struct inpcb *inp;
181 
182 	inp = mem;
183 	INP_LOCK_INIT(inp, "inp", "udpinp");
184 	return (0);
185 }
186 
187 void
188 udp_init(void)
189 {
190 
191 	in_pcbinfo_init(&V_udbinfo, "udp", &V_udb, UDBHASHSIZE, UDBHASHSIZE,
192 	    "udp_inpcb", udp_inpcb_init, NULL, UMA_ZONE_NOFREE,
193 	    IPI_HASHFIELDS_2TUPLE);
194 	V_udpcb_zone = uma_zcreate("udpcb", sizeof(struct udpcb),
195 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
196 	uma_zone_set_max(V_udpcb_zone, maxsockets);
197 	uma_zone_set_warning(V_udpcb_zone, "kern.ipc.maxsockets limit reached");
198 	EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
199 	    EVENTHANDLER_PRI_ANY);
200 }
201 
202 /*
203  * Kernel module interface for updating udpstat.  The argument is an index
204  * into udpstat treated as an array of u_long.  While this encodes the
205  * general layout of udpstat into the caller, it doesn't encode its location,
206  * so that future changes to add, for example, per-CPU stats support won't
207  * cause binary compatibility problems for kernel modules.
208  */
209 void
210 kmod_udpstat_inc(int statnum)
211 {
212 
213 	counter_u64_add(VNET(udpstat)[statnum], 1);
214 }
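/*
 * Modules normally reach this through the KMOD_UDPSTAT_INC() wrapper in
 * netinet/udp_var.h, which translates a struct udpstat field name into the
 * index expected here.
 */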
215 
216 int
217 udp_newudpcb(struct inpcb *inp)
218 {
219 	struct udpcb *up;
220 
221 	up = uma_zalloc(V_udpcb_zone, M_NOWAIT | M_ZERO);
222 	if (up == NULL)
223 		return (ENOBUFS);
224 	inp->inp_ppcb = up;
225 	return (0);
226 }
227 
228 void
229 udp_discardcb(struct udpcb *up)
230 {
231 
232 	uma_zfree(V_udpcb_zone, up);
233 }
234 
235 #ifdef VIMAGE
236 void
237 udp_destroy(void)
238 {
239 
240 	in_pcbinfo_destroy(&V_udbinfo);
241 	uma_zdestroy(V_udpcb_zone);
242 }
243 #endif
244 
245 #ifdef INET
246 /*
247  * Subroutine of udp_input() that appends the provided mbuf chain to the
248  * passed pcb/socket.  The caller must provide a sockaddr_in via udp_in that
249  * contains the source address.  If the socket ends up being an IPv6 socket,
250  * udp_append() will convert to a sockaddr_in6 before passing the address
251  * into the socket code.
252  */
253 static void
254 udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
255     struct sockaddr_in *udp_in)
256 {
257 	struct sockaddr *append_sa;
258 	struct socket *so;
259 	struct mbuf *opts = 0;
260 #ifdef INET6
261 	struct sockaddr_in6 udp_in6;
262 #endif
263 	struct udpcb *up;
264 
265 	INP_LOCK_ASSERT(inp);
266 
267 	/*
268 	 * Engage the tunneling protocol.
269 	 */
270 	up = intoudpcb(inp);
271 	if (up->u_tun_func != NULL) {
272 		(*up->u_tun_func)(n, off, inp);
273 		return;
274 	}
275 
276 	if (n == NULL)
277 		return;
278 
279 	off += sizeof(struct udphdr);
280 
281 #ifdef IPSEC
282 	/* Check AH/ESP integrity. */
283 	if (ipsec4_in_reject(n, inp)) {
284 		m_freem(n);
285 		IPSECSTAT_INC(ips_in_polvio);
286 		return;
287 	}
288 #ifdef IPSEC_NAT_T
289 	up = intoudpcb(inp);
290 	KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
291 	if (up->u_flags & UF_ESPINUDP_ALL) {	/* IPSec UDP encaps. */
292 		n = udp4_espdecap(inp, n, off);
293 		if (n == NULL)				/* Consumed. */
294 			return;
295 	}
296 #endif /* IPSEC_NAT_T */
297 #endif /* IPSEC */
298 #ifdef MAC
299 	if (mac_inpcb_check_deliver(inp, n) != 0) {
300 		m_freem(n);
301 		return;
302 	}
303 #endif /* MAC */
304 	if (inp->inp_flags & INP_CONTROLOPTS ||
305 	    inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) {
306 #ifdef INET6
307 		if (inp->inp_vflag & INP_IPV6)
308 			(void)ip6_savecontrol_v4(inp, n, &opts, NULL);
309 		else
310 #endif /* INET6 */
311 			ip_savecontrol(inp, &opts, ip, n);
312 	}
313 #ifdef INET6
314 	if (inp->inp_vflag & INP_IPV6) {
315 		bzero(&udp_in6, sizeof(udp_in6));
316 		udp_in6.sin6_len = sizeof(udp_in6);
317 		udp_in6.sin6_family = AF_INET6;
318 		in6_sin_2_v4mapsin6(udp_in, &udp_in6);
319 		append_sa = (struct sockaddr *)&udp_in6;
320 	} else
321 #endif /* INET6 */
322 		append_sa = (struct sockaddr *)udp_in;
323 	m_adj(n, off);
324 
325 	so = inp->inp_socket;
326 	SOCKBUF_LOCK(&so->so_rcv);
327 	if (sbappendaddr_locked(&so->so_rcv, append_sa, n, opts) == 0) {
328 		SOCKBUF_UNLOCK(&so->so_rcv);
329 		m_freem(n);
330 		if (opts)
331 			m_freem(opts);
332 		UDPSTAT_INC(udps_fullsock);
333 	} else
334 		sorwakeup_locked(so);
335 }
336 
337 void
338 udp_input(struct mbuf *m, int off)
339 {
340 	int iphlen = off;
341 	struct ip *ip;
342 	struct udphdr *uh;
343 	struct ifnet *ifp;
344 	struct inpcb *inp;
345 	uint16_t len, ip_len;
346 	struct ip save_ip;
347 	struct sockaddr_in udp_in;
348 	struct m_tag *fwd_tag;
349 
350 	ifp = m->m_pkthdr.rcvif;
351 	UDPSTAT_INC(udps_ipackets);
352 
353 	/*
354 	 * Strip IP options, if any.  Ideally we would skip this, make the
355 	 * options available to the user, and use them on returned packets, but
356 	 * we don't yet have a way to check the checksum with options present.
357 	 */
358 	if (iphlen > sizeof (struct ip)) {
359 		ip_stripoptions(m);
360 		iphlen = sizeof(struct ip);
361 	}
362 
363 	/*
364 	 * Get IP and UDP header together in first mbuf.
365 	 */
366 	ip = mtod(m, struct ip *);
367 	if (m->m_len < iphlen + sizeof(struct udphdr)) {
368 		if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == 0) {
369 			UDPSTAT_INC(udps_hdrops);
370 			return;
371 		}
372 		ip = mtod(m, struct ip *);
373 	}
374 	uh = (struct udphdr *)((caddr_t)ip + iphlen);
375 
376 	/*
377 	 * A destination port of 0 is illegal, per RFC 768.
378 	 */
379 	if (uh->uh_dport == 0)
380 		goto badunlocked;
381 
382 	/*
383 	 * Construct a sockaddr-format source address; the source address and
384 	 * datagram are later stuffed into the user buffer by udp_append().
385 	 */
386 	bzero(&udp_in, sizeof(udp_in));
387 	udp_in.sin_len = sizeof(udp_in);
388 	udp_in.sin_family = AF_INET;
389 	udp_in.sin_port = uh->uh_sport;
390 	udp_in.sin_addr = ip->ip_src;
391 
392 	/*
393 	 * Make mbuf data length reflect UDP length.  If not enough data to
394 	 * reflect UDP length, drop.
395 	 */
396 	len = ntohs((u_short)uh->uh_ulen);
397 	ip_len = ntohs(ip->ip_len) - iphlen;
398 	if (ip_len != len) {
399 		if (len > ip_len || len < sizeof(struct udphdr)) {
400 			UDPSTAT_INC(udps_badlen);
401 			goto badunlocked;
402 		}
403 		m_adj(m, len - ip_len);
404 	}
405 
406 	/*
407 	 * Save a copy of the IP header in case we want to restore it when
408 	 * sending an ICMP error message in response.
409 	 */
410 	if (!V_udp_blackhole)
411 		save_ip = *ip;
412 	else
413 		memset(&save_ip, 0, sizeof(save_ip));
414 
415 	/*
416 	 * Checksum extended UDP header and data.
417 	 */
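	/*
	 * If the driver has already validated the checksum (CSUM_DATA_VALID),
	 * use its result; otherwise fall back to a software checksum,
	 * temporarily overlaying the IP header with a pseudo-header
	 * (struct ipovly) so that in_cksum() covers the pseudo-header, the
	 * UDP header, and the payload in one pass.
	 */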
418 	if (uh->uh_sum) {
419 		u_short uh_sum;
420 
421 		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
422 			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
423 				uh_sum = m->m_pkthdr.csum_data;
424 			else
425 				uh_sum = in_pseudo(ip->ip_src.s_addr,
426 				    ip->ip_dst.s_addr, htonl((u_short)len +
427 				    m->m_pkthdr.csum_data + IPPROTO_UDP));
428 			uh_sum ^= 0xffff;
429 		} else {
430 			char b[9];
431 
432 			bcopy(((struct ipovly *)ip)->ih_x1, b, 9);
433 			bzero(((struct ipovly *)ip)->ih_x1, 9);
434 			((struct ipovly *)ip)->ih_len = uh->uh_ulen;
435 			uh_sum = in_cksum(m, len + sizeof (struct ip));
436 			bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
437 		}
438 		if (uh_sum) {
439 			UDPSTAT_INC(udps_badsum);
440 			m_freem(m);
441 			return;
442 		}
443 	} else
444 		UDPSTAT_INC(udps_nosum);
445 
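	/*
	 * Broadcast and multicast datagrams may have to be delivered to more
	 * than one matching socket, so they are handled by walking the whole
	 * UDP PCB list below rather than by a single hash lookup.
	 */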
446 	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
447 	    in_broadcast(ip->ip_dst, ifp)) {
448 		struct inpcb *last;
449 		struct ip_moptions *imo;
450 
451 		INP_INFO_RLOCK(&V_udbinfo);
452 		last = NULL;
453 		LIST_FOREACH(inp, &V_udb, inp_list) {
454 			if (inp->inp_lport != uh->uh_dport)
455 				continue;
456 #ifdef INET6
457 			if ((inp->inp_vflag & INP_IPV4) == 0)
458 				continue;
459 #endif
460 			if (inp->inp_laddr.s_addr != INADDR_ANY &&
461 			    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
462 				continue;
463 			if (inp->inp_faddr.s_addr != INADDR_ANY &&
464 			    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
465 				continue;
466 			if (inp->inp_fport != 0 &&
467 			    inp->inp_fport != uh->uh_sport)
468 				continue;
469 
470 			INP_RLOCK(inp);
471 
472 			/*
473 			 * XXXRW: Because we weren't holding either the inpcb
474 			 * or the hash lock when we checked for a match
475 			 * before, we should probably recheck now that the
476 			 * inpcb lock is held.
477 			 */
478 
479 			/*
480 			 * Handle socket delivery policy for any-source
481 			 * and source-specific multicast. [RFC3678]
482 			 */
483 			imo = inp->inp_moptions;
484 			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
485 				struct sockaddr_in	 group;
486 				int			 blocked;
487 				if (imo == NULL) {
488 					INP_RUNLOCK(inp);
489 					continue;
490 				}
491 				bzero(&group, sizeof(struct sockaddr_in));
492 				group.sin_len = sizeof(struct sockaddr_in);
493 				group.sin_family = AF_INET;
494 				group.sin_addr = ip->ip_dst;
495 
496 				blocked = imo_multi_filter(imo, ifp,
497 					(struct sockaddr *)&group,
498 					(struct sockaddr *)&udp_in);
499 				if (blocked != MCAST_PASS) {
500 					if (blocked == MCAST_NOTGMEMBER)
501 						IPSTAT_INC(ips_notmember);
502 					if (blocked == MCAST_NOTSMEMBER ||
503 					    blocked == MCAST_MUTED)
504 						UDPSTAT_INC(udps_filtermcast);
505 					INP_RUNLOCK(inp);
506 					continue;
507 				}
508 			}
509 			if (last != NULL) {
510 				struct mbuf *n;
511 
512 				n = m_copy(m, 0, M_COPYALL);
513 				udp_append(last, ip, n, iphlen, &udp_in);
514 				INP_RUNLOCK(last);
515 			}
516 			last = inp;
517 			/*
518 			 * Don't look for additional matches if this one does
519 			 * not have either the SO_REUSEPORT or SO_REUSEADDR
520 			 * socket options set.  This heuristic avoids
521 			 * searching through all pcbs in the common case of a
522 			 * non-shared port.  It assumes that an application
523 			 * will never clear these options after setting them.
524 			 */
525 			if ((last->inp_socket->so_options &
526 			    (SO_REUSEPORT|SO_REUSEADDR)) == 0)
527 				break;
528 		}
529 
530 		if (last == NULL) {
531 			/*
532 			 * No matching pcb found; discard datagram.  (No need
533 			 * to send an ICMP Port Unreachable for a broadcast
534 			 * or multicast datagram.)
535 			 */
536 			UDPSTAT_INC(udps_noportbcast);
537 			if (inp)
538 				INP_RUNLOCK(inp);
539 			INP_INFO_RUNLOCK(&V_udbinfo);
540 			goto badunlocked;
541 		}
542 		udp_append(last, ip, m, iphlen, &udp_in);
543 		INP_RUNLOCK(last);
544 		INP_INFO_RUNLOCK(&V_udbinfo);
545 		return;
546 	}
547 
548 	/*
549 	 * Locate pcb for datagram.
550 	 */
551 
552 	/*
553 	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
554 	 */
555 	if ((m->m_flags & M_IP_NEXTHOP) &&
556 	    (fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL)) != NULL) {
557 		struct sockaddr_in *next_hop;
558 
559 		next_hop = (struct sockaddr_in *)(fwd_tag + 1);
560 
561 		/*
562 		 * Transparently forwarded. Pretend to be the destination.
563 		 * Already got one like this?
564 		 */
565 		inp = in_pcblookup_mbuf(&V_udbinfo, ip->ip_src, uh->uh_sport,
566 		    ip->ip_dst, uh->uh_dport, INPLOOKUP_RLOCKPCB, ifp, m);
567 		if (!inp) {
568 			/*
569 			 * It's new.  Try to find the ambushing socket.
570 			 * Because we've rewritten the destination address,
571 			 * any hardware-generated hash is ignored.
572 			 */
573 			inp = in_pcblookup(&V_udbinfo, ip->ip_src,
574 			    uh->uh_sport, next_hop->sin_addr,
575 			    next_hop->sin_port ? htons(next_hop->sin_port) :
576 			    uh->uh_dport, INPLOOKUP_WILDCARD |
577 			    INPLOOKUP_RLOCKPCB, ifp);
578 		}
579 		/* Remove the tag from the packet. We don't need it anymore. */
580 		m_tag_delete(m, fwd_tag);
581 		m->m_flags &= ~M_IP_NEXTHOP;
582 	} else
583 		inp = in_pcblookup_mbuf(&V_udbinfo, ip->ip_src, uh->uh_sport,
584 		    ip->ip_dst, uh->uh_dport, INPLOOKUP_WILDCARD |
585 		    INPLOOKUP_RLOCKPCB, ifp, m);
586 	if (inp == NULL) {
587 		if (udp_log_in_vain) {
588 			char buf[4*sizeof "123"];
589 
590 			strcpy(buf, inet_ntoa(ip->ip_dst));
591 			log(LOG_INFO,
592 			    "Connection attempt to UDP %s:%d from %s:%d\n",
593 			    buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src),
594 			    ntohs(uh->uh_sport));
595 		}
596 		UDPSTAT_INC(udps_noport);
597 		if (m->m_flags & (M_BCAST | M_MCAST)) {
598 			UDPSTAT_INC(udps_noportbcast);
599 			goto badunlocked;
600 		}
601 		if (V_udp_blackhole)
602 			goto badunlocked;
603 		if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
604 			goto badunlocked;
605 		*ip = save_ip;
606 		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
607 		return;
608 	}
609 
610 	/*
611 	 * Check the minimum TTL for socket.
612 	 */
613 	INP_RLOCK_ASSERT(inp);
614 	if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) {
615 		INP_RUNLOCK(inp);
616 		m_freem(m);
617 		return;
618 	}
619 	udp_append(inp, ip, m, iphlen, &udp_in);
620 	INP_RUNLOCK(inp);
621 	return;
622 
623 badunlocked:
624 	m_freem(m);
625 }
626 #endif /* INET */
627 
628 /*
629  * Notify a udp user of an asynchronous error; just wake up so that they can
630  * collect error status.
631  */
632 struct inpcb *
633 udp_notify(struct inpcb *inp, int errno)
634 {
635 
636 	/*
637 	 * While udp_ctlinput() always calls udp_notify() with a read lock
638 	 * when invoking it directly, in_pcbnotifyall() currently uses write
639 	 * locks due to sharing code with TCP.  For now, accept either a read
640 	 * or a write lock, but a read lock is sufficient.
641 	 */
642 	INP_LOCK_ASSERT(inp);
643 
644 	inp->inp_socket->so_error = errno;
645 	sorwakeup(inp->inp_socket);
646 	sowwakeup(inp->inp_socket);
647 	return (inp);
648 }
649 
650 #ifdef INET
651 void
652 udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
653 {
654 	struct ip *ip = vip;
655 	struct udphdr *uh;
656 	struct in_addr faddr;
657 	struct inpcb *inp;
658 
659 	faddr = ((struct sockaddr_in *)sa)->sin_addr;
660 	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
661 		return;
662 
663 	/*
664 	 * Redirects don't need to be handled up here.
665 	 */
666 	if (PRC_IS_REDIRECT(cmd))
667 		return;
668 
669 	/*
670 	 * Hostdead is ugly because it goes linearly through all PCBs.
671 	 *
672 	 * XXX: We never get this from ICMP; otherwise it would make an
673 	 * excellent DoS attack on machines with many connections.
674 	 */
675 	if (cmd == PRC_HOSTDEAD)
676 		ip = NULL;
677 	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
678 		return;
679 	if (ip != NULL) {
680 		uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
681 		inp = in_pcblookup(&V_udbinfo, faddr, uh->uh_dport,
682 		    ip->ip_src, uh->uh_sport, INPLOOKUP_RLOCKPCB, NULL);
683 		if (inp != NULL) {
684 			INP_RLOCK_ASSERT(inp);
685 			if (inp->inp_socket != NULL) {
686 				udp_notify(inp, inetctlerrmap[cmd]);
687 			}
688 			INP_RUNLOCK(inp);
689 		}
690 	} else
691 		in_pcbnotifyall(&V_udbinfo, faddr, inetctlerrmap[cmd],
692 		    udp_notify);
693 }
694 #endif /* INET */
695 
696 static int
697 udp_pcblist(SYSCTL_HANDLER_ARGS)
698 {
699 	int error, i, n;
700 	struct inpcb *inp, **inp_list;
701 	inp_gen_t gencnt;
702 	struct xinpgen xig;
703 
704 	/*
705 	 * The process of preparing the PCB list is too time-consuming and
706 	 * resource-intensive to repeat twice on every request.
707 	 */
708 	if (req->oldptr == 0) {
709 		n = V_udbinfo.ipi_count;
710 		n += imax(n / 8, 10);
711 		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
712 		return (0);
713 	}
714 
715 	if (req->newptr != 0)
716 		return (EPERM);
717 
718 	/*
719 	 * OK, now we're committed to doing something.
720 	 */
721 	INP_INFO_RLOCK(&V_udbinfo);
722 	gencnt = V_udbinfo.ipi_gencnt;
723 	n = V_udbinfo.ipi_count;
724 	INP_INFO_RUNLOCK(&V_udbinfo);
725 
726 	error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
727 		+ n * sizeof(struct xinpcb));
728 	if (error != 0)
729 		return (error);
730 
731 	xig.xig_len = sizeof xig;
732 	xig.xig_count = n;
733 	xig.xig_gen = gencnt;
734 	xig.xig_sogen = so_gencnt;
735 	error = SYSCTL_OUT(req, &xig, sizeof xig);
736 	if (error)
737 		return (error);
738 
739 	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
740 	if (inp_list == 0)
741 		return (ENOMEM);
742 
743 	INP_INFO_RLOCK(&V_udbinfo);
744 	for (inp = LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n;
745 	     inp = LIST_NEXT(inp, inp_list)) {
746 		INP_WLOCK(inp);
747 		if (inp->inp_gencnt <= gencnt &&
748 		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
749 			in_pcbref(inp);
750 			inp_list[i++] = inp;
751 		}
752 		INP_WUNLOCK(inp);
753 	}
754 	INP_INFO_RUNLOCK(&V_udbinfo);
755 	n = i;
756 
757 	error = 0;
758 	for (i = 0; i < n; i++) {
759 		inp = inp_list[i];
760 		INP_RLOCK(inp);
761 		if (inp->inp_gencnt <= gencnt) {
762 			struct xinpcb xi;
763 
764 			bzero(&xi, sizeof(xi));
765 			xi.xi_len = sizeof xi;
766 			/* XXX should avoid extra copy */
767 			bcopy(inp, &xi.xi_inp, sizeof *inp);
768 			if (inp->inp_socket)
769 				sotoxsocket(inp->inp_socket, &xi.xi_socket);
770 			xi.xi_inp.inp_gencnt = inp->inp_gencnt;
771 			INP_RUNLOCK(inp);
772 			error = SYSCTL_OUT(req, &xi, sizeof xi);
773 		} else
774 			INP_RUNLOCK(inp);
775 	}
776 	INP_INFO_WLOCK(&V_udbinfo);
777 	for (i = 0; i < n; i++) {
778 		inp = inp_list[i];
779 		INP_RLOCK(inp);
780 		if (!in_pcbrele_rlocked(inp))
781 			INP_RUNLOCK(inp);
782 	}
783 	INP_INFO_WUNLOCK(&V_udbinfo);
784 
785 	if (!error) {
786 		/*
787 		 * Give the user an updated idea of our state.  If the
788 		 * generation differs from what we told her before, she knows
789 		 * that something happened while we were processing this
790 		 * request, and it might be necessary to retry.
791 		 */
792 		INP_INFO_RLOCK(&V_udbinfo);
793 		xig.xig_gen = V_udbinfo.ipi_gencnt;
794 		xig.xig_sogen = so_gencnt;
795 		xig.xig_count = V_udbinfo.ipi_count;
796 		INP_INFO_RUNLOCK(&V_udbinfo);
797 		error = SYSCTL_OUT(req, &xig, sizeof xig);
798 	}
799 	free(inp_list, M_TEMP);
800 	return (error);
801 }
802 
803 SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist,
804     CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
805     udp_pcblist, "S,xinpcb", "List of active UDP sockets");
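/*
 * net.inet.udp.pcblist is the interface that utilities such as netstat(1)
 * and sockstat(1) typically use to enumerate UDP sockets.
 */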
806 
807 #ifdef INET
808 static int
809 udp_getcred(SYSCTL_HANDLER_ARGS)
810 {
811 	struct xucred xuc;
812 	struct sockaddr_in addrs[2];
813 	struct inpcb *inp;
814 	int error;
815 
816 	error = priv_check(req->td, PRIV_NETINET_GETCRED);
817 	if (error)
818 		return (error);
819 	error = SYSCTL_IN(req, addrs, sizeof(addrs));
820 	if (error)
821 		return (error);
822 	inp = in_pcblookup(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
823 	    addrs[0].sin_addr, addrs[0].sin_port,
824 	    INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, NULL);
825 	if (inp != NULL) {
826 		INP_RLOCK_ASSERT(inp);
827 		if (inp->inp_socket == NULL)
828 			error = ENOENT;
829 		if (error == 0)
830 			error = cr_canseeinpcb(req->td->td_ucred, inp);
831 		if (error == 0)
832 			cru2x(inp->inp_cred, &xuc);
833 		INP_RUNLOCK(inp);
834 	} else
835 		error = ENOENT;
836 	if (error == 0)
837 		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
838 	return (error);
839 }
840 
841 SYSCTL_PROC(_net_inet_udp, OID_AUTO, getcred,
842     CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
843     udp_getcred, "S,xucred", "Get the xucred of a UDP connection");
844 #endif /* INET */
845 
846 int
847 udp_ctloutput(struct socket *so, struct sockopt *sopt)
848 {
849 	int error = 0, optval;
850 	struct inpcb *inp;
851 #ifdef IPSEC_NAT_T
852 	struct udpcb *up;
853 #endif
854 
855 	inp = sotoinpcb(so);
856 	KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
857 	INP_WLOCK(inp);
858 	if (sopt->sopt_level != IPPROTO_UDP) {
859 #ifdef INET6
860 		if (INP_CHECK_SOCKAF(so, AF_INET6)) {
861 			INP_WUNLOCK(inp);
862 			error = ip6_ctloutput(so, sopt);
863 		}
864 #endif
865 #if defined(INET) && defined(INET6)
866 		else
867 #endif
868 #ifdef INET
869 		{
870 			INP_WUNLOCK(inp);
871 			error = ip_ctloutput(so, sopt);
872 		}
873 #endif
874 		return (error);
875 	}
876 
877 	switch (sopt->sopt_dir) {
878 	case SOPT_SET:
879 		switch (sopt->sopt_name) {
880 		case UDP_ENCAP:
881 			INP_WUNLOCK(inp);
882 			error = sooptcopyin(sopt, &optval, sizeof optval,
883 					    sizeof optval);
884 			if (error)
885 				break;
886 			inp = sotoinpcb(so);
887 			KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
888 			INP_WLOCK(inp);
889 #ifdef IPSEC_NAT_T
890 			up = intoudpcb(inp);
891 			KASSERT(up != NULL, ("%s: up == NULL", __func__));
892 #endif
893 			switch (optval) {
894 			case 0:
895 				/* Clear all UDP encap. */
896 #ifdef IPSEC_NAT_T
897 				up->u_flags &= ~UF_ESPINUDP_ALL;
898 #endif
899 				break;
900 #ifdef IPSEC_NAT_T
901 			case UDP_ENCAP_ESPINUDP:
902 			case UDP_ENCAP_ESPINUDP_NON_IKE:
903 				up->u_flags &= ~UF_ESPINUDP_ALL;
904 				if (optval == UDP_ENCAP_ESPINUDP)
905 					up->u_flags |= UF_ESPINUDP;
906 				else if (optval == UDP_ENCAP_ESPINUDP_NON_IKE)
907 					up->u_flags |= UF_ESPINUDP_NON_IKE;
908 				break;
909 #endif
910 			default:
911 				error = EINVAL;
912 				break;
913 			}
914 			INP_WUNLOCK(inp);
915 			break;
916 		default:
917 			INP_WUNLOCK(inp);
918 			error = ENOPROTOOPT;
919 			break;
920 		}
921 		break;
922 	case SOPT_GET:
923 		switch (sopt->sopt_name) {
924 #ifdef IPSEC_NAT_T
925 		case UDP_ENCAP:
926 			up = intoudpcb(inp);
927 			KASSERT(up != NULL, ("%s: up == NULL", __func__));
928 			optval = up->u_flags & UF_ESPINUDP_ALL;
929 			INP_WUNLOCK(inp);
930 			error = sooptcopyout(sopt, &optval, sizeof optval);
931 			break;
932 #endif
933 		default:
934 			INP_WUNLOCK(inp);
935 			error = ENOPROTOOPT;
936 			break;
937 		}
938 		break;
939 	}
940 	return (error);
941 }
942 
943 #ifdef INET
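/*
 * Lock-state codes recorded in unlock_udbinfo by udp_output() below, so that
 * the matching udbinfo hash-lock release (write, read, or none) can be
 * performed on the way out.
 */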
944 #define	UH_WLOCKED	2
945 #define	UH_RLOCKED	1
946 #define	UH_UNLOCKED	0
947 static int
948 udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
949     struct mbuf *control, struct thread *td)
950 {
951 	struct udpiphdr *ui;
952 	int len = m->m_pkthdr.len;
953 	struct in_addr faddr, laddr;
954 	struct cmsghdr *cm;
955 	struct sockaddr_in *sin, src;
956 	int error = 0;
957 	int ipflags;
958 	u_short fport, lport;
959 	int unlock_udbinfo;
960 	u_char tos;
961 
962 	/*
963 	 * udp_output() may need to temporarily bind or connect the current
964 	 * inpcb.  As such, we don't know up front whether we will need the
965 	 * pcbinfo lock or not.  Do any work to decide what is needed up
966 	 * front before acquiring any locks.
967 	 */
968 	if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
969 		if (control)
970 			m_freem(control);
971 		m_freem(m);
972 		return (EMSGSIZE);
973 	}
974 
975 	src.sin_family = 0;
976 	INP_RLOCK(inp);
977 	tos = inp->inp_ip_tos;
978 	if (control != NULL) {
979 		/*
980 		 * XXX: Currently, we assume all the optional information is
981 		 * stored in a single mbuf.
982 		 */
983 		if (control->m_next) {
984 			INP_RUNLOCK(inp);
985 			m_freem(control);
986 			m_freem(m);
987 			return (EINVAL);
988 		}
989 		for (; control->m_len > 0;
990 		    control->m_data += CMSG_ALIGN(cm->cmsg_len),
991 		    control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
992 			cm = mtod(control, struct cmsghdr *);
993 			if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0
994 			    || cm->cmsg_len > control->m_len) {
995 				error = EINVAL;
996 				break;
997 			}
998 			if (cm->cmsg_level != IPPROTO_IP)
999 				continue;
1000 
1001 			switch (cm->cmsg_type) {
1002 			case IP_SENDSRCADDR:
1003 				if (cm->cmsg_len !=
1004 				    CMSG_LEN(sizeof(struct in_addr))) {
1005 					error = EINVAL;
1006 					break;
1007 				}
1008 				bzero(&src, sizeof(src));
1009 				src.sin_family = AF_INET;
1010 				src.sin_len = sizeof(src);
1011 				src.sin_port = inp->inp_lport;
1012 				src.sin_addr =
1013 				    *(struct in_addr *)CMSG_DATA(cm);
1014 				break;
1015 
1016 			case IP_TOS:
1017 				if (cm->cmsg_len != CMSG_LEN(sizeof(u_char))) {
1018 					error = EINVAL;
1019 					break;
1020 				}
1021 				tos = *(u_char *)CMSG_DATA(cm);
1022 				break;
1023 
1024 			default:
1025 				error = ENOPROTOOPT;
1026 				break;
1027 			}
1028 			if (error)
1029 				break;
1030 		}
1031 		m_freem(control);
1032 	}
1033 	if (error) {
1034 		INP_RUNLOCK(inp);
1035 		m_freem(m);
1036 		return (error);
1037 	}
1038 
1039 	/*
1040 	 * Depending on whether or not the application has bound or connected
1041 	 * the socket, we may have to do varying levels of work.  The optimal
1042 	 * case is for a connected UDP socket, as a global lock isn't
1043 	 * required at all.
1044 	 *
1045 	 * In order to decide which we need, we require stability of the
1046 	 * inpcb binding, which we ensure by acquiring a read lock on the
1047 	 * inpcb.  This doesn't strictly follow the lock order, so we play
1048 	 * the trylock and retry game; note that we may end up with more
1049 	 * conservative locks than required the second time around, so later
1050 	 * assertions have to accept that.  Further analysis of the number of
1051 	 * misses under contention is required.
1052 	 *
1053 	 * XXXRW: Check that hash locking update here is correct.
1054 	 */
1055 	sin = (struct sockaddr_in *)addr;
1056 	if (sin != NULL &&
1057 	    (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
1058 		INP_RUNLOCK(inp);
1059 		INP_WLOCK(inp);
1060 		INP_HASH_WLOCK(&V_udbinfo);
1061 		unlock_udbinfo = UH_WLOCKED;
1062 	} else if ((sin != NULL && (
1063 	    (sin->sin_addr.s_addr == INADDR_ANY) ||
1064 	    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
1065 	    (inp->inp_laddr.s_addr == INADDR_ANY) ||
1066 	    (inp->inp_lport == 0))) ||
1067 	    (src.sin_family == AF_INET)) {
1068 		INP_HASH_RLOCK(&V_udbinfo);
1069 		unlock_udbinfo = UH_RLOCKED;
1070 	} else
1071 		unlock_udbinfo = UH_UNLOCKED;
1072 
1073 	/*
1074 	 * If the IP_SENDSRCADDR control message was specified, override the
1075 	 * source address for this datagram.  Its use is invalidated if the
1076 	 * address thus specified is incomplete or clobbers other inpcbs.
1077 	 */
1078 	laddr = inp->inp_laddr;
1079 	lport = inp->inp_lport;
1080 	if (src.sin_family == AF_INET) {
1081 		INP_HASH_LOCK_ASSERT(&V_udbinfo);
1082 		if ((lport == 0) ||
1083 		    (laddr.s_addr == INADDR_ANY &&
1084 		     src.sin_addr.s_addr == INADDR_ANY)) {
1085 			error = EINVAL;
1086 			goto release;
1087 		}
1088 		error = in_pcbbind_setup(inp, (struct sockaddr *)&src,
1089 		    &laddr.s_addr, &lport, td->td_ucred);
1090 		if (error)
1091 			goto release;
1092 	}
1093 
1094 	/*
1095 	 * If a UDP socket has been connected, then a local address/port will
1096 	 * have been selected and bound.
1097 	 *
1098 	 * If a UDP socket has not been connected to, then an explicit
1099 	 * destination address must be used, in which case a local
1100 	 * address/port may not have been selected and bound.
1101 	 */
1102 	if (sin != NULL) {
1103 		INP_LOCK_ASSERT(inp);
1104 		if (inp->inp_faddr.s_addr != INADDR_ANY) {
1105 			error = EISCONN;
1106 			goto release;
1107 		}
1108 
1109 		/*
1110 		 * Jail may rewrite the destination address, so let it do
1111 		 * that before we use it.
1112 		 */
1113 		error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1114 		if (error)
1115 			goto release;
1116 
1117 		/*
1118 		 * If a local address or port hasn't yet been selected, or if
1119 		 * the destination address needs to be rewritten due to using
1120 		 * a special INADDR_ constant, invoke in_pcbconnect_setup()
1121 		 * to do the heavy lifting.  Once a port is selected, we
1122 		 * commit the binding back to the socket; we also commit the
1123 		 * binding of the address if in jail.
1124 		 *
1125 		 * If we already have a valid binding and we're not
1126 		 * requesting a destination address rewrite, use a fast path.
1127 		 */
1128 		if (inp->inp_laddr.s_addr == INADDR_ANY ||
1129 		    inp->inp_lport == 0 ||
1130 		    sin->sin_addr.s_addr == INADDR_ANY ||
1131 		    sin->sin_addr.s_addr == INADDR_BROADCAST) {
1132 			INP_HASH_LOCK_ASSERT(&V_udbinfo);
1133 			error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
1134 			    &lport, &faddr.s_addr, &fport, NULL,
1135 			    td->td_ucred);
1136 			if (error)
1137 				goto release;
1138 
1139 			/*
1140 			 * XXXRW: Why not commit the port if the address is
1141 			 * !INADDR_ANY?
1142 			 */
1143 			/* Commit the local port if newly assigned. */
1144 			if (inp->inp_laddr.s_addr == INADDR_ANY &&
1145 			    inp->inp_lport == 0) {
1146 				INP_WLOCK_ASSERT(inp);
1147 				INP_HASH_WLOCK_ASSERT(&V_udbinfo);
1148 				/*
1149 				 * Remember addr if jailed, to prevent
1150 				 * rebinding.
1151 				 */
1152 				if (prison_flag(td->td_ucred, PR_IP4))
1153 					inp->inp_laddr = laddr;
1154 				inp->inp_lport = lport;
1155 				if (in_pcbinshash(inp) != 0) {
1156 					inp->inp_lport = 0;
1157 					error = EAGAIN;
1158 					goto release;
1159 				}
1160 				inp->inp_flags |= INP_ANONPORT;
1161 			}
1162 		} else {
1163 			faddr = sin->sin_addr;
1164 			fport = sin->sin_port;
1165 		}
1166 	} else {
1167 		INP_LOCK_ASSERT(inp);
1168 		faddr = inp->inp_faddr;
1169 		fport = inp->inp_fport;
1170 		if (faddr.s_addr == INADDR_ANY) {
1171 			error = ENOTCONN;
1172 			goto release;
1173 		}
1174 	}
1175 
1176 	/*
1177 	 * Calculate the data length and get an mbuf for the UDP, IP, and
1178 	 * possible link-layer headers.  Immediately slide the data pointer
1179 	 * forward again, since we won't use that space at this layer.
1180 	 */
1181 	M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_NOWAIT);
1182 	if (m == NULL) {
1183 		error = ENOBUFS;
1184 		goto release;
1185 	}
1186 	m->m_data += max_linkhdr;
1187 	m->m_len -= max_linkhdr;
1188 	m->m_pkthdr.len -= max_linkhdr;
1189 
1190 	/*
1191 	 * Fill in the mbuf with an extended UDP header, with the addresses
1192 	 * and length put into network byte order.
1193 	 */
1194 	ui = mtod(m, struct udpiphdr *);
1195 	bzero(ui->ui_x1, sizeof(ui->ui_x1));	/* XXX still needed? */
1196 	ui->ui_pr = IPPROTO_UDP;
1197 	ui->ui_src = laddr;
1198 	ui->ui_dst = faddr;
1199 	ui->ui_sport = lport;
1200 	ui->ui_dport = fport;
1201 	ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
1202 
1203 	/*
1204 	 * Set the Don't Fragment bit in the IP header.
1205 	 */
1206 	if (inp->inp_flags & INP_DONTFRAG) {
1207 		struct ip *ip;
1208 
1209 		ip = (struct ip *)&ui->ui_i;
1210 		ip->ip_off |= htons(IP_DF);
1211 	}
1212 
1213 	ipflags = 0;
1214 	if (inp->inp_socket->so_options & SO_DONTROUTE)
1215 		ipflags |= IP_ROUTETOIF;
1216 	if (inp->inp_socket->so_options & SO_BROADCAST)
1217 		ipflags |= IP_ALLOWBROADCAST;
1218 	if (inp->inp_flags & INP_ONESBCAST)
1219 		ipflags |= IP_SENDONES;
1220 
1221 #ifdef MAC
1222 	mac_inpcb_create_mbuf(inp, m);
1223 #endif
1224 
1225 	/*
1226 	 * Set up checksum and output datagram.
1227 	 */
1228 	if (V_udp_cksum) {
1229 		if (inp->inp_flags & INP_ONESBCAST)
1230 			faddr.s_addr = INADDR_BROADCAST;
1231 		ui->ui_sum = in_pseudo(ui->ui_src.s_addr, faddr.s_addr,
1232 		    htons((u_short)len + sizeof(struct udphdr) + IPPROTO_UDP));
1233 		m->m_pkthdr.csum_flags = CSUM_UDP;
1234 		m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
1235 	} else
1236 		ui->ui_sum = 0;
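	/*
	 * Note: only the pseudo-header portion of the checksum is filled in
	 * above; CSUM_UDP marks the packet so that the remainder is computed
	 * later, either by capable hardware or in software further down the
	 * output path.
	 */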
1237 	((struct ip *)ui)->ip_len = htons(sizeof(struct udpiphdr) + len);
1238 	((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl;	/* XXX */
1239 	((struct ip *)ui)->ip_tos = tos;		/* XXX */
1240 	UDPSTAT_INC(udps_opackets);
1241 
1242 	if (unlock_udbinfo == UH_WLOCKED)
1243 		INP_HASH_WUNLOCK(&V_udbinfo);
1244 	else if (unlock_udbinfo == UH_RLOCKED)
1245 		INP_HASH_RUNLOCK(&V_udbinfo);
1246 	error = ip_output(m, inp->inp_options, NULL, ipflags,
1247 	    inp->inp_moptions, inp);
1248 	if (unlock_udbinfo == UH_WLOCKED)
1249 		INP_WUNLOCK(inp);
1250 	else
1251 		INP_RUNLOCK(inp);
1252 	return (error);
1253 
1254 release:
1255 	if (unlock_udbinfo == UH_WLOCKED) {
1256 		INP_HASH_WUNLOCK(&V_udbinfo);
1257 		INP_WUNLOCK(inp);
1258 	} else if (unlock_udbinfo == UH_RLOCKED) {
1259 		INP_HASH_RUNLOCK(&V_udbinfo);
1260 		INP_RUNLOCK(inp);
1261 	} else
1262 		INP_RUNLOCK(inp);
1263 	m_freem(m);
1264 	return (error);
1265 }
1266 
1267 
1268 #if defined(IPSEC) && defined(IPSEC_NAT_T)
1269 /*
1270  * Potentially decap ESP in UDP frame.  Check for an ESP header
1271  * and optional marker; if present, strip the UDP header and
1272  * push the result through IPSec.
1273  *
1274  * Returns mbuf to be processed (potentially re-allocated) or
1275  * NULL if consumed and/or processed.
1276  */
1277 static struct mbuf *
1278 udp4_espdecap(struct inpcb *inp, struct mbuf *m, int off)
1279 {
1280 	size_t minlen, payload, skip, iphlen;
1281 	caddr_t data;
1282 	struct udpcb *up;
1283 	struct m_tag *tag;
1284 	struct udphdr *udphdr;
1285 	struct ip *ip;
1286 
1287 	INP_RLOCK_ASSERT(inp);
1288 
1289 	/*
1290 	 * Pull up data so the longest case is contiguous:
1291 	 *    IP/UDP hdr + non ESP marker + ESP hdr.
1292 	 */
1293 	minlen = off + sizeof(uint64_t) + sizeof(struct esp);
1294 	if (minlen > m->m_pkthdr.len)
1295 		minlen = m->m_pkthdr.len;
1296 	if ((m = m_pullup(m, minlen)) == NULL) {
1297 		IPSECSTAT_INC(ips_in_inval);
1298 		return (NULL);		/* Bypass caller processing. */
1299 	}
1300 	data = mtod(m, caddr_t);	/* Points to ip header. */
1301 	payload = m->m_len - off;	/* Size of payload. */
1302 
1303 	if (payload == 1 && data[off] == '\xff')
1304 		return (m);		/* NB: keepalive packet, no decap. */
1305 
1306 	up = intoudpcb(inp);
1307 	KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
1308 	KASSERT((up->u_flags & UF_ESPINUDP_ALL) != 0,
1309 	    ("u_flags 0x%x", up->u_flags));
1310 
1311 	/*
1312 	 * Check that the payload is large enough to hold an
1313 	 * ESP header and compute the amount of data to remove.
1314 	 *
1315 	 * NB: the caller has already done a pullup for us.
1316 	 * XXX can we assume alignment and eliminate bcopys?
1317 	 */
1318 	if (up->u_flags & UF_ESPINUDP_NON_IKE) {
1319 		/*
1320 		 * draft-ietf-ipsec-nat-t-ike-0[01].txt and
1321 		 * draft-ietf-ipsec-udp-encaps-(00/)01.txt, ignoring
1322 		 * possible AH mode non-IKE marker+non-ESP marker
1323 		 * from draft-ietf-ipsec-udp-encaps-00.txt.
1324 		 */
1325 		uint64_t marker;
1326 
1327 		if (payload <= sizeof(uint64_t) + sizeof(struct esp))
1328 			return (m);	/* NB: no decap. */
1329 		bcopy(data + off, &marker, sizeof(uint64_t));
1330 		if (marker != 0)	/* Non-IKE marker. */
1331 			return (m);	/* NB: no decap. */
1332 		skip = sizeof(uint64_t) + sizeof(struct udphdr);
1333 	} else {
1334 		uint32_t spi;
1335 
1336 		if (payload <= sizeof(struct esp)) {
1337 			IPSECSTAT_INC(ips_in_inval);
1338 			m_freem(m);
1339 			return (NULL);	/* Discard. */
1340 		}
1341 		bcopy(data + off, &spi, sizeof(uint32_t));
1342 		if (spi == 0)		/* Non-ESP marker. */
1343 			return (m);	/* NB: no decap. */
1344 		skip = sizeof(struct udphdr);
1345 	}
1346 
1347 	/*
1348 	 * Set up a PACKET_TAG_IPSEC_NAT_T_PORTS tag to remember
1349 	 * the UDP ports.  This is required if we want to select
1350 	 * the right SPD for multiple hosts behind the same NAT.
1351 	 *
1352 	 * NB: ports are maintained in network byte order everywhere
1353 	 *     in the NAT-T code.
1354 	 */
1355 	tag = m_tag_get(PACKET_TAG_IPSEC_NAT_T_PORTS,
1356 		2 * sizeof(uint16_t), M_NOWAIT);
1357 	if (tag == NULL) {
1358 		IPSECSTAT_INC(ips_in_nomem);
1359 		m_freem(m);
1360 		return (NULL);		/* Discard. */
1361 	}
1362 	iphlen = off - sizeof(struct udphdr);
1363 	udphdr = (struct udphdr *)(data + iphlen);
1364 	((uint16_t *)(tag + 1))[0] = udphdr->uh_sport;
1365 	((uint16_t *)(tag + 1))[1] = udphdr->uh_dport;
1366 	m_tag_prepend(m, tag);
1367 
1368 	/*
1369 	 * Remove the UDP header (and possibly the non-ESP marker).
1370 	 * The IP header length is iphlen.
1371 	 * Before:
1372 	 *   <--- off --->
1373 	 *   +----+------+-----+
1374 	 *   | IP |  UDP | ESP |
1375 	 *   +----+------+-----+
1376 	 *        <-skip->
1377 	 * After:
1378 	 *          +----+-----+
1379 	 *          | IP | ESP |
1380 	 *          +----+-----+
1381 	 *   <-skip->
1382 	 */
1383 	ovbcopy(data, data + skip, iphlen);
1384 	m_adj(m, skip);
1385 
1386 	ip = mtod(m, struct ip *);
1387 	ip->ip_len = htons(ntohs(ip->ip_len) - skip);
1388 	ip->ip_p = IPPROTO_ESP;
1389 
1390 	/*
1391 	 * We cannot yet update the checksums, so clear any
1392 	 * h/w checksum flags as they are no longer valid.
1393 	 */
1394 	if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)
1395 		m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
1396 
1397 	(void) ipsec4_common_input(m, iphlen, ip->ip_p);
1398 	return (NULL);			/* NB: consumed, bypass processing. */
1399 }
1400 #endif /* defined(IPSEC) && defined(IPSEC_NAT_T) */
1401 
1402 static void
1403 udp_abort(struct socket *so)
1404 {
1405 	struct inpcb *inp;
1406 
1407 	inp = sotoinpcb(so);
1408 	KASSERT(inp != NULL, ("udp_abort: inp == NULL"));
1409 	INP_WLOCK(inp);
1410 	if (inp->inp_faddr.s_addr != INADDR_ANY) {
1411 		INP_HASH_WLOCK(&V_udbinfo);
1412 		in_pcbdisconnect(inp);
1413 		inp->inp_laddr.s_addr = INADDR_ANY;
1414 		INP_HASH_WUNLOCK(&V_udbinfo);
1415 		soisdisconnected(so);
1416 	}
1417 	INP_WUNLOCK(inp);
1418 }
1419 
1420 static int
1421 udp_attach(struct socket *so, int proto, struct thread *td)
1422 {
1423 	struct inpcb *inp;
1424 	int error;
1425 
1426 	inp = sotoinpcb(so);
1427 	KASSERT(inp == NULL, ("udp_attach: inp != NULL"));
1428 	error = soreserve(so, udp_sendspace, udp_recvspace);
1429 	if (error)
1430 		return (error);
1431 	INP_INFO_WLOCK(&V_udbinfo);
1432 	error = in_pcballoc(so, &V_udbinfo);
1433 	if (error) {
1434 		INP_INFO_WUNLOCK(&V_udbinfo);
1435 		return (error);
1436 	}
1437 
1438 	inp = sotoinpcb(so);
1439 	inp->inp_vflag |= INP_IPV4;
1440 	inp->inp_ip_ttl = V_ip_defttl;
1441 
1442 	error = udp_newudpcb(inp);
1443 	if (error) {
1444 		in_pcbdetach(inp);
1445 		in_pcbfree(inp);
1446 		INP_INFO_WUNLOCK(&V_udbinfo);
1447 		return (error);
1448 	}
1449 
1450 	INP_WUNLOCK(inp);
1451 	INP_INFO_WUNLOCK(&V_udbinfo);
1452 	return (0);
1453 }
1454 #endif /* INET */
1455 
1456 int
1457 udp_set_kernel_tunneling(struct socket *so, udp_tun_func_t f)
1458 {
1459 	struct inpcb *inp;
1460 	struct udpcb *up;
1461 
1462 	KASSERT(so->so_type == SOCK_DGRAM,
1463 	    ("udp_set_kernel_tunneling: !dgram"));
1464 	inp = sotoinpcb(so);
1465 	KASSERT(inp != NULL, ("udp_set_kernel_tunneling: inp == NULL"));
1466 	INP_WLOCK(inp);
1467 	up = intoudpcb(inp);
1468 	if (up->u_tun_func != NULL) {
1469 		INP_WUNLOCK(inp);
1470 		return (EBUSY);
1471 	}
1472 	up->u_tun_func = f;
1473 	INP_WUNLOCK(inp);
1474 	return (0);
1475 }
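/*
 * Example (sketch): a kernel consumer that owns a UDP socket can divert all
 * received datagrams to its own handler.  The name "my_tunnel_input" below
 * is illustrative; the callback signature follows udp_tun_func_t as invoked
 * from udp_append() above, and the handler owns the mbuf chain passed to it:
 *
 *	static void
 *	my_tunnel_input(struct mbuf *m, int off, struct inpcb *inp)
 *	{
 *		m_freem(m);
 *	}
 *
 *	error = udp_set_kernel_tunneling(so, my_tunnel_input);
 */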
1476 
1477 #ifdef INET
1478 static int
1479 udp_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
1480 {
1481 	struct inpcb *inp;
1482 	int error;
1483 
1484 	inp = sotoinpcb(so);
1485 	KASSERT(inp != NULL, ("udp_bind: inp == NULL"));
1486 	INP_WLOCK(inp);
1487 	INP_HASH_WLOCK(&V_udbinfo);
1488 	error = in_pcbbind(inp, nam, td->td_ucred);
1489 	INP_HASH_WUNLOCK(&V_udbinfo);
1490 	INP_WUNLOCK(inp);
1491 	return (error);
1492 }
1493 
1494 static void
1495 udp_close(struct socket *so)
1496 {
1497 	struct inpcb *inp;
1498 
1499 	inp = sotoinpcb(so);
1500 	KASSERT(inp != NULL, ("udp_close: inp == NULL"));
1501 	INP_WLOCK(inp);
1502 	if (inp->inp_faddr.s_addr != INADDR_ANY) {
1503 		INP_HASH_WLOCK(&V_udbinfo);
1504 		in_pcbdisconnect(inp);
1505 		inp->inp_laddr.s_addr = INADDR_ANY;
1506 		INP_HASH_WUNLOCK(&V_udbinfo);
1507 		soisdisconnected(so);
1508 	}
1509 	INP_WUNLOCK(inp);
1510 }
1511 
1512 static int
1513 udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1514 {
1515 	struct inpcb *inp;
1516 	int error;
1517 	struct sockaddr_in *sin;
1518 
1519 	inp = sotoinpcb(so);
1520 	KASSERT(inp != NULL, ("udp_connect: inp == NULL"));
1521 	INP_WLOCK(inp);
1522 	if (inp->inp_faddr.s_addr != INADDR_ANY) {
1523 		INP_WUNLOCK(inp);
1524 		return (EISCONN);
1525 	}
1526 	sin = (struct sockaddr_in *)nam;
1527 	error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1528 	if (error != 0) {
1529 		INP_WUNLOCK(inp);
1530 		return (error);
1531 	}
1532 	INP_HASH_WLOCK(&V_udbinfo);
1533 	error = in_pcbconnect(inp, nam, td->td_ucred);
1534 	INP_HASH_WUNLOCK(&V_udbinfo);
1535 	if (error == 0)
1536 		soisconnected(so);
1537 	INP_WUNLOCK(inp);
1538 	return (error);
1539 }
1540 
1541 static void
1542 udp_detach(struct socket *so)
1543 {
1544 	struct inpcb *inp;
1545 	struct udpcb *up;
1546 
1547 	inp = sotoinpcb(so);
1548 	KASSERT(inp != NULL, ("udp_detach: inp == NULL"));
1549 	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
1550 	    ("udp_detach: not disconnected"));
1551 	INP_INFO_WLOCK(&V_udbinfo);
1552 	INP_WLOCK(inp);
1553 	up = intoudpcb(inp);
1554 	KASSERT(up != NULL, ("%s: up == NULL", __func__));
1555 	inp->inp_ppcb = NULL;
1556 	in_pcbdetach(inp);
1557 	in_pcbfree(inp);
1558 	INP_INFO_WUNLOCK(&V_udbinfo);
1559 	udp_discardcb(up);
1560 }
1561 
1562 static int
1563 udp_disconnect(struct socket *so)
1564 {
1565 	struct inpcb *inp;
1566 
1567 	inp = sotoinpcb(so);
1568 	KASSERT(inp != NULL, ("udp_disconnect: inp == NULL"));
1569 	INP_WLOCK(inp);
1570 	if (inp->inp_faddr.s_addr == INADDR_ANY) {
1571 		INP_WUNLOCK(inp);
1572 		return (ENOTCONN);
1573 	}
1574 	INP_HASH_WLOCK(&V_udbinfo);
1575 	in_pcbdisconnect(inp);
1576 	inp->inp_laddr.s_addr = INADDR_ANY;
1577 	INP_HASH_WUNLOCK(&V_udbinfo);
1578 	SOCK_LOCK(so);
1579 	so->so_state &= ~SS_ISCONNECTED;		/* XXX */
1580 	SOCK_UNLOCK(so);
1581 	INP_WUNLOCK(inp);
1582 	return (0);
1583 }
1584 
1585 static int
1586 udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
1587     struct mbuf *control, struct thread *td)
1588 {
1589 	struct inpcb *inp;
1590 
1591 	inp = sotoinpcb(so);
1592 	KASSERT(inp != NULL, ("udp_send: inp == NULL"));
1593 	return (udp_output(inp, m, addr, control, td));
1594 }
1595 #endif /* INET */
1596 
1597 int
1598 udp_shutdown(struct socket *so)
1599 {
1600 	struct inpcb *inp;
1601 
1602 	inp = sotoinpcb(so);
1603 	KASSERT(inp != NULL, ("udp_shutdown: inp == NULL"));
1604 	INP_WLOCK(inp);
1605 	socantsendmore(so);
1606 	INP_WUNLOCK(inp);
1607 	return (0);
1608 }
1609 
1610 #ifdef INET
1611 struct pr_usrreqs udp_usrreqs = {
1612 	.pru_abort =		udp_abort,
1613 	.pru_attach =		udp_attach,
1614 	.pru_bind =		udp_bind,
1615 	.pru_connect =		udp_connect,
1616 	.pru_control =		in_control,
1617 	.pru_detach =		udp_detach,
1618 	.pru_disconnect =	udp_disconnect,
1619 	.pru_peeraddr =		in_getpeeraddr,
1620 	.pru_send =		udp_send,
1621 	.pru_soreceive =	soreceive_dgram,
1622 	.pru_sosend =		sosend_dgram,
1623 	.pru_shutdown =		udp_shutdown,
1624 	.pru_sockaddr =		in_getsockaddr,
1625 	.pru_sosetlabel =	in_pcbsosetlabel,
1626 	.pru_close =		udp_close,
1627 };
1628 #endif /* INET */
1629