xref: /freebsd/sys/netinet/udp_usrreq.c (revision c94c8223bd444fa38ded3797060110c590f422f4)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3  *	The Regents of the University of California.
4  * Copyright (c) 2008 Robert N. M. Watson
5  * Copyright (c) 2010-2011 Juniper Networks, Inc.
6  * Copyright (c) 2014 Kevin Lo
7  * All rights reserved.
8  *
9  * Portions of this software were developed by Robert N. M. Watson under
10  * contract to Juniper Networks, Inc.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)udp_usrreq.c	8.6 (Berkeley) 5/23/95
37  */
38 
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
41 
42 #include "opt_ipfw.h"
43 #include "opt_inet.h"
44 #include "opt_inet6.h"
45 #include "opt_ipsec.h"
46 #include "opt_rss.h"
47 
48 #include <sys/param.h>
49 #include <sys/domain.h>
50 #include <sys/eventhandler.h>
51 #include <sys/jail.h>
52 #include <sys/kernel.h>
53 #include <sys/lock.h>
54 #include <sys/malloc.h>
55 #include <sys/mbuf.h>
56 #include <sys/priv.h>
57 #include <sys/proc.h>
58 #include <sys/protosw.h>
59 #include <sys/sdt.h>
60 #include <sys/signalvar.h>
61 #include <sys/socket.h>
62 #include <sys/socketvar.h>
63 #include <sys/sx.h>
64 #include <sys/sysctl.h>
65 #include <sys/syslog.h>
66 #include <sys/systm.h>
67 
68 #include <vm/uma.h>
69 
70 #include <net/if.h>
71 #include <net/if_var.h>
72 #include <net/route.h>
73 #include <net/rss_config.h>
74 
75 #include <netinet/in.h>
76 #include <netinet/in_kdtrace.h>
77 #include <netinet/in_pcb.h>
78 #include <netinet/in_systm.h>
79 #include <netinet/in_var.h>
80 #include <netinet/ip.h>
81 #ifdef INET6
82 #include <netinet/ip6.h>
83 #endif
84 #include <netinet/ip_icmp.h>
85 #include <netinet/icmp_var.h>
86 #include <netinet/ip_var.h>
87 #include <netinet/ip_options.h>
88 #ifdef INET6
89 #include <netinet6/ip6_var.h>
90 #endif
91 #include <netinet/udp.h>
92 #include <netinet/udp_var.h>
93 #include <netinet/udplite.h>
94 #include <netinet/in_rss.h>
95 
96 #ifdef IPSEC
97 #include <netipsec/ipsec.h>
98 #include <netipsec/esp.h>
99 #endif
100 
101 #include <machine/in_cksum.h>
102 
103 #include <security/mac/mac_framework.h>
104 
105 /*
106  * UDP and UDP-Lite protocols implementation.
107  * Per RFC 768, August, 1980.
108  * Per RFC 3828, July, 2004.
109  */
110 
111 /*
112  * BSD 4.2 defaulted the udp checksum to be off.  Turning off udp checksums
113  * removes the only data integrity mechanism for packets and malformed
114  * packets that would otherwise be discarded due to bad checksums, and may
115  * cause problems (especially for NFS data blocks).
116  */
117 VNET_DEFINE(int, udp_cksum) = 1;
118 SYSCTL_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_VNET | CTLFLAG_RW,
119     &VNET_NAME(udp_cksum), 0, "compute udp checksum");
120 
121 int	udp_log_in_vain = 0;
122 SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
123     &udp_log_in_vain, 0, "Log all incoming UDP packets");
124 
125 VNET_DEFINE(int, udp_blackhole) = 0;
126 SYSCTL_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
127     &VNET_NAME(udp_blackhole), 0,
128     "Do not send port unreachables for refused connects");
129 
130 u_long	udp_sendspace = 9216;		/* really max datagram size */
131 SYSCTL_ULONG(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
132     &udp_sendspace, 0, "Maximum outgoing UDP datagram size");
133 
134 u_long	udp_recvspace = 40 * (1024 +
135 #ifdef INET6
136 				      sizeof(struct sockaddr_in6)
137 #else
138 				      sizeof(struct sockaddr_in)
139 #endif
140 				      );	/* 40 1K datagrams */
141 
142 SYSCTL_ULONG(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
143     &udp_recvspace, 0, "Maximum space for incoming UDP datagrams");
144 
145 VNET_DEFINE(struct inpcbhead, udb);		/* from udp_var.h */
146 VNET_DEFINE(struct inpcbinfo, udbinfo);
147 VNET_DEFINE(struct inpcbhead, ulitecb);
148 VNET_DEFINE(struct inpcbinfo, ulitecbinfo);
149 static VNET_DEFINE(uma_zone_t, udpcb_zone);
150 #define	V_udpcb_zone			VNET(udpcb_zone)
151 
152 #ifndef UDBHASHSIZE
153 #define	UDBHASHSIZE	128
154 #endif
155 
156 VNET_PCPUSTAT_DEFINE(struct udpstat, udpstat);		/* from udp_var.h */
157 VNET_PCPUSTAT_SYSINIT(udpstat);
158 SYSCTL_VNET_PCPUSTAT(_net_inet_udp, UDPCTL_STATS, stats, struct udpstat,
159     udpstat, "UDP statistics (struct udpstat, netinet/udp_var.h)");
160 
161 #ifdef VIMAGE
162 VNET_PCPUSTAT_SYSUNINIT(udpstat);
163 #endif /* VIMAGE */
164 #ifdef INET
165 static void	udp_detach(struct socket *so);
166 static int	udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
167 		    struct mbuf *, struct thread *);
168 #endif
169 
170 #ifdef IPSEC
171 #ifdef IPSEC_NAT_T
172 #define	UF_ESPINUDP_ALL	(UF_ESPINUDP_NON_IKE|UF_ESPINUDP)
173 #ifdef INET
174 static struct mbuf *udp4_espdecap(struct inpcb *, struct mbuf *, int);
175 #endif
176 #endif /* IPSEC_NAT_T */
177 #endif /* IPSEC */
178 
179 static void
180 udp_zone_change(void *tag)
181 {
182 
183 	uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
184 	uma_zone_set_max(V_udpcb_zone, maxsockets);
185 }
186 
187 static int
188 udp_inpcb_init(void *mem, int size, int flags)
189 {
190 	struct inpcb *inp;
191 
192 	inp = mem;
193 	INP_LOCK_INIT(inp, "inp", "udpinp");
194 	return (0);
195 }
196 
197 static int
198 udplite_inpcb_init(void *mem, int size, int flags)
199 {
200 	struct inpcb *inp;
201 
202 	inp = mem;
203 	INP_LOCK_INIT(inp, "inp", "udpliteinp");
204 	return (0);
205 }
206 
207 void
208 udp_init(void)
209 {
210 
211 	/*
212 	 * For now default to 2-tuple UDP hashing - until the fragment
213 	 * reassembly code can also update the flowid.
214 	 *
215 	 * Once we can calculate the flowid that way and re-establish
216 	 * a 4-tuple, flip this to 4-tuple.
217 	 */
218 	in_pcbinfo_init(&V_udbinfo, "udp", &V_udb, UDBHASHSIZE, UDBHASHSIZE,
219 	    "udp_inpcb", udp_inpcb_init, NULL, 0,
220 	    IPI_HASHFIELDS_2TUPLE);
221 	V_udpcb_zone = uma_zcreate("udpcb", sizeof(struct udpcb),
222 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
223 	uma_zone_set_max(V_udpcb_zone, maxsockets);
224 	uma_zone_set_warning(V_udpcb_zone, "kern.ipc.maxsockets limit reached");
225 	EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
226 	    EVENTHANDLER_PRI_ANY);
227 }
228 
229 void
230 udplite_init(void)
231 {
232 
233 	in_pcbinfo_init(&V_ulitecbinfo, "udplite", &V_ulitecb, UDBHASHSIZE,
234 	    UDBHASHSIZE, "udplite_inpcb", udplite_inpcb_init, NULL,
235 	    0, IPI_HASHFIELDS_2TUPLE);
236 }
237 
238 /*
239  * Kernel module interface for updating udpstat.  The argument is an index
240  * into udpstat treated as an array of u_long.  While this encodes the
241  * general layout of udpstat into the caller, it doesn't encode its location,
242  * so that future changes to add, for example, per-CPU stats support won't
243  * cause binary compatibility problems for kernel modules.
244  */
245 void
246 kmod_udpstat_inc(int statnum)
247 {
248 
249 	counter_u64_add(VNET(udpstat)[statnum], 1);
250 }
251 
252 int
253 udp_newudpcb(struct inpcb *inp)
254 {
255 	struct udpcb *up;
256 
257 	up = uma_zalloc(V_udpcb_zone, M_NOWAIT | M_ZERO);
258 	if (up == NULL)
259 		return (ENOBUFS);
260 	inp->inp_ppcb = up;
261 	return (0);
262 }
263 
264 void
265 udp_discardcb(struct udpcb *up)
266 {
267 
268 	uma_zfree(V_udpcb_zone, up);
269 }
270 
271 #ifdef VIMAGE
272 void
273 udp_destroy(void)
274 {
275 
276 	in_pcbinfo_destroy(&V_udbinfo);
277 	uma_zdestroy(V_udpcb_zone);
278 }
279 
280 void
281 udplite_destroy(void)
282 {
283 
284 	in_pcbinfo_destroy(&V_ulitecbinfo);
285 }
286 #endif
287 
288 #ifdef INET
289 /*
290  * Subroutine of udp_input(), which appends the provided mbuf chain to the
291  * passed pcb/socket.  The caller must provide a sockaddr_in via udp_in that
292  * contains the source address.  If the socket ends up being an IPv6 socket,
293  * udp_append() will convert to a sockaddr_in6 before passing the address
294  * into the socket code.
295  *
296  * In the normal case udp_append() will return 0, indicating that you
297  * must unlock the inp. However if a tunneling protocol is in place we increment
298  * the inpcb refcnt and unlock the inp, on return from the tunneling protocol we
299  * then decrement the reference count. If the inp_rele returns 1, indicating the
300  * inp is gone, we return that to the caller to tell them *not* to unlock
301  * the inp. In the case of multi-cast this will cause the distribution
302  * to stop (though most tunneling protocols known currently do *not* use
303  * multicast).
304  */
305 static int
306 udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
307     struct sockaddr_in *udp_in)
308 {
309 	struct sockaddr *append_sa;
310 	struct socket *so;
311 	struct mbuf *opts = 0;
312 #ifdef INET6
313 	struct sockaddr_in6 udp_in6;
314 #endif
315 	struct udpcb *up;
316 
317 	INP_LOCK_ASSERT(inp);
318 
319 	/*
320 	 * Engage the tunneling protocol.
321 	 */
322 	up = intoudpcb(inp);
323 	if (up->u_tun_func != NULL) {
324 		in_pcbref(inp);
325 		INP_RUNLOCK(inp);
326 		(*up->u_tun_func)(n, off, inp, (struct sockaddr *)udp_in,
327 		    up->u_tun_ctx);
328 		INP_RLOCK(inp);
329 		return (in_pcbrele_rlocked(inp));
330 	}
331 
332 	off += sizeof(struct udphdr);
333 
334 #ifdef IPSEC
335 	/* Check AH/ESP integrity. */
336 	if (ipsec4_in_reject(n, inp)) {
337 		m_freem(n);
338 		return (0);
339 	}
340 #ifdef IPSEC_NAT_T
341 	up = intoudpcb(inp);
342 	KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
343 	if (up->u_flags & UF_ESPINUDP_ALL) {	/* IPSec UDP encaps. */
344 		n = udp4_espdecap(inp, n, off);
345 		if (n == NULL)				/* Consumed. */
346 			return (0);
347 	}
348 #endif /* IPSEC_NAT_T */
349 #endif /* IPSEC */
350 #ifdef MAC
351 	if (mac_inpcb_check_deliver(inp, n) != 0) {
352 		m_freem(n);
353 		return (0);
354 	}
355 #endif /* MAC */
356 	if (inp->inp_flags & INP_CONTROLOPTS ||
357 	    inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) {
358 #ifdef INET6
359 		if (inp->inp_vflag & INP_IPV6)
360 			(void)ip6_savecontrol_v4(inp, n, &opts, NULL);
361 		else
362 #endif /* INET6 */
363 			ip_savecontrol(inp, &opts, ip, n);
364 	}
365 #ifdef INET6
366 	if (inp->inp_vflag & INP_IPV6) {
367 		bzero(&udp_in6, sizeof(udp_in6));
368 		udp_in6.sin6_len = sizeof(udp_in6);
369 		udp_in6.sin6_family = AF_INET6;
370 		in6_sin_2_v4mapsin6(udp_in, &udp_in6);
371 		append_sa = (struct sockaddr *)&udp_in6;
372 	} else
373 #endif /* INET6 */
374 		append_sa = (struct sockaddr *)udp_in;
375 	m_adj(n, off);
376 
377 	so = inp->inp_socket;
378 	SOCKBUF_LOCK(&so->so_rcv);
379 	if (sbappendaddr_locked(&so->so_rcv, append_sa, n, opts) == 0) {
380 		SOCKBUF_UNLOCK(&so->so_rcv);
381 		m_freem(n);
382 		if (opts)
383 			m_freem(opts);
384 		UDPSTAT_INC(udps_fullsock);
385 	} else
386 		sorwakeup_locked(so);
387 	return (0);
388 }
389 
390 int
391 udp_input(struct mbuf **mp, int *offp, int proto)
392 {
393 	struct ip *ip;
394 	struct udphdr *uh;
395 	struct ifnet *ifp;
396 	struct inpcb *inp;
397 	uint16_t len, ip_len;
398 	struct inpcbinfo *pcbinfo;
399 	struct ip save_ip;
400 	struct sockaddr_in udp_in;
401 	struct mbuf *m;
402 	struct m_tag *fwd_tag;
403 	int cscov_partial, iphlen;
404 
405 	m = *mp;
406 	iphlen = *offp;
407 	ifp = m->m_pkthdr.rcvif;
408 	*mp = NULL;
409 	UDPSTAT_INC(udps_ipackets);
410 
411 	/*
412 	 * Strip IP options, if any; should skip this, make available to
413 	 * user, and use on returned packets, but we don't yet have a way to
414 	 * check the checksum with options still present.
415 	 */
416 	if (iphlen > sizeof (struct ip)) {
417 		ip_stripoptions(m);
418 		iphlen = sizeof(struct ip);
419 	}
420 
421 	/*
422 	 * Get IP and UDP header together in first mbuf.
423 	 */
424 	ip = mtod(m, struct ip *);
425 	if (m->m_len < iphlen + sizeof(struct udphdr)) {
426 		if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == NULL) {
427 			UDPSTAT_INC(udps_hdrops);
428 			return (IPPROTO_DONE);
429 		}
430 		ip = mtod(m, struct ip *);
431 	}
432 	uh = (struct udphdr *)((caddr_t)ip + iphlen);
433 	cscov_partial = (proto == IPPROTO_UDPLITE) ? 1 : 0;
434 
435 	/*
436 	 * Destination port of 0 is illegal, based on RFC768.
437 	 */
438 	if (uh->uh_dport == 0)
439 		goto badunlocked;
440 
441 	/*
442 	 * Construct sockaddr format source address.  Stuff source address
443 	 * and datagram in user buffer.
444 	 */
445 	bzero(&udp_in, sizeof(udp_in));
446 	udp_in.sin_len = sizeof(udp_in);
447 	udp_in.sin_family = AF_INET;
448 	udp_in.sin_port = uh->uh_sport;
449 	udp_in.sin_addr = ip->ip_src;
450 
451 	/*
452 	 * Make mbuf data length reflect UDP length.  If not enough data to
453 	 * reflect UDP length, drop.
454 	 */
455 	len = ntohs((u_short)uh->uh_ulen);
456 	ip_len = ntohs(ip->ip_len) - iphlen;
457 	if (proto == IPPROTO_UDPLITE && (len == 0 || len == ip_len)) {
458 		/* Zero means checksum over the complete packet. */
459 		if (len == 0)
460 			len = ip_len;
461 		cscov_partial = 0;
462 	}
463 	if (ip_len != len) {
464 		if (len > ip_len || len < sizeof(struct udphdr)) {
465 			UDPSTAT_INC(udps_badlen);
466 			goto badunlocked;
467 		}
468 		if (proto == IPPROTO_UDP)
469 			m_adj(m, len - ip_len);
470 	}
471 
472 	/*
473 	 * Save a copy of the IP header in case we want restore it for
474 	 * sending an ICMP error message in response.
475 	 */
476 	if (!V_udp_blackhole)
477 		save_ip = *ip;
478 	else
479 		memset(&save_ip, 0, sizeof(save_ip));
480 
481 	/*
482 	 * Checksum extended UDP header and data.
483 	 */
484 	if (uh->uh_sum) {
485 		u_short uh_sum;
486 
487 		if ((m->m_pkthdr.csum_flags & CSUM_DATA_VALID) &&
488 		    !cscov_partial) {
489 			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
490 				uh_sum = m->m_pkthdr.csum_data;
491 			else
492 				uh_sum = in_pseudo(ip->ip_src.s_addr,
493 				    ip->ip_dst.s_addr, htonl((u_short)len +
494 				    m->m_pkthdr.csum_data + proto));
495 			uh_sum ^= 0xffff;
496 		} else {
497 			char b[9];
498 
499 			bcopy(((struct ipovly *)ip)->ih_x1, b, 9);
500 			bzero(((struct ipovly *)ip)->ih_x1, 9);
501 			((struct ipovly *)ip)->ih_len = (proto == IPPROTO_UDP) ?
502 			    uh->uh_ulen : htons(ip_len);
503 			uh_sum = in_cksum(m, len + sizeof (struct ip));
504 			bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
505 		}
506 		if (uh_sum) {
507 			UDPSTAT_INC(udps_badsum);
508 			m_freem(m);
509 			return (IPPROTO_DONE);
510 		}
511 	} else {
512 		if (proto == IPPROTO_UDP) {
513 			UDPSTAT_INC(udps_nosum);
514 		} else {
515 			/* UDPLite requires a checksum */
516 			/* XXX: What is the right UDPLite MIB counter here? */
517 			m_freem(m);
518 			return (IPPROTO_DONE);
519 		}
520 	}
521 
522 	pcbinfo = udp_get_inpcbinfo(proto);
523 	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
524 	    in_broadcast(ip->ip_dst, ifp)) {
525 		struct inpcb *last;
526 		struct inpcbhead *pcblist;
527 		struct ip_moptions *imo;
528 
529 		INP_INFO_RLOCK(pcbinfo);
530 		pcblist = udp_get_pcblist(proto);
531 		last = NULL;
532 		LIST_FOREACH(inp, pcblist, inp_list) {
533 			if (inp->inp_lport != uh->uh_dport)
534 				continue;
535 #ifdef INET6
536 			if ((inp->inp_vflag & INP_IPV4) == 0)
537 				continue;
538 #endif
539 			if (inp->inp_laddr.s_addr != INADDR_ANY &&
540 			    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
541 				continue;
542 			if (inp->inp_faddr.s_addr != INADDR_ANY &&
543 			    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
544 				continue;
545 			if (inp->inp_fport != 0 &&
546 			    inp->inp_fport != uh->uh_sport)
547 				continue;
548 
549 			INP_RLOCK(inp);
550 
551 			/*
552 			 * XXXRW: Because we weren't holding either the inpcb
553 			 * or the hash lock when we checked for a match
554 			 * before, we should probably recheck now that the
555 			 * inpcb lock is held.
556 			 */
557 
558 			/*
559 			 * Handle socket delivery policy for any-source
560 			 * and source-specific multicast. [RFC3678]
561 			 */
562 			imo = inp->inp_moptions;
563 			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
564 				struct sockaddr_in	 group;
565 				int			 blocked;
566 				if (imo == NULL) {
567 					INP_RUNLOCK(inp);
568 					continue;
569 				}
570 				bzero(&group, sizeof(struct sockaddr_in));
571 				group.sin_len = sizeof(struct sockaddr_in);
572 				group.sin_family = AF_INET;
573 				group.sin_addr = ip->ip_dst;
574 
575 				blocked = imo_multi_filter(imo, ifp,
576 					(struct sockaddr *)&group,
577 					(struct sockaddr *)&udp_in);
578 				if (blocked != MCAST_PASS) {
579 					if (blocked == MCAST_NOTGMEMBER)
580 						IPSTAT_INC(ips_notmember);
581 					if (blocked == MCAST_NOTSMEMBER ||
582 					    blocked == MCAST_MUTED)
583 						UDPSTAT_INC(udps_filtermcast);
584 					INP_RUNLOCK(inp);
585 					continue;
586 				}
587 			}
588 			if (last != NULL) {
589 				struct mbuf *n;
590 
591 				if ((n = m_copy(m, 0, M_COPYALL)) != NULL) {
592 					UDP_PROBE(receive, NULL, last, ip,
593 					    last, uh);
594 					if (udp_append(last, ip, n, iphlen,
595 						&udp_in)) {
596 						goto inp_lost;
597 					}
598 				}
599 				INP_RUNLOCK(last);
600 			}
601 			last = inp;
602 			/*
603 			 * Don't look for additional matches if this one does
604 			 * not have either the SO_REUSEPORT or SO_REUSEADDR
605 			 * socket options set.  This heuristic avoids
606 			 * searching through all pcbs in the common case of a
607 			 * non-shared port.  It assumes that an application
608 			 * will never clear these options after setting them.
609 			 */
610 			if ((last->inp_socket->so_options &
611 			    (SO_REUSEPORT|SO_REUSEADDR)) == 0)
612 				break;
613 		}
614 
615 		if (last == NULL) {
616 			/*
617 			 * No matching pcb found; discard datagram.  (No need
618 			 * to send an ICMP Port Unreachable for a broadcast
619 			 * or multicast datgram.)
620 			 */
621 			UDPSTAT_INC(udps_noportbcast);
622 			if (inp)
623 				INP_RUNLOCK(inp);
624 			INP_INFO_RUNLOCK(pcbinfo);
625 			goto badunlocked;
626 		}
627 		UDP_PROBE(receive, NULL, last, ip, last, uh);
628 		if (udp_append(last, ip, m, iphlen, &udp_in) == 0)
629 			INP_RUNLOCK(last);
630 	inp_lost:
631 		INP_INFO_RUNLOCK(pcbinfo);
632 		return (IPPROTO_DONE);
633 	}
634 
635 	/*
636 	 * Locate pcb for datagram.
637 	 */
638 
639 	/*
640 	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
641 	 */
642 	if ((m->m_flags & M_IP_NEXTHOP) &&
643 	    (fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL)) != NULL) {
644 		struct sockaddr_in *next_hop;
645 
646 		next_hop = (struct sockaddr_in *)(fwd_tag + 1);
647 
648 		/*
649 		 * Transparently forwarded. Pretend to be the destination.
650 		 * Already got one like this?
651 		 */
652 		inp = in_pcblookup_mbuf(pcbinfo, ip->ip_src, uh->uh_sport,
653 		    ip->ip_dst, uh->uh_dport, INPLOOKUP_RLOCKPCB, ifp, m);
654 		if (!inp) {
655 			/*
656 			 * It's new.  Try to find the ambushing socket.
657 			 * Because we've rewritten the destination address,
658 			 * any hardware-generated hash is ignored.
659 			 */
660 			inp = in_pcblookup(pcbinfo, ip->ip_src,
661 			    uh->uh_sport, next_hop->sin_addr,
662 			    next_hop->sin_port ? htons(next_hop->sin_port) :
663 			    uh->uh_dport, INPLOOKUP_WILDCARD |
664 			    INPLOOKUP_RLOCKPCB, ifp);
665 		}
666 		/* Remove the tag from the packet. We don't need it anymore. */
667 		m_tag_delete(m, fwd_tag);
668 		m->m_flags &= ~M_IP_NEXTHOP;
669 	} else
670 		inp = in_pcblookup_mbuf(pcbinfo, ip->ip_src, uh->uh_sport,
671 		    ip->ip_dst, uh->uh_dport, INPLOOKUP_WILDCARD |
672 		    INPLOOKUP_RLOCKPCB, ifp, m);
673 	if (inp == NULL) {
674 		if (udp_log_in_vain) {
675 			char buf[4*sizeof "123"];
676 
677 			strcpy(buf, inet_ntoa(ip->ip_dst));
678 			log(LOG_INFO,
679 			    "Connection attempt to UDP %s:%d from %s:%d\n",
680 			    buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src),
681 			    ntohs(uh->uh_sport));
682 		}
683 		UDPSTAT_INC(udps_noport);
684 		if (m->m_flags & (M_BCAST | M_MCAST)) {
685 			UDPSTAT_INC(udps_noportbcast);
686 			goto badunlocked;
687 		}
688 		if (V_udp_blackhole)
689 			goto badunlocked;
690 		if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
691 			goto badunlocked;
692 		*ip = save_ip;
693 		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
694 		return (IPPROTO_DONE);
695 	}
696 
697 	/*
698 	 * Check the minimum TTL for socket.
699 	 */
700 	INP_RLOCK_ASSERT(inp);
701 	if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) {
702 		INP_RUNLOCK(inp);
703 		m_freem(m);
704 		return (IPPROTO_DONE);
705 	}
706 	if (cscov_partial) {
707 		struct udpcb *up;
708 
709 		up = intoudpcb(inp);
710 		if (up->u_rxcslen == 0 || up->u_rxcslen > len) {
711 			INP_RUNLOCK(inp);
712 			m_freem(m);
713 			return (IPPROTO_DONE);
714 		}
715 	}
716 
717 	UDP_PROBE(receive, NULL, inp, ip, inp, uh);
718 	if (udp_append(inp, ip, m, iphlen, &udp_in) == 0)
719 		INP_RUNLOCK(inp);
720 	return (IPPROTO_DONE);
721 
722 badunlocked:
723 	m_freem(m);
724 	return (IPPROTO_DONE);
725 }
726 #endif /* INET */
727 
728 /*
729  * Notify a udp user of an asynchronous error; just wake up so that they can
730  * collect error status.
731  */
732 struct inpcb *
733 udp_notify(struct inpcb *inp, int errno)
734 {
735 
736 	/*
737 	 * While udp_ctlinput() always calls udp_notify() with a read lock
738 	 * when invoking it directly, in_pcbnotifyall() currently uses write
739 	 * locks due to sharing code with TCP.  For now, accept either a read
740 	 * or a write lock, but a read lock is sufficient.
741 	 */
742 	INP_LOCK_ASSERT(inp);
743 	if ((errno == EHOSTUNREACH || errno == ENETUNREACH ||
744 	     errno == EHOSTDOWN) && inp->inp_route.ro_rt) {
745 		RTFREE(inp->inp_route.ro_rt);
746 		inp->inp_route.ro_rt = (struct rtentry *)NULL;
747 	}
748 
749 	inp->inp_socket->so_error = errno;
750 	sorwakeup(inp->inp_socket);
751 	sowwakeup(inp->inp_socket);
752 	return (inp);
753 }
754 
755 #ifdef INET
756 static void
757 udp_common_ctlinput(int cmd, struct sockaddr *sa, void *vip,
758     struct inpcbinfo *pcbinfo)
759 {
760 	struct ip *ip = vip;
761 	struct udphdr *uh;
762 	struct in_addr faddr;
763 	struct inpcb *inp;
764 
765 	faddr = ((struct sockaddr_in *)sa)->sin_addr;
766 	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
767 		return;
768 
769 	if (PRC_IS_REDIRECT(cmd)) {
770 		/* signal EHOSTDOWN, as it flushes the cached route */
771 		in_pcbnotifyall(&V_udbinfo, faddr, EHOSTDOWN, udp_notify);
772 		return;
773 	}
774 
775 	/*
776 	 * Hostdead is ugly because it goes linearly through all PCBs.
777 	 *
778 	 * XXX: We never get this from ICMP, otherwise it makes an excellent
779 	 * DoS attack on machines with many connections.
780 	 */
781 	if (cmd == PRC_HOSTDEAD)
782 		ip = NULL;
783 	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
784 		return;
785 	if (ip != NULL) {
786 		uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
787 		inp = in_pcblookup(pcbinfo, faddr, uh->uh_dport,
788 		    ip->ip_src, uh->uh_sport, INPLOOKUP_RLOCKPCB, NULL);
789 		if (inp != NULL) {
790 			INP_RLOCK_ASSERT(inp);
791 			if (inp->inp_socket != NULL) {
792 				udp_notify(inp, inetctlerrmap[cmd]);
793 			}
794 			INP_RUNLOCK(inp);
795 		}
796 	} else
797 		in_pcbnotifyall(pcbinfo, faddr, inetctlerrmap[cmd],
798 		    udp_notify);
799 }
800 void
801 udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
802 {
803 
804 	return (udp_common_ctlinput(cmd, sa, vip, &V_udbinfo));
805 }
806 
807 void
808 udplite_ctlinput(int cmd, struct sockaddr *sa, void *vip)
809 {
810 
811 	return (udp_common_ctlinput(cmd, sa, vip, &V_ulitecbinfo));
812 }
813 #endif /* INET */
814 
815 static int
816 udp_pcblist(SYSCTL_HANDLER_ARGS)
817 {
818 	int error, i, n;
819 	struct inpcb *inp, **inp_list;
820 	inp_gen_t gencnt;
821 	struct xinpgen xig;
822 
823 	/*
824 	 * The process of preparing the PCB list is too time-consuming and
825 	 * resource-intensive to repeat twice on every request.
826 	 */
827 	if (req->oldptr == 0) {
828 		n = V_udbinfo.ipi_count;
829 		n += imax(n / 8, 10);
830 		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
831 		return (0);
832 	}
833 
834 	if (req->newptr != 0)
835 		return (EPERM);
836 
837 	/*
838 	 * OK, now we're committed to doing something.
839 	 */
840 	INP_INFO_RLOCK(&V_udbinfo);
841 	gencnt = V_udbinfo.ipi_gencnt;
842 	n = V_udbinfo.ipi_count;
843 	INP_INFO_RUNLOCK(&V_udbinfo);
844 
845 	error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
846 		+ n * sizeof(struct xinpcb));
847 	if (error != 0)
848 		return (error);
849 
850 	xig.xig_len = sizeof xig;
851 	xig.xig_count = n;
852 	xig.xig_gen = gencnt;
853 	xig.xig_sogen = so_gencnt;
854 	error = SYSCTL_OUT(req, &xig, sizeof xig);
855 	if (error)
856 		return (error);
857 
858 	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
859 	if (inp_list == 0)
860 		return (ENOMEM);
861 
862 	INP_INFO_RLOCK(&V_udbinfo);
863 	for (inp = LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n;
864 	     inp = LIST_NEXT(inp, inp_list)) {
865 		INP_WLOCK(inp);
866 		if (inp->inp_gencnt <= gencnt &&
867 		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
868 			in_pcbref(inp);
869 			inp_list[i++] = inp;
870 		}
871 		INP_WUNLOCK(inp);
872 	}
873 	INP_INFO_RUNLOCK(&V_udbinfo);
874 	n = i;
875 
876 	error = 0;
877 	for (i = 0; i < n; i++) {
878 		inp = inp_list[i];
879 		INP_RLOCK(inp);
880 		if (inp->inp_gencnt <= gencnt) {
881 			struct xinpcb xi;
882 
883 			bzero(&xi, sizeof(xi));
884 			xi.xi_len = sizeof xi;
885 			/* XXX should avoid extra copy */
886 			bcopy(inp, &xi.xi_inp, sizeof *inp);
887 			if (inp->inp_socket)
888 				sotoxsocket(inp->inp_socket, &xi.xi_socket);
889 			xi.xi_inp.inp_gencnt = inp->inp_gencnt;
890 			INP_RUNLOCK(inp);
891 			error = SYSCTL_OUT(req, &xi, sizeof xi);
892 		} else
893 			INP_RUNLOCK(inp);
894 	}
895 	INP_INFO_WLOCK(&V_udbinfo);
896 	for (i = 0; i < n; i++) {
897 		inp = inp_list[i];
898 		INP_RLOCK(inp);
899 		if (!in_pcbrele_rlocked(inp))
900 			INP_RUNLOCK(inp);
901 	}
902 	INP_INFO_WUNLOCK(&V_udbinfo);
903 
904 	if (!error) {
905 		/*
906 		 * Give the user an updated idea of our state.  If the
907 		 * generation differs from what we told her before, she knows
908 		 * that something happened while we were processing this
909 		 * request, and it might be necessary to retry.
910 		 */
911 		INP_INFO_RLOCK(&V_udbinfo);
912 		xig.xig_gen = V_udbinfo.ipi_gencnt;
913 		xig.xig_sogen = so_gencnt;
914 		xig.xig_count = V_udbinfo.ipi_count;
915 		INP_INFO_RUNLOCK(&V_udbinfo);
916 		error = SYSCTL_OUT(req, &xig, sizeof xig);
917 	}
918 	free(inp_list, M_TEMP);
919 	return (error);
920 }
921 
922 SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist,
923     CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
924     udp_pcblist, "S,xinpcb", "List of active UDP sockets");
925 
926 #ifdef INET
927 static int
928 udp_getcred(SYSCTL_HANDLER_ARGS)
929 {
930 	struct xucred xuc;
931 	struct sockaddr_in addrs[2];
932 	struct inpcb *inp;
933 	int error;
934 
935 	error = priv_check(req->td, PRIV_NETINET_GETCRED);
936 	if (error)
937 		return (error);
938 	error = SYSCTL_IN(req, addrs, sizeof(addrs));
939 	if (error)
940 		return (error);
941 	inp = in_pcblookup(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
942 	    addrs[0].sin_addr, addrs[0].sin_port,
943 	    INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, NULL);
944 	if (inp != NULL) {
945 		INP_RLOCK_ASSERT(inp);
946 		if (inp->inp_socket == NULL)
947 			error = ENOENT;
948 		if (error == 0)
949 			error = cr_canseeinpcb(req->td->td_ucred, inp);
950 		if (error == 0)
951 			cru2x(inp->inp_cred, &xuc);
952 		INP_RUNLOCK(inp);
953 	} else
954 		error = ENOENT;
955 	if (error == 0)
956 		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
957 	return (error);
958 }
959 
960 SYSCTL_PROC(_net_inet_udp, OID_AUTO, getcred,
961     CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
962     udp_getcred, "S,xucred", "Get the xucred of a UDP connection");
963 #endif /* INET */
964 
965 int
966 udp_ctloutput(struct socket *so, struct sockopt *sopt)
967 {
968 	struct inpcb *inp;
969 	struct udpcb *up;
970 	int isudplite, error, optval;
971 
972 	error = 0;
973 	isudplite = (so->so_proto->pr_protocol == IPPROTO_UDPLITE) ? 1 : 0;
974 	inp = sotoinpcb(so);
975 	KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
976 	INP_WLOCK(inp);
977 	if (sopt->sopt_level != so->so_proto->pr_protocol) {
978 #ifdef INET6
979 		if (INP_CHECK_SOCKAF(so, AF_INET6)) {
980 			INP_WUNLOCK(inp);
981 			error = ip6_ctloutput(so, sopt);
982 		}
983 #endif
984 #if defined(INET) && defined(INET6)
985 		else
986 #endif
987 #ifdef INET
988 		{
989 			INP_WUNLOCK(inp);
990 			error = ip_ctloutput(so, sopt);
991 		}
992 #endif
993 		return (error);
994 	}
995 
996 	switch (sopt->sopt_dir) {
997 	case SOPT_SET:
998 		switch (sopt->sopt_name) {
999 		case UDP_ENCAP:
1000 			INP_WUNLOCK(inp);
1001 			error = sooptcopyin(sopt, &optval, sizeof optval,
1002 					    sizeof optval);
1003 			if (error)
1004 				break;
1005 			inp = sotoinpcb(so);
1006 			KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
1007 			INP_WLOCK(inp);
1008 #ifdef IPSEC_NAT_T
1009 			up = intoudpcb(inp);
1010 			KASSERT(up != NULL, ("%s: up == NULL", __func__));
1011 #endif
1012 			switch (optval) {
1013 			case 0:
1014 				/* Clear all UDP encap. */
1015 #ifdef IPSEC_NAT_T
1016 				up->u_flags &= ~UF_ESPINUDP_ALL;
1017 #endif
1018 				break;
1019 #ifdef IPSEC_NAT_T
1020 			case UDP_ENCAP_ESPINUDP:
1021 			case UDP_ENCAP_ESPINUDP_NON_IKE:
1022 				up->u_flags &= ~UF_ESPINUDP_ALL;
1023 				if (optval == UDP_ENCAP_ESPINUDP)
1024 					up->u_flags |= UF_ESPINUDP;
1025 				else if (optval == UDP_ENCAP_ESPINUDP_NON_IKE)
1026 					up->u_flags |= UF_ESPINUDP_NON_IKE;
1027 				break;
1028 #endif
1029 			default:
1030 				error = EINVAL;
1031 				break;
1032 			}
1033 			INP_WUNLOCK(inp);
1034 			break;
1035 		case UDPLITE_SEND_CSCOV:
1036 		case UDPLITE_RECV_CSCOV:
1037 			if (!isudplite) {
1038 				INP_WUNLOCK(inp);
1039 				error = ENOPROTOOPT;
1040 				break;
1041 			}
1042 			INP_WUNLOCK(inp);
1043 			error = sooptcopyin(sopt, &optval, sizeof(optval),
1044 			    sizeof(optval));
1045 			if (error != 0)
1046 				break;
1047 			inp = sotoinpcb(so);
1048 			KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
1049 			INP_WLOCK(inp);
1050 			up = intoudpcb(inp);
1051 			KASSERT(up != NULL, ("%s: up == NULL", __func__));
1052 			if ((optval != 0 && optval < 8) || (optval > 65535)) {
1053 				INP_WUNLOCK(inp);
1054 				error = EINVAL;
1055 				break;
1056 			}
1057 			if (sopt->sopt_name == UDPLITE_SEND_CSCOV)
1058 				up->u_txcslen = optval;
1059 			else
1060 				up->u_rxcslen = optval;
1061 			INP_WUNLOCK(inp);
1062 			break;
1063 		default:
1064 			INP_WUNLOCK(inp);
1065 			error = ENOPROTOOPT;
1066 			break;
1067 		}
1068 		break;
1069 	case SOPT_GET:
1070 		switch (sopt->sopt_name) {
1071 #ifdef IPSEC_NAT_T
1072 		case UDP_ENCAP:
1073 			up = intoudpcb(inp);
1074 			KASSERT(up != NULL, ("%s: up == NULL", __func__));
1075 			optval = up->u_flags & UF_ESPINUDP_ALL;
1076 			INP_WUNLOCK(inp);
1077 			error = sooptcopyout(sopt, &optval, sizeof optval);
1078 			break;
1079 #endif
1080 		case UDPLITE_SEND_CSCOV:
1081 		case UDPLITE_RECV_CSCOV:
1082 			if (!isudplite) {
1083 				INP_WUNLOCK(inp);
1084 				error = ENOPROTOOPT;
1085 				break;
1086 			}
1087 			up = intoudpcb(inp);
1088 			KASSERT(up != NULL, ("%s: up == NULL", __func__));
1089 			if (sopt->sopt_name == UDPLITE_SEND_CSCOV)
1090 				optval = up->u_txcslen;
1091 			else
1092 				optval = up->u_rxcslen;
1093 			INP_WUNLOCK(inp);
1094 			error = sooptcopyout(sopt, &optval, sizeof(optval));
1095 			break;
1096 		default:
1097 			INP_WUNLOCK(inp);
1098 			error = ENOPROTOOPT;
1099 			break;
1100 		}
1101 		break;
1102 	}
1103 	return (error);
1104 }
1105 
1106 #ifdef INET
1107 #define	UH_WLOCKED	2
1108 #define	UH_RLOCKED	1
1109 #define	UH_UNLOCKED	0
1110 static int
1111 udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
1112     struct mbuf *control, struct thread *td)
1113 {
1114 	struct udpiphdr *ui;
1115 	int len = m->m_pkthdr.len;
1116 	struct in_addr faddr, laddr;
1117 	struct cmsghdr *cm;
1118 	struct inpcbinfo *pcbinfo;
1119 	struct sockaddr_in *sin, src;
1120 	int cscov_partial = 0;
1121 	int error = 0;
1122 	int ipflags;
1123 	u_short fport, lport;
1124 	int unlock_udbinfo, unlock_inp;
1125 	u_char tos;
1126 	uint8_t pr;
1127 	uint16_t cscov = 0;
1128 	uint32_t flowid = 0;
1129 	uint8_t flowtype = M_HASHTYPE_NONE;
1130 
1131 	/*
1132 	 * udp_output() may need to temporarily bind or connect the current
1133 	 * inpcb.  As such, we don't know up front whether we will need the
1134 	 * pcbinfo lock or not.  Do any work to decide what is needed up
1135 	 * front before acquiring any locks.
1136 	 */
1137 	if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
1138 		if (control)
1139 			m_freem(control);
1140 		m_freem(m);
1141 		return (EMSGSIZE);
1142 	}
1143 
1144 	src.sin_family = 0;
1145 	sin = (struct sockaddr_in *)addr;
1146 	if (sin == NULL ||
1147 	    (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
1148 		INP_WLOCK(inp);
1149 		unlock_inp = UH_WLOCKED;
1150 	} else {
1151 		INP_RLOCK(inp);
1152 		unlock_inp = UH_RLOCKED;
1153 	}
1154 	tos = inp->inp_ip_tos;
1155 	if (control != NULL) {
1156 		/*
1157 		 * XXX: Currently, we assume all the optional information is
1158 		 * stored in a single mbuf.
1159 		 */
1160 		if (control->m_next) {
1161 			if (unlock_inp == UH_WLOCKED)
1162 				INP_WUNLOCK(inp);
1163 			else
1164 				INP_RUNLOCK(inp);
1165 			m_freem(control);
1166 			m_freem(m);
1167 			return (EINVAL);
1168 		}
1169 		for (; control->m_len > 0;
1170 		    control->m_data += CMSG_ALIGN(cm->cmsg_len),
1171 		    control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
1172 			cm = mtod(control, struct cmsghdr *);
1173 			if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0
1174 			    || cm->cmsg_len > control->m_len) {
1175 				error = EINVAL;
1176 				break;
1177 			}
1178 			if (cm->cmsg_level != IPPROTO_IP)
1179 				continue;
1180 
1181 			switch (cm->cmsg_type) {
1182 			case IP_SENDSRCADDR:
1183 				if (cm->cmsg_len !=
1184 				    CMSG_LEN(sizeof(struct in_addr))) {
1185 					error = EINVAL;
1186 					break;
1187 				}
1188 				bzero(&src, sizeof(src));
1189 				src.sin_family = AF_INET;
1190 				src.sin_len = sizeof(src);
1191 				src.sin_port = inp->inp_lport;
1192 				src.sin_addr =
1193 				    *(struct in_addr *)CMSG_DATA(cm);
1194 				break;
1195 
1196 			case IP_TOS:
1197 				if (cm->cmsg_len != CMSG_LEN(sizeof(u_char))) {
1198 					error = EINVAL;
1199 					break;
1200 				}
1201 				tos = *(u_char *)CMSG_DATA(cm);
1202 				break;
1203 
1204 			case IP_FLOWID:
1205 				if (cm->cmsg_len != CMSG_LEN(sizeof(uint32_t))) {
1206 					error = EINVAL;
1207 					break;
1208 				}
1209 				flowid = *(uint32_t *) CMSG_DATA(cm);
1210 				break;
1211 
1212 			case IP_FLOWTYPE:
1213 				if (cm->cmsg_len != CMSG_LEN(sizeof(uint32_t))) {
1214 					error = EINVAL;
1215 					break;
1216 				}
1217 				flowtype = *(uint32_t *) CMSG_DATA(cm);
1218 				break;
1219 
1220 #ifdef	RSS
1221 			case IP_RSSBUCKETID:
1222 				if (cm->cmsg_len != CMSG_LEN(sizeof(uint32_t))) {
1223 					error = EINVAL;
1224 					break;
1225 				}
1226 				/* This is just a placeholder for now */
1227 				break;
1228 #endif	/* RSS */
1229 			default:
1230 				error = ENOPROTOOPT;
1231 				break;
1232 			}
1233 			if (error)
1234 				break;
1235 		}
1236 		m_freem(control);
1237 	}
1238 	if (error) {
1239 		if (unlock_inp == UH_WLOCKED)
1240 			INP_WUNLOCK(inp);
1241 		else
1242 			INP_RUNLOCK(inp);
1243 		m_freem(m);
1244 		return (error);
1245 	}
1246 
1247 	/*
1248 	 * Depending on whether or not the application has bound or connected
1249 	 * the socket, we may have to do varying levels of work.  The optimal
1250 	 * case is for a connected UDP socket, as a global lock isn't
1251 	 * required at all.
1252 	 *
1253 	 * In order to decide which we need, we require stability of the
1254 	 * inpcb binding, which we ensure by acquiring a read lock on the
1255 	 * inpcb.  This doesn't strictly follow the lock order, so we play
1256 	 * the trylock and retry game; note that we may end up with more
1257 	 * conservative locks than required the second time around, so later
1258 	 * assertions have to accept that.  Further analysis of the number of
1259 	 * misses under contention is required.
1260 	 *
1261 	 * XXXRW: Check that hash locking update here is correct.
1262 	 */
1263 	pr = inp->inp_socket->so_proto->pr_protocol;
1264 	pcbinfo = udp_get_inpcbinfo(pr);
1265 	sin = (struct sockaddr_in *)addr;
1266 	if (sin != NULL &&
1267 	    (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
1268 		INP_HASH_WLOCK(pcbinfo);
1269 		unlock_udbinfo = UH_WLOCKED;
1270 	} else if ((sin != NULL && (
1271 	    (sin->sin_addr.s_addr == INADDR_ANY) ||
1272 	    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
1273 	    (inp->inp_laddr.s_addr == INADDR_ANY) ||
1274 	    (inp->inp_lport == 0))) ||
1275 	    (src.sin_family == AF_INET)) {
1276 		INP_HASH_RLOCK(pcbinfo);
1277 		unlock_udbinfo = UH_RLOCKED;
1278 	} else
1279 		unlock_udbinfo = UH_UNLOCKED;
1280 
1281 	/*
1282 	 * If the IP_SENDSRCADDR control message was specified, override the
1283 	 * source address for this datagram.  Its use is invalidated if the
1284 	 * address thus specified is incomplete or clobbers other inpcbs.
1285 	 */
1286 	laddr = inp->inp_laddr;
1287 	lport = inp->inp_lport;
1288 	if (src.sin_family == AF_INET) {
1289 		INP_HASH_LOCK_ASSERT(pcbinfo);
1290 		if ((lport == 0) ||
1291 		    (laddr.s_addr == INADDR_ANY &&
1292 		     src.sin_addr.s_addr == INADDR_ANY)) {
1293 			error = EINVAL;
1294 			goto release;
1295 		}
1296 		error = in_pcbbind_setup(inp, (struct sockaddr *)&src,
1297 		    &laddr.s_addr, &lport, td->td_ucred);
1298 		if (error)
1299 			goto release;
1300 	}
1301 
1302 	/*
1303 	 * If a UDP socket has been connected, then a local address/port will
1304 	 * have been selected and bound.
1305 	 *
1306 	 * If a UDP socket has not been connected to, then an explicit
1307 	 * destination address must be used, in which case a local
1308 	 * address/port may not have been selected and bound.
1309 	 */
1310 	if (sin != NULL) {
1311 		INP_LOCK_ASSERT(inp);
1312 		if (inp->inp_faddr.s_addr != INADDR_ANY) {
1313 			error = EISCONN;
1314 			goto release;
1315 		}
1316 
1317 		/*
1318 		 * Jail may rewrite the destination address, so let it do
1319 		 * that before we use it.
1320 		 */
1321 		error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1322 		if (error)
1323 			goto release;
1324 
1325 		/*
1326 		 * If a local address or port hasn't yet been selected, or if
1327 		 * the destination address needs to be rewritten due to using
1328 		 * a special INADDR_ constant, invoke in_pcbconnect_setup()
1329 		 * to do the heavy lifting.  Once a port is selected, we
1330 		 * commit the binding back to the socket; we also commit the
1331 		 * binding of the address if in jail.
1332 		 *
1333 		 * If we already have a valid binding and we're not
1334 		 * requesting a destination address rewrite, use a fast path.
1335 		 */
1336 		if (inp->inp_laddr.s_addr == INADDR_ANY ||
1337 		    inp->inp_lport == 0 ||
1338 		    sin->sin_addr.s_addr == INADDR_ANY ||
1339 		    sin->sin_addr.s_addr == INADDR_BROADCAST) {
1340 			INP_HASH_LOCK_ASSERT(pcbinfo);
1341 			error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
1342 			    &lport, &faddr.s_addr, &fport, NULL,
1343 			    td->td_ucred);
1344 			if (error)
1345 				goto release;
1346 
1347 			/*
1348 			 * XXXRW: Why not commit the port if the address is
1349 			 * !INADDR_ANY?
1350 			 */
1351 			/* Commit the local port if newly assigned. */
1352 			if (inp->inp_laddr.s_addr == INADDR_ANY &&
1353 			    inp->inp_lport == 0) {
1354 				INP_WLOCK_ASSERT(inp);
1355 				INP_HASH_WLOCK_ASSERT(pcbinfo);
1356 				/*
1357 				 * Remember addr if jailed, to prevent
1358 				 * rebinding.
1359 				 */
1360 				if (prison_flag(td->td_ucred, PR_IP4))
1361 					inp->inp_laddr = laddr;
1362 				inp->inp_lport = lport;
1363 				if (in_pcbinshash(inp) != 0) {
1364 					inp->inp_lport = 0;
1365 					error = EAGAIN;
1366 					goto release;
1367 				}
1368 				inp->inp_flags |= INP_ANONPORT;
1369 			}
1370 		} else {
1371 			faddr = sin->sin_addr;
1372 			fport = sin->sin_port;
1373 		}
1374 	} else {
1375 		INP_LOCK_ASSERT(inp);
1376 		faddr = inp->inp_faddr;
1377 		fport = inp->inp_fport;
1378 		if (faddr.s_addr == INADDR_ANY) {
1379 			error = ENOTCONN;
1380 			goto release;
1381 		}
1382 	}
1383 
1384 	/*
1385 	 * Calculate data length and get a mbuf for UDP, IP, and possible
1386 	 * link-layer headers.  Immediate slide the data pointer back forward
1387 	 * since we won't use that space at this layer.
1388 	 */
1389 	M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_NOWAIT);
1390 	if (m == NULL) {
1391 		error = ENOBUFS;
1392 		goto release;
1393 	}
1394 	m->m_data += max_linkhdr;
1395 	m->m_len -= max_linkhdr;
1396 	m->m_pkthdr.len -= max_linkhdr;
1397 
1398 	/*
1399 	 * Fill in mbuf with extended UDP header and addresses and length put
1400 	 * into network format.
1401 	 */
1402 	ui = mtod(m, struct udpiphdr *);
1403 	bzero(ui->ui_x1, sizeof(ui->ui_x1));	/* XXX still needed? */
1404 	ui->ui_pr = pr;
1405 	ui->ui_src = laddr;
1406 	ui->ui_dst = faddr;
1407 	ui->ui_sport = lport;
1408 	ui->ui_dport = fport;
1409 	ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
1410 	if (pr == IPPROTO_UDPLITE) {
1411 		struct udpcb *up;
1412 		uint16_t plen;
1413 
1414 		up = intoudpcb(inp);
1415 		cscov = up->u_txcslen;
1416 		plen = (u_short)len + sizeof(struct udphdr);
1417 		if (cscov >= plen)
1418 			cscov = 0;
1419 		ui->ui_len = htons(plen);
1420 		ui->ui_ulen = htons(cscov);
1421 		/*
1422 		 * For UDP-Lite, checksum coverage length of zero means
1423 		 * the entire UDPLite packet is covered by the checksum.
1424 		 */
1425 		cscov_partial = (cscov == 0) ? 0 : 1;
1426 	} else
1427 		ui->ui_v = IPVERSION << 4;
1428 
1429 	/*
1430 	 * Set the Don't Fragment bit in the IP header.
1431 	 */
1432 	if (inp->inp_flags & INP_DONTFRAG) {
1433 		struct ip *ip;
1434 
1435 		ip = (struct ip *)&ui->ui_i;
1436 		ip->ip_off |= htons(IP_DF);
1437 	}
1438 
1439 	ipflags = 0;
1440 	if (inp->inp_socket->so_options & SO_DONTROUTE)
1441 		ipflags |= IP_ROUTETOIF;
1442 	if (inp->inp_socket->so_options & SO_BROADCAST)
1443 		ipflags |= IP_ALLOWBROADCAST;
1444 	if (inp->inp_flags & INP_ONESBCAST)
1445 		ipflags |= IP_SENDONES;
1446 
1447 #ifdef MAC
1448 	mac_inpcb_create_mbuf(inp, m);
1449 #endif
1450 
1451 	/*
1452 	 * Set up checksum and output datagram.
1453 	 */
1454 	ui->ui_sum = 0;
1455 	if (pr == IPPROTO_UDPLITE) {
1456 		if (inp->inp_flags & INP_ONESBCAST)
1457 			faddr.s_addr = INADDR_BROADCAST;
1458 		if (cscov_partial) {
1459 			if ((ui->ui_sum = in_cksum(m, sizeof(struct ip) + cscov)) == 0)
1460 				ui->ui_sum = 0xffff;
1461 		} else {
1462 			if ((ui->ui_sum = in_cksum(m, sizeof(struct udpiphdr) + len)) == 0)
1463 				ui->ui_sum = 0xffff;
1464 		}
1465 	} else if (V_udp_cksum) {
1466 		if (inp->inp_flags & INP_ONESBCAST)
1467 			faddr.s_addr = INADDR_BROADCAST;
1468 		ui->ui_sum = in_pseudo(ui->ui_src.s_addr, faddr.s_addr,
1469 		    htons((u_short)len + sizeof(struct udphdr) + pr));
1470 		m->m_pkthdr.csum_flags = CSUM_UDP;
1471 		m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
1472 	}
1473 	((struct ip *)ui)->ip_len = htons(sizeof(struct udpiphdr) + len);
1474 	((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl;	/* XXX */
1475 	((struct ip *)ui)->ip_tos = tos;		/* XXX */
1476 	UDPSTAT_INC(udps_opackets);
1477 
1478 	/*
1479 	 * Setup flowid / RSS information for outbound socket.
1480 	 *
1481 	 * Once the UDP code decides to set a flowid some other way,
1482 	 * this allows the flowid to be overridden by userland.
1483 	 */
1484 	if (flowtype != M_HASHTYPE_NONE) {
1485 		m->m_pkthdr.flowid = flowid;
1486 		M_HASHTYPE_SET(m, flowtype);
1487 #ifdef	RSS
1488 	} else {
1489 		uint32_t hash_val, hash_type;
1490 		/*
1491 		 * Calculate an appropriate RSS hash for UDP and
1492 		 * UDP Lite.
1493 		 *
1494 		 * The called function will take care of figuring out
1495 		 * whether a 2-tuple or 4-tuple hash is required based
1496 		 * on the currently configured scheme.
1497 		 *
1498 		 * Later later on connected socket values should be
1499 		 * cached in the inpcb and reused, rather than constantly
1500 		 * re-calculating it.
1501 		 *
1502 		 * UDP Lite is a different protocol number and will
1503 		 * likely end up being hashed as a 2-tuple until
1504 		 * RSS / NICs grow UDP Lite protocol awareness.
1505 		 */
1506 		if (rss_proto_software_hash_v4(faddr, laddr, fport, lport,
1507 		    pr, &hash_val, &hash_type) == 0) {
1508 			m->m_pkthdr.flowid = hash_val;
1509 			M_HASHTYPE_SET(m, hash_type);
1510 		}
1511 #endif
1512 	}
1513 
1514 #ifdef	RSS
1515 	/*
1516 	 * Don't override with the inp cached flowid value.
1517 	 *
1518 	 * Depending upon the kind of send being done, the inp
1519 	 * flowid/flowtype values may actually not be appropriate
1520 	 * for this particular socket send.
1521 	 *
1522 	 * We should either leave the flowid at zero (which is what is
1523 	 * currently done) or set it to some software generated
1524 	 * hash value based on the packet contents.
1525 	 */
1526 	ipflags |= IP_NODEFAULTFLOWID;
1527 #endif	/* RSS */
1528 
1529 	if (unlock_udbinfo == UH_WLOCKED)
1530 		INP_HASH_WUNLOCK(pcbinfo);
1531 	else if (unlock_udbinfo == UH_RLOCKED)
1532 		INP_HASH_RUNLOCK(pcbinfo);
1533 	UDP_PROBE(send, NULL, inp, &ui->ui_i, inp, &ui->ui_u);
1534 	error = ip_output(m, inp->inp_options,
1535 	    (unlock_inp == UH_WLOCKED ? &inp->inp_route : NULL), ipflags,
1536 	    inp->inp_moptions, inp);
1537 	if (unlock_inp == UH_WLOCKED)
1538 		INP_WUNLOCK(inp);
1539 	else
1540 		INP_RUNLOCK(inp);
1541 	return (error);
1542 
1543 release:
1544 	if (unlock_udbinfo == UH_WLOCKED) {
1545 		INP_HASH_WUNLOCK(pcbinfo);
1546 		INP_WUNLOCK(inp);
1547 	} else if (unlock_udbinfo == UH_RLOCKED) {
1548 		INP_HASH_RUNLOCK(pcbinfo);
1549 		INP_RUNLOCK(inp);
1550 	} else
1551 		INP_RUNLOCK(inp);
1552 	m_freem(m);
1553 	return (error);
1554 }
1555 
1556 
1557 #if defined(IPSEC) && defined(IPSEC_NAT_T)
1558 /*
1559  * Potentially decap ESP in UDP frame.  Check for an ESP header
1560  * and optional marker; if present, strip the UDP header and
1561  * push the result through IPSec.
1562  *
1563  * Returns mbuf to be processed (potentially re-allocated) or
1564  * NULL if consumed and/or processed.
1565  */
1566 static struct mbuf *
1567 udp4_espdecap(struct inpcb *inp, struct mbuf *m, int off)
1568 {
1569 	size_t minlen, payload, skip, iphlen;
1570 	caddr_t data;
1571 	struct udpcb *up;
1572 	struct m_tag *tag;
1573 	struct udphdr *udphdr;
1574 	struct ip *ip;
1575 
1576 	INP_RLOCK_ASSERT(inp);
1577 
1578 	/*
1579 	 * Pull up data so the longest case is contiguous:
1580 	 *    IP/UDP hdr + non ESP marker + ESP hdr.
1581 	 */
1582 	minlen = off + sizeof(uint64_t) + sizeof(struct esp);
1583 	if (minlen > m->m_pkthdr.len)
1584 		minlen = m->m_pkthdr.len;
1585 	if ((m = m_pullup(m, minlen)) == NULL) {
1586 		IPSECSTAT_INC(ips_in_inval);
1587 		return (NULL);		/* Bypass caller processing. */
1588 	}
1589 	data = mtod(m, caddr_t);	/* Points to ip header. */
1590 	payload = m->m_len - off;	/* Size of payload. */
1591 
1592 	if (payload == 1 && data[off] == '\xff')
1593 		return (m);		/* NB: keepalive packet, no decap. */
1594 
1595 	up = intoudpcb(inp);
1596 	KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
1597 	KASSERT((up->u_flags & UF_ESPINUDP_ALL) != 0,
1598 	    ("u_flags 0x%x", up->u_flags));
1599 
1600 	/*
1601 	 * Check that the payload is large enough to hold an
1602 	 * ESP header and compute the amount of data to remove.
1603 	 *
1604 	 * NB: the caller has already done a pullup for us.
1605 	 * XXX can we assume alignment and eliminate bcopys?
1606 	 */
1607 	if (up->u_flags & UF_ESPINUDP_NON_IKE) {
1608 		/*
1609 		 * draft-ietf-ipsec-nat-t-ike-0[01].txt and
1610 		 * draft-ietf-ipsec-udp-encaps-(00/)01.txt, ignoring
1611 		 * possible AH mode non-IKE marker+non-ESP marker
1612 		 * from draft-ietf-ipsec-udp-encaps-00.txt.
1613 		 */
1614 		uint64_t marker;
1615 
1616 		if (payload <= sizeof(uint64_t) + sizeof(struct esp))
1617 			return (m);	/* NB: no decap. */
1618 		bcopy(data + off, &marker, sizeof(uint64_t));
1619 		if (marker != 0)	/* Non-IKE marker. */
1620 			return (m);	/* NB: no decap. */
1621 		skip = sizeof(uint64_t) + sizeof(struct udphdr);
1622 	} else {
1623 		uint32_t spi;
1624 
1625 		if (payload <= sizeof(struct esp)) {
1626 			IPSECSTAT_INC(ips_in_inval);
1627 			m_freem(m);
1628 			return (NULL);	/* Discard. */
1629 		}
1630 		bcopy(data + off, &spi, sizeof(uint32_t));
1631 		if (spi == 0)		/* Non-ESP marker. */
1632 			return (m);	/* NB: no decap. */
1633 		skip = sizeof(struct udphdr);
1634 	}
1635 
1636 	/*
1637 	 * Setup a PACKET_TAG_IPSEC_NAT_T_PORT tag to remember
1638 	 * the UDP ports. This is required if we want to select
1639 	 * the right SPD for multiple hosts behind same NAT.
1640 	 *
1641 	 * NB: ports are maintained in network byte order everywhere
1642 	 *     in the NAT-T code.
1643 	 */
1644 	tag = m_tag_get(PACKET_TAG_IPSEC_NAT_T_PORTS,
1645 		2 * sizeof(uint16_t), M_NOWAIT);
1646 	if (tag == NULL) {
1647 		IPSECSTAT_INC(ips_in_nomem);
1648 		m_freem(m);
1649 		return (NULL);		/* Discard. */
1650 	}
1651 	iphlen = off - sizeof(struct udphdr);
1652 	udphdr = (struct udphdr *)(data + iphlen);
1653 	((uint16_t *)(tag + 1))[0] = udphdr->uh_sport;
1654 	((uint16_t *)(tag + 1))[1] = udphdr->uh_dport;
1655 	m_tag_prepend(m, tag);
1656 
1657 	/*
1658 	 * Remove the UDP header (and possibly the non ESP marker)
1659 	 * IP header length is iphlen
1660 	 * Before:
1661 	 *   <--- off --->
1662 	 *   +----+------+-----+
1663 	 *   | IP |  UDP | ESP |
1664 	 *   +----+------+-----+
1665 	 *        <-skip->
1666 	 * After:
1667 	 *          +----+-----+
1668 	 *          | IP | ESP |
1669 	 *          +----+-----+
1670 	 *   <-skip->
1671 	 */
1672 	ovbcopy(data, data + skip, iphlen);
1673 	m_adj(m, skip);
1674 
1675 	ip = mtod(m, struct ip *);
1676 	ip->ip_len = htons(ntohs(ip->ip_len) - skip);
1677 	ip->ip_p = IPPROTO_ESP;
1678 
1679 	/*
1680 	 * We cannot yet update the cksums so clear any
1681 	 * h/w cksum flags as they are no longer valid.
1682 	 */
1683 	if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)
1684 		m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
1685 
1686 	(void) ipsec_common_input(m, iphlen, offsetof(struct ip, ip_p),
1687 				AF_INET, ip->ip_p);
1688 	return (NULL);			/* NB: consumed, bypass processing. */
1689 }
1690 #endif /* defined(IPSEC) && defined(IPSEC_NAT_T) */
1691 
1692 static void
1693 udp_abort(struct socket *so)
1694 {
1695 	struct inpcb *inp;
1696 	struct inpcbinfo *pcbinfo;
1697 
1698 	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
1699 	inp = sotoinpcb(so);
1700 	KASSERT(inp != NULL, ("udp_abort: inp == NULL"));
1701 	INP_WLOCK(inp);
1702 	if (inp->inp_faddr.s_addr != INADDR_ANY) {
1703 		INP_HASH_WLOCK(pcbinfo);
1704 		in_pcbdisconnect(inp);
1705 		inp->inp_laddr.s_addr = INADDR_ANY;
1706 		INP_HASH_WUNLOCK(pcbinfo);
1707 		soisdisconnected(so);
1708 	}
1709 	INP_WUNLOCK(inp);
1710 }
1711 
1712 static int
1713 udp_attach(struct socket *so, int proto, struct thread *td)
1714 {
1715 	struct inpcb *inp;
1716 	struct inpcbinfo *pcbinfo;
1717 	int error;
1718 
1719 	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
1720 	inp = sotoinpcb(so);
1721 	KASSERT(inp == NULL, ("udp_attach: inp != NULL"));
1722 	error = soreserve(so, udp_sendspace, udp_recvspace);
1723 	if (error)
1724 		return (error);
1725 	INP_INFO_WLOCK(pcbinfo);
1726 	error = in_pcballoc(so, pcbinfo);
1727 	if (error) {
1728 		INP_INFO_WUNLOCK(pcbinfo);
1729 		return (error);
1730 	}
1731 
1732 	inp = sotoinpcb(so);
1733 	inp->inp_vflag |= INP_IPV4;
1734 	inp->inp_ip_ttl = V_ip_defttl;
1735 
1736 	error = udp_newudpcb(inp);
1737 	if (error) {
1738 		in_pcbdetach(inp);
1739 		in_pcbfree(inp);
1740 		INP_INFO_WUNLOCK(pcbinfo);
1741 		return (error);
1742 	}
1743 
1744 	INP_WUNLOCK(inp);
1745 	INP_INFO_WUNLOCK(pcbinfo);
1746 	return (0);
1747 }
1748 #endif /* INET */
1749 
1750 int
1751 udp_set_kernel_tunneling(struct socket *so, udp_tun_func_t f, void *ctx)
1752 {
1753 	struct inpcb *inp;
1754 	struct udpcb *up;
1755 
1756 	KASSERT(so->so_type == SOCK_DGRAM,
1757 	    ("udp_set_kernel_tunneling: !dgram"));
1758 	inp = sotoinpcb(so);
1759 	KASSERT(inp != NULL, ("udp_set_kernel_tunneling: inp == NULL"));
1760 	INP_WLOCK(inp);
1761 	up = intoudpcb(inp);
1762 	if (up->u_tun_func != NULL) {
1763 		INP_WUNLOCK(inp);
1764 		return (EBUSY);
1765 	}
1766 	up->u_tun_func = f;
1767 	up->u_tun_ctx = ctx;
1768 	INP_WUNLOCK(inp);
1769 	return (0);
1770 }
1771 
1772 #ifdef INET
1773 static int
1774 udp_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
1775 {
1776 	struct inpcb *inp;
1777 	struct inpcbinfo *pcbinfo;
1778 	int error;
1779 
1780 	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
1781 	inp = sotoinpcb(so);
1782 	KASSERT(inp != NULL, ("udp_bind: inp == NULL"));
1783 	INP_WLOCK(inp);
1784 	INP_HASH_WLOCK(pcbinfo);
1785 	error = in_pcbbind(inp, nam, td->td_ucred);
1786 	INP_HASH_WUNLOCK(pcbinfo);
1787 	INP_WUNLOCK(inp);
1788 	return (error);
1789 }
1790 
1791 static void
1792 udp_close(struct socket *so)
1793 {
1794 	struct inpcb *inp;
1795 	struct inpcbinfo *pcbinfo;
1796 
1797 	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
1798 	inp = sotoinpcb(so);
1799 	KASSERT(inp != NULL, ("udp_close: inp == NULL"));
1800 	INP_WLOCK(inp);
1801 	if (inp->inp_faddr.s_addr != INADDR_ANY) {
1802 		INP_HASH_WLOCK(pcbinfo);
1803 		in_pcbdisconnect(inp);
1804 		inp->inp_laddr.s_addr = INADDR_ANY;
1805 		INP_HASH_WUNLOCK(pcbinfo);
1806 		soisdisconnected(so);
1807 	}
1808 	INP_WUNLOCK(inp);
1809 }
1810 
1811 static int
1812 udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1813 {
1814 	struct inpcb *inp;
1815 	struct inpcbinfo *pcbinfo;
1816 	struct sockaddr_in *sin;
1817 	int error;
1818 
1819 	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
1820 	inp = sotoinpcb(so);
1821 	KASSERT(inp != NULL, ("udp_connect: inp == NULL"));
1822 	INP_WLOCK(inp);
1823 	if (inp->inp_faddr.s_addr != INADDR_ANY) {
1824 		INP_WUNLOCK(inp);
1825 		return (EISCONN);
1826 	}
1827 	sin = (struct sockaddr_in *)nam;
1828 	error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1829 	if (error != 0) {
1830 		INP_WUNLOCK(inp);
1831 		return (error);
1832 	}
1833 	INP_HASH_WLOCK(pcbinfo);
1834 	error = in_pcbconnect(inp, nam, td->td_ucred);
1835 	INP_HASH_WUNLOCK(pcbinfo);
1836 	if (error == 0)
1837 		soisconnected(so);
1838 	INP_WUNLOCK(inp);
1839 	return (error);
1840 }
1841 
1842 static void
1843 udp_detach(struct socket *so)
1844 {
1845 	struct inpcb *inp;
1846 	struct inpcbinfo *pcbinfo;
1847 	struct udpcb *up;
1848 
1849 	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
1850 	inp = sotoinpcb(so);
1851 	KASSERT(inp != NULL, ("udp_detach: inp == NULL"));
1852 	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
1853 	    ("udp_detach: not disconnected"));
1854 	INP_INFO_WLOCK(pcbinfo);
1855 	INP_WLOCK(inp);
1856 	up = intoudpcb(inp);
1857 	KASSERT(up != NULL, ("%s: up == NULL", __func__));
1858 	inp->inp_ppcb = NULL;
1859 	in_pcbdetach(inp);
1860 	in_pcbfree(inp);
1861 	INP_INFO_WUNLOCK(pcbinfo);
1862 	udp_discardcb(up);
1863 }
1864 
1865 static int
1866 udp_disconnect(struct socket *so)
1867 {
1868 	struct inpcb *inp;
1869 	struct inpcbinfo *pcbinfo;
1870 
1871 	pcbinfo = udp_get_inpcbinfo(so->so_proto->pr_protocol);
1872 	inp = sotoinpcb(so);
1873 	KASSERT(inp != NULL, ("udp_disconnect: inp == NULL"));
1874 	INP_WLOCK(inp);
1875 	if (inp->inp_faddr.s_addr == INADDR_ANY) {
1876 		INP_WUNLOCK(inp);
1877 		return (ENOTCONN);
1878 	}
1879 	INP_HASH_WLOCK(pcbinfo);
1880 	in_pcbdisconnect(inp);
1881 	inp->inp_laddr.s_addr = INADDR_ANY;
1882 	INP_HASH_WUNLOCK(pcbinfo);
1883 	SOCK_LOCK(so);
1884 	so->so_state &= ~SS_ISCONNECTED;		/* XXX */
1885 	SOCK_UNLOCK(so);
1886 	INP_WUNLOCK(inp);
1887 	return (0);
1888 }
1889 
1890 static int
1891 udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
1892     struct mbuf *control, struct thread *td)
1893 {
1894 	struct inpcb *inp;
1895 
1896 	inp = sotoinpcb(so);
1897 	KASSERT(inp != NULL, ("udp_send: inp == NULL"));
1898 	return (udp_output(inp, m, addr, control, td));
1899 }
1900 #endif /* INET */
1901 
1902 int
1903 udp_shutdown(struct socket *so)
1904 {
1905 	struct inpcb *inp;
1906 
1907 	inp = sotoinpcb(so);
1908 	KASSERT(inp != NULL, ("udp_shutdown: inp == NULL"));
1909 	INP_WLOCK(inp);
1910 	socantsendmore(so);
1911 	INP_WUNLOCK(inp);
1912 	return (0);
1913 }
1914 
1915 #ifdef INET
1916 struct pr_usrreqs udp_usrreqs = {
1917 	.pru_abort =		udp_abort,
1918 	.pru_attach =		udp_attach,
1919 	.pru_bind =		udp_bind,
1920 	.pru_connect =		udp_connect,
1921 	.pru_control =		in_control,
1922 	.pru_detach =		udp_detach,
1923 	.pru_disconnect =	udp_disconnect,
1924 	.pru_peeraddr =		in_getpeeraddr,
1925 	.pru_send =		udp_send,
1926 	.pru_soreceive =	soreceive_dgram,
1927 	.pru_sosend =		sosend_dgram,
1928 	.pru_shutdown =		udp_shutdown,
1929 	.pru_sockaddr =		in_getsockaddr,
1930 	.pru_sosetlabel =	in_pcbsosetlabel,
1931 	.pru_close =		udp_close,
1932 };
1933 #endif /* INET */
1934