xref: /freebsd/sys/netinet/udp_usrreq.c (revision 6186fd1857626de0f7cb1a9e4dff19082f9ebb11)
1 /*-
2  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
3  *	The Regents of the University of California.
4  * Copyright (c) 2008 Robert N. M. Watson
5  * Copyright (c) 2010-2011 Juniper Networks, Inc.
6  * Copyright (c) 2014 Kevin Lo
7  * All rights reserved.
8  *
9  * Portions of this software were developed by Robert N. M. Watson under
10  * contract to Juniper Networks, Inc.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)udp_usrreq.c	8.6 (Berkeley) 5/23/95
37  */
38 
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
41 
42 #include "opt_ipfw.h"
43 #include "opt_inet.h"
44 #include "opt_inet6.h"
45 #include "opt_ipsec.h"
46 #include "opt_rss.h"
47 
48 #include <sys/param.h>
49 #include <sys/domain.h>
50 #include <sys/eventhandler.h>
51 #include <sys/jail.h>
52 #include <sys/kernel.h>
53 #include <sys/lock.h>
54 #include <sys/malloc.h>
55 #include <sys/mbuf.h>
56 #include <sys/priv.h>
57 #include <sys/proc.h>
58 #include <sys/protosw.h>
59 #include <sys/sdt.h>
60 #include <sys/signalvar.h>
61 #include <sys/socket.h>
62 #include <sys/socketvar.h>
63 #include <sys/sx.h>
64 #include <sys/sysctl.h>
65 #include <sys/syslog.h>
66 #include <sys/systm.h>
67 
68 #include <vm/uma.h>
69 
70 #include <net/if.h>
71 #include <net/if_var.h>
72 #include <net/route.h>
73 
74 #include <netinet/in.h>
75 #include <netinet/in_kdtrace.h>
76 #include <netinet/in_pcb.h>
77 #include <netinet/in_systm.h>
78 #include <netinet/in_var.h>
79 #include <netinet/ip.h>
80 #ifdef INET6
81 #include <netinet/ip6.h>
82 #endif
83 #include <netinet/ip_icmp.h>
84 #include <netinet/icmp_var.h>
85 #include <netinet/ip_var.h>
86 #include <netinet/ip_options.h>
87 #ifdef INET6
88 #include <netinet6/ip6_var.h>
89 #endif
90 #include <netinet/udp.h>
91 #include <netinet/udp_var.h>
92 #include <netinet/udplite.h>
93 #include <netinet/in_rss.h>
94 
95 #ifdef IPSEC
96 #include <netipsec/ipsec.h>
97 #include <netipsec/esp.h>
98 #endif
99 
100 #include <machine/in_cksum.h>
101 
102 #include <security/mac/mac_framework.h>
103 
104 /*
105  * UDP and UDP-Lite protocols implementation.
106  * Per RFC 768, August, 1980.
107  * Per RFC 3828, July, 2004.
108  */
109 
110 /*
111  * BSD 4.2 defaulted the udp checksum to be off.  Turning off udp checksums
112  * removes the only data integrity mechanism for packets; malformed
113  * packets that would otherwise be discarded due to bad checksums may
114  * then cause problems (especially for NFS data blocks).
115  */
116 VNET_DEFINE(int, udp_cksum) = 1;
117 SYSCTL_VNET_INT(_net_inet_udp, UDPCTL_CHECKSUM, checksum, CTLFLAG_RW,
118     &VNET_NAME(udp_cksum), 0, "compute udp checksum");
119 
120 int	udp_log_in_vain = 0;
121 SYSCTL_INT(_net_inet_udp, OID_AUTO, log_in_vain, CTLFLAG_RW,
122     &udp_log_in_vain, 0, "Log all incoming UDP packets");
123 
124 VNET_DEFINE(int, udp_blackhole) = 0;
125 SYSCTL_VNET_INT(_net_inet_udp, OID_AUTO, blackhole, CTLFLAG_RW,
126     &VNET_NAME(udp_blackhole), 0,
127     "Do not send port unreachables for refused connects");
128 
129 u_long	udp_sendspace = 9216;		/* really max datagram size */
130 					/* 40 1K datagrams */
131 SYSCTL_ULONG(_net_inet_udp, UDPCTL_MAXDGRAM, maxdgram, CTLFLAG_RW,
132     &udp_sendspace, 0, "Maximum outgoing UDP datagram size");
133 
134 u_long	udp_recvspace = 40 * (1024 +
135 #ifdef INET6
136 				      sizeof(struct sockaddr_in6)
137 #else
138 				      sizeof(struct sockaddr_in)
139 #endif
140 				      );
141 
142 SYSCTL_ULONG(_net_inet_udp, UDPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
143     &udp_recvspace, 0, "Maximum space for incoming UDP datagrams");
144 
145 VNET_DEFINE(struct inpcbhead, udb);		/* from udp_var.h */
146 VNET_DEFINE(struct inpcbinfo, udbinfo);
147 VNET_DEFINE(struct inpcbhead, ulitecb);
148 VNET_DEFINE(struct inpcbinfo, ulitecbinfo);
149 static VNET_DEFINE(uma_zone_t, udpcb_zone);
150 #define	V_udpcb_zone			VNET(udpcb_zone)
151 
152 #ifndef UDBHASHSIZE
153 #define	UDBHASHSIZE	128
154 #endif
155 
156 VNET_PCPUSTAT_DEFINE(struct udpstat, udpstat);		/* from udp_var.h */
157 VNET_PCPUSTAT_SYSINIT(udpstat);
158 SYSCTL_VNET_PCPUSTAT(_net_inet_udp, UDPCTL_STATS, stats, struct udpstat,
159     udpstat, "UDP statistics (struct udpstat, netinet/udp_var.h)");
160 
161 #ifdef VIMAGE
162 VNET_PCPUSTAT_SYSUNINIT(udpstat);
163 #endif /* VIMAGE */
164 #ifdef INET
165 static void	udp_detach(struct socket *so);
166 static int	udp_output(struct inpcb *, struct mbuf *, struct sockaddr *,
167 		    struct mbuf *, struct thread *);
168 #endif
169 
170 #ifdef IPSEC
171 #ifdef IPSEC_NAT_T
172 #define	UF_ESPINUDP_ALL	(UF_ESPINUDP_NON_IKE|UF_ESPINUDP)
173 #ifdef INET
174 static struct mbuf *udp4_espdecap(struct inpcb *, struct mbuf *, int);
175 #endif
176 #endif /* IPSEC_NAT_T */
177 #endif /* IPSEC */
178 
179 static void
180 udp_zone_change(void *tag)
181 {
182 
183 	uma_zone_set_max(V_udbinfo.ipi_zone, maxsockets);
184 	uma_zone_set_max(V_udpcb_zone, maxsockets);
185 }
186 
187 static int
188 udp_inpcb_init(void *mem, int size, int flags)
189 {
190 	struct inpcb *inp;
191 
192 	inp = mem;
193 	INP_LOCK_INIT(inp, "inp", "udpinp");
194 	return (0);
195 }
196 
197 static int
198 udplite_inpcb_init(void *mem, int size, int flags)
199 {
200 	struct inpcb *inp;
201 
202 	inp = mem;
203 	INP_LOCK_INIT(inp, "inp", "udpliteinp");
204 	return (0);
205 }
206 
207 void
208 udp_init(void)
209 {
210 
211 	/*
212 	 * For now default to 2-tuple UDP hashing - until the fragment
213 	 * reassembly code can also update the flowid.
214 	 *
215 	 * Once we can calculate the flowid that way and re-establish
216 	 * a 4-tuple, flip this to 4-tuple.
217 	 */
218 	in_pcbinfo_init(&V_udbinfo, "udp", &V_udb, UDBHASHSIZE, UDBHASHSIZE,
219 	    "udp_inpcb", udp_inpcb_init, NULL, UMA_ZONE_NOFREE,
220 	    IPI_HASHFIELDS_2TUPLE);
221 	V_udpcb_zone = uma_zcreate("udpcb", sizeof(struct udpcb),
222 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
223 	uma_zone_set_max(V_udpcb_zone, maxsockets);
224 	uma_zone_set_warning(V_udpcb_zone, "kern.ipc.maxsockets limit reached");
225 	EVENTHANDLER_REGISTER(maxsockets_change, udp_zone_change, NULL,
226 	    EVENTHANDLER_PRI_ANY);
227 }
228 
229 void
230 udplite_init(void)
231 {
232 
233 	in_pcbinfo_init(&V_ulitecbinfo, "udplite", &V_ulitecb, UDBHASHSIZE,
234 	    UDBHASHSIZE, "udplite_inpcb", udplite_inpcb_init, NULL,
235 	    UMA_ZONE_NOFREE, IPI_HASHFIELDS_2TUPLE);
236 }
237 
238 /*
239  * Kernel module interface for updating udpstat.  The argument is an index
240  * into udpstat treated as an array of u_long.  While this encodes the
241  * general layout of udpstat into the caller, it doesn't encode its location,
242  * so that future changes to add, for example, per-CPU stats support won't
243  * cause binary compatibility problems for kernel modules.
244  */
245 void
246 kmod_udpstat_inc(int statnum)
247 {
248 
249 	counter_u64_add(VNET(udpstat)[statnum], 1);
250 }
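
/*
 * Illustrative sketch (not part of the original file): a kernel module
 * would bump a counter through this interface by passing the field's
 * index within struct udpstat, measured in uint64_t-sized slots.  The
 * KMOD_UDPSTAT_INC() macro in udp_var.h is assumed to expand to this
 * pattern; the example_* name below is hypothetical.
 */
static void
example_count_bad_length(void)
{

	kmod_udpstat_inc(offsetof(struct udpstat, udps_badlen) /
	    sizeof(uint64_t));
}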
251 
252 int
253 udp_newudpcb(struct inpcb *inp)
254 {
255 	struct udpcb *up;
256 
257 	up = uma_zalloc(V_udpcb_zone, M_NOWAIT | M_ZERO);
258 	if (up == NULL)
259 		return (ENOBUFS);
260 	inp->inp_ppcb = up;
261 	return (0);
262 }
263 
264 void
265 udp_discardcb(struct udpcb *up)
266 {
267 
268 	uma_zfree(V_udpcb_zone, up);
269 }
270 
271 #ifdef VIMAGE
272 void
273 udp_destroy(void)
274 {
275 
276 	in_pcbinfo_destroy(&V_udbinfo);
277 	uma_zdestroy(V_udpcb_zone);
278 }
279 
280 void
281 udplite_destroy(void)
282 {
283 
284 	in_pcbinfo_destroy(&V_ulitecbinfo);
285 }
286 #endif
287 
288 #ifdef INET
289 /*
290  * Subroutine of udp_input(), which appends the provided mbuf chain to the
291  * passed pcb/socket.  The caller must provide a sockaddr_in via udp_in that
292  * contains the source address.  If the socket ends up being an IPv6 socket,
293  * udp_append() will convert to a sockaddr_in6 before passing the address
294  * into the socket code.
295  */
296 static void
297 udp_append(struct inpcb *inp, struct ip *ip, struct mbuf *n, int off,
298     struct sockaddr_in *udp_in)
299 {
300 	struct sockaddr *append_sa;
301 	struct socket *so;
302 	struct mbuf *opts = 0;
303 #ifdef INET6
304 	struct sockaddr_in6 udp_in6;
305 #endif
306 	struct udpcb *up;
307 
308 	INP_LOCK_ASSERT(inp);
309 
310 	/*
311 	 * Engage the tunneling protocol.
312 	 */
313 	up = intoudpcb(inp);
314 	if (up->u_tun_func != NULL) {
315 		(*up->u_tun_func)(n, off, inp);
316 		return;
317 	}
318 
319 	if (n == NULL)
320 		return;
321 
322 	off += sizeof(struct udphdr);
323 
324 #ifdef IPSEC
325 	/* Check AH/ESP integrity. */
326 	if (ipsec4_in_reject(n, inp)) {
327 		m_freem(n);
328 		IPSECSTAT_INC(ips_in_polvio);
329 		return;
330 	}
331 #ifdef IPSEC_NAT_T
332 	up = intoudpcb(inp);
333 	KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
334 	if (up->u_flags & UF_ESPINUDP_ALL) {	/* IPSec UDP encaps. */
335 		n = udp4_espdecap(inp, n, off);
336 		if (n == NULL)				/* Consumed. */
337 			return;
338 	}
339 #endif /* IPSEC_NAT_T */
340 #endif /* IPSEC */
341 #ifdef MAC
342 	if (mac_inpcb_check_deliver(inp, n) != 0) {
343 		m_freem(n);
344 		return;
345 	}
346 #endif /* MAC */
347 	if (inp->inp_flags & INP_CONTROLOPTS ||
348 	    inp->inp_socket->so_options & (SO_TIMESTAMP | SO_BINTIME)) {
349 #ifdef INET6
350 		if (inp->inp_vflag & INP_IPV6)
351 			(void)ip6_savecontrol_v4(inp, n, &opts, NULL);
352 		else
353 #endif /* INET6 */
354 			ip_savecontrol(inp, &opts, ip, n);
355 	}
356 #ifdef INET6
357 	if (inp->inp_vflag & INP_IPV6) {
358 		bzero(&udp_in6, sizeof(udp_in6));
359 		udp_in6.sin6_len = sizeof(udp_in6);
360 		udp_in6.sin6_family = AF_INET6;
361 		in6_sin_2_v4mapsin6(udp_in, &udp_in6);
362 		append_sa = (struct sockaddr *)&udp_in6;
363 	} else
364 #endif /* INET6 */
365 		append_sa = (struct sockaddr *)udp_in;
366 	m_adj(n, off);
367 
368 	so = inp->inp_socket;
369 	SOCKBUF_LOCK(&so->so_rcv);
370 	if (sbappendaddr_locked(&so->so_rcv, append_sa, n, opts) == 0) {
371 		SOCKBUF_UNLOCK(&so->so_rcv);
372 		m_freem(n);
373 		if (opts)
374 			m_freem(opts);
375 		UDPSTAT_INC(udps_fullsock);
376 	} else
377 		sorwakeup_locked(so);
378 }
379 
380 int
381 udp_input(struct mbuf **mp, int *offp, int proto)
382 {
383 	struct ip *ip;
384 	struct udphdr *uh;
385 	struct ifnet *ifp;
386 	struct inpcb *inp;
387 	uint16_t len, ip_len;
388 	struct inpcbinfo *pcbinfo;
389 	struct ip save_ip;
390 	struct sockaddr_in udp_in;
391 	struct mbuf *m;
392 	struct m_tag *fwd_tag;
393 	int cscov_partial, iphlen;
394 
395 	m = *mp;
396 	iphlen = *offp;
397 	ifp = m->m_pkthdr.rcvif;
398 	*mp = NULL;
399 	UDPSTAT_INC(udps_ipackets);
400 
401 	/*
402 	 * Strip IP options, if any.  We should skip this, make the options
403 	 * available to the user, and use them on returned packets, but we
404 	 * don't yet have a way to check the checksum with options still present.
405 	 */
406 	if (iphlen > sizeof (struct ip)) {
407 		ip_stripoptions(m);
408 		iphlen = sizeof(struct ip);
409 	}
410 
411 	/*
412 	 * Get IP and UDP header together in first mbuf.
413 	 */
414 	ip = mtod(m, struct ip *);
415 	if (m->m_len < iphlen + sizeof(struct udphdr)) {
416 		if ((m = m_pullup(m, iphlen + sizeof(struct udphdr))) == NULL) {
417 			UDPSTAT_INC(udps_hdrops);
418 			return (IPPROTO_DONE);
419 		}
420 		ip = mtod(m, struct ip *);
421 	}
422 	uh = (struct udphdr *)((caddr_t)ip + iphlen);
423 	cscov_partial = (proto == IPPROTO_UDPLITE) ? 1 : 0;
424 
425 	/*
426 	 * Destination port of 0 is illegal, based on RFC768.
427 	 */
428 	if (uh->uh_dport == 0)
429 		goto badunlocked;
430 
431 	/*
432 	 * Construct sockaddr format source address.  Stuff source address
433 	 * and datagram in user buffer.
434 	 */
435 	bzero(&udp_in, sizeof(udp_in));
436 	udp_in.sin_len = sizeof(udp_in);
437 	udp_in.sin_family = AF_INET;
438 	udp_in.sin_port = uh->uh_sport;
439 	udp_in.sin_addr = ip->ip_src;
440 
441 	/*
442 	 * Make mbuf data length reflect UDP length.  If not enough data to
443 	 * reflect UDP length, drop.
444 	 */
445 	len = ntohs((u_short)uh->uh_ulen);
446 	ip_len = ntohs(ip->ip_len) - iphlen;
447 	if (proto == IPPROTO_UDPLITE && (len == 0 || len == ip_len)) {
448 		/* Zero means checksum over the complete packet. */
449 		if (len == 0)
450 			len = ip_len;
451 		cscov_partial = 0;
452 	}
453 	if (ip_len != len) {
454 		if (len > ip_len || len < sizeof(struct udphdr)) {
455 			UDPSTAT_INC(udps_badlen);
456 			goto badunlocked;
457 		}
458 		if (proto == IPPROTO_UDP)
459 			m_adj(m, len - ip_len);
460 	}
461 
462 	/*
463 	 * Save a copy of the IP header in case we want to restore it for
464 	 * sending an ICMP error message in response.
465 	 */
466 	if (!V_udp_blackhole)
467 		save_ip = *ip;
468 	else
469 		memset(&save_ip, 0, sizeof(save_ip));
470 
471 	/*
472 	 * Checksum extended UDP header and data.
473 	 */
474 	if (uh->uh_sum) {
475 		u_short uh_sum;
476 
477 		if ((m->m_pkthdr.csum_flags & CSUM_DATA_VALID) &&
478 		    !cscov_partial) {
479 			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
480 				uh_sum = m->m_pkthdr.csum_data;
481 			else
482 				uh_sum = in_pseudo(ip->ip_src.s_addr,
483 				    ip->ip_dst.s_addr, htonl((u_short)len +
484 				    m->m_pkthdr.csum_data + proto));
485 			uh_sum ^= 0xffff;
486 		} else {
487 			char b[9];
488 
489 			bcopy(((struct ipovly *)ip)->ih_x1, b, 9);
490 			bzero(((struct ipovly *)ip)->ih_x1, 9);
491 			((struct ipovly *)ip)->ih_len = (proto == IPPROTO_UDP) ?
492 			    uh->uh_ulen : htons(ip_len);
493 			uh_sum = in_cksum(m, len + sizeof (struct ip));
494 			bcopy(b, ((struct ipovly *)ip)->ih_x1, 9);
495 		}
496 		if (uh_sum) {
497 			UDPSTAT_INC(udps_badsum);
498 			m_freem(m);
499 			return (IPPROTO_DONE);
500 		}
501 	} else {
502 		if (proto == IPPROTO_UDP) {
503 			UDPSTAT_INC(udps_nosum);
504 		} else {
505 			/* UDPLite requires a checksum */
506 			/* XXX: What is the right UDPLite MIB counter here? */
507 			m_freem(m);
508 			return (IPPROTO_DONE);
509 		}
510 	}
511 
512 	pcbinfo = get_inpcbinfo(proto);
513 	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
514 	    in_broadcast(ip->ip_dst, ifp)) {
515 		struct inpcb *last;
516 		struct inpcbhead *pcblist;
517 		struct ip_moptions *imo;
518 
519 		INP_INFO_RLOCK(pcbinfo);
520 		pcblist = get_pcblist(proto);
521 		last = NULL;
522 		LIST_FOREACH(inp, pcblist, inp_list) {
523 			if (inp->inp_lport != uh->uh_dport)
524 				continue;
525 #ifdef INET6
526 			if ((inp->inp_vflag & INP_IPV4) == 0)
527 				continue;
528 #endif
529 			if (inp->inp_laddr.s_addr != INADDR_ANY &&
530 			    inp->inp_laddr.s_addr != ip->ip_dst.s_addr)
531 				continue;
532 			if (inp->inp_faddr.s_addr != INADDR_ANY &&
533 			    inp->inp_faddr.s_addr != ip->ip_src.s_addr)
534 				continue;
535 			if (inp->inp_fport != 0 &&
536 			    inp->inp_fport != uh->uh_sport)
537 				continue;
538 
539 			INP_RLOCK(inp);
540 
541 			/*
542 			 * XXXRW: Because we weren't holding either the inpcb
543 			 * or the hash lock when we checked for a match
544 			 * before, we should probably recheck now that the
545 			 * inpcb lock is held.
546 			 */
547 
548 			/*
549 			 * Handle socket delivery policy for any-source
550 			 * and source-specific multicast. [RFC3678]
551 			 */
552 			imo = inp->inp_moptions;
553 			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
554 				struct sockaddr_in	 group;
555 				int			 blocked;
556 				if (imo == NULL) {
557 					INP_RUNLOCK(inp);
558 					continue;
559 				}
560 				bzero(&group, sizeof(struct sockaddr_in));
561 				group.sin_len = sizeof(struct sockaddr_in);
562 				group.sin_family = AF_INET;
563 				group.sin_addr = ip->ip_dst;
564 
565 				blocked = imo_multi_filter(imo, ifp,
566 					(struct sockaddr *)&group,
567 					(struct sockaddr *)&udp_in);
568 				if (blocked != MCAST_PASS) {
569 					if (blocked == MCAST_NOTGMEMBER)
570 						IPSTAT_INC(ips_notmember);
571 					if (blocked == MCAST_NOTSMEMBER ||
572 					    blocked == MCAST_MUTED)
573 						UDPSTAT_INC(udps_filtermcast);
574 					INP_RUNLOCK(inp);
575 					continue;
576 				}
577 			}
578 			if (last != NULL) {
579 				struct mbuf *n;
580 
581 				n = m_copy(m, 0, M_COPYALL);
582 				udp_append(last, ip, n, iphlen, &udp_in);
583 				INP_RUNLOCK(last);
584 			}
585 			last = inp;
586 			/*
587 			 * Don't look for additional matches if this one does
588 			 * not have either the SO_REUSEPORT or SO_REUSEADDR
589 			 * socket options set.  This heuristic avoids
590 			 * searching through all pcbs in the common case of a
591 			 * non-shared port.  It assumes that an application
592 			 * will never clear these options after setting them.
593 			 */
594 			if ((last->inp_socket->so_options &
595 			    (SO_REUSEPORT|SO_REUSEADDR)) == 0)
596 				break;
597 		}
598 
599 		if (last == NULL) {
600 			/*
601 			 * No matching pcb found; discard datagram.  (No need
602 			 * to send an ICMP Port Unreachable for a broadcast
603 			 * or multicast datagram.)
604 			 */
605 			UDPSTAT_INC(udps_noportbcast);
606 			if (inp)
607 				INP_RUNLOCK(inp);
608 			INP_INFO_RUNLOCK(pcbinfo);
609 			goto badunlocked;
610 		}
611 		udp_append(last, ip, m, iphlen, &udp_in);
612 		INP_RUNLOCK(last);
613 		INP_INFO_RUNLOCK(pcbinfo);
614 		return (IPPROTO_DONE);
615 	}
616 
617 	/*
618 	 * Locate pcb for datagram.
619 	 */
620 
621 	/*
622 	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
623 	 */
624 	if ((m->m_flags & M_IP_NEXTHOP) &&
625 	    (fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL)) != NULL) {
626 		struct sockaddr_in *next_hop;
627 
628 		next_hop = (struct sockaddr_in *)(fwd_tag + 1);
629 
630 		/*
631 		 * Transparently forwarded. Pretend to be the destination.
632 		 * Already got one like this?
633 		 */
634 		inp = in_pcblookup_mbuf(pcbinfo, ip->ip_src, uh->uh_sport,
635 		    ip->ip_dst, uh->uh_dport, INPLOOKUP_RLOCKPCB, ifp, m);
636 		if (!inp) {
637 			/*
638 			 * It's new.  Try to find the ambushing socket.
639 			 * Because we've rewritten the destination address,
640 			 * any hardware-generated hash is ignored.
641 			 */
642 			inp = in_pcblookup(pcbinfo, ip->ip_src,
643 			    uh->uh_sport, next_hop->sin_addr,
644 			    next_hop->sin_port ? htons(next_hop->sin_port) :
645 			    uh->uh_dport, INPLOOKUP_WILDCARD |
646 			    INPLOOKUP_RLOCKPCB, ifp);
647 		}
648 		/* Remove the tag from the packet. We don't need it anymore. */
649 		m_tag_delete(m, fwd_tag);
650 		m->m_flags &= ~M_IP_NEXTHOP;
651 	} else
652 		inp = in_pcblookup_mbuf(pcbinfo, ip->ip_src, uh->uh_sport,
653 		    ip->ip_dst, uh->uh_dport, INPLOOKUP_WILDCARD |
654 		    INPLOOKUP_RLOCKPCB, ifp, m);
655 	if (inp == NULL) {
656 		if (udp_log_in_vain) {
657 			char buf[4*sizeof "123"];
658 
659 			strcpy(buf, inet_ntoa(ip->ip_dst));
660 			log(LOG_INFO,
661 			    "Connection attempt to UDP %s:%d from %s:%d\n",
662 			    buf, ntohs(uh->uh_dport), inet_ntoa(ip->ip_src),
663 			    ntohs(uh->uh_sport));
664 		}
665 		UDPSTAT_INC(udps_noport);
666 		if (m->m_flags & (M_BCAST | M_MCAST)) {
667 			UDPSTAT_INC(udps_noportbcast);
668 			goto badunlocked;
669 		}
670 		if (V_udp_blackhole)
671 			goto badunlocked;
672 		if (badport_bandlim(BANDLIM_ICMP_UNREACH) < 0)
673 			goto badunlocked;
674 		*ip = save_ip;
675 		icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_PORT, 0, 0);
676 		return (IPPROTO_DONE);
677 	}
678 
679 	/*
680 	 * Check the minimum TTL for socket.
681 	 */
682 	INP_RLOCK_ASSERT(inp);
683 	if (inp->inp_ip_minttl && inp->inp_ip_minttl > ip->ip_ttl) {
684 		INP_RUNLOCK(inp);
685 		m_freem(m);
686 		return (IPPROTO_DONE);
687 	}
688 	if (cscov_partial) {
689 		struct udpcb *up;
690 
691 		up = intoudpcb(inp);
692 		if (up->u_rxcslen == 0 || up->u_rxcslen > len) {
693 			INP_RUNLOCK(inp);
694 			m_freem(m);
695 			return (IPPROTO_DONE);
696 		}
697 	}
698 
699 	UDP_PROBE(receive, NULL, inp, ip, inp, uh);
700 	udp_append(inp, ip, m, iphlen, &udp_in);
701 	INP_RUNLOCK(inp);
702 	return (IPPROTO_DONE);
703 
704 badunlocked:
705 	m_freem(m);
706 	return (IPPROTO_DONE);
707 }
708 #endif /* INET */
709 
710 /*
711  * Notify a udp user of an asynchronous error; just wake up so that they can
712  * collect error status.
713  */
714 struct inpcb *
715 udp_notify(struct inpcb *inp, int errno)
716 {
717 
718 	/*
719 	 * While udp_ctlinput() always calls udp_notify() with a read lock
720 	 * when invoking it directly, in_pcbnotifyall() currently uses write
721 	 * locks due to sharing code with TCP.  For now, accept either a read
722 	 * or a write lock, but a read lock is sufficient.
723 	 */
724 	INP_LOCK_ASSERT(inp);
725 
726 	inp->inp_socket->so_error = errno;
727 	sorwakeup(inp->inp_socket);
728 	sowwakeup(inp->inp_socket);
729 	return (inp);
730 }
731 
732 #ifdef INET
733 static void
734 udp_common_ctlinput(int cmd, struct sockaddr *sa, void *vip,
735     struct inpcbinfo *pcbinfo)
736 {
737 	struct ip *ip = vip;
738 	struct udphdr *uh;
739 	struct in_addr faddr;
740 	struct inpcb *inp;
741 
742 	faddr = ((struct sockaddr_in *)sa)->sin_addr;
743 	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
744 		return;
745 
746 	/*
747 	 * Redirects don't need to be handled up here.
748 	 */
749 	if (PRC_IS_REDIRECT(cmd))
750 		return;
751 
752 	/*
753 	 * Hostdead is ugly because it goes linearly through all PCBs.
754 	 *
755 	 * XXX: We never get this from ICMP, otherwise it makes an excellent
756 	 * DoS attack on machines with many connections.
757 	 */
758 	if (cmd == PRC_HOSTDEAD)
759 		ip = NULL;
760 	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
761 		return;
762 	if (ip != NULL) {
763 		uh = (struct udphdr *)((caddr_t)ip + (ip->ip_hl << 2));
764 		inp = in_pcblookup(pcbinfo, faddr, uh->uh_dport,
765 		    ip->ip_src, uh->uh_sport, INPLOOKUP_RLOCKPCB, NULL);
766 		if (inp != NULL) {
767 			INP_RLOCK_ASSERT(inp);
768 			if (inp->inp_socket != NULL) {
769 				udp_notify(inp, inetctlerrmap[cmd]);
770 			}
771 			INP_RUNLOCK(inp);
772 		}
773 	} else
774 		in_pcbnotifyall(pcbinfo, faddr, inetctlerrmap[cmd],
775 		    udp_notify);
776 }
777 void
778 udp_ctlinput(int cmd, struct sockaddr *sa, void *vip)
779 {
780 
781 	return (udp_common_ctlinput(cmd, sa, vip, &V_udbinfo));
782 }
783 
784 void
785 udplite_ctlinput(int cmd, struct sockaddr *sa, void *vip)
786 {
787 
788 	return (udp_common_ctlinput(cmd, sa, vip, &V_ulitecbinfo));
789 }
790 #endif /* INET */
791 
792 static int
793 udp_pcblist(SYSCTL_HANDLER_ARGS)
794 {
795 	int error, i, n;
796 	struct inpcb *inp, **inp_list;
797 	inp_gen_t gencnt;
798 	struct xinpgen xig;
799 
800 	/*
801 	 * The process of preparing the PCB list is too time-consuming and
802 	 * resource-intensive to repeat twice on every request.
803 	 */
804 	if (req->oldptr == 0) {
805 		n = V_udbinfo.ipi_count;
806 		n += imax(n / 8, 10);
807 		req->oldidx = 2 * (sizeof xig) + n * sizeof(struct xinpcb);
808 		return (0);
809 	}
810 
811 	if (req->newptr != 0)
812 		return (EPERM);
813 
814 	/*
815 	 * OK, now we're committed to doing something.
816 	 */
817 	INP_INFO_RLOCK(&V_udbinfo);
818 	gencnt = V_udbinfo.ipi_gencnt;
819 	n = V_udbinfo.ipi_count;
820 	INP_INFO_RUNLOCK(&V_udbinfo);
821 
822 	error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
823 		+ n * sizeof(struct xinpcb));
824 	if (error != 0)
825 		return (error);
826 
827 	xig.xig_len = sizeof xig;
828 	xig.xig_count = n;
829 	xig.xig_gen = gencnt;
830 	xig.xig_sogen = so_gencnt;
831 	error = SYSCTL_OUT(req, &xig, sizeof xig);
832 	if (error)
833 		return (error);
834 
835 	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
836 	if (inp_list == 0)
837 		return (ENOMEM);
838 
839 	INP_INFO_RLOCK(&V_udbinfo);
840 	for (inp = LIST_FIRST(V_udbinfo.ipi_listhead), i = 0; inp && i < n;
841 	     inp = LIST_NEXT(inp, inp_list)) {
842 		INP_WLOCK(inp);
843 		if (inp->inp_gencnt <= gencnt &&
844 		    cr_canseeinpcb(req->td->td_ucred, inp) == 0) {
845 			in_pcbref(inp);
846 			inp_list[i++] = inp;
847 		}
848 		INP_WUNLOCK(inp);
849 	}
850 	INP_INFO_RUNLOCK(&V_udbinfo);
851 	n = i;
852 
853 	error = 0;
854 	for (i = 0; i < n; i++) {
855 		inp = inp_list[i];
856 		INP_RLOCK(inp);
857 		if (inp->inp_gencnt <= gencnt) {
858 			struct xinpcb xi;
859 
860 			bzero(&xi, sizeof(xi));
861 			xi.xi_len = sizeof xi;
862 			/* XXX should avoid extra copy */
863 			bcopy(inp, &xi.xi_inp, sizeof *inp);
864 			if (inp->inp_socket)
865 				sotoxsocket(inp->inp_socket, &xi.xi_socket);
866 			xi.xi_inp.inp_gencnt = inp->inp_gencnt;
867 			INP_RUNLOCK(inp);
868 			error = SYSCTL_OUT(req, &xi, sizeof xi);
869 		} else
870 			INP_RUNLOCK(inp);
871 	}
872 	INP_INFO_WLOCK(&V_udbinfo);
873 	for (i = 0; i < n; i++) {
874 		inp = inp_list[i];
875 		INP_RLOCK(inp);
876 		if (!in_pcbrele_rlocked(inp))
877 			INP_RUNLOCK(inp);
878 	}
879 	INP_INFO_WUNLOCK(&V_udbinfo);
880 
881 	if (!error) {
882 		/*
883 		 * Give the user an updated idea of our state.  If the
884 		 * generation differs from what we told her before, she knows
885 		 * that something happened while we were processing this
886 		 * request, and it might be necessary to retry.
887 		 */
888 		INP_INFO_RLOCK(&V_udbinfo);
889 		xig.xig_gen = V_udbinfo.ipi_gencnt;
890 		xig.xig_sogen = so_gencnt;
891 		xig.xig_count = V_udbinfo.ipi_count;
892 		INP_INFO_RUNLOCK(&V_udbinfo);
893 		error = SYSCTL_OUT(req, &xig, sizeof xig);
894 	}
895 	free(inp_list, M_TEMP);
896 	return (error);
897 }
898 
899 SYSCTL_PROC(_net_inet_udp, UDPCTL_PCBLIST, pcblist,
900     CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0,
901     udp_pcblist, "S,xinpcb", "List of active UDP sockets");
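
/*
 * Illustrative userland sketch (not part of the original file): walking
 * the opaque net.inet.udp.pcblist output produced above.  Every record
 * begins with its own length; a bare trailing struct xinpgen terminates
 * the list.  The header set below mirrors what netstat(1) uses; the
 * example_* name is hypothetical.
 */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <stdlib.h>

static void
example_dump_udp_pcbs(void)
{
	struct xinpgen *xig;
	struct xinpcb *xip;
	char *buf, *next;
	size_t len;

	if (sysctlbyname("net.inet.udp.pcblist", NULL, &len, NULL, 0) < 0)
		return;
	if ((buf = malloc(len)) == NULL)
		return;
	if (sysctlbyname("net.inet.udp.pcblist", buf, &len, NULL, 0) < 0) {
		free(buf);
		return;
	}

	/* The leading xinpgen describes the snapshot; records follow it. */
	xig = (struct xinpgen *)buf;
	for (next = buf + xig->xig_len; next < buf + len;
	    next += xig->xig_len) {
		xig = (struct xinpgen *)next;
		/* A record no larger than an xinpgen is the terminator. */
		if (xig->xig_len <= sizeof(struct xinpgen))
			break;
		xip = (struct xinpcb *)xig;
		if ((xip->xi_inp.inp_vflag & INP_IPV4) == 0)
			continue;
		printf("udp4 %s:%d\n", inet_ntoa(xip->xi_inp.inp_laddr),
		    ntohs(xip->xi_inp.inp_lport));
	}
	free(buf);
}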
902 
903 #ifdef INET
904 static int
905 udp_getcred(SYSCTL_HANDLER_ARGS)
906 {
907 	struct xucred xuc;
908 	struct sockaddr_in addrs[2];
909 	struct inpcb *inp;
910 	int error;
911 
912 	error = priv_check(req->td, PRIV_NETINET_GETCRED);
913 	if (error)
914 		return (error);
915 	error = SYSCTL_IN(req, addrs, sizeof(addrs));
916 	if (error)
917 		return (error);
918 	inp = in_pcblookup(&V_udbinfo, addrs[1].sin_addr, addrs[1].sin_port,
919 	    addrs[0].sin_addr, addrs[0].sin_port,
920 	    INPLOOKUP_WILDCARD | INPLOOKUP_RLOCKPCB, NULL);
921 	if (inp != NULL) {
922 		INP_RLOCK_ASSERT(inp);
923 		if (inp->inp_socket == NULL)
924 			error = ENOENT;
925 		if (error == 0)
926 			error = cr_canseeinpcb(req->td->td_ucred, inp);
927 		if (error == 0)
928 			cru2x(inp->inp_cred, &xuc);
929 		INP_RUNLOCK(inp);
930 	} else
931 		error = ENOENT;
932 	if (error == 0)
933 		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
934 	return (error);
935 }
936 
937 SYSCTL_PROC(_net_inet_udp, OID_AUTO, getcred,
938     CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
939     udp_getcred, "S,xucred", "Get the xucred of a UDP connection");
940 #endif /* INET */
941 
942 int
943 udp_ctloutput(struct socket *so, struct sockopt *sopt)
944 {
945 	struct inpcb *inp;
946 	struct udpcb *up;
947 	int isudplite, error, optval;
948 
949 	error = 0;
950 	isudplite = (so->so_proto->pr_protocol == IPPROTO_UDPLITE) ? 1 : 0;
951 	inp = sotoinpcb(so);
952 	KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
953 	INP_WLOCK(inp);
954 	if (sopt->sopt_level != so->so_proto->pr_protocol) {
955 #ifdef INET6
956 		if (INP_CHECK_SOCKAF(so, AF_INET6)) {
957 			INP_WUNLOCK(inp);
958 			error = ip6_ctloutput(so, sopt);
959 		}
960 #endif
961 #if defined(INET) && defined(INET6)
962 		else
963 #endif
964 #ifdef INET
965 		{
966 			INP_WUNLOCK(inp);
967 			error = ip_ctloutput(so, sopt);
968 		}
969 #endif
970 		return (error);
971 	}
972 
973 	switch (sopt->sopt_dir) {
974 	case SOPT_SET:
975 		switch (sopt->sopt_name) {
976 		case UDP_ENCAP:
977 			INP_WUNLOCK(inp);
978 			error = sooptcopyin(sopt, &optval, sizeof optval,
979 					    sizeof optval);
980 			if (error)
981 				break;
982 			inp = sotoinpcb(so);
983 			KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
984 			INP_WLOCK(inp);
985 #ifdef IPSEC_NAT_T
986 			up = intoudpcb(inp);
987 			KASSERT(up != NULL, ("%s: up == NULL", __func__));
988 #endif
989 			switch (optval) {
990 			case 0:
991 				/* Clear all UDP encap. */
992 #ifdef IPSEC_NAT_T
993 				up->u_flags &= ~UF_ESPINUDP_ALL;
994 #endif
995 				break;
996 #ifdef IPSEC_NAT_T
997 			case UDP_ENCAP_ESPINUDP:
998 			case UDP_ENCAP_ESPINUDP_NON_IKE:
999 				up->u_flags &= ~UF_ESPINUDP_ALL;
1000 				if (optval == UDP_ENCAP_ESPINUDP)
1001 					up->u_flags |= UF_ESPINUDP;
1002 				else if (optval == UDP_ENCAP_ESPINUDP_NON_IKE)
1003 					up->u_flags |= UF_ESPINUDP_NON_IKE;
1004 				break;
1005 #endif
1006 			default:
1007 				error = EINVAL;
1008 				break;
1009 			}
1010 			INP_WUNLOCK(inp);
1011 			break;
1012 		case UDPLITE_SEND_CSCOV:
1013 		case UDPLITE_RECV_CSCOV:
1014 			if (!isudplite) {
1015 				INP_WUNLOCK(inp);
1016 				error = ENOPROTOOPT;
1017 				break;
1018 			}
1019 			INP_WUNLOCK(inp);
1020 			error = sooptcopyin(sopt, &optval, sizeof(optval),
1021 			    sizeof(optval));
1022 			if (error != 0)
1023 				break;
1024 			inp = sotoinpcb(so);
1025 			KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
1026 			INP_WLOCK(inp);
1027 			up = intoudpcb(inp);
1028 			KASSERT(up != NULL, ("%s: up == NULL", __func__));
1029 			if ((optval != 0 && optval < 8) || (optval > 65535)) {
1030 				INP_WUNLOCK(inp);
1031 				error = EINVAL;
1032 				break;
1033 			}
1034 			if (sopt->sopt_name == UDPLITE_SEND_CSCOV)
1035 				up->u_txcslen = optval;
1036 			else
1037 				up->u_rxcslen = optval;
1038 			INP_WUNLOCK(inp);
1039 			break;
1040 		default:
1041 			INP_WUNLOCK(inp);
1042 			error = ENOPROTOOPT;
1043 			break;
1044 		}
1045 		break;
1046 	case SOPT_GET:
1047 		switch (sopt->sopt_name) {
1048 #ifdef IPSEC_NAT_T
1049 		case UDP_ENCAP:
1050 			up = intoudpcb(inp);
1051 			KASSERT(up != NULL, ("%s: up == NULL", __func__));
1052 			optval = up->u_flags & UF_ESPINUDP_ALL;
1053 			INP_WUNLOCK(inp);
1054 			error = sooptcopyout(sopt, &optval, sizeof optval);
1055 			break;
1056 #endif
1057 		case UDPLITE_SEND_CSCOV:
1058 		case UDPLITE_RECV_CSCOV:
1059 			if (!isudplite) {
1060 				INP_WUNLOCK(inp);
1061 				error = ENOPROTOOPT;
1062 				break;
1063 			}
1064 			up = intoudpcb(inp);
1065 			KASSERT(up != NULL, ("%s: up == NULL", __func__));
1066 			if (sopt->sopt_name == UDPLITE_SEND_CSCOV)
1067 				optval = up->u_txcslen;
1068 			else
1069 				optval = up->u_rxcslen;
1070 			INP_WUNLOCK(inp);
1071 			error = sooptcopyout(sopt, &optval, sizeof(optval));
1072 			break;
1073 		default:
1074 			INP_WUNLOCK(inp);
1075 			error = ENOPROTOOPT;
1076 			break;
1077 		}
1078 		break;
1079 	}
1080 	return (error);
1081 }
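
/*
 * Illustrative userland sketch (not part of the original file): the
 * UDP-Lite options handled above are set with the socket's own protocol
 * as the option level.  Valid coverage values are 0 (checksum the whole
 * datagram) or 8..65535, counting the 8-byte UDP-Lite header.  The
 * example_* name is hypothetical.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/udplite.h>
#include <unistd.h>

static int
example_udplite_socket(int cscov)
{
	int s;

	s = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
	if (s < 0)
		return (-1);
	/* Cover only the first 'cscov' bytes of outgoing datagrams. */
	if (setsockopt(s, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cscov,
	    sizeof(cscov)) < 0 ||
	    /* Require at least 'cscov' bytes of coverage on receive. */
	    setsockopt(s, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cscov,
	    sizeof(cscov)) < 0) {
		close(s);
		return (-1);
	}
	return (s);
}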
1082 
1083 #ifdef INET
1084 #define	UH_WLOCKED	2
1085 #define	UH_RLOCKED	1
1086 #define	UH_UNLOCKED	0
1087 static int
1088 udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
1089     struct mbuf *control, struct thread *td)
1090 {
1091 	struct udpiphdr *ui;
1092 	int len = m->m_pkthdr.len;
1093 	struct in_addr faddr, laddr;
1094 	struct cmsghdr *cm;
1095 	struct inpcbinfo *pcbinfo;
1096 	struct sockaddr_in *sin, src;
1097 	int cscov_partial = 0;
1098 	int error = 0;
1099 	int ipflags;
1100 	u_short fport, lport;
1101 	int unlock_udbinfo;
1102 	u_char tos;
1103 	uint8_t pr;
1104 	uint16_t cscov = 0;
1105 	uint32_t flowid = 0;
1106 	int flowid_type = 0;
1107 	int use_flowid = 0;
1108 
1109 	/*
1110 	 * udp_output() may need to temporarily bind or connect the current
1111 	 * inpcb.  As such, we don't know up front whether we will need the
1112 	 * pcbinfo lock or not.  Do any work to decide what is needed up
1113 	 * front before acquiring any locks.
1114 	 */
1115 	if (len + sizeof(struct udpiphdr) > IP_MAXPACKET) {
1116 		if (control)
1117 			m_freem(control);
1118 		m_freem(m);
1119 		return (EMSGSIZE);
1120 	}
1121 
1122 	src.sin_family = 0;
1123 	INP_RLOCK(inp);
1124 	tos = inp->inp_ip_tos;
1125 	if (control != NULL) {
1126 		/*
1127 		 * XXX: Currently, we assume all the optional information is
1128 		 * stored in a single mbuf.
1129 		 */
1130 		if (control->m_next) {
1131 			INP_RUNLOCK(inp);
1132 			m_freem(control);
1133 			m_freem(m);
1134 			return (EINVAL);
1135 		}
1136 		for (; control->m_len > 0;
1137 		    control->m_data += CMSG_ALIGN(cm->cmsg_len),
1138 		    control->m_len -= CMSG_ALIGN(cm->cmsg_len)) {
1139 			cm = mtod(control, struct cmsghdr *);
1140 			if (control->m_len < sizeof(*cm) || cm->cmsg_len == 0
1141 			    || cm->cmsg_len > control->m_len) {
1142 				error = EINVAL;
1143 				break;
1144 			}
1145 			if (cm->cmsg_level != IPPROTO_IP)
1146 				continue;
1147 
1148 			switch (cm->cmsg_type) {
1149 			case IP_SENDSRCADDR:
1150 				if (cm->cmsg_len !=
1151 				    CMSG_LEN(sizeof(struct in_addr))) {
1152 					error = EINVAL;
1153 					break;
1154 				}
1155 				bzero(&src, sizeof(src));
1156 				src.sin_family = AF_INET;
1157 				src.sin_len = sizeof(src);
1158 				src.sin_port = inp->inp_lport;
1159 				src.sin_addr =
1160 				    *(struct in_addr *)CMSG_DATA(cm);
1161 				break;
1162 
1163 			case IP_TOS:
1164 				if (cm->cmsg_len != CMSG_LEN(sizeof(u_char))) {
1165 					error = EINVAL;
1166 					break;
1167 				}
1168 				tos = *(u_char *)CMSG_DATA(cm);
1169 				break;
1170 
1171 			case IP_FLOWID:
1172 				if (cm->cmsg_len != CMSG_LEN(sizeof(uint32_t))) {
1173 					error = EINVAL;
1174 					break;
1175 				}
1176 				flowid = *(uint32_t *) CMSG_DATA(cm);
1177 				break;
1178 
1179 			case IP_FLOWTYPE:
1180 				if (cm->cmsg_len != CMSG_LEN(sizeof(uint32_t))) {
1181 					error = EINVAL;
1182 					break;
1183 				}
1184 				flowid_type = *(uint32_t *) CMSG_DATA(cm);
1185 				use_flowid = 1;
1186 				break;
1187 
1188 #ifdef	RSS
1189 			case IP_RSSBUCKETID:
1190 				if (cm->cmsg_len != CMSG_LEN(sizeof(uint32_t))) {
1191 					error = EINVAL;
1192 					break;
1193 				}
1194 				/* This is just a placeholder for now */
1195 				break;
1196 #endif	/* RSS */
1197 			default:
1198 				error = ENOPROTOOPT;
1199 				break;
1200 			}
1201 			if (error)
1202 				break;
1203 		}
1204 		m_freem(control);
1205 	}
1206 	if (error) {
1207 		INP_RUNLOCK(inp);
1208 		m_freem(m);
1209 		return (error);
1210 	}
1211 
1212 	/*
1213 	 * Depending on whether or not the application has bound or connected
1214 	 * the socket, we may have to do varying levels of work.  The optimal
1215 	 * case is for a connected UDP socket, as a global lock isn't
1216 	 * required at all.
1217 	 *
1218 	 * In order to decide which we need, we require stability of the
1219 	 * inpcb binding, which we ensure by acquiring a read lock on the
1220 	 * inpcb.  This doesn't strictly follow the lock order, so we play
1221 	 * the trylock and retry game; note that we may end up with more
1222 	 * conservative locks than required the second time around, so later
1223 	 * assertions have to accept that.  Further analysis of the number of
1224 	 * misses under contention is required.
1225 	 *
1226 	 * XXXRW: Check that hash locking update here is correct.
1227 	 */
1228 	pr = inp->inp_socket->so_proto->pr_protocol;
1229 	pcbinfo = get_inpcbinfo(pr);
1230 	sin = (struct sockaddr_in *)addr;
1231 	if (sin != NULL &&
1232 	    (inp->inp_laddr.s_addr == INADDR_ANY && inp->inp_lport == 0)) {
1233 		INP_RUNLOCK(inp);
1234 		INP_WLOCK(inp);
1235 		INP_HASH_WLOCK(pcbinfo);
1236 		unlock_udbinfo = UH_WLOCKED;
1237 	} else if ((sin != NULL && (
1238 	    (sin->sin_addr.s_addr == INADDR_ANY) ||
1239 	    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
1240 	    (inp->inp_laddr.s_addr == INADDR_ANY) ||
1241 	    (inp->inp_lport == 0))) ||
1242 	    (src.sin_family == AF_INET)) {
1243 		INP_HASH_RLOCK(pcbinfo);
1244 		unlock_udbinfo = UH_RLOCKED;
1245 	} else
1246 		unlock_udbinfo = UH_UNLOCKED;
1247 
1248 	/*
1249 	 * If the IP_SENDSRCADDR control message was specified, override the
1250 	 * source address for this datagram.  Its use is invalidated if the
1251 	 * address thus specified is incomplete or clobbers other inpcbs.
1252 	 */
1253 	laddr = inp->inp_laddr;
1254 	lport = inp->inp_lport;
1255 	if (src.sin_family == AF_INET) {
1256 		INP_HASH_LOCK_ASSERT(pcbinfo);
1257 		if ((lport == 0) ||
1258 		    (laddr.s_addr == INADDR_ANY &&
1259 		     src.sin_addr.s_addr == INADDR_ANY)) {
1260 			error = EINVAL;
1261 			goto release;
1262 		}
1263 		error = in_pcbbind_setup(inp, (struct sockaddr *)&src,
1264 		    &laddr.s_addr, &lport, td->td_ucred);
1265 		if (error)
1266 			goto release;
1267 	}
1268 
1269 	/*
1270 	 * If a UDP socket has been connected, then a local address/port will
1271 	 * have been selected and bound.
1272 	 *
1273 	 * If a UDP socket has not been connected to, then an explicit
1274 	 * destination address must be used, in which case a local
1275 	 * address/port may not have been selected and bound.
1276 	 */
1277 	if (sin != NULL) {
1278 		INP_LOCK_ASSERT(inp);
1279 		if (inp->inp_faddr.s_addr != INADDR_ANY) {
1280 			error = EISCONN;
1281 			goto release;
1282 		}
1283 
1284 		/*
1285 		 * Jail may rewrite the destination address, so let it do
1286 		 * that before we use it.
1287 		 */
1288 		error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1289 		if (error)
1290 			goto release;
1291 
1292 		/*
1293 		 * If a local address or port hasn't yet been selected, or if
1294 		 * the destination address needs to be rewritten due to using
1295 		 * a special INADDR_ constant, invoke in_pcbconnect_setup()
1296 		 * to do the heavy lifting.  Once a port is selected, we
1297 		 * commit the binding back to the socket; we also commit the
1298 		 * binding of the address if in jail.
1299 		 *
1300 		 * If we already have a valid binding and we're not
1301 		 * requesting a destination address rewrite, use a fast path.
1302 		 */
1303 		if (inp->inp_laddr.s_addr == INADDR_ANY ||
1304 		    inp->inp_lport == 0 ||
1305 		    sin->sin_addr.s_addr == INADDR_ANY ||
1306 		    sin->sin_addr.s_addr == INADDR_BROADCAST) {
1307 			INP_HASH_LOCK_ASSERT(pcbinfo);
1308 			error = in_pcbconnect_setup(inp, addr, &laddr.s_addr,
1309 			    &lport, &faddr.s_addr, &fport, NULL,
1310 			    td->td_ucred);
1311 			if (error)
1312 				goto release;
1313 
1314 			/*
1315 			 * XXXRW: Why not commit the port if the address is
1316 			 * !INADDR_ANY?
1317 			 */
1318 			/* Commit the local port if newly assigned. */
1319 			if (inp->inp_laddr.s_addr == INADDR_ANY &&
1320 			    inp->inp_lport == 0) {
1321 				INP_WLOCK_ASSERT(inp);
1322 				INP_HASH_WLOCK_ASSERT(pcbinfo);
1323 				/*
1324 				 * Remember addr if jailed, to prevent
1325 				 * rebinding.
1326 				 */
1327 				if (prison_flag(td->td_ucred, PR_IP4))
1328 					inp->inp_laddr = laddr;
1329 				inp->inp_lport = lport;
1330 				if (in_pcbinshash(inp) != 0) {
1331 					inp->inp_lport = 0;
1332 					error = EAGAIN;
1333 					goto release;
1334 				}
1335 				inp->inp_flags |= INP_ANONPORT;
1336 			}
1337 		} else {
1338 			faddr = sin->sin_addr;
1339 			fport = sin->sin_port;
1340 		}
1341 	} else {
1342 		INP_LOCK_ASSERT(inp);
1343 		faddr = inp->inp_faddr;
1344 		fport = inp->inp_fport;
1345 		if (faddr.s_addr == INADDR_ANY) {
1346 			error = ENOTCONN;
1347 			goto release;
1348 		}
1349 	}
1350 
1351 	/*
1352 	 * Calculate data length and get a mbuf for UDP, IP, and possible
1353 	 * link-layer headers.  Immediately slide the data pointer forward again
1354 	 * since we won't use that space at this layer.
1355 	 */
1356 	M_PREPEND(m, sizeof(struct udpiphdr) + max_linkhdr, M_NOWAIT);
1357 	if (m == NULL) {
1358 		error = ENOBUFS;
1359 		goto release;
1360 	}
1361 	m->m_data += max_linkhdr;
1362 	m->m_len -= max_linkhdr;
1363 	m->m_pkthdr.len -= max_linkhdr;
1364 
1365 	/*
1366 	 * Fill in the mbuf with an extended UDP header, with the addresses
1367 	 * and length in network byte order.
1368 	 */
1369 	ui = mtod(m, struct udpiphdr *);
1370 	bzero(ui->ui_x1, sizeof(ui->ui_x1));	/* XXX still needed? */
1371 	ui->ui_pr = pr;
1372 	ui->ui_src = laddr;
1373 	ui->ui_dst = faddr;
1374 	ui->ui_sport = lport;
1375 	ui->ui_dport = fport;
1376 	ui->ui_ulen = htons((u_short)len + sizeof(struct udphdr));
1377 	if (pr == IPPROTO_UDPLITE) {
1378 		struct udpcb *up;
1379 		uint16_t plen;
1380 
1381 		up = intoudpcb(inp);
1382 		cscov = up->u_txcslen;
1383 		plen = (u_short)len + sizeof(struct udphdr);
1384 		if (cscov >= plen)
1385 			cscov = 0;
1386 		ui->ui_len = htons(plen);
1387 		ui->ui_ulen = htons(cscov);
1388 		/*
1389 		 * For UDP-Lite, checksum coverage length of zero means
1390 		 * the entire UDPLite packet is covered by the checksum.
1391 		 */
1392 		cscov_partial = (cscov == 0) ? 0 : 1;
1393 	} else
1394 		ui->ui_v = IPVERSION << 4;
1395 
1396 	/*
1397 	 * Set the Don't Fragment bit in the IP header.
1398 	 */
1399 	if (inp->inp_flags & INP_DONTFRAG) {
1400 		struct ip *ip;
1401 
1402 		ip = (struct ip *)&ui->ui_i;
1403 		ip->ip_off |= htons(IP_DF);
1404 	}
1405 
1406 	ipflags = 0;
1407 	if (inp->inp_socket->so_options & SO_DONTROUTE)
1408 		ipflags |= IP_ROUTETOIF;
1409 	if (inp->inp_socket->so_options & SO_BROADCAST)
1410 		ipflags |= IP_ALLOWBROADCAST;
1411 	if (inp->inp_flags & INP_ONESBCAST)
1412 		ipflags |= IP_SENDONES;
1413 
1414 #ifdef MAC
1415 	mac_inpcb_create_mbuf(inp, m);
1416 #endif
1417 
1418 	/*
1419 	 * Set up checksum and output datagram.
1420 	 */
1421 	ui->ui_sum = 0;
1422 	if (pr == IPPROTO_UDPLITE) {
1423 		if (inp->inp_flags & INP_ONESBCAST)
1424 			faddr.s_addr = INADDR_BROADCAST;
1425 		if (cscov_partial) {
1426 			if ((ui->ui_sum = in_cksum(m, sizeof(struct ip) + cscov)) == 0)
1427 				ui->ui_sum = 0xffff;
1428 		} else {
1429 			if ((ui->ui_sum = in_cksum(m, sizeof(struct udpiphdr) + len)) == 0)
1430 				ui->ui_sum = 0xffff;
1431 		}
1432 	} else if (V_udp_cksum) {
1433 		if (inp->inp_flags & INP_ONESBCAST)
1434 			faddr.s_addr = INADDR_BROADCAST;
1435 		ui->ui_sum = in_pseudo(ui->ui_src.s_addr, faddr.s_addr,
1436 		    htons((u_short)len + sizeof(struct udphdr) + pr));
1437 		m->m_pkthdr.csum_flags = CSUM_UDP;
1438 		m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
1439 	}
1440 	((struct ip *)ui)->ip_len = htons(sizeof(struct udpiphdr) + len);
1441 	((struct ip *)ui)->ip_ttl = inp->inp_ip_ttl;	/* XXX */
1442 	((struct ip *)ui)->ip_tos = tos;		/* XXX */
1443 	UDPSTAT_INC(udps_opackets);
1444 
1445 	/*
1446 	 * Setup flowid / RSS information for outbound socket.
1447 	 *
1448 	 * Once the UDP code decides to set a flowid some other way,
1449 	 * this allows the flowid to be overridden by userland.
1450 	 */
1451 	if (use_flowid) {
1452 		m->m_flags |= M_FLOWID;
1453 		m->m_pkthdr.flowid = flowid;
1454 		M_HASHTYPE_SET(m, flowid_type);
1455 #ifdef	RSS
1456 	} else {
1457 		uint32_t hash_val, hash_type;
1458 		/*
1459 		 * Calculate an appropriate RSS hash for UDP and
1460 		 * UDP Lite.
1461 		 *
1462 		 * The called function will take care of figuring out
1463 		 * whether a 2-tuple or 4-tuple hash is required based
1464 		 * on the currently configured scheme.
1465 		 *
1466 		 * Later on, connected socket values should be
1467 		 * cached in the inpcb and reused, rather than constantly
1468 		 * re-calculating it.
1469 		 *
1470 		 * UDP Lite is a different protocol number and will
1471 		 * likely end up being hashed as a 2-tuple until
1472 		 * RSS / NICs grow UDP Lite protocol awareness.
1473 		 */
1474 		if (rss_proto_software_hash_v4(faddr, laddr, fport, lport,
1475 		    pr, &hash_val, &hash_type) == 0) {
1476 			m->m_pkthdr.flowid = hash_val;
1477 			m->m_flags |= M_FLOWID;
1478 			M_HASHTYPE_SET(m, hash_type);
1479 		}
1480 #endif
1481 	}
1482 
1483 #ifdef	RSS
1484 	/*
1485 	 * Don't override with the inp cached flowid value.
1486 	 *
1487 	 * Depending upon the kind of send being done, the inp
1488 	 * flowid/flowtype values may actually not be appropriate
1489 	 * for this particular socket send.
1490 	 *
1491 	 * We should either leave the flowid at zero (which is what is
1492 	 * currently done) or set it to some software generated
1493 	 * hash value based on the packet contents.
1494 	 */
1495 	ipflags |= IP_NODEFAULTFLOWID;
1496 #endif	/* RSS */
1497 
1498 	if (unlock_udbinfo == UH_WLOCKED)
1499 		INP_HASH_WUNLOCK(pcbinfo);
1500 	else if (unlock_udbinfo == UH_RLOCKED)
1501 		INP_HASH_RUNLOCK(pcbinfo);
1502 	UDP_PROBE(send, NULL, inp, &ui->ui_i, inp, &ui->ui_u);
1503 	error = ip_output(m, inp->inp_options, NULL, ipflags,
1504 	    inp->inp_moptions, inp);
1505 	if (unlock_udbinfo == UH_WLOCKED)
1506 		INP_WUNLOCK(inp);
1507 	else
1508 		INP_RUNLOCK(inp);
1509 	return (error);
1510 
1511 release:
1512 	if (unlock_udbinfo == UH_WLOCKED) {
1513 		INP_HASH_WUNLOCK(pcbinfo);
1514 		INP_WUNLOCK(inp);
1515 	} else if (unlock_udbinfo == UH_RLOCKED) {
1516 		INP_HASH_RUNLOCK(pcbinfo);
1517 		INP_RUNLOCK(inp);
1518 	} else
1519 		INP_RUNLOCK(inp);
1520 	m_freem(m);
1521 	return (error);
1522 }
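
/*
 * Illustrative userland sketch (not part of the original file): supplying
 * a per-datagram source address and TOS through the control messages
 * parsed above.  The socket is assumed to be bound to a local port, since
 * IP_SENDSRCADDR is rejected on an unbound socket; the example_* name and
 * the 0x10 (IPTOS_LOWDELAY) value are only for illustration.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <string.h>

static ssize_t
example_send_from(int s, struct sockaddr_in *dst, struct in_addr src,
    void *data, size_t datalen)
{
	char cbuf[CMSG_SPACE(sizeof(struct in_addr)) +
	    CMSG_SPACE(sizeof(u_char))];
	struct msghdr msg;
	struct iovec iov;
	struct cmsghdr *cm;
	u_char tos = 0x10;

	memset(&msg, 0, sizeof(msg));
	memset(cbuf, 0, sizeof(cbuf));
	iov.iov_base = data;
	iov.iov_len = datalen;
	msg.msg_name = dst;
	msg.msg_namelen = sizeof(*dst);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	/* First cmsg: override the source address for this datagram. */
	cm = CMSG_FIRSTHDR(&msg);
	cm->cmsg_level = IPPROTO_IP;
	cm->cmsg_type = IP_SENDSRCADDR;
	cm->cmsg_len = CMSG_LEN(sizeof(struct in_addr));
	memcpy(CMSG_DATA(cm), &src, sizeof(src));

	/* Second cmsg: per-datagram type-of-service byte. */
	cm = CMSG_NXTHDR(&msg, cm);
	cm->cmsg_level = IPPROTO_IP;
	cm->cmsg_type = IP_TOS;
	cm->cmsg_len = CMSG_LEN(sizeof(u_char));
	*CMSG_DATA(cm) = tos;

	return (sendmsg(s, &msg, 0));
}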
1523 
1524 
1525 #if defined(IPSEC) && defined(IPSEC_NAT_T)
1526 /*
1527  * Potentially decap ESP in UDP frame.  Check for an ESP header
1528  * and optional marker; if present, strip the UDP header and
1529  * push the result through IPSec.
1530  *
1531  * Returns mbuf to be processed (potentially re-allocated) or
1532  * NULL if consumed and/or processed.
1533  */
1534 static struct mbuf *
1535 udp4_espdecap(struct inpcb *inp, struct mbuf *m, int off)
1536 {
1537 	size_t minlen, payload, skip, iphlen;
1538 	caddr_t data;
1539 	struct udpcb *up;
1540 	struct m_tag *tag;
1541 	struct udphdr *udphdr;
1542 	struct ip *ip;
1543 
1544 	INP_RLOCK_ASSERT(inp);
1545 
1546 	/*
1547 	 * Pull up data so the longest case is contiguous:
1548 	 *    IP/UDP hdr + non ESP marker + ESP hdr.
1549 	 */
1550 	minlen = off + sizeof(uint64_t) + sizeof(struct esp);
1551 	if (minlen > m->m_pkthdr.len)
1552 		minlen = m->m_pkthdr.len;
1553 	if ((m = m_pullup(m, minlen)) == NULL) {
1554 		IPSECSTAT_INC(ips_in_inval);
1555 		return (NULL);		/* Bypass caller processing. */
1556 	}
1557 	data = mtod(m, caddr_t);	/* Points to ip header. */
1558 	payload = m->m_len - off;	/* Size of payload. */
1559 
1560 	if (payload == 1 && data[off] == '\xff')
1561 		return (m);		/* NB: keepalive packet, no decap. */
1562 
1563 	up = intoudpcb(inp);
1564 	KASSERT(up != NULL, ("%s: udpcb NULL", __func__));
1565 	KASSERT((up->u_flags & UF_ESPINUDP_ALL) != 0,
1566 	    ("u_flags 0x%x", up->u_flags));
1567 
1568 	/*
1569 	 * Check that the payload is large enough to hold an
1570 	 * ESP header and compute the amount of data to remove.
1571 	 *
1572 	 * NB: the caller has already done a pullup for us.
1573 	 * XXX can we assume alignment and eliminate bcopys?
1574 	 */
1575 	if (up->u_flags & UF_ESPINUDP_NON_IKE) {
1576 		/*
1577 		 * draft-ietf-ipsec-nat-t-ike-0[01].txt and
1578 		 * draft-ietf-ipsec-udp-encaps-(00/)01.txt, ignoring
1579 		 * possible AH mode non-IKE marker+non-ESP marker
1580 		 * from draft-ietf-ipsec-udp-encaps-00.txt.
1581 		 */
1582 		uint64_t marker;
1583 
1584 		if (payload <= sizeof(uint64_t) + sizeof(struct esp))
1585 			return (m);	/* NB: no decap. */
1586 		bcopy(data + off, &marker, sizeof(uint64_t));
1587 		if (marker != 0)	/* Non-IKE marker. */
1588 			return (m);	/* NB: no decap. */
1589 		skip = sizeof(uint64_t) + sizeof(struct udphdr);
1590 	} else {
1591 		uint32_t spi;
1592 
1593 		if (payload <= sizeof(struct esp)) {
1594 			IPSECSTAT_INC(ips_in_inval);
1595 			m_freem(m);
1596 			return (NULL);	/* Discard. */
1597 		}
1598 		bcopy(data + off, &spi, sizeof(uint32_t));
1599 		if (spi == 0)		/* Non-ESP marker. */
1600 			return (m);	/* NB: no decap. */
1601 		skip = sizeof(struct udphdr);
1602 	}
1603 
1604 	/*
1605 	 * Set up a PACKET_TAG_IPSEC_NAT_T_PORTS tag to remember
1606 	 * the UDP ports.  This is required if we want to select
1607 	 * the right SPD for multiple hosts behind the same NAT.
1608 	 *
1609 	 * NB: ports are maintained in network byte order everywhere
1610 	 *     in the NAT-T code.
1611 	 */
1612 	tag = m_tag_get(PACKET_TAG_IPSEC_NAT_T_PORTS,
1613 		2 * sizeof(uint16_t), M_NOWAIT);
1614 	if (tag == NULL) {
1615 		IPSECSTAT_INC(ips_in_nomem);
1616 		m_freem(m);
1617 		return (NULL);		/* Discard. */
1618 	}
1619 	iphlen = off - sizeof(struct udphdr);
1620 	udphdr = (struct udphdr *)(data + iphlen);
1621 	((uint16_t *)(tag + 1))[0] = udphdr->uh_sport;
1622 	((uint16_t *)(tag + 1))[1] = udphdr->uh_dport;
1623 	m_tag_prepend(m, tag);
1624 
1625 	/*
1626 	 * Remove the UDP header (and possibly the non-ESP marker);
1627 	 * the IP header length is iphlen.
1628 	 * Before:
1629 	 *   <--- off --->
1630 	 *   +----+------+-----+
1631 	 *   | IP |  UDP | ESP |
1632 	 *   +----+------+-----+
1633 	 *        <-skip->
1634 	 * After:
1635 	 *          +----+-----+
1636 	 *          | IP | ESP |
1637 	 *          +----+-----+
1638 	 *   <-skip->
1639 	 */
1640 	ovbcopy(data, data + skip, iphlen);
1641 	m_adj(m, skip);
1642 
1643 	ip = mtod(m, struct ip *);
1644 	ip->ip_len = htons(ntohs(ip->ip_len) - skip);
1645 	ip->ip_p = IPPROTO_ESP;
1646 
1647 	/*
1648 	 * We cannot yet update the cksums so clear any
1649 	 * h/w cksum flags as they are no longer valid.
1650 	 */
1651 	if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID)
1652 		m->m_pkthdr.csum_flags &= ~(CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
1653 
1654 	(void) ipsec4_common_input(m, iphlen, ip->ip_p);
1655 	return (NULL);			/* NB: consumed, bypass processing. */
1656 }
1657 #endif /* defined(IPSEC) && defined(IPSEC_NAT_T) */
1658 
1659 static void
1660 udp_abort(struct socket *so)
1661 {
1662 	struct inpcb *inp;
1663 	struct inpcbinfo *pcbinfo;
1664 
1665 	pcbinfo = get_inpcbinfo(so->so_proto->pr_protocol);
1666 	inp = sotoinpcb(so);
1667 	KASSERT(inp != NULL, ("udp_abort: inp == NULL"));
1668 	INP_WLOCK(inp);
1669 	if (inp->inp_faddr.s_addr != INADDR_ANY) {
1670 		INP_HASH_WLOCK(pcbinfo);
1671 		in_pcbdisconnect(inp);
1672 		inp->inp_laddr.s_addr = INADDR_ANY;
1673 		INP_HASH_WUNLOCK(pcbinfo);
1674 		soisdisconnected(so);
1675 	}
1676 	INP_WUNLOCK(inp);
1677 }
1678 
1679 static int
1680 udp_attach(struct socket *so, int proto, struct thread *td)
1681 {
1682 	struct inpcb *inp;
1683 	struct inpcbinfo *pcbinfo;
1684 	int error;
1685 
1686 	pcbinfo = get_inpcbinfo(so->so_proto->pr_protocol);
1687 	inp = sotoinpcb(so);
1688 	KASSERT(inp == NULL, ("udp_attach: inp != NULL"));
1689 	error = soreserve(so, udp_sendspace, udp_recvspace);
1690 	if (error)
1691 		return (error);
1692 	INP_INFO_WLOCK(pcbinfo);
1693 	error = in_pcballoc(so, pcbinfo);
1694 	if (error) {
1695 		INP_INFO_WUNLOCK(pcbinfo);
1696 		return (error);
1697 	}
1698 
1699 	inp = sotoinpcb(so);
1700 	inp->inp_vflag |= INP_IPV4;
1701 	inp->inp_ip_ttl = V_ip_defttl;
1702 
1703 	error = udp_newudpcb(inp);
1704 	if (error) {
1705 		in_pcbdetach(inp);
1706 		in_pcbfree(inp);
1707 		INP_INFO_WUNLOCK(pcbinfo);
1708 		return (error);
1709 	}
1710 
1711 	INP_WUNLOCK(inp);
1712 	INP_INFO_WUNLOCK(pcbinfo);
1713 	return (0);
1714 }
1715 #endif /* INET */
1716 
1717 int
1718 udp_set_kernel_tunneling(struct socket *so, udp_tun_func_t f)
1719 {
1720 	struct inpcb *inp;
1721 	struct udpcb *up;
1722 
1723 	KASSERT(so->so_type == SOCK_DGRAM,
1724 	    ("udp_set_kernel_tunneling: !dgram"));
1725 	inp = sotoinpcb(so);
1726 	KASSERT(inp != NULL, ("udp_set_kernel_tunneling: inp == NULL"));
1727 	INP_WLOCK(inp);
1728 	up = intoudpcb(inp);
1729 	if (up->u_tun_func != NULL) {
1730 		INP_WUNLOCK(inp);
1731 		return (EBUSY);
1732 	}
1733 	up->u_tun_func = f;
1734 	INP_WUNLOCK(inp);
1735 	return (0);
1736 }
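
/*
 * Illustrative sketch (hypothetical consumer, not part of the original
 * file): a kernel facility that wants datagrams handed to it instead of
 * being appended to the socket buffer registers a function of type
 * udp_tun_func_t.  Per udp_append() above, 'off' is the offset of the
 * UDP header and the callee takes ownership of the mbuf chain.  The
 * example_* names are hypothetical.
 */
static void
example_tunnel_input(struct mbuf *m, int off, struct inpcb *inp)
{

	/* A real consumer would decapsulate here; just drop the chain. */
	m_freem(m);
}

static int
example_tunnel_attach(struct socket *so)
{

	/* 'so' is assumed to be an already-bound UDP SOCK_DGRAM socket. */
	return (udp_set_kernel_tunneling(so, example_tunnel_input));
}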
1737 
1738 #ifdef INET
1739 static int
1740 udp_bind(struct socket *so, struct sockaddr *nam, struct thread *td)
1741 {
1742 	struct inpcb *inp;
1743 	struct inpcbinfo *pcbinfo;
1744 	int error;
1745 
1746 	pcbinfo = get_inpcbinfo(so->so_proto->pr_protocol);
1747 	inp = sotoinpcb(so);
1748 	KASSERT(inp != NULL, ("udp_bind: inp == NULL"));
1749 	INP_WLOCK(inp);
1750 	INP_HASH_WLOCK(pcbinfo);
1751 	error = in_pcbbind(inp, nam, td->td_ucred);
1752 	INP_HASH_WUNLOCK(pcbinfo);
1753 	INP_WUNLOCK(inp);
1754 	return (error);
1755 }
1756 
1757 static void
1758 udp_close(struct socket *so)
1759 {
1760 	struct inpcb *inp;
1761 	struct inpcbinfo *pcbinfo;
1762 
1763 	pcbinfo = get_inpcbinfo(so->so_proto->pr_protocol);
1764 	inp = sotoinpcb(so);
1765 	KASSERT(inp != NULL, ("udp_close: inp == NULL"));
1766 	INP_WLOCK(inp);
1767 	if (inp->inp_faddr.s_addr != INADDR_ANY) {
1768 		INP_HASH_WLOCK(pcbinfo);
1769 		in_pcbdisconnect(inp);
1770 		inp->inp_laddr.s_addr = INADDR_ANY;
1771 		INP_HASH_WUNLOCK(pcbinfo);
1772 		soisdisconnected(so);
1773 	}
1774 	INP_WUNLOCK(inp);
1775 }
1776 
1777 static int
1778 udp_connect(struct socket *so, struct sockaddr *nam, struct thread *td)
1779 {
1780 	struct inpcb *inp;
1781 	struct inpcbinfo *pcbinfo;
1782 	struct sockaddr_in *sin;
1783 	int error;
1784 
1785 	pcbinfo = get_inpcbinfo(so->so_proto->pr_protocol);
1786 	inp = sotoinpcb(so);
1787 	KASSERT(inp != NULL, ("udp_connect: inp == NULL"));
1788 	INP_WLOCK(inp);
1789 	if (inp->inp_faddr.s_addr != INADDR_ANY) {
1790 		INP_WUNLOCK(inp);
1791 		return (EISCONN);
1792 	}
1793 	sin = (struct sockaddr_in *)nam;
1794 	error = prison_remote_ip4(td->td_ucred, &sin->sin_addr);
1795 	if (error != 0) {
1796 		INP_WUNLOCK(inp);
1797 		return (error);
1798 	}
1799 	INP_HASH_WLOCK(pcbinfo);
1800 	error = in_pcbconnect(inp, nam, td->td_ucred);
1801 	INP_HASH_WUNLOCK(pcbinfo);
1802 	if (error == 0)
1803 		soisconnected(so);
1804 	INP_WUNLOCK(inp);
1805 	return (error);
1806 }
1807 
1808 static void
1809 udp_detach(struct socket *so)
1810 {
1811 	struct inpcb *inp;
1812 	struct inpcbinfo *pcbinfo;
1813 	struct udpcb *up;
1814 
1815 	pcbinfo = get_inpcbinfo(so->so_proto->pr_protocol);
1816 	inp = sotoinpcb(so);
1817 	KASSERT(inp != NULL, ("udp_detach: inp == NULL"));
1818 	KASSERT(inp->inp_faddr.s_addr == INADDR_ANY,
1819 	    ("udp_detach: not disconnected"));
1820 	INP_INFO_WLOCK(pcbinfo);
1821 	INP_WLOCK(inp);
1822 	up = intoudpcb(inp);
1823 	KASSERT(up != NULL, ("%s: up == NULL", __func__));
1824 	inp->inp_ppcb = NULL;
1825 	in_pcbdetach(inp);
1826 	in_pcbfree(inp);
1827 	INP_INFO_WUNLOCK(pcbinfo);
1828 	udp_discardcb(up);
1829 }
1830 
1831 static int
1832 udp_disconnect(struct socket *so)
1833 {
1834 	struct inpcb *inp;
1835 	struct inpcbinfo *pcbinfo;
1836 
1837 	pcbinfo = get_inpcbinfo(so->so_proto->pr_protocol);
1838 	inp = sotoinpcb(so);
1839 	KASSERT(inp != NULL, ("udp_disconnect: inp == NULL"));
1840 	INP_WLOCK(inp);
1841 	if (inp->inp_faddr.s_addr == INADDR_ANY) {
1842 		INP_WUNLOCK(inp);
1843 		return (ENOTCONN);
1844 	}
1845 	INP_HASH_WLOCK(pcbinfo);
1846 	in_pcbdisconnect(inp);
1847 	inp->inp_laddr.s_addr = INADDR_ANY;
1848 	INP_HASH_WUNLOCK(pcbinfo);
1849 	SOCK_LOCK(so);
1850 	so->so_state &= ~SS_ISCONNECTED;		/* XXX */
1851 	SOCK_UNLOCK(so);
1852 	INP_WUNLOCK(inp);
1853 	return (0);
1854 }
1855 
1856 static int
1857 udp_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
1858     struct mbuf *control, struct thread *td)
1859 {
1860 	struct inpcb *inp;
1861 
1862 	inp = sotoinpcb(so);
1863 	KASSERT(inp != NULL, ("udp_send: inp == NULL"));
1864 	return (udp_output(inp, m, addr, control, td));
1865 }
1866 #endif /* INET */
1867 
1868 int
1869 udp_shutdown(struct socket *so)
1870 {
1871 	struct inpcb *inp;
1872 
1873 	inp = sotoinpcb(so);
1874 	KASSERT(inp != NULL, ("udp_shutdown: inp == NULL"));
1875 	INP_WLOCK(inp);
1876 	socantsendmore(so);
1877 	INP_WUNLOCK(inp);
1878 	return (0);
1879 }
1880 
1881 #ifdef INET
1882 struct pr_usrreqs udp_usrreqs = {
1883 	.pru_abort =		udp_abort,
1884 	.pru_attach =		udp_attach,
1885 	.pru_bind =		udp_bind,
1886 	.pru_connect =		udp_connect,
1887 	.pru_control =		in_control,
1888 	.pru_detach =		udp_detach,
1889 	.pru_disconnect =	udp_disconnect,
1890 	.pru_peeraddr =		in_getpeeraddr,
1891 	.pru_send =		udp_send,
1892 	.pru_soreceive =	soreceive_dgram,
1893 	.pru_sosend =		sosend_dgram,
1894 	.pru_shutdown =		udp_shutdown,
1895 	.pru_sockaddr =		in_getsockaddr,
1896 	.pru_sosetlabel =	in_pcbsosetlabel,
1897 	.pru_close =		udp_close,
1898 };
1899 #endif /* INET */
1900