xref: /illumos-gate/usr/src/uts/common/inet/udp/udp.c (revision 15e6edf145a9c2bb0e0272cf8debe823bb97529b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 /* Copyright (c) 1990 Mentat Inc. */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 const char udp_version[] = "%Z%%M%	%I%	%E% SMI";
30 
31 #include <sys/types.h>
32 #include <sys/stream.h>
33 #include <sys/dlpi.h>
34 #include <sys/pattr.h>
35 #include <sys/stropts.h>
36 #include <sys/strlog.h>
37 #include <sys/strsun.h>
38 #include <sys/time.h>
39 #define	_SUN_TPI_VERSION 2
40 #include <sys/tihdr.h>
41 #include <sys/timod.h>
42 #include <sys/ddi.h>
43 #include <sys/sunddi.h>
44 #include <sys/strsubr.h>
45 #include <sys/suntpi.h>
46 #include <sys/xti_inet.h>
47 #include <sys/cmn_err.h>
48 #include <sys/kmem.h>
49 #include <sys/policy.h>
50 #include <sys/ucred.h>
51 #include <sys/zone.h>
52 
53 #include <sys/socket.h>
54 #include <sys/sockio.h>
55 #include <sys/vtrace.h>
56 #include <sys/sdt.h>
57 #include <sys/debug.h>
58 #include <sys/isa_defs.h>
59 #include <sys/random.h>
60 #include <netinet/in.h>
61 #include <netinet/ip6.h>
62 #include <netinet/icmp6.h>
63 #include <netinet/udp.h>
64 #include <net/if.h>
65 #include <net/route.h>
66 
67 #include <inet/common.h>
68 #include <inet/ip.h>
69 #include <inet/ip_impl.h>
70 #include <inet/ip6.h>
71 #include <inet/ip_ire.h>
72 #include <inet/ip_if.h>
73 #include <inet/ip_multi.h>
74 #include <inet/ip_ndp.h>
75 #include <inet/mi.h>
76 #include <inet/mib2.h>
77 #include <inet/nd.h>
78 #include <inet/optcom.h>
79 #include <inet/snmpcom.h>
80 #include <inet/kstatcom.h>
81 #include <inet/udp_impl.h>
82 #include <inet/ipclassifier.h>
83 #include <inet/ipsec_impl.h>
84 #include <inet/ipp_common.h>
85 
86 /*
87  * The ipsec_info.h header file is here since it has the definition for the
88  * M_CTL message types used by IP to convey information to the ULP.
89  * ipsec_info.h needs pfkeyv2.h, hence the latter's presence.
90  */
91 #include <net/pfkeyv2.h>
92 #include <inet/ipsec_info.h>
93 
94 #include <sys/tsol/label.h>
95 #include <sys/tsol/tnet.h>
96 #include <rpc/pmap_prot.h>
97 
98 /*
99  * Synchronization notes:
100  *
101  * UDP is MT and uses the usual kernel synchronization primitives. There are 2
102  * locks, the fanout lock (uf_lock) and the udp endpoint lock udp_rwlock.
103  * We also use conn_lock when updating things that affect the IP classifier
104  * lookup.
105  * The lock order is udp_rwlock -> uf_lock and udp_rwlock -> conn_lock.
106  *
107  * The fanout lock uf_lock:
108  * When a UDP endpoint is bound to a local port, it is inserted into
109  * a bind hash list.  The list consists of an array of udp_fanout_t buckets.
110  * The size of the array is controlled by the udp_bind_fanout_size variable.
111  * This variable can be changed in /etc/system if the default value is
112  * not large enough.  Each bind hash bucket is protected by a per bucket
113  * lock.  It protects the udp_bind_hash and udp_ptpbhn fields in the udp_t
114  * structure and a few other fields in the udp_t. A UDP endpoint is removed
115  * from the bind hash list only when it is being unbound or being closed.
116  * The per bucket lock also protects a UDP endpoint's state changes.
117  *
118  * The udp_rwlock:
119  * This protects most of the other fields in the udp_t. The exact list of
120  * fields which are protected by each of the above locks is documented in
121  * the udp_t structure definition.
122  *
123  * Plumbing notes:
124  * UDP is always a device driver. For compatibility with mibopen() code
125  * it is possible to I_PUSH "udp", but that results in pushing a passthrough
126  * dummy module.
127  *
128  * The above implies that we don't support any intermediate module to
129  * reside in between /dev/ip and udp -- in fact, we never supported such a
130  * scenario in the past, as the inter-layer communication semantics have
131  * always been private.
132  */
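
/*
 * Illustrative sketch (not from the original code): the lock ordering
 * described above, shown as a hypothetical routine that updates state
 * protected by both the endpoint lock and a bind hash bucket.  The
 * function name and the guard macro are made up; it is never called.
 */
#ifdef	UDP_LOCK_ORDER_EXAMPLE
static void
udp_lock_order_example(udp_t *udp, udp_fanout_t *udpf)
{
	rw_enter(&udp->udp_rwlock, RW_WRITER);	/* endpoint lock first */
	mutex_enter(&udpf->uf_lock);		/* then the fanout bucket */
	/* ... modify fields protected by both locks ... */
	mutex_exit(&udpf->uf_lock);
	rw_exit(&udp->udp_rwlock);
}
#endif	/* UDP_LOCK_ORDER_EXAMPLE */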
133 
134 /* For /etc/system control */
135 uint_t udp_bind_fanout_size = UDP_BIND_FANOUT_SIZE;
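
/*
 * For example, a line such as the following (illustrative only; it assumes
 * UDP is loaded as the "udp" module, and the value is normally a power of
 * two) could be added to /etc/system:
 *
 *	set udp:udp_bind_fanout_size = 1024
 */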
136 
137 #define	NDD_TOO_QUICK_MSG \
138 	"ndd get info rate too high for non-privileged users, try again " \
139 	"later.\n"
140 #define	NDD_OUT_OF_BUF_MSG	"<< Out of buffer >>\n"
141 
142 /* Option processing attrs */
143 typedef struct udpattrs_s {
144 	union {
145 		ip6_pkt_t	*udpattr_ipp6;	/* For V6 */
146 		ip4_pkt_t 	*udpattr_ipp4;	/* For V4 */
147 	} udpattr_ippu;
148 #define	udpattr_ipp6 udpattr_ippu.udpattr_ipp6
149 #define	udpattr_ipp4 udpattr_ippu.udpattr_ipp4
150 	mblk_t		*udpattr_mb;
151 	boolean_t	udpattr_credset;
152 } udpattrs_t;
153 
154 static void	udp_addr_req(queue_t *q, mblk_t *mp);
155 static void	udp_bind(queue_t *q, mblk_t *mp);
156 static void	udp_bind_hash_insert(udp_fanout_t *uf, udp_t *udp);
157 static void	udp_bind_hash_remove(udp_t *udp, boolean_t caller_holds_lock);
158 static void	udp_bind_result(conn_t *, mblk_t *);
159 static void	udp_bind_ack(conn_t *, mblk_t *mp);
160 static void	udp_bind_error(conn_t *, mblk_t *mp);
161 static int	udp_build_hdrs(udp_t *udp);
162 static void	udp_capability_req(queue_t *q, mblk_t *mp);
163 static int	udp_close(queue_t *q);
164 static void	udp_connect(queue_t *q, mblk_t *mp);
165 static void	udp_disconnect(queue_t *q, mblk_t *mp);
166 static void	udp_err_ack(queue_t *q, mblk_t *mp, t_scalar_t t_error,
167 		    int sys_error);
168 static void	udp_err_ack_prim(queue_t *q, mblk_t *mp, int primitive,
169 		    t_scalar_t tlierr, int unixerr);
170 static int	udp_extra_priv_ports_get(queue_t *q, mblk_t *mp, caddr_t cp,
171 		    cred_t *cr);
172 static int	udp_extra_priv_ports_add(queue_t *q, mblk_t *mp,
173 		    char *value, caddr_t cp, cred_t *cr);
174 static int	udp_extra_priv_ports_del(queue_t *q, mblk_t *mp,
175 		    char *value, caddr_t cp, cred_t *cr);
176 static void	udp_icmp_error(queue_t *q, mblk_t *mp);
177 static void	udp_icmp_error_ipv6(queue_t *q, mblk_t *mp);
178 static void	udp_info_req(queue_t *q, mblk_t *mp);
179 static void	udp_input(void *, mblk_t *, void *);
180 static mblk_t	*udp_ip_bind_mp(udp_t *udp, t_scalar_t bind_prim,
181 		    t_scalar_t addr_length);
182 static void	udp_lrput(queue_t *, mblk_t *);
183 static void	udp_lwput(queue_t *, mblk_t *);
184 static int	udp_open(queue_t *q, dev_t *devp, int flag, int sflag,
185 		    cred_t *credp, boolean_t isv6);
186 static int	udp_openv4(queue_t *q, dev_t *devp, int flag, int sflag,
187 		    cred_t *credp);
188 static int	udp_openv6(queue_t *q, dev_t *devp, int flag, int sflag,
189 		    cred_t *credp);
190 static  int	udp_unitdata_opt_process(queue_t *q, mblk_t *mp,
191 		    int *errorp, udpattrs_t *udpattrs);
192 static boolean_t udp_opt_allow_udr_set(t_scalar_t level, t_scalar_t name);
193 static int	udp_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr);
194 static boolean_t udp_param_register(IDP *ndp, udpparam_t *udppa, int cnt);
195 static int	udp_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
196 		    cred_t *cr);
197 static void	udp_report_item(mblk_t *mp, udp_t *udp);
198 static int	udp_rinfop(queue_t *q, infod_t *dp);
199 static int	udp_rrw(queue_t *q, struiod_t *dp);
200 static int	udp_status_report(queue_t *q, mblk_t *mp, caddr_t cp,
201 		    cred_t *cr);
202 static void	udp_send_data(udp_t *, queue_t *, mblk_t *, ipha_t *);
203 static void	udp_ud_err(queue_t *q, mblk_t *mp, uchar_t *destaddr,
204 		    t_scalar_t destlen, t_scalar_t err);
205 static void	udp_unbind(queue_t *q, mblk_t *mp);
206 static in_port_t udp_update_next_port(udp_t *udp, in_port_t port,
207     boolean_t random);
208 static mblk_t	*udp_output_v4(conn_t *, mblk_t *, ipaddr_t, uint16_t, uint_t,
209 		    int *, boolean_t);
210 static mblk_t	*udp_output_v6(conn_t *connp, mblk_t *mp, sin6_t *sin6,
211 		    int *error);
212 static void	udp_wput_other(queue_t *q, mblk_t *mp);
213 static void	udp_wput_iocdata(queue_t *q, mblk_t *mp);
214 static size_t	udp_set_rcv_hiwat(udp_t *udp, size_t size);
215 
216 static void	*udp_stack_init(netstackid_t stackid, netstack_t *ns);
217 static void	udp_stack_fini(netstackid_t stackid, void *arg);
218 
219 static void	*udp_kstat_init(netstackid_t stackid);
220 static void	udp_kstat_fini(netstackid_t stackid, kstat_t *ksp);
221 static void	*udp_kstat2_init(netstackid_t, udp_stat_t *);
222 static void	udp_kstat2_fini(netstackid_t, kstat_t *);
223 static int	udp_kstat_update(kstat_t *kp, int rw);
224 
225 static void	udp_rcv_enqueue(queue_t *q, udp_t *udp, mblk_t *mp,
226 		    uint_t pkt_len);
227 static void	udp_rcv_drain(queue_t *q, udp_t *udp, boolean_t closing);
228 static void	udp_xmit(queue_t *, mblk_t *, ire_t *ire, conn_t *, zoneid_t);
229 
230 #define	UDP_RECV_HIWATER	(56 * 1024)
231 #define	UDP_RECV_LOWATER	128
232 #define	UDP_XMIT_HIWATER	(56 * 1024)
233 #define	UDP_XMIT_LOWATER	1024
234 
235 static struct module_info udp_mod_info =  {
236 	UDP_MOD_ID, UDP_MOD_NAME, 1, INFPSZ, UDP_RECV_HIWATER, UDP_RECV_LOWATER
237 };
238 
239 /*
240  * Entry points for UDP as a device.
241  * We have separate open functions for the /dev/udp and /dev/udp6 devices.
242  */
243 static struct qinit udp_rinitv4 = {
244 	NULL, NULL, udp_openv4, udp_close, NULL,
245 	&udp_mod_info, NULL, udp_rrw, udp_rinfop, STRUIOT_STANDARD
246 };
247 
248 static struct qinit udp_rinitv6 = {
249 	NULL, NULL, udp_openv6, udp_close, NULL,
250 	&udp_mod_info, NULL, udp_rrw, udp_rinfop, STRUIOT_STANDARD
251 };
252 
253 static struct qinit udp_winit = {
254 	(pfi_t)udp_wput, (pfi_t)ip_wsrv, NULL, NULL, NULL,
255 	&udp_mod_info, NULL, NULL, NULL, STRUIOT_NONE
256 };
257 
258 /*
259  * UDP needs to handle I_LINK and I_PLINK since ifconfig
260  * likes to use it as a place to hang the various streams.
261  */
262 static struct qinit udp_lrinit = {
263 	(pfi_t)udp_lrput, NULL, udp_openv4, udp_close, NULL,
264 	&udp_mod_info
265 };
266 
267 static struct qinit udp_lwinit = {
268 	(pfi_t)udp_lwput, NULL, udp_openv4, udp_close, NULL,
269 	&udp_mod_info
270 };
271 
272 /* For AF_INET aka /dev/udp */
273 struct streamtab udpinfov4 = {
274 	&udp_rinitv4, &udp_winit, &udp_lrinit, &udp_lwinit
275 };
276 
277 /* For AF_INET6 aka /dev/udp6 */
278 struct streamtab udpinfov6 = {
279 	&udp_rinitv6, &udp_winit, &udp_lrinit, &udp_lwinit
280 };
281 
282 static	sin_t	sin_null;	/* Zero address for quick clears */
283 static	sin6_t	sin6_null;	/* Zero address for quick clears */
284 
285 #define	UDP_MAXPACKET_IPV4 (IP_MAXPACKET - UDPH_SIZE - IP_SIMPLE_HDR_LENGTH)
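/* i.e. 65535 - 8 - 20 = 65507 bytes of UDP payload over IPv4 */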
286 
287 /* Default structure copied into T_INFO_ACK messages */
288 static struct T_info_ack udp_g_t_info_ack_ipv4 = {
289 	T_INFO_ACK,
290 	UDP_MAXPACKET_IPV4,	/* TSDU_size. Excl. headers */
291 	T_INVALID,	/* ETSU_size.  udp does not support expedited data. */
292 	T_INVALID,	/* CDATA_size. udp does not support connect data. */
293 	T_INVALID,	/* DDATA_size. udp does not support disconnect data. */
294 	sizeof (sin_t),	/* ADDR_size. */
295 	0,		/* OPT_size - not initialized here */
296 	UDP_MAXPACKET_IPV4,	/* TIDU_size.  Excl. headers */
297 	T_CLTS,		/* SERV_type.  udp supports connection-less. */
298 	TS_UNBND,	/* CURRENT_state.  This is set from udp_state. */
299 	(XPG4_1|SENDZERO) /* PROVIDER_flag */
300 };
301 
302 #define	UDP_MAXPACKET_IPV6 (IP_MAXPACKET - UDPH_SIZE - IPV6_HDR_LEN)
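/* i.e. 65535 - 8 - 40 = 65487 bytes of UDP payload over IPv6 */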
303 
304 static	struct T_info_ack udp_g_t_info_ack_ipv6 = {
305 	T_INFO_ACK,
306 	UDP_MAXPACKET_IPV6,	/* TSDU_size.  Excl. headers */
307 	T_INVALID,	/* ETSU_size.  udp does not support expedited data. */
308 	T_INVALID,	/* CDATA_size. udp does not support connect data. */
309 	T_INVALID,	/* DDATA_size. udp does not support disconnect data. */
310 	sizeof (sin6_t), /* ADDR_size. */
311 	0,		/* OPT_size - not initialized here */
312 	UDP_MAXPACKET_IPV6,	/* TIDU_size. Excl. headers */
313 	T_CLTS,		/* SERV_type.  udp supports connection-less. */
314 	TS_UNBND,	/* CURRENT_state.  This is set from udp_state. */
315 	(XPG4_1|SENDZERO) /* PROVIDER_flag */
316 };
317 
318 /* largest UDP port number */
319 #define	UDP_MAX_PORT	65535
320 
321 /*
322  * Table of ND variables supported by udp.  These are loaded into us_nd
323  * in udp_open.
324  * All of these are alterable, within the min/max values given, at run time.
325  */
326 /* BEGIN CSTYLED */
327 udpparam_t udp_param_arr[] = {
328  /*min		max		value		name */
329  { 0L,		256,		32,		"udp_wroff_extra" },
330  { 1L,		255,		255,		"udp_ipv4_ttl" },
331  { 0,		IPV6_MAX_HOPS,	IPV6_DEFAULT_HOPS, "udp_ipv6_hoplimit"},
332  { 1024,	(32 * 1024),	1024,		"udp_smallest_nonpriv_port" },
333  { 0,		1,		1,		"udp_do_checksum" },
334  { 1024,	UDP_MAX_PORT,	(32 * 1024),	"udp_smallest_anon_port" },
335  { 1024,	UDP_MAX_PORT,	UDP_MAX_PORT,	"udp_largest_anon_port" },
336  { UDP_XMIT_LOWATER, (1<<30), UDP_XMIT_HIWATER,	"udp_xmit_hiwat"},
337  { 0,		     (1<<30), UDP_XMIT_LOWATER, "udp_xmit_lowat"},
338  { UDP_RECV_LOWATER, (1<<30), UDP_RECV_HIWATER,	"udp_recv_hiwat"},
339  { 65536,	(1<<30),	2*1024*1024,	"udp_max_buf"},
340  { 100,		60000,		1000,		"udp_ndd_get_info_interval"},
341 };
342 /* END CSTYLED */
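
/*
 * These variables can be inspected and changed at run time with ndd(1M),
 * for example (illustrative only):
 *
 *	# ndd -get /dev/udp udp_smallest_anon_port
 *	# ndd -set /dev/udp udp_ipv4_ttl 128
 */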
343 
344 /* Settable in /etc/system */
345 /* If set to 0, pick ephemeral port sequentially; otherwise randomly. */
346 uint32_t udp_random_anon_port = 1;
347 
348 /*
349  * Hook functions to enable cluster networking.
350  * On non-clustered systems these vectors must always be NULL
351  */
352 
353 void (*cl_inet_bind)(uchar_t protocol, sa_family_t addr_family,
354     uint8_t *laddrp, in_port_t lport) = NULL;
355 void (*cl_inet_unbind)(uint8_t protocol, sa_family_t addr_family,
356     uint8_t *laddrp, in_port_t lport) = NULL;
357 
358 typedef union T_primitives *t_primp_t;
359 
360 /*
361  * Return the next anonymous port in the privileged port range for
362  * bind checking.
363  *
364  * Trusted Extensions (TX) notes: TX allows the administrator to mark or
365  * reserve ports as Multilevel Ports (MLPs). An MLP has a special function
366  * on TX systems. Once a port is made an MLP, it is not available as an
367  * ordinary port. This creates "holes" in the port namespace. It may be
368  * necessary to skip these "holes" to find a suitable anon port.
369  */
370 static in_port_t
371 udp_get_next_priv_port(udp_t *udp)
372 {
373 	static in_port_t next_priv_port = IPPORT_RESERVED - 1;
374 	in_port_t nextport;
375 	boolean_t restart = B_FALSE;
376 	udp_stack_t *us = udp->udp_us;
377 
378 retry:
379 	if (next_priv_port < us->us_min_anonpriv_port ||
380 	    next_priv_port >= IPPORT_RESERVED) {
381 		next_priv_port = IPPORT_RESERVED - 1;
382 		if (restart)
383 			return (0);
384 		restart = B_TRUE;
385 	}
386 
387 	if (is_system_labeled() &&
388 	    (nextport = tsol_next_port(crgetzone(udp->udp_connp->conn_cred),
389 	    next_priv_port, IPPROTO_UDP, B_FALSE)) != 0) {
390 		next_priv_port = nextport;
391 		goto retry;
392 	}
393 
394 	return (next_priv_port--);
395 }
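
/*
 * For example, assuming us_min_anonpriv_port is 512 (the traditional
 * default), successive calls hand out 1023, 1022, ... down to 512 and then
 * wrap back to 1023, returning 0 only once the whole range has been tried.
 * On a labeled system, ports reserved as MLPs are skipped along the way.
 */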
396 
397 /* UDP bind hash report triggered via the Named Dispatch mechanism. */
398 /* ARGSUSED */
399 static int
400 udp_bind_hash_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr)
401 {
402 	udp_fanout_t	*udpf;
403 	int		i;
404 	zoneid_t	zoneid;
405 	conn_t		*connp;
406 	udp_t		*udp;
407 	udp_stack_t	*us;
408 
409 	connp = Q_TO_CONN(q);
410 	udp = connp->conn_udp;
411 	us = udp->udp_us;
412 
413 	/* Refer to comments in udp_status_report(). */
414 	if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) {
415 		if (ddi_get_lbolt() - us->us_last_ndd_get_info_time <
416 		    drv_usectohz(us->us_ndd_get_info_interval * 1000)) {
417 			(void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG);
418 			return (0);
419 		}
420 	}
421 	if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) {
422 		/* The following may work even if we cannot get a large buf. */
423 		(void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG);
424 		return (0);
425 	}
426 
427 	(void) mi_mpprintf(mp,
428 	    "UDP     " MI_COL_HDRPAD_STR
429 	/*   12345678[89ABCDEF] */
430 	    " zone lport src addr        dest addr       port  state");
431 	/*    1234 12345 xxx.xxx.xxx.xxx xxx.xxx.xxx.xxx 12345 UNBOUND */
432 
433 	zoneid = connp->conn_zoneid;
434 
435 	for (i = 0; i < us->us_bind_fanout_size; i++) {
436 		udpf = &us->us_bind_fanout[i];
437 		mutex_enter(&udpf->uf_lock);
438 
439 		/* Print the hash index. */
440 		udp = udpf->uf_udp;
441 		if (zoneid != GLOBAL_ZONEID) {
442 			/* skip to first entry in this zone; might be none */
443 			while (udp != NULL &&
444 			    udp->udp_connp->conn_zoneid != zoneid)
445 				udp = udp->udp_bind_hash;
446 		}
447 		if (udp != NULL) {
448 			uint_t print_len, buf_len;
449 
450 			buf_len = mp->b_cont->b_datap->db_lim -
451 			    mp->b_cont->b_wptr;
452 			print_len = snprintf((char *)mp->b_cont->b_wptr,
453 			    buf_len, "%d\n", i);
454 			if (print_len < buf_len) {
455 				mp->b_cont->b_wptr += print_len;
456 			} else {
457 				mp->b_cont->b_wptr += buf_len;
458 			}
459 			for (; udp != NULL; udp = udp->udp_bind_hash) {
460 				if (zoneid == GLOBAL_ZONEID ||
461 				    zoneid == udp->udp_connp->conn_zoneid)
462 					udp_report_item(mp->b_cont, udp);
463 			}
464 		}
465 		mutex_exit(&udpf->uf_lock);
466 	}
467 	us->us_last_ndd_get_info_time = ddi_get_lbolt();
468 	return (0);
469 }
470 
471 /*
472  * Hash list removal routine for udp_t structures.
473  */
474 static void
475 udp_bind_hash_remove(udp_t *udp, boolean_t caller_holds_lock)
476 {
477 	udp_t	*udpnext;
478 	kmutex_t *lockp;
479 	udp_stack_t *us = udp->udp_us;
480 
481 	if (udp->udp_ptpbhn == NULL)
482 		return;
483 
484 	/*
485 	 * Extract the lock pointer in case there are concurrent
486 	 * hash_remove's for this instance.
487 	 */
488 	ASSERT(udp->udp_port != 0);
489 	if (!caller_holds_lock) {
490 		lockp = &us->us_bind_fanout[UDP_BIND_HASH(udp->udp_port,
491 		    us->us_bind_fanout_size)].uf_lock;
492 		ASSERT(lockp != NULL);
493 		mutex_enter(lockp);
494 	}
495 	if (udp->udp_ptpbhn != NULL) {
496 		udpnext = udp->udp_bind_hash;
497 		if (udpnext != NULL) {
498 			udpnext->udp_ptpbhn = udp->udp_ptpbhn;
499 			udp->udp_bind_hash = NULL;
500 		}
501 		*udp->udp_ptpbhn = udpnext;
502 		udp->udp_ptpbhn = NULL;
503 	}
504 	if (!caller_holds_lock) {
505 		mutex_exit(lockp);
506 	}
507 }
508 
509 static void
510 udp_bind_hash_insert(udp_fanout_t *uf, udp_t *udp)
511 {
512 	udp_t	**udpp;
513 	udp_t	*udpnext;
514 
515 	ASSERT(MUTEX_HELD(&uf->uf_lock));
516 	ASSERT(udp->udp_ptpbhn == NULL);
517 	udpp = &uf->uf_udp;
518 	udpnext = udpp[0];
519 	if (udpnext != NULL) {
520 		/*
521 		 * If the new udp is bound to the INADDR_ANY address
522 		 * and the first one in the list is not bound to
523 		 * INADDR_ANY we skip all entries until we find the
524 		 * first one bound to INADDR_ANY.
525 		 * This makes sure that applications binding to a
526 		 * specific address get preference over those binding to
527 		 * INADDR_ANY.
528 		 */
529 		if (V6_OR_V4_INADDR_ANY(udp->udp_bound_v6src) &&
530 		    !V6_OR_V4_INADDR_ANY(udpnext->udp_bound_v6src)) {
531 			while ((udpnext = udpp[0]) != NULL &&
532 			    !V6_OR_V4_INADDR_ANY(
533 			    udpnext->udp_bound_v6src)) {
534 				udpp = &(udpnext->udp_bind_hash);
535 			}
536 			if (udpnext != NULL)
537 				udpnext->udp_ptpbhn = &udp->udp_bind_hash;
538 		} else {
539 			udpnext->udp_ptpbhn = &udp->udp_bind_hash;
540 		}
541 	}
542 	udp->udp_bind_hash = udpnext;
543 	udp->udp_ptpbhn = udpp;
544 	udpp[0] = udp;
545 }
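
/*
 * For illustration (hypothetical addresses): after inserting endpoints
 * bound to 10.0.0.1, 10.0.0.2 and INADDR_ANY on the same port, the bucket
 * looks like
 *
 *	uf_udp -> udp(10.0.0.2) -> udp(10.0.0.1) -> udp(INADDR_ANY) -> NULL
 *
 * with each udp_ptpbhn pointing back at the uf_udp or udp_bind_hash slot
 * that references it.  Specific-address binds stay ahead of wildcard binds,
 * giving them the preference described in the comment above.
 */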
546 
547 /*
548  * This routine is called to handle each O_T_BIND_REQ/T_BIND_REQ message
549  * passed to udp_wput.
550  * It associates a port number and local address with the stream.
551  * The O_T_BIND_REQ/T_BIND_REQ is passed downstream to ip with the UDP
552  * protocol type (IPPROTO_UDP) placed in the message following the address.
553  * A T_BIND_ACK message is passed upstream when ip acknowledges the request.
554  * (Called as writer.)
555  *
556  * Note that UDP over IPv4 and IPv6 sockets can use the same port number
557  * without setting SO_REUSEADDR. This is needed so that they
558  * can be viewed as two independent transport protocols.
559  * However, anonymouns ports are allocated from the same range to avoid
560  * duplicating the us->us_next_port_to_try.
561  */
562 static void
563 udp_bind(queue_t *q, mblk_t *mp)
564 {
565 	sin_t		*sin;
566 	sin6_t		*sin6;
567 	mblk_t		*mp1;
568 	in_port_t	port;		/* Host byte order */
569 	in_port_t	requested_port;	/* Host byte order */
570 	struct T_bind_req *tbr;
571 	int		count;
572 	in6_addr_t	v6src;
573 	boolean_t	bind_to_req_port_only;
574 	int		loopmax;
575 	udp_fanout_t	*udpf;
576 	in_port_t	lport;		/* Network byte order */
577 	zoneid_t	zoneid;
578 	conn_t		*connp;
579 	udp_t		*udp;
580 	boolean_t	is_inaddr_any;
581 	mlp_type_t	addrtype, mlptype;
582 	udp_stack_t	*us;
583 
584 	connp = Q_TO_CONN(q);
585 	udp = connp->conn_udp;
586 	us = udp->udp_us;
587 	if ((mp->b_wptr - mp->b_rptr) < sizeof (*tbr)) {
588 		(void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
589 		    "udp_bind: bad req, len %u",
590 		    (uint_t)(mp->b_wptr - mp->b_rptr));
591 		udp_err_ack(q, mp, TPROTO, 0);
592 		return;
593 	}
594 	if (udp->udp_state != TS_UNBND) {
595 		(void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
596 		    "udp_bind: bad state, %u", udp->udp_state);
597 		udp_err_ack(q, mp, TOUTSTATE, 0);
598 		return;
599 	}
600 	/*
601 	 * Reallocate the message to make sure we have enough room for an
602 	 * address and the protocol type.
603 	 */
604 	mp1 = reallocb(mp, sizeof (struct T_bind_ack) + sizeof (sin6_t) + 1, 1);
605 	if (!mp1) {
606 		udp_err_ack(q, mp, TSYSERR, ENOMEM);
607 		return;
608 	}
609 
610 	mp = mp1;
611 	tbr = (struct T_bind_req *)mp->b_rptr;
612 	switch (tbr->ADDR_length) {
613 	case 0:			/* Request for a generic port */
614 		tbr->ADDR_offset = sizeof (struct T_bind_req);
615 		if (udp->udp_family == AF_INET) {
616 			tbr->ADDR_length = sizeof (sin_t);
617 			sin = (sin_t *)&tbr[1];
618 			*sin = sin_null;
619 			sin->sin_family = AF_INET;
620 			mp->b_wptr = (uchar_t *)&sin[1];
621 		} else {
622 			ASSERT(udp->udp_family == AF_INET6);
623 			tbr->ADDR_length = sizeof (sin6_t);
624 			sin6 = (sin6_t *)&tbr[1];
625 			*sin6 = sin6_null;
626 			sin6->sin6_family = AF_INET6;
627 			mp->b_wptr = (uchar_t *)&sin6[1];
628 		}
629 		port = 0;
630 		break;
631 
632 	case sizeof (sin_t):	/* Complete IPv4 address */
633 		sin = (sin_t *)mi_offset_param(mp, tbr->ADDR_offset,
634 		    sizeof (sin_t));
635 		if (sin == NULL || !OK_32PTR((char *)sin)) {
636 			udp_err_ack(q, mp, TSYSERR, EINVAL);
637 			return;
638 		}
639 		if (udp->udp_family != AF_INET ||
640 		    sin->sin_family != AF_INET) {
641 			udp_err_ack(q, mp, TSYSERR, EAFNOSUPPORT);
642 			return;
643 		}
644 		port = ntohs(sin->sin_port);
645 		break;
646 
647 	case sizeof (sin6_t):	/* complete IPv6 address */
648 		sin6 = (sin6_t *)mi_offset_param(mp, tbr->ADDR_offset,
649 		    sizeof (sin6_t));
650 		if (sin6 == NULL || !OK_32PTR((char *)sin6)) {
651 			udp_err_ack(q, mp, TSYSERR, EINVAL);
652 			return;
653 		}
654 		if (udp->udp_family != AF_INET6 ||
655 		    sin6->sin6_family != AF_INET6) {
656 			udp_err_ack(q, mp, TSYSERR, EAFNOSUPPORT);
657 			return;
658 		}
659 		port = ntohs(sin6->sin6_port);
660 		break;
661 
662 	default:		/* Invalid request */
663 		(void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
664 		    "udp_bind: bad ADDR_length length %u", tbr->ADDR_length);
665 		udp_err_ack(q, mp, TBADADDR, 0);
666 		return;
667 	}
668 
669 	requested_port = port;
670 
671 	if (requested_port == 0 || tbr->PRIM_type == O_T_BIND_REQ)
672 		bind_to_req_port_only = B_FALSE;
673 	else			/* T_BIND_REQ and requested_port != 0 */
674 		bind_to_req_port_only = B_TRUE;
675 
676 	if (requested_port == 0) {
677 		/*
678 		 * If the application passed in zero for the port number, it
679 		 * doesn't care which port number we bind to. Get one in the
680 		 * valid range.
681 		 */
682 		if (udp->udp_anon_priv_bind) {
683 			port = udp_get_next_priv_port(udp);
684 		} else {
685 			port = udp_update_next_port(udp,
686 			    us->us_next_port_to_try, B_TRUE);
687 		}
688 	} else {
689 		/*
690 		 * If the port is in the well-known privileged range,
691 		 * make sure the caller was privileged.
692 		 */
693 		int i;
694 		boolean_t priv = B_FALSE;
695 
696 		if (port < us->us_smallest_nonpriv_port) {
697 			priv = B_TRUE;
698 		} else {
699 			for (i = 0; i < us->us_num_epriv_ports; i++) {
700 				if (port == us->us_epriv_ports[i]) {
701 					priv = B_TRUE;
702 					break;
703 				}
704 			}
705 		}
706 
707 		if (priv) {
708 			cred_t *cr = DB_CREDDEF(mp, connp->conn_cred);
709 
710 			if (secpolicy_net_privaddr(cr, port,
711 			    IPPROTO_UDP) != 0) {
712 				udp_err_ack(q, mp, TACCES, 0);
713 				return;
714 			}
715 		}
716 	}
717 
718 	if (port == 0) {
719 		udp_err_ack(q, mp, TNOADDR, 0);
720 		return;
721 	}
722 
723 	/*
724 	 * The state must be TS_UNBND. TPI mandates that users must send
725 	 * TPI primitives only 1 at a time and wait for the response before
726 	 * sending the next primitive.
727 	 */
728 	rw_enter(&udp->udp_rwlock, RW_WRITER);
729 	if (udp->udp_state != TS_UNBND || udp->udp_pending_op != -1) {
730 		rw_exit(&udp->udp_rwlock);
731 		(void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
732 		    "udp_bind: bad state, %u", udp->udp_state);
733 		udp_err_ack(q, mp, TOUTSTATE, 0);
734 		return;
735 	}
736 	udp->udp_pending_op = tbr->PRIM_type;
737 	/*
738 	 * Copy the source address into our udp structure. This address
739 	 * may still be zero; if so, IP will fill in the correct address
740 	 * each time an outbound packet is passed to it. Since the udp is
741 	 * not yet in the bind hash list, we don't grab the uf_lock to
742 	 * change udp_ipversion
743 	 */
744 	if (udp->udp_family == AF_INET) {
745 		ASSERT(sin != NULL);
746 		ASSERT(udp->udp_ipversion == IPV4_VERSION);
747 		udp->udp_max_hdr_len = IP_SIMPLE_HDR_LENGTH + UDPH_SIZE +
748 		    udp->udp_ip_snd_options_len;
749 		IN6_IPADDR_TO_V4MAPPED(sin->sin_addr.s_addr, &v6src);
750 	} else {
751 		ASSERT(sin6 != NULL);
752 		v6src = sin6->sin6_addr;
753 		if (IN6_IS_ADDR_V4MAPPED(&v6src)) {
754 			/*
755 			 * no need to hold the uf_lock to set the udp_ipversion
756 			 * since we are not yet in the fanout list
757 			 */
758 			udp->udp_ipversion = IPV4_VERSION;
759 			udp->udp_max_hdr_len = IP_SIMPLE_HDR_LENGTH +
760 			    UDPH_SIZE + udp->udp_ip_snd_options_len;
761 		} else {
762 			udp->udp_ipversion = IPV6_VERSION;
763 			udp->udp_max_hdr_len = udp->udp_sticky_hdrs_len;
764 		}
765 	}
766 
767 	/*
768 	 * If udp_reuseaddr is not set, then we have to make sure that
769 	 * the IP address and port number the application requested
770 	 * (or we selected for the application) is not being used by
771 	 * another stream.  If another stream is already using the
772 	 * requested IP address and port, the behavior depends on
773 	 * "bind_to_req_port_only". If set, the bind fails; otherwise we
774 	 * search for an unused port to bind to the stream.
775 	 *
776 	 * As per the BSD semantics, as modified by the Deering multicast
777 	 * changes, if udp_reuseaddr is set, then we allow multiple binds
778 	 * to the same port independent of the local IP address.
779 	 *
780 	 * This is slightly different than in SunOS 4.X which did not
781 	 * support IP multicast. Note that the change implemented by the
782 	 * Deering multicast code affects all binds - not only binding
783 	 * to IP multicast addresses.
784 	 *
785 	 * Note that when binding to port zero we ignore SO_REUSEADDR in
786 	 * order to guarantee a unique port.
787 	 */
788 
789 	count = 0;
790 	if (udp->udp_anon_priv_bind) {
791 		/*
792 		 * loopmax = (IPPORT_RESERVED-1) -
793 		 *    us->us_min_anonpriv_port + 1
794 		 */
795 		loopmax = IPPORT_RESERVED - us->us_min_anonpriv_port;
796 	} else {
797 		loopmax = us->us_largest_anon_port -
798 		    us->us_smallest_anon_port + 1;
799 	}
800 
801 	is_inaddr_any = V6_OR_V4_INADDR_ANY(v6src);
802 	zoneid = connp->conn_zoneid;
803 
804 	for (;;) {
805 		udp_t		*udp1;
806 		boolean_t	found_exclbind = B_FALSE;
807 
808 		/*
809 		 * Walk through the list of udp streams bound to
810 		 * requested port with the same IP address.
811 		 */
812 		lport = htons(port);
813 		udpf = &us->us_bind_fanout[UDP_BIND_HASH(lport,
814 		    us->us_bind_fanout_size)];
815 		mutex_enter(&udpf->uf_lock);
816 		for (udp1 = udpf->uf_udp; udp1 != NULL;
817 		    udp1 = udp1->udp_bind_hash) {
818 			if (lport != udp1->udp_port)
819 				continue;
820 
821 			/*
822 			 * On a labeled system, we must treat bindings to ports
823 			 * on shared IP addresses by sockets with MAC exemption
824 			 * privilege as being in all zones, as there's
825 			 * otherwise no way to identify the right receiver.
826 			 */
827 			if (!(IPCL_ZONE_MATCH(udp1->udp_connp, zoneid) ||
828 			    IPCL_ZONE_MATCH(connp,
829 			    udp1->udp_connp->conn_zoneid)) &&
830 			    !connp->conn_mac_exempt && \
831 			    !udp1->udp_connp->conn_mac_exempt)
832 				continue;
833 
834 			/*
835 			 * If UDP_EXCLBIND is set for either the bound or
836 			 * binding endpoint, the semantics of bind
837 			 * is changed according to the following chart.
838 			 *
839 			 * spec = specified address (v4 or v6)
840 			 * unspec = unspecified address (v4 or v6)
841 			 * A = specified addresses are different for endpoints
842 			 *
843 			 * bound	bind to		allowed?
844 			 * -------------------------------------
845 			 * unspec	unspec		no
846 			 * unspec	spec		no
847 			 * spec		unspec		no
848 			 * spec		spec		yes if A
849 			 *
850 			 * For labeled systems, SO_MAC_EXEMPT behaves the same
851 			 * as UDP_EXCLBIND, except that zoneid is ignored.
852 			 */
853 			if (udp1->udp_exclbind || udp->udp_exclbind ||
854 			    udp1->udp_connp->conn_mac_exempt ||
855 			    connp->conn_mac_exempt) {
856 				if (V6_OR_V4_INADDR_ANY(
857 				    udp1->udp_bound_v6src) ||
858 				    is_inaddr_any ||
859 				    IN6_ARE_ADDR_EQUAL(&udp1->udp_bound_v6src,
860 				    &v6src)) {
861 					found_exclbind = B_TRUE;
862 					break;
863 				}
864 				continue;
865 			}
866 
867 			/*
868 			 * Check ipversion to allow IPv4 and IPv6 sockets to
869 			 * have disjoint port number spaces.
870 			 */
871 			if (udp->udp_ipversion != udp1->udp_ipversion) {
872 
873 				/*
874 				 * On the first time through the loop, if
875 				 * the user intentionally specified a
876 				 * particular port number, then ignore any
877 				 * bindings of the other protocol that may
878 				 * conflict. This allows the user to bind IPv6
879 				 * alone and get both v4 and v6, or bind both
880 				 * and get each separately. On subsequent
881 				 * times through the loop, we're checking a
882 				 * port that we chose (not the user) and thus
883 				 * we do not allow casual duplicate bindings.
884 				 */
885 				if (count == 0 && requested_port != 0)
886 					continue;
887 			}
888 
889 			/*
890 			 * No difference depending on SO_REUSEADDR.
891 			 *
892 			 * If the existing port is bound to a
893 			 * non-wildcard IP address and
894 			 * the requesting stream is bound to
895 			 * a distinct, different IP address
896 			 * (also non-wildcard), keep going.
897 			 */
898 			if (!is_inaddr_any &&
899 			    !V6_OR_V4_INADDR_ANY(udp1->udp_bound_v6src) &&
900 			    !IN6_ARE_ADDR_EQUAL(&udp1->udp_bound_v6src,
901 			    &v6src)) {
902 				continue;
903 			}
904 			break;
905 		}
906 
907 		if (!found_exclbind &&
908 		    (udp->udp_reuseaddr && requested_port != 0)) {
909 			break;
910 		}
911 
912 		if (udp1 == NULL) {
913 			/*
914 			 * No other stream has this IP address
915 			 * and port number. We can use it.
916 			 */
917 			break;
918 		}
919 		mutex_exit(&udpf->uf_lock);
920 		if (bind_to_req_port_only) {
921 			/*
922 			 * We get here only when the requested port
923 			 * is bound (and only on the first iteration
924 			 * of the for() loop).
925 			 *
926 			 * The semantics of this bind request
927 			 * require it to fail so we return from
928 			 * the routine (and exit the loop).
929 			 *
930 			 */
931 			udp->udp_pending_op = -1;
932 			rw_exit(&udp->udp_rwlock);
933 			udp_err_ack(q, mp, TADDRBUSY, 0);
934 			return;
935 		}
936 
937 		if (udp->udp_anon_priv_bind) {
938 			port = udp_get_next_priv_port(udp);
939 		} else {
940 			if ((count == 0) && (requested_port != 0)) {
941 				/*
942 				 * If the application wants us to find
943 				 * a port, get one to start with. Set
944 				 * requested_port to 0, so that we will
945 				 * update us->us_next_port_to_try below.
946 				 */
947 				port = udp_update_next_port(udp,
948 				    us->us_next_port_to_try, B_TRUE);
949 				requested_port = 0;
950 			} else {
951 				port = udp_update_next_port(udp, port + 1,
952 				    B_FALSE);
953 			}
954 		}
955 
956 		if (port == 0 || ++count >= loopmax) {
957 			/*
958 			 * We've tried every possible port number and
959 			 * there are none available, so send an error
960 			 * to the user.
961 			 */
962 			udp->udp_pending_op = -1;
963 			rw_exit(&udp->udp_rwlock);
964 			udp_err_ack(q, mp, TNOADDR, 0);
965 			return;
966 		}
967 	}
968 
969 	/*
970 	 * Copy the source address into our udp structure.  This address
971 	 * may still be zero; if so, ip will fill in the correct address
972 	 * each time an outbound packet is passed to it.
973 	 * If we are binding to a broadcast or multicast address then
974 	 * udp_bind_ack will clear the source address when it receives
975 	 * the T_BIND_ACK.
976 	 */
977 	udp->udp_v6src = udp->udp_bound_v6src = v6src;
978 	udp->udp_port = lport;
979 	/*
980 	 * Now reset the next anonymous port if the application requested
981 	 * an anonymous port, or we handed out the next anonymous port.
982 	 */
983 	if ((requested_port == 0) && (!udp->udp_anon_priv_bind)) {
984 		us->us_next_port_to_try = port + 1;
985 	}
986 
987 	/* Initialize the O_T_BIND_REQ/T_BIND_REQ for ip. */
988 	if (udp->udp_family == AF_INET) {
989 		sin->sin_port = udp->udp_port;
990 	} else {
991 		int error;
992 
993 		sin6->sin6_port = udp->udp_port;
994 		/* Rebuild the header template */
995 		error = udp_build_hdrs(udp);
996 		if (error != 0) {
997 			udp->udp_pending_op = -1;
998 			rw_exit(&udp->udp_rwlock);
999 			mutex_exit(&udpf->uf_lock);
1000 			udp_err_ack(q, mp, TSYSERR, error);
1001 			return;
1002 		}
1003 	}
1004 	udp->udp_state = TS_IDLE;
1005 	udp_bind_hash_insert(udpf, udp);
1006 	mutex_exit(&udpf->uf_lock);
1007 	rw_exit(&udp->udp_rwlock);
1008 
1009 	if (cl_inet_bind) {
1010 		/*
1011 		 * Running in cluster mode - register bind information
1012 		 */
1013 		if (udp->udp_ipversion == IPV4_VERSION) {
1014 			(*cl_inet_bind)(IPPROTO_UDP, AF_INET,
1015 			    (uint8_t *)(&V4_PART_OF_V6(udp->udp_v6src)),
1016 			    (in_port_t)udp->udp_port);
1017 		} else {
1018 			(*cl_inet_bind)(IPPROTO_UDP, AF_INET6,
1019 			    (uint8_t *)&(udp->udp_v6src),
1020 			    (in_port_t)udp->udp_port);
1021 		}
1022 
1023 	}
1024 
1025 	connp->conn_anon_port = (is_system_labeled() && requested_port == 0);
1026 	if (is_system_labeled() && (!connp->conn_anon_port ||
1027 	    connp->conn_anon_mlp)) {
1028 		uint16_t mlpport;
1029 		cred_t *cr = connp->conn_cred;
1030 		zone_t *zone;
1031 
1032 		zone = crgetzone(cr);
1033 		connp->conn_mlp_type = udp->udp_recvucred ? mlptBoth :
1034 		    mlptSingle;
1035 		addrtype = tsol_mlp_addr_type(zone->zone_id, IPV6_VERSION,
1036 		    &v6src, us->us_netstack->netstack_ip);
1037 		if (addrtype == mlptSingle) {
1038 			rw_enter(&udp->udp_rwlock, RW_WRITER);
1039 			udp->udp_pending_op = -1;
1040 			rw_exit(&udp->udp_rwlock);
1041 			udp_err_ack(q, mp, TNOADDR, 0);
1042 			connp->conn_anon_port = B_FALSE;
1043 			connp->conn_mlp_type = mlptSingle;
1044 			return;
1045 		}
1046 		mlpport = connp->conn_anon_port ? PMAPPORT : port;
1047 		mlptype = tsol_mlp_port_type(zone, IPPROTO_UDP, mlpport,
1048 		    addrtype);
1049 		if (mlptype != mlptSingle &&
1050 		    (connp->conn_mlp_type == mlptSingle ||
1051 		    secpolicy_net_bindmlp(cr) != 0)) {
1052 			if (udp->udp_debug) {
1053 				(void) strlog(UDP_MOD_ID, 0, 1,
1054 				    SL_ERROR|SL_TRACE,
1055 				    "udp_bind: no priv for multilevel port %d",
1056 				    mlpport);
1057 			}
1058 			rw_enter(&udp->udp_rwlock, RW_WRITER);
1059 			udp->udp_pending_op = -1;
1060 			rw_exit(&udp->udp_rwlock);
1061 			udp_err_ack(q, mp, TACCES, 0);
1062 			connp->conn_anon_port = B_FALSE;
1063 			connp->conn_mlp_type = mlptSingle;
1064 			return;
1065 		}
1066 
1067 		/*
1068 		 * If we're specifically binding a shared IP address and the
1069 		 * port is MLP on shared addresses, then check to see if this
1070 		 * zone actually owns the MLP.  Reject if not.
1071 		 */
1072 		if (mlptype == mlptShared && addrtype == mlptShared) {
1073 			/*
1074 			 * No need to handle exclusive-stack zones since
1075 			 * ALL_ZONES only applies to the shared stack.
1076 			 */
1077 			zoneid_t mlpzone;
1078 
1079 			mlpzone = tsol_mlp_findzone(IPPROTO_UDP,
1080 			    htons(mlpport));
1081 			if (connp->conn_zoneid != mlpzone) {
1082 				if (udp->udp_debug) {
1083 					(void) strlog(UDP_MOD_ID, 0, 1,
1084 					    SL_ERROR|SL_TRACE,
1085 					    "udp_bind: attempt to bind port "
1086 					    "%d on shared addr in zone %d "
1087 					    "(should be %d)",
1088 					    mlpport, connp->conn_zoneid,
1089 					    mlpzone);
1090 				}
1091 				rw_enter(&udp->udp_rwlock, RW_WRITER);
1092 				udp->udp_pending_op = -1;
1093 				rw_exit(&udp->udp_rwlock);
1094 				udp_err_ack(q, mp, TACCES, 0);
1095 				connp->conn_anon_port = B_FALSE;
1096 				connp->conn_mlp_type = mlptSingle;
1097 				return;
1098 			}
1099 		}
1100 		if (connp->conn_anon_port) {
1101 			int error;
1102 
1103 			error = tsol_mlp_anon(zone, mlptype, connp->conn_ulp,
1104 			    port, B_TRUE);
1105 			if (error != 0) {
1106 				if (udp->udp_debug) {
1107 					(void) strlog(UDP_MOD_ID, 0, 1,
1108 					    SL_ERROR|SL_TRACE,
1109 					    "udp_bind: cannot establish anon "
1110 					    "MLP for port %d", port);
1111 				}
1112 				rw_enter(&udp->udp_rwlock, RW_WRITER);
1113 				udp->udp_pending_op = -1;
1114 				rw_exit(&udp->udp_rwlock);
1115 				udp_err_ack(q, mp, TACCES, 0);
1116 				connp->conn_anon_port = B_FALSE;
1117 				connp->conn_mlp_type = mlptSingle;
1118 				return;
1119 			}
1120 		}
1121 		connp->conn_mlp_type = mlptype;
1122 	}
1123 
1124 	/* Pass the protocol number in the message following the address. */
1125 	*mp->b_wptr++ = IPPROTO_UDP;
1126 	if (!V6_OR_V4_INADDR_ANY(udp->udp_v6src)) {
1127 		/*
1128 		 * Append a request for an IRE if udp_v6src is not
1129 		 * zero (IPv4 - INADDR_ANY, or IPv6 - all-zeroes address).
1130 		 */
1131 		mp->b_cont = allocb(sizeof (ire_t), BPRI_HI);
1132 		if (!mp->b_cont) {
1133 			rw_enter(&udp->udp_rwlock, RW_WRITER);
1134 			udp->udp_pending_op = -1;
1135 			rw_exit(&udp->udp_rwlock);
1136 			udp_err_ack(q, mp, TSYSERR, ENOMEM);
1137 			return;
1138 		}
1139 		mp->b_cont->b_wptr += sizeof (ire_t);
1140 		mp->b_cont->b_datap->db_type = IRE_DB_REQ_TYPE;
1141 	}
1142 	if (udp->udp_family == AF_INET6)
1143 		mp = ip_bind_v6(q, mp, connp, NULL);
1144 	else
1145 		mp = ip_bind_v4(q, mp, connp);
1146 
1147 	/* The above returns NULL if the bind needs to be deferred */
1148 	if (mp != NULL)
1149 		udp_bind_result(connp, mp);
1150 	else
1151 		CONN_INC_REF(connp);
1152 }
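
/*
 * For reference, a minimal user-level sketch (hypothetical, not part of
 * this file; headers, error handling and buffer alignment omitted) of the
 * T_BIND_REQ a TPI client can send over /dev/udp, which is what udp_bind()
 * above parses: a struct T_bind_req immediately followed by the sockaddr
 * at ADDR_offset.
 *
 *	int fd = open("/dev/udp", O_RDWR);
 *	char buf[sizeof (struct T_bind_req) + sizeof (struct sockaddr_in)];
 *	struct T_bind_req *tbr = (struct T_bind_req *)buf;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&tbr[1];
 *	struct strbuf ctl;
 *
 *	tbr->PRIM_type = T_BIND_REQ;
 *	tbr->ADDR_length = sizeof (struct sockaddr_in);
 *	tbr->ADDR_offset = sizeof (struct T_bind_req);
 *	tbr->CONIND_number = 0;
 *	(void) memset(sin, 0, sizeof (*sin));
 *	sin->sin_family = AF_INET;
 *	sin->sin_port = htons(7777);		(7777 is just an example)
 *	ctl.len = sizeof (buf);
 *	ctl.buf = buf;
 *	(void) putmsg(fd, &ctl, NULL, 0);
 *	... getmsg() then returns the T_BIND_ACK or T_ERROR_ACK ...
 */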
1153 
1154 /*
1155  * This is called from ip_wput_nondata to handle the results of a
1156  * deferred UDP bind. It is called once the bind has been completed.
1157  */
1158 void
1159 udp_resume_bind(conn_t *connp, mblk_t *mp)
1160 {
1161 	ASSERT(connp != NULL && IPCL_IS_UDP(connp));
1162 
1163 	udp_bind_result(connp, mp);
1164 
1165 	CONN_OPER_PENDING_DONE(connp);
1166 }
1167 
1168 /*
1169  * This routine handles each T_CONN_REQ message passed to udp.  It
1170  * associates a default destination address with the stream.
1171  *
1172  * This routine sends down a T_BIND_REQ to IP with the following mblks:
1173  *	T_BIND_REQ	- specifying local and remote address/port
1174  *	IRE_DB_REQ_TYPE	- to get an IRE back containing ire_type and src
1175  *	T_OK_ACK	- for the T_CONN_REQ
1176  *	T_CONN_CON	- to keep the TPI user happy
1177  *
1178  * The connect completes in udp_bind_result.
1179  * When a T_BIND_ACK is received information is extracted from the IRE
1180  * and the two appended messages are sent to the TPI user.
1181  * Should udp_bind_result receive T_ERROR_ACK for the T_BIND_REQ it will
1182  * convert it to an error ack for the appropriate primitive.
1183  */
1184 static void
1185 udp_connect(queue_t *q, mblk_t *mp)
1186 {
1187 	sin6_t	*sin6;
1188 	sin_t	*sin;
1189 	struct T_conn_req	*tcr;
1190 	in6_addr_t v6dst;
1191 	ipaddr_t v4dst;
1192 	uint16_t dstport;
1193 	uint32_t flowinfo;
1194 	mblk_t	*mp1, *mp2;
1195 	udp_fanout_t	*udpf;
1196 	udp_t	*udp, *udp1;
1197 	ushort_t	ipversion;
1198 	udp_stack_t	*us;
1199 	conn_t		*connp = Q_TO_CONN(q);
1200 
1201 	udp = connp->conn_udp;
1202 	tcr = (struct T_conn_req *)mp->b_rptr;
1203 	us = udp->udp_us;
1204 
1205 	/* A bit of sanity checking */
1206 	if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_conn_req)) {
1207 		udp_err_ack(q, mp, TPROTO, 0);
1208 		return;
1209 	}
1210 
1211 	if (tcr->OPT_length != 0) {
1212 		udp_err_ack(q, mp, TBADOPT, 0);
1213 		return;
1214 	}
1215 
1216 	/*
1217 	 * Determine the packet type based on the type of address passed in;
1218 	 * the request should contain an IPv4 or IPv6 address.
1219 	 * Make sure that the address family matches the family
1220 	 * of the address passed down.
1221 	 */
1222 	switch (tcr->DEST_length) {
1223 	default:
1224 		udp_err_ack(q, mp, TBADADDR, 0);
1225 		return;
1226 
1227 	case sizeof (sin_t):
1228 		sin = (sin_t *)mi_offset_param(mp, tcr->DEST_offset,
1229 		    sizeof (sin_t));
1230 		if (sin == NULL || !OK_32PTR((char *)sin)) {
1231 			udp_err_ack(q, mp, TSYSERR, EINVAL);
1232 			return;
1233 		}
1234 		if (udp->udp_family != AF_INET ||
1235 		    sin->sin_family != AF_INET) {
1236 			udp_err_ack(q, mp, TSYSERR, EAFNOSUPPORT);
1237 			return;
1238 		}
1239 		v4dst = sin->sin_addr.s_addr;
1240 		dstport = sin->sin_port;
1241 		IN6_IPADDR_TO_V4MAPPED(v4dst, &v6dst);
1242 		ASSERT(udp->udp_ipversion == IPV4_VERSION);
1243 		ipversion = IPV4_VERSION;
1244 		break;
1245 
1246 	case sizeof (sin6_t):
1247 		sin6 = (sin6_t *)mi_offset_param(mp, tcr->DEST_offset,
1248 		    sizeof (sin6_t));
1249 		if (sin6 == NULL || !OK_32PTR((char *)sin6)) {
1250 			udp_err_ack(q, mp, TSYSERR, EINVAL);
1251 			return;
1252 		}
1253 		if (udp->udp_family != AF_INET6 ||
1254 		    sin6->sin6_family != AF_INET6) {
1255 			udp_err_ack(q, mp, TSYSERR, EAFNOSUPPORT);
1256 			return;
1257 		}
1258 		v6dst = sin6->sin6_addr;
1259 		dstport = sin6->sin6_port;
1260 		if (IN6_IS_ADDR_V4MAPPED(&v6dst)) {
1261 			IN6_V4MAPPED_TO_IPADDR(&v6dst, v4dst);
1262 			ipversion = IPV4_VERSION;
1263 			flowinfo = 0;
1264 		} else {
1265 			ipversion = IPV6_VERSION;
1266 			flowinfo = sin6->sin6_flowinfo;
1267 		}
1268 		break;
1269 	}
1270 	if (dstport == 0) {
1271 		udp_err_ack(q, mp, TBADADDR, 0);
1272 		return;
1273 	}
1274 
1275 	rw_enter(&udp->udp_rwlock, RW_WRITER);
1276 
1277 	/*
1278 	 * This UDP must have bound to a port already before doing a connect.
1279 	 * TPI mandates that users must send TPI primitives only 1 at a time
1280 	 * and wait for the response before sending the next primitive.
1281 	 */
1282 	if (udp->udp_state == TS_UNBND || udp->udp_pending_op != -1) {
1283 		rw_exit(&udp->udp_rwlock);
1284 		(void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
1285 		    "udp_connect: bad state, %u", udp->udp_state);
1286 		udp_err_ack(q, mp, TOUTSTATE, 0);
1287 		return;
1288 	}
1289 	udp->udp_pending_op = T_CONN_REQ;
1290 	ASSERT(udp->udp_port != 0 && udp->udp_ptpbhn != NULL);
1291 
1292 	if (ipversion == IPV4_VERSION) {
1293 		udp->udp_max_hdr_len = IP_SIMPLE_HDR_LENGTH + UDPH_SIZE +
1294 		    udp->udp_ip_snd_options_len;
1295 	} else {
1296 		udp->udp_max_hdr_len = udp->udp_sticky_hdrs_len;
1297 	}
1298 
1299 	udpf = &us->us_bind_fanout[UDP_BIND_HASH(udp->udp_port,
1300 	    us->us_bind_fanout_size)];
1301 
1302 	mutex_enter(&udpf->uf_lock);
1303 	if (udp->udp_state == TS_DATA_XFER) {
1304 		/* Already connected - clear out state */
1305 		udp->udp_v6src = udp->udp_bound_v6src;
1306 		udp->udp_state = TS_IDLE;
1307 	}
1308 
1309 	/*
1310 	 * Create a default IP header with no IP options.
1311 	 */
1312 	udp->udp_dstport = dstport;
1313 	udp->udp_ipversion = ipversion;
1314 	if (ipversion == IPV4_VERSION) {
1315 		/*
1316 		 * Interpret a zero destination to mean loopback.
1317 		 * Update the T_CONN_REQ (sin/sin6) since it is used to
1318 		 * generate the T_CONN_CON.
1319 		 */
1320 		if (v4dst == INADDR_ANY) {
1321 			v4dst = htonl(INADDR_LOOPBACK);
1322 			IN6_IPADDR_TO_V4MAPPED(v4dst, &v6dst);
1323 			if (udp->udp_family == AF_INET) {
1324 				sin->sin_addr.s_addr = v4dst;
1325 			} else {
1326 				sin6->sin6_addr = v6dst;
1327 			}
1328 		}
1329 		udp->udp_v6dst = v6dst;
1330 		udp->udp_flowinfo = 0;
1331 
1332 		/*
1333 		 * If the destination address is multicast and
1334 		 * an outgoing multicast interface has been set,
1335 		 * use the address of that interface as our
1336 		 * source address if no source address has been set.
1337 		 */
1338 		if (V4_PART_OF_V6(udp->udp_v6src) == INADDR_ANY &&
1339 		    CLASSD(v4dst) &&
1340 		    udp->udp_multicast_if_addr != INADDR_ANY) {
1341 			IN6_IPADDR_TO_V4MAPPED(udp->udp_multicast_if_addr,
1342 			    &udp->udp_v6src);
1343 		}
1344 	} else {
1345 		ASSERT(udp->udp_ipversion == IPV6_VERSION);
1346 		/*
1347 		 * Interpret a zero destination to mean loopback.
1348 		 * Update the T_CONN_REQ (sin/sin6) since it is used to
1349 		 * generate the T_CONN_CON.
1350 		 */
1351 		if (IN6_IS_ADDR_UNSPECIFIED(&v6dst)) {
1352 			v6dst = ipv6_loopback;
1353 			sin6->sin6_addr = v6dst;
1354 		}
1355 		udp->udp_v6dst = v6dst;
1356 		udp->udp_flowinfo = flowinfo;
1357 		/*
1358 		 * If the destination address is multicast and
1359 		 * an outgoing multicast interface has been set,
1360 		 * then the ip bind logic will pick the correct source
1361 		 * address (i.e. matching the outgoing multicast interface).
1362 		 */
1363 	}
1364 
1365 	/*
1366 	 * Verify that the src/port/dst/port is unique for all
1367 	 * connections in TS_DATA_XFER
1368 	 */
1369 	for (udp1 = udpf->uf_udp; udp1 != NULL; udp1 = udp1->udp_bind_hash) {
1370 		if (udp1->udp_state != TS_DATA_XFER)
1371 			continue;
1372 		if (udp->udp_port != udp1->udp_port ||
1373 		    udp->udp_ipversion != udp1->udp_ipversion ||
1374 		    dstport != udp1->udp_dstport ||
1375 		    !IN6_ARE_ADDR_EQUAL(&udp->udp_v6src, &udp1->udp_v6src) ||
1376 		    !IN6_ARE_ADDR_EQUAL(&v6dst, &udp1->udp_v6dst) ||
1377 		    !(IPCL_ZONE_MATCH(udp->udp_connp,
1378 		    udp1->udp_connp->conn_zoneid) ||
1379 		    IPCL_ZONE_MATCH(udp1->udp_connp,
1380 		    udp->udp_connp->conn_zoneid)))
1381 			continue;
1382 		mutex_exit(&udpf->uf_lock);
1383 		udp->udp_pending_op = -1;
1384 		rw_exit(&udp->udp_rwlock);
1385 		udp_err_ack(q, mp, TBADADDR, 0);
1386 		return;
1387 	}
1388 	udp->udp_state = TS_DATA_XFER;
1389 	mutex_exit(&udpf->uf_lock);
1390 
1391 	/*
1392 	 * Send down bind to IP to verify that there is a route
1393 	 * and to determine the source address.
1394 	 * This will come back as T_BIND_ACK with an IRE_DB_TYPE in rput.
1395 	 */
1396 	if (udp->udp_family == AF_INET)
1397 		mp1 = udp_ip_bind_mp(udp, O_T_BIND_REQ, sizeof (ipa_conn_t));
1398 	else
1399 		mp1 = udp_ip_bind_mp(udp, O_T_BIND_REQ, sizeof (ipa6_conn_t));
1400 	if (mp1 == NULL) {
1401 bind_failed:
1402 		mutex_enter(&udpf->uf_lock);
1403 		udp->udp_state = TS_IDLE;
1404 		udp->udp_pending_op = -1;
1405 		mutex_exit(&udpf->uf_lock);
1406 		rw_exit(&udp->udp_rwlock);
1407 		udp_err_ack(q, mp, TSYSERR, ENOMEM);
1408 		return;
1409 	}
1410 
1411 	rw_exit(&udp->udp_rwlock);
1412 	/*
1413 	 * We also have to send a connection confirmation to
1414 	 * keep TLI happy. Prepare it for udp_bind_result.
1415 	 */
1416 	if (udp->udp_family == AF_INET)
1417 		mp2 = mi_tpi_conn_con(NULL, (char *)sin,
1418 		    sizeof (*sin), NULL, 0);
1419 	else
1420 		mp2 = mi_tpi_conn_con(NULL, (char *)sin6,
1421 		    sizeof (*sin6), NULL, 0);
1422 	if (mp2 == NULL) {
1423 		freemsg(mp1);
1424 		rw_enter(&udp->udp_rwlock, RW_WRITER);
1425 		goto bind_failed;
1426 	}
1427 
1428 	mp = mi_tpi_ok_ack_alloc(mp);
1429 	if (mp == NULL) {
1430 		/* Unable to reuse the T_CONN_REQ for the ack. */
1431 		freemsg(mp2);
1432 		rw_enter(&udp->udp_rwlock, RW_WRITER);
1433 		mutex_enter(&udpf->uf_lock);
1434 		udp->udp_state = TS_IDLE;
1435 		udp->udp_pending_op = -1;
1436 		mutex_exit(&udpf->uf_lock);
1437 		rw_exit(&udp->udp_rwlock);
1438 		udp_err_ack_prim(q, mp1, T_CONN_REQ, TSYSERR, ENOMEM);
1439 		return;
1440 	}
1441 
1442 	/* Hang onto the T_OK_ACK and T_CONN_CON for later. */
1443 	linkb(mp1, mp);
1444 	linkb(mp1, mp2);
1445 
1446 	mblk_setcred(mp1, connp->conn_cred);
1447 	if (udp->udp_family == AF_INET)
1448 		mp1 = ip_bind_v4(q, mp1, connp);
1449 	else
1450 		mp1 = ip_bind_v6(q, mp1, connp, NULL);
1451 
1452 	/* The above returns NULL if the bind needs to be deferred */
1453 	if (mp1 != NULL)
1454 		udp_bind_result(connp, mp1);
1455 	else
1456 		CONN_INC_REF(connp);
1457 }
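
/*
 * For reference, the chain handed to ip_bind_v4()/ip_bind_v6() above is
 * (each "->" being a b_cont link, with the last two added by linkb()):
 *
 *	T_BIND_REQ -> IRE_DB_REQ_TYPE -> T_OK_ACK -> T_CONN_CON
 *
 * as listed in the block comment before this function; udp_bind_result()
 * later extracts the IRE and sends the two trailing messages upstream.
 */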
1458 
1459 static int
1460 udp_close(queue_t *q)
1461 {
1462 	conn_t	*connp = (conn_t *)q->q_ptr;
1463 	udp_t	*udp;
1464 
1465 	ASSERT(connp != NULL && IPCL_IS_UDP(connp));
1466 	udp = connp->conn_udp;
1467 
1468 	udp_quiesce_conn(connp);
1469 	ip_quiesce_conn(connp);
1470 	/*
1471 	 * Disable read-side synchronous stream
1472 	 * interface and drain any queued data.
1473 	 */
1474 	udp_rcv_drain(q, udp, B_TRUE);
1475 	ASSERT(!udp->udp_direct_sockfs);
1476 
1477 	qprocsoff(q);
1478 
1479 	ASSERT(udp->udp_rcv_cnt == 0);
1480 	ASSERT(udp->udp_rcv_msgcnt == 0);
1481 	ASSERT(udp->udp_rcv_list_head == NULL);
1482 	ASSERT(udp->udp_rcv_list_tail == NULL);
1483 
1484 	udp_close_free(connp);
1485 
1486 	/*
1487 	 * Now we are truly single threaded on this stream, and can
1488 	 * delete the things hanging off the connp, and finally the connp.
1489 	 * We removed this connp from the fanout list; it cannot be
1490 	 * accessed thru the fanouts, and we already waited for the
1491 	 * conn_ref to drop to 0. We are already in close, so
1492 	 * there cannot be any other thread from the top. qprocsoff
1493 	 * has completed, and service has completed or won't run in
1494 	 * future.
1495 	 */
1496 	ASSERT(connp->conn_ref == 1);
1497 	inet_minor_free(connp->conn_minor_arena, connp->conn_dev);
1498 	connp->conn_ref--;
1499 	ipcl_conn_destroy(connp);
1500 
1501 	q->q_ptr = WR(q)->q_ptr = NULL;
1502 	return (0);
1503 }
1504 
1505 /*
1506  * Called in the close path to quiesce the conn
1507  */
1508 void
1509 udp_quiesce_conn(conn_t *connp)
1510 {
1511 	udp_t	*udp = connp->conn_udp;
1512 
1513 	if (cl_inet_unbind != NULL && udp->udp_state == TS_IDLE) {
1514 		/*
1515 		 * Running in cluster mode - register unbind information
1516 		 */
1517 		if (udp->udp_ipversion == IPV4_VERSION) {
1518 			(*cl_inet_unbind)(IPPROTO_UDP, AF_INET,
1519 			    (uint8_t *)(&(V4_PART_OF_V6(udp->udp_v6src))),
1520 			    (in_port_t)udp->udp_port);
1521 		} else {
1522 			(*cl_inet_unbind)(IPPROTO_UDP, AF_INET6,
1523 			    (uint8_t *)(&(udp->udp_v6src)),
1524 			    (in_port_t)udp->udp_port);
1525 		}
1526 	}
1527 
1528 	udp_bind_hash_remove(udp, B_FALSE);
1529 
1530 }
1531 
1532 void
1533 udp_close_free(conn_t *connp)
1534 {
1535 	udp_t *udp = connp->conn_udp;
1536 
1537 	/* If there are any options associated with the stream, free them. */
1538 	if (udp->udp_ip_snd_options != NULL) {
1539 		mi_free((char *)udp->udp_ip_snd_options);
1540 		udp->udp_ip_snd_options = NULL;
1541 		udp->udp_ip_snd_options_len = 0;
1542 	}
1543 
1544 	if (udp->udp_ip_rcv_options != NULL) {
1545 		mi_free((char *)udp->udp_ip_rcv_options);
1546 		udp->udp_ip_rcv_options = NULL;
1547 		udp->udp_ip_rcv_options_len = 0;
1548 	}
1549 
1550 	/* Free memory associated with sticky options */
1551 	if (udp->udp_sticky_hdrs_len != 0) {
1552 		kmem_free(udp->udp_sticky_hdrs,
1553 		    udp->udp_sticky_hdrs_len);
1554 		udp->udp_sticky_hdrs = NULL;
1555 		udp->udp_sticky_hdrs_len = 0;
1556 	}
1557 
1558 	ip6_pkt_free(&udp->udp_sticky_ipp);
1559 
1560 	/*
1561 	 * Clear any fields which the kmem_cache constructor clears.
1562 	 * Only udp_connp needs to be preserved.
1563 	 * TBD: We should make this more efficient to avoid clearing
1564 	 * everything.
1565 	 */
1566 	ASSERT(udp->udp_connp == connp);
1567 	bzero(udp, sizeof (udp_t));
1568 	udp->udp_connp = connp;
1569 }
1570 
1571 /*
1572  * This routine handles each T_DISCON_REQ message passed to udp,
1573  * indicating that UDP is no longer connected. This results
1574  * in sending a T_BIND_REQ to IP to restore the binding to just
1575  * the local address/port.
1576  *
1577  * This routine sends down a T_BIND_REQ to IP with the following mblks:
1578  *	T_BIND_REQ	- specifying just the local address/port
1579  *	T_OK_ACK	- for the T_DISCON_REQ
1580  *
1581  * The disconnect completes in udp_bind_result.
1582  * When a T_BIND_ACK is received the appended T_OK_ACK is sent to the TPI user.
1583  * Should udp_bind_result receive T_ERROR_ACK for the T_BIND_REQ it will
1584  * convert it to an error ack for the appropriate primitive.
1585  */
1586 static void
1587 udp_disconnect(queue_t *q, mblk_t *mp)
1588 {
1589 	udp_t	*udp;
1590 	mblk_t	*mp1;
1591 	udp_fanout_t *udpf;
1592 	udp_stack_t *us;
1593 	conn_t	*connp = Q_TO_CONN(q);
1594 
1595 	udp = connp->conn_udp;
1596 	us = udp->udp_us;
1597 	rw_enter(&udp->udp_rwlock, RW_WRITER);
1598 	if (udp->udp_state != TS_DATA_XFER || udp->udp_pending_op != -1) {
1599 		rw_exit(&udp->udp_rwlock);
1600 		(void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
1601 		    "udp_disconnect: bad state, %u", udp->udp_state);
1602 		udp_err_ack(q, mp, TOUTSTATE, 0);
1603 		return;
1604 	}
1605 	udp->udp_pending_op = T_DISCON_REQ;
1606 	udpf = &us->us_bind_fanout[UDP_BIND_HASH(udp->udp_port,
1607 	    us->us_bind_fanout_size)];
1608 	mutex_enter(&udpf->uf_lock);
1609 	udp->udp_v6src = udp->udp_bound_v6src;
1610 	udp->udp_state = TS_IDLE;
1611 	mutex_exit(&udpf->uf_lock);
1612 
1613 	/*
1614 	 * Send down bind to IP to remove the full binding and revert
1615 	 * to the local address binding.
1616 	 */
1617 	if (udp->udp_family == AF_INET)
1618 		mp1 = udp_ip_bind_mp(udp, O_T_BIND_REQ, sizeof (sin_t));
1619 	else
1620 		mp1 = udp_ip_bind_mp(udp, O_T_BIND_REQ, sizeof (sin6_t));
1621 	if (mp1 == NULL) {
1622 		udp->udp_pending_op = -1;
1623 		rw_exit(&udp->udp_rwlock);
1624 		udp_err_ack(q, mp, TSYSERR, ENOMEM);
1625 		return;
1626 	}
1627 	mp = mi_tpi_ok_ack_alloc(mp);
1628 	if (mp == NULL) {
1629 		/* Unable to reuse the T_DISCON_REQ for the ack. */
1630 		udp->udp_pending_op = -1;
1631 		rw_exit(&udp->udp_rwlock);
1632 		udp_err_ack_prim(q, mp1, T_DISCON_REQ, TSYSERR, ENOMEM);
1633 		return;
1634 	}
1635 
1636 	if (udp->udp_family == AF_INET6) {
1637 		int error;
1638 
1639 		/* Rebuild the header template */
1640 		error = udp_build_hdrs(udp);
1641 		if (error != 0) {
1642 			udp->udp_pending_op = -1;
1643 			rw_exit(&udp->udp_rwlock);
1644 			udp_err_ack_prim(q, mp, T_DISCON_REQ, TSYSERR, error);
1645 			freemsg(mp1);
1646 			return;
1647 		}
1648 	}
1649 
1650 	rw_exit(&udp->udp_rwlock);
1651 	/* Append the T_OK_ACK to the T_BIND_REQ for udp_bind_ack */
1652 	linkb(mp1, mp);
1653 
1654 	if (udp->udp_family == AF_INET6)
1655 		mp1 = ip_bind_v6(q, mp1, connp, NULL);
1656 	else
1657 		mp1 = ip_bind_v4(q, mp1, connp);
1658 
1659 	/* The above returns NULL if the bind needs to be deferred */
1660 	if (mp1 != NULL)
1661 		udp_bind_result(connp, mp1);
1662 	else
1663 		CONN_INC_REF(connp);
1664 }
1665 
1666 /* This routine creates a T_ERROR_ACK message and passes it upstream. */
1667 static void
1668 udp_err_ack(queue_t *q, mblk_t *mp, t_scalar_t t_error, int sys_error)
1669 {
1670 	if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL)
1671 		qreply(q, mp);
1672 }
1673 
1674 /* Shorthand to generate and send TPI error acks to our client */
1675 static void
1676 udp_err_ack_prim(queue_t *q, mblk_t *mp, int primitive, t_scalar_t t_error,
1677     int sys_error)
1678 {
1679 	struct T_error_ack	*teackp;
1680 
1681 	if ((mp = tpi_ack_alloc(mp, sizeof (struct T_error_ack),
1682 	    M_PCPROTO, T_ERROR_ACK)) != NULL) {
1683 		teackp = (struct T_error_ack *)mp->b_rptr;
1684 		teackp->ERROR_prim = primitive;
1685 		teackp->TLI_error = t_error;
1686 		teackp->UNIX_error = sys_error;
1687 		qreply(q, mp);
1688 	}
1689 }
1690 
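/*
 * Report the extra privileged UDP ports by printing each non-zero entry
 * of us_epriv_ports into the reply mblk.
 */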
1691 /*ARGSUSED*/
1692 static int
1693 udp_extra_priv_ports_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr)
1694 {
1695 	int i;
1696 	udp_t		*udp = Q_TO_UDP(q);
1697 	udp_stack_t *us = udp->udp_us;
1698 
1699 	for (i = 0; i < us->us_num_epriv_ports; i++) {
1700 		if (us->us_epriv_ports[i] != 0)
1701 			(void) mi_mpprintf(mp, "%d ", us->us_epriv_ports[i]);
1702 	}
1703 	return (0);
1704 }
1705 
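/*
 * Add a port to the extra privileged port list.  The value must be a
 * valid port number, must not already be present, and a free slot must
 * exist in us_epriv_ports.
 */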
1706 /* ARGSUSED */
1707 static int
1708 udp_extra_priv_ports_add(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
1709     cred_t *cr)
1710 {
1711 	long	new_value;
1712 	int	i;
1713 	udp_t		*udp = Q_TO_UDP(q);
1714 	udp_stack_t *us = udp->udp_us;
1715 
1716 	/*
1717 	 * Fail the request if the new value does not lie within the
1718 	 * port number limits.
1719 	 */
1720 	if (ddi_strtol(value, NULL, 10, &new_value) != 0 ||
1721 	    new_value <= 0 || new_value >= 65536) {
1722 		return (EINVAL);
1723 	}
1724 
1725 	/* Check if the value is already in the list */
1726 	for (i = 0; i < us->us_num_epriv_ports; i++) {
1727 		if (new_value == us->us_epriv_ports[i]) {
1728 			return (EEXIST);
1729 		}
1730 	}
1731 	/* Find an empty slot */
1732 	for (i = 0; i < us->us_num_epriv_ports; i++) {
1733 		if (us->us_epriv_ports[i] == 0)
1734 			break;
1735 	}
1736 	if (i == us->us_num_epriv_ports) {
1737 		return (EOVERFLOW);
1738 	}
1739 
1740 	/* Set the new value */
1741 	us->us_epriv_ports[i] = (in_port_t)new_value;
1742 	return (0);
1743 }
1744 
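/*
 * Remove a port from the extra privileged port list; returns ESRCH if
 * the port is not currently in us_epriv_ports.
 */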
1745 /* ARGSUSED */
1746 static int
1747 udp_extra_priv_ports_del(queue_t *q, mblk_t *mp, char *value, caddr_t cp,
1748     cred_t *cr)
1749 {
1750 	long	new_value;
1751 	int	i;
1752 	udp_t		*udp = Q_TO_UDP(q);
1753 	udp_stack_t *us = udp->udp_us;
1754 
1755 	/*
1756 	 * Fail the request if the new value does not lie within the
1757 	 * port number limits.
1758 	 */
1759 	if (ddi_strtol(value, NULL, 10, &new_value) != 0 ||
1760 	    new_value <= 0 || new_value >= 65536) {
1761 		return (EINVAL);
1762 	}
1763 
1764 	/* Check that the value is already in the list */
1765 	for (i = 0; i < us->us_num_epriv_ports; i++) {
1766 		if (us->us_epriv_ports[i] == new_value)
1767 			break;
1768 	}
1769 	if (i == us->us_num_epriv_ports) {
1770 		return (ESRCH);
1771 	}
1772 
1773 	/* Clear the value */
1774 	us->us_epriv_ports[i] = 0;
1775 	return (0);
1776 }
1777 
1778 /* At minimum we need 4 bytes of UDP header */
1779 #define	ICMP_MIN_UDP_HDR	4
1780 
1781 /*
1782  * udp_icmp_error is called by udp_input to process ICMP messages passed up by IP.
1783  * Generates the appropriate T_UDERROR_IND for permanent (non-transient) errors.
1784  * Assumes that IP has pulled up everything up to and including the ICMP header.
1785  */
1786 static void
1787 udp_icmp_error(queue_t *q, mblk_t *mp)
1788 {
1789 	icmph_t *icmph;
1790 	ipha_t	*ipha;
1791 	int	iph_hdr_length;
1792 	udpha_t	*udpha;
1793 	sin_t	sin;
1794 	sin6_t	sin6;
1795 	mblk_t	*mp1;
1796 	int	error = 0;
1797 	udp_t	*udp = Q_TO_UDP(q);
1798 
1799 	ipha = (ipha_t *)mp->b_rptr;
1800 
1801 	ASSERT(OK_32PTR(mp->b_rptr));
1802 
1803 	if (IPH_HDR_VERSION(ipha) != IPV4_VERSION) {
1804 		ASSERT(IPH_HDR_VERSION(ipha) == IPV6_VERSION);
1805 		udp_icmp_error_ipv6(q, mp);
1806 		return;
1807 	}
1808 	ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION);
1809 
1810 	/* Skip past the outer IP and ICMP headers */
1811 	iph_hdr_length = IPH_HDR_LENGTH(ipha);
1812 	icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length];
1813 	ipha = (ipha_t *)&icmph[1];
1814 
1815 	/* Skip past the inner IP and find the ULP header */
1816 	iph_hdr_length = IPH_HDR_LENGTH(ipha);
1817 	udpha = (udpha_t *)((char *)ipha + iph_hdr_length);
1818 
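	/*
	 * Map the ICMP type/code to a permanent error.  Transient errors
	 * leave error at zero and the message is simply dropped below.
	 */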
1819 	switch (icmph->icmph_type) {
1820 	case ICMP_DEST_UNREACHABLE:
1821 		switch (icmph->icmph_code) {
1822 		case ICMP_FRAGMENTATION_NEEDED:
1823 			/*
1824 			 * IP has already adjusted the path MTU.
1825 			 */
1826 			break;
1827 		case ICMP_PORT_UNREACHABLE:
1828 		case ICMP_PROTOCOL_UNREACHABLE:
1829 			error = ECONNREFUSED;
1830 			break;
1831 		default:
1832 			/* Transient errors */
1833 			break;
1834 		}
1835 		break;
1836 	default:
1837 		/* Transient errors */
1838 		break;
1839 	}
1840 	if (error == 0) {
1841 		freemsg(mp);
1842 		return;
1843 	}
1844 
1845 	/*
1846 	 * Deliver T_UDERROR_IND when the application has asked for it.
1847 	 * The socket layer enables this automatically when connected.
1848 	 */
1849 	if (!udp->udp_dgram_errind) {
1850 		freemsg(mp);
1851 		return;
1852 	}
1853 
1854 	switch (udp->udp_family) {
1855 	case AF_INET:
1856 		sin = sin_null;
1857 		sin.sin_family = AF_INET;
1858 		sin.sin_addr.s_addr = ipha->ipha_dst;
1859 		sin.sin_port = udpha->uha_dst_port;
1860 		mp1 = mi_tpi_uderror_ind((char *)&sin, sizeof (sin_t), NULL, 0,
1861 		    error);
1862 		break;
1863 	case AF_INET6:
1864 		sin6 = sin6_null;
1865 		sin6.sin6_family = AF_INET6;
1866 		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &sin6.sin6_addr);
1867 		sin6.sin6_port = udpha->uha_dst_port;
1868 
1869 		mp1 = mi_tpi_uderror_ind((char *)&sin6, sizeof (sin6_t),
1870 		    NULL, 0, error);
1871 		break;
1872 	}
1873 	if (mp1)
1874 		putnext(q, mp1);
1875 	freemsg(mp);
1876 }
1877 
1878 /*
1879  * udp_icmp_error_ipv6 is called by udp_icmp_error to process ICMP for IPv6.
1880  * Generates the appropriate T_UDERROR_IND for permanent (non-transient) errors.
1881  * Assumes that IP has pulled up all the extension headers as well as the
1882  * ICMPv6 header.
1883  */
1884 static void
1885 udp_icmp_error_ipv6(queue_t *q, mblk_t *mp)
1886 {
1887 	icmp6_t		*icmp6;
1888 	ip6_t		*ip6h, *outer_ip6h;
1889 	uint16_t	iph_hdr_length;
1890 	uint8_t		*nexthdrp;
1891 	udpha_t		*udpha;
1892 	sin6_t		sin6;
1893 	mblk_t		*mp1;
1894 	int		error = 0;
1895 	udp_t		*udp = Q_TO_UDP(q);
1896 	udp_stack_t	*us = udp->udp_us;
1897 
1898 	outer_ip6h = (ip6_t *)mp->b_rptr;
1899 	if (outer_ip6h->ip6_nxt != IPPROTO_ICMPV6)
1900 		iph_hdr_length = ip_hdr_length_v6(mp, outer_ip6h);
1901 	else
1902 		iph_hdr_length = IPV6_HDR_LEN;
1903 	icmp6 = (icmp6_t *)&mp->b_rptr[iph_hdr_length];
1904 	ip6h = (ip6_t *)&icmp6[1];
1905 	if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, &nexthdrp)) {
1906 		freemsg(mp);
1907 		return;
1908 	}
1909 	udpha = (udpha_t *)((char *)ip6h + iph_hdr_length);
1910 
1911 	switch (icmp6->icmp6_type) {
1912 	case ICMP6_DST_UNREACH:
1913 		switch (icmp6->icmp6_code) {
1914 		case ICMP6_DST_UNREACH_NOPORT:
1915 			error = ECONNREFUSED;
1916 			break;
1917 		case ICMP6_DST_UNREACH_ADMIN:
1918 		case ICMP6_DST_UNREACH_NOROUTE:
1919 		case ICMP6_DST_UNREACH_BEYONDSCOPE:
1920 		case ICMP6_DST_UNREACH_ADDR:
1921 			/* Transient errors */
1922 			break;
1923 		default:
1924 			break;
1925 		}
1926 		break;
1927 	case ICMP6_PACKET_TOO_BIG: {
1928 		struct T_unitdata_ind	*tudi;
1929 		struct T_opthdr		*toh;
1930 		size_t			udi_size;
1931 		mblk_t			*newmp;
1932 		t_scalar_t		opt_length = sizeof (struct T_opthdr) +
1933 		    sizeof (struct ip6_mtuinfo);
1934 		sin6_t			*sin6;
1935 		struct ip6_mtuinfo	*mtuinfo;
1936 
1937 		/*
1938 		 * If the application has requested to receive path mtu
1939 		 * information, send up an empty message containing an
1940 		 * IPV6_PATHMTU ancillary data item.
1941 		 */
1942 		if (!udp->udp_ipv6_recvpathmtu)
1943 			break;
1944 
1945 		udi_size = sizeof (struct T_unitdata_ind) + sizeof (sin6_t) +
1946 		    opt_length;
1947 		if ((newmp = allocb(udi_size, BPRI_MED)) == NULL) {
1948 			BUMP_MIB(&us->us_udp_mib, udpInErrors);
1949 			break;
1950 		}
1951 
1952 		/*
1953 		 * newmp->b_cont is left NULL on purpose.  This is an
1954 		 * empty message containing only ancillary data.
1955 		 */
1956 		newmp->b_datap->db_type = M_PROTO;
1957 		tudi = (struct T_unitdata_ind *)newmp->b_rptr;
1958 		newmp->b_wptr = (uchar_t *)tudi + udi_size;
1959 		tudi->PRIM_type = T_UNITDATA_IND;
1960 		tudi->SRC_length = sizeof (sin6_t);
1961 		tudi->SRC_offset = sizeof (struct T_unitdata_ind);
1962 		tudi->OPT_offset = tudi->SRC_offset + sizeof (sin6_t);
1963 		tudi->OPT_length = opt_length;
1964 
1965 		sin6 = (sin6_t *)&tudi[1];
1966 		bzero(sin6, sizeof (sin6_t));
1967 		sin6->sin6_family = AF_INET6;
1968 		sin6->sin6_addr = udp->udp_v6dst;
1969 
1970 		toh = (struct T_opthdr *)&sin6[1];
1971 		toh->level = IPPROTO_IPV6;
1972 		toh->name = IPV6_PATHMTU;
1973 		toh->len = opt_length;
1974 		toh->status = 0;
1975 
1976 		mtuinfo = (struct ip6_mtuinfo *)&toh[1];
1977 		bzero(mtuinfo, sizeof (struct ip6_mtuinfo));
1978 		mtuinfo->ip6m_addr.sin6_family = AF_INET6;
1979 		mtuinfo->ip6m_addr.sin6_addr = ip6h->ip6_dst;
1980 		mtuinfo->ip6m_mtu = icmp6->icmp6_mtu;
1981 		/*
1982 		 * We've consumed everything we need from the original
1983 		 * message.  Free it, then send our empty message.
1984 		 */
1985 		freemsg(mp);
1986 		putnext(q, newmp);
1987 		return;
1988 	}
1989 	case ICMP6_TIME_EXCEEDED:
1990 		/* Transient errors */
1991 		break;
1992 	case ICMP6_PARAM_PROB:
1993 	/* A next-header parameter problem maps to ICMP_PROTOCOL_UNREACHABLE */
1994 		if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER &&
1995 		    (uchar_t *)ip6h + icmp6->icmp6_pptr ==
1996 		    (uchar_t *)nexthdrp) {
1997 			error = ECONNREFUSED;
1998 			break;
1999 		}
2000 		break;
2001 	}
2002 	if (error == 0) {
2003 		freemsg(mp);
2004 		return;
2005 	}
2006 
2007 	/*
2008 	 * Deliver T_UDERROR_IND when the application has asked for it.
2009 	 * The socket layer enables this automatically when connected.
2010 	 */
2011 	if (!udp->udp_dgram_errind) {
2012 		freemsg(mp);
2013 		return;
2014 	}
2015 
2016 	sin6 = sin6_null;
2017 	sin6.sin6_family = AF_INET6;
2018 	sin6.sin6_addr = ip6h->ip6_dst;
2019 	sin6.sin6_port = udpha->uha_dst_port;
2020 	sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK;
2021 
2022 	mp1 = mi_tpi_uderror_ind((char *)&sin6, sizeof (sin6_t), NULL, 0,
2023 	    error);
2024 	if (mp1)
2025 		putnext(q, mp1);
2026 	freemsg(mp);
2027 }
2028 
2029 /*
2030  * This routine responds to T_ADDR_REQ messages.  It is called by udp_wput.
2031  * The local address is filled in if the endpoint is bound. The remote address
2032  * is filled in if the remote address has been specified ("connected endpoint").
2033  * (The concept of connected CLTS sockets is alien to published TPI,
2034  *  but we support it anyway.)
2035  */
2036 static void
2037 udp_addr_req(queue_t *q, mblk_t *mp)
2038 {
2039 	sin_t	*sin;
2040 	sin6_t	*sin6;
2041 	mblk_t	*ackmp;
2042 	struct T_addr_ack *taa;
2043 	udp_t	*udp = Q_TO_UDP(q);
2044 
2045 	/* Make it large enough for worst case */
2046 	ackmp = reallocb(mp, sizeof (struct T_addr_ack) +
2047 	    2 * sizeof (sin6_t), 1);
2048 	if (ackmp == NULL) {
2049 		udp_err_ack(q, mp, TSYSERR, ENOMEM);
2050 		return;
2051 	}
2052 	taa = (struct T_addr_ack *)ackmp->b_rptr;
2053 
2054 	bzero(taa, sizeof (struct T_addr_ack));
2055 	ackmp->b_wptr = (uchar_t *)&taa[1];
2056 
2057 	taa->PRIM_type = T_ADDR_ACK;
2058 	ackmp->b_datap->db_type = M_PCPROTO;
2059 	rw_enter(&udp->udp_rwlock, RW_READER);
2060 	/*
2061 	 * Note: The following code assumes 32-bit alignment of basic
2062 	 * data structures like sin_t and struct T_addr_ack.
2063 	 */
2064 	if (udp->udp_state != TS_UNBND) {
2065 		/*
2066 		 * Fill in local address first
2067 		 */
2068 		taa->LOCADDR_offset = sizeof (*taa);
2069 		if (udp->udp_family == AF_INET) {
2070 			taa->LOCADDR_length = sizeof (sin_t);
2071 			sin = (sin_t *)&taa[1];
2072 			/* Fill zeroes and then initialize non-zero fields */
2073 			*sin = sin_null;
2074 			sin->sin_family = AF_INET;
2075 			if (!IN6_IS_ADDR_V4MAPPED_ANY(&udp->udp_v6src) &&
2076 			    !IN6_IS_ADDR_UNSPECIFIED(&udp->udp_v6src)) {
2077 				IN6_V4MAPPED_TO_IPADDR(&udp->udp_v6src,
2078 				    sin->sin_addr.s_addr);
2079 			} else {
2080 				/*
2081 				 * INADDR_ANY
2082 				 * udp_v6src is not set; we might be bound to
2083 				 * broadcast/multicast. Use udp_bound_v6src as
2084 				 * the local address instead (it may itself
2085 				 * still be INADDR_ANY).
2086 				 */
2087 				IN6_V4MAPPED_TO_IPADDR(&udp->udp_bound_v6src,
2088 				    sin->sin_addr.s_addr);
2089 			}
2090 			sin->sin_port = udp->udp_port;
2091 			ackmp->b_wptr = (uchar_t *)&sin[1];
2092 			if (udp->udp_state == TS_DATA_XFER) {
2093 				/*
2094 				 * connected, fill remote address too
2095 				 */
2096 				taa->REMADDR_length = sizeof (sin_t);
2097 				/* assumed 32-bit alignment */
2098 				taa->REMADDR_offset = taa->LOCADDR_offset +
2099 				    taa->LOCADDR_length;
2100 
2101 				sin = (sin_t *)(ackmp->b_rptr +
2102 				    taa->REMADDR_offset);
2103 				/* initialize */
2104 				*sin = sin_null;
2105 				sin->sin_family = AF_INET;
2106 				sin->sin_addr.s_addr =
2107 				    V4_PART_OF_V6(udp->udp_v6dst);
2108 				sin->sin_port = udp->udp_dstport;
2109 				ackmp->b_wptr = (uchar_t *)&sin[1];
2110 			}
2111 		} else {
2112 			taa->LOCADDR_length = sizeof (sin6_t);
2113 			sin6 = (sin6_t *)&taa[1];
2114 			/* Fill zeroes and then initialize non-zero fields */
2115 			*sin6 = sin6_null;
2116 			sin6->sin6_family = AF_INET6;
2117 			if (!IN6_IS_ADDR_UNSPECIFIED(&udp->udp_v6src)) {
2118 				sin6->sin6_addr = udp->udp_v6src;
2119 			} else {
2120 				/*
2121 				 * UNSPECIFIED
2122 				 * udp_v6src is not set; we might be bound to
2123 				 * broadcast/multicast. Use udp_bound_v6src as
2124 				 * the local address instead (it may itself
2125 				 * still be UNSPECIFIED).
2126 				 */
2127 				sin6->sin6_addr =
2128 				    udp->udp_bound_v6src;
2129 			}
2130 			sin6->sin6_port = udp->udp_port;
2131 			ackmp->b_wptr = (uchar_t *)&sin6[1];
2132 			if (udp->udp_state == TS_DATA_XFER) {
2133 				/*
2134 				 * connected, fill remote address too
2135 				 */
2136 				taa->REMADDR_length = sizeof (sin6_t);
2137 				/* assumed 32-bit alignment */
2138 				taa->REMADDR_offset = taa->LOCADDR_offset +
2139 				    taa->LOCADDR_length;
2140 
2141 				sin6 = (sin6_t *)(ackmp->b_rptr +
2142 				    taa->REMADDR_offset);
2143 				/* initialize */
2144 				*sin6 = sin6_null;
2145 				sin6->sin6_family = AF_INET6;
2146 				sin6->sin6_addr = udp->udp_v6dst;
2147 				sin6->sin6_port = udp->udp_dstport;
2148 				ackmp->b_wptr = (uchar_t *)&sin6[1];
2149 			}
2150 			ackmp->b_wptr = (uchar_t *)&sin6[1];
2151 		}
2152 	}
2153 	rw_exit(&udp->udp_rwlock);
2154 	ASSERT(ackmp->b_wptr <= ackmp->b_datap->db_lim);
2155 	qreply(q, ackmp);
2156 }
2157 
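/*
 * Fill in a T_info_ack from the per-family template and the endpoint's
 * current TPI state.
 */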
2158 static void
2159 udp_copy_info(struct T_info_ack *tap, udp_t *udp)
2160 {
2161 	if (udp->udp_family == AF_INET) {
2162 		*tap = udp_g_t_info_ack_ipv4;
2163 	} else {
2164 		*tap = udp_g_t_info_ack_ipv6;
2165 	}
2166 	tap->CURRENT_state = udp->udp_state;
2167 	tap->OPT_size = udp_max_optsize;
2168 }
2169 
2170 /*
2171  * This routine responds to T_CAPABILITY_REQ messages.  It is called by
2172  * udp_wput.  Much of the T_CAPABILITY_ACK information is copied from
2173  * udp_g_t_info_ack.  The current state of the stream is copied from
2174  * udp_state.
2175  */
2176 static void
2177 udp_capability_req(queue_t *q, mblk_t *mp)
2178 {
2179 	t_uscalar_t		cap_bits1;
2180 	struct T_capability_ack	*tcap;
2181 	udp_t	*udp = Q_TO_UDP(q);
2182 
2183 	cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1;
2184 
2185 	mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
2186 	    mp->b_datap->db_type, T_CAPABILITY_ACK);
2187 	if (!mp)
2188 		return;
2189 
2190 	tcap = (struct T_capability_ack *)mp->b_rptr;
2191 	tcap->CAP_bits1 = 0;
2192 
2193 	if (cap_bits1 & TC1_INFO) {
2194 		udp_copy_info(&tcap->INFO_ack, udp);
2195 		tcap->CAP_bits1 |= TC1_INFO;
2196 	}
2197 
2198 	qreply(q, mp);
2199 }
2200 
2201 /*
2202  * This routine responds to T_INFO_REQ messages.  It is called by udp_wput.
2203  * Most of the T_INFO_ACK information is copied from udp_g_t_info_ack.
2204  * The current state of the stream is copied from udp_state.
2205  */
2206 static void
2207 udp_info_req(queue_t *q, mblk_t *mp)
2208 {
2209 	udp_t *udp = Q_TO_UDP(q);
2210 
2211 	/* Create a T_INFO_ACK message. */
2212 	mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO,
2213 	    T_INFO_ACK);
2214 	if (!mp)
2215 		return;
2216 	udp_copy_info((struct T_info_ack *)mp->b_rptr, udp);
2217 	qreply(q, mp);
2218 }
2219 
2220 /*
2221  * IP recognizes seven kinds of bind requests:
2222  *
2223  * - A zero-length address binds only to the protocol number.
2224  *
2225  * - A 4-byte address is treated as a request to
2226  * validate that the address is a valid local IPv4
2227  * address, appropriate for an application to bind to.
2228  * IP does the verification, but does not make any note
2229  * of the address at this time.
2230  *
2231  * - A 16-byte address is treated as a request
2232  * to validate a local IPv6 address, as in the 4-byte
2233  * address case above.
2234  *
2235  * - A 16-byte sockaddr_in to validate the local IPv4 address and also
2236  * use it for the inbound fanout of packets.
2237  *
2238  * - A 24-byte sockaddr_in6 to validate the local IPv6 address and also
2239  * use it for the inbound fanout of packets.
2240  *
2241  * - A 12-byte address (ipa_conn_t) containing complete IPv4 fanout
2242  * information consisting of local and remote addresses
2243  * and ports.  In this case, the addresses are both
2244  * validated as appropriate for this operation, and, if
2245  * so, the information is retained for use in the
2246  * inbound fanout.
2247  *
2248  * - A 36-byte address (ipa6_conn_t) containing complete IPv6
2249  * fanout information, like the 12-byte case above.
2250  *
2251  * IP will also fill in the IRE request mblk with information
2252  * regarding our peer.  In all cases, we notify IP of our protocol
2253  * type by appending a single protocol byte to the bind request.
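 *
 * For example, an addr_length of sizeof (ipa_conn_t) produces an M_PROTO
 * mblk laid out as [T_bind_req][ipa_conn_t][IPPROTO_UDP byte], with a
 * b_cont mblk of type IRE_DB_REQ_TYPE appended to request the IRE.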
2254  */
2255 static mblk_t *
2256 udp_ip_bind_mp(udp_t *udp, t_scalar_t bind_prim, t_scalar_t addr_length)
2257 {
2258 	char	*cp;
2259 	mblk_t	*mp;
2260 	struct T_bind_req *tbr;
2261 	ipa_conn_t	*ac;
2262 	ipa6_conn_t	*ac6;
2263 	sin_t		*sin;
2264 	sin6_t		*sin6;
2265 
2266 	ASSERT(bind_prim == O_T_BIND_REQ || bind_prim == T_BIND_REQ);
2267 	ASSERT(RW_LOCK_HELD(&udp->udp_rwlock));
2268 	mp = allocb(sizeof (*tbr) + addr_length + 1, BPRI_HI);
2269 	if (!mp)
2270 		return (mp);
2271 	mp->b_datap->db_type = M_PROTO;
2272 	tbr = (struct T_bind_req *)mp->b_rptr;
2273 	tbr->PRIM_type = bind_prim;
2274 	tbr->ADDR_offset = sizeof (*tbr);
2275 	tbr->CONIND_number = 0;
2276 	tbr->ADDR_length = addr_length;
2277 	cp = (char *)&tbr[1];
2278 	switch (addr_length) {
2279 	case sizeof (ipa_conn_t):
2280 		ASSERT(udp->udp_family == AF_INET);
2281 		/* Append a request for an IRE */
2282 		mp->b_cont = allocb(sizeof (ire_t), BPRI_HI);
2283 		if (!mp->b_cont) {
2284 			freemsg(mp);
2285 			return (NULL);
2286 		}
2287 		mp->b_cont->b_wptr += sizeof (ire_t);
2288 		mp->b_cont->b_datap->db_type = IRE_DB_REQ_TYPE;
2289 
2290 		/* cp known to be 32 bit aligned */
2291 		ac = (ipa_conn_t *)cp;
2292 		ac->ac_laddr = V4_PART_OF_V6(udp->udp_v6src);
2293 		ac->ac_faddr = V4_PART_OF_V6(udp->udp_v6dst);
2294 		ac->ac_fport = udp->udp_dstport;
2295 		ac->ac_lport = udp->udp_port;
2296 		break;
2297 
2298 	case sizeof (ipa6_conn_t):
2299 		ASSERT(udp->udp_family == AF_INET6);
2300 		/* Append a request for an IRE */
2301 		mp->b_cont = allocb(sizeof (ire_t), BPRI_HI);
2302 		if (!mp->b_cont) {
2303 			freemsg(mp);
2304 			return (NULL);
2305 		}
2306 		mp->b_cont->b_wptr += sizeof (ire_t);
2307 		mp->b_cont->b_datap->db_type = IRE_DB_REQ_TYPE;
2308 
2309 		/* cp known to be 32 bit aligned */
2310 		ac6 = (ipa6_conn_t *)cp;
2311 		ac6->ac6_laddr = udp->udp_v6src;
2312 		ac6->ac6_faddr = udp->udp_v6dst;
2313 		ac6->ac6_fport = udp->udp_dstport;
2314 		ac6->ac6_lport = udp->udp_port;
2315 		break;
2316 
2317 	case sizeof (sin_t):
2318 		ASSERT(udp->udp_family == AF_INET);
2319 		/* Append a request for an IRE */
2320 		mp->b_cont = allocb(sizeof (ire_t), BPRI_HI);
2321 		if (!mp->b_cont) {
2322 			freemsg(mp);
2323 			return (NULL);
2324 		}
2325 		mp->b_cont->b_wptr += sizeof (ire_t);
2326 		mp->b_cont->b_datap->db_type = IRE_DB_REQ_TYPE;
2327 
2328 		sin = (sin_t *)cp;
2329 		*sin = sin_null;
2330 		sin->sin_family = AF_INET;
2331 		sin->sin_addr.s_addr = V4_PART_OF_V6(udp->udp_bound_v6src);
2332 		sin->sin_port = udp->udp_port;
2333 		break;
2334 
2335 	case sizeof (sin6_t):
2336 		ASSERT(udp->udp_family == AF_INET6);
2337 		/* Append a request for an IRE */
2338 		mp->b_cont = allocb(sizeof (ire_t), BPRI_HI);
2339 		if (!mp->b_cont) {
2340 			freemsg(mp);
2341 			return (NULL);
2342 		}
2343 		mp->b_cont->b_wptr += sizeof (ire_t);
2344 		mp->b_cont->b_datap->db_type = IRE_DB_REQ_TYPE;
2345 
2346 		sin6 = (sin6_t *)cp;
2347 		*sin6 = sin6_null;
2348 		sin6->sin6_family = AF_INET6;
2349 		sin6->sin6_addr = udp->udp_bound_v6src;
2350 		sin6->sin6_port = udp->udp_port;
2351 		break;
2352 	}
2353 	/* Add protocol number to end */
2354 	cp[addr_length] = (char)IPPROTO_UDP;
2355 	mp->b_wptr = (uchar_t *)&cp[addr_length + 1];
2356 	return (mp);
2357 }
2358 
2359 /* For /dev/udp aka AF_INET open */
2360 static int
2361 udp_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
2362 {
2363 	return (udp_open(q, devp, flag, sflag, credp, B_FALSE));
2364 }
2365 
2366 /* For /dev/udp6 aka AF_INET6 open */
2367 static int
2368 udp_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
2369 {
2370 	return (udp_open(q, devp, flag, sflag, credp, B_TRUE));
2371 }
2372 
2373 /*
2374  * This is the open routine for udp.  It allocates a udp_t structure for
2375  * the stream and, on the first open of the module, creates an ND table.
2376  */
2377 /*ARGSUSED2*/
2378 static int
2379 udp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp,
2380     boolean_t isv6)
2381 {
2382 	int		err;
2383 	udp_t		*udp;
2384 	conn_t		*connp;
2385 	dev_t		conn_dev;
2386 	zoneid_t	zoneid;
2387 	netstack_t	*ns;
2388 	udp_stack_t	*us;
2389 	vmem_t		*minor_arena;
2390 
2391 	TRACE_1(TR_FAC_UDP, TR_UDP_OPEN, "udp_open: q %p", q);
2392 
2393 	/* If the stream is already open, return immediately. */
2394 	if (q->q_ptr != NULL)
2395 		return (0);
2396 
2397 	if (sflag == MODOPEN)
2398 		return (EINVAL);
2399 
2400 	ns = netstack_find_by_cred(credp);
2401 	ASSERT(ns != NULL);
2402 	us = ns->netstack_udp;
2403 	ASSERT(us != NULL);
2404 
2405 	/*
2406 	 * For exclusive stacks we set the zoneid to zero
2407 	 * to make UDP operate as if in the global zone.
2408 	 */
2409 	if (ns->netstack_stackid != GLOBAL_NETSTACKID)
2410 		zoneid = GLOBAL_ZONEID;
2411 	else
2412 		zoneid = crgetzoneid(credp);
2413 
2414 	if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) &&
2415 	    ((conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) {
2416 		minor_arena = ip_minor_arena_la;
2417 	} else {
2418 		/*
2419 		 * Either minor numbers in the large arena were exhausted
2420 		 * or a non-socket application is doing the open.
2421 		 * Try to allocate from the small arena.
2422 		 */
2423 		if ((conn_dev = inet_minor_alloc(ip_minor_arena_sa)) == 0) {
2424 			netstack_rele(ns);
2425 			return (EBUSY);
2426 		}
2427 		minor_arena = ip_minor_arena_sa;
2428 	}
2429 
2430 	*devp = makedevice(getemajor(*devp), (minor_t)conn_dev);
2431 
2432 	connp = ipcl_conn_create(IPCL_UDPCONN, KM_SLEEP, ns);
2433 	connp->conn_dev = conn_dev;
2434 	connp->conn_minor_arena = minor_arena;
2435 	udp = connp->conn_udp;
2436 
2437 	/*
2438 	 * ipcl_conn_create did a netstack_hold. Undo the hold that was
2439 	 * done by netstack_find_by_cred()
2440 	 */
2441 	netstack_rele(ns);
2442 
2443 	/*
2444 	 * Initialize the udp_t structure for this stream.
2445 	 */
2446 	q->q_ptr = connp;
2447 	WR(q)->q_ptr = connp;
2448 	connp->conn_rq = q;
2449 	connp->conn_wq = WR(q);
2450 
2451 	rw_enter(&udp->udp_rwlock, RW_WRITER);
2452 	ASSERT(connp->conn_ulp == IPPROTO_UDP);
2453 	ASSERT(connp->conn_udp == udp);
2454 	ASSERT(udp->udp_connp == connp);
2455 
2456 	/* Set the initial state of the stream and the privilege status. */
2457 	udp->udp_state = TS_UNBND;
2458 	if (isv6) {
2459 		udp->udp_family = AF_INET6;
2460 		udp->udp_ipversion = IPV6_VERSION;
2461 		udp->udp_max_hdr_len = IPV6_HDR_LEN + UDPH_SIZE;
2462 		udp->udp_ttl = us->us_ipv6_hoplimit;
2463 		connp->conn_af_isv6 = B_TRUE;
2464 		connp->conn_flags |= IPCL_ISV6;
2465 	} else {
2466 		udp->udp_family = AF_INET;
2467 		udp->udp_ipversion = IPV4_VERSION;
2468 		udp->udp_max_hdr_len = IP_SIMPLE_HDR_LENGTH + UDPH_SIZE;
2469 		udp->udp_ttl = us->us_ipv4_ttl;
2470 		connp->conn_af_isv6 = B_FALSE;
2471 		connp->conn_flags &= ~IPCL_ISV6;
2472 	}
2473 
2474 	udp->udp_multicast_ttl = IP_DEFAULT_MULTICAST_TTL;
2475 	udp->udp_pending_op = -1;
2476 	connp->conn_multicast_loop = IP_DEFAULT_MULTICAST_LOOP;
2477 	connp->conn_zoneid = zoneid;
2478 
2479 	udp->udp_open_time = lbolt64;
2480 	udp->udp_open_pid = curproc->p_pid;
2481 
2482 	/*
2483 	 * If the caller has the process-wide flag set, then default to MAC
2484 	 * exempt mode.  This allows read-down to unlabeled hosts.
2485 	 */
2486 	if (getpflags(NET_MAC_AWARE, credp) != 0)
2487 		connp->conn_mac_exempt = B_TRUE;
2488 
2489 	if (flag & SO_SOCKSTR) {
2490 		connp->conn_flags |= IPCL_SOCKET;
2491 		udp->udp_issocket = B_TRUE;
2492 		udp->udp_direct_sockfs = B_TRUE;
2493 	}
2494 
2495 	connp->conn_ulp_labeled = is_system_labeled();
2496 
2497 	udp->udp_us = us;
2498 
2499 	q->q_hiwat = us->us_recv_hiwat;
2500 	WR(q)->q_hiwat = us->us_xmit_hiwat;
2501 	WR(q)->q_lowat = us->us_xmit_lowat;
2502 
2503 	connp->conn_recv = udp_input;
2504 	crhold(credp);
2505 	connp->conn_cred = credp;
2506 
2507 	mutex_enter(&connp->conn_lock);
2508 	connp->conn_state_flags &= ~CONN_INCIPIENT;
2509 	mutex_exit(&connp->conn_lock);
2510 
2511 	qprocson(q);
2512 
2513 	if (udp->udp_family == AF_INET6) {
2514 		/* Build initial header template for transmit */
2515 		if ((err = udp_build_hdrs(udp)) != 0) {
2516 			rw_exit(&udp->udp_rwlock);
2517 			qprocsoff(q);
2518 			ipcl_conn_destroy(connp);
2519 			return (err);
2520 		}
2521 	}
2522 	rw_exit(&udp->udp_rwlock);
2523 
2524 	/* Set the Stream head write offset and high watermark. */
2525 	(void) mi_set_sth_wroff(q,
2526 	    udp->udp_max_hdr_len + us->us_wroff_extra);
2527 	(void) mi_set_sth_hiwat(q, udp_set_rcv_hiwat(udp, q->q_hiwat));
2528 
2529 	return (0);
2530 }
2531 
2532 /*
2533  * Which UDP options are OK to set through T_UNITDATA_REQ...
2534  */
2535 /* ARGSUSED */
2536 static boolean_t
2537 udp_opt_allow_udr_set(t_scalar_t level, t_scalar_t name)
2538 {
2539 	return (B_TRUE);
2540 }
2541 
2542 /*
2543  * This routine gets default values of certain options whose default
2544  * values are maintained by protocol-specific code.
2545  */
2546 /* ARGSUSED */
2547 int
2548 udp_opt_default(queue_t *q, t_scalar_t level, t_scalar_t name, uchar_t *ptr)
2549 {
2550 	udp_t		*udp = Q_TO_UDP(q);
2551 	udp_stack_t *us = udp->udp_us;
2552 	int *i1 = (int *)ptr;
2553 
2554 	switch (level) {
2555 	case IPPROTO_IP:
2556 		switch (name) {
2557 		case IP_MULTICAST_TTL:
2558 			*ptr = (uchar_t)IP_DEFAULT_MULTICAST_TTL;
2559 			return (sizeof (uchar_t));
2560 		case IP_MULTICAST_LOOP:
2561 			*ptr = (uchar_t)IP_DEFAULT_MULTICAST_LOOP;
2562 			return (sizeof (uchar_t));
2563 		}
2564 		break;
2565 	case IPPROTO_IPV6:
2566 		switch (name) {
2567 		case IPV6_MULTICAST_HOPS:
2568 			*i1 = IP_DEFAULT_MULTICAST_TTL;
2569 			return (sizeof (int));
2570 		case IPV6_MULTICAST_LOOP:
2571 			*i1 = IP_DEFAULT_MULTICAST_LOOP;
2572 			return (sizeof (int));
2573 		case IPV6_UNICAST_HOPS:
2574 			*i1 = us->us_ipv6_hoplimit;
2575 			return (sizeof (int));
2576 		}
2577 		break;
2578 	}
2579 	return (-1);
2580 }
2581 
2582 /*
2583  * This routine retrieves the current status of socket options.
2584  * It returns the size of the option retrieved.
2585  */
2586 int
2587 udp_opt_get_locked(queue_t *q, t_scalar_t level, t_scalar_t name, uchar_t *ptr)
2588 {
2589 	int	*i1 = (int *)ptr;
2590 	conn_t	*connp;
2591 	udp_t	*udp;
2592 	ip6_pkt_t *ipp;
2593 	int	len;
2594 	udp_stack_t	*us;
2595 
2596 	connp = Q_TO_CONN(q);
2597 	udp = connp->conn_udp;
2598 	ipp = &udp->udp_sticky_ipp;
2599 	us = udp->udp_us;
2600 
2601 	switch (level) {
2602 	case SOL_SOCKET:
2603 		switch (name) {
2604 		case SO_DEBUG:
2605 			*i1 = udp->udp_debug;
2606 			break;	/* goto sizeof (int) option return */
2607 		case SO_REUSEADDR:
2608 			*i1 = udp->udp_reuseaddr;
2609 			break;	/* goto sizeof (int) option return */
2610 		case SO_TYPE:
2611 			*i1 = SOCK_DGRAM;
2612 			break;	/* goto sizeof (int) option return */
2613 
2614 		/*
2615 		 * The following three items are available here,
2616 		 * but are only meaningful to IP.
2617 		 */
2618 		case SO_DONTROUTE:
2619 			*i1 = udp->udp_dontroute;
2620 			break;	/* goto sizeof (int) option return */
2621 		case SO_USELOOPBACK:
2622 			*i1 = udp->udp_useloopback;
2623 			break;	/* goto sizeof (int) option return */
2624 		case SO_BROADCAST:
2625 			*i1 = udp->udp_broadcast;
2626 			break;	/* goto sizeof (int) option return */
2627 
2628 		case SO_SNDBUF:
2629 			*i1 = q->q_hiwat;
2630 			break;	/* goto sizeof (int) option return */
2631 		case SO_RCVBUF:
2632 			*i1 = RD(q)->q_hiwat;
2633 			break;	/* goto sizeof (int) option return */
2634 		case SO_DGRAM_ERRIND:
2635 			*i1 = udp->udp_dgram_errind;
2636 			break;	/* goto sizeof (int) option return */
2637 		case SO_RECVUCRED:
2638 			*i1 = udp->udp_recvucred;
2639 			break;	/* goto sizeof (int) option return */
2640 		case SO_TIMESTAMP:
2641 			*i1 = udp->udp_timestamp;
2642 			break;	/* goto sizeof (int) option return */
2643 		case SO_ANON_MLP:
2644 			*i1 = connp->conn_anon_mlp;
2645 			break;	/* goto sizeof (int) option return */
2646 		case SO_MAC_EXEMPT:
2647 			*i1 = connp->conn_mac_exempt;
2648 			break;	/* goto sizeof (int) option return */
2649 		case SO_ALLZONES:
2650 			*i1 = connp->conn_allzones;
2651 			break;	/* goto sizeof (int) option return */
2652 		case SO_EXCLBIND:
2653 			*i1 = udp->udp_exclbind ? SO_EXCLBIND : 0;
2654 			break;
2655 		case SO_PROTOTYPE:
2656 			*i1 = IPPROTO_UDP;
2657 			break;
2658 		case SO_DOMAIN:
2659 			*i1 = udp->udp_family;
2660 			break;
2661 		default:
2662 			return (-1);
2663 		}
2664 		break;
2665 	case IPPROTO_IP:
2666 		if (udp->udp_family != AF_INET)
2667 			return (-1);
2668 		switch (name) {
2669 		case IP_OPTIONS:
2670 		case T_IP_OPTIONS:
2671 			len = udp->udp_ip_rcv_options_len - udp->udp_label_len;
2672 			if (len > 0) {
2673 				bcopy(udp->udp_ip_rcv_options +
2674 				    udp->udp_label_len, ptr, len);
2675 			}
2676 			return (len);
2677 		case IP_TOS:
2678 		case T_IP_TOS:
2679 			*i1 = (int)udp->udp_type_of_service;
2680 			break;	/* goto sizeof (int) option return */
2681 		case IP_TTL:
2682 			*i1 = (int)udp->udp_ttl;
2683 			break;	/* goto sizeof (int) option return */
2684 		case IP_DHCPINIT_IF:
2685 			return (-EINVAL);
2686 		case IP_NEXTHOP:
2687 		case IP_RECVPKTINFO:
2688 			/*
2689 			 * This also handles IP_PKTINFO.
2690 			 * IP_PKTINFO and IP_RECVPKTINFO have the same value.
2691 			 * Differentiation is based on the size of the argument
2692 			 * passed in.
2693 			 * This option is handled in IP which will return an
2694 			 * error for IP_PKTINFO as it's not supported as a
2695 			 * sticky option.
2696 			 */
2697 			return (-EINVAL);
2698 		case IP_MULTICAST_IF:
2699 			/* 0 address if not set */
2700 			*(ipaddr_t *)ptr = udp->udp_multicast_if_addr;
2701 			return (sizeof (ipaddr_t));
2702 		case IP_MULTICAST_TTL:
2703 			*(uchar_t *)ptr = udp->udp_multicast_ttl;
2704 			return (sizeof (uchar_t));
2705 		case IP_MULTICAST_LOOP:
2706 			*ptr = connp->conn_multicast_loop;
2707 			return (sizeof (uint8_t));
2708 		case IP_RECVOPTS:
2709 			*i1 = udp->udp_recvopts;
2710 			break;	/* goto sizeof (int) option return */
2711 		case IP_RECVDSTADDR:
2712 			*i1 = udp->udp_recvdstaddr;
2713 			break;	/* goto sizeof (int) option return */
2714 		case IP_RECVIF:
2715 			*i1 = udp->udp_recvif;
2716 			break;	/* goto sizeof (int) option return */
2717 		case IP_RECVSLLA:
2718 			*i1 = udp->udp_recvslla;
2719 			break;	/* goto sizeof (int) option return */
2720 		case IP_RECVTTL:
2721 			*i1 = udp->udp_recvttl;
2722 			break;	/* goto sizeof (int) option return */
2723 		case IP_ADD_MEMBERSHIP:
2724 		case IP_DROP_MEMBERSHIP:
2725 		case IP_BLOCK_SOURCE:
2726 		case IP_UNBLOCK_SOURCE:
2727 		case IP_ADD_SOURCE_MEMBERSHIP:
2728 		case IP_DROP_SOURCE_MEMBERSHIP:
2729 		case MCAST_JOIN_GROUP:
2730 		case MCAST_LEAVE_GROUP:
2731 		case MCAST_BLOCK_SOURCE:
2732 		case MCAST_UNBLOCK_SOURCE:
2733 		case MCAST_JOIN_SOURCE_GROUP:
2734 		case MCAST_LEAVE_SOURCE_GROUP:
2735 		case IP_DONTFAILOVER_IF:
2736 			/* cannot "get" the value for these */
2737 			return (-1);
2738 		case IP_BOUND_IF:
2739 			/* Zero if not set */
2740 			*i1 = udp->udp_bound_if;
2741 			break;	/* goto sizeof (int) option return */
2742 		case IP_UNSPEC_SRC:
2743 			*i1 = udp->udp_unspec_source;
2744 			break;	/* goto sizeof (int) option return */
2745 		case IP_BROADCAST_TTL:
2746 			*(uchar_t *)ptr = connp->conn_broadcast_ttl;
2747 			return (sizeof (uchar_t));
2748 		default:
2749 			return (-1);
2750 		}
2751 		break;
2752 	case IPPROTO_IPV6:
2753 		if (udp->udp_family != AF_INET6)
2754 			return (-1);
2755 		switch (name) {
2756 		case IPV6_UNICAST_HOPS:
2757 			*i1 = (unsigned int)udp->udp_ttl;
2758 			break;	/* goto sizeof (int) option return */
2759 		case IPV6_MULTICAST_IF:
2760 			/* 0 index if not set */
2761 			*i1 = udp->udp_multicast_if_index;
2762 			break;	/* goto sizeof (int) option return */
2763 		case IPV6_MULTICAST_HOPS:
2764 			*i1 = udp->udp_multicast_ttl;
2765 			break;	/* goto sizeof (int) option return */
2766 		case IPV6_MULTICAST_LOOP:
2767 			*i1 = connp->conn_multicast_loop;
2768 			break;	/* goto sizeof (int) option return */
2769 		case IPV6_JOIN_GROUP:
2770 		case IPV6_LEAVE_GROUP:
2771 		case MCAST_JOIN_GROUP:
2772 		case MCAST_LEAVE_GROUP:
2773 		case MCAST_BLOCK_SOURCE:
2774 		case MCAST_UNBLOCK_SOURCE:
2775 		case MCAST_JOIN_SOURCE_GROUP:
2776 		case MCAST_LEAVE_SOURCE_GROUP:
2777 			/* cannot "get" the value for these */
2778 			return (-1);
2779 		case IPV6_BOUND_IF:
2780 			/* Zero if not set */
2781 			*i1 = udp->udp_bound_if;
2782 			break;	/* goto sizeof (int) option return */
2783 		case IPV6_UNSPEC_SRC:
2784 			*i1 = udp->udp_unspec_source;
2785 			break;	/* goto sizeof (int) option return */
2786 		case IPV6_RECVPKTINFO:
2787 			*i1 = udp->udp_ip_recvpktinfo;
2788 			break;	/* goto sizeof (int) option return */
2789 		case IPV6_RECVTCLASS:
2790 			*i1 = udp->udp_ipv6_recvtclass;
2791 			break;	/* goto sizeof (int) option return */
2792 		case IPV6_RECVPATHMTU:
2793 			*i1 = udp->udp_ipv6_recvpathmtu;
2794 			break;	/* goto sizeof (int) option return */
2795 		case IPV6_RECVHOPLIMIT:
2796 			*i1 = udp->udp_ipv6_recvhoplimit;
2797 			break;	/* goto sizeof (int) option return */
2798 		case IPV6_RECVHOPOPTS:
2799 			*i1 = udp->udp_ipv6_recvhopopts;
2800 			break;	/* goto sizeof (int) option return */
2801 		case IPV6_RECVDSTOPTS:
2802 			*i1 = udp->udp_ipv6_recvdstopts;
2803 			break;	/* goto sizeof (int) option return */
2804 		case _OLD_IPV6_RECVDSTOPTS:
2805 			*i1 = udp->udp_old_ipv6_recvdstopts;
2806 			break;	/* goto sizeof (int) option return */
2807 		case IPV6_RECVRTHDRDSTOPTS:
2808 			*i1 = udp->udp_ipv6_recvrthdrdstopts;
2809 			break;	/* goto sizeof (int) option return */
2810 		case IPV6_RECVRTHDR:
2811 			*i1 = udp->udp_ipv6_recvrthdr;
2812 			break;	/* goto sizeof (int) option return */
2813 		case IPV6_PKTINFO: {
2814 			/* XXX assumes that caller has room for max size! */
2815 			struct in6_pktinfo *pkti;
2816 
2817 			pkti = (struct in6_pktinfo *)ptr;
2818 			if (ipp->ipp_fields & IPPF_IFINDEX)
2819 				pkti->ipi6_ifindex = ipp->ipp_ifindex;
2820 			else
2821 				pkti->ipi6_ifindex = 0;
2822 			if (ipp->ipp_fields & IPPF_ADDR)
2823 				pkti->ipi6_addr = ipp->ipp_addr;
2824 			else
2825 				pkti->ipi6_addr = ipv6_all_zeros;
2826 			return (sizeof (struct in6_pktinfo));
2827 		}
2828 		case IPV6_TCLASS:
2829 			if (ipp->ipp_fields & IPPF_TCLASS)
2830 				*i1 = ipp->ipp_tclass;
2831 			else
2832 				*i1 = IPV6_FLOW_TCLASS(
2833 				    IPV6_DEFAULT_VERS_AND_FLOW);
2834 			break;	/* goto sizeof (int) option return */
2835 		case IPV6_NEXTHOP: {
2836 			sin6_t *sin6 = (sin6_t *)ptr;
2837 
2838 			if (!(ipp->ipp_fields & IPPF_NEXTHOP))
2839 				return (0);
2840 			*sin6 = sin6_null;
2841 			sin6->sin6_family = AF_INET6;
2842 			sin6->sin6_addr = ipp->ipp_nexthop;
2843 			return (sizeof (sin6_t));
2844 		}
2845 		case IPV6_HOPOPTS:
2846 			if (!(ipp->ipp_fields & IPPF_HOPOPTS))
2847 				return (0);
2848 			if (ipp->ipp_hopoptslen <= udp->udp_label_len_v6)
2849 				return (0);
2850 			/*
2851 			 * The CIPSO/label option is added by the kernel and
2852 			 * the user is not usually aware of it.  We copy out
2853 			 * the hop-by-hop option that follows the label option.
2854 			 */
2855 			bcopy((char *)ipp->ipp_hopopts + udp->udp_label_len_v6,
2856 			    ptr, ipp->ipp_hopoptslen - udp->udp_label_len_v6);
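			/*
			 * With the label stripped, restore the original
			 * next-header byte and recompute the option length
			 * (in 8-octet units, excluding the first 8 octets).
			 */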
2857 			if (udp->udp_label_len_v6 > 0) {
2858 				ptr[0] = ((char *)ipp->ipp_hopopts)[0];
2859 				ptr[1] = (ipp->ipp_hopoptslen -
2860 				    udp->udp_label_len_v6 + 7) / 8 - 1;
2861 			}
2862 			return (ipp->ipp_hopoptslen - udp->udp_label_len_v6);
2863 		case IPV6_RTHDRDSTOPTS:
2864 			if (!(ipp->ipp_fields & IPPF_RTDSTOPTS))
2865 				return (0);
2866 			bcopy(ipp->ipp_rtdstopts, ptr, ipp->ipp_rtdstoptslen);
2867 			return (ipp->ipp_rtdstoptslen);
2868 		case IPV6_RTHDR:
2869 			if (!(ipp->ipp_fields & IPPF_RTHDR))
2870 				return (0);
2871 			bcopy(ipp->ipp_rthdr, ptr, ipp->ipp_rthdrlen);
2872 			return (ipp->ipp_rthdrlen);
2873 		case IPV6_DSTOPTS:
2874 			if (!(ipp->ipp_fields & IPPF_DSTOPTS))
2875 				return (0);
2876 			bcopy(ipp->ipp_dstopts, ptr, ipp->ipp_dstoptslen);
2877 			return (ipp->ipp_dstoptslen);
2878 		case IPV6_PATHMTU:
2879 			return (ip_fill_mtuinfo(&udp->udp_v6dst,
2880 			    udp->udp_dstport, (struct ip6_mtuinfo *)ptr,
2881 			    us->us_netstack));
2882 		default:
2883 			return (-1);
2884 		}
2885 		break;
2886 	case IPPROTO_UDP:
2887 		switch (name) {
2888 		case UDP_ANONPRIVBIND:
2889 			*i1 = udp->udp_anon_priv_bind;
2890 			break;
2891 		case UDP_EXCLBIND:
2892 			*i1 = udp->udp_exclbind ? UDP_EXCLBIND : 0;
2893 			break;
2894 		case UDP_RCVHDR:
2895 			*i1 = udp->udp_rcvhdr ? 1 : 0;
2896 			break;
2897 		case UDP_NAT_T_ENDPOINT:
2898 			*i1 = udp->udp_nat_t_endpoint;
2899 			break;
2900 		default:
2901 			return (-1);
2902 		}
2903 		break;
2904 	default:
2905 		return (-1);
2906 	}
2907 	return (sizeof (int));
2908 }
2909 
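/*
 * udp_opt_get_locked() wrapper that holds udp_rwlock as a reader for the
 * duration of the option retrieval.
 */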
2910 int
2911 udp_opt_get(queue_t *q, t_scalar_t level, t_scalar_t name, uchar_t *ptr)
2912 {
2913 	udp_t	*udp;
2914 	int	err;
2915 
2916 	udp = Q_TO_UDP(q);
2917 
2918 	rw_enter(&udp->udp_rwlock, RW_READER);
2919 	err = udp_opt_get_locked(q, level, name, ptr);
2920 	rw_exit(&udp->udp_rwlock);
2921 	return (err);
2922 }
2923 
2924 /*
2925  * This routine sets socket options.
2926  */
2927 /* ARGSUSED */
2928 int
2929 udp_opt_set_locked(queue_t *q, uint_t optset_context, int level,
2930     int name, uint_t inlen, uchar_t *invalp, uint_t *outlenp,
2931     uchar_t *outvalp, void *thisdg_attrs, cred_t *cr, mblk_t *mblk)
2932 {
2933 	udpattrs_t *attrs = thisdg_attrs;
2934 	int	*i1 = (int *)invalp;
2935 	boolean_t onoff = (*i1 == 0) ? 0 : 1;
2936 	boolean_t checkonly;
2937 	int	error;
2938 	conn_t	*connp;
2939 	udp_t	*udp;
2940 	uint_t	newlen;
2941 	udp_stack_t *us;
2942 	size_t	sth_wroff;
2943 
2944 	connp = Q_TO_CONN(q);
2945 	udp = connp->conn_udp;
2946 	us = udp->udp_us;
2947 
2948 	switch (optset_context) {
2949 	case SETFN_OPTCOM_CHECKONLY:
2950 		checkonly = B_TRUE;
2951 		/*
2952 		 * Note: This implies T_CHECK semantics for T_OPTCOM_REQ.
2953 		 * inlen != 0 means a value was supplied and
2954 		 * 	we have to "pretend" to set it.
2955 		 * inlen == 0 means there is no value part in the
2956 		 * 	T_CHECK request; the validation done elsewhere
2957 		 * is enough, so we just return here.
2958 		 */
2959 		if (inlen == 0) {
2960 			*outlenp = 0;
2961 			return (0);
2962 		}
2963 		break;
2964 	case SETFN_OPTCOM_NEGOTIATE:
2965 		checkonly = B_FALSE;
2966 		break;
2967 	case SETFN_UD_NEGOTIATE:
2968 	case SETFN_CONN_NEGOTIATE:
2969 		checkonly = B_FALSE;
2970 		/*
2971 		 * Negotiating local and "association-related" options
2972 		 * through T_UNITDATA_REQ.
2973 		 *
2974 		 * The following routine can filter out the ones we do not
2975 		 * want to be "set" this way.
2976 		 */
2977 		if (!udp_opt_allow_udr_set(level, name)) {
2978 			*outlenp = 0;
2979 			return (EINVAL);
2980 		}
2981 		break;
2982 	default:
2983 		/*
2984 		 * We should never get here
2985 		 */
2986 		*outlenp = 0;
2987 		return (EINVAL);
2988 	}
2989 
2990 	ASSERT((optset_context != SETFN_OPTCOM_CHECKONLY) ||
2991 	    (optset_context == SETFN_OPTCOM_CHECKONLY && inlen != 0));
2992 
2993 	/*
2994 	 * For fixed-length options, no sanity check
2995 	 * of the passed-in length is done. It is assumed the *_optcom_req()
2996 	 * routines do the right thing.
2997 	 */
2998 
2999 	switch (level) {
3000 	case SOL_SOCKET:
3001 		switch (name) {
3002 		case SO_REUSEADDR:
3003 			if (!checkonly)
3004 				udp->udp_reuseaddr = onoff;
3005 			break;
3006 		case SO_DEBUG:
3007 			if (!checkonly)
3008 				udp->udp_debug = onoff;
3009 			break;
3010 		/*
3011 		 * The following three items are available here,
3012 		 * but are only meaningful to IP.
3013 		 */
3014 		case SO_DONTROUTE:
3015 			if (!checkonly)
3016 				udp->udp_dontroute = onoff;
3017 			break;
3018 		case SO_USELOOPBACK:
3019 			if (!checkonly)
3020 				udp->udp_useloopback = onoff;
3021 			break;
3022 		case SO_BROADCAST:
3023 			if (!checkonly)
3024 				udp->udp_broadcast = onoff;
3025 			break;
3026 
3027 		case SO_SNDBUF:
3028 			if (*i1 > us->us_max_buf) {
3029 				*outlenp = 0;
3030 				return (ENOBUFS);
3031 			}
3032 			if (!checkonly) {
3033 				q->q_hiwat = *i1;
3034 			}
3035 			break;
3036 		case SO_RCVBUF:
3037 			if (*i1 > us->us_max_buf) {
3038 				*outlenp = 0;
3039 				return (ENOBUFS);
3040 			}
3041 			if (!checkonly) {
3042 				RD(q)->q_hiwat = *i1;
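				/*
				 * Drop udp_rwlock across the stream head
				 * update and reacquire it as a writer.
				 */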
3043 				rw_exit(&udp->udp_rwlock);
3044 				(void) mi_set_sth_hiwat(RD(q),
3045 				    udp_set_rcv_hiwat(udp, *i1));
3046 				rw_enter(&udp->udp_rwlock, RW_WRITER);
3047 			}
3048 			break;
3049 		case SO_DGRAM_ERRIND:
3050 			if (!checkonly)
3051 				udp->udp_dgram_errind = onoff;
3052 			break;
3053 		case SO_RECVUCRED:
3054 			if (!checkonly)
3055 				udp->udp_recvucred = onoff;
3056 			break;
3057 		case SO_ALLZONES:
3058 			/*
3059 			 * "soft" error (negative)
3060 			 * option not handled at this level
3061 			 * Do not modify *outlenp.
3062 			 */
3063 			return (-EINVAL);
3064 		case SO_TIMESTAMP:
3065 			if (!checkonly)
3066 				udp->udp_timestamp = onoff;
3067 			break;
3068 		case SO_ANON_MLP:
3069 			/* Pass option along to IP level for handling */
3070 			return (-EINVAL);
3071 		case SO_MAC_EXEMPT:
3072 			/* Pass option along to IP level for handling */
3073 			return (-EINVAL);
3074 		case SCM_UCRED: {
3075 			struct ucred_s *ucr;
3076 			cred_t *cr, *newcr;
3077 			ts_label_t *tsl;
3078 
3079 			/*
3080 			 * Only sockets that have proper privileges and are
3081 			 * bound to MLPs will have any other value here, so
3082 			 * this implicitly tests for privilege to set label.
3083 			 */
3084 			if (connp->conn_mlp_type == mlptSingle)
3085 				break;
3086 			ucr = (struct ucred_s *)invalp;
3087 			if (inlen != ucredsize ||
3088 			    ucr->uc_labeloff < sizeof (*ucr) ||
3089 			    ucr->uc_labeloff + sizeof (bslabel_t) > inlen)
3090 				return (EINVAL);
3091 			if (!checkonly) {
3092 				mblk_t *mb;
3093 
3094 				if (attrs == NULL ||
3095 				    (mb = attrs->udpattr_mb) == NULL)
3096 					return (EINVAL);
3097 				if ((cr = DB_CRED(mb)) == NULL)
3098 					cr = udp->udp_connp->conn_cred;
3099 				ASSERT(cr != NULL);
3100 				if ((tsl = crgetlabel(cr)) == NULL)
3101 					return (EINVAL);
3102 				newcr = copycred_from_bslabel(cr, UCLABEL(ucr),
3103 				    tsl->tsl_doi, KM_NOSLEEP);
3104 				if (newcr == NULL)
3105 					return (ENOSR);
3106 				mblk_setcred(mb, newcr);
3107 				attrs->udpattr_credset = B_TRUE;
3108 				crfree(newcr);
3109 			}
3110 			break;
3111 		}
3112 		case SO_EXCLBIND:
3113 			if (!checkonly)
3114 				udp->udp_exclbind = onoff;
3115 			break;
3116 		default:
3117 			*outlenp = 0;
3118 			return (EINVAL);
3119 		}
3120 		break;
3121 	case IPPROTO_IP:
3122 		if (udp->udp_family != AF_INET) {
3123 			*outlenp = 0;
3124 			return (ENOPROTOOPT);
3125 		}
3126 		switch (name) {
3127 		case IP_OPTIONS:
3128 		case T_IP_OPTIONS:
3129 			/* Save options for use by IP. */
3130 			newlen = inlen + udp->udp_label_len;
3131 			if ((inlen & 0x3) || newlen > IP_MAX_OPT_LENGTH) {
3132 				*outlenp = 0;
3133 				return (EINVAL);
3134 			}
3135 			if (checkonly)
3136 				break;
3137 
3138 			/*
3139 			 * Update the stored options taking into account
3140 			 * any CIPSO option which we should not overwrite.
3141 			 */
3142 			if (!tsol_option_set(&udp->udp_ip_snd_options,
3143 			    &udp->udp_ip_snd_options_len,
3144 			    udp->udp_label_len, invalp, inlen)) {
3145 				*outlenp = 0;
3146 				return (ENOMEM);
3147 			}
3148 
3149 			udp->udp_max_hdr_len = IP_SIMPLE_HDR_LENGTH +
3150 			    UDPH_SIZE + udp->udp_ip_snd_options_len;
3151 			sth_wroff = udp->udp_max_hdr_len + us->us_wroff_extra;
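			/*
			 * Update the stream head write offset with
			 * udp_rwlock dropped, then reacquire it as a writer.
			 */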
3152 			rw_exit(&udp->udp_rwlock);
3153 			(void) mi_set_sth_wroff(RD(q), sth_wroff);
3154 			rw_enter(&udp->udp_rwlock, RW_WRITER);
3155 			break;
3156 
3157 		case IP_TTL:
3158 			if (!checkonly) {
3159 				udp->udp_ttl = (uchar_t)*i1;
3160 			}
3161 			break;
3162 		case IP_TOS:
3163 		case T_IP_TOS:
3164 			if (!checkonly) {
3165 				udp->udp_type_of_service = (uchar_t)*i1;
3166 			}
3167 			break;
3168 		case IP_MULTICAST_IF: {
3169 			/*
3170 			 * TODO should check OPTMGMT reply and undo this if
3171 			 * there is an error.
3172 			 */
3173 			struct in_addr *inap = (struct in_addr *)invalp;
3174 			if (!checkonly) {
3175 				udp->udp_multicast_if_addr =
3176 				    inap->s_addr;
3177 			}
3178 			break;
3179 		}
3180 		case IP_MULTICAST_TTL:
3181 			if (!checkonly)
3182 				udp->udp_multicast_ttl = *invalp;
3183 			break;
3184 		case IP_MULTICAST_LOOP:
3185 			if (!checkonly)
3186 				connp->conn_multicast_loop = *invalp;
3187 			break;
3188 		case IP_RECVOPTS:
3189 			if (!checkonly)
3190 				udp->udp_recvopts = onoff;
3191 			break;
3192 		case IP_RECVDSTADDR:
3193 			if (!checkonly)
3194 				udp->udp_recvdstaddr = onoff;
3195 			break;
3196 		case IP_RECVIF:
3197 			if (!checkonly)
3198 				udp->udp_recvif = onoff;
3199 			break;
3200 		case IP_RECVSLLA:
3201 			if (!checkonly)
3202 				udp->udp_recvslla = onoff;
3203 			break;
3204 		case IP_RECVTTL:
3205 			if (!checkonly)
3206 				udp->udp_recvttl = onoff;
3207 			break;
3208 		case IP_PKTINFO: {
3209 			/*
3210 			 * This also handles IP_RECVPKTINFO.
3211 			 * IP_PKTINFO and IP_RECVPKTINFO have same value.
3212 			 * Differentiation is based on the size of the
3213 			 * argument passed in.
3214 			 */
3215 			struct in_pktinfo *pktinfop;
3216 			ip4_pkt_t *attr_pktinfop;
3217 
3218 			if (checkonly)
3219 				break;
3220 
3221 			if (inlen == sizeof (int)) {
3222 				/*
3223 				 * This is IP_RECVPKTINFO option.
3224 				 * Keep a local copy of whether this option is
3225 				 * set or not and pass it down to IP for
3226 				 * processing.
3227 				 */
3228 
3229 				udp->udp_ip_recvpktinfo = onoff;
3230 				return (-EINVAL);
3231 			}
3232 
3233 			if (attrs == NULL ||
3234 			    (attr_pktinfop = attrs->udpattr_ipp4) == NULL) {
3235 				/*
3236 				 * sticky option or no buffer to return
3237 				 * the results.
3238 				 */
3239 				return (EINVAL);
3240 			}
3241 
3242 			if (inlen != sizeof (struct in_pktinfo))
3243 				return (EINVAL);
3244 
3245 			pktinfop = (struct in_pktinfo *)invalp;
3246 
3247 			/*
3248 			 * At least one of the values should be specified
3249 			 */
3250 			if (pktinfop->ipi_ifindex == 0 &&
3251 			    pktinfop->ipi_spec_dst.s_addr == INADDR_ANY) {
3252 				return (EINVAL);
3253 			}
3254 
3255 			attr_pktinfop->ip4_addr = pktinfop->ipi_spec_dst.s_addr;
3256 			attr_pktinfop->ip4_ill_index = pktinfop->ipi_ifindex;
3257 
3258 			break;
3259 		}
3260 		case IP_ADD_MEMBERSHIP:
3261 		case IP_DROP_MEMBERSHIP:
3262 		case IP_BLOCK_SOURCE:
3263 		case IP_UNBLOCK_SOURCE:
3264 		case IP_ADD_SOURCE_MEMBERSHIP:
3265 		case IP_DROP_SOURCE_MEMBERSHIP:
3266 		case MCAST_JOIN_GROUP:
3267 		case MCAST_LEAVE_GROUP:
3268 		case MCAST_BLOCK_SOURCE:
3269 		case MCAST_UNBLOCK_SOURCE:
3270 		case MCAST_JOIN_SOURCE_GROUP:
3271 		case MCAST_LEAVE_SOURCE_GROUP:
3272 		case IP_SEC_OPT:
3273 		case IP_NEXTHOP:
3274 		case IP_DHCPINIT_IF:
3275 			/*
3276 			 * "soft" error (negative)
3277 			 * option not handled at this level
3278 			 * Do not modify *outlenp.
3279 			 */
3280 			return (-EINVAL);
3281 		case IP_BOUND_IF:
3282 			if (!checkonly)
3283 				udp->udp_bound_if = *i1;
3284 			break;
3285 		case IP_UNSPEC_SRC:
3286 			if (!checkonly)
3287 				udp->udp_unspec_source = onoff;
3288 			break;
3289 		case IP_BROADCAST_TTL:
3290 			if (!checkonly)
3291 				connp->conn_broadcast_ttl = *invalp;
3292 			break;
3293 		default:
3294 			*outlenp = 0;
3295 			return (EINVAL);
3296 		}
3297 		break;
3298 	case IPPROTO_IPV6: {
3299 		ip6_pkt_t		*ipp;
3300 		boolean_t		sticky;
3301 
3302 		if (udp->udp_family != AF_INET6) {
3303 			*outlenp = 0;
3304 			return (ENOPROTOOPT);
3305 		}
3306 		/*
3307 		 * Deal with both sticky options and ancillary data
3308 		 */
3309 		sticky = B_FALSE;
3310 		if (attrs == NULL || (ipp = attrs->udpattr_ipp6) ==
3311 		    NULL) {
3312 			/* sticky options, or none */
3313 			ipp = &udp->udp_sticky_ipp;
3314 			sticky = B_TRUE;
3315 		}
3316 
3317 		switch (name) {
3318 		case IPV6_MULTICAST_IF:
3319 			if (!checkonly)
3320 				udp->udp_multicast_if_index = *i1;
3321 			break;
3322 		case IPV6_UNICAST_HOPS:
3323 			/* -1 means use default */
3324 			if (*i1 < -1 || *i1 > IPV6_MAX_HOPS) {
3325 				*outlenp = 0;
3326 				return (EINVAL);
3327 			}
3328 			if (!checkonly) {
3329 				if (*i1 == -1) {
3330 					udp->udp_ttl = ipp->ipp_unicast_hops =
3331 					    us->us_ipv6_hoplimit;
3332 					ipp->ipp_fields &= ~IPPF_UNICAST_HOPS;
3333 					/* Pass modified value to IP. */
3334 					*i1 = udp->udp_ttl;
3335 				} else {
3336 					udp->udp_ttl = ipp->ipp_unicast_hops =
3337 					    (uint8_t)*i1;
3338 					ipp->ipp_fields |= IPPF_UNICAST_HOPS;
3339 				}
3340 				/* Rebuild the header template */
3341 				error = udp_build_hdrs(udp);
3342 				if (error != 0) {
3343 					*outlenp = 0;
3344 					return (error);
3345 				}
3346 			}
3347 			break;
3348 		case IPV6_MULTICAST_HOPS:
3349 			/* -1 means use default */
3350 			if (*i1 < -1 || *i1 > IPV6_MAX_HOPS) {
3351 				*outlenp = 0;
3352 				return (EINVAL);
3353 			}
3354 			if (!checkonly) {
3355 				if (*i1 == -1) {
3356 					udp->udp_multicast_ttl =
3357 					    ipp->ipp_multicast_hops =
3358 					    IP_DEFAULT_MULTICAST_TTL;
3359 					ipp->ipp_fields &= ~IPPF_MULTICAST_HOPS;
3360 					/* Pass modified value to IP. */
3361 					*i1 = udp->udp_multicast_ttl;
3362 				} else {
3363 					udp->udp_multicast_ttl =
3364 					    ipp->ipp_multicast_hops =
3365 					    (uint8_t)*i1;
3366 					ipp->ipp_fields |= IPPF_MULTICAST_HOPS;
3367 				}
3368 			}
3369 			break;
3370 		case IPV6_MULTICAST_LOOP:
3371 			if (*i1 != 0 && *i1 != 1) {
3372 				*outlenp = 0;
3373 				return (EINVAL);
3374 			}
3375 			if (!checkonly)
3376 				connp->conn_multicast_loop = *i1;
3377 			break;
3378 		case IPV6_JOIN_GROUP:
3379 		case IPV6_LEAVE_GROUP:
3380 		case MCAST_JOIN_GROUP:
3381 		case MCAST_LEAVE_GROUP:
3382 		case MCAST_BLOCK_SOURCE:
3383 		case MCAST_UNBLOCK_SOURCE:
3384 		case MCAST_JOIN_SOURCE_GROUP:
3385 		case MCAST_LEAVE_SOURCE_GROUP:
3386 			/*
3387 			 * "soft" error (negative)
3388 			 * option not handled at this level
3389 			 * Note: Do not modify *outlenp
3390 			 */
3391 			return (-EINVAL);
3392 		case IPV6_BOUND_IF:
3393 			if (!checkonly)
3394 				udp->udp_bound_if = *i1;
3395 			break;
3396 		case IPV6_UNSPEC_SRC:
3397 			if (!checkonly)
3398 				udp->udp_unspec_source = onoff;
3399 			break;
3400 		/*
3401 		 * Set boolean switches for ancillary data delivery
3402 		 */
3403 		case IPV6_RECVPKTINFO:
3404 			if (!checkonly)
3405 				udp->udp_ip_recvpktinfo = onoff;
3406 			break;
3407 		case IPV6_RECVTCLASS:
3408 			if (!checkonly) {
3409 				udp->udp_ipv6_recvtclass = onoff;
3410 			}
3411 			break;
3412 		case IPV6_RECVPATHMTU:
3413 			if (!checkonly) {
3414 				udp->udp_ipv6_recvpathmtu = onoff;
3415 			}
3416 			break;
3417 		case IPV6_RECVHOPLIMIT:
3418 			if (!checkonly)
3419 				udp->udp_ipv6_recvhoplimit = onoff;
3420 			break;
3421 		case IPV6_RECVHOPOPTS:
3422 			if (!checkonly)
3423 				udp->udp_ipv6_recvhopopts = onoff;
3424 			break;
3425 		case IPV6_RECVDSTOPTS:
3426 			if (!checkonly)
3427 				udp->udp_ipv6_recvdstopts = onoff;
3428 			break;
3429 		case _OLD_IPV6_RECVDSTOPTS:
3430 			if (!checkonly)
3431 				udp->udp_old_ipv6_recvdstopts = onoff;
3432 			break;
3433 		case IPV6_RECVRTHDRDSTOPTS:
3434 			if (!checkonly)
3435 				udp->udp_ipv6_recvrthdrdstopts = onoff;
3436 			break;
3437 		case IPV6_RECVRTHDR:
3438 			if (!checkonly)
3439 				udp->udp_ipv6_recvrthdr = onoff;
3440 			break;
3441 		/*
3442 		 * Set sticky options or ancillary data.
3443 		 * If sticky options, (re)build any extension headers
3444 		 * that might be needed as a result.
3445 		 */
3446 		case IPV6_PKTINFO:
3447 			/*
3448 			 * The source address and ifindex are verified
3449 			 * in ip_opt_set(). For ancillary data the
3450 			 * source address is checked in ip_wput_v6.
3451 			 */
3452 			if (inlen != 0 && inlen != sizeof (struct in6_pktinfo))
3453 				return (EINVAL);
3454 			if (checkonly)
3455 				break;
3456 
3457 			if (inlen == 0) {
3458 				ipp->ipp_fields &= ~(IPPF_IFINDEX|IPPF_ADDR);
3459 				ipp->ipp_sticky_ignored |=
3460 				    (IPPF_IFINDEX|IPPF_ADDR);
3461 			} else {
3462 				struct in6_pktinfo *pkti;
3463 
3464 				pkti = (struct in6_pktinfo *)invalp;
3465 				ipp->ipp_ifindex = pkti->ipi6_ifindex;
3466 				ipp->ipp_addr = pkti->ipi6_addr;
3467 				if (ipp->ipp_ifindex != 0)
3468 					ipp->ipp_fields |= IPPF_IFINDEX;
3469 				else
3470 					ipp->ipp_fields &= ~IPPF_IFINDEX;
3471 				if (!IN6_IS_ADDR_UNSPECIFIED(
3472 				    &ipp->ipp_addr))
3473 					ipp->ipp_fields |= IPPF_ADDR;
3474 				else
3475 					ipp->ipp_fields &= ~IPPF_ADDR;
3476 			}
3477 			if (sticky) {
3478 				error = udp_build_hdrs(udp);
3479 				if (error != 0)
3480 					return (error);
3481 			}
3482 			break;
3483 		case IPV6_HOPLIMIT:
3484 			if (sticky)
3485 				return (EINVAL);
3486 			if (inlen != 0 && inlen != sizeof (int))
3487 				return (EINVAL);
3488 			if (checkonly)
3489 				break;
3490 
3491 			if (inlen == 0) {
3492 				ipp->ipp_fields &= ~IPPF_HOPLIMIT;
3493 				ipp->ipp_sticky_ignored |= IPPF_HOPLIMIT;
3494 			} else {
3495 				if (*i1 > 255 || *i1 < -1)
3496 					return (EINVAL);
3497 				if (*i1 == -1)
3498 					ipp->ipp_hoplimit =
3499 					    us->us_ipv6_hoplimit;
3500 				else
3501 					ipp->ipp_hoplimit = *i1;
3502 				ipp->ipp_fields |= IPPF_HOPLIMIT;
3503 			}
3504 			break;
3505 		case IPV6_TCLASS:
3506 			if (inlen != 0 && inlen != sizeof (int))
3507 				return (EINVAL);
3508 			if (checkonly)
3509 				break;
3510 
3511 			if (inlen == 0) {
3512 				ipp->ipp_fields &= ~IPPF_TCLASS;
3513 				ipp->ipp_sticky_ignored |= IPPF_TCLASS;
3514 			} else {
3515 				if (*i1 > 255 || *i1 < -1)
3516 					return (EINVAL);
3517 				if (*i1 == -1)
3518 					ipp->ipp_tclass = 0;
3519 				else
3520 					ipp->ipp_tclass = *i1;
3521 				ipp->ipp_fields |= IPPF_TCLASS;
3522 			}
3523 			if (sticky) {
3524 				error = udp_build_hdrs(udp);
3525 				if (error != 0)
3526 					return (error);
3527 			}
3528 			break;
3529 		case IPV6_NEXTHOP:
3530 			/*
3531 			 * IP will verify that the nexthop is reachable
3532 			 * and fail for sticky options.
3533 			 */
3534 			if (inlen != 0 && inlen != sizeof (sin6_t))
3535 				return (EINVAL);
3536 			if (checkonly)
3537 				break;
3538 
3539 			if (inlen == 0) {
3540 				ipp->ipp_fields &= ~IPPF_NEXTHOP;
3541 				ipp->ipp_sticky_ignored |= IPPF_NEXTHOP;
3542 			} else {
3543 				sin6_t *sin6 = (sin6_t *)invalp;
3544 
3545 				if (sin6->sin6_family != AF_INET6)
3546 					return (EAFNOSUPPORT);
3547 				if (IN6_IS_ADDR_V4MAPPED(
3548 				    &sin6->sin6_addr))
3549 					return (EADDRNOTAVAIL);
3550 				ipp->ipp_nexthop = sin6->sin6_addr;
3551 				if (!IN6_IS_ADDR_UNSPECIFIED(
3552 				    &ipp->ipp_nexthop))
3553 					ipp->ipp_fields |= IPPF_NEXTHOP;
3554 				else
3555 					ipp->ipp_fields &= ~IPPF_NEXTHOP;
3556 			}
3557 			if (sticky) {
3558 				error = udp_build_hdrs(udp);
3559 				if (error != 0)
3560 					return (error);
3561 			}
3562 			break;
3563 		case IPV6_HOPOPTS: {
3564 			ip6_hbh_t *hopts = (ip6_hbh_t *)invalp;
3565 			/*
3566 			 * Sanity checks - minimum size, size a multiple of
3567 			 * eight bytes, and matching size passed in.
3568 			 */
3569 			if (inlen != 0 &&
3570 			    inlen != (8 * (hopts->ip6h_len + 1)))
3571 				return (EINVAL);
3572 
3573 			if (checkonly)
3574 				break;
3575 
3576 			error = optcom_pkt_set(invalp, inlen, sticky,
3577 			    (uchar_t **)&ipp->ipp_hopopts,
3578 			    &ipp->ipp_hopoptslen,
3579 			    sticky ? udp->udp_label_len_v6 : 0);
3580 			if (error != 0)
3581 				return (error);
3582 			if (ipp->ipp_hopoptslen == 0) {
3583 				ipp->ipp_fields &= ~IPPF_HOPOPTS;
3584 				ipp->ipp_sticky_ignored |= IPPF_HOPOPTS;
3585 			} else {
3586 				ipp->ipp_fields |= IPPF_HOPOPTS;
3587 			}
3588 			if (sticky) {
3589 				error = udp_build_hdrs(udp);
3590 				if (error != 0)
3591 					return (error);
3592 			}
3593 			break;
3594 		}
3595 		case IPV6_RTHDRDSTOPTS: {
3596 			ip6_dest_t *dopts = (ip6_dest_t *)invalp;
3597 
3598 			/*
3599 			 * Sanity checks - minimum size, size a multiple of
3600 			 * eight bytes, and matching size passed in.
3601 			 */
3602 			if (inlen != 0 &&
3603 			    inlen != (8 * (dopts->ip6d_len + 1)))
3604 				return (EINVAL);
3605 
3606 			if (checkonly)
3607 				break;
3608 
3609 			if (inlen == 0) {
3610 				if (sticky &&
3611 				    (ipp->ipp_fields & IPPF_RTDSTOPTS) != 0) {
3612 					kmem_free(ipp->ipp_rtdstopts,
3613 					    ipp->ipp_rtdstoptslen);
3614 					ipp->ipp_rtdstopts = NULL;
3615 					ipp->ipp_rtdstoptslen = 0;
3616 				}
3617 
3618 				ipp->ipp_fields &= ~IPPF_RTDSTOPTS;
3619 				ipp->ipp_sticky_ignored |= IPPF_RTDSTOPTS;
3620 			} else {
3621 				error = optcom_pkt_set(invalp, inlen, sticky,
3622 				    (uchar_t **)&ipp->ipp_rtdstopts,
3623 				    &ipp->ipp_rtdstoptslen, 0);
3624 				if (error != 0)
3625 					return (error);
3626 				ipp->ipp_fields |= IPPF_RTDSTOPTS;
3627 			}
3628 			if (sticky) {
3629 				error = udp_build_hdrs(udp);
3630 				if (error != 0)
3631 					return (error);
3632 			}
3633 			break;
3634 		}
3635 		case IPV6_DSTOPTS: {
3636 			ip6_dest_t *dopts = (ip6_dest_t *)invalp;
3637 
3638 			/*
3639 			 * Sanity checks - minimum size, size a multiple of
3640 			 * eight bytes, and matching size passed in.
3641 			 */
3642 			if (inlen != 0 &&
3643 			    inlen != (8 * (dopts->ip6d_len + 1)))
3644 				return (EINVAL);
3645 
3646 			if (checkonly)
3647 				break;
3648 
3649 			if (inlen == 0) {
3650 				if (sticky &&
3651 				    (ipp->ipp_fields & IPPF_DSTOPTS) != 0) {
3652 					kmem_free(ipp->ipp_dstopts,
3653 					    ipp->ipp_dstoptslen);
3654 					ipp->ipp_dstopts = NULL;
3655 					ipp->ipp_dstoptslen = 0;
3656 				}
3657 				ipp->ipp_fields &= ~IPPF_DSTOPTS;
3658 				ipp->ipp_sticky_ignored |= IPPF_DSTOPTS;
3659 			} else {
3660 				error = optcom_pkt_set(invalp, inlen, sticky,
3661 				    (uchar_t **)&ipp->ipp_dstopts,
3662 				    &ipp->ipp_dstoptslen, 0);
3663 				if (error != 0)
3664 					return (error);
3665 				ipp->ipp_fields |= IPPF_DSTOPTS;
3666 			}
3667 			if (sticky) {
3668 				error = udp_build_hdrs(udp);
3669 				if (error != 0)
3670 					return (error);
3671 			}
3672 			break;
3673 		}
3674 		case IPV6_RTHDR: {
3675 			ip6_rthdr_t *rt = (ip6_rthdr_t *)invalp;
3676 
3677 			/*
3678 			 * Sanity checks - minimum size, size a multiple of
3679 			 * eight bytes, and matching size passed in.
3680 			 */
3681 			if (inlen != 0 &&
3682 			    inlen != (8 * (rt->ip6r_len + 1)))
3683 				return (EINVAL);
3684 
3685 			if (checkonly)
3686 				break;
3687 
3688 			if (inlen == 0) {
3689 				if (sticky &&
3690 				    (ipp->ipp_fields & IPPF_RTHDR) != 0) {
3691 					kmem_free(ipp->ipp_rthdr,
3692 					    ipp->ipp_rthdrlen);
3693 					ipp->ipp_rthdr = NULL;
3694 					ipp->ipp_rthdrlen = 0;
3695 				}
3696 				ipp->ipp_fields &= ~IPPF_RTHDR;
3697 				ipp->ipp_sticky_ignored |= IPPF_RTHDR;
3698 			} else {
3699 				error = optcom_pkt_set(invalp, inlen, sticky,
3700 				    (uchar_t **)&ipp->ipp_rthdr,
3701 				    &ipp->ipp_rthdrlen, 0);
3702 				if (error != 0)
3703 					return (error);
3704 				ipp->ipp_fields |= IPPF_RTHDR;
3705 			}
3706 			if (sticky) {
3707 				error = udp_build_hdrs(udp);
3708 				if (error != 0)
3709 					return (error);
3710 			}
3711 			break;
3712 		}
3713 
3714 		case IPV6_DONTFRAG:
3715 			if (checkonly)
3716 				break;
3717 
3718 			if (onoff) {
3719 				ipp->ipp_fields |= IPPF_DONTFRAG;
3720 			} else {
3721 				ipp->ipp_fields &= ~IPPF_DONTFRAG;
3722 			}
3723 			break;
3724 
3725 		case IPV6_USE_MIN_MTU:
3726 			if (inlen != sizeof (int))
3727 				return (EINVAL);
3728 
3729 			if (*i1 < -1 || *i1 > 1)
3730 				return (EINVAL);
3731 
3732 			if (checkonly)
3733 				break;
3734 
3735 			ipp->ipp_fields |= IPPF_USE_MIN_MTU;
3736 			ipp->ipp_use_min_mtu = *i1;
3737 			break;
3738 
3739 		case IPV6_BOUND_PIF:
3740 		case IPV6_SEC_OPT:
3741 		case IPV6_DONTFAILOVER_IF:
3742 		case IPV6_SRC_PREFERENCES:
3743 		case IPV6_V6ONLY:
3744 			/* Handled at the IP level */
3745 			return (-EINVAL);
3746 		default:
3747 			*outlenp = 0;
3748 			return (EINVAL);
3749 		}
3750 		break;
3751 		}		/* end IPPROTO_IPV6 */
3752 	case IPPROTO_UDP:
3753 		switch (name) {
3754 		case UDP_ANONPRIVBIND:
3755 			if ((error = secpolicy_net_privaddr(cr, 0,
3756 			    IPPROTO_UDP)) != 0) {
3757 				*outlenp = 0;
3758 				return (error);
3759 			}
3760 			if (!checkonly) {
3761 				udp->udp_anon_priv_bind = onoff;
3762 			}
3763 			break;
3764 		case UDP_EXCLBIND:
3765 			if (!checkonly)
3766 				udp->udp_exclbind = onoff;
3767 			break;
3768 		case UDP_RCVHDR:
3769 			if (!checkonly)
3770 				udp->udp_rcvhdr = onoff;
3771 			break;
3772 		case UDP_NAT_T_ENDPOINT:
3773 			if ((error = secpolicy_ip_config(cr, B_FALSE)) != 0) {
3774 				*outlenp = 0;
3775 				return (error);
3776 			}
3777 
3778 			/*
3779 			 * Use udp_family instead so we can avoid ambiguities
3780 			 * with AF_INET6 sockets that may switch from IPv4
3781 			 * to IPv6.
3782 			 */
3783 			if (udp->udp_family != AF_INET) {
3784 				*outlenp = 0;
3785 				return (EAFNOSUPPORT);
3786 			}
3787 
3788 			if (!checkonly) {
3789 				udp->udp_nat_t_endpoint = onoff;
3790 
3791 				udp->udp_max_hdr_len = IP_SIMPLE_HDR_LENGTH +
3792 				    UDPH_SIZE + udp->udp_ip_snd_options_len;
3793 
3794 				/* Also, adjust wroff */
3795 				if (onoff) {
3796 					udp->udp_max_hdr_len +=
3797 					    sizeof (uint32_t);
3798 				}
3799 				(void) mi_set_sth_wroff(RD(q),
3800 				    udp->udp_max_hdr_len + us->us_wroff_extra);
3801 			}
3802 			break;
3803 		default:
3804 			*outlenp = 0;
3805 			return (EINVAL);
3806 		}
3807 		break;
3808 	default:
3809 		*outlenp = 0;
3810 		return (EINVAL);
3811 	}
3812 	/*
3813 	 * Common case of OK return with outval same as inval.
3814 	 */
3815 	if (invalp != outvalp) {
3816 		/* don't trust bcopy for identical src/dst */
3817 		(void) bcopy(invalp, outvalp, inlen);
3818 	}
3819 	*outlenp = inlen;
3820 	return (0);
3821 }
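/*
 * Illustrative note (an addition to this listing, not original code): the
 * IPPROTO_IPV6 cases above accept both "sticky" options set with
 * setsockopt() and per-datagram ancillary data carried by sendmsg().  A
 * minimal userland sketch of the sticky path, assuming an AF_INET6 UDP
 * socket 'fd' and an interface index 'ifidx' (both hypothetical):
 *
 *	struct in6_pktinfo pkti;
 *
 *	bzero(&pkti, sizeof (pkti));
 *	pkti.ipi6_ifindex = ifidx;
 *	(void) setsockopt(fd, IPPROTO_IPV6, IPV6_PKTINFO,
 *	    &pkti, sizeof (pkti));
 *
 * A sticky set like this lands in the IPV6_PKTINFO case above and triggers
 * udp_build_hdrs(); passing a zero-length value clears the sticky option
 * (the inlen == 0 path).  The same structure supplied as IPPROTO_IPV6 /
 * IPV6_PKTINFO ancillary data applies to a single datagram only and has its
 * source address checked in ip_wput_v6.
 */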
3822 
3823 int
3824 udp_opt_set(queue_t *q, uint_t optset_context, int level,
3825     int name, uint_t inlen, uchar_t *invalp, uint_t *outlenp,
3826     uchar_t *outvalp, void *thisdg_attrs, cred_t *cr, mblk_t *mblk)
3827 {
3828 	udp_t	*udp;
3829 	int	err;
3830 
3831 	udp = Q_TO_UDP(q);
3832 
3833 	rw_enter(&udp->udp_rwlock, RW_WRITER);
3834 	err = udp_opt_set_locked(q, optset_context, level, name, inlen, invalp,
3835 	    outlenp, outvalp, thisdg_attrs, cr, mblk);
3836 	rw_exit(&udp->udp_rwlock);
3837 	return (err);
3838 }
3839 
3840 /*
3841  * Update udp_sticky_hdrs based on udp_sticky_ipp, udp_v6src, and udp_ttl.
3842  * The headers include ip6i_t (if needed), ip6_t, any sticky extension
3843  * headers, and the udp header.
3844  * Returns failure if can't allocate memory.
3845  */
3846 static int
3847 udp_build_hdrs(udp_t *udp)
3848 {
3849 	udp_stack_t *us = udp->udp_us;
3850 	uchar_t	*hdrs;
3851 	uint_t	hdrs_len;
3852 	ip6_t	*ip6h;
3853 	ip6i_t	*ip6i;
3854 	udpha_t	*udpha;
3855 	ip6_pkt_t *ipp = &udp->udp_sticky_ipp;
3856 	size_t	sth_wroff;
3857 
3858 	ASSERT(RW_WRITE_HELD(&udp->udp_rwlock));
3859 	hdrs_len = ip_total_hdrs_len_v6(ipp) + UDPH_SIZE;
3860 	ASSERT(hdrs_len != 0);
3861 	if (hdrs_len != udp->udp_sticky_hdrs_len) {
3862 		/* Need to reallocate */
3863 		hdrs = kmem_alloc(hdrs_len, KM_NOSLEEP);
3864 		if (hdrs == NULL)
3865 			return (ENOMEM);
3866 
3867 		if (udp->udp_sticky_hdrs_len != 0) {
3868 			kmem_free(udp->udp_sticky_hdrs,
3869 			    udp->udp_sticky_hdrs_len);
3870 		}
3871 		udp->udp_sticky_hdrs = hdrs;
3872 		udp->udp_sticky_hdrs_len = hdrs_len;
3873 	}
3874 	ip_build_hdrs_v6(udp->udp_sticky_hdrs,
3875 	    udp->udp_sticky_hdrs_len - UDPH_SIZE, ipp, IPPROTO_UDP);
3876 
3877 	/* Set header fields not in ipp */
3878 	if (ipp->ipp_fields & IPPF_HAS_IP6I) {
3879 		ip6i = (ip6i_t *)udp->udp_sticky_hdrs;
3880 		ip6h = (ip6_t *)&ip6i[1];
3881 	} else {
3882 		ip6h = (ip6_t *)udp->udp_sticky_hdrs;
3883 	}
3884 
3885 	if (!(ipp->ipp_fields & IPPF_ADDR))
3886 		ip6h->ip6_src = udp->udp_v6src;
3887 
3888 	udpha = (udpha_t *)(udp->udp_sticky_hdrs + hdrs_len - UDPH_SIZE);
3889 	udpha->uha_src_port = udp->udp_port;
3890 
3891 	/* Try to get everything in a single mblk */
3892 	if (hdrs_len > udp->udp_max_hdr_len) {
3893 		udp->udp_max_hdr_len = hdrs_len;
3894 		sth_wroff = udp->udp_max_hdr_len + us->us_wroff_extra;
3895 		rw_exit(&udp->udp_rwlock);
3896 		(void) mi_set_sth_wroff(udp->udp_connp->conn_rq, sth_wroff);
3897 		rw_enter(&udp->udp_rwlock, RW_WRITER);
3898 	}
3899 	return (0);
3900 }
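/*
 * Illustrative layout of the buffer built by udp_build_hdrs() (a sketch
 * derived from the code above, not part of the original source):
 *
 *	udp_sticky_hdrs
 *	+------------------+-------+---------------------------+---------+
 *	| ip6i_t (only if  | ip6_t | sticky extension headers  | udpha_t |
 *	| IPPF_HAS_IP6I)   |       | described by ipp          |         |
 *	+------------------+-------+---------------------------+---------+
 *	|<-------- ip_total_hdrs_len_v6(ipp) ------------------>|UDPH_SIZE|
 *
 * hdrs_len is the sum of the two spans.  The source address defaults to
 * udp_v6src unless a sticky IPV6_PKTINFO address (IPPF_ADDR) was supplied,
 * and the UDP source port is filled in from udp_port.
 */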
3901 
3902 /*
3903  * This routine retrieves the value of an ND variable in a udpparam_t
3904  * structure.  It is called through nd_getset when a user reads the
3905  * variable.
3906  */
3907 /* ARGSUSED */
3908 static int
3909 udp_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr)
3910 {
3911 	udpparam_t *udppa = (udpparam_t *)cp;
3912 
3913 	(void) mi_mpprintf(mp, "%d", udppa->udp_param_value);
3914 	return (0);
3915 }
3916 
3917 /*
3918  * Walk through the param array specified registering each element with the
3919  * named dispatch (ND) handler.
3920  */
3921 static boolean_t
3922 udp_param_register(IDP *ndp, udpparam_t *udppa, int cnt)
3923 {
3924 	for (; cnt-- > 0; udppa++) {
3925 		if (udppa->udp_param_name && udppa->udp_param_name[0]) {
3926 			if (!nd_load(ndp, udppa->udp_param_name,
3927 			    udp_param_get, udp_param_set,
3928 			    (caddr_t)udppa)) {
3929 				nd_free(ndp);
3930 				return (B_FALSE);
3931 			}
3932 		}
3933 	}
3934 	if (!nd_load(ndp, "udp_extra_priv_ports",
3935 	    udp_extra_priv_ports_get, NULL, NULL)) {
3936 		nd_free(ndp);
3937 		return (B_FALSE);
3938 	}
3939 	if (!nd_load(ndp, "udp_extra_priv_ports_add",
3940 	    NULL, udp_extra_priv_ports_add, NULL)) {
3941 		nd_free(ndp);
3942 		return (B_FALSE);
3943 	}
3944 	if (!nd_load(ndp, "udp_extra_priv_ports_del",
3945 	    NULL, udp_extra_priv_ports_del, NULL)) {
3946 		nd_free(ndp);
3947 		return (B_FALSE);
3948 	}
3949 	if (!nd_load(ndp, "udp_status", udp_status_report, NULL,
3950 	    NULL)) {
3951 		nd_free(ndp);
3952 		return (B_FALSE);
3953 	}
3954 	if (!nd_load(ndp, "udp_bind_hash", udp_bind_hash_report, NULL,
3955 	    NULL)) {
3956 		nd_free(ndp);
3957 		return (B_FALSE);
3958 	}
3959 	return (B_TRUE);
3960 }
3961 
3962 /* This routine sets an ND variable in a udpparam_t structure. */
3963 /* ARGSUSED */
3964 static int
3965 udp_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *cr)
3966 {
3967 	long		new_value;
3968 	udpparam_t	*udppa = (udpparam_t *)cp;
3969 
3970 	/*
3971 	 * Fail the request if the new value does not lie within the
3972 	 * required bounds.
3973 	 */
3974 	if (ddi_strtol(value, NULL, 10, &new_value) != 0 ||
3975 	    new_value < udppa->udp_param_min ||
3976 	    new_value > udppa->udp_param_max) {
3977 		return (EINVAL);
3978 	}
3979 
3980 	/* Set the new value */
3981 	udppa->udp_param_value = new_value;
3982 	return (0);
3983 }
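/*
 * Illustrative note (not part of the original source): the ND variables
 * registered by udp_param_register() are normally accessed with ndd(1M),
 * for example (port number hypothetical):
 *
 *	# ndd -get /dev/udp udp_status
 *	# ndd -set /dev/udp udp_extra_priv_ports_add 2049
 *
 * A get is serviced by udp_param_get() (or the dedicated _get handler),
 * and a set is range-checked against udp_param_min/udp_param_max in
 * udp_param_set() above.
 */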
3984 
3985 /*
3986  * Copy hop-by-hop option from ipp->ipp_hopopts to the buffer provided (with
3987  * T_opthdr) and return the number of bytes copied.  'dbuf' may be NULL to
3988  * just count the length needed for allocation.  If 'dbuf' is non-NULL,
3989  * then it's assumed to have been allocated large enough.
3990  *
3991  * Returns zero if trimming of the security option causes all options to go
3992  * away.
3993  */
3994 static size_t
3995 copy_hop_opts(const ip6_pkt_t *ipp, uchar_t *dbuf)
3996 {
3997 	struct T_opthdr *toh;
3998 	size_t hol = ipp->ipp_hopoptslen;
3999 	ip6_hbh_t *dstopt = NULL;
4000 	const ip6_hbh_t *srcopt = ipp->ipp_hopopts;
4001 	size_t tlen, olen, plen;
4002 	boolean_t deleting;
4003 	const struct ip6_opt *sopt, *lastpad;
4004 	struct ip6_opt *dopt;
4005 
4006 	if ((toh = (struct T_opthdr *)dbuf) != NULL) {
4007 		toh->level = IPPROTO_IPV6;
4008 		toh->name = IPV6_HOPOPTS;
4009 		toh->status = 0;
4010 		dstopt = (ip6_hbh_t *)(toh + 1);
4011 	}
4012 
4013 	/*
4014 	 * If labeling is enabled, then skip the label option
4015 	 * but get other options if there are any.
4016 	 */
4017 	if (is_system_labeled()) {
4018 		dopt = NULL;
4019 		if (dstopt != NULL) {
4020 			/* will fill in ip6h_len later */
4021 			dstopt->ip6h_nxt = srcopt->ip6h_nxt;
4022 			dopt = (struct ip6_opt *)(dstopt + 1);
4023 		}
4024 		sopt = (const struct ip6_opt *)(srcopt + 1);
4025 		hol -= sizeof (*srcopt);
4026 		tlen = sizeof (*dstopt);
4027 		lastpad = NULL;
4028 		deleting = B_FALSE;
4029 		/*
4030 		 * This loop finds the first (lastpad pointer) of any number of
4031 		 * pads that precede the security option, then treats the
4032 		 * security option as though it were a pad, and then finds the
4033 		 * next non-pad option (or end of list).
4034 		 *
4035 		 * It then treats the entire block as one big pad.  To preserve
4036 		 * alignment of any options that follow, or just the end of the
4037 		 * list, it computes a minimal new padding size that keeps the
4038 		 * same alignment for the next option.
4039 		 *
4040 		 * If it encounters just a sequence of pads with no security
4041 		 * option, those are copied as-is rather than collapsed.
4042 		 *
4043 		 * Note that to handle the end of list case, the code makes one
4044 		 * loop with 'hol' set to zero.
4045 		 */
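		/*
		 * Illustrative arithmetic (an added sketch, not original
		 * source): suppose a 12-byte label option is being deleted
		 * and no pads precede it.  When the next real option (or
		 * the end of the list) is reached,
		 *
		 *	plen = 12 & 7 = 4
		 *
		 * so a 4-byte PadN (2-byte header plus 2 zero bytes) is
		 * written in place of the deleted 12 bytes, keeping the
		 * following option at the same offset modulo 8.
		 */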
4046 		for (;;) {
4047 			if (hol > 0) {
4048 				if (sopt->ip6o_type == IP6OPT_PAD1) {
4049 					if (lastpad == NULL)
4050 						lastpad = sopt;
4051 					sopt = (const struct ip6_opt *)
4052 					    &sopt->ip6o_len;
4053 					hol--;
4054 					continue;
4055 				}
4056 				olen = sopt->ip6o_len + sizeof (*sopt);
4057 				if (olen > hol)
4058 					olen = hol;
4059 				if (sopt->ip6o_type == IP6OPT_PADN ||
4060 				    sopt->ip6o_type == ip6opt_ls) {
4061 					if (sopt->ip6o_type == ip6opt_ls)
4062 						deleting = B_TRUE;
4063 					if (lastpad == NULL)
4064 						lastpad = sopt;
4065 					sopt = (const struct ip6_opt *)
4066 					    ((const char *)sopt + olen);
4067 					hol -= olen;
4068 					continue;
4069 				}
4070 			} else {
4071 				/* if nothing was copied at all, then delete */
4072 				if (tlen == sizeof (*dstopt))
4073 					return (0);
4074 				/* last pass; pick up any trailing padding */
4075 				olen = 0;
4076 			}
4077 			if (deleting) {
4078 				/*
4079 				 * compute aligning effect of deleted material
4080 				 * to reproduce with pad.
4081 				 */
4082 				plen = ((const char *)sopt -
4083 				    (const char *)lastpad) & 7;
4084 				tlen += plen;
4085 				if (dopt != NULL) {
4086 					if (plen == 1) {
4087 						dopt->ip6o_type = IP6OPT_PAD1;
4088 					} else if (plen > 1) {
4089 						plen -= sizeof (*dopt);
4090 						dopt->ip6o_type = IP6OPT_PADN;
4091 						dopt->ip6o_len = plen;
4092 						if (plen > 0)
4093 							bzero(dopt + 1, plen);
4094 					}
4095 					dopt = (struct ip6_opt *)
4096 					    ((char *)dopt + plen);
4097 				}
4098 				deleting = B_FALSE;
4099 				lastpad = NULL;
4100 			}
4101 			/* if there's uncopied padding, then copy that now */
4102 			if (lastpad != NULL) {
4103 				olen += (const char *)sopt -
4104 				    (const char *)lastpad;
4105 				sopt = lastpad;
4106 				lastpad = NULL;
4107 			}
4108 			if (dopt != NULL && olen > 0) {
4109 				bcopy(sopt, dopt, olen);
4110 				dopt = (struct ip6_opt *)((char *)dopt + olen);
4111 			}
4112 			if (hol == 0)
4113 				break;
4114 			tlen += olen;
4115 			sopt = (const struct ip6_opt *)
4116 			    ((const char *)sopt + olen);
4117 			hol -= olen;
4118 		}
4119 		/* go back and patch up the length value, rounded upward */
4120 		if (dstopt != NULL)
4121 			dstopt->ip6h_len = (tlen - 1) >> 3;
4122 	} else {
4123 		tlen = hol;
4124 		if (dstopt != NULL)
4125 			bcopy(srcopt, dstopt, hol);
4126 	}
4127 
4128 	tlen += sizeof (*toh);
4129 	if (toh != NULL)
4130 		toh->len = tlen;
4131 
4132 	return (tlen);
4133 }
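/*
 * Illustrative calling pattern (a sketch of how udp_input() below uses this
 * function): copy_hop_opts() is called twice, once with a NULL buffer to
 * size the option and once to fill it in:
 *
 *	size_t hlen;
 *
 *	hlen = copy_hop_opts(&ipp, NULL);	(sizing pass)
 *	if (hlen == 0)
 *		ipp.ipp_fields &= ~IPPF_HOPOPTS;	(nothing survived)
 *	...
 *	hlen = copy_hop_opts(&ipp, dstopt);	(fill pass)
 *	dstopt += hlen;
 */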
4134 
4135 /*
4136  * Update udp_ip_rcv_options from the packet.
4137  * Called when options are received, and also when no options are received
4138  * but udp_ip_rcv_options_len shows previously recorded options.
4139  */
4140 static void
4141 udp_save_ip_rcv_opt(udp_t *udp, void *opt, int opt_len)
4142 {
4143 	/* Save the options if any */
4144 	if (opt_len > 0) {
4145 		if (opt_len > udp->udp_ip_rcv_options_len) {
4146 			/* Need to allocate larger buffer */
4147 			if (udp->udp_ip_rcv_options_len != 0)
4148 				mi_free((char *)udp->udp_ip_rcv_options);
4149 			udp->udp_ip_rcv_options_len = 0;
4150 			udp->udp_ip_rcv_options =
4151 			    (uchar_t *)mi_alloc(opt_len, BPRI_HI);
4152 			if (udp->udp_ip_rcv_options != NULL)
4153 				udp->udp_ip_rcv_options_len = opt_len;
4154 		}
4155 		if (udp->udp_ip_rcv_options_len != 0) {
4156 			bcopy(opt, udp->udp_ip_rcv_options, opt_len);
4157 			/* Adjust length if we are reusing the space */
4158 			udp->udp_ip_rcv_options_len = opt_len;
4159 		}
4160 	} else if (udp->udp_ip_rcv_options_len != 0) {
4161 		/* Clear out previously recorded options */
4162 		mi_free((char *)udp->udp_ip_rcv_options);
4163 		udp->udp_ip_rcv_options = NULL;
4164 		udp->udp_ip_rcv_options_len = 0;
4165 	}
4166 }
4167 
4168 /* ARGSUSED2 */
4169 static void
4170 udp_input(void *arg1, mblk_t *mp, void *arg2)
4171 {
4172 	conn_t *connp = (conn_t *)arg1;
4173 	struct T_unitdata_ind	*tudi;
4174 	uchar_t			*rptr;		/* Pointer to IP header */
4175 	int			hdr_length;	/* Length of IP+UDP headers */
4176 	int			opt_len;
4177 	int			udi_size;	/* Size of T_unitdata_ind */
4178 	int			mp_len;
4179 	udp_t			*udp;
4180 	udpha_t			*udpha;
4181 	int			ipversion;
4182 	ip6_pkt_t		ipp;
4183 	ip6_t			*ip6h;
4184 	ip6i_t			*ip6i;
4185 	mblk_t			*mp1;
4186 	mblk_t			*options_mp = NULL;
4187 	ip_pktinfo_t		*pinfo = NULL;
4188 	cred_t			*cr = NULL;
4189 	pid_t			cpid;
4190 	uint32_t		udp_ip_rcv_options_len;
4191 	udp_bits_t		udp_bits;
4192 	cred_t			*rcr = connp->conn_cred;
4193 	udp_stack_t *us;
4194 
4195 	ASSERT(connp->conn_flags & IPCL_UDPCONN);
4196 
4197 	udp = connp->conn_udp;
4198 	us = udp->udp_us;
4199 	rptr = mp->b_rptr;
4200 	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_CTL);
4201 	ASSERT(OK_32PTR(rptr));
4202 
4203 	/*
4204 	 * IP should have prepended the options data in an M_CTL.
4205 	 * Check the M_CTL "type" to make sure we are not here because of
4206 	 * a valid ICMP message.
4207 	 */
4208 	if (DB_TYPE(mp) == M_CTL) {
4209 		if (MBLKL(mp) == sizeof (ip_pktinfo_t) &&
4210 		    ((ip_pktinfo_t *)mp->b_rptr)->ip_pkt_ulp_type ==
4211 		    IN_PKTINFO) {
4212 			/*
4213 			 * IP_RECVIF, IP_RECVSLLA, or IPF_RECVADDR information
4214 			 * has been prepended to the packet by IP.  We need to
4215 			 * extract the mblk and adjust the rptr.
4216 			 */
4217 			pinfo = (ip_pktinfo_t *)mp->b_rptr;
4218 			options_mp = mp;
4219 			mp = mp->b_cont;
4220 			rptr = mp->b_rptr;
4221 			UDP_STAT(us, udp_in_pktinfo);
4222 		} else {
4223 			/*
4224 			 * ICMP messages.
4225 			 */
4226 			udp_icmp_error(connp->conn_rq, mp);
4227 			return;
4228 		}
4229 	}
4230 
4231 	mp_len = msgdsize(mp);
4232 	/*
4233 	 * This is the inbound data path.
4234 	 * First, we check to make sure the IP version number is correct,
4235 	 * and then pull the IP and UDP headers into the first mblk.
4236 	 */
4237 
4238 	/* Initialize regardless if ipversion is IPv4 or IPv6 */
4239 	ipp.ipp_fields = 0;
4240 
4241 	ipversion = IPH_HDR_VERSION(rptr);
4242 
4243 	rw_enter(&udp->udp_rwlock, RW_READER);
4244 	udp_ip_rcv_options_len = udp->udp_ip_rcv_options_len;
4245 	udp_bits = udp->udp_bits;
4246 	rw_exit(&udp->udp_rwlock);
4247 
4248 	switch (ipversion) {
4249 	case IPV4_VERSION:
4250 		ASSERT(MBLKL(mp) >= sizeof (ipha_t));
4251 		ASSERT(((ipha_t *)rptr)->ipha_protocol == IPPROTO_UDP);
4252 		hdr_length = IPH_HDR_LENGTH(rptr) + UDPH_SIZE;
4253 		opt_len = hdr_length - (IP_SIMPLE_HDR_LENGTH + UDPH_SIZE);
4254 		if ((opt_len > 0 || udp_ip_rcv_options_len > 0) &&
4255 		    udp->udp_family == AF_INET) {
4256 			/*
4257 			 * Record/update udp_ip_rcv_options with the lock
4258 			 * held. Not needed for AF_INET6 sockets
4259 			 * since they don't support a getsockopt of IP_OPTIONS.
4260 			 */
4261 			rw_enter(&udp->udp_rwlock, RW_WRITER);
4262 			udp_save_ip_rcv_opt(udp, rptr + IP_SIMPLE_HDR_LENGTH,
4263 			    opt_len);
4264 			rw_exit(&udp->udp_rwlock);
4265 		}
4266 		/* Handle IPV6_RECVPKTINFO even for IPv4 packet. */
4267 		if ((udp->udp_family == AF_INET6) && (pinfo != NULL) &&
4268 		    udp->udp_ip_recvpktinfo) {
4269 			if (pinfo->ip_pkt_flags & IPF_RECVIF) {
4270 				ipp.ipp_fields |= IPPF_IFINDEX;
4271 				ipp.ipp_ifindex = pinfo->ip_pkt_ifindex;
4272 			}
4273 		}
4274 		break;
4275 	case IPV6_VERSION:
4276 		/*
4277 		 * IPv6 packets can only be received by applications
4278 		 * that are prepared to receive IPv6 addresses.
4279 		 * The IP fanout must ensure this.
4280 		 */
4281 		ASSERT(udp->udp_family == AF_INET6);
4282 
4283 		ip6h = (ip6_t *)rptr;
4284 		ASSERT((uchar_t *)&ip6h[1] <= mp->b_wptr);
4285 
4286 		if (ip6h->ip6_nxt != IPPROTO_UDP) {
4287 			uint8_t nexthdrp;
4288 			/* Look for ifindex information */
4289 			if (ip6h->ip6_nxt == IPPROTO_RAW) {
4290 				ip6i = (ip6i_t *)ip6h;
4291 				if ((uchar_t *)&ip6i[1] > mp->b_wptr)
4292 					goto tossit;
4293 
4294 				if (ip6i->ip6i_flags & IP6I_IFINDEX) {
4295 					ASSERT(ip6i->ip6i_ifindex != 0);
4296 					ipp.ipp_fields |= IPPF_IFINDEX;
4297 					ipp.ipp_ifindex = ip6i->ip6i_ifindex;
4298 				}
4299 				rptr = (uchar_t *)&ip6i[1];
4300 				mp->b_rptr = rptr;
4301 				if (rptr == mp->b_wptr) {
4302 					mp1 = mp->b_cont;
4303 					freeb(mp);
4304 					mp = mp1;
4305 					rptr = mp->b_rptr;
4306 				}
4307 				if (MBLKL(mp) < (IPV6_HDR_LEN + UDPH_SIZE))
4308 					goto tossit;
4309 				ip6h = (ip6_t *)rptr;
4310 				mp_len = msgdsize(mp);
4311 			}
4312 			/*
4313 			 * Find any potentially interesting extension headers
4314 			 * as well as the length of the IPv6 + extension
4315 			 * headers.
4316 			 */
4317 			hdr_length = ip_find_hdr_v6(mp, ip6h, &ipp, &nexthdrp) +
4318 			    UDPH_SIZE;
4319 			ASSERT(nexthdrp == IPPROTO_UDP);
4320 		} else {
4321 			hdr_length = IPV6_HDR_LEN + UDPH_SIZE;
4322 			ip6i = NULL;
4323 		}
4324 		break;
4325 	default:
4326 		ASSERT(0);
4327 	}
4328 
4329 	/*
4330 	 * IP inspected the UDP header, so all of it must be in the mblk.
4331 	 * The UDP length check is performed for both IPv6 and IPv4 packets
4332 	 * to verify that the packet size specified by the header matches
4333 	 * the physical size of the packet.
4334 	 * FIXME? Didn't IP already check this?
4335 	 */
4336 	udpha = (udpha_t *)(rptr + (hdr_length - UDPH_SIZE));
4337 	if ((MBLKL(mp) < hdr_length) ||
4338 	    (mp_len != (ntohs(udpha->uha_length) + hdr_length - UDPH_SIZE))) {
4339 		goto tossit;
4340 	}
4341 
4342 
4343 	/* Walk past the headers unless UDP_RCVHDR was set. */
4344 	if (!udp_bits.udpb_rcvhdr) {
4345 		mp->b_rptr = rptr + hdr_length;
4346 		mp_len -= hdr_length;
4347 	}
4348 
4349 	/*
4350 	 * This is the inbound data path.  Packets are passed upstream as
4351 	 * T_UNITDATA_IND messages with full IP headers still attached.
4352 	 */
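	/*
	 * Illustrative layout of the M_PROTO message assembled below (an
	 * added sketch, sizes not to scale):
	 *
	 *	+----------------+--------------+---------------------------+
	 *	| T_unitdata_ind | sin_t/sin6_t | zero or more T_opthdr     |
	 *	| (SRC_*, OPT_*) | source addr  | entries plus option data  |
	 *	+----------------+--------------+---------------------------+
	 *	                 ^SRC_offset    ^OPT_offset    OPT_length-->
	 *
	 * udi_size is first accumulated over every enabled option and then
	 * decremented as each T_opthdr is written; it must reach zero.
	 */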
4353 	if (udp->udp_family == AF_INET) {
4354 		sin_t *sin;
4355 
4356 		ASSERT(IPH_HDR_VERSION((ipha_t *)rptr) == IPV4_VERSION);
4357 
4358 		/*
4359 		 * Normally we only send up the source address.
4360 		 * If IP_RECVDSTADDR is set we include the destination IP
4361 		 * address as an option. With IP_RECVOPTS we include all
4362 		 * the IP options.
4363 		 */
4364 		udi_size = sizeof (struct T_unitdata_ind) + sizeof (sin_t);
4365 		if (udp_bits.udpb_recvdstaddr) {
4366 			udi_size += sizeof (struct T_opthdr) +
4367 			    sizeof (struct in_addr);
4368 			UDP_STAT(us, udp_in_recvdstaddr);
4369 		}
4370 
4371 		if (udp_bits.udpb_ip_recvpktinfo && (pinfo != NULL) &&
4372 		    (pinfo->ip_pkt_flags & IPF_RECVADDR)) {
4373 			udi_size += sizeof (struct T_opthdr) +
4374 			    sizeof (struct in_pktinfo);
4375 			UDP_STAT(us, udp_ip_rcvpktinfo);
4376 		}
4377 
4378 		if ((udp_bits.udpb_recvopts) && opt_len > 0) {
4379 			udi_size += sizeof (struct T_opthdr) + opt_len;
4380 			UDP_STAT(us, udp_in_recvopts);
4381 		}
4382 
4383 		/*
4384 		 * If IP_RECVSLLA or IP_RECVIF is set, allocate
4385 		 * space accordingly.
4386 		 */
4387 		if ((udp_bits.udpb_recvif) && (pinfo != NULL) &&
4388 		    (pinfo->ip_pkt_flags & IPF_RECVIF)) {
4389 			udi_size += sizeof (struct T_opthdr) + sizeof (uint_t);
4390 			UDP_STAT(us, udp_in_recvif);
4391 		}
4392 
4393 		if ((udp_bits.udpb_recvslla) && (pinfo != NULL) &&
4394 		    (pinfo->ip_pkt_flags & IPF_RECVSLLA)) {
4395 			udi_size += sizeof (struct T_opthdr) +
4396 			    sizeof (struct sockaddr_dl);
4397 			UDP_STAT(us, udp_in_recvslla);
4398 		}
4399 
4400 		if ((udp_bits.udpb_recvucred) &&
4401 		    (cr = DB_CRED(mp)) != NULL) {
4402 			udi_size += sizeof (struct T_opthdr) + ucredsize;
4403 			cpid = DB_CPID(mp);
4404 			UDP_STAT(us, udp_in_recvucred);
4405 		}
4406 
4407 		/*
4408 		 * If SO_TIMESTAMP is set, allocate an appropriately sized
4409 		 * buffer. Since gethrestime() expects a pointer aligned
4410 		 * argument, we allocate space necessary for extra
4411 		 * alignment (even though it might not be used).
4412 		 */
4413 		if (udp_bits.udpb_timestamp) {
4414 			udi_size += sizeof (struct T_opthdr) +
4415 			    sizeof (timestruc_t) + _POINTER_ALIGNMENT;
4416 			UDP_STAT(us, udp_in_timestamp);
4417 		}
4418 
4419 		/*
4420 		 * If IP_RECVTTL is set, allocate an appropriately sized buffer.
4421 		 */
4422 		if (udp_bits.udpb_recvttl) {
4423 			udi_size += sizeof (struct T_opthdr) + sizeof (uint8_t);
4424 			UDP_STAT(us, udp_in_recvttl);
4425 		}
4426 
4427 		/* Allocate a message block for the T_UNITDATA_IND structure. */
4428 		mp1 = allocb(udi_size, BPRI_MED);
4429 		if (mp1 == NULL) {
4430 			freemsg(mp);
4431 			if (options_mp != NULL)
4432 				freeb(options_mp);
4433 			BUMP_MIB(&us->us_udp_mib, udpInErrors);
4434 			return;
4435 		}
4436 		mp1->b_cont = mp;
4437 		mp = mp1;
4438 		mp->b_datap->db_type = M_PROTO;
4439 		tudi = (struct T_unitdata_ind *)mp->b_rptr;
4440 		mp->b_wptr = (uchar_t *)tudi + udi_size;
4441 		tudi->PRIM_type = T_UNITDATA_IND;
4442 		tudi->SRC_length = sizeof (sin_t);
4443 		tudi->SRC_offset = sizeof (struct T_unitdata_ind);
4444 		tudi->OPT_offset = sizeof (struct T_unitdata_ind) +
4445 		    sizeof (sin_t);
4446 		udi_size -= (sizeof (struct T_unitdata_ind) + sizeof (sin_t));
4447 		tudi->OPT_length = udi_size;
4448 		sin = (sin_t *)&tudi[1];
4449 		sin->sin_addr.s_addr = ((ipha_t *)rptr)->ipha_src;
4450 		sin->sin_port =	udpha->uha_src_port;
4451 		sin->sin_family = udp->udp_family;
4452 		*(uint32_t *)&sin->sin_zero[0] = 0;
4453 		*(uint32_t *)&sin->sin_zero[4] = 0;
4454 
4455 		/*
4456 		 * Add options if IP_RECVDSTADDR, IP_RECVIF, IP_RECVSLLA,
4457 		 * IP_RECVTTL or any other receive-side option has been set.
4458 		 */
4459 		if (udi_size != 0) {
4460 			/*
4461 			 * Copy in destination address before options to avoid
4462 			 * any padding issues.
4463 			 */
4464 			char *dstopt;
4465 
4466 			dstopt = (char *)&sin[1];
4467 			if (udp_bits.udpb_recvdstaddr) {
4468 				struct T_opthdr *toh;
4469 				ipaddr_t *dstptr;
4470 
4471 				toh = (struct T_opthdr *)dstopt;
4472 				toh->level = IPPROTO_IP;
4473 				toh->name = IP_RECVDSTADDR;
4474 				toh->len = sizeof (struct T_opthdr) +
4475 				    sizeof (ipaddr_t);
4476 				toh->status = 0;
4477 				dstopt += sizeof (struct T_opthdr);
4478 				dstptr = (ipaddr_t *)dstopt;
4479 				*dstptr = ((ipha_t *)rptr)->ipha_dst;
4480 				dstopt += sizeof (ipaddr_t);
4481 				udi_size -= toh->len;
4482 			}
4483 
4484 			if (udp_bits.udpb_recvopts && opt_len > 0) {
4485 				struct T_opthdr *toh;
4486 
4487 				toh = (struct T_opthdr *)dstopt;
4488 				toh->level = IPPROTO_IP;
4489 				toh->name = IP_RECVOPTS;
4490 				toh->len = sizeof (struct T_opthdr) + opt_len;
4491 				toh->status = 0;
4492 				dstopt += sizeof (struct T_opthdr);
4493 				bcopy(rptr + IP_SIMPLE_HDR_LENGTH, dstopt,
4494 				    opt_len);
4495 				dstopt += opt_len;
4496 				udi_size -= toh->len;
4497 			}
4498 
4499 			if ((udp_bits.udpb_ip_recvpktinfo) && (pinfo != NULL) &&
4500 			    (pinfo->ip_pkt_flags & IPF_RECVADDR)) {
4501 				struct T_opthdr *toh;
4502 				struct in_pktinfo *pktinfop;
4503 
4504 				toh = (struct T_opthdr *)dstopt;
4505 				toh->level = IPPROTO_IP;
4506 				toh->name = IP_PKTINFO;
4507 				toh->len = sizeof (struct T_opthdr) +
4508 				    sizeof (*pktinfop);
4509 				toh->status = 0;
4510 				dstopt += sizeof (struct T_opthdr);
4511 				pktinfop = (struct in_pktinfo *)dstopt;
4512 				pktinfop->ipi_ifindex = pinfo->ip_pkt_ifindex;
4513 				pktinfop->ipi_spec_dst =
4514 				    pinfo->ip_pkt_match_addr;
4515 				pktinfop->ipi_addr.s_addr =
4516 				    ((ipha_t *)rptr)->ipha_dst;
4517 
4518 				dstopt += sizeof (struct in_pktinfo);
4519 				udi_size -= toh->len;
4520 			}
4521 
4522 			if ((udp_bits.udpb_recvslla) && (pinfo != NULL) &&
4523 			    (pinfo->ip_pkt_flags & IPF_RECVSLLA)) {
4524 
4525 				struct T_opthdr *toh;
4526 				struct sockaddr_dl	*dstptr;
4527 
4528 				toh = (struct T_opthdr *)dstopt;
4529 				toh->level = IPPROTO_IP;
4530 				toh->name = IP_RECVSLLA;
4531 				toh->len = sizeof (struct T_opthdr) +
4532 				    sizeof (struct sockaddr_dl);
4533 				toh->status = 0;
4534 				dstopt += sizeof (struct T_opthdr);
4535 				dstptr = (struct sockaddr_dl *)dstopt;
4536 				bcopy(&pinfo->ip_pkt_slla, dstptr,
4537 				    sizeof (struct sockaddr_dl));
4538 				dstopt += sizeof (struct sockaddr_dl);
4539 				udi_size -= toh->len;
4540 			}
4541 
4542 			if ((udp_bits.udpb_recvif) && (pinfo != NULL) &&
4543 			    (pinfo->ip_pkt_flags & IPF_RECVIF)) {
4544 
4545 				struct T_opthdr *toh;
4546 				uint_t		*dstptr;
4547 
4548 				toh = (struct T_opthdr *)dstopt;
4549 				toh->level = IPPROTO_IP;
4550 				toh->name = IP_RECVIF;
4551 				toh->len = sizeof (struct T_opthdr) +
4552 				    sizeof (uint_t);
4553 				toh->status = 0;
4554 				dstopt += sizeof (struct T_opthdr);
4555 				dstptr = (uint_t *)dstopt;
4556 				*dstptr = pinfo->ip_pkt_ifindex;
4557 				dstopt += sizeof (uint_t);
4558 				udi_size -= toh->len;
4559 			}
4560 
4561 			if (cr != NULL) {
4562 				struct T_opthdr *toh;
4563 
4564 				toh = (struct T_opthdr *)dstopt;
4565 				toh->level = SOL_SOCKET;
4566 				toh->name = SCM_UCRED;
4567 				toh->len = sizeof (struct T_opthdr) + ucredsize;
4568 				toh->status = 0;
4569 				dstopt += sizeof (struct T_opthdr);
4570 				(void) cred2ucred(cr, cpid, dstopt, rcr);
4571 				dstopt += ucredsize;
4572 				udi_size -= toh->len;
4573 			}
4574 
4575 			if (udp_bits.udpb_timestamp) {
4576 				struct	T_opthdr *toh;
4577 
4578 				toh = (struct T_opthdr *)dstopt;
4579 				toh->level = SOL_SOCKET;
4580 				toh->name = SCM_TIMESTAMP;
4581 				toh->len = sizeof (struct T_opthdr) +
4582 				    sizeof (timestruc_t) + _POINTER_ALIGNMENT;
4583 				toh->status = 0;
4584 				dstopt += sizeof (struct T_opthdr);
4585 				/* Align for gethrestime() */
4586 				dstopt = (char *)P2ROUNDUP((intptr_t)dstopt,
4587 				    sizeof (intptr_t));
4588 				gethrestime((timestruc_t *)dstopt);
4589 				dstopt = (char *)toh + toh->len;
4590 				udi_size -= toh->len;
4591 			}
4592 
4593 			/*
4594 			 * CAUTION:
4595 			 * Due to alignment issues, processing of
4596 			 * the IP_RECVTTL option must always come
4597 			 * last.  Adding any option processing
4598 			 * after this point will cause an
4599 			 * alignment panic.
4600 			 */
4601 			if (udp_bits.udpb_recvttl) {
4602 				struct	T_opthdr *toh;
4603 				uint8_t	*dstptr;
4604 
4605 				toh = (struct T_opthdr *)dstopt;
4606 				toh->level = IPPROTO_IP;
4607 				toh->name = IP_RECVTTL;
4608 				toh->len = sizeof (struct T_opthdr) +
4609 				    sizeof (uint8_t);
4610 				toh->status = 0;
4611 				dstopt += sizeof (struct T_opthdr);
4612 				dstptr = (uint8_t *)dstopt;
4613 				*dstptr = ((ipha_t *)rptr)->ipha_ttl;
4614 				dstopt += sizeof (uint8_t);
4615 				udi_size -= toh->len;
4616 			}
4617 
4618 			/* Consumed all of allocated space */
4619 			ASSERT(udi_size == 0);
4620 		}
4621 	} else {
4622 		sin6_t *sin6;
4623 
4624 		/*
4625 		 * Handle both IPv4 and IPv6 packets for IPv6 sockets.
4626 		 *
4627 		 * Normally we only send up the address.  If receipt of any
4628 		 * optional receive-side information is enabled, we also send
4629 		 * that up as options.
4630 		 */
4631 		udi_size = sizeof (struct T_unitdata_ind) + sizeof (sin6_t);
4632 
4633 		if (ipp.ipp_fields & (IPPF_HOPOPTS|IPPF_DSTOPTS|IPPF_RTDSTOPTS|
4634 		    IPPF_RTHDR|IPPF_IFINDEX)) {
4635 			if ((udp_bits.udpb_ipv6_recvhopopts) &&
4636 			    (ipp.ipp_fields & IPPF_HOPOPTS)) {
4637 				size_t hlen;
4638 
4639 				UDP_STAT(us, udp_in_recvhopopts);
4640 				hlen = copy_hop_opts(&ipp, NULL);
4641 				if (hlen == 0)
4642 					ipp.ipp_fields &= ~IPPF_HOPOPTS;
4643 				udi_size += hlen;
4644 			}
4645 			if (((udp_bits.udpb_ipv6_recvdstopts) ||
4646 			    udp_bits.udpb_old_ipv6_recvdstopts) &&
4647 			    (ipp.ipp_fields & IPPF_DSTOPTS)) {
4648 				udi_size += sizeof (struct T_opthdr) +
4649 				    ipp.ipp_dstoptslen;
4650 				UDP_STAT(us, udp_in_recvdstopts);
4651 			}
4652 			if ((((udp_bits.udpb_ipv6_recvdstopts) &&
4653 			    udp_bits.udpb_ipv6_recvrthdr &&
4654 			    (ipp.ipp_fields & IPPF_RTHDR)) ||
4655 			    (udp_bits.udpb_ipv6_recvrthdrdstopts)) &&
4656 			    (ipp.ipp_fields & IPPF_RTDSTOPTS)) {
4657 				udi_size += sizeof (struct T_opthdr) +
4658 				    ipp.ipp_rtdstoptslen;
4659 				UDP_STAT(us, udp_in_recvrtdstopts);
4660 			}
4661 			if ((udp_bits.udpb_ipv6_recvrthdr) &&
4662 			    (ipp.ipp_fields & IPPF_RTHDR)) {
4663 				udi_size += sizeof (struct T_opthdr) +
4664 				    ipp.ipp_rthdrlen;
4665 				UDP_STAT(us, udp_in_recvrthdr);
4666 			}
4667 			if ((udp_bits.udpb_ip_recvpktinfo) &&
4668 			    (ipp.ipp_fields & IPPF_IFINDEX)) {
4669 				udi_size += sizeof (struct T_opthdr) +
4670 				    sizeof (struct in6_pktinfo);
4671 				UDP_STAT(us, udp_in_recvpktinfo);
4672 			}
4673 
4674 		}
4675 		if ((udp_bits.udpb_recvucred) &&
4676 		    (cr = DB_CRED(mp)) != NULL) {
4677 			udi_size += sizeof (struct T_opthdr) + ucredsize;
4678 			cpid = DB_CPID(mp);
4679 			UDP_STAT(us, udp_in_recvucred);
4680 		}
4681 
4682 		/*
4683 		 * If SO_TIMESTAMP is set, allocate an appropriately sized
4684 		 * buffer. Since gethrestime() expects a pointer aligned
4685 		 * argument, we allocate space necessary for extra
4686 		 * alignment (even though it might not be used).
4687 		 */
4688 		if (udp_bits.udpb_timestamp) {
4689 			udi_size += sizeof (struct T_opthdr) +
4690 			    sizeof (timestruc_t) + _POINTER_ALIGNMENT;
4691 			UDP_STAT(us, udp_in_timestamp);
4692 		}
4693 
4694 		if (udp_bits.udpb_ipv6_recvhoplimit) {
4695 			udi_size += sizeof (struct T_opthdr) + sizeof (int);
4696 			UDP_STAT(us, udp_in_recvhoplimit);
4697 		}
4698 
4699 		if (udp_bits.udpb_ipv6_recvtclass) {
4700 			udi_size += sizeof (struct T_opthdr) + sizeof (int);
4701 			UDP_STAT(us, udp_in_recvtclass);
4702 		}
4703 
4704 		mp1 = allocb(udi_size, BPRI_MED);
4705 		if (mp1 == NULL) {
4706 			freemsg(mp);
4707 			if (options_mp != NULL)
4708 				freeb(options_mp);
4709 			BUMP_MIB(&us->us_udp_mib, udpInErrors);
4710 			return;
4711 		}
4712 		mp1->b_cont = mp;
4713 		mp = mp1;
4714 		mp->b_datap->db_type = M_PROTO;
4715 		tudi = (struct T_unitdata_ind *)mp->b_rptr;
4716 		mp->b_wptr = (uchar_t *)tudi + udi_size;
4717 		tudi->PRIM_type = T_UNITDATA_IND;
4718 		tudi->SRC_length = sizeof (sin6_t);
4719 		tudi->SRC_offset = sizeof (struct T_unitdata_ind);
4720 		tudi->OPT_offset = sizeof (struct T_unitdata_ind) +
4721 		    sizeof (sin6_t);
4722 		udi_size -= (sizeof (struct T_unitdata_ind) + sizeof (sin6_t));
4723 		tudi->OPT_length = udi_size;
4724 		sin6 = (sin6_t *)&tudi[1];
4725 		if (ipversion == IPV4_VERSION) {
4726 			in6_addr_t v6dst;
4727 
4728 			IN6_IPADDR_TO_V4MAPPED(((ipha_t *)rptr)->ipha_src,
4729 			    &sin6->sin6_addr);
4730 			IN6_IPADDR_TO_V4MAPPED(((ipha_t *)rptr)->ipha_dst,
4731 			    &v6dst);
4732 			sin6->sin6_flowinfo = 0;
4733 			sin6->sin6_scope_id = 0;
4734 			sin6->__sin6_src_id = ip_srcid_find_addr(&v6dst,
4735 			    connp->conn_zoneid, us->us_netstack);
4736 		} else {
4737 			sin6->sin6_addr = ip6h->ip6_src;
4738 			/* No sin6_flowinfo per API */
4739 			sin6->sin6_flowinfo = 0;
4740 			/* For link-scope source pass up scope id */
4741 			if ((ipp.ipp_fields & IPPF_IFINDEX) &&
4742 			    IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src))
4743 				sin6->sin6_scope_id = ipp.ipp_ifindex;
4744 			else
4745 				sin6->sin6_scope_id = 0;
4746 			sin6->__sin6_src_id = ip_srcid_find_addr(
4747 			    &ip6h->ip6_dst, connp->conn_zoneid,
4748 			    us->us_netstack);
4749 		}
4750 		sin6->sin6_port = udpha->uha_src_port;
4751 		sin6->sin6_family = udp->udp_family;
4752 
4753 		if (udi_size != 0) {
4754 			uchar_t *dstopt;
4755 
4756 			dstopt = (uchar_t *)&sin6[1];
4757 			if ((udp_bits.udpb_ip_recvpktinfo) &&
4758 			    (ipp.ipp_fields & IPPF_IFINDEX)) {
4759 				struct T_opthdr *toh;
4760 				struct in6_pktinfo *pkti;
4761 
4762 				toh = (struct T_opthdr *)dstopt;
4763 				toh->level = IPPROTO_IPV6;
4764 				toh->name = IPV6_PKTINFO;
4765 				toh->len = sizeof (struct T_opthdr) +
4766 				    sizeof (*pkti);
4767 				toh->status = 0;
4768 				dstopt += sizeof (struct T_opthdr);
4769 				pkti = (struct in6_pktinfo *)dstopt;
4770 				if (ipversion == IPV6_VERSION)
4771 					pkti->ipi6_addr = ip6h->ip6_dst;
4772 				else
4773 					IN6_IPADDR_TO_V4MAPPED(
4774 					    ((ipha_t *)rptr)->ipha_dst,
4775 					    &pkti->ipi6_addr);
4776 				pkti->ipi6_ifindex = ipp.ipp_ifindex;
4777 				dstopt += sizeof (*pkti);
4778 				udi_size -= toh->len;
4779 			}
4780 			if (udp_bits.udpb_ipv6_recvhoplimit) {
4781 				struct T_opthdr *toh;
4782 
4783 				toh = (struct T_opthdr *)dstopt;
4784 				toh->level = IPPROTO_IPV6;
4785 				toh->name = IPV6_HOPLIMIT;
4786 				toh->len = sizeof (struct T_opthdr) +
4787 				    sizeof (uint_t);
4788 				toh->status = 0;
4789 				dstopt += sizeof (struct T_opthdr);
4790 				if (ipversion == IPV6_VERSION)
4791 					*(uint_t *)dstopt = ip6h->ip6_hops;
4792 				else
4793 					*(uint_t *)dstopt =
4794 					    ((ipha_t *)rptr)->ipha_ttl;
4795 				dstopt += sizeof (uint_t);
4796 				udi_size -= toh->len;
4797 			}
4798 			if (udp_bits.udpb_ipv6_recvtclass) {
4799 				struct T_opthdr *toh;
4800 
4801 				toh = (struct T_opthdr *)dstopt;
4802 				toh->level = IPPROTO_IPV6;
4803 				toh->name = IPV6_TCLASS;
4804 				toh->len = sizeof (struct T_opthdr) +
4805 				    sizeof (uint_t);
4806 				toh->status = 0;
4807 				dstopt += sizeof (struct T_opthdr);
4808 				if (ipversion == IPV6_VERSION) {
4809 					*(uint_t *)dstopt =
4810 					    IPV6_FLOW_TCLASS(ip6h->ip6_flow);
4811 				} else {
4812 					ipha_t *ipha = (ipha_t *)rptr;
4813 					*(uint_t *)dstopt =
4814 					    ipha->ipha_type_of_service;
4815 				}
4816 				dstopt += sizeof (uint_t);
4817 				udi_size -= toh->len;
4818 			}
4819 			if ((udp_bits.udpb_ipv6_recvhopopts) &&
4820 			    (ipp.ipp_fields & IPPF_HOPOPTS)) {
4821 				size_t hlen;
4822 
4823 				hlen = copy_hop_opts(&ipp, dstopt);
4824 				dstopt += hlen;
4825 				udi_size -= hlen;
4826 			}
4827 			if ((udp_bits.udpb_ipv6_recvdstopts) &&
4828 			    (udp_bits.udpb_ipv6_recvrthdr) &&
4829 			    (ipp.ipp_fields & IPPF_RTHDR) &&
4830 			    (ipp.ipp_fields & IPPF_RTDSTOPTS)) {
4831 				struct T_opthdr *toh;
4832 
4833 				toh = (struct T_opthdr *)dstopt;
4834 				toh->level = IPPROTO_IPV6;
4835 				toh->name = IPV6_DSTOPTS;
4836 				toh->len = sizeof (struct T_opthdr) +
4837 				    ipp.ipp_rtdstoptslen;
4838 				toh->status = 0;
4839 				dstopt += sizeof (struct T_opthdr);
4840 				bcopy(ipp.ipp_rtdstopts, dstopt,
4841 				    ipp.ipp_rtdstoptslen);
4842 				dstopt += ipp.ipp_rtdstoptslen;
4843 				udi_size -= toh->len;
4844 			}
4845 			if ((udp_bits.udpb_ipv6_recvrthdr) &&
4846 			    (ipp.ipp_fields & IPPF_RTHDR)) {
4847 				struct T_opthdr *toh;
4848 
4849 				toh = (struct T_opthdr *)dstopt;
4850 				toh->level = IPPROTO_IPV6;
4851 				toh->name = IPV6_RTHDR;
4852 				toh->len = sizeof (struct T_opthdr) +
4853 				    ipp.ipp_rthdrlen;
4854 				toh->status = 0;
4855 				dstopt += sizeof (struct T_opthdr);
4856 				bcopy(ipp.ipp_rthdr, dstopt, ipp.ipp_rthdrlen);
4857 				dstopt += ipp.ipp_rthdrlen;
4858 				udi_size -= toh->len;
4859 			}
4860 			if ((udp_bits.udpb_ipv6_recvdstopts) &&
4861 			    (ipp.ipp_fields & IPPF_DSTOPTS)) {
4862 				struct T_opthdr *toh;
4863 
4864 				toh = (struct T_opthdr *)dstopt;
4865 				toh->level = IPPROTO_IPV6;
4866 				toh->name = IPV6_DSTOPTS;
4867 				toh->len = sizeof (struct T_opthdr) +
4868 				    ipp.ipp_dstoptslen;
4869 				toh->status = 0;
4870 				dstopt += sizeof (struct T_opthdr);
4871 				bcopy(ipp.ipp_dstopts, dstopt,
4872 				    ipp.ipp_dstoptslen);
4873 				dstopt += ipp.ipp_dstoptslen;
4874 				udi_size -= toh->len;
4875 			}
4876 
4877 			if (cr != NULL) {
4878 				struct T_opthdr *toh;
4879 
4880 				toh = (struct T_opthdr *)dstopt;
4881 				toh->level = SOL_SOCKET;
4882 				toh->name = SCM_UCRED;
4883 				toh->len = sizeof (struct T_opthdr) + ucredsize;
4884 				toh->status = 0;
4885 				(void) cred2ucred(cr, cpid, &toh[1], rcr);
4886 				dstopt += toh->len;
4887 				udi_size -= toh->len;
4888 			}
4889 			if (udp_bits.udpb_timestamp) {
4890 				struct	T_opthdr *toh;
4891 
4892 				toh = (struct T_opthdr *)dstopt;
4893 				toh->level = SOL_SOCKET;
4894 				toh->name = SCM_TIMESTAMP;
4895 				toh->len = sizeof (struct T_opthdr) +
4896 				    sizeof (timestruc_t) + _POINTER_ALIGNMENT;
4897 				toh->status = 0;
4898 				dstopt += sizeof (struct T_opthdr);
4899 				/* Align for gethrestime() */
4900 				dstopt = (uchar_t *)P2ROUNDUP((intptr_t)dstopt,
4901 				    sizeof (intptr_t));
4902 				gethrestime((timestruc_t *)dstopt);
4903 				dstopt = (uchar_t *)toh + toh->len;
4904 				udi_size -= toh->len;
4905 			}
4906 
4907 			/* Consumed all of allocated space */
4908 			ASSERT(udi_size == 0);
4909 		}
4910 #undef	sin6
4911 		/* No IP_RECVDSTADDR for IPv6. */
4912 	}
4913 
4914 	BUMP_MIB(&us->us_udp_mib, udpHCInDatagrams);
4915 	if (options_mp != NULL)
4916 		freeb(options_mp);
4917 
4918 	if (udp_bits.udpb_direct_sockfs) {
4919 		/*
4920 		 * There is nothing above us except for the stream head;
4921 		 * use the read-side synchronous stream interface in
4922 		 * order to reduce the time spent in the interrupt thread.
4923 		 */
4924 		ASSERT(udp->udp_issocket);
4925 		udp_rcv_enqueue(connp->conn_rq, udp, mp, mp_len);
4926 	} else {
4927 		/*
4928 		 * Use regular STREAMS interface to pass data upstream
4929 		 * if this is not a socket endpoint, or if we have
4930 		 * switched over to the slow mode due to sockmod being
4931 		 * popped or a module being pushed on top of us.
4932 		 */
4933 		putnext(connp->conn_rq, mp);
4934 	}
4935 	return;
4936 
4937 tossit:
4938 	freemsg(mp);
4939 	if (options_mp != NULL)
4940 		freeb(options_mp);
4941 	BUMP_MIB(&us->us_udp_mib, udpInErrors);
4942 }
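/*
 * Illustrative userland sketch (an addition to this listing, not original
 * code): the receive-side options built in udp_input() surface as ancillary
 * data on a socket.  Assuming an AF_INET UDP socket 'fd' (hypothetical) with
 * IP_RECVTTL enabled:
 *
 *	int on = 1;
 *	uint8_t ttl;
 *	char dbuf[1500], cbuf[256];
 *	struct iovec iov = { dbuf, sizeof (dbuf) };
 *	struct msghdr msg;
 *	struct cmsghdr *cmsg;
 *
 *	(void) setsockopt(fd, IPPROTO_IP, IP_RECVTTL, &on, sizeof (on));
 *	bzero(&msg, sizeof (msg));
 *	msg.msg_iov = &iov;
 *	msg.msg_iovlen = 1;
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof (cbuf);
 *	if (recvmsg(fd, &msg, 0) >= 0) {
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *		    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *			if (cmsg->cmsg_level == IPPROTO_IP &&
 *			    cmsg->cmsg_type == IP_RECVTTL)
 *				ttl = *(uint8_t *)CMSG_DATA(cmsg);
 *		}
 *	}
 *
 * sockfs translates the T_opthdr entries built above into the cmsghdr
 * records seen here.
 */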
4943 
4944 /*
4945  * Handle the results of a T_BIND_REQ whether deferred by IP or handled
4946  * immediately.
4947  */
4948 static void
4949 udp_bind_result(conn_t *connp, mblk_t *mp)
4950 {
4951 	struct T_error_ack	*tea;
4952 
4953 	switch (mp->b_datap->db_type) {
4954 	case M_PROTO:
4955 	case M_PCPROTO:
4956 		/* M_PROTO messages contain some type of TPI message. */
4957 		ASSERT((uintptr_t)(mp->b_wptr - mp->b_rptr) <=
4958 		    (uintptr_t)INT_MAX);
4959 		if (mp->b_wptr - mp->b_rptr < sizeof (t_scalar_t)) {
4960 			freemsg(mp);
4961 			return;
4962 		}
4963 		tea = (struct T_error_ack *)mp->b_rptr;
4964 
4965 		switch (tea->PRIM_type) {
4966 		case T_ERROR_ACK:
4967 			switch (tea->ERROR_prim) {
4968 			case O_T_BIND_REQ:
4969 			case T_BIND_REQ:
4970 				udp_bind_error(connp, mp);
4971 				return;
4972 			default:
4973 				break;
4974 			}
4975 			ASSERT(0);
4976 			freemsg(mp);
4977 			return;
4978 
4979 		case T_BIND_ACK:
4980 			udp_bind_ack(connp, mp);
4981 			return;
4982 
4983 		default:
4984 			break;
4985 		}
4986 		freemsg(mp);
4987 		return;
4988 	default:
4989 		/* FIXME: other cases? */
4990 		ASSERT(0);
4991 		freemsg(mp);
4992 		return;
4993 	}
4994 }
4995 
4996 /*
4997  * Process a T_BIND_ACK
4998  */
4999 static void
5000 udp_bind_ack(conn_t *connp, mblk_t *mp)
5001 {
5002 	udp_t	*udp = connp->conn_udp;
5003 	mblk_t	*mp1;
5004 	ire_t	*ire;
5005 	struct T_bind_ack *tba;
5006 	uchar_t *addrp;
5007 	ipa_conn_t	*ac;
5008 	ipa6_conn_t	*ac6;
5009 	udp_fanout_t	*udpf;
5010 	udp_stack_t	*us = udp->udp_us;
5011 
5012 	ASSERT(udp->udp_pending_op != -1);
5013 	rw_enter(&udp->udp_rwlock, RW_WRITER);
5014 	/*
5015 	 * If a broadcast/multicast address was bound, set
5016 	 * the source address to 0.
5017 	 * This ensures that no datagrams with a broadcast
5018 	 * address as the source address are emitted (which
5019 	 * would violate RFC 1122 - Host Requirements).
5020 	 *
5021 	 * Note that when connecting, the returned IRE is
5022 	 * for the destination address, and we only perform
5023 	 * the broadcast check for the source address (it
5024 	 * is OK to connect to a broadcast/multicast address).
5025 	 */
5026 	mp1 = mp->b_cont;
5027 	if (mp1 != NULL && mp1->b_datap->db_type == IRE_DB_TYPE) {
5028 		ire = (ire_t *)mp1->b_rptr;
5029 
5030 		/*
5031 		 * Note: we get IRE_BROADCAST for IPv6 to "mark" a multicast
5032 		 * local address.
5033 		 */
5034 		udpf = &us->us_bind_fanout[UDP_BIND_HASH(udp->udp_port,
5035 		    us->us_bind_fanout_size)];
5036 		if (ire->ire_type == IRE_BROADCAST &&
5037 		    udp->udp_state != TS_DATA_XFER) {
5038 			ASSERT(udp->udp_pending_op == T_BIND_REQ ||
5039 			    udp->udp_pending_op == O_T_BIND_REQ);
5040 			/* This was just a local bind to a broadcast addr */
5041 			mutex_enter(&udpf->uf_lock);
5042 			V6_SET_ZERO(udp->udp_v6src);
5043 			mutex_exit(&udpf->uf_lock);
5044 			if (udp->udp_family == AF_INET6)
5045 				(void) udp_build_hdrs(udp);
5046 		} else if (V6_OR_V4_INADDR_ANY(udp->udp_v6src)) {
5047 			/*
5048 			 * Local address not yet set - pick it from the
5049 			 * T_bind_ack
5050 			 */
5051 			tba = (struct T_bind_ack *)mp->b_rptr;
5052 			addrp = &mp->b_rptr[tba->ADDR_offset];
5053 			switch (udp->udp_family) {
5054 			case AF_INET:
5055 				if (tba->ADDR_length == sizeof (ipa_conn_t)) {
5056 					ac = (ipa_conn_t *)addrp;
5057 				} else {
5058 					ASSERT(tba->ADDR_length ==
5059 					    sizeof (ipa_conn_x_t));
5060 					ac = &((ipa_conn_x_t *)addrp)->acx_conn;
5061 				}
5062 				mutex_enter(&udpf->uf_lock);
5063 				IN6_IPADDR_TO_V4MAPPED(ac->ac_laddr,
5064 				    &udp->udp_v6src);
5065 				mutex_exit(&udpf->uf_lock);
5066 				break;
5067 			case AF_INET6:
5068 				if (tba->ADDR_length == sizeof (ipa6_conn_t)) {
5069 					ac6 = (ipa6_conn_t *)addrp;
5070 				} else {
5071 					ASSERT(tba->ADDR_length ==
5072 					    sizeof (ipa6_conn_x_t));
5073 					ac6 = &((ipa6_conn_x_t *)
5074 					    addrp)->ac6x_conn;
5075 				}
5076 				mutex_enter(&udpf->uf_lock);
5077 				udp->udp_v6src = ac6->ac6_laddr;
5078 				mutex_exit(&udpf->uf_lock);
5079 				(void) udp_build_hdrs(udp);
5080 				break;
5081 			}
5082 		}
5083 		mp1 = mp1->b_cont;
5084 	}
5085 	udp->udp_pending_op = -1;
5086 	rw_exit(&udp->udp_rwlock);
5087 	/*
5088 	 * Look for one or more appended ACK messages added by
5089 	 * udp_connect or udp_disconnect.
5090 	 * If none found just send up the T_BIND_ACK.
5091 	 * udp_connect has appended a T_OK_ACK and a T_CONN_CON.
5092 	 * udp_disconnect has appended a T_OK_ACK.
5093 	 */
5094 	if (mp1 != NULL) {
5095 		if (mp->b_cont == mp1)
5096 			mp->b_cont = NULL;
5097 		else {
5098 			ASSERT(mp->b_cont->b_cont == mp1);
5099 			mp->b_cont->b_cont = NULL;
5100 		}
5101 		freemsg(mp);
5102 		mp = mp1;
5103 		while (mp != NULL) {
5104 			mp1 = mp->b_cont;
5105 			mp->b_cont = NULL;
5106 			putnext(connp->conn_rq, mp);
5107 			mp = mp1;
5108 		}
5109 		return;
5110 	}
5111 	freemsg(mp->b_cont);
5112 	mp->b_cont = NULL;
5113 	putnext(connp->conn_rq, mp);
5114 }
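/*
 * Illustrative mblk chains handled by udp_bind_result()/udp_bind_ack() (a
 * sketch derived from the comments above):
 *
 *	plain bind:	T_BIND_ACK [ -> IRE_DB_TYPE ]
 *	connect:	T_BIND_ACK [ -> IRE_DB_TYPE ] -> T_OK_ACK -> T_CONN_CON
 *	disconnect:	T_BIND_ACK [ -> IRE_DB_TYPE ] -> T_OK_ACK
 *
 * When appended acknowledgements are present, the T_BIND_ACK (and any IRE
 * mblk) is freed and only the appended messages are passed upstream, one
 * mblk at a time.
 */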
5115 
5116 static void
5117 udp_bind_error(conn_t *connp, mblk_t *mp)
5118 {
5119 	udp_t	*udp = connp->conn_udp;
5120 	struct T_error_ack *tea;
5121 	udp_fanout_t	*udpf;
5122 	udp_stack_t	*us = udp->udp_us;
5123 
5124 	tea = (struct T_error_ack *)mp->b_rptr;
5125 
5126 	/*
5127 	 * If our O_T_BIND_REQ/T_BIND_REQ fails,
5128 	 * clear out the associated port and source
5129 	 * address before passing the message
5130 	 * upstream.  If this was caused by a T_CONN_REQ,
5131 	 * revert to the bound state.
5132 	 */
5133 
5134 	rw_enter(&udp->udp_rwlock, RW_WRITER);
5135 	ASSERT(udp->udp_pending_op != -1);
5136 	tea->ERROR_prim = udp->udp_pending_op;
5137 	udp->udp_pending_op = -1;
5138 	udpf = &us->us_bind_fanout[
5139 	    UDP_BIND_HASH(udp->udp_port,
5140 	    us->us_bind_fanout_size)];
5141 	mutex_enter(&udpf->uf_lock);
5142 
5143 	switch (tea->ERROR_prim) {
5144 	case T_CONN_REQ:
5145 		ASSERT(udp->udp_state == TS_DATA_XFER);
5146 		/* Connect failed */
5147 		/* Revert back to the bound source */
5148 		udp->udp_v6src = udp->udp_bound_v6src;
5149 		udp->udp_state = TS_IDLE;
5150 		mutex_exit(&udpf->uf_lock);
5151 		if (udp->udp_family == AF_INET6)
5152 			(void) udp_build_hdrs(udp);
5153 		rw_exit(&udp->udp_rwlock);
5154 		break;
5155 
5156 	case T_DISCON_REQ:
5157 	case T_BIND_REQ:
5158 	case O_T_BIND_REQ:
5159 		V6_SET_ZERO(udp->udp_v6src);
5160 		V6_SET_ZERO(udp->udp_bound_v6src);
5161 		udp->udp_state = TS_UNBND;
5162 		udp_bind_hash_remove(udp, B_TRUE);
5163 		udp->udp_port = 0;
5164 		mutex_exit(&udpf->uf_lock);
5165 		if (udp->udp_family == AF_INET6)
5166 			(void) udp_build_hdrs(udp);
5167 		rw_exit(&udp->udp_rwlock);
5168 		break;
5169 
5170 	default:
5171 		mutex_exit(&udpf->uf_lock);
5172 		rw_exit(&udp->udp_rwlock);
5173 		(void) mi_strlog(connp->conn_rq, 1,
5174 		    SL_ERROR|SL_TRACE,
5175 		    "udp_bind_error: bad ERROR_prim, "
5176 		    "prim %d", tea->ERROR_prim);
5177 	}
5178 	putnext(connp->conn_rq, mp);
5179 }
5180 
5181 /*
5182  * Return SNMP data in the buffer in mpdata.  We don't hold any locks, so we
5183  * report information that may be changing beneath us.
5184  */
5185 mblk_t *
5186 udp_snmp_get(queue_t *q, mblk_t *mpctl)
5187 {
5188 	mblk_t			*mpdata;
5189 	mblk_t			*mp_conn_ctl;
5190 	mblk_t			*mp_attr_ctl;
5191 	mblk_t			*mp6_conn_ctl;
5192 	mblk_t			*mp6_attr_ctl;
5193 	mblk_t			*mp_conn_tail;
5194 	mblk_t			*mp_attr_tail;
5195 	mblk_t			*mp6_conn_tail;
5196 	mblk_t			*mp6_attr_tail;
5197 	struct opthdr		*optp;
5198 	mib2_udpEntry_t		ude;
5199 	mib2_udp6Entry_t	ude6;
5200 	mib2_transportMLPEntry_t mlp;
5201 	int			state;
5202 	zoneid_t		zoneid;
5203 	int			i;
5204 	connf_t			*connfp;
5205 	conn_t			*connp = Q_TO_CONN(q);
5206 	int			v4_conn_idx;
5207 	int			v6_conn_idx;
5208 	boolean_t		needattr;
5209 	udp_t			*udp;
5210 	ip_stack_t		*ipst = connp->conn_netstack->netstack_ip;
5211 	udp_stack_t		*us = connp->conn_netstack->netstack_udp;
5212 	mblk_t			*mp2ctl;
5213 
5214 	/*
5215 	 * make a copy of the original message
5216 	 */
5217 	mp2ctl = copymsg(mpctl);
5218 
5219 	mp_conn_ctl = mp_attr_ctl = mp6_conn_ctl = NULL;
5220 	if (mpctl == NULL ||
5221 	    (mpdata = mpctl->b_cont) == NULL ||
5222 	    (mp_conn_ctl = copymsg(mpctl)) == NULL ||
5223 	    (mp_attr_ctl = copymsg(mpctl)) == NULL ||
5224 	    (mp6_conn_ctl = copymsg(mpctl)) == NULL ||
5225 	    (mp6_attr_ctl = copymsg(mpctl)) == NULL) {
5226 		freemsg(mp_conn_ctl);
5227 		freemsg(mp_attr_ctl);
5228 		freemsg(mp6_conn_ctl);
5229 		freemsg(mpctl);
5230 		freemsg(mp2ctl);
5231 		return (0);
5232 	}
5233 
5234 	zoneid = connp->conn_zoneid;
5235 
5236 	/* fixed length structure for IPv4 and IPv6 counters */
5237 	SET_MIB(us->us_udp_mib.udpEntrySize, sizeof (mib2_udpEntry_t));
5238 	SET_MIB(us->us_udp_mib.udp6EntrySize, sizeof (mib2_udp6Entry_t));
5239 	/* synchronize 64- and 32-bit counters */
5240 	SYNC32_MIB(&us->us_udp_mib, udpInDatagrams, udpHCInDatagrams);
5241 	SYNC32_MIB(&us->us_udp_mib, udpOutDatagrams, udpHCOutDatagrams);
5242 
5243 	optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
5244 	optp->level = MIB2_UDP;
5245 	optp->name = 0;
5246 	(void) snmp_append_data(mpdata, (char *)&us->us_udp_mib,
5247 	    sizeof (us->us_udp_mib));
5248 	optp->len = msgdsize(mpdata);
5249 	qreply(q, mpctl);
5250 
5251 	mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL;
5252 	v4_conn_idx = v6_conn_idx = 0;
5253 
5254 	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
5255 		connfp = &ipst->ips_ipcl_globalhash_fanout[i];
5256 		connp = NULL;
5257 
5258 		while ((connp = ipcl_get_next_conn(connfp, connp,
5259 		    IPCL_UDPCONN))) {
5260 			udp = connp->conn_udp;
5261 			if (zoneid != connp->conn_zoneid)
5262 				continue;
5263 
5264 			/*
5265 			 * Note that the port numbers are sent in
5266 			 * host byte order
5267 			 */
5268 
5269 			if (udp->udp_state == TS_UNBND)
5270 				state = MIB2_UDP_unbound;
5271 			else if (udp->udp_state == TS_IDLE)
5272 				state = MIB2_UDP_idle;
5273 			else if (udp->udp_state == TS_DATA_XFER)
5274 				state = MIB2_UDP_connected;
5275 			else
5276 				state = MIB2_UDP_unknown;
5277 
5278 			needattr = B_FALSE;
5279 			bzero(&mlp, sizeof (mlp));
5280 			if (connp->conn_mlp_type != mlptSingle) {
5281 				if (connp->conn_mlp_type == mlptShared ||
5282 				    connp->conn_mlp_type == mlptBoth)
5283 					mlp.tme_flags |= MIB2_TMEF_SHARED;
5284 				if (connp->conn_mlp_type == mlptPrivate ||
5285 				    connp->conn_mlp_type == mlptBoth)
5286 					mlp.tme_flags |= MIB2_TMEF_PRIVATE;
5287 				needattr = B_TRUE;
5288 			}
5289 
5290 			/*
5291 			 * Create an IPv4 table entry for IPv4 entries and also
5292 			 * any IPv6 entries which are bound to in6addr_any
5293 			 * (i.e. anything an IPv4 peer could connect/send to).
5294 			 */
5295 			if (udp->udp_ipversion == IPV4_VERSION ||
5296 			    (udp->udp_state <= TS_IDLE &&
5297 			    IN6_IS_ADDR_UNSPECIFIED(&udp->udp_v6src))) {
5298 				ude.udpEntryInfo.ue_state = state;
5299 				/*
5300 				 * If in6addr_any this will set it to
5301 				 * INADDR_ANY
5302 				 */
5303 				ude.udpLocalAddress =
5304 				    V4_PART_OF_V6(udp->udp_v6src);
5305 				ude.udpLocalPort = ntohs(udp->udp_port);
5306 				if (udp->udp_state == TS_DATA_XFER) {
5307 					/*
5308 					 * Can potentially get here for
5309 					 * v6 socket if another process
5310 					 * (say, ping) has just done a
5311 					 * sendto(), changing the state
5312 					 * from the TS_IDLE above to
5313 					 * TS_DATA_XFER by the time we hit
5314 					 * this part of the code.
5315 					 */
5316 					ude.udpEntryInfo.ue_RemoteAddress =
5317 					    V4_PART_OF_V6(udp->udp_v6dst);
5318 					ude.udpEntryInfo.ue_RemotePort =
5319 					    ntohs(udp->udp_dstport);
5320 				} else {
5321 					ude.udpEntryInfo.ue_RemoteAddress = 0;
5322 					ude.udpEntryInfo.ue_RemotePort = 0;
5323 				}
5324 
5325 				/*
5326 				 * We make the assumption that all udp_t
5327 				 * structs will be created within an address
5328 				 * region no larger than 32-bits.
5329 				 */
5330 				ude.udpInstance = (uint32_t)(uintptr_t)udp;
5331 				ude.udpCreationProcess =
5332 				    (udp->udp_open_pid < 0) ?
5333 				    MIB2_UNKNOWN_PROCESS :
5334 				    udp->udp_open_pid;
5335 				ude.udpCreationTime = udp->udp_open_time;
5336 
5337 				(void) snmp_append_data2(mp_conn_ctl->b_cont,
5338 				    &mp_conn_tail, (char *)&ude, sizeof (ude));
5339 				mlp.tme_connidx = v4_conn_idx++;
5340 				if (needattr)
5341 					(void) snmp_append_data2(
5342 					    mp_attr_ctl->b_cont, &mp_attr_tail,
5343 					    (char *)&mlp, sizeof (mlp));
5344 			}
5345 			if (udp->udp_ipversion == IPV6_VERSION) {
5346 				ude6.udp6EntryInfo.ue_state  = state;
5347 				ude6.udp6LocalAddress = udp->udp_v6src;
5348 				ude6.udp6LocalPort = ntohs(udp->udp_port);
5349 				ude6.udp6IfIndex = udp->udp_bound_if;
5350 				if (udp->udp_state == TS_DATA_XFER) {
5351 					ude6.udp6EntryInfo.ue_RemoteAddress =
5352 					    udp->udp_v6dst;
5353 					ude6.udp6EntryInfo.ue_RemotePort =
5354 					    ntohs(udp->udp_dstport);
5355 				} else {
5356 					ude6.udp6EntryInfo.ue_RemoteAddress =
5357 					    sin6_null.sin6_addr;
5358 					ude6.udp6EntryInfo.ue_RemotePort = 0;
5359 				}
5360 				/*
5361 				 * We make the assumption that all udp_t
5362 				 * structs will be created within an address
5363 				 * region no larger than 32 bits.
5364 				 */
5365 				ude6.udp6Instance = (uint32_t)(uintptr_t)udp;
5366 				ude6.udp6CreationProcess =
5367 				    (udp->udp_open_pid < 0) ?
5368 				    MIB2_UNKNOWN_PROCESS :
5369 				    udp->udp_open_pid;
5370 				ude6.udp6CreationTime = udp->udp_open_time;
5371 
5372 				(void) snmp_append_data2(mp6_conn_ctl->b_cont,
5373 				    &mp6_conn_tail, (char *)&ude6,
5374 				    sizeof (ude6));
5375 				mlp.tme_connidx = v6_conn_idx++;
5376 				if (needattr)
5377 					(void) snmp_append_data2(
5378 					    mp6_attr_ctl->b_cont,
5379 					    &mp6_attr_tail, (char *)&mlp,
5380 					    sizeof (mlp));
5381 			}
5382 		}
5383 	}
5384 
5385 	/* IPv4 UDP endpoints */
5386 	optp = (struct opthdr *)&mp_conn_ctl->b_rptr[
5387 	    sizeof (struct T_optmgmt_ack)];
5388 	optp->level = MIB2_UDP;
5389 	optp->name = MIB2_UDP_ENTRY;
5390 	optp->len = msgdsize(mp_conn_ctl->b_cont);
5391 	qreply(q, mp_conn_ctl);
5392 
5393 	/* table of MLP attributes... */
5394 	optp = (struct opthdr *)&mp_attr_ctl->b_rptr[
5395 	    sizeof (struct T_optmgmt_ack)];
5396 	optp->level = MIB2_UDP;
5397 	optp->name = EXPER_XPORT_MLP;
5398 	optp->len = msgdsize(mp_attr_ctl->b_cont);
5399 	if (optp->len == 0)
5400 		freemsg(mp_attr_ctl);
5401 	else
5402 		qreply(q, mp_attr_ctl);
5403 
5404 	/* IPv6 UDP endpoints */
5405 	optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[
5406 	    sizeof (struct T_optmgmt_ack)];
5407 	optp->level = MIB2_UDP6;
5408 	optp->name = MIB2_UDP6_ENTRY;
5409 	optp->len = msgdsize(mp6_conn_ctl->b_cont);
5410 	qreply(q, mp6_conn_ctl);
5411 
5412 	/* table of MLP attributes... */
5413 	optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[
5414 	    sizeof (struct T_optmgmt_ack)];
5415 	optp->level = MIB2_UDP6;
5416 	optp->name = EXPER_XPORT_MLP;
5417 	optp->len = msgdsize(mp6_attr_ctl->b_cont);
5418 	if (optp->len == 0)
5419 		freemsg(mp6_attr_ctl);
5420 	else
5421 		qreply(q, mp6_attr_ctl);
5422 
5423 	return (mp2ctl);
5424 }
5425 
5426 /*
5427  * Return 0 if invalid set request, 1 otherwise, including non-udp requests.
5428  * NOTE: Per MIB-II, UDP has no writable data.
5429  * TODO:  If this ever actually tries to set anything, it needs to be
5430  * updated to do the appropriate locking.
5431  */
5432 /* ARGSUSED */
5433 int
5434 udp_snmp_set(queue_t *q, t_scalar_t level, t_scalar_t name,
5435     uchar_t *ptr, int len)
5436 {
5437 	switch (level) {
5438 	case MIB2_UDP:
5439 		return (0);
5440 	default:
5441 		return (1);
5442 	}
5443 }
5444 
5445 static void
5446 udp_report_item(mblk_t *mp, udp_t *udp)
5447 {
5448 	char *state;
5449 	char addrbuf1[INET6_ADDRSTRLEN];
5450 	char addrbuf2[INET6_ADDRSTRLEN];
5451 	uint_t print_len, buf_len;
5452 
5453 	buf_len = mp->b_datap->db_lim - mp->b_wptr;
5454 	ASSERT(buf_len >= 0);
5455 	if (buf_len == 0)
5456 		return;
5457 
5458 	if (udp->udp_state == TS_UNBND)
5459 		state = "UNBOUND";
5460 	else if (udp->udp_state == TS_IDLE)
5461 		state = "IDLE";
5462 	else if (udp->udp_state == TS_DATA_XFER)
5463 		state = "CONNECTED";
5464 	else
5465 		state = "UnkState";
5466 	print_len = snprintf((char *)mp->b_wptr, buf_len,
5467 	    MI_COL_PTRFMT_STR "%4d %5u %s %s %5u %s\n",
5468 	    (void *)udp, udp->udp_connp->conn_zoneid, ntohs(udp->udp_port),
5469 	    inet_ntop(AF_INET6, &udp->udp_v6src, addrbuf1, sizeof (addrbuf1)),
5470 	    inet_ntop(AF_INET6, &udp->udp_v6dst, addrbuf2, sizeof (addrbuf2)),
5471 	    ntohs(udp->udp_dstport), state);
5472 	if (print_len < buf_len) {
5473 		mp->b_wptr += print_len;
5474 	} else {
5475 		mp->b_wptr += buf_len;
5476 	}
5477 }
5478 
5479 /* Report for ndd "udp_status" */
5480 /* ARGSUSED */
5481 static int
5482 udp_status_report(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *cr)
5483 {
5484 	zoneid_t zoneid;
5485 	connf_t	*connfp;
5486 	conn_t	*connp = Q_TO_CONN(q);
5487 	udp_t	*udp = connp->conn_udp;
5488 	int	i;
5489 	udp_stack_t *us = udp->udp_us;
5490 	ip_stack_t *ipst = connp->conn_netstack->netstack_ip;
5491 
5492 	/*
5493 	 * Because of the ndd constraint, we can have at most a 64K buffer
5494 	 * to hold all the UDP info.  To be more efficient, just allocate
5495 	 * a 64K buffer here, assuming we need that much.  This may be a
5496 	 * problem since any user can read udp_status, so we limit the rate
5497 	 * at which this can be done using us_ndd_get_info_interval.  This
5498 	 * should be OK as normal users should not do this too often.
5499 	 */
5500 	if (cr == NULL || secpolicy_ip_config(cr, B_TRUE) != 0) {
5501 		if (ddi_get_lbolt() - us->us_last_ndd_get_info_time <
5502 		    drv_usectohz(us->us_ndd_get_info_interval * 1000)) {
5503 			(void) mi_mpprintf(mp, NDD_TOO_QUICK_MSG);
5504 			return (0);
5505 		}
5506 	}
5507 	if ((mp->b_cont = allocb(ND_MAX_BUF_LEN, BPRI_HI)) == NULL) {
5508 		/* The following may work even if we cannot get a large buf. */
5509 		(void) mi_mpprintf(mp, NDD_OUT_OF_BUF_MSG);
5510 		return (0);
5511 	}
5512 	(void) mi_mpprintf(mp,
5513 	    "UDP     " MI_COL_HDRPAD_STR
5514 	/*   12345678[89ABCDEF] */
5515 	    " zone lport src addr        dest addr       port  state");
5516 	/*    1234 12345 xxx.xxx.xxx.xxx xxx.xxx.xxx.xxx 12345 UNBOUND */
5517 
5518 	zoneid = connp->conn_zoneid;
5519 
5520 	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
5521 		connfp = &ipst->ips_ipcl_globalhash_fanout[i];
5522 		connp = NULL;
5523 
5524 		while ((connp = ipcl_get_next_conn(connfp, connp,
5525 		    IPCL_UDPCONN))) {
5526 			udp = connp->conn_udp;
5527 			if (zoneid != GLOBAL_ZONEID &&
5528 			    zoneid != connp->conn_zoneid)
5529 				continue;
5530 
5531 			udp_report_item(mp->b_cont, udp);
5532 		}
5533 	}
5534 	us->us_last_ndd_get_info_time = ddi_get_lbolt();
5535 	return (0);
5536 }
5537 
5538 /*
5539  * This routine creates a T_UDERROR_IND message and passes it upstream.
5540  * The address and options are copied from the T_UNITDATA_REQ message
5541  * passed in mp.  This message is freed.
5542  */
5543 static void
5544 udp_ud_err(queue_t *q, mblk_t *mp, uchar_t *destaddr, t_scalar_t destlen,
5545     t_scalar_t err)
5546 {
5547 	struct T_unitdata_req *tudr;
5548 	mblk_t	*mp1;
5549 	uchar_t	*optaddr;
5550 	t_scalar_t optlen;
5551 
5552 	if (DB_TYPE(mp) == M_DATA) {
5553 		ASSERT(destaddr != NULL && destlen != 0);
5554 		optaddr = NULL;
5555 		optlen = 0;
5556 	} else {
5557 		if ((mp->b_wptr < mp->b_rptr) ||
5558 		    (MBLKL(mp)) < sizeof (struct T_unitdata_req)) {
5559 			goto done;
5560 		}
5561 		tudr = (struct T_unitdata_req *)mp->b_rptr;
5562 		destaddr = mp->b_rptr + tudr->DEST_offset;
5563 		if (destaddr < mp->b_rptr || destaddr >= mp->b_wptr ||
5564 		    destaddr + tudr->DEST_length < mp->b_rptr ||
5565 		    destaddr + tudr->DEST_length > mp->b_wptr) {
5566 			goto done;
5567 		}
5568 		optaddr = mp->b_rptr + tudr->OPT_offset;
5569 		if (optaddr < mp->b_rptr || optaddr >= mp->b_wptr ||
5570 		    optaddr + tudr->OPT_length < mp->b_rptr ||
5571 		    optaddr + tudr->OPT_length > mp->b_wptr) {
5572 			goto done;
5573 		}
5574 		destlen = tudr->DEST_length;
5575 		optlen = tudr->OPT_length;
5576 	}
5577 
5578 	mp1 = mi_tpi_uderror_ind((char *)destaddr, destlen,
5579 	    (char *)optaddr, optlen, err);
5580 	if (mp1 != NULL)
5581 		qreply(q, mp1);
5582 
5583 done:
5584 	freemsg(mp);
5585 }
5586 
5587 /*
5588  * This routine removes a port number association from a stream.  It
5589  * is called by udp_wput to handle T_UNBIND_REQ messages.
5590  */
5591 static void
5592 udp_unbind(queue_t *q, mblk_t *mp)
5593 {
5594 	udp_t *udp = Q_TO_UDP(q);
5595 	udp_fanout_t	*udpf;
5596 	udp_stack_t	*us = udp->udp_us;
5597 
5598 	if (cl_inet_unbind != NULL) {
5599 		/*
5600 		 * Running in cluster mode - register unbind information
5601 		 */
5602 		if (udp->udp_ipversion == IPV4_VERSION) {
5603 			(*cl_inet_unbind)(IPPROTO_UDP, AF_INET,
5604 			    (uint8_t *)(&V4_PART_OF_V6(udp->udp_v6src)),
5605 			    (in_port_t)udp->udp_port);
5606 		} else {
5607 			(*cl_inet_unbind)(IPPROTO_UDP, AF_INET6,
5608 			    (uint8_t *)&(udp->udp_v6src),
5609 			    (in_port_t)udp->udp_port);
5610 		}
5611 	}
5612 
5613 	rw_enter(&udp->udp_rwlock, RW_WRITER);
5614 	if (udp->udp_state == TS_UNBND || udp->udp_pending_op != -1) {
5615 		rw_exit(&udp->udp_rwlock);
5616 		udp_err_ack(q, mp, TOUTSTATE, 0);
5617 		return;
5618 	}
5619 	udp->udp_pending_op = T_UNBIND_REQ;
5620 	rw_exit(&udp->udp_rwlock);
5621 
5622 	/*
5623 	 * Pass the unbind to IP; T_UNBIND_REQ is larger than T_OK_ACK
5624 	 * and therefore ip_unbind must never return NULL.
5625 	 */
5626 	mp = ip_unbind(q, mp);
5627 	ASSERT(mp != NULL);
5628 	ASSERT(((struct T_ok_ack *)mp->b_rptr)->PRIM_type == T_OK_ACK);
5629 
5630 	/*
5631 	 * Once we're unbound from IP, the pending operation may be cleared
5632 	 * here.
5633 	 */
5634 	rw_enter(&udp->udp_rwlock, RW_WRITER);
5635 	udpf = &us->us_bind_fanout[UDP_BIND_HASH(udp->udp_port,
5636 	    us->us_bind_fanout_size)];
5637 	mutex_enter(&udpf->uf_lock);
5638 	udp_bind_hash_remove(udp, B_TRUE);
5639 	V6_SET_ZERO(udp->udp_v6src);
5640 	V6_SET_ZERO(udp->udp_bound_v6src);
5641 	udp->udp_port = 0;
5642 	mutex_exit(&udpf->uf_lock);
5643 
5644 	udp->udp_pending_op = -1;
5645 	udp->udp_state = TS_UNBND;
5646 	if (udp->udp_family == AF_INET6)
5647 		(void) udp_build_hdrs(udp);
5648 	rw_exit(&udp->udp_rwlock);
5649 
5650 	qreply(q, mp);
5651 }
5652 
5653 /*
5654  * Don't let the port fall into the privileged range.
5655  * Since the extra privileged ports can be arbitrary we also
5656  * exclude those from consideration.
5657  * us->us_epriv_ports is not sorted, so we loop over it until
5658  * there are no changes.
5659  */
5660 static in_port_t
5661 udp_update_next_port(udp_t *udp, in_port_t port, boolean_t random)
5662 {
5663 	int i;
5664 	in_port_t nextport;
5665 	boolean_t restart = B_FALSE;
5666 	udp_stack_t *us = udp->udp_us;
5667 
5668 	if (random && udp_random_anon_port != 0) {
5669 		(void) random_get_pseudo_bytes((uint8_t *)&port,
5670 		    sizeof (in_port_t));
5671 		/*
5672 		 * Unless changed by a sys admin, the smallest anon port
5673 		 * is 32768 and the largest anon port is 65535.  There is
5674 		 * roughly a 50% chance that the random port will be smaller
5675 		 * than the smallest anon port.  When that happens,
5676 		 * add port % (anon port range) to the smallest anon
5677 		 * port to get the random port.  It should fall into the
5678 		 * valid anon port range.
5679 		 */
5680 		if (port < us->us_smallest_anon_port) {
5681 			port = us->us_smallest_anon_port +
5682 			    port % (us->us_largest_anon_port -
5683 			    us->us_smallest_anon_port);
5684 		}
5685 	}
5686 
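	/*
	 * Clamp the port into the anonymous range, skip any extra
	 * privileged ports and, on labeled systems, any ports that
	 * tsol_next_port() asks us to skip; loop until the port value
	 * is stable, giving up (returning 0) if we wrap the range a
	 * second time.
	 */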
5687 retry:
5688 	if (port < us->us_smallest_anon_port)
5689 		port = us->us_smallest_anon_port;
5690 
5691 	if (port > us->us_largest_anon_port) {
5692 		port = us->us_smallest_anon_port;
5693 		if (restart)
5694 			return (0);
5695 		restart = B_TRUE;
5696 	}
5697 
5698 	if (port < us->us_smallest_nonpriv_port)
5699 		port = us->us_smallest_nonpriv_port;
5700 
5701 	for (i = 0; i < us->us_num_epriv_ports; i++) {
5702 		if (port == us->us_epriv_ports[i]) {
5703 			port++;
5704 			/*
5705 			 * Make sure that the port is in the
5706 			 * valid range.
5707 			 */
5708 			goto retry;
5709 		}
5710 	}
5711 
5712 	if (is_system_labeled() &&
5713 	    (nextport = tsol_next_port(crgetzone(udp->udp_connp->conn_cred),
5714 	    port, IPPROTO_UDP, B_TRUE)) != 0) {
5715 		port = nextport;
5716 		goto retry;
5717 	}
5718 
5719 	return (port);
5720 }
5721 
5722 static int
5723 udp_update_label(queue_t *wq, mblk_t *mp, ipaddr_t dst)
5724 {
5725 	int err;
5726 	uchar_t opt_storage[IP_MAX_OPT_LENGTH];
5727 	udp_t *udp = Q_TO_UDP(wq);
5728 	udp_stack_t	*us = udp->udp_us;
5729 
5730 	err = tsol_compute_label(DB_CREDDEF(mp, udp->udp_connp->conn_cred), dst,
5731 	    opt_storage, udp->udp_connp->conn_mac_exempt,
5732 	    us->us_netstack->netstack_ip);
5733 	if (err == 0) {
5734 		err = tsol_update_options(&udp->udp_ip_snd_options,
5735 		    &udp->udp_ip_snd_options_len, &udp->udp_label_len,
5736 		    opt_storage);
5737 	}
5738 	if (err != 0) {
5739 		DTRACE_PROBE4(
5740 		    tx__ip__log__info__updatelabel__udp,
5741 		    char *, "queue(1) failed to update options(2) on mp(3)",
5742 		    queue_t *, wq, char *, opt_storage, mblk_t *, mp);
5743 	} else {
5744 		IN6_IPADDR_TO_V4MAPPED(dst, &udp->udp_v6lastdst);
5745 	}
5746 	return (err);
5747 }
5748 
5749 static mblk_t *
5750 udp_output_v4(conn_t *connp, mblk_t *mp, ipaddr_t v4dst, uint16_t port,
5751     uint_t srcid, int *error, boolean_t insert_spi)
5752 {
5753 	udp_t	*udp = connp->conn_udp;
5754 	queue_t	*q = connp->conn_wq;
5755 	mblk_t	*mp1 = mp;
5756 	mblk_t	*mp2;
5757 	ipha_t	*ipha;
5758 	int	ip_hdr_length;
5759 	uint32_t ip_len;
5760 	udpha_t	*udpha;
5761 	boolean_t lock_held = B_FALSE;
5762 	in_port_t	uha_src_port;
5763 	udpattrs_t	attrs;
5764 	uchar_t	ip_snd_opt[IP_MAX_OPT_LENGTH];
5765 	uint32_t	ip_snd_opt_len = 0;
5766 	ip4_pkt_t  pktinfo;
5767 	ip4_pkt_t  *pktinfop = &pktinfo;
5768 	ip_opt_info_t optinfo;
5769 	ip_stack_t	*ipst = connp->conn_netstack->netstack_ip;
5770 	udp_stack_t	*us = udp->udp_us;
5771 	ipsec_stack_t	*ipss = ipst->ips_netstack->netstack_ipsec;
5772 
5773 
5774 	*error = 0;
5775 	pktinfop->ip4_ill_index = 0;
5776 	pktinfop->ip4_addr = INADDR_ANY;
5777 	optinfo.ip_opt_flags = 0;
5778 	optinfo.ip_opt_ill_index = 0;
5779 
5780 	if (v4dst == INADDR_ANY)
5781 		v4dst = htonl(INADDR_LOOPBACK);
5782 
5783 	/*
5784 	 * If options were passed in, feed them in for verification and handling
5785 	 */
5786 	attrs.udpattr_credset = B_FALSE;
5787 	if (DB_TYPE(mp) != M_DATA) {
5788 		mp1 = mp->b_cont;
5789 		if (((struct T_unitdata_req *)mp->b_rptr)->OPT_length != 0) {
5790 			attrs.udpattr_ipp4 = pktinfop;
5791 			attrs.udpattr_mb = mp;
5792 			if (udp_unitdata_opt_process(q, mp, error, &attrs) < 0)
5793 				goto done;
5794 			/*
5795 			 * Note: options were processed successfully.
5796 			 * The mp option buffer described by
5797 			 * OPT_length/offset may have been modified and
5798 			 * now contains the option setting results.
5799 			 */
5800 			ASSERT(*error == 0);
5801 		}
5802 	}
5803 
5804 	/* mp1 points to the M_DATA mblk carrying the packet */
5805 	ASSERT(mp1 != NULL && DB_TYPE(mp1) == M_DATA);
5806 
5807 	rw_enter(&udp->udp_rwlock, RW_READER);
5808 	lock_held = B_TRUE;
5809 	/*
5810 	 * Check if our saved options are valid; update if not.
5811 	 * TSOL Note: Since we are not in WRITER mode, UDP packets
5812 	 * to different destinations may require different labels,
5813 	 * or worse, UDP packets to the same IP address may require
5814 	 * different labels due to the use of a shared all-zones address.
5815 	 * We use conn_lock to ensure that lastdst, ip_snd_options,
5816 	 * and ip_snd_options_len are consistent for the current
5817 	 * destination and are updated atomically.
5818 	 */
5819 	mutex_enter(&connp->conn_lock);
5820 	if (is_system_labeled()) {
5821 		/* Using UDP MLP requires SCM_UCRED from user */
5822 		if (connp->conn_mlp_type != mlptSingle &&
5823 		    !attrs.udpattr_credset) {
5824 			mutex_exit(&connp->conn_lock);
5825 			DTRACE_PROBE4(
5826 			    tx__ip__log__info__output__udp,
5827 			    char *, "MLP mp(1) lacks SCM_UCRED attr(2) on q(3)",
5828 			    mblk_t *, mp1, udpattrs_t *, &attrs, queue_t *, q);
5829 			*error = ECONNREFUSED;
5830 			goto done;
5831 		}
5832 		/*
5833 		 * update label option for this UDP socket if
5834 		 * - the destination has changed, or
5835 		 * - the UDP socket is MLP
5836 		 */
5837 		if ((!IN6_IS_ADDR_V4MAPPED(&udp->udp_v6lastdst) ||
5838 		    V4_PART_OF_V6(udp->udp_v6lastdst) != v4dst ||
5839 		    connp->conn_mlp_type != mlptSingle) &&
5840 		    (*error = udp_update_label(q, mp, v4dst)) != 0) {
5841 			mutex_exit(&connp->conn_lock);
5842 			goto done;
5843 		}
5844 	}
5845 	if (udp->udp_ip_snd_options_len > 0) {
5846 		ip_snd_opt_len = udp->udp_ip_snd_options_len;
5847 		bcopy(udp->udp_ip_snd_options, ip_snd_opt, ip_snd_opt_len);
5848 	}
5849 	mutex_exit(&connp->conn_lock);
5850 
5851 	/* Add an IP header */
5852 	ip_hdr_length = IP_SIMPLE_HDR_LENGTH + UDPH_SIZE + ip_snd_opt_len +
5853 	    (insert_spi ? sizeof (uint32_t) : 0);
5854 	ipha = (ipha_t *)&mp1->b_rptr[-ip_hdr_length];
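	/*
	 * If the mblk is shared, lacks headroom for the header, or the
	 * header would be misaligned, allocate a separate mblk for it.
	 */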
5855 	if (DB_REF(mp1) != 1 || (uchar_t *)ipha < DB_BASE(mp1) ||
5856 	    !OK_32PTR(ipha)) {
5857 		mp2 = allocb(ip_hdr_length + us->us_wroff_extra, BPRI_LO);
5858 		if (mp2 == NULL) {
5859 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_END,
5860 			    "udp_wput_end: q %p (%S)", q, "allocbfail2");
5861 			*error = ENOMEM;
5862 			goto done;
5863 		}
5864 		mp2->b_wptr = DB_LIM(mp2);
5865 		mp2->b_cont = mp1;
5866 		mp1 = mp2;
5867 		if (DB_TYPE(mp) != M_DATA)
5868 			mp->b_cont = mp1;
5869 		else
5870 			mp = mp1;
5871 
5872 		ipha = (ipha_t *)(mp1->b_wptr - ip_hdr_length);
5873 	}
5874 	ip_hdr_length -= (UDPH_SIZE + (insert_spi ? sizeof (uint32_t) : 0));
5875 #ifdef	_BIG_ENDIAN
5876 	/* Set version, header length, and tos */
5877 	*(uint16_t *)&ipha->ipha_version_and_hdr_length =
5878 	    ((((IP_VERSION << 4) | (ip_hdr_length>>2)) << 8) |
5879 	    udp->udp_type_of_service);
5880 	/* Set ttl and protocol */
5881 	*(uint16_t *)&ipha->ipha_ttl = (udp->udp_ttl << 8) | IPPROTO_UDP;
5882 #else
5883 	/* Set version, header length, and tos */
5884 	*(uint16_t *)&ipha->ipha_version_and_hdr_length =
5885 	    ((udp->udp_type_of_service << 8) |
5886 	    ((IP_VERSION << 4) | (ip_hdr_length>>2)));
5887 	/* Set ttl and protocol */
5888 	*(uint16_t *)&ipha->ipha_ttl = (IPPROTO_UDP << 8) | udp->udp_ttl;
5889 #endif
5890 	if (pktinfop->ip4_addr != INADDR_ANY) {
5891 		ipha->ipha_src = pktinfop->ip4_addr;
5892 		optinfo.ip_opt_flags = IP_VERIFY_SRC;
5893 	} else {
5894 		/*
5895 		 * Copy our address into the packet.  If it is zero,
5896 		 * look at __sin6_src_id for a hint.  If we leave the
5897 		 * source as INADDR_ANY then ip will fill in the real
5898 		 * source address.
5899 		 */
5900 		IN6_V4MAPPED_TO_IPADDR(&udp->udp_v6src, ipha->ipha_src);
5901 		if (srcid != 0 && ipha->ipha_src == INADDR_ANY) {
5902 			in6_addr_t v6src;
5903 
5904 			ip_srcid_find_id(srcid, &v6src, connp->conn_zoneid,
5905 			    us->us_netstack);
5906 			IN6_V4MAPPED_TO_IPADDR(&v6src, ipha->ipha_src);
5907 		}
5908 	}
5909 	uha_src_port = udp->udp_port;
5910 	if (ip_hdr_length == IP_SIMPLE_HDR_LENGTH) {
5911 		rw_exit(&udp->udp_rwlock);
5912 		lock_held = B_FALSE;
5913 	}
5914 
5915 	if (pktinfop->ip4_ill_index != 0) {
5916 		optinfo.ip_opt_ill_index = pktinfop->ip4_ill_index;
5917 	}
5918 
5919 	ipha->ipha_fragment_offset_and_flags = 0;
5920 	ipha->ipha_ident = 0;
5921 
5922 	mp1->b_rptr = (uchar_t *)ipha;
5923 
5924 	ASSERT((uintptr_t)(mp1->b_wptr - (uchar_t *)ipha) <=
5925 	    (uintptr_t)UINT_MAX);
5926 
5927 	/* Determine length of packet */
5928 	ip_len = (uint32_t)(mp1->b_wptr - (uchar_t *)ipha);
5929 	if ((mp2 = mp1->b_cont) != NULL) {
5930 		do {
5931 			ASSERT((uintptr_t)MBLKL(mp2) <= (uintptr_t)UINT_MAX);
5932 			ip_len += (uint32_t)MBLKL(mp2);
5933 		} while ((mp2 = mp2->b_cont) != NULL);
5934 	}
5935 	/*
5936 	 * If the size of the packet is greater than the maximum allowed by
5937 	 * ip, return an error. Passing this down could cause panics because
5938 	 * the size will have wrapped and be inconsistent with the msg size.
5939 	 */
5940 	if (ip_len > IP_MAXPACKET) {
5941 		TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_END,
5942 		    "udp_wput_end: q %p (%S)", q, "IP length exceeded");
5943 		*error = EMSGSIZE;
5944 		goto done;
5945 	}
5946 	ipha->ipha_length = htons((uint16_t)ip_len);
5947 	ip_len -= ip_hdr_length;
5948 	ip_len = htons((uint16_t)ip_len);
5949 	udpha = (udpha_t *)(((uchar_t *)ipha) + ip_hdr_length);
5950 
5951 	/* Insert all-0s SPI now. */
5952 	if (insert_spi)
5953 		*((uint32_t *)(udpha + 1)) = 0;
5954 
5955 	/*
5956 	 * Copy in the destination address
5957 	 */
5958 	ipha->ipha_dst = v4dst;
5959 
5960 	/*
5961 	 * Set ttl based on IP_MULTICAST_TTL to match IPv6 logic.
5962 	 */
5963 	if (CLASSD(v4dst))
5964 		ipha->ipha_ttl = udp->udp_multicast_ttl;
5965 
5966 	udpha->uha_dst_port = port;
5967 	udpha->uha_src_port = uha_src_port;
5968 
5969 	if (ip_snd_opt_len > 0) {
5970 		uint32_t	cksum;
5971 
5972 		bcopy(ip_snd_opt, &ipha[1], ip_snd_opt_len);
5973 		lock_held = B_FALSE;
5974 		rw_exit(&udp->udp_rwlock);
5975 		/*
5976 		 * Massage the source route, putting the first hop in ipha_dst.
5977 		 * Ignore the destination in T_unitdata_req.
5978 		 * Create a checksum adjustment for a source route, if any.
5979 		 */
5980 		cksum = ip_massage_options(ipha, us->us_netstack);
5981 		cksum = (cksum & 0xFFFF) + (cksum >> 16);
5982 		cksum -= ((ipha->ipha_dst >> 16) & 0xFFFF) +
5983 		    (ipha->ipha_dst & 0xFFFF);
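		/* Account for a borrow (one's complement arithmetic). */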
5984 		if ((int)cksum < 0)
5985 			cksum--;
5986 		cksum = (cksum & 0xFFFF) + (cksum >> 16);
5987 		/*
5988 		 * IP does the checksum if uha_checksum is non-zero.
5989 		 * We make it easy for IP to include our pseudo header
5990 		 * by putting our length in uha_checksum.
5991 		 */
5992 		cksum += ip_len;
5993 		cksum = (cksum & 0xFFFF) + (cksum >> 16);
5994 		/* There might be a carry. */
5995 		cksum = (cksum & 0xFFFF) + (cksum >> 16);
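		/*
		 * Pack the UDP length and checksum into a single 32-bit
		 * word so that the one-shot store into uha_length below
		 * fills in both fields with the right byte order.
		 */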
5996 #ifdef _LITTLE_ENDIAN
5997 		if (us->us_do_checksum)
5998 			ip_len = (cksum << 16) | ip_len;
5999 #else
6000 		if (us->us_do_checksum)
6001 			ip_len = (ip_len << 16) | cksum;
6002 		else
6003 			ip_len <<= 16;
6004 #endif
6005 	} else {
6006 		/*
6007 		 * IP does the checksum if uha_checksum is non-zero.
6008 		 * We make it easy for IP to include our pseudo header
6009 		 * by putting our length in uha_checksum.
6010 		 */
6011 		if (us->us_do_checksum)
6012 			ip_len |= (ip_len << 16);
6013 #ifndef _LITTLE_ENDIAN
6014 		else
6015 			ip_len <<= 16;
6016 #endif
6017 	}
6018 	ASSERT(!lock_held);
6019 	/* Set UDP length and checksum */
6020 	*((uint32_t *)&udpha->uha_length) = ip_len;
6021 	if (DB_CRED(mp) != NULL)
6022 		mblk_setcred(mp1, DB_CRED(mp));
6023 
6024 	if (DB_TYPE(mp) != M_DATA) {
6025 		ASSERT(mp != mp1);
6026 		freeb(mp);
6027 	}
6028 
6029 	/* mp has been consumed and we'll return success */
6030 	ASSERT(*error == 0);
6031 	mp = NULL;
6032 
6033 	/* We're done.  Pass the packet to ip. */
6034 	BUMP_MIB(&us->us_udp_mib, udpHCOutDatagrams);
6035 	TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_END,
6036 	    "udp_wput_end: q %p (%S)", q, "end");
6037 
6038 	if ((connp->conn_flags & IPCL_CHECK_POLICY) != 0 ||
6039 	    CONN_OUTBOUND_POLICY_PRESENT(connp, ipss) ||
6040 	    connp->conn_dontroute ||
6041 	    connp->conn_nofailover_ill != NULL ||
6042 	    connp->conn_outgoing_ill != NULL || optinfo.ip_opt_flags != 0 ||
6043 	    optinfo.ip_opt_ill_index != 0 ||
6044 	    ipha->ipha_version_and_hdr_length != IP_SIMPLE_HDR_VERSION ||
6045 	    IPP_ENABLED(IPP_LOCAL_OUT, ipst) ||
6046 	    ipst->ips_ip_g_mrouter != NULL) {
6047 		UDP_STAT(us, udp_ip_send);
6048 		ip_output_options(connp, mp1, connp->conn_wq, IP_WPUT,
6049 		    &optinfo);
6050 	} else {
6051 		udp_send_data(udp, connp->conn_wq, mp1, ipha);
6052 	}
6053 
6054 done:
6055 	if (lock_held)
6056 		rw_exit(&udp->udp_rwlock);
6057 	if (*error != 0) {
6058 		ASSERT(mp != NULL);
6059 		BUMP_MIB(&us->us_udp_mib, udpOutErrors);
6060 	}
6061 	return (mp);
6062 }
6063 
6064 static void
6065 udp_send_data(udp_t *udp, queue_t *q, mblk_t *mp, ipha_t *ipha)
6066 {
6067 	conn_t	*connp = udp->udp_connp;
6068 	ipaddr_t src, dst;
6069 	ire_t	*ire;
6070 	ipif_t	*ipif = NULL;
6071 	mblk_t	*ire_fp_mp;
6072 	boolean_t retry_caching;
6073 	udp_stack_t *us = udp->udp_us;
6074 	ip_stack_t	*ipst = connp->conn_netstack->netstack_ip;
6075 
6076 	dst = ipha->ipha_dst;
6077 	src = ipha->ipha_src;
6078 	ASSERT(ipha->ipha_ident == 0);
6079 
6080 	if (CLASSD(dst)) {
6081 		int err;
6082 
6083 		ipif = conn_get_held_ipif(connp,
6084 		    &connp->conn_multicast_ipif, &err);
6085 
6086 		if (ipif == NULL || ipif->ipif_isv6 ||
6087 		    (ipif->ipif_ill->ill_phyint->phyint_flags &
6088 		    PHYI_LOOPBACK)) {
6089 			if (ipif != NULL)
6090 				ipif_refrele(ipif);
6091 			UDP_STAT(us, udp_ip_send);
6092 			ip_output(connp, mp, q, IP_WPUT);
6093 			return;
6094 		}
6095 	}
6096 
6097 	retry_caching = B_FALSE;
6098 	mutex_enter(&connp->conn_lock);
6099 	ire = connp->conn_ire_cache;
6100 	ASSERT(!(connp->conn_state_flags & CONN_INCIPIENT));
6101 
6102 	if (ire == NULL || ire->ire_addr != dst ||
6103 	    (ire->ire_marks & IRE_MARK_CONDEMNED)) {
6104 		retry_caching = B_TRUE;
6105 	} else if (CLASSD(dst) && (ire->ire_type & IRE_CACHE)) {
6106 		ill_t *stq_ill = (ill_t *)ire->ire_stq->q_ptr;
6107 
6108 		ASSERT(ipif != NULL);
6109 		if (stq_ill != ipif->ipif_ill && (stq_ill->ill_group == NULL ||
6110 		    stq_ill->ill_group != ipif->ipif_ill->ill_group))
6111 			retry_caching = B_TRUE;
6112 	}
6113 
6114 	if (!retry_caching) {
6115 		ASSERT(ire != NULL);
6116 		IRE_REFHOLD(ire);
6117 		mutex_exit(&connp->conn_lock);
6118 	} else {
6119 		boolean_t cached = B_FALSE;
6120 
6121 		connp->conn_ire_cache = NULL;
6122 		mutex_exit(&connp->conn_lock);
6123 
6124 		/* Release the old ire */
6125 		if (ire != NULL) {
6126 			IRE_REFRELE_NOTR(ire);
6127 			ire = NULL;
6128 		}
6129 
6130 		if (CLASSD(dst)) {
6131 			ASSERT(ipif != NULL);
6132 			ire = ire_ctable_lookup(dst, 0, 0, ipif,
6133 			    connp->conn_zoneid, MBLK_GETLABEL(mp),
6134 			    MATCH_IRE_ILL_GROUP, ipst);
6135 		} else {
6136 			ASSERT(ipif == NULL);
6137 			ire = ire_cache_lookup(dst, connp->conn_zoneid,
6138 			    MBLK_GETLABEL(mp), ipst);
6139 		}
6140 
6141 		if (ire == NULL) {
6142 			if (ipif != NULL)
6143 				ipif_refrele(ipif);
6144 			UDP_STAT(us, udp_ire_null);
6145 			ip_output(connp, mp, q, IP_WPUT);
6146 			return;
6147 		}
6148 		IRE_REFHOLD_NOTR(ire);
6149 
6150 		mutex_enter(&connp->conn_lock);
6151 		if (CONN_CACHE_IRE(connp) && connp->conn_ire_cache == NULL &&
6152 		    !(ire->ire_marks & IRE_MARK_CONDEMNED)) {
6153 			irb_t		*irb = ire->ire_bucket;
6154 
6155 			/*
6156 			 * IREs created for non-connection oriented transports
6157 			 * are normally initialized with IRE_MARK_TEMPORARY set
6158 			 * in the ire_marks. These IREs are preferentially
6159 			 * reaped when the hash chain length in the cache
6160 			 * bucket exceeds the maximum value specified in
6161 			 * ip[6]_ire_max_bucket_cnt. This can severely affect
6162 			 * UDP performance if IRE cache entries that we need
6163 			 * to reuse are continually removed. To remedy this,
6164 			 * when we cache the IRE in the conn_t, we remove the
6165 			 * IRE_MARK_TEMPORARY bit from the ire_marks if it was
6166 			 * set.
6167 			 */
6168 			if (ire->ire_marks & IRE_MARK_TEMPORARY) {
6169 				rw_enter(&irb->irb_lock, RW_WRITER);
6170 				if (ire->ire_marks & IRE_MARK_TEMPORARY) {
6171 					ire->ire_marks &= ~IRE_MARK_TEMPORARY;
6172 					irb->irb_tmp_ire_cnt--;
6173 				}
6174 				rw_exit(&irb->irb_lock);
6175 			}
6176 			connp->conn_ire_cache = ire;
6177 			cached = B_TRUE;
6178 		}
6179 		mutex_exit(&connp->conn_lock);
6180 
6181 		/*
6182 		 * We can continue to use the ire but since it was not
6183 		 * cached, we should drop the extra reference.
6184 		 */
6185 		if (!cached)
6186 			IRE_REFRELE_NOTR(ire);
6187 	}
6188 	ASSERT(ire != NULL && ire->ire_ipversion == IPV4_VERSION);
6189 	ASSERT(!CLASSD(dst) || ipif != NULL);
6190 
6191 	/*
6192 	 * Check if we can take the fast-path.
6193 	 * Note that "incomplete" ires (where the link-layer address for the
6194 	 * next hop is not resolved, or where the fast-path header in
6195 	 * nce_fp_mp is not available yet) are sent down the legacy (slow) path.
6196 	 */
6197 	if ((ire->ire_type & (IRE_BROADCAST|IRE_LOCAL|IRE_LOOPBACK)) ||
6198 	    (ire->ire_flags & RTF_MULTIRT) || (ire->ire_stq == NULL) ||
6199 	    (ire->ire_max_frag < ntohs(ipha->ipha_length)) ||
6200 	    ((ire->ire_nce == NULL) ||
6201 	    ((ire_fp_mp = ire->ire_nce->nce_fp_mp) == NULL)) ||
6202 	    connp->conn_nexthop_set || (MBLKL(ire_fp_mp) > MBLKHEAD(mp))) {
6203 		if (ipif != NULL)
6204 			ipif_refrele(ipif);
6205 		UDP_STAT(us, udp_ip_ire_send);
6206 		IRE_REFRELE(ire);
6207 		ip_output(connp, mp, q, IP_WPUT);
6208 		return;
6209 	}
6210 
6211 	if (src == INADDR_ANY && !connp->conn_unspec_src) {
6212 		if (CLASSD(dst) && !(ire->ire_flags & RTF_SETSRC))
6213 			ipha->ipha_src = ipif->ipif_src_addr;
6214 		else
6215 			ipha->ipha_src = ire->ire_src_addr;
6216 	}
6217 
6218 	if (ipif != NULL)
6219 		ipif_refrele(ipif);
6220 
6221 	udp_xmit(connp->conn_wq, mp, ire, connp, connp->conn_zoneid);
6222 }
6223 
6224 static void
6225 udp_xmit(queue_t *q, mblk_t *mp, ire_t *ire, conn_t *connp, zoneid_t zoneid)
6226 {
6227 	ipaddr_t src, dst;
6228 	ill_t	*ill;
6229 	mblk_t	*ire_fp_mp;
6230 	uint_t	ire_fp_mp_len;
6231 	uint16_t *up;
6232 	uint32_t cksum, hcksum_txflags;
6233 	queue_t	*dev_q;
6234 	udp_t	*udp = connp->conn_udp;
6235 	ipha_t	*ipha = (ipha_t *)mp->b_rptr;
6236 	udp_stack_t	*us = udp->udp_us;
6237 	ip_stack_t	*ipst = connp->conn_netstack->netstack_ip;
6238 	boolean_t ll_multicast = B_FALSE;
6239 
6240 	dev_q = ire->ire_stq->q_next;
6241 	ASSERT(dev_q != NULL);
6242 
6243 
6244 	if (DEV_Q_IS_FLOW_CTLED(dev_q)) {
6245 		BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsHCOutRequests);
6246 		BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
6247 		if (ipst->ips_ip_output_queue)
6248 			(void) putq(connp->conn_wq, mp);
6249 		else
6250 			freemsg(mp);
6251 		ire_refrele(ire);
6252 		return;
6253 	}
6254 
6255 	ire_fp_mp = ire->ire_nce->nce_fp_mp;
6256 	ire_fp_mp_len = MBLKL(ire_fp_mp);
6257 	ASSERT(MBLKHEAD(mp) >= ire_fp_mp_len);
6258 
6259 	dst = ipha->ipha_dst;
6260 	src = ipha->ipha_src;
6261 
6262 	ill = ire_to_ill(ire);
6263 	ASSERT(ill != NULL);
6264 
6265 	BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutRequests);
6266 
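	/*
	 * Assign a unique IP ident from the ire's counter and store it
	 * in network byte order.
	 */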
6267 	ipha->ipha_ident = (uint16_t)atomic_add_32_nv(&ire->ire_ident, 1);
6268 #ifndef _BIG_ENDIAN
6269 	ipha->ipha_ident = (ipha->ipha_ident << 8) | (ipha->ipha_ident >> 8);
6270 #endif
6271 
6272 	if (ILL_HCKSUM_CAPABLE(ill) && dohwcksum) {
6273 		ASSERT(ill->ill_hcksum_capab != NULL);
6274 		hcksum_txflags = ill->ill_hcksum_capab->ill_hcksum_txflags;
6275 	} else {
6276 		hcksum_txflags = 0;
6277 	}
6278 
6279 	/* pseudo-header checksum (do it in parts for IP header checksum) */
6280 	cksum = (dst >> 16) + (dst & 0xFFFF) + (src >> 16) + (src & 0xFFFF);
6281 
6282 	ASSERT(ipha->ipha_version_and_hdr_length == IP_SIMPLE_HDR_VERSION);
6283 	up = IPH_UDPH_CHECKSUMP(ipha, IP_SIMPLE_HDR_LENGTH);
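	/* A zero stored checksum means UDP checksums are disabled. */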
6284 	if (*up != 0) {
6285 		IP_CKSUM_XMIT_FAST(ire->ire_ipversion, hcksum_txflags,
6286 		    mp, ipha, up, IPPROTO_UDP, IP_SIMPLE_HDR_LENGTH,
6287 		    ntohs(ipha->ipha_length), cksum);
6288 
6289 		/* Software checksum? */
6290 		if (DB_CKSUMFLAGS(mp) == 0) {
6291 			UDP_STAT(us, udp_out_sw_cksum);
6292 			UDP_STAT_UPDATE(us, udp_out_sw_cksum_bytes,
6293 			    ntohs(ipha->ipha_length) - IP_SIMPLE_HDR_LENGTH);
6294 		}
6295 	}
6296 
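	/*
	 * For unicast destinations, carry over the ire's fragmentation
	 * flag (e.g. IPH_DF when path MTU discovery is in effect).
	 */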
6297 	if (!CLASSD(dst)) {
6298 		ipha->ipha_fragment_offset_and_flags |=
6299 		    (uint32_t)htons(ire->ire_frag_flag);
6300 	}
6301 
6302 	/* Calculate IP header checksum if hardware isn't capable */
6303 	if (!(DB_CKSUMFLAGS(mp) & HCK_IPV4_HDRCKSUM)) {
6304 		IP_HDR_CKSUM(ipha, cksum, ((uint32_t *)ipha)[0],
6305 		    ((uint16_t *)ipha)[4]);
6306 	}
6307 
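	/*
	 * For multicast, loop back a copy if this ill has a membership
	 * for the group (honoring IP_MULTICAST_LOOP), and stop here if
	 * the multicast TTL is zero.
	 */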
6308 	if (CLASSD(dst)) {
6309 		boolean_t ilm_exists;
6310 
6311 		ILM_WALKER_HOLD(ill);
6312 		ilm_exists = (ilm_lookup_ill(ill, dst, ALL_ZONES) != NULL);
6313 		ILM_WALKER_RELE(ill);
6314 		if (ilm_exists) {
6315 			ip_multicast_loopback(q, ill, mp,
6316 			    connp->conn_multicast_loop ? 0 :
6317 			    IP_FF_NO_MCAST_LOOP, zoneid);
6318 		}
6319 
6320 		/* If multicast TTL is 0 then we are done */
6321 		if (ipha->ipha_ttl == 0) {
6322 			freemsg(mp);
6323 			ire_refrele(ire);
6324 			return;
6325 		}
6326 		ll_multicast = B_TRUE;
6327 	}
6328 
6329 	ASSERT(DB_TYPE(ire_fp_mp) == M_DATA);
6330 	mp->b_rptr = (uchar_t *)ipha - ire_fp_mp_len;
6331 	bcopy(ire_fp_mp->b_rptr, mp->b_rptr, ire_fp_mp_len);
6332 
6333 	UPDATE_OB_PKT_COUNT(ire);
6334 	ire->ire_last_used_time = lbolt;
6335 
6336 	BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits);
6337 	UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets,
6338 	    ntohs(ipha->ipha_length));
6339 
6340 	if (ILL_DLS_CAPABLE(ill)) {
6341 		/*
6342 		 * Send the packet directly to DLD, where it may be queued
6343 		 * depending on the availability of transmit resources at
6344 		 * the media layer.
6345 		 */
6346 		IP_DLS_ILL_TX(ill, ipha, mp, ipst);
6347 	} else {
6348 		DTRACE_PROBE4(ip4__physical__out__start,
6349 		    ill_t *, NULL, ill_t *, ill,
6350 		    ipha_t *, ipha, mblk_t *, mp);
6351 		FW_HOOKS(ipst->ips_ip4_physical_out_event,
6352 		    ipst->ips_ipv4firewall_physical_out,
6353 		    NULL, ill, ipha, mp, mp, ll_multicast, ipst);
6354 		DTRACE_PROBE1(ip4__physical__out__end, mblk_t *, mp);
6355 		if (mp != NULL) {
6356 			DTRACE_IP7(send, mblk_t *, mp, conn_t *, NULL,
6357 			    void_ip_t *, ipha, __dtrace_ipsr_ill_t *, ill,
6358 			    ipha_t *, ipha, ip6_t *, NULL, int, 0);
6359 			putnext(ire->ire_stq, mp);
6360 		}
6361 	}
6362 
6363 	IRE_REFRELE(ire);
6364 }
6365 
6366 static boolean_t
6367 udp_update_label_v6(queue_t *wq, mblk_t *mp, in6_addr_t *dst)
6368 {
6369 	udp_t *udp = Q_TO_UDP(wq);
6370 	int err;
6371 	uchar_t opt_storage[TSOL_MAX_IPV6_OPTION];
6372 	udp_stack_t		*us = udp->udp_us;
6373 
6374 	err = tsol_compute_label_v6(DB_CREDDEF(mp, udp->udp_connp->conn_cred),
6375 	    dst, opt_storage, udp->udp_connp->conn_mac_exempt,
6376 	    us->us_netstack->netstack_ip);
6377 	if (err == 0) {
6378 		err = tsol_update_sticky(&udp->udp_sticky_ipp,
6379 		    &udp->udp_label_len_v6, opt_storage);
6380 	}
6381 	if (err != 0) {
6382 		DTRACE_PROBE4(
6383 		    tx__ip__log__drop__updatelabel__udp6,
6384 		    char *, "queue(1) failed to update options(2) on mp(3)",
6385 		    queue_t *, wq, char *, opt_storage, mblk_t *, mp);
6386 	} else {
6387 		udp->udp_v6lastdst = *dst;
6388 	}
6389 	return (err);
6390 }
6391 
6392 void
6393 udp_output_connected(void *arg, mblk_t *mp)
6394 {
6395 	conn_t	*connp = (conn_t *)arg;
6396 	udp_t	*udp = connp->conn_udp;
6397 	udp_stack_t	*us = udp->udp_us;
6398 	ipaddr_t	v4dst;
6399 	in_port_t	dstport;
6400 	boolean_t	mapped_addr;
6401 	struct sockaddr_storage ss;
6402 	sin_t		*sin;
6403 	sin6_t		*sin6;
6404 	struct sockaddr	*addr;
6405 	socklen_t	addrlen;
6406 	int		error;
6407 	boolean_t	insert_spi = udp->udp_nat_t_endpoint;
6408 
6409 	/* M_DATA for connected socket */
6410 
6411 	ASSERT(udp->udp_issocket);
6412 	UDP_DBGSTAT(us, udp_data_conn);
6413 
6414 	mutex_enter(&connp->conn_lock);
6415 	if (udp->udp_state != TS_DATA_XFER) {
6416 		mutex_exit(&connp->conn_lock);
6417 		BUMP_MIB(&us->us_udp_mib, udpOutErrors);
6418 		UDP_STAT(us, udp_out_err_notconn);
6419 		freemsg(mp);
6420 		TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_END,
6421 		    "udp_wput_end: connp %p (%S)", connp,
6422 		    "not-connected; address required");
6423 		return;
6424 	}
6425 
6426 	mapped_addr = IN6_IS_ADDR_V4MAPPED(&udp->udp_v6dst);
6427 	if (mapped_addr)
6428 		IN6_V4MAPPED_TO_IPADDR(&udp->udp_v6dst, v4dst);
6429 
6430 	/* Initialize addr and addrlen as if they're passed in */
6431 	if (udp->udp_family == AF_INET) {
6432 		sin = (sin_t *)&ss;
6433 		sin->sin_family = AF_INET;
6434 		dstport = sin->sin_port = udp->udp_dstport;
6435 		ASSERT(mapped_addr);
6436 		sin->sin_addr.s_addr = v4dst;
6437 		addr = (struct sockaddr *)sin;
6438 		addrlen = sizeof (*sin);
6439 	} else {
6440 		sin6 = (sin6_t *)&ss;
6441 		sin6->sin6_family = AF_INET6;
6442 		dstport = sin6->sin6_port = udp->udp_dstport;
6443 		sin6->sin6_flowinfo = udp->udp_flowinfo;
6444 		sin6->sin6_addr = udp->udp_v6dst;
6445 		sin6->sin6_scope_id = 0;
6446 		sin6->__sin6_src_id = 0;
6447 		addr = (struct sockaddr *)sin6;
6448 		addrlen = sizeof (*sin6);
6449 	}
6450 	mutex_exit(&connp->conn_lock);
6451 
6452 	if (mapped_addr) {
6453 		/*
6454 		 * Handle both AF_INET and AF_INET6; the latter
6455 		 * for IPv4-mapped destination addresses.  Note
6456 		 * here that addr and addrlen describe the
6457 		 * corresponding struct depending on the address
6458 		 * family of the socket.
6459 		 */
6460 		mp = udp_output_v4(connp, mp, v4dst, dstport, 0, &error,
6461 		    insert_spi);
6462 	} else {
6463 		mp = udp_output_v6(connp, mp, sin6, &error);
6464 	}
6465 	if (error == 0) {
6466 		ASSERT(mp == NULL);
6467 		return;
6468 	}
6469 
6470 	UDP_STAT(us, udp_out_err_output);
6471 	ASSERT(mp != NULL);
6472 	/* mp is freed by the following routine */
6473 	udp_ud_err(connp->conn_wq, mp, (uchar_t *)addr, (t_scalar_t)addrlen,
6474 	    (t_scalar_t)error);
6475 }
6476 
6477 /*
6478  * This routine handles all messages passed downstream.  It either
6479  * consumes the message or passes it downstream; it never queues
6480  * a message.
6481  *
6482  * Also entry point for sockfs when udp is in "direct sockfs" mode.  This mode
6483  * is valid when we are directly beneath the stream head, and thus sockfs
6484  * is able to bypass STREAMS and directly call us, passing along the sockaddr
6485  * structure without the cumbersome T_UNITDATA_REQ interface for the case of
6486  * connected endpoints.
6487  */
6488 void
6489 udp_wput(queue_t *q, mblk_t *mp)
6490 {
6491 	sin6_t		*sin6;
6492 	sin_t		*sin;
6493 	ipaddr_t	v4dst;
6494 	uint16_t	port;
6495 	uint_t		srcid;
6496 	conn_t		*connp = Q_TO_CONN(q);
6497 	udp_t		*udp = connp->conn_udp;
6498 	int		error = 0;
6499 	struct sockaddr	*addr;
6500 	socklen_t	addrlen;
6501 	udp_stack_t *us = udp->udp_us;
6502 	boolean_t	insert_spi = udp->udp_nat_t_endpoint;
6503 
6504 	TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_START,
6505 	    "udp_wput_start: queue %p mp %p", q, mp);
6506 
6507 	/*
6508 	 * We directly handle several cases here: T_UNITDATA_REQ messages
6509 	 * coming down as M_PROTO/M_PCPROTO and M_DATA messages for a
6510 	 * connected socket.
6511 	 */
6512 	switch (DB_TYPE(mp)) {
6513 	case M_DATA:
6514 		/*
6515 		 * Quick check for error cases. Checks will be done again
6516 		 * under the lock later on.
6517 		 */
6518 		if (!udp->udp_direct_sockfs || udp->udp_state != TS_DATA_XFER) {
6519 			/* Not connected; address is required */
6520 			BUMP_MIB(&us->us_udp_mib, udpOutErrors);
6521 			UDP_STAT(us, udp_out_err_notconn);
6522 			freemsg(mp);
6523 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_END,
6524 			    "udp_wput_end: connp %p (%S)", connp,
6525 			    "not-connected; address required");
6526 			return;
6527 		}
6528 		udp_output_connected(connp, mp);
6529 		return;
6530 
6531 	case M_PROTO:
6532 	case M_PCPROTO: {
6533 		struct T_unitdata_req *tudr;
6534 
6535 		ASSERT((uintptr_t)MBLKL(mp) <= (uintptr_t)INT_MAX);
6536 		tudr = (struct T_unitdata_req *)mp->b_rptr;
6537 
6538 		/* Handle valid T_UNITDATA_REQ here */
6539 		if (MBLKL(mp) >= sizeof (*tudr) &&
6540 		    ((t_primp_t)mp->b_rptr)->type == T_UNITDATA_REQ) {
6541 			if (mp->b_cont == NULL) {
6542 				TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_END,
6543 				    "udp_wput_end: q %p (%S)", q, "badaddr");
6544 				error = EPROTO;
6545 				goto ud_error;
6546 			}
6547 
6548 			if (!MBLKIN(mp, 0, tudr->DEST_offset +
6549 			    tudr->DEST_length)) {
6550 				TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_END,
6551 				    "udp_wput_end: q %p (%S)", q, "badaddr");
6552 				error = EADDRNOTAVAIL;
6553 				goto ud_error;
6554 			}
6555 			/*
6556 			 * If a port has not been bound to the stream, fail.
6557 			 * This is not a problem when sockfs is directly
6558 			 * above us, because it will ensure that the socket
6559 			 * is first bound before allowing data to be sent.
6560 			 */
6561 			if (udp->udp_state == TS_UNBND) {
6562 				TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_END,
6563 				    "udp_wput_end: q %p (%S)", q, "outstate");
6564 				error = EPROTO;
6565 				goto ud_error;
6566 			}
6567 			addr = (struct sockaddr *)
6568 			    &mp->b_rptr[tudr->DEST_offset];
6569 			addrlen = tudr->DEST_length;
6570 			if (tudr->OPT_length != 0)
6571 				UDP_STAT(us, udp_out_opt);
6572 			break;
6573 		}
6574 		/* FALLTHRU */
6575 	}
6576 	default:
6577 		udp_wput_other(q, mp);
6578 		return;
6579 	}
6580 	ASSERT(addr != NULL);
6581 
6582 	switch (udp->udp_family) {
6583 	case AF_INET6:
6584 		sin6 = (sin6_t *)addr;
6585 		if (!OK_32PTR((char *)sin6) || (addrlen != sizeof (sin6_t)) ||
6586 		    (sin6->sin6_family != AF_INET6)) {
6587 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_END,
6588 			    "udp_wput_end: q %p (%S)", q, "badaddr");
6589 			error = EADDRNOTAVAIL;
6590 			goto ud_error;
6591 		}
6592 
6593 		if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
6594 			/*
6595 			 * Destination is a non-IPv4-compatible IPv6 address.
6596 			 * Send out an IPv6 format packet.
6597 			 */
6598 			mp = udp_output_v6(connp, mp, sin6, &error);
6599 			if (error != 0)
6600 				goto ud_error;
6601 
6602 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_END,
6603 			    "udp_wput_end: q %p (%S)", q, "udp_output_v6");
6604 			return;
6605 		}
6606 		/*
6607 		 * If the local address is neither zero nor a mapped address,
6608 		 * return an error.  It would be possible to send an IPv4
6609 		 * packet but the response would never make it back to the
6610 		 * application since it is bound to a non-mapped address.
6611 		 */
6612 		if (!IN6_IS_ADDR_V4MAPPED(&udp->udp_v6src) &&
6613 		    !IN6_IS_ADDR_UNSPECIFIED(&udp->udp_v6src)) {
6614 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_END,
6615 			    "udp_wput_end: q %p (%S)", q, "badaddr");
6616 			error = EADDRNOTAVAIL;
6617 			goto ud_error;
6618 		}
6619 		/* Send IPv4 packet without modifying udp_ipversion */
6620 		/* Extract port and ipaddr */
6621 		port = sin6->sin6_port;
6622 		IN6_V4MAPPED_TO_IPADDR(&sin6->sin6_addr, v4dst);
6623 		srcid = sin6->__sin6_src_id;
6624 		break;
6625 
6626 	case AF_INET:
6627 		sin = (sin_t *)addr;
6628 		if ((!OK_32PTR((char *)sin) || addrlen != sizeof (sin_t)) ||
6629 		    (sin->sin_family != AF_INET)) {
6630 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_END,
6631 			    "udp_wput_end: q %p (%S)", q, "badaddr");
6632 			error = EADDRNOTAVAIL;
6633 			goto ud_error;
6634 		}
6635 		/* Extract port and ipaddr */
6636 		port = sin->sin_port;
6637 		v4dst = sin->sin_addr.s_addr;
6638 		srcid = 0;
6639 		break;
6640 	}
6641 
6642 	mp = udp_output_v4(connp, mp, v4dst, port, srcid, &error, insert_spi);
6643 	if (error != 0) {
6644 ud_error:
6645 		UDP_STAT(us, udp_out_err_output);
6646 		ASSERT(mp != NULL);
6647 		/* mp is freed by the following routine */
6648 		udp_ud_err(q, mp, (uchar_t *)addr, (t_scalar_t)addrlen,
6649 		    (t_scalar_t)error);
6650 	}
6651 }
6652 
6653 /*
6654  * udp_output_v6():
6655  * Assumes that udp_wput did some sanity checking on the destination
6656  * address.
6657  */
6658 static mblk_t *
6659 udp_output_v6(conn_t *connp, mblk_t *mp, sin6_t *sin6, int *error)
6660 {
6661 	ip6_t		*ip6h;
6662 	ip6i_t		*ip6i;	/* mp1->b_rptr even if no ip6i_t */
6663 	mblk_t		*mp1 = mp;
6664 	mblk_t		*mp2;
6665 	int		udp_ip_hdr_len = IPV6_HDR_LEN + UDPH_SIZE;
6666 	size_t		ip_len;
6667 	udpha_t		*udph;
6668 	udp_t		*udp = connp->conn_udp;
6669 	queue_t		*q = connp->conn_wq;
6670 	ip6_pkt_t	ipp_s;	/* For ancillary data options */
6671 	ip6_pkt_t	*ipp = &ipp_s;
6672 	ip6_pkt_t	*tipp;	/* temporary ipp */
6673 	uint32_t	csum = 0;
6674 	uint_t		ignore = 0;
6675 	uint_t		option_exists = 0, is_sticky = 0;
6676 	uint8_t		*cp;
6677 	uint8_t		*nxthdr_ptr;
6678 	in6_addr_t	ip6_dst;
6679 	udpattrs_t	attrs;
6680 	boolean_t	opt_present;
6681 	ip6_hbh_t	*hopoptsptr = NULL;
6682 	uint_t		hopoptslen = 0;
6683 	boolean_t	is_ancillary = B_FALSE;
6684 	udp_stack_t	*us = udp->udp_us;
6685 	size_t		sth_wroff = 0;
6686 
6687 	*error = 0;
6688 
6689 	/*
6690 	 * If the local address is a mapped address, return
6691 	 * an error.
6692 	 * It would be possible to send an IPv6 packet but the
6693 	 * response would never make it back to the application
6694 	 * since it is bound to a mapped address.
6695 	 */
6696 	if (IN6_IS_ADDR_V4MAPPED(&udp->udp_v6src)) {
6697 		*error = EADDRNOTAVAIL;
6698 		goto done;
6699 	}
6700 
6701 	ipp->ipp_fields = 0;
6702 	ipp->ipp_sticky_ignored = 0;
6703 
6704 	/*
6705 	 * If TPI options were passed in, feed them in for verification and handling
6706 	 */
6707 	attrs.udpattr_credset = B_FALSE;
6708 	opt_present = B_FALSE;
6709 	if (DB_TYPE(mp) != M_DATA) {
6710 		mp1 = mp->b_cont;
6711 		if (((struct T_unitdata_req *)mp->b_rptr)->OPT_length != 0) {
6712 			attrs.udpattr_ipp6 = ipp;
6713 			attrs.udpattr_mb = mp;
6714 			if (udp_unitdata_opt_process(q, mp, error,
6715 			    &attrs) < 0) {
6716 				goto done;
6717 			}
6718 			ASSERT(*error == 0);
6719 			opt_present = B_TRUE;
6720 		}
6721 	}
6722 	rw_enter(&udp->udp_rwlock, RW_READER);
6723 	ignore = ipp->ipp_sticky_ignored;
6724 
6725 	/* mp1 points to the M_DATA mblk carrying the packet */
6726 	ASSERT(mp1 != NULL && DB_TYPE(mp1) == M_DATA);
6727 
6728 	if (sin6->sin6_scope_id != 0 &&
6729 	    IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
6730 		/*
6731 		 * IPPF_SCOPE_ID is special.  It's neither a sticky
6732 		 * option nor ancillary data.  It needs to be
6733 		 * explicitly set in option_exists.
6734 		 */
6735 		option_exists |= IPPF_SCOPE_ID;
6736 	}
6737 
6738 	/*
6739 	 * Compute the destination address
6740 	 */
6741 	ip6_dst = sin6->sin6_addr;
6742 	if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
6743 		ip6_dst = ipv6_loopback;
6744 
6745 	/*
6746 	 * If we're not going to the same destination as last time, then
6747 	 * recompute the label required.  This is done in a separate routine to
6748 	 * avoid blowing up our stack here.
6749 	 *
6750 	 * TSOL Note: Since we are not in WRITER mode, UDP packets
6751 	 * to different destinations may require different labels,
6752 	 * or worse, UDP packets to the same IP address may require
6753 	 * different labels due to the use of a shared all-zones address.
6754 	 * We use conn_lock to ensure that lastdst, sticky ipp_hopopts,
6755 	 * and sticky ipp_hopoptslen are consistent for the current
6756 	 * destination and are updated atomically.
6757 	 */
6758 	mutex_enter(&connp->conn_lock);
6759 	if (is_system_labeled()) {
6760 		/* Using UDP MLP requires SCM_UCRED from user */
6761 		if (connp->conn_mlp_type != mlptSingle &&
6762 		    !attrs.udpattr_credset) {
6763 			DTRACE_PROBE4(
6764 			    tx__ip__log__info__output__udp6,
6765 			    char *, "MLP mp(1) lacks SCM_UCRED attr(2) on q(3)",
6766 			    mblk_t *, mp1, udpattrs_t *, &attrs, queue_t *, q);
6767 			*error = ECONNREFUSED;
6768 			rw_exit(&udp->udp_rwlock);
6769 			mutex_exit(&connp->conn_lock);
6770 			goto done;
6771 		}
6772 		/*
6773 		 * update label option for this UDP socket if
6774 		 * - the destination has changed, or
6775 		 * - the UDP socket is MLP
6776 		 */
6777 		if ((opt_present ||
6778 		    !IN6_ARE_ADDR_EQUAL(&udp->udp_v6lastdst, &ip6_dst) ||
6779 		    connp->conn_mlp_type != mlptSingle) &&
6780 		    (*error = udp_update_label_v6(q, mp, &ip6_dst)) != 0) {
6781 			rw_exit(&udp->udp_rwlock);
6782 			mutex_exit(&connp->conn_lock);
6783 			goto done;
6784 		}
6785 	}
6786 
6787 	/*
6788 	 * If there's a security label here, then we ignore any options the
6789 	 * user may try to set.  We keep the peer's label as a hidden sticky
6790 	 * option. We make a private copy of this label before releasing the
6791 	 * lock so that the label is kept consistent with the destination addr.
6792 	 */
6793 	if (udp->udp_label_len_v6 > 0) {
6794 		ignore &= ~IPPF_HOPOPTS;
6795 		ipp->ipp_fields &= ~IPPF_HOPOPTS;
6796 	}
6797 
6798 	if ((udp->udp_sticky_ipp.ipp_fields == 0) && (ipp->ipp_fields == 0)) {
6799 		/* No sticky options nor ancillary data. */
6800 		mutex_exit(&connp->conn_lock);
6801 		goto no_options;
6802 	}
6803 
6804 	/*
6805 	 * Go through the options figuring out where each is going to
6806 	 * come from and build two masks.  The first mask indicates if
6807 	 * the option exists at all.  The second mask indicates if the
6808 	 * option is sticky or ancillary.
6809 	 */
6810 	if (!(ignore & IPPF_HOPOPTS)) {
6811 		if (ipp->ipp_fields & IPPF_HOPOPTS) {
6812 			option_exists |= IPPF_HOPOPTS;
6813 			udp_ip_hdr_len += ipp->ipp_hopoptslen;
6814 		} else if (udp->udp_sticky_ipp.ipp_fields & IPPF_HOPOPTS) {
6815 			option_exists |= IPPF_HOPOPTS;
6816 			is_sticky |= IPPF_HOPOPTS;
6817 			ASSERT(udp->udp_sticky_ipp.ipp_hopoptslen != 0);
6818 			hopoptsptr = kmem_alloc(
6819 			    udp->udp_sticky_ipp.ipp_hopoptslen, KM_NOSLEEP);
6820 			if (hopoptsptr == NULL) {
6821 				*error = ENOMEM;
6822 				mutex_exit(&connp->conn_lock);
6823 				goto done;
6824 			}
6825 			hopoptslen = udp->udp_sticky_ipp.ipp_hopoptslen;
6826 			bcopy(udp->udp_sticky_ipp.ipp_hopopts, hopoptsptr,
6827 			    hopoptslen);
6828 			udp_ip_hdr_len += hopoptslen;
6829 		}
6830 	}
6831 	mutex_exit(&connp->conn_lock);
6832 
6833 	if (!(ignore & IPPF_RTHDR)) {
6834 		if (ipp->ipp_fields & IPPF_RTHDR) {
6835 			option_exists |= IPPF_RTHDR;
6836 			udp_ip_hdr_len += ipp->ipp_rthdrlen;
6837 		} else if (udp->udp_sticky_ipp.ipp_fields & IPPF_RTHDR) {
6838 			option_exists |= IPPF_RTHDR;
6839 			is_sticky |= IPPF_RTHDR;
6840 			udp_ip_hdr_len += udp->udp_sticky_ipp.ipp_rthdrlen;
6841 		}
6842 	}
6843 
6844 	if (!(ignore & IPPF_RTDSTOPTS) && (option_exists & IPPF_RTHDR)) {
6845 		if (ipp->ipp_fields & IPPF_RTDSTOPTS) {
6846 			option_exists |= IPPF_RTDSTOPTS;
6847 			udp_ip_hdr_len += ipp->ipp_rtdstoptslen;
6848 		} else if (udp->udp_sticky_ipp.ipp_fields & IPPF_RTDSTOPTS) {
6849 			option_exists |= IPPF_RTDSTOPTS;
6850 			is_sticky |= IPPF_RTDSTOPTS;
6851 			udp_ip_hdr_len += udp->udp_sticky_ipp.ipp_rtdstoptslen;
6852 		}
6853 	}
6854 
6855 	if (!(ignore & IPPF_DSTOPTS)) {
6856 		if (ipp->ipp_fields & IPPF_DSTOPTS) {
6857 			option_exists |= IPPF_DSTOPTS;
6858 			udp_ip_hdr_len += ipp->ipp_dstoptslen;
6859 		} else if (udp->udp_sticky_ipp.ipp_fields & IPPF_DSTOPTS) {
6860 			option_exists |= IPPF_DSTOPTS;
6861 			is_sticky |= IPPF_DSTOPTS;
6862 			udp_ip_hdr_len += udp->udp_sticky_ipp.ipp_dstoptslen;
6863 		}
6864 	}
6865 
6866 	if (!(ignore & IPPF_IFINDEX)) {
6867 		if (ipp->ipp_fields & IPPF_IFINDEX) {
6868 			option_exists |= IPPF_IFINDEX;
6869 		} else if (udp->udp_sticky_ipp.ipp_fields & IPPF_IFINDEX) {
6870 			option_exists |= IPPF_IFINDEX;
6871 			is_sticky |= IPPF_IFINDEX;
6872 		}
6873 	}
6874 
6875 	if (!(ignore & IPPF_ADDR)) {
6876 		if (ipp->ipp_fields & IPPF_ADDR) {
6877 			option_exists |= IPPF_ADDR;
6878 		} else if (udp->udp_sticky_ipp.ipp_fields & IPPF_ADDR) {
6879 			option_exists |= IPPF_ADDR;
6880 			is_sticky |= IPPF_ADDR;
6881 		}
6882 	}
6883 
6884 	if (!(ignore & IPPF_DONTFRAG)) {
6885 		if (ipp->ipp_fields & IPPF_DONTFRAG) {
6886 			option_exists |= IPPF_DONTFRAG;
6887 		} else if (udp->udp_sticky_ipp.ipp_fields & IPPF_DONTFRAG) {
6888 			option_exists |= IPPF_DONTFRAG;
6889 			is_sticky |= IPPF_DONTFRAG;
6890 		}
6891 	}
6892 
6893 	if (!(ignore & IPPF_USE_MIN_MTU)) {
6894 		if (ipp->ipp_fields & IPPF_USE_MIN_MTU) {
6895 			option_exists |= IPPF_USE_MIN_MTU;
6896 		} else if (udp->udp_sticky_ipp.ipp_fields &
6897 		    IPPF_USE_MIN_MTU) {
6898 			option_exists |= IPPF_USE_MIN_MTU;
6899 			is_sticky |= IPPF_USE_MIN_MTU;
6900 		}
6901 	}
6902 
6903 	if (!(ignore & IPPF_HOPLIMIT) && (ipp->ipp_fields & IPPF_HOPLIMIT))
6904 		option_exists |= IPPF_HOPLIMIT;
6905 	/* IPV6_HOPLIMIT can never be sticky */
6906 	ASSERT(!(udp->udp_sticky_ipp.ipp_fields & IPPF_HOPLIMIT));
6907 
6908 	if (!(ignore & IPPF_UNICAST_HOPS) &&
6909 	    (udp->udp_sticky_ipp.ipp_fields & IPPF_UNICAST_HOPS)) {
6910 		option_exists |= IPPF_UNICAST_HOPS;
6911 		is_sticky |= IPPF_UNICAST_HOPS;
6912 	}
6913 
6914 	if (!(ignore & IPPF_MULTICAST_HOPS) &&
6915 	    (udp->udp_sticky_ipp.ipp_fields & IPPF_MULTICAST_HOPS)) {
6916 		option_exists |= IPPF_MULTICAST_HOPS;
6917 		is_sticky |= IPPF_MULTICAST_HOPS;
6918 	}
6919 
6920 	if (!(ignore & IPPF_TCLASS)) {
6921 		if (ipp->ipp_fields & IPPF_TCLASS) {
6922 			option_exists |= IPPF_TCLASS;
6923 		} else if (udp->udp_sticky_ipp.ipp_fields & IPPF_TCLASS) {
6924 			option_exists |= IPPF_TCLASS;
6925 			is_sticky |= IPPF_TCLASS;
6926 		}
6927 	}
6928 
6929 	if (!(ignore & IPPF_NEXTHOP) &&
6930 	    (udp->udp_sticky_ipp.ipp_fields & IPPF_NEXTHOP)) {
6931 		option_exists |= IPPF_NEXTHOP;
6932 		is_sticky |= IPPF_NEXTHOP;
6933 	}
6934 
6935 no_options:
6936 
6937 	/*
6938 	 * If any options carried in the ip6i_t were specified, we
6939 	 * need to account for the ip6i_t in the data we'll be sending
6940 	 * down.
6941 	 */
6942 	if (option_exists & IPPF_HAS_IP6I)
6943 		udp_ip_hdr_len += sizeof (ip6i_t);
6944 
6945 	/* check/fix buffer config, setup pointers into it */
6946 	ip6h = (ip6_t *)&mp1->b_rptr[-udp_ip_hdr_len];
6947 	if (DB_REF(mp1) != 1 || ((unsigned char *)ip6h < DB_BASE(mp1)) ||
6948 	    !OK_32PTR(ip6h)) {
6949 
6950 		/* Try to get everything in a single mblk next time */
6951 		if (udp_ip_hdr_len > udp->udp_max_hdr_len) {
6952 			udp->udp_max_hdr_len = udp_ip_hdr_len;
6953 			sth_wroff = udp->udp_max_hdr_len + us->us_wroff_extra;
6954 		}
6955 
6956 		mp2 = allocb(udp_ip_hdr_len + us->us_wroff_extra, BPRI_LO);
6957 		if (mp2 == NULL) {
6958 			*error = ENOMEM;
6959 			rw_exit(&udp->udp_rwlock);
6960 			goto done;
6961 		}
6962 		mp2->b_wptr = DB_LIM(mp2);
6963 		mp2->b_cont = mp1;
6964 		mp1 = mp2;
6965 		if (DB_TYPE(mp) != M_DATA)
6966 			mp->b_cont = mp1;
6967 		else
6968 			mp = mp1;
6969 
6970 		ip6h = (ip6_t *)(mp1->b_wptr - udp_ip_hdr_len);
6971 	}
6972 	mp1->b_rptr = (unsigned char *)ip6h;
6973 	ip6i = (ip6i_t *)ip6h;
6974 
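/*
 * For each option, use the ancillary data value (ipp) unless the option
 * was set as a sticky option, in which case use udp_sticky_ipp.
 */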
6975 #define	ANCIL_OR_STICKY_PTR(f) ((is_sticky & f) ? &udp->udp_sticky_ipp : ipp)
6976 	if (option_exists & IPPF_HAS_IP6I) {
6977 		ip6h = (ip6_t *)&ip6i[1];
6978 		ip6i->ip6i_flags = 0;
6979 		ip6i->ip6i_vcf = IPV6_DEFAULT_VERS_AND_FLOW;
6980 
6981 		/* sin6_scope_id takes precedence over IPPF_IFINDEX */
6982 		if (option_exists & IPPF_SCOPE_ID) {
6983 			ip6i->ip6i_flags |= IP6I_IFINDEX;
6984 			ip6i->ip6i_ifindex = sin6->sin6_scope_id;
6985 		} else if (option_exists & IPPF_IFINDEX) {
6986 			tipp = ANCIL_OR_STICKY_PTR(IPPF_IFINDEX);
6987 			ASSERT(tipp->ipp_ifindex != 0);
6988 			ip6i->ip6i_flags |= IP6I_IFINDEX;
6989 			ip6i->ip6i_ifindex = tipp->ipp_ifindex;
6990 		}
6991 
6992 		if (option_exists & IPPF_ADDR) {
6993 			/*
6994 			 * Enable per-packet source address verification if
6995 			 * IPV6_PKTINFO specified the source address.
6996 			 * ip6_src is set in the transport's _wput function.
6997 			 */
6998 			ip6i->ip6i_flags |= IP6I_VERIFY_SRC;
6999 		}
7000 
7001 		if (option_exists & IPPF_DONTFRAG) {
7002 			ip6i->ip6i_flags |= IP6I_DONTFRAG;
7003 		}
7004 
7005 		if (option_exists & IPPF_USE_MIN_MTU) {
7006 			ip6i->ip6i_flags = IP6I_API_USE_MIN_MTU(
7007 			    ip6i->ip6i_flags, ipp->ipp_use_min_mtu);
7008 		}
7009 
7010 		if (option_exists & IPPF_NEXTHOP) {
7011 			tipp = ANCIL_OR_STICKY_PTR(IPPF_NEXTHOP);
7012 			ASSERT(!IN6_IS_ADDR_UNSPECIFIED(&tipp->ipp_nexthop));
7013 			ip6i->ip6i_flags |= IP6I_NEXTHOP;
7014 			ip6i->ip6i_nexthop = tipp->ipp_nexthop;
7015 		}
7016 
7017 		/*
7018 		 * Tell IP this is an ip6i_t private header.
7019 		 */
7020 		ip6i->ip6i_nxt = IPPROTO_RAW;
7021 	}
7022 
7023 	/* Initialize IPv6 header */
7024 	ip6h->ip6_vcf = IPV6_DEFAULT_VERS_AND_FLOW;
7025 	bzero(&ip6h->ip6_src, sizeof (ip6h->ip6_src));
7026 
7027 	/* Set the hoplimit of the outgoing packet. */
7028 	if (option_exists & IPPF_HOPLIMIT) {
7029 		/* IPV6_HOPLIMIT ancillary data overrides all other settings. */
7030 		ip6h->ip6_hops = ipp->ipp_hoplimit;
7031 		ip6i->ip6i_flags |= IP6I_HOPLIMIT;
7032 	} else if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
7033 		ip6h->ip6_hops = udp->udp_multicast_ttl;
7034 		if (option_exists & IPPF_MULTICAST_HOPS)
7035 			ip6i->ip6i_flags |= IP6I_HOPLIMIT;
7036 	} else {
7037 		ip6h->ip6_hops = udp->udp_ttl;
7038 		if (option_exists & IPPF_UNICAST_HOPS)
7039 			ip6i->ip6i_flags |= IP6I_HOPLIMIT;
7040 	}
7041 
7042 	if (option_exists & IPPF_ADDR) {
7043 		tipp = ANCIL_OR_STICKY_PTR(IPPF_ADDR);
7044 		ASSERT(!IN6_IS_ADDR_UNSPECIFIED(&tipp->ipp_addr));
7045 		ip6h->ip6_src = tipp->ipp_addr;
7046 	} else {
7047 		/*
7048 		 * The source address was not set using IPV6_PKTINFO.
7049 		 * First look at the bound source.
7050 		 * If unspecified, fall back to __sin6_src_id.
7051 		 */
7052 		ip6h->ip6_src = udp->udp_v6src;
7053 		if (sin6->__sin6_src_id != 0 &&
7054 		    IN6_IS_ADDR_UNSPECIFIED(&ip6h->ip6_src)) {
7055 			ip_srcid_find_id(sin6->__sin6_src_id,
7056 			    &ip6h->ip6_src, connp->conn_zoneid,
7057 			    us->us_netstack);
7058 		}
7059 	}
7060 
7061 	nxthdr_ptr = (uint8_t *)&ip6h->ip6_nxt;
7062 	cp = (uint8_t *)&ip6h[1];
7063 
7064 	/*
7065 	 * Here's where we have to start stringing together
7066 	 * any extension headers in the right order:
7067 	 * Hop-by-hop, destination, routing, and final destination opts.
7068 	 */
7069 	if (option_exists & IPPF_HOPOPTS) {
7070 		/* Hop-by-hop options */
7071 		ip6_hbh_t *hbh = (ip6_hbh_t *)cp;
7072 		tipp = ANCIL_OR_STICKY_PTR(IPPF_HOPOPTS);
7073 		if (hopoptslen == 0) {
7074 			hopoptsptr = tipp->ipp_hopopts;
7075 			hopoptslen = tipp->ipp_hopoptslen;
7076 			is_ancillary = B_TRUE;
7077 		}
7078 
7079 		*nxthdr_ptr = IPPROTO_HOPOPTS;
7080 		nxthdr_ptr = &hbh->ip6h_nxt;
7081 
7082 		bcopy(hopoptsptr, cp, hopoptslen);
7083 		cp += hopoptslen;
7084 
7085 		if (hopoptsptr != NULL && !is_ancillary) {
7086 			kmem_free(hopoptsptr, hopoptslen);
7087 			hopoptsptr = NULL;
7088 			hopoptslen = 0;
7089 		}
7090 	}
7091 	/*
7092 	 * En-route destination options
7093 	 * Only do them if there's a routing header as well
7094 	 */
7095 	if (option_exists & IPPF_RTDSTOPTS) {
7096 		ip6_dest_t *dst = (ip6_dest_t *)cp;
7097 		tipp = ANCIL_OR_STICKY_PTR(IPPF_RTDSTOPTS);
7098 
7099 		*nxthdr_ptr = IPPROTO_DSTOPTS;
7100 		nxthdr_ptr = &dst->ip6d_nxt;
7101 
7102 		bcopy(tipp->ipp_rtdstopts, cp, tipp->ipp_rtdstoptslen);
7103 		cp += tipp->ipp_rtdstoptslen;
7104 	}
7105 	/*
7106 	 * Routing header next
7107 	 */
7108 	if (option_exists & IPPF_RTHDR) {
7109 		ip6_rthdr_t *rt = (ip6_rthdr_t *)cp;
7110 		tipp = ANCIL_OR_STICKY_PTR(IPPF_RTHDR);
7111 
7112 		*nxthdr_ptr = IPPROTO_ROUTING;
7113 		nxthdr_ptr = &rt->ip6r_nxt;
7114 
7115 		bcopy(tipp->ipp_rthdr, cp, tipp->ipp_rthdrlen);
7116 		cp += tipp->ipp_rthdrlen;
7117 	}
7118 	/*
7119 	 * Do ultimate destination options
7120 	 */
7121 	if (option_exists & IPPF_DSTOPTS) {
7122 		ip6_dest_t *dest = (ip6_dest_t *)cp;
7123 		tipp = ANCIL_OR_STICKY_PTR(IPPF_DSTOPTS);
7124 
7125 		*nxthdr_ptr = IPPROTO_DSTOPTS;
7126 		nxthdr_ptr = &dest->ip6d_nxt;
7127 
7128 		bcopy(tipp->ipp_dstopts, cp, tipp->ipp_dstoptslen);
7129 		cp += tipp->ipp_dstoptslen;
7130 	}
7131 	/*
7132 	 * Now set the last header pointer to the proto passed in
7133 	 */
7134 	ASSERT((int)(cp - (uint8_t *)ip6i) == (udp_ip_hdr_len - UDPH_SIZE));
7135 	*nxthdr_ptr = IPPROTO_UDP;
7136 
7137 	/* Update UDP header */
7138 	udph = (udpha_t *)((uchar_t *)ip6i + udp_ip_hdr_len - UDPH_SIZE);
7139 	udph->uha_dst_port = sin6->sin6_port;
7140 	udph->uha_src_port = udp->udp_port;
7141 
7142 	/*
7143 	 * Copy in the destination address
7144 	 */
7145 	ip6h->ip6_dst = ip6_dst;
7146 
7147 	ip6h->ip6_vcf =
7148 	    (IPV6_DEFAULT_VERS_AND_FLOW & IPV6_VERS_AND_FLOW_MASK) |
7149 	    (sin6->sin6_flowinfo & ~IPV6_VERS_AND_FLOW_MASK);
7150 
7151 	if (option_exists & IPPF_TCLASS) {
7152 		tipp = ANCIL_OR_STICKY_PTR(IPPF_TCLASS);
7153 		ip6h->ip6_vcf = IPV6_TCLASS_FLOW(ip6h->ip6_vcf,
7154 		    tipp->ipp_tclass);
7155 	}
7156 	rw_exit(&udp->udp_rwlock);
7157 
7158 	if (option_exists & IPPF_RTHDR) {
7159 		ip6_rthdr_t	*rth;
7160 
7161 		/*
7162 		 * Perform any processing needed for source routing.
7163 		 * We know that all extension headers will be in the same mblk
7164 		 * as the IPv6 header.
7165 		 */
7166 		rth = ip_find_rthdr_v6(ip6h, mp1->b_wptr);
7167 		if (rth != NULL && rth->ip6r_segleft != 0) {
7168 			if (rth->ip6r_type != IPV6_RTHDR_TYPE_0) {
7169 				/*
7170 				 * Drop packet - only support Type 0 routing.
7171 				 * Notify the application as well.
7172 				 */
7173 				*error = EPROTO;
7174 				goto done;
7175 			}
7176 
7177 			/*
7178 			 * rth->ip6r_len is twice the number of
7179 			 * addresses in the header. Thus it must be even.
7180 			 */
7181 			if (rth->ip6r_len & 0x1) {
7182 				*error = EPROTO;
7183 				goto done;
7184 			}
7185 			/*
7186 			 * Shuffle the routing header and ip6_dst
7187 			 * addresses, and get the checksum difference
7188 			 * between the first hop (in ip6_dst) and
7189 			 * the destination (in the last routing hdr entry).
7190 			 */
7191 			csum = ip_massage_options_v6(ip6h, rth,
7192 			    us->us_netstack);
7193 			/*
7194 			 * Verify that the first hop isn't a mapped address.
7195 			 * Routers along the path need to do this verification
7196 			 * for subsequent hops.
7197 			 */
7198 			if (IN6_IS_ADDR_V4MAPPED(&ip6h->ip6_dst)) {
7199 				*error = EADDRNOTAVAIL;
7200 				goto done;
7201 			}
7202 
7203 			cp += (rth->ip6r_len + 1)*8;
7204 		}
7205 	}
7206 
7207 	/* count up length of UDP packet */
7208 	ip_len = (mp1->b_wptr - (unsigned char *)ip6h) - IPV6_HDR_LEN;
7209 	if ((mp2 = mp1->b_cont) != NULL) {
7210 		do {
7211 			ASSERT((uintptr_t)MBLKL(mp2) <= (uintptr_t)UINT_MAX);
7212 			ip_len += (uint32_t)MBLKL(mp2);
7213 		} while ((mp2 = mp2->b_cont) != NULL);
7214 	}
7215 
7216 	/*
7217 	 * If the size of the packet is greater than the maximum allowed by
7218 	 * ip, return an error. Passing this down could cause panics because
7219 	 * the size will have wrapped and be inconsistent with the msg size.
7220 	 */
7221 	if (ip_len > IP_MAXPACKET) {
7222 		*error = EMSGSIZE;
7223 		goto done;
7224 	}
7225 
7226 	/* Store the UDP length. Subtract length of extension hdrs */
7227 	udph->uha_length = htons(ip_len + IPV6_HDR_LEN -
7228 	    (int)((uchar_t *)udph - (uchar_t *)ip6h));
7229 
7230 	/*
7231 	 * We make it easy for IP to include our pseudo header
7232 	 * by putting our length in uh_checksum, modified (if
7233 	 * we have a routing header) by the checksum difference
7234 	 * between the ultimate destination and first hop addresses.
7235 	 * Note: UDP over IPv6 must always checksum the packet.
7236 	 */
7237 	csum += udph->uha_length;
7238 	csum = (csum & 0xFFFF) + (csum >> 16);
7239 	udph->uha_checksum = (uint16_t)csum;
7240 
7241 #ifdef _LITTLE_ENDIAN
7242 	ip_len = htons(ip_len);
7243 #endif
7244 	ip6h->ip6_plen = ip_len;
7245 	if (DB_CRED(mp) != NULL)
7246 		mblk_setcred(mp1, DB_CRED(mp));
7247 
7248 	if (DB_TYPE(mp) != M_DATA) {
7249 		ASSERT(mp != mp1);
7250 		freeb(mp);
7251 	}
7252 
7253 	/* mp has been consumed and we'll return success */
7254 	ASSERT(*error == 0);
7255 	mp = NULL;
7256 
7257 	/* We're done. Pass the packet to IP */
7258 	BUMP_MIB(&us->us_udp_mib, udpHCOutDatagrams);
7259 	ip_output_v6(connp, mp1, q, IP_WPUT);
7260 
7261 done:
7262 	if (sth_wroff != 0) {
7263 		(void) mi_set_sth_wroff(RD(q),
7264 		    udp->udp_max_hdr_len + us->us_wroff_extra);
7265 	}
7266 	if (hopoptsptr != NULL && !is_ancillary) {
7267 		kmem_free(hopoptsptr, hopoptslen);
7268 		hopoptsptr = NULL;
7269 	}
7270 	if (*error != 0) {
7271 		ASSERT(mp != NULL);
7272 		BUMP_MIB(&us->us_udp_mib, udpOutErrors);
7273 	}
7274 	return (mp);
7275 }
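
/*
 * A minimal sketch (hypothetical helper, not taken from this file) of the
 * next-header chaining used above: for every extension header appended,
 * the previous header's next-header byte is patched through nxthdr_ptr
 * with the new header's protocol number, and nxthdr_ptr is then re-aimed
 * at the new header's own next-header byte (always its first octet), so
 * that the final assignment of IPPROTO_UDP lands in the right place.
 */
#include <stdint.h>
#include <string.h>

static uint8_t *
sketch_append_exthdr(uint8_t *cp, uint8_t **nxthdr_ptr, uint8_t proto,
    const void *hdr, size_t hdrlen)
{
	**nxthdr_ptr = proto;		/* previous header now points at us */
	(void) memcpy(cp, hdr, hdrlen);
	*nxthdr_ptr = cp;		/* first octet is our next-header */
	return (cp + hdrlen);
}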
7276 
7277 
7278 static int
7279 udp_getpeername(udp_t *udp, struct sockaddr *sa, uint_t *salenp)
7280 {
7281 	sin_t *sin = (sin_t *)sa;
7282 	sin6_t *sin6 = (sin6_t *)sa;
7283 
7284 	ASSERT(RW_LOCK_HELD(&udp->udp_rwlock));
7285 
7286 	if (udp->udp_state != TS_DATA_XFER)
7287 		return (ENOTCONN);
7288 
7289 	switch (udp->udp_family) {
7290 	case AF_INET:
7291 		ASSERT(udp->udp_ipversion == IPV4_VERSION);
7292 
7293 		if (*salenp < sizeof (sin_t))
7294 			return (EINVAL);
7295 
7296 		*salenp = sizeof (sin_t);
7297 		*sin = sin_null;
7298 		sin->sin_family = AF_INET;
7299 		sin->sin_port = udp->udp_dstport;
7300 		sin->sin_addr.s_addr = V4_PART_OF_V6(udp->udp_v6dst);
7301 		break;
7302 
7303 	case AF_INET6:
7304 		if (*salenp < sizeof (sin6_t))
7305 			return (EINVAL);
7306 
7307 		*salenp = sizeof (sin6_t);
7308 		*sin6 = sin6_null;
7309 		sin6->sin6_family = AF_INET6;
7310 		sin6->sin6_port = udp->udp_dstport;
7311 		sin6->sin6_addr = udp->udp_v6dst;
7312 		sin6->sin6_flowinfo = udp->udp_flowinfo;
7313 		break;
7314 	}
7315 
7316 	return (0);
7317 }
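
/*
 * User-level view of the above: once the endpoint has a default
 * destination (TS_DATA_XFER, i.e. after connect()), getpeername()
 * returns that destination; beforehand it fails with ENOTCONN.
 * A minimal standalone sketch using an assumed peer of 127.0.0.1:7.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

static int
sketch_show_udp_peer(void)
{
	struct sockaddr_in peer, out;
	socklen_t outlen = sizeof (out);
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd == -1)
		return (-1);
	(void) memset(&peer, 0, sizeof (peer));
	peer.sin_family = AF_INET;
	peer.sin_port = htons(7);			/* assumed port */
	peer.sin_addr.s_addr = inet_addr("127.0.0.1");

	if (connect(fd, (struct sockaddr *)&peer, sizeof (peer)) != 0 ||
	    getpeername(fd, (struct sockaddr *)&out, &outlen) != 0) {
		(void) close(fd);
		return (-1);
	}
	(void) printf("peer %s:%d\n", inet_ntoa(out.sin_addr),
	    ntohs(out.sin_port));
	(void) close(fd);
	return (0);
}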
7318 
7319 static int
7320 udp_getmyname(udp_t *udp, struct sockaddr *sa, uint_t *salenp)
7321 {
7322 	sin_t *sin = (sin_t *)sa;
7323 	sin6_t *sin6 = (sin6_t *)sa;
7324 
7325 	ASSERT(RW_LOCK_HELD(&udp->udp_rwlock));
7326 
7327 	switch (udp->udp_family) {
7328 	case AF_INET:
7329 		ASSERT(udp->udp_ipversion == IPV4_VERSION);
7330 
7331 		if (*salenp < sizeof (sin_t))
7332 			return (EINVAL);
7333 
7334 		*salenp = sizeof (sin_t);
7335 		*sin = sin_null;
7336 		sin->sin_family = AF_INET;
7337 		sin->sin_port = udp->udp_port;
7338 
7339 		/*
7340 		 * If udp_v6src is unspecified, we might be bound to broadcast
7341 		 * / multicast.  Use udp_bound_v6src as local address instead
7342 		 * (that could also still be unspecified).
7343 		 */
7344 		if (!IN6_IS_ADDR_V4MAPPED_ANY(&udp->udp_v6src) &&
7345 		    !IN6_IS_ADDR_UNSPECIFIED(&udp->udp_v6src)) {
7346 			sin->sin_addr.s_addr = V4_PART_OF_V6(udp->udp_v6src);
7347 		} else {
7348 			sin->sin_addr.s_addr =
7349 			    V4_PART_OF_V6(udp->udp_bound_v6src);
7350 		}
7351 		break;
7352 
7353 	case AF_INET6:
7354 		if (*salenp < sizeof (sin6_t))
7355 			return (EINVAL);
7356 
7357 		*salenp = sizeof (sin6_t);
7358 		*sin6 = sin6_null;
7359 		sin6->sin6_family = AF_INET6;
7360 		sin6->sin6_port = udp->udp_port;
7361 		sin6->sin6_flowinfo = udp->udp_flowinfo;
7362 
7363 		/*
7364 		 * If udp_v6src is unspecified, we might be bound to broadcast
7365 		 * / multicast.  Use udp_bound_v6src as local address instead
7366 		 * (that could also still be unspecified).
7367 		 */
7368 		if (!IN6_IS_ADDR_UNSPECIFIED(&udp->udp_v6src))
7369 			sin6->sin6_addr = udp->udp_v6src;
7370 		else
7371 			sin6->sin6_addr = udp->udp_bound_v6src;
7372 		break;
7373 	}
7374 
7375 	return (0);
7376 }
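
/*
 * User-level counterpart of the above: after a bind to INADDR_ANY,
 * udp_v6src stays unspecified, so getsockname() reports the wildcard
 * address together with the port the kernel selected.  A minimal
 * standalone sketch:
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>
#include <unistd.h>

static in_port_t
sketch_bound_udp_port(void)
{
	struct sockaddr_in any, name;
	socklen_t namelen = sizeof (name);
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	in_port_t port = 0;

	if (fd == -1)
		return (0);
	(void) memset(&any, 0, sizeof (any));
	any.sin_family = AF_INET;
	any.sin_addr.s_addr = htonl(INADDR_ANY);
	any.sin_port = 0;			/* let the kernel pick */

	if (bind(fd, (struct sockaddr *)&any, sizeof (any)) == 0 &&
	    getsockname(fd, (struct sockaddr *)&name, &namelen) == 0)
		port = ntohs(name.sin_port);
	(void) close(fd);
	return (port);
}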
7377 
7378 /*
7379  * Handle special out-of-band ioctl requests (see PSARC/2008/265).
7380  */
7381 static void
7382 udp_wput_cmdblk(queue_t *q, mblk_t *mp)
7383 {
7384 	void	*data;
7385 	mblk_t	*datamp = mp->b_cont;
7386 	udp_t	*udp = Q_TO_UDP(q);
7387 	cmdblk_t *cmdp = (cmdblk_t *)mp->b_rptr;
7388 
7389 	if (datamp == NULL || MBLKL(datamp) < cmdp->cb_len) {
7390 		cmdp->cb_error = EPROTO;
7391 		qreply(q, mp);
7392 		return;
7393 	}
7394 	data = datamp->b_rptr;
7395 
7396 	rw_enter(&udp->udp_rwlock, RW_READER);
7397 	switch (cmdp->cb_cmd) {
7398 	case TI_GETPEERNAME:
7399 		cmdp->cb_error = udp_getpeername(udp, data, &cmdp->cb_len);
7400 		break;
7401 	case TI_GETMYNAME:
7402 		cmdp->cb_error = udp_getmyname(udp, data, &cmdp->cb_len);
7403 		break;
7404 	default:
7405 		cmdp->cb_error = EINVAL;
7406 		break;
7407 	}
7408 	rw_exit(&udp->udp_rwlock);
7409 
7410 	qreply(q, mp);
7411 }
7412 
7413 static void
7414 udp_wput_other(queue_t *q, mblk_t *mp)
7415 {
7416 	uchar_t	*rptr = mp->b_rptr;
7417 	struct datab *db;
7418 	struct iocblk *iocp;
7419 	cred_t	*cr;
7420 	conn_t	*connp = Q_TO_CONN(q);
7421 	udp_t	*udp = connp->conn_udp;
7422 	udp_stack_t *us;
7423 
7424 	TRACE_1(TR_FAC_UDP, TR_UDP_WPUT_OTHER_START,
7425 	    "udp_wput_other_start: q %p", q);
7426 
7427 	us = udp->udp_us;
7428 	db = mp->b_datap;
7429 
7430 	cr = DB_CREDDEF(mp, connp->conn_cred);
7431 
7432 	switch (db->db_type) {
7433 	case M_CMD:
7434 		udp_wput_cmdblk(q, mp);
7435 		return;
7436 
7437 	case M_PROTO:
7438 	case M_PCPROTO:
7439 		if (mp->b_wptr - rptr < sizeof (t_scalar_t)) {
7440 			freemsg(mp);
7441 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7442 			    "udp_wput_other_end: q %p (%S)", q, "protoshort");
7443 			return;
7444 		}
7445 		switch (((t_primp_t)rptr)->type) {
7446 		case T_ADDR_REQ:
7447 			udp_addr_req(q, mp);
7448 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7449 			    "udp_wput_other_end: q %p (%S)", q, "addrreq");
7450 			return;
7451 		case O_T_BIND_REQ:
7452 		case T_BIND_REQ:
7453 			udp_bind(q, mp);
7454 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7455 			    "udp_wput_other_end: q %p (%S)", q, "bindreq");
7456 			return;
7457 		case T_CONN_REQ:
7458 			udp_connect(q, mp);
7459 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7460 			    "udp_wput_other_end: q %p (%S)", q, "connreq");
7461 			return;
7462 		case T_CAPABILITY_REQ:
7463 			udp_capability_req(q, mp);
7464 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7465 			    "udp_wput_other_end: q %p (%S)", q, "capabreq");
7466 			return;
7467 		case T_INFO_REQ:
7468 			udp_info_req(q, mp);
7469 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7470 			    "udp_wput_other_end: q %p (%S)", q, "inforeq");
7471 			return;
7472 		case T_UNITDATA_REQ:
7473 			/*
7474 			 * If a T_UNITDATA_REQ gets here, the address must
7475 			 * be bad.  Valid T_UNITDATA_REQs are handled
7476 			 * in udp_wput.
7477 			 */
7478 			udp_ud_err(q, mp, NULL, 0, EADDRNOTAVAIL);
7479 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7480 			    "udp_wput_other_end: q %p (%S)", q, "unitdatareq");
7481 			return;
7482 		case T_UNBIND_REQ:
7483 			udp_unbind(q, mp);
7484 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7485 			    "udp_wput_other_end: q %p (%S)", q, "unbindreq");
7486 			return;
7487 		case T_SVR4_OPTMGMT_REQ:
7488 			if (!snmpcom_req(q, mp, udp_snmp_set, ip_snmp_get,
7489 			    cr)) {
7490 				(void) svr4_optcom_req(q,
7491 				    mp, cr, &udp_opt_obj, B_TRUE);
7492 			}
7493 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7494 			    "udp_wput_other_end: q %p (%S)", q, "optmgmtreq");
7495 			return;
7496 
7497 		case T_OPTMGMT_REQ:
7498 			(void) tpi_optcom_req(q, mp, cr, &udp_opt_obj, B_TRUE);
7499 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7500 			    "udp_wput_other_end: q %p (%S)", q, "optmgmtreq");
7501 			return;
7502 
7503 		case T_DISCON_REQ:
7504 			udp_disconnect(q, mp);
7505 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7506 			    "udp_wput_other_end: q %p (%S)", q, "disconreq");
7507 			return;
7508 
7509 		/* The following TPI messages are not supported by udp. */
7510 		case O_T_CONN_RES:
7511 		case T_CONN_RES:
7512 			udp_err_ack(q, mp, TNOTSUPPORT, 0);
7513 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7514 			    "udp_wput_other_end: q %p (%S)", q,
7515 			    "connres/disconreq");
7516 			return;
7517 
7518 		/* The following 3 TPI messages are illegal for udp. */
7519 		case T_DATA_REQ:
7520 		case T_EXDATA_REQ:
7521 		case T_ORDREL_REQ:
7522 			udp_err_ack(q, mp, TNOTSUPPORT, 0);
7523 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7524 			    "udp_wput_other_end: q %p (%S)", q,
7525 			    "data/exdata/ordrel");
7526 			return;
7527 		default:
7528 			break;
7529 		}
7530 		break;
7531 	case M_FLUSH:
7532 		if (*rptr & FLUSHW)
7533 			flushq(q, FLUSHDATA);
7534 		break;
7535 	case M_IOCTL:
7536 		iocp = (struct iocblk *)mp->b_rptr;
7537 		switch (iocp->ioc_cmd) {
7538 		case TI_GETPEERNAME:
7539 			if (udp->udp_state != TS_DATA_XFER) {
7540 				/*
7541 				 * If a default destination address has not
7542 				 * been associated with the stream, then we
7543 				 * don't know the peer's name.
7544 				 */
7545 				iocp->ioc_error = ENOTCONN;
7546 				iocp->ioc_count = 0;
7547 				mp->b_datap->db_type = M_IOCACK;
7548 				qreply(q, mp);
7549 				TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7550 				    "udp_wput_other_end: q %p (%S)", q,
7551 				    "getpeername");
7552 				return;
7553 			}
7554 			/* FALLTHRU */
7555 		case TI_GETMYNAME: {
7556 			/*
7557 			 * For TI_GETPEERNAME and TI_GETMYNAME, we first
7558 			 * need to copyin the user's strbuf structure.
7559 			 * Processing will continue in the M_IOCDATA case
7560 			 * below.
7561 			 */
7562 			mi_copyin(q, mp, NULL,
7563 			    SIZEOF_STRUCT(strbuf, iocp->ioc_flag));
7564 			TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7565 			    "udp_wput_other_end: q %p (%S)", q, "getmyname");
7566 			return;
7567 			}
7568 		case ND_SET:
7569 			/* nd_getset performs the necessary checking */
7570 		case ND_GET:
7571 			if (nd_getset(q, us->us_nd, mp)) {
7572 				qreply(q, mp);
7573 				TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7574 				    "udp_wput_other_end: q %p (%S)", q, "get");
7575 				return;
7576 			}
7577 			break;
7578 		case _SIOCSOCKFALLBACK:
7579 			/*
7580 			 * Either sockmod is about to be popped and the
7581 			 * socket would now be treated as a plain stream,
7582 			 * or a module is about to be pushed so we could
7583 			 * or a module is about to be pushed, so we can no
7584 			 * longer use the read-side synchronous stream.
7585 			 * Drain any queued data and disable the direct
7586 			 * sockfs interface from now on.
7587 			if (!udp->udp_issocket) {
7588 				DB_TYPE(mp) = M_IOCNAK;
7589 				iocp->ioc_error = EINVAL;
7590 			} else {
7591 				udp->udp_issocket = B_FALSE;
7592 				if (udp->udp_direct_sockfs) {
7593 					/*
7594 					 * Disable read-side synchronous
7595 					 * stream interface and drain any
7596 					 * queued data.
7597 					 */
7598 					udp_rcv_drain(RD(q), udp,
7599 					    B_FALSE);
7600 					ASSERT(!udp->udp_direct_sockfs);
7601 					UDP_STAT(us, udp_sock_fallback);
7602 				}
7603 				DB_TYPE(mp) = M_IOCACK;
7604 				iocp->ioc_error = 0;
7605 			}
7606 			iocp->ioc_count = 0;
7607 			iocp->ioc_rval = 0;
7608 			qreply(q, mp);
7609 			return;
7610 		default:
7611 			break;
7612 		}
7613 		break;
7614 	case M_IOCDATA:
7615 		udp_wput_iocdata(q, mp);
7616 		TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7617 		    "udp_wput_other_end: q %p (%S)", q, "iocdata");
7618 		return;
7619 	default:
7620 		/* Unrecognized messages are passed through without change. */
7621 		break;
7622 	}
7623 	TRACE_2(TR_FAC_UDP, TR_UDP_WPUT_OTHER_END,
7624 	    "udp_wput_other_end: q %p (%S)", q, "end");
7625 	ip_output(connp, mp, q, IP_WPUT);
7626 }
7627 
7628 /*
7629  * udp_wput_iocdata is called by udp_wput_other to handle all M_IOCDATA
7630  * messages.
7631  */
7632 static void
7633 udp_wput_iocdata(queue_t *q, mblk_t *mp)
7634 {
7635 	mblk_t	*mp1;
7636 	struct iocblk *iocp = (struct iocblk *)mp->b_rptr;
7637 	STRUCT_HANDLE(strbuf, sb);
7638 	udp_t	*udp = Q_TO_UDP(q);
7639 	int	error;
7640 	uint_t	addrlen;
7641 
7642 	/* Make sure it is one of ours. */
7643 	switch (iocp->ioc_cmd) {
7644 	case TI_GETMYNAME:
7645 	case TI_GETPEERNAME:
7646 		break;
7647 	default:
7648 		ip_output(udp->udp_connp, mp, q, IP_WPUT);
7649 		return;
7650 	}
7651 
7652 	switch (mi_copy_state(q, mp, &mp1)) {
7653 	case -1:
7654 		return;
7655 	case MI_COPY_CASE(MI_COPY_IN, 1):
7656 		break;
7657 	case MI_COPY_CASE(MI_COPY_OUT, 1):
7658 		/*
7659 		 * The address has been copied out, so now
7660 		 * copyout the strbuf.
7661 		 */
7662 		mi_copyout(q, mp);
7663 		return;
7664 	case MI_COPY_CASE(MI_COPY_OUT, 2):
7665 		/*
7666 		 * The address and strbuf have been copied out.
7667 		 * We're done, so just acknowledge the original
7668 		 * M_IOCTL.
7669 		 */
7670 		mi_copy_done(q, mp, 0);
7671 		return;
7672 	default:
7673 		/*
7674 		 * Something strange has happened, so acknowledge
7675 		 * the original M_IOCTL with an EPROTO error.
7676 		 */
7677 		mi_copy_done(q, mp, EPROTO);
7678 		return;
7679 	}
7680 
7681 	/*
7682 	 * Now we have the strbuf structure for TI_GETMYNAME
7683 	 * and TI_GETPEERNAME.  Next we copyout the requested
7684 	 * address and then we'll copyout the strbuf.
7685 	 */
7686 	STRUCT_SET_HANDLE(sb, iocp->ioc_flag, (void *)mp1->b_rptr);
7687 	addrlen = udp->udp_family == AF_INET ? sizeof (sin_t) : sizeof (sin6_t);
7688 	if (STRUCT_FGET(sb, maxlen) < addrlen) {
7689 		mi_copy_done(q, mp, EINVAL);
7690 		return;
7691 	}
7692 
7693 	mp1 = mi_copyout_alloc(q, mp, STRUCT_FGETP(sb, buf), addrlen, B_TRUE);
7694 	if (mp1 == NULL)
7695 		return;
7696 
7697 	rw_enter(&udp->udp_rwlock, RW_READER);
7698 	switch (iocp->ioc_cmd) {
7699 	case TI_GETMYNAME:
7700 		error = udp_getmyname(udp, (void *)mp1->b_rptr, &addrlen);
7701 		break;
7702 	case TI_GETPEERNAME:
7703 		error = udp_getpeername(udp, (void *)mp1->b_rptr, &addrlen);
7704 		break;
7705 	}
7706 	rw_exit(&udp->udp_rwlock);
7707 
7708 	if (error != 0) {
7709 		mi_copy_done(q, mp, error);
7710 	} else {
7711 		mp1->b_wptr += addrlen;
7712 		STRUCT_FSET(sb, len, addrlen);
7713 
7714 		/* Copy out the address */
7715 		mi_copyout(q, mp);
7716 	}
7717 }
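
/*
 * The two copy passes above implement a transparent ioctl whose argument
 * is a strbuf describing the caller's address buffer: the strbuf is
 * copied in, the address is copied out to sb.buf, and the strbuf is
 * copied back out with len filled in.  A hedged user-level sketch of how
 * the ioctl could be issued directly:
 */
#include <stropts.h>
#include <sys/timod.h>
#include <netinet/in.h>
#include <unistd.h>

static int
sketch_ti_getmyname(int fd, struct sockaddr_in6 *addr)
{
	struct strbuf sb;

	sb.maxlen = sizeof (*addr);	/* must be >= sizeof (sin6_t) */
	sb.len = 0;
	sb.buf = (char *)addr;
	return (ioctl(fd, TI_GETMYNAME, &sb));
}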
7718 
7719 static int
7720 udp_unitdata_opt_process(queue_t *q, mblk_t *mp, int *errorp,
7721     udpattrs_t *udpattrs)
7722 {
7723 	struct T_unitdata_req *udreqp;
7724 	int is_absreq_failure;
7725 	cred_t *cr;
7726 	conn_t	*connp = Q_TO_CONN(q);
7727 
7728 	ASSERT(((t_primp_t)mp->b_rptr)->type);
7729 
7730 	cr = DB_CREDDEF(mp, connp->conn_cred);
7731 
7732 	udreqp = (struct T_unitdata_req *)mp->b_rptr;
7733 
7734 	*errorp = tpi_optcom_buf(q, mp, &udreqp->OPT_length,
7735 	    udreqp->OPT_offset, cr, &udp_opt_obj,
7736 	    udpattrs, &is_absreq_failure);
7737 
7738 	if (*errorp != 0) {
7739 		/*
7740 		 * Note: No special action needed in this
7741 		 * module for "is_absreq_failure"
7742 		 */
7743 		return (-1);		/* failure */
7744 	}
7745 	ASSERT(is_absreq_failure == 0);
7746 	return (0);	/* success */
7747 }
7748 
7749 void
7750 udp_ddi_init(void)
7751 {
7752 	udp_max_optsize = optcom_max_optsize(udp_opt_obj.odb_opt_des_arr,
7753 	    udp_opt_obj.odb_opt_arr_cnt);
7754 
7755 	/*
7756 	 * We want to be informed each time a stack is created or
7757 	 * destroyed in the kernel, so we can maintain the
7758 	 * set of udp_stack_t's.
7759 	 */
7760 	netstack_register(NS_UDP, udp_stack_init, NULL, udp_stack_fini);
7761 }
7762 
7763 void
7764 udp_ddi_destroy(void)
7765 {
7766 	netstack_unregister(NS_UDP);
7767 }
7768 
7769 /*
7770  * Initialize the UDP stack instance.
7771  */
7772 static void *
7773 udp_stack_init(netstackid_t stackid, netstack_t *ns)
7774 {
7775 	udp_stack_t	*us;
7776 	udpparam_t	*pa;
7777 	int		i;
7778 
7779 	us = (udp_stack_t *)kmem_zalloc(sizeof (*us), KM_SLEEP);
7780 	us->us_netstack = ns;
7781 
7782 	us->us_num_epriv_ports = UDP_NUM_EPRIV_PORTS;
7783 	us->us_epriv_ports[0] = 2049;
7784 	us->us_epriv_ports[1] = 4045;
7785 
7786 	/*
7787 	 * The smallest anonymous port in the privileged port range in which
7788 	 * UDP looks for a free port.  Used with the UDP_ANONPRIVBIND option.
7789 	 */
7790 	us->us_min_anonpriv_port = 512;
7791 
7792 	us->us_bind_fanout_size = udp_bind_fanout_size;
7793 
7794 	/* Round up a value that might have been modified in /etc/system */
7795 	if (us->us_bind_fanout_size & (us->us_bind_fanout_size - 1)) {
7796 		/* Not a power of two. Round up to nearest power of two */
7797 		for (i = 0; i < 31; i++) {
7798 			if (us->us_bind_fanout_size < (1 << i))
7799 				break;
7800 		}
7801 		us->us_bind_fanout_size = 1 << i;
7802 	}
7803 	us->us_bind_fanout = kmem_zalloc(us->us_bind_fanout_size *
7804 	    sizeof (udp_fanout_t), KM_SLEEP);
7805 	for (i = 0; i < us->us_bind_fanout_size; i++) {
7806 		mutex_init(&us->us_bind_fanout[i].uf_lock, NULL, MUTEX_DEFAULT,
7807 		    NULL);
7808 	}
7809 
7810 	pa = (udpparam_t *)kmem_alloc(sizeof (udp_param_arr), KM_SLEEP);
7811 
7812 	us->us_param_arr = pa;
7813 	bcopy(udp_param_arr, us->us_param_arr, sizeof (udp_param_arr));
7814 
7815 	(void) udp_param_register(&us->us_nd,
7816 	    us->us_param_arr, A_CNT(udp_param_arr));
7817 
7818 	us->us_kstat = udp_kstat2_init(stackid, &us->us_statistics);
7819 	us->us_mibkp = udp_kstat_init(stackid);
7820 	return (us);
7821 }
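
/*
 * Standalone sketch of the fanout sizing above: a value tuned in
 * /etc/system that is not a power of two is rounded up to the next
 * power of two, e.g. 512 stays 512 while 3000 becomes 4096.  (The
 * helper name is illustrative only.)
 */
#include <stdint.h>

static uint32_t
sketch_roundup_pow2(uint32_t size)
{
	int i;

	if ((size & (size - 1)) == 0)
		return (size);			/* already a power of two */
	for (i = 0; i < 31; i++) {
		if (size < (1U << i))
			break;
	}
	return (1U << i);
}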
7822 
7823 /*
7824  * Free the UDP stack instance.
7825  */
7826 static void
7827 udp_stack_fini(netstackid_t stackid, void *arg)
7828 {
7829 	udp_stack_t *us = (udp_stack_t *)arg;
7830 	int i;
7831 
7832 	for (i = 0; i < us->us_bind_fanout_size; i++) {
7833 		mutex_destroy(&us->us_bind_fanout[i].uf_lock);
7834 	}
7835 
7836 	kmem_free(us->us_bind_fanout, us->us_bind_fanout_size *
7837 	    sizeof (udp_fanout_t));
7838 
7839 	us->us_bind_fanout = NULL;
7840 
7841 	nd_free(&us->us_nd);
7842 	kmem_free(us->us_param_arr, sizeof (udp_param_arr));
7843 	us->us_param_arr = NULL;
7844 
7845 	udp_kstat_fini(stackid, us->us_mibkp);
7846 	us->us_mibkp = NULL;
7847 
7848 	udp_kstat2_fini(stackid, us->us_kstat);
7849 	us->us_kstat = NULL;
7850 	bzero(&us->us_statistics, sizeof (us->us_statistics));
7851 	kmem_free(us, sizeof (*us));
7852 }
7853 
7854 static void *
7855 udp_kstat2_init(netstackid_t stackid, udp_stat_t *us_statisticsp)
7856 {
7857 	kstat_t *ksp;
7858 
7859 	udp_stat_t template = {
7860 		{ "udp_ip_send",		KSTAT_DATA_UINT64 },
7861 		{ "udp_ip_ire_send",		KSTAT_DATA_UINT64 },
7862 		{ "udp_ire_null",		KSTAT_DATA_UINT64 },
7863 		{ "udp_drain",			KSTAT_DATA_UINT64 },
7864 		{ "udp_sock_fallback",		KSTAT_DATA_UINT64 },
7865 		{ "udp_rrw_busy",		KSTAT_DATA_UINT64 },
7866 		{ "udp_rrw_msgcnt",		KSTAT_DATA_UINT64 },
7867 		{ "udp_out_sw_cksum",		KSTAT_DATA_UINT64 },
7868 		{ "udp_out_sw_cksum_bytes",	KSTAT_DATA_UINT64 },
7869 		{ "udp_out_opt",		KSTAT_DATA_UINT64 },
7870 		{ "udp_out_err_notconn",	KSTAT_DATA_UINT64 },
7871 		{ "udp_out_err_output",		KSTAT_DATA_UINT64 },
7872 		{ "udp_out_err_tudr",		KSTAT_DATA_UINT64 },
7873 		{ "udp_in_pktinfo",		KSTAT_DATA_UINT64 },
7874 		{ "udp_in_recvdstaddr",		KSTAT_DATA_UINT64 },
7875 		{ "udp_in_recvopts",		KSTAT_DATA_UINT64 },
7876 		{ "udp_in_recvif",		KSTAT_DATA_UINT64 },
7877 		{ "udp_in_recvslla",		KSTAT_DATA_UINT64 },
7878 		{ "udp_in_recvucred",		KSTAT_DATA_UINT64 },
7879 		{ "udp_in_recvttl",		KSTAT_DATA_UINT64 },
7880 		{ "udp_in_recvhopopts",		KSTAT_DATA_UINT64 },
7881 		{ "udp_in_recvhoplimit",	KSTAT_DATA_UINT64 },
7882 		{ "udp_in_recvdstopts",		KSTAT_DATA_UINT64 },
7883 		{ "udp_in_recvrtdstopts",	KSTAT_DATA_UINT64 },
7884 		{ "udp_in_recvrthdr",		KSTAT_DATA_UINT64 },
7885 		{ "udp_in_recvpktinfo",		KSTAT_DATA_UINT64 },
7886 		{ "udp_in_recvtclass",		KSTAT_DATA_UINT64 },
7887 		{ "udp_in_timestamp",		KSTAT_DATA_UINT64 },
7888 #ifdef DEBUG
7889 		{ "udp_data_conn",		KSTAT_DATA_UINT64 },
7890 		{ "udp_data_notconn",		KSTAT_DATA_UINT64 },
7891 #endif
7892 	};
7893 
7894 	ksp = kstat_create_netstack(UDP_MOD_NAME, 0, "udpstat", "net",
7895 	    KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
7896 	    KSTAT_FLAG_VIRTUAL, stackid);
7897 
7898 	if (ksp == NULL)
7899 		return (NULL);
7900 
7901 	bcopy(&template, us_statisticsp, sizeof (template));
7902 	ksp->ks_data = (void *)us_statisticsp;
7903 	ksp->ks_private = (void *)(uintptr_t)stackid;
7904 
7905 	kstat_install(ksp);
7906 	return (ksp);
7907 }
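
/*
 * The "udpstat" kstat created above is visible from user level via
 * libkstat(3LIB) (or the kstat(1M) command).  A minimal sketch, assuming
 * the module name behind UDP_MOD_NAME is "udp", instance 0, and reading
 * one counter from the template, udp_sock_fallback:
 */
#include <kstat.h>
#include <stdio.h>

static int
sketch_read_udp_sock_fallback(void)
{
	kstat_ctl_t *kc = kstat_open();
	kstat_t *ksp;
	kstat_named_t *kn;

	if (kc == NULL)
		return (-1);
	ksp = kstat_lookup(kc, "udp", 0, "udpstat");
	if (ksp == NULL || kstat_read(kc, ksp, NULL) == -1) {
		(void) kstat_close(kc);
		return (-1);
	}
	kn = kstat_data_lookup(ksp, "udp_sock_fallback");
	if (kn != NULL) {
		(void) printf("udp_sock_fallback = %llu\n",
		    (unsigned long long)kn->value.ui64);
	}
	(void) kstat_close(kc);
	return (0);
}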
7908 
7909 static void
7910 udp_kstat2_fini(netstackid_t stackid, kstat_t *ksp)
7911 {
7912 	if (ksp != NULL) {
7913 		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
7914 		kstat_delete_netstack(ksp, stackid);
7915 	}
7916 }
7917 
7918 static void *
7919 udp_kstat_init(netstackid_t stackid)
7920 {
7921 	kstat_t	*ksp;
7922 
7923 	udp_named_kstat_t template = {
7924 		{ "inDatagrams",	KSTAT_DATA_UINT64, 0 },
7925 		{ "inErrors",		KSTAT_DATA_UINT32, 0 },
7926 		{ "outDatagrams",	KSTAT_DATA_UINT64, 0 },
7927 		{ "entrySize",		KSTAT_DATA_INT32, 0 },
7928 		{ "entry6Size",		KSTAT_DATA_INT32, 0 },
7929 		{ "outErrors",		KSTAT_DATA_UINT32, 0 },
7930 	};
7931 
7932 	ksp = kstat_create_netstack(UDP_MOD_NAME, 0, UDP_MOD_NAME, "mib2",
7933 	    KSTAT_TYPE_NAMED,
7934 	    NUM_OF_FIELDS(udp_named_kstat_t), 0, stackid);
7935 
7936 	if (ksp == NULL || ksp->ks_data == NULL)
7937 		return (NULL);
7938 
7939 	template.entrySize.value.ui32 = sizeof (mib2_udpEntry_t);
7940 	template.entry6Size.value.ui32 = sizeof (mib2_udp6Entry_t);
7941 
7942 	bcopy(&template, ksp->ks_data, sizeof (template));
7943 	ksp->ks_update = udp_kstat_update;
7944 	ksp->ks_private = (void *)(uintptr_t)stackid;
7945 
7946 	kstat_install(ksp);
7947 	return (ksp);
7948 }
7949 
7950 static void
7951 udp_kstat_fini(netstackid_t stackid, kstat_t *ksp)
7952 {
7953 	if (ksp != NULL) {
7954 		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
7955 		kstat_delete_netstack(ksp, stackid);
7956 	}
7957 }
7958 
7959 static int
7960 udp_kstat_update(kstat_t *kp, int rw)
7961 {
7962 	udp_named_kstat_t *udpkp;
7963 	netstackid_t	stackid = (netstackid_t)(uintptr_t)kp->ks_private;
7964 	netstack_t	*ns;
7965 	udp_stack_t	*us;
7966 
7967 	if ((kp == NULL) || (kp->ks_data == NULL))
7968 		return (EIO);
7969 
7970 	if (rw == KSTAT_WRITE)
7971 		return (EACCES);
7972 
7973 	ns = netstack_find_by_stackid(stackid);
7974 	if (ns == NULL)
7975 		return (-1);
7976 	us = ns->netstack_udp;
7977 	if (us == NULL) {
7978 		netstack_rele(ns);
7979 		return (-1);
7980 	}
7981 	udpkp = (udp_named_kstat_t *)kp->ks_data;
7982 
7983 	udpkp->inDatagrams.value.ui64 =	us->us_udp_mib.udpHCInDatagrams;
7984 	udpkp->inErrors.value.ui32 =	us->us_udp_mib.udpInErrors;
7985 	udpkp->outDatagrams.value.ui64 = us->us_udp_mib.udpHCOutDatagrams;
7986 	udpkp->outErrors.value.ui32 =	us->us_udp_mib.udpOutErrors;
7987 	netstack_rele(ns);
7988 	return (0);
7989 }
7990 
7991 /*
7992  * Read-side synchronous stream info entry point, called as a
7993  * result of handling certain STREAMS ioctl operations.
7994  */
7995 static int
7996 udp_rinfop(queue_t *q, infod_t *dp)
7997 {
7998 	mblk_t	*mp;
7999 	uint_t	cmd = dp->d_cmd;
8000 	int	res = 0;
8001 	int	error = 0;
8002 	udp_t	*udp = Q_TO_UDP(q);
8003 	struct stdata *stp = STREAM(q);
8004 
8005 	mutex_enter(&udp->udp_drain_lock);
8006 	/* If shutdown on read has happened, return nothing */
8007 	mutex_enter(&stp->sd_lock);
8008 	if (stp->sd_flag & STREOF) {
8009 		mutex_exit(&stp->sd_lock);
8010 		goto done;
8011 	}
8012 	mutex_exit(&stp->sd_lock);
8013 
8014 	if ((mp = udp->udp_rcv_list_head) == NULL)
8015 		goto done;
8016 
8017 	ASSERT(DB_TYPE(mp) != M_DATA && mp->b_cont != NULL);
8018 
8019 	if (cmd & INFOD_COUNT) {
8020 		/*
8021 		 * Return the number of messages.
8022 		 */
8023 		dp->d_count += udp->udp_rcv_msgcnt;
8024 		res |= INFOD_COUNT;
8025 	}
8026 	if (cmd & INFOD_BYTES) {
8027 		/*
8028 		 * Return size of all data messages.
8029 		 */
8030 		dp->d_bytes += udp->udp_rcv_cnt;
8031 		res |= INFOD_BYTES;
8032 	}
8033 	if (cmd & INFOD_FIRSTBYTES) {
8034 		/*
8035 		 * Return size of first data message.
8036 		 */
8037 		dp->d_bytes = msgdsize(mp);
8038 		res |= INFOD_FIRSTBYTES;
8039 		dp->d_cmd &= ~INFOD_FIRSTBYTES;
8040 	}
8041 	if (cmd & INFOD_COPYOUT) {
8042 		mblk_t *mp1 = mp->b_cont;
8043 		int n;
8044 		/*
8045 		 * Return data contents of first message.
8046 		 */
8047 		ASSERT(DB_TYPE(mp1) == M_DATA);
8048 		while (mp1 != NULL && dp->d_uiop->uio_resid > 0) {
8049 			n = MIN(dp->d_uiop->uio_resid, MBLKL(mp1));
8050 			if (n != 0 && (error = uiomove((char *)mp1->b_rptr, n,
8051 			    UIO_READ, dp->d_uiop)) != 0) {
8052 				goto done;
8053 			}
8054 			mp1 = mp1->b_cont;
8055 		}
8056 		res |= INFOD_COPYOUT;
8057 		dp->d_cmd &= ~INFOD_COPYOUT;
8058 	}
8059 done:
8060 	mutex_exit(&udp->udp_drain_lock);
8061 
8062 	dp->d_res |= res;
8063 
8064 	return (error);
8065 }
8066 
8067 /*
8068  * Read-side synchronous stream entry point.  This is called as a result
8069  * of recv/read operation done at sockfs, and is guaranteed to execute
8070  * outside of the interrupt thread context.  It returns a single datagram
8071  * (b_cont chain of T_UNITDATA_IND plus data) to the upper layer.
8072  */
8073 static int
8074 udp_rrw(queue_t *q, struiod_t *dp)
8075 {
8076 	mblk_t	*mp;
8077 	udp_t	*udp = Q_TO_UDP(q);
8078 	udp_stack_t *us = udp->udp_us;
8079 
8080 	/*
8081 	 * Dequeue datagram from the head of the list and return
8082 	 * it to caller; also ensure that RSLEEP sd_wakeq flag is
8083 	 * set/cleared depending on whether or not there's data
8084 	 * remaining in the list.
8085 	 */
8086 	mutex_enter(&udp->udp_drain_lock);
8087 	if (!udp->udp_direct_sockfs) {
8088 		mutex_exit(&udp->udp_drain_lock);
8089 		UDP_STAT(us, udp_rrw_busy);
8090 		return (EBUSY);
8091 	}
8092 	if ((mp = udp->udp_rcv_list_head) != NULL) {
8093 		uint_t size = msgdsize(mp);
8094 
8095 		/* Last datagram in the list? */
8096 		if ((udp->udp_rcv_list_head = mp->b_next) == NULL)
8097 			udp->udp_rcv_list_tail = NULL;
8098 		mp->b_next = NULL;
8099 
8100 		udp->udp_rcv_cnt -= size;
8101 		udp->udp_rcv_msgcnt--;
8102 		UDP_STAT(us, udp_rrw_msgcnt);
8103 
8104 		/* No longer flow-controlling? */
8105 		if (udp->udp_rcv_cnt < udp->udp_rcv_hiwat &&
8106 		    udp->udp_rcv_msgcnt < udp->udp_rcv_hiwat)
8107 			udp->udp_drain_qfull = B_FALSE;
8108 	}
8109 	if (udp->udp_rcv_list_head == NULL) {
8110 		/*
8111 		 * Either we just dequeued the last datagram or
8112 		 * we get here from sockfs and have nothing to
8113 		 * return; in this case clear RSLEEP.
8114 		 */
8115 		ASSERT(udp->udp_rcv_cnt == 0);
8116 		ASSERT(udp->udp_rcv_msgcnt == 0);
8117 		ASSERT(udp->udp_rcv_list_tail == NULL);
8118 		STR_WAKEUP_CLEAR(STREAM(q));
8119 	} else {
8120 		/*
8121 		 * More data follows; we need udp_rrw() to be
8122 		 * called in future to pick up the rest.
8123 		 */
8124 		STR_WAKEUP_SET(STREAM(q));
8125 	}
8126 	mutex_exit(&udp->udp_drain_lock);
8127 	dp->d_mp = mp;
8128 	return (0);
8129 }
8130 
8131 /*
8132  * Enqueue a completely-built T_UNITDATA_IND message into the receive
8133  * list; this is typically executed within the interrupt thread context
8134  * and so we do things as quickly as possible.
8135  */
8136 static void
8137 udp_rcv_enqueue(queue_t *q, udp_t *udp, mblk_t *mp, uint_t pkt_len)
8138 {
8139 	ASSERT(q == RD(q));
8140 	ASSERT(pkt_len == msgdsize(mp));
8141 	ASSERT(mp->b_next == NULL && mp->b_cont != NULL);
8142 	ASSERT(DB_TYPE(mp) == M_PROTO && DB_TYPE(mp->b_cont) == M_DATA);
8143 	ASSERT(MBLKL(mp) >= sizeof (struct T_unitdata_ind));
8144 
8145 	mutex_enter(&udp->udp_drain_lock);
8146 	/*
8147 	 * Wake up and signal the receiving app; it is okay to do this
8148 	 * before enqueueing the mp because we are holding the drain lock.
8149 	 * One of the advantages of a synchronous stream is the ability for
8150 	 * us to find out when the application performs a read on the
8151 	 * socket, by way of the udp_rrw() entry point being called.  We need
8152 	 * to generate SIGPOLL/SIGIO for each received datagram in the case
8153 	 * of an asynchronous socket, just as in the strrput() case.  However,
8154 	 * we only wake the application up when necessary, i.e. during the
8155 	 * first enqueue.  When udp_rrw() is called, we send a single
8156 	 * datagram upstream and call STR_WAKEUP_SET() again when data
8157 	 * still remains in our receive queue.
8158 	 */
8159 	STR_WAKEUP_SENDSIG(STREAM(q), udp->udp_rcv_list_head);
8160 	if (udp->udp_rcv_list_head == NULL)
8161 		udp->udp_rcv_list_head = mp;
8162 	else
8163 		udp->udp_rcv_list_tail->b_next = mp;
8164 	udp->udp_rcv_list_tail = mp;
8165 	udp->udp_rcv_cnt += pkt_len;
8166 	udp->udp_rcv_msgcnt++;
8167 
8168 	/* Need to flow-control? */
8169 	if (udp->udp_rcv_cnt >= udp->udp_rcv_hiwat ||
8170 	    udp->udp_rcv_msgcnt >= udp->udp_rcv_hiwat)
8171 		udp->udp_drain_qfull = B_TRUE;
8172 
8173 	mutex_exit(&udp->udp_drain_lock);
8174 }
8175 
8176 /*
8177  * Drain the contents of receive list to the module upstream; we do
8178  * this during close or when we fallback to the slow mode due to
8179  * sockmod being popped or a module being pushed on top of us.
8180  */
8181 static void
8182 udp_rcv_drain(queue_t *q, udp_t *udp, boolean_t closing)
8183 {
8184 	mblk_t *mp;
8185 	udp_stack_t *us = udp->udp_us;
8186 
8187 	ASSERT(q == RD(q));
8188 
8189 	mutex_enter(&udp->udp_drain_lock);
8190 	/*
8191 	 * There is no race with a concurrent udp_input() sending
8192 	 * up packets using putnext() after we have cleared the
8193 	 * udp_direct_sockfs flag but before we have completed
8194 	 * sending up the packets in udp_rcv_list, since we are
8195 	 * either a writer or we have quiesced the conn.
8196 	 */
8197 	udp->udp_direct_sockfs = B_FALSE;
8198 	mutex_exit(&udp->udp_drain_lock);
8199 
8200 	if (udp->udp_rcv_list_head != NULL)
8201 		UDP_STAT(us, udp_drain);
8202 
8203 	/*
8204 	 * Send up everything via putnext(); note here that we
8205 	 * don't need the udp_drain_lock to protect us since
8206 	 * nothing can enter udp_rrw() and that we currently
8207 	 * have exclusive access to this udp.
8208 	 */
8209 	while ((mp = udp->udp_rcv_list_head) != NULL) {
8210 		udp->udp_rcv_list_head = mp->b_next;
8211 		mp->b_next = NULL;
8212 		udp->udp_rcv_cnt -= msgdsize(mp);
8213 		udp->udp_rcv_msgcnt--;
8214 		if (closing) {
8215 			freemsg(mp);
8216 		} else {
8217 			putnext(q, mp);
8218 		}
8219 	}
8220 	ASSERT(udp->udp_rcv_cnt == 0);
8221 	ASSERT(udp->udp_rcv_msgcnt == 0);
8222 	ASSERT(udp->udp_rcv_list_head == NULL);
8223 	udp->udp_rcv_list_tail = NULL;
8224 	udp->udp_drain_qfull = B_FALSE;
8225 }
8226 
8227 static size_t
8228 udp_set_rcv_hiwat(udp_t *udp, size_t size)
8229 {
8230 	udp_stack_t *us = udp->udp_us;
8231 
8232 	/* We add a bit of extra buffering */
8233 	size += size >> 1;
8234 	if (size > us->us_max_buf)
8235 		size = us->us_max_buf;
8236 
8237 	udp->udp_rcv_hiwat = size;
8238 	return (size);
8239 }
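
/*
 * Worked example of the sizing above: the requested size is grown by
 * half (size += size >> 1) and clamped to us_max_buf, so a 64K request
 * becomes a 96K high-water mark unless us_max_buf is smaller.  The cap
 * below is an assumed value for illustration only.
 */
#include <stddef.h>

#define	SKETCH_MAX_BUF	(2 * 1024 * 1024)	/* assumed cap */

static size_t
sketch_rcv_hiwat(size_t size)
{
	size += size >> 1;			/* 65536 -> 98304 */
	if (size > SKETCH_MAX_BUF)
		size = SKETCH_MAX_BUF;
	return (size);
}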
8240 
8241 /*
8242  * For the lower queue so that UDP can be a dummy mux.
8243  * Nobody should be sending packets up this stream.
8245  */
8246 static void
8247 udp_lrput(queue_t *q, mblk_t *mp)
8248 {
8249 	mblk_t *mp1;
8250 
8251 	switch (mp->b_datap->db_type) {
8252 	case M_FLUSH:
8253 		/* Turn around */
8254 		if (*mp->b_rptr & FLUSHW) {
8255 			*mp->b_rptr &= ~FLUSHR;
8256 			qreply(q, mp);
8257 			return;
8258 		}
8259 		break;
8260 	}
8261 	/* Could receive messages that passed through ar_rput */
8262 	for (mp1 = mp; mp1; mp1 = mp1->b_cont)
8263 		mp1->b_prev = mp1->b_next = NULL;
8264 	freemsg(mp);
8265 }
8266 
8267 /*
8268  * For the lower queue so that UDP can be a dummy mux.
8269  * Nobody should be sending packets down this stream.
8270  */
8271 /* ARGSUSED */
8272 void
8273 udp_lwput(queue_t *q, mblk_t *mp)
8274 {
8275 	freemsg(mp);
8276 }
8277