1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright 2013 Nexenta Systems, Inc. All rights reserved. 24 * Copyright 2014, OmniTI Computer Consulting, Inc. All rights reserved. 25 */ 26 /* Copyright (c) 1990 Mentat Inc. */ 27 28 #include <sys/sysmacros.h> 29 #include <sys/types.h> 30 #include <sys/stream.h> 31 #include <sys/stropts.h> 32 #include <sys/strlog.h> 33 #include <sys/strsun.h> 34 #define _SUN_TPI_VERSION 2 35 #include <sys/tihdr.h> 36 #include <sys/timod.h> 37 #include <sys/ddi.h> 38 #include <sys/sunddi.h> 39 #include <sys/strsubr.h> 40 #include <sys/suntpi.h> 41 #include <sys/xti_inet.h> 42 #include <sys/kmem.h> 43 #include <sys/cred_impl.h> 44 #include <sys/policy.h> 45 #include <sys/priv.h> 46 #include <sys/ucred.h> 47 #include <sys/zone.h> 48 49 #include <sys/socket.h> 50 #include <sys/socketvar.h> 51 #include <sys/sockio.h> 52 #include <sys/vtrace.h> 53 #include <sys/sdt.h> 54 #include <sys/debug.h> 55 #include <sys/isa_defs.h> 56 #include <sys/random.h> 57 #include <netinet/in.h> 58 #include <netinet/ip6.h> 59 #include <netinet/icmp6.h> 60 #include <netinet/udp.h> 61 62 #include <inet/common.h> 63 #include <inet/ip.h> 64 #include <inet/ip_impl.h> 65 #include <inet/ipsec_impl.h> 66 #include <inet/ip6.h> 67 #include <inet/ip_ire.h> 68 #include <inet/ip_if.h> 69 #include <inet/ip_multi.h> 70 #include <inet/ip_ndp.h> 71 #include <inet/proto_set.h> 72 #include <inet/mib2.h> 73 #include <inet/optcom.h> 74 #include <inet/snmpcom.h> 75 #include <inet/kstatcom.h> 76 #include <inet/ipclassifier.h> 77 #include <sys/squeue_impl.h> 78 #include <inet/ipnet.h> 79 #include <sys/ethernet.h> 80 81 #include <sys/tsol/label.h> 82 #include <sys/tsol/tnet.h> 83 #include <rpc/pmap_prot.h> 84 85 #include <inet/udp_impl.h> 86 87 /* 88 * Synchronization notes: 89 * 90 * UDP is MT and uses the usual kernel synchronization primitives. There are 2 91 * locks, the fanout lock (uf_lock) and conn_lock. conn_lock 92 * protects the contents of the udp_t. uf_lock protects the address and the 93 * fanout information. 94 * The lock order is conn_lock -> uf_lock. 95 * 96 * The fanout lock uf_lock: 97 * When a UDP endpoint is bound to a local port, it is inserted into 98 * a bind hash list. The list consists of an array of udp_fanout_t buckets. 99 * The size of the array is controlled by the udp_bind_fanout_size variable. 100 * This variable can be changed in /etc/system if the default value is 101 * not large enough. Each bind hash bucket is protected by a per bucket 102 * lock. It protects the udp_bind_hash and udp_ptpbhn fields in the udp_t 103 * structure and a few other fields in the udp_t. 
A UDP endpoint is removed 104 * from the bind hash list only when it is being unbound or being closed. 105 * The per bucket lock also protects a UDP endpoint's state changes. 106 * 107 * Plumbing notes: 108 * UDP is always a device driver. For compatibility with mibopen() code 109 * it is possible to I_PUSH "udp", but that results in pushing a passthrough 110 * dummy module. 111 * 112 * The above implies that we don't support any intermediate module to 113 * reside in between /dev/ip and udp -- in fact, we never supported such 114 * scenario in the past as the inter-layer communication semantics have 115 * always been private. 116 */ 117 118 /* For /etc/system control */ 119 uint_t udp_bind_fanout_size = UDP_BIND_FANOUT_SIZE; 120 121 static void udp_addr_req(queue_t *q, mblk_t *mp); 122 static void udp_tpi_bind(queue_t *q, mblk_t *mp); 123 static void udp_bind_hash_insert(udp_fanout_t *uf, udp_t *udp); 124 static void udp_bind_hash_remove(udp_t *udp, boolean_t caller_holds_lock); 125 static int udp_build_hdr_template(conn_t *, const in6_addr_t *, 126 const in6_addr_t *, in_port_t, uint32_t); 127 static void udp_capability_req(queue_t *q, mblk_t *mp); 128 static int udp_tpi_close(queue_t *q, int flags); 129 static void udp_close_free(conn_t *); 130 static void udp_tpi_connect(queue_t *q, mblk_t *mp); 131 static void udp_tpi_disconnect(queue_t *q, mblk_t *mp); 132 static void udp_err_ack(queue_t *q, mblk_t *mp, t_scalar_t t_error, 133 int sys_error); 134 static void udp_err_ack_prim(queue_t *q, mblk_t *mp, t_scalar_t primitive, 135 t_scalar_t tlierr, int sys_error); 136 static int udp_extra_priv_ports_get(queue_t *q, mblk_t *mp, caddr_t cp, 137 cred_t *cr); 138 static int udp_extra_priv_ports_add(queue_t *q, mblk_t *mp, 139 char *value, caddr_t cp, cred_t *cr); 140 static int udp_extra_priv_ports_del(queue_t *q, mblk_t *mp, 141 char *value, caddr_t cp, cred_t *cr); 142 static void udp_icmp_input(void *, mblk_t *, void *, ip_recv_attr_t *); 143 static void udp_icmp_error_ipv6(conn_t *connp, mblk_t *mp, 144 ip_recv_attr_t *ira); 145 static void udp_info_req(queue_t *q, mblk_t *mp); 146 static void udp_input(void *, mblk_t *, void *, ip_recv_attr_t *); 147 static void udp_lrput(queue_t *, mblk_t *); 148 static void udp_lwput(queue_t *, mblk_t *); 149 static int udp_open(queue_t *q, dev_t *devp, int flag, int sflag, 150 cred_t *credp, boolean_t isv6); 151 static int udp_openv4(queue_t *q, dev_t *devp, int flag, int sflag, 152 cred_t *credp); 153 static int udp_openv6(queue_t *q, dev_t *devp, int flag, int sflag, 154 cred_t *credp); 155 static boolean_t udp_opt_allow_udr_set(t_scalar_t level, t_scalar_t name); 156 int udp_opt_set(conn_t *connp, uint_t optset_context, 157 int level, int name, uint_t inlen, 158 uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp, 159 void *thisdg_attrs, cred_t *cr); 160 int udp_opt_get(conn_t *connp, int level, int name, 161 uchar_t *ptr); 162 static int udp_output_connected(conn_t *connp, mblk_t *mp, cred_t *cr, 163 pid_t pid); 164 static int udp_output_lastdst(conn_t *connp, mblk_t *mp, cred_t *cr, 165 pid_t pid, ip_xmit_attr_t *ixa); 166 static int udp_output_newdst(conn_t *connp, mblk_t *data_mp, sin_t *sin, 167 sin6_t *sin6, ushort_t ipversion, cred_t *cr, pid_t, 168 ip_xmit_attr_t *ixa); 169 static mblk_t *udp_prepend_hdr(conn_t *, ip_xmit_attr_t *, const ip_pkt_t *, 170 const in6_addr_t *, const in6_addr_t *, in_port_t, uint32_t, mblk_t *, 171 int *); 172 static mblk_t *udp_prepend_header_template(conn_t *, ip_xmit_attr_t *, 173 mblk_t *, const in6_addr_t *, 
    in_port_t, uint32_t, int *);
static void	udp_ud_err(queue_t *q, mblk_t *mp, t_scalar_t err);
static void	udp_ud_err_connected(conn_t *, t_scalar_t);
static void	udp_tpi_unbind(queue_t *q, mblk_t *mp);
static in_port_t udp_update_next_port(udp_t *udp, in_port_t port,
    boolean_t random);
static void	udp_wput_other(queue_t *q, mblk_t *mp);
static void	udp_wput_iocdata(queue_t *q, mblk_t *mp);
static void	udp_wput_fallback(queue_t *q, mblk_t *mp);
static size_t	udp_set_rcv_hiwat(udp_t *udp, size_t size);

static void	*udp_stack_init(netstackid_t stackid, netstack_t *ns);
static void	udp_stack_fini(netstackid_t stackid, void *arg);

/* Common routines for TPI and socket module */
static void	udp_ulp_recv(conn_t *, mblk_t *, uint_t, ip_recv_attr_t *);

/* Common routine for TPI and socket module */
static conn_t	*udp_do_open(cred_t *, boolean_t, int, int *);
static void	udp_do_close(conn_t *);
static int	udp_do_bind(conn_t *, struct sockaddr *, socklen_t, cred_t *,
    boolean_t);
static int	udp_do_unbind(conn_t *);

int		udp_getsockname(sock_lower_handle_t,
    struct sockaddr *, socklen_t *, cred_t *);
int		udp_getpeername(sock_lower_handle_t,
    struct sockaddr *, socklen_t *, cred_t *);
static int	udp_do_connect(conn_t *, const struct sockaddr *, socklen_t,
    cred_t *, pid_t);

#pragma inline(udp_output_connected, udp_output_newdst, udp_output_lastdst)

/*
 * Checks if the given destination addr/port is allowed out.
 * If allowed, registers the (dest_addr/port, node_ID) mapping at Cluster.
 * Called for each connect() and for sendto()/sendmsg() to a different
 * destination.
 * For connect(), called in udp_connect().
 * For sendto()/sendmsg(), called in udp_output_newdst().
 *
 * This macro assumes that the cl_inet_connect2 hook is not NULL.
 * Please check this before calling this macro.
 *
 * void
 * CL_INET_UDP_CONNECT(conn_t *cp, boolean_t is_outgoing,
 *     in6_addr_t *faddrp, in_port_t (or uint16_t) fport, int err);
 */
#define	CL_INET_UDP_CONNECT(cp, is_outgoing, faddrp, fport, err) {	\
	(err) = 0;							\
	/*								\
	 * Running in cluster mode - check and register active		\
	 * "connection" information					\
	 */								\
	if ((cp)->conn_ipversion == IPV4_VERSION)			\
		(err) = (*cl_inet_connect2)(				\
		    (cp)->conn_netstack->netstack_stackid,		\
		    IPPROTO_UDP, is_outgoing, AF_INET,			\
		    (uint8_t *)&((cp)->conn_laddr_v4),			\
		    (cp)->conn_lport,					\
		    (uint8_t *)&(V4_PART_OF_V6(*faddrp)),		\
		    (in_port_t)(fport), NULL);				\
	else								\
		(err) = (*cl_inet_connect2)(				\
		    (cp)->conn_netstack->netstack_stackid,		\
		    IPPROTO_UDP, is_outgoing, AF_INET6,			\
		    (uint8_t *)&((cp)->conn_laddr_v6),			\
		    (cp)->conn_lport,					\
		    (uint8_t *)(faddrp), (in_port_t)(fport), NULL);	\
}

static struct module_info udp_mod_info =  {
	UDP_MOD_ID, UDP_MOD_NAME, 1, INFPSZ, UDP_RECV_HIWATER, UDP_RECV_LOWATER
};

/*
 * Entry points for UDP as a device.
 * We have separate open functions for the /dev/udp and /dev/udp6 devices.
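 *
 * The two read-side qinit structures below (udp_rinitv4 and udp_rinitv6)
 * differ only in which open routine they point at (udp_openv4 vs.
 * udp_openv6); both share udp_winit for the write side.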
251 */ 252 static struct qinit udp_rinitv4 = { 253 NULL, NULL, udp_openv4, udp_tpi_close, NULL, &udp_mod_info, NULL 254 }; 255 256 static struct qinit udp_rinitv6 = { 257 NULL, NULL, udp_openv6, udp_tpi_close, NULL, &udp_mod_info, NULL 258 }; 259 260 static struct qinit udp_winit = { 261 (pfi_t)udp_wput, (pfi_t)ip_wsrv, NULL, NULL, NULL, &udp_mod_info 262 }; 263 264 /* UDP entry point during fallback */ 265 struct qinit udp_fallback_sock_winit = { 266 (pfi_t)udp_wput_fallback, NULL, NULL, NULL, NULL, &udp_mod_info 267 }; 268 269 /* 270 * UDP needs to handle I_LINK and I_PLINK since ifconfig 271 * likes to use it as a place to hang the various streams. 272 */ 273 static struct qinit udp_lrinit = { 274 (pfi_t)udp_lrput, NULL, udp_openv4, udp_tpi_close, NULL, &udp_mod_info 275 }; 276 277 static struct qinit udp_lwinit = { 278 (pfi_t)udp_lwput, NULL, udp_openv4, udp_tpi_close, NULL, &udp_mod_info 279 }; 280 281 /* For AF_INET aka /dev/udp */ 282 struct streamtab udpinfov4 = { 283 &udp_rinitv4, &udp_winit, &udp_lrinit, &udp_lwinit 284 }; 285 286 /* For AF_INET6 aka /dev/udp6 */ 287 struct streamtab udpinfov6 = { 288 &udp_rinitv6, &udp_winit, &udp_lrinit, &udp_lwinit 289 }; 290 291 #define REUSELIST_MAX 64 292 struct reuselist { 293 conn_t *ru_conns[REUSELIST_MAX]; 294 int ru_entries; /* number of entries */ 295 int ru_next; /* round-robin pointer */ 296 kmutex_t ru_lock; 297 }; 298 299 #define UDP_MAXPACKET_IPV4 (IP_MAXPACKET - UDPH_SIZE - IP_SIMPLE_HDR_LENGTH) 300 301 /* Default structure copied into T_INFO_ACK messages */ 302 static struct T_info_ack udp_g_t_info_ack_ipv4 = { 303 T_INFO_ACK, 304 UDP_MAXPACKET_IPV4, /* TSDU_size. Excl. headers */ 305 T_INVALID, /* ETSU_size. udp does not support expedited data. */ 306 T_INVALID, /* CDATA_size. udp does not support connect data. */ 307 T_INVALID, /* DDATA_size. udp does not support disconnect data. */ 308 sizeof (sin_t), /* ADDR_size. */ 309 0, /* OPT_size - not initialized here */ 310 UDP_MAXPACKET_IPV4, /* TIDU_size. Excl. headers */ 311 T_CLTS, /* SERV_type. udp supports connection-less. */ 312 TS_UNBND, /* CURRENT_state. This is set from udp_state. */ 313 (XPG4_1|SENDZERO) /* PROVIDER_flag */ 314 }; 315 316 #define UDP_MAXPACKET_IPV6 (IP_MAXPACKET - UDPH_SIZE - IPV6_HDR_LEN) 317 318 static struct T_info_ack udp_g_t_info_ack_ipv6 = { 319 T_INFO_ACK, 320 UDP_MAXPACKET_IPV6, /* TSDU_size. Excl. headers */ 321 T_INVALID, /* ETSU_size. udp does not support expedited data. */ 322 T_INVALID, /* CDATA_size. udp does not support connect data. */ 323 T_INVALID, /* DDATA_size. udp does not support disconnect data. */ 324 sizeof (sin6_t), /* ADDR_size. */ 325 0, /* OPT_size - not initialized here */ 326 UDP_MAXPACKET_IPV6, /* TIDU_size. Excl. headers */ 327 T_CLTS, /* SERV_type. udp supports connection-less. */ 328 TS_UNBND, /* CURRENT_state. This is set from udp_state. */ 329 (XPG4_1|SENDZERO) /* PROVIDER_flag */ 330 }; 331 332 /* 333 * UDP tunables related declarations. Definitions are in udp_tunables.c 334 */ 335 extern mod_prop_info_t udp_propinfo_tbl[]; 336 extern int udp_propinfo_count; 337 338 /* Setable in /etc/system */ 339 /* If set to 0, pick ephemeral port sequentially; otherwise randomly. */ 340 uint32_t udp_random_anon_port = 1; 341 342 /* 343 * Hook functions to enable cluster networking. 
 * On non-clustered systems these vectors must always be NULL.
 */

void (*cl_inet_bind)(netstackid_t stack_id, uchar_t protocol,
    sa_family_t addr_family, uint8_t *laddrp, in_port_t lport,
    void *args) = NULL;
void (*cl_inet_unbind)(netstackid_t stack_id, uint8_t protocol,
    sa_family_t addr_family, uint8_t *laddrp, in_port_t lport,
    void *args) = NULL;

typedef union T_primitives *t_primp_t;

static int
udp_reuselist_add(struct reuselist *reusep, conn_t *connp, boolean_t last)
{
	/*
	 * We don't need to operate under the mutex here, because the list
	 * is not in use yet.
	 */

	/*
	 * Check if the table is full. If 'last' is zero, it can't be filled
	 * up by design, so just assert that. Otherwise check and return an
	 * error.
	 */
	ASSERT(last || reusep->ru_entries < REUSELIST_MAX);

	if (last && reusep->ru_entries == REUSELIST_MAX)
		return (-1);

	reusep->ru_conns[reusep->ru_entries++] = connp;

	return (0);
}

static void
udp_reuselist_remove(conn_t *connp)
{
	int i;
	struct reuselist *reusep = connp->conn_reuselist;

	if (reusep == NULL)
		return;

	ASSERT(MUTEX_HELD(&connp->conn_lock));

	mutex_enter(&reusep->ru_lock);

	for (i = 0; i < reusep->ru_entries; ++i) {
		if (reusep->ru_conns[i] == connp)
			break;
	}
	ASSERT(i < reusep->ru_entries);

	/* Move the last entry into the freed slot. */
	if (--reusep->ru_entries == 0) {
		/* Last entry; free the list. */
		mutex_exit(&reusep->ru_lock);
		mutex_destroy(&reusep->ru_lock);
		kmem_free(reusep, sizeof (*reusep));
		connp->conn_reuselist = NULL;
	} else {
		reusep->ru_conns[i] = reusep->ru_conns[reusep->ru_entries];

		/*
		 * Reset the round-robin pointer, so it doesn't accidentally
		 * point to the last entry.
		 */
		reusep->ru_next = 0;
		mutex_exit(&reusep->ru_lock);
	}
}

/*
 * Return the next anonymous port in the privileged port range for
 * bind checking.
 *
 * Trusted Extension (TX) notes: TX allows the administrator to mark or
 * reserve ports as Multilevel ports (MLP). An MLP has special function
 * on TX systems. Once a port is made MLP, it's not available as an
 * ordinary port. This creates "holes" in the port name space. It
 * may be necessary to skip the "holes" to find a suitable anon port.
 */
static in_port_t
udp_get_next_priv_port(udp_t *udp)
{
	static in_port_t next_priv_port = IPPORT_RESERVED - 1;
	in_port_t nextport;
	boolean_t restart = B_FALSE;
	udp_stack_t *us = udp->udp_us;

retry:
	if (next_priv_port < us->us_min_anonpriv_port ||
	    next_priv_port >= IPPORT_RESERVED) {
		next_priv_port = IPPORT_RESERVED - 1;
		if (restart)
			return (0);
		restart = B_TRUE;
	}

	if (is_system_labeled() &&
	    (nextport = tsol_next_port(crgetzone(udp->udp_connp->conn_cred),
	    next_priv_port, IPPROTO_UDP, B_FALSE)) != 0) {
		next_priv_port = nextport;
		goto retry;
	}

	return (next_priv_port--);
}

/*
 * Hash list removal routine for udp_t structures.
 */
static void
udp_bind_hash_remove(udp_t *udp, boolean_t caller_holds_lock)
{
	udp_t	*udpnext;
	kmutex_t *lockp;
	udp_stack_t *us = udp->udp_us;
	conn_t	*connp = udp->udp_connp;

	if (udp->udp_ptpbhn == NULL)
		return;

	/*
	 * Extract the lock pointer in case there are concurrent
	 * hash_remove's for this instance.
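	 * Every remover hashes the same conn_lport into the same bucket,
	 * i.e. &us->us_bind_fanout[UDP_BIND_HASH(conn_lport,
	 * us_bind_fanout_size)].uf_lock, so concurrent removals of this
	 * endpoint serialize on one uf_lock.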
	 */
	ASSERT(connp->conn_lport != 0);
	if (!caller_holds_lock) {
		lockp = &us->us_bind_fanout[UDP_BIND_HASH(connp->conn_lport,
		    us->us_bind_fanout_size)].uf_lock;
		ASSERT(lockp != NULL);
		mutex_enter(lockp);
	}
	if (udp->udp_ptpbhn != NULL) {
		udpnext = udp->udp_bind_hash;
		if (udpnext != NULL) {
			udpnext->udp_ptpbhn = udp->udp_ptpbhn;
			udp->udp_bind_hash = NULL;
		}
		*udp->udp_ptpbhn = udpnext;
		udp->udp_ptpbhn = NULL;
	}
	if (!caller_holds_lock) {
		mutex_exit(lockp);
	}
}

static void
udp_bind_hash_insert(udp_fanout_t *uf, udp_t *udp)
{
	conn_t	*connp = udp->udp_connp;
	udp_t	**udpp;
	udp_t	*udpnext;
	conn_t	*connext;

	ASSERT(MUTEX_HELD(&uf->uf_lock));
	ASSERT(udp->udp_ptpbhn == NULL);
	udpp = &uf->uf_udp;
	udpnext = udpp[0];
	if (udpnext != NULL) {
		/*
		 * If the new udp is bound to the INADDR_ANY address
		 * and the first one in the list is not bound to
		 * INADDR_ANY we skip all entries until we find the
		 * first one bound to INADDR_ANY.
		 * This makes sure that applications binding to a
		 * specific address get preference over those binding to
		 * INADDR_ANY.
		 */
		connext = udpnext->udp_connp;
		if (V6_OR_V4_INADDR_ANY(connp->conn_bound_addr_v6) &&
		    !V6_OR_V4_INADDR_ANY(connext->conn_bound_addr_v6)) {
			while ((udpnext = udpp[0]) != NULL &&
			    !V6_OR_V4_INADDR_ANY(
			    udpnext->udp_connp->conn_bound_addr_v6)) {
				udpp = &(udpnext->udp_bind_hash);
			}
			if (udpnext != NULL)
				udpnext->udp_ptpbhn = &udp->udp_bind_hash;
		} else {
			udpnext->udp_ptpbhn = &udp->udp_bind_hash;
		}
	}
	udp->udp_bind_hash = udpnext;
	udp->udp_ptpbhn = udpp;
	udpp[0] = udp;
}

/*
 * This routine is called to handle each O_T_BIND_REQ/T_BIND_REQ message
 * passed to udp_wput.
 * It associates a port number and local address with the stream.
 * It calls IP to verify the local IP address, and calls IP to insert
 * the conn_t in the fanout table.
 * If everything is ok it then sends the T_BIND_ACK back up.
 *
 * Note that UDP over IPv4 and IPv6 sockets can use the same port number
 * without setting SO_REUSEADDR. This is needed so that they
 * can be viewed as two independent transport protocols.
 * However, anonymous ports are allocated from the same range to avoid
 * duplicating the us->us_next_port_to_try.
 */
static void
udp_tpi_bind(queue_t *q, mblk_t *mp)
{
	sin_t	*sin;
	sin6_t	*sin6;
	mblk_t	*mp1;
	struct T_bind_req *tbr;
	conn_t	*connp;
	udp_t	*udp;
	int	error;
	struct sockaddr	*sa;
	cred_t	*cr;

	/*
	 * All Solaris components should pass a db_credp
	 * for this TPI message, hence we ASSERT.
	 * But in case there is some other M_PROTO that looks
	 * like a TPI message sent by some other kernel
	 * component, we check and return an error.
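	 * On DEBUG kernels the ASSERT below fires for such a message; on
	 * non-DEBUG kernels the explicit NULL check turns it into a
	 * TSYSERR/EINVAL ack instead of dereferencing a NULL cred.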
565 */ 566 cr = msg_getcred(mp, NULL); 567 ASSERT(cr != NULL); 568 if (cr == NULL) { 569 udp_err_ack(q, mp, TSYSERR, EINVAL); 570 return; 571 } 572 573 connp = Q_TO_CONN(q); 574 udp = connp->conn_udp; 575 if ((mp->b_wptr - mp->b_rptr) < sizeof (*tbr)) { 576 (void) mi_strlog(q, 1, SL_ERROR|SL_TRACE, 577 "udp_bind: bad req, len %u", 578 (uint_t)(mp->b_wptr - mp->b_rptr)); 579 udp_err_ack(q, mp, TPROTO, 0); 580 return; 581 } 582 if (udp->udp_state != TS_UNBND) { 583 (void) mi_strlog(q, 1, SL_ERROR|SL_TRACE, 584 "udp_bind: bad state, %u", udp->udp_state); 585 udp_err_ack(q, mp, TOUTSTATE, 0); 586 return; 587 } 588 /* 589 * Reallocate the message to make sure we have enough room for an 590 * address. 591 */ 592 mp1 = reallocb(mp, sizeof (struct T_bind_ack) + sizeof (sin6_t), 1); 593 if (mp1 == NULL) { 594 udp_err_ack(q, mp, TSYSERR, ENOMEM); 595 return; 596 } 597 598 mp = mp1; 599 600 /* Reset the message type in preparation for shipping it back. */ 601 DB_TYPE(mp) = M_PCPROTO; 602 603 tbr = (struct T_bind_req *)mp->b_rptr; 604 switch (tbr->ADDR_length) { 605 case 0: /* Request for a generic port */ 606 tbr->ADDR_offset = sizeof (struct T_bind_req); 607 if (connp->conn_family == AF_INET) { 608 tbr->ADDR_length = sizeof (sin_t); 609 sin = (sin_t *)&tbr[1]; 610 *sin = sin_null; 611 sin->sin_family = AF_INET; 612 mp->b_wptr = (uchar_t *)&sin[1]; 613 sa = (struct sockaddr *)sin; 614 } else { 615 ASSERT(connp->conn_family == AF_INET6); 616 tbr->ADDR_length = sizeof (sin6_t); 617 sin6 = (sin6_t *)&tbr[1]; 618 *sin6 = sin6_null; 619 sin6->sin6_family = AF_INET6; 620 mp->b_wptr = (uchar_t *)&sin6[1]; 621 sa = (struct sockaddr *)sin6; 622 } 623 break; 624 625 case sizeof (sin_t): /* Complete IPv4 address */ 626 sa = (struct sockaddr *)mi_offset_param(mp, tbr->ADDR_offset, 627 sizeof (sin_t)); 628 if (sa == NULL || !OK_32PTR((char *)sa)) { 629 udp_err_ack(q, mp, TSYSERR, EINVAL); 630 return; 631 } 632 if (connp->conn_family != AF_INET || 633 sa->sa_family != AF_INET) { 634 udp_err_ack(q, mp, TSYSERR, EAFNOSUPPORT); 635 return; 636 } 637 break; 638 639 case sizeof (sin6_t): /* complete IPv6 address */ 640 sa = (struct sockaddr *)mi_offset_param(mp, tbr->ADDR_offset, 641 sizeof (sin6_t)); 642 if (sa == NULL || !OK_32PTR((char *)sa)) { 643 udp_err_ack(q, mp, TSYSERR, EINVAL); 644 return; 645 } 646 if (connp->conn_family != AF_INET6 || 647 sa->sa_family != AF_INET6) { 648 udp_err_ack(q, mp, TSYSERR, EAFNOSUPPORT); 649 return; 650 } 651 break; 652 653 default: /* Invalid request */ 654 (void) mi_strlog(q, 1, SL_ERROR|SL_TRACE, 655 "udp_bind: bad ADDR_length length %u", tbr->ADDR_length); 656 udp_err_ack(q, mp, TBADADDR, 0); 657 return; 658 } 659 660 error = udp_do_bind(connp, sa, tbr->ADDR_length, cr, 661 tbr->PRIM_type != O_T_BIND_REQ); 662 663 if (error != 0) { 664 if (error > 0) { 665 udp_err_ack(q, mp, TSYSERR, error); 666 } else { 667 udp_err_ack(q, mp, -error, 0); 668 } 669 } else { 670 tbr->PRIM_type = T_BIND_ACK; 671 qreply(q, mp); 672 } 673 } 674 675 /* 676 * This routine handles each T_CONN_REQ message passed to udp. It 677 * associates a default destination address with the stream. 678 * 679 * After various error checks are completed, udp_connect() lays 680 * the target address and port into the composite header template. 681 * Then we ask IP for information, including a source address if we didn't 682 * already have one. Finally we send up the T_OK_ACK reply message. 
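 *
 * The TPI exchange implemented below is therefore:
 *
 *	T_CONN_REQ	(down)
 *	T_OK_ACK	(up, reusing the T_CONN_REQ mblk)
 *	T_CONN_CON	(up, sent to keep TLI/XTI clients happy)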
683 */ 684 static void 685 udp_tpi_connect(queue_t *q, mblk_t *mp) 686 { 687 conn_t *connp = Q_TO_CONN(q); 688 int error; 689 socklen_t len; 690 struct sockaddr *sa; 691 struct T_conn_req *tcr; 692 cred_t *cr; 693 pid_t pid; 694 /* 695 * All Solaris components should pass a db_credp 696 * for this TPI message, hence we ASSERT. 697 * But in case there is some other M_PROTO that looks 698 * like a TPI message sent by some other kernel 699 * component, we check and return an error. 700 */ 701 cr = msg_getcred(mp, &pid); 702 ASSERT(cr != NULL); 703 if (cr == NULL) { 704 udp_err_ack(q, mp, TSYSERR, EINVAL); 705 return; 706 } 707 708 tcr = (struct T_conn_req *)mp->b_rptr; 709 710 /* A bit of sanity checking */ 711 if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_conn_req)) { 712 udp_err_ack(q, mp, TPROTO, 0); 713 return; 714 } 715 716 if (tcr->OPT_length != 0) { 717 udp_err_ack(q, mp, TBADOPT, 0); 718 return; 719 } 720 721 /* 722 * Determine packet type based on type of address passed in 723 * the request should contain an IPv4 or IPv6 address. 724 * Make sure that address family matches the type of 725 * family of the address passed down. 726 */ 727 len = tcr->DEST_length; 728 switch (tcr->DEST_length) { 729 default: 730 udp_err_ack(q, mp, TBADADDR, 0); 731 return; 732 733 case sizeof (sin_t): 734 sa = (struct sockaddr *)mi_offset_param(mp, tcr->DEST_offset, 735 sizeof (sin_t)); 736 break; 737 738 case sizeof (sin6_t): 739 sa = (struct sockaddr *)mi_offset_param(mp, tcr->DEST_offset, 740 sizeof (sin6_t)); 741 break; 742 } 743 744 error = proto_verify_ip_addr(connp->conn_family, sa, len); 745 if (error != 0) { 746 udp_err_ack(q, mp, TSYSERR, error); 747 return; 748 } 749 750 error = udp_do_connect(connp, sa, len, cr, pid); 751 if (error != 0) { 752 if (error < 0) 753 udp_err_ack(q, mp, -error, 0); 754 else 755 udp_err_ack(q, mp, TSYSERR, error); 756 } else { 757 mblk_t *mp1; 758 /* 759 * We have to send a connection confirmation to 760 * keep TLI happy. 761 */ 762 if (connp->conn_family == AF_INET) { 763 mp1 = mi_tpi_conn_con(NULL, (char *)sa, 764 sizeof (sin_t), NULL, 0); 765 } else { 766 mp1 = mi_tpi_conn_con(NULL, (char *)sa, 767 sizeof (sin6_t), NULL, 0); 768 } 769 if (mp1 == NULL) { 770 udp_err_ack(q, mp, TSYSERR, ENOMEM); 771 return; 772 } 773 774 /* 775 * Send ok_ack for T_CONN_REQ 776 */ 777 mp = mi_tpi_ok_ack_alloc(mp); 778 if (mp == NULL) { 779 /* Unable to reuse the T_CONN_REQ for the ack. */ 780 udp_err_ack_prim(q, mp1, T_CONN_REQ, TSYSERR, ENOMEM); 781 return; 782 } 783 784 putnext(connp->conn_rq, mp); 785 putnext(connp->conn_rq, mp1); 786 } 787 } 788 789 static int 790 udp_tpi_close(queue_t *q, int flags) 791 { 792 conn_t *connp; 793 794 if (flags & SO_FALLBACK) { 795 /* 796 * stream is being closed while in fallback 797 * simply free the resources that were allocated 798 */ 799 inet_minor_free(WR(q)->q_ptr, (dev_t)(RD(q)->q_ptr)); 800 qprocsoff(q); 801 goto done; 802 } 803 804 connp = Q_TO_CONN(q); 805 udp_do_close(connp); 806 done: 807 q->q_ptr = WR(q)->q_ptr = NULL; 808 return (0); 809 } 810 811 static void 812 udp_close_free(conn_t *connp) 813 { 814 udp_t *udp = connp->conn_udp; 815 816 /* If there are any options associated with the stream, free them. */ 817 if (udp->udp_recv_ipp.ipp_fields != 0) 818 ip_pkt_free(&udp->udp_recv_ipp); 819 820 /* 821 * Clear any fields which the kmem_cache constructor clears. 822 * Only udp_connp needs to be preserved. 823 * TBD: We should make this more efficient to avoid clearing 824 * everything. 
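	 * One possible approach (a sketch only, not implemented here) would
	 * be to group the fields that must survive at the front of udp_t and
	 * clear only the tail, e.g. with a hypothetical udp_clear_start
	 * marker field:
	 *
	 *	bzero(&udp->udp_clear_start,
	 *	    sizeof (udp_t) - offsetof(udp_t, udp_clear_start));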
825 */ 826 ASSERT(udp->udp_connp == connp); 827 bzero(udp, sizeof (udp_t)); 828 udp->udp_connp = connp; 829 } 830 831 static int 832 udp_do_disconnect(conn_t *connp) 833 { 834 udp_t *udp; 835 udp_fanout_t *udpf; 836 udp_stack_t *us; 837 int error; 838 839 udp = connp->conn_udp; 840 us = udp->udp_us; 841 mutex_enter(&connp->conn_lock); 842 if (udp->udp_state != TS_DATA_XFER) { 843 mutex_exit(&connp->conn_lock); 844 return (-TOUTSTATE); 845 } 846 udpf = &us->us_bind_fanout[UDP_BIND_HASH(connp->conn_lport, 847 us->us_bind_fanout_size)]; 848 mutex_enter(&udpf->uf_lock); 849 if (connp->conn_mcbc_bind) 850 connp->conn_saddr_v6 = ipv6_all_zeros; 851 else 852 connp->conn_saddr_v6 = connp->conn_bound_addr_v6; 853 connp->conn_laddr_v6 = connp->conn_bound_addr_v6; 854 connp->conn_faddr_v6 = ipv6_all_zeros; 855 connp->conn_fport = 0; 856 udp->udp_state = TS_IDLE; 857 mutex_exit(&udpf->uf_lock); 858 859 /* Remove any remnants of mapped address binding */ 860 if (connp->conn_family == AF_INET6) 861 connp->conn_ipversion = IPV6_VERSION; 862 863 connp->conn_v6lastdst = ipv6_all_zeros; 864 error = udp_build_hdr_template(connp, &connp->conn_saddr_v6, 865 &connp->conn_faddr_v6, connp->conn_fport, connp->conn_flowinfo); 866 mutex_exit(&connp->conn_lock); 867 if (error != 0) 868 return (error); 869 870 /* 871 * Tell IP to remove the full binding and revert 872 * to the local address binding. 873 */ 874 return (ip_laddr_fanout_insert(connp)); 875 } 876 877 static void 878 udp_tpi_disconnect(queue_t *q, mblk_t *mp) 879 { 880 conn_t *connp = Q_TO_CONN(q); 881 int error; 882 883 /* 884 * Allocate the largest primitive we need to send back 885 * T_error_ack is > than T_ok_ack 886 */ 887 mp = reallocb(mp, sizeof (struct T_error_ack), 1); 888 if (mp == NULL) { 889 /* Unable to reuse the T_DISCON_REQ for the ack. */ 890 udp_err_ack_prim(q, mp, T_DISCON_REQ, TSYSERR, ENOMEM); 891 return; 892 } 893 894 error = udp_do_disconnect(connp); 895 896 if (error != 0) { 897 if (error < 0) { 898 udp_err_ack(q, mp, -error, 0); 899 } else { 900 udp_err_ack(q, mp, TSYSERR, error); 901 } 902 } else { 903 mp = mi_tpi_ok_ack_alloc(mp); 904 ASSERT(mp != NULL); 905 qreply(q, mp); 906 } 907 } 908 909 int 910 udp_disconnect(conn_t *connp) 911 { 912 int error; 913 914 connp->conn_dgram_errind = B_FALSE; 915 error = udp_do_disconnect(connp); 916 if (error < 0) 917 error = proto_tlitosyserr(-error); 918 919 return (error); 920 } 921 922 /* This routine creates a T_ERROR_ACK message and passes it upstream. */ 923 static void 924 udp_err_ack(queue_t *q, mblk_t *mp, t_scalar_t t_error, int sys_error) 925 { 926 if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL) 927 qreply(q, mp); 928 } 929 930 /* Shorthand to generate and send TPI error acks to our client */ 931 static void 932 udp_err_ack_prim(queue_t *q, mblk_t *mp, t_scalar_t primitive, 933 t_scalar_t t_error, int sys_error) 934 { 935 struct T_error_ack *teackp; 936 937 if ((mp = tpi_ack_alloc(mp, sizeof (struct T_error_ack), 938 M_PCPROTO, T_ERROR_ACK)) != NULL) { 939 teackp = (struct T_error_ack *)mp->b_rptr; 940 teackp->ERROR_prim = primitive; 941 teackp->TLI_error = t_error; 942 teackp->UNIX_error = sys_error; 943 qreply(q, mp); 944 } 945 } 946 947 /* At minimum we need 4 bytes of UDP header */ 948 #define ICMP_MIN_UDP_HDR 4 949 950 /* 951 * udp_icmp_input is called as conn_recvicmp to process ICMP messages. 952 * Generates the appropriate T_UDERROR_IND for permanent (non-transient) errors. 953 * Assumes that IP has pulled up everything up to and including the ICMP header. 
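 *
 * The mblk thus starts with the outer IP header, then the ICMP header, then
 * the quoted (inner) IP header and the leading bytes of the quoted UDP
 * header; ICMP_MIN_UDP_HDR (4) bytes of the latter are enough to recover
 * the remote port that is reported back up with the error.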
954 */ 955 /* ARGSUSED2 */ 956 static void 957 udp_icmp_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira) 958 { 959 conn_t *connp = (conn_t *)arg1; 960 icmph_t *icmph; 961 ipha_t *ipha; 962 int iph_hdr_length; 963 udpha_t *udpha; 964 sin_t sin; 965 sin6_t sin6; 966 mblk_t *mp1; 967 int error = 0; 968 udp_t *udp = connp->conn_udp; 969 970 ipha = (ipha_t *)mp->b_rptr; 971 972 ASSERT(OK_32PTR(mp->b_rptr)); 973 974 if (IPH_HDR_VERSION(ipha) != IPV4_VERSION) { 975 ASSERT(IPH_HDR_VERSION(ipha) == IPV6_VERSION); 976 udp_icmp_error_ipv6(connp, mp, ira); 977 return; 978 } 979 ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION); 980 981 /* Skip past the outer IP and ICMP headers */ 982 ASSERT(IPH_HDR_LENGTH(ipha) == ira->ira_ip_hdr_length); 983 iph_hdr_length = ira->ira_ip_hdr_length; 984 icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length]; 985 ipha = (ipha_t *)&icmph[1]; /* Inner IP header */ 986 987 /* Skip past the inner IP and find the ULP header */ 988 iph_hdr_length = IPH_HDR_LENGTH(ipha); 989 udpha = (udpha_t *)((char *)ipha + iph_hdr_length); 990 991 switch (icmph->icmph_type) { 992 case ICMP_DEST_UNREACHABLE: 993 switch (icmph->icmph_code) { 994 case ICMP_FRAGMENTATION_NEEDED: { 995 ipha_t *ipha; 996 ip_xmit_attr_t *ixa; 997 /* 998 * IP has already adjusted the path MTU. 999 * But we need to adjust DF for IPv4. 1000 */ 1001 if (connp->conn_ipversion != IPV4_VERSION) 1002 break; 1003 1004 ixa = conn_get_ixa(connp, B_FALSE); 1005 if (ixa == NULL || ixa->ixa_ire == NULL) { 1006 /* 1007 * Some other thread holds conn_ixa. We will 1008 * redo this on the next ICMP too big. 1009 */ 1010 if (ixa != NULL) 1011 ixa_refrele(ixa); 1012 break; 1013 } 1014 (void) ip_get_pmtu(ixa); 1015 1016 mutex_enter(&connp->conn_lock); 1017 ipha = (ipha_t *)connp->conn_ht_iphc; 1018 if (ixa->ixa_flags & IXAF_PMTU_IPV4_DF) { 1019 ipha->ipha_fragment_offset_and_flags |= 1020 IPH_DF_HTONS; 1021 } else { 1022 ipha->ipha_fragment_offset_and_flags &= 1023 ~IPH_DF_HTONS; 1024 } 1025 mutex_exit(&connp->conn_lock); 1026 ixa_refrele(ixa); 1027 break; 1028 } 1029 case ICMP_PORT_UNREACHABLE: 1030 case ICMP_PROTOCOL_UNREACHABLE: 1031 error = ECONNREFUSED; 1032 break; 1033 default: 1034 /* Transient errors */ 1035 break; 1036 } 1037 break; 1038 default: 1039 /* Transient errors */ 1040 break; 1041 } 1042 if (error == 0) { 1043 freemsg(mp); 1044 return; 1045 } 1046 1047 /* 1048 * Deliver T_UDERROR_IND when the application has asked for it. 1049 * The socket layer enables this automatically when connected. 
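	 * For a connected non-STREAMS socket the error is pushed up via the
	 * su_set_error upcall, so, for example, a later send() or recv() on
	 * a socket that drew an ICMP port-unreachable fails with
	 * ECONNREFUSED; for a TPI stream a T_UDERROR_IND is built with
	 * mi_tpi_uderror_ind() and sent upstream instead.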
1050 */ 1051 if (!connp->conn_dgram_errind) { 1052 freemsg(mp); 1053 return; 1054 } 1055 1056 switch (connp->conn_family) { 1057 case AF_INET: 1058 sin = sin_null; 1059 sin.sin_family = AF_INET; 1060 sin.sin_addr.s_addr = ipha->ipha_dst; 1061 sin.sin_port = udpha->uha_dst_port; 1062 if (IPCL_IS_NONSTR(connp)) { 1063 mutex_enter(&connp->conn_lock); 1064 if (udp->udp_state == TS_DATA_XFER) { 1065 if (sin.sin_port == connp->conn_fport && 1066 sin.sin_addr.s_addr == 1067 connp->conn_faddr_v4) { 1068 mutex_exit(&connp->conn_lock); 1069 (*connp->conn_upcalls->su_set_error) 1070 (connp->conn_upper_handle, error); 1071 goto done; 1072 } 1073 } else { 1074 udp->udp_delayed_error = error; 1075 *((sin_t *)&udp->udp_delayed_addr) = sin; 1076 } 1077 mutex_exit(&connp->conn_lock); 1078 } else { 1079 mp1 = mi_tpi_uderror_ind((char *)&sin, sizeof (sin_t), 1080 NULL, 0, error); 1081 if (mp1 != NULL) 1082 putnext(connp->conn_rq, mp1); 1083 } 1084 break; 1085 case AF_INET6: 1086 sin6 = sin6_null; 1087 sin6.sin6_family = AF_INET6; 1088 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &sin6.sin6_addr); 1089 sin6.sin6_port = udpha->uha_dst_port; 1090 if (IPCL_IS_NONSTR(connp)) { 1091 mutex_enter(&connp->conn_lock); 1092 if (udp->udp_state == TS_DATA_XFER) { 1093 if (sin6.sin6_port == connp->conn_fport && 1094 IN6_ARE_ADDR_EQUAL(&sin6.sin6_addr, 1095 &connp->conn_faddr_v6)) { 1096 mutex_exit(&connp->conn_lock); 1097 (*connp->conn_upcalls->su_set_error) 1098 (connp->conn_upper_handle, error); 1099 goto done; 1100 } 1101 } else { 1102 udp->udp_delayed_error = error; 1103 *((sin6_t *)&udp->udp_delayed_addr) = sin6; 1104 } 1105 mutex_exit(&connp->conn_lock); 1106 } else { 1107 mp1 = mi_tpi_uderror_ind((char *)&sin6, sizeof (sin6_t), 1108 NULL, 0, error); 1109 if (mp1 != NULL) 1110 putnext(connp->conn_rq, mp1); 1111 } 1112 break; 1113 } 1114 done: 1115 freemsg(mp); 1116 } 1117 1118 /* 1119 * udp_icmp_error_ipv6 is called by udp_icmp_error to process ICMP for IPv6. 1120 * Generates the appropriate T_UDERROR_IND for permanent (non-transient) errors. 1121 * Assumes that IP has pulled up all the extension headers as well as the 1122 * ICMPv6 header. 
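 *
 * The layout parsed below mirrors the IPv4 case: outer ip6_t, icmp6_t,
 * then the quoted inner ip6_t whose extension headers are walked with
 * ip_hdr_length_nexthdr_v6() to locate the inner UDP header.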
1123 */ 1124 static void 1125 udp_icmp_error_ipv6(conn_t *connp, mblk_t *mp, ip_recv_attr_t *ira) 1126 { 1127 icmp6_t *icmp6; 1128 ip6_t *ip6h, *outer_ip6h; 1129 uint16_t iph_hdr_length; 1130 uint8_t *nexthdrp; 1131 udpha_t *udpha; 1132 sin6_t sin6; 1133 mblk_t *mp1; 1134 int error = 0; 1135 udp_t *udp = connp->conn_udp; 1136 udp_stack_t *us = udp->udp_us; 1137 1138 outer_ip6h = (ip6_t *)mp->b_rptr; 1139 #ifdef DEBUG 1140 if (outer_ip6h->ip6_nxt != IPPROTO_ICMPV6) 1141 iph_hdr_length = ip_hdr_length_v6(mp, outer_ip6h); 1142 else 1143 iph_hdr_length = IPV6_HDR_LEN; 1144 ASSERT(iph_hdr_length == ira->ira_ip_hdr_length); 1145 #endif 1146 /* Skip past the outer IP and ICMP headers */ 1147 iph_hdr_length = ira->ira_ip_hdr_length; 1148 icmp6 = (icmp6_t *)&mp->b_rptr[iph_hdr_length]; 1149 1150 /* Skip past the inner IP and find the ULP header */ 1151 ip6h = (ip6_t *)&icmp6[1]; /* Inner IP header */ 1152 if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, &nexthdrp)) { 1153 freemsg(mp); 1154 return; 1155 } 1156 udpha = (udpha_t *)((char *)ip6h + iph_hdr_length); 1157 1158 switch (icmp6->icmp6_type) { 1159 case ICMP6_DST_UNREACH: 1160 switch (icmp6->icmp6_code) { 1161 case ICMP6_DST_UNREACH_NOPORT: 1162 error = ECONNREFUSED; 1163 break; 1164 case ICMP6_DST_UNREACH_ADMIN: 1165 case ICMP6_DST_UNREACH_NOROUTE: 1166 case ICMP6_DST_UNREACH_BEYONDSCOPE: 1167 case ICMP6_DST_UNREACH_ADDR: 1168 /* Transient errors */ 1169 break; 1170 default: 1171 break; 1172 } 1173 break; 1174 case ICMP6_PACKET_TOO_BIG: { 1175 struct T_unitdata_ind *tudi; 1176 struct T_opthdr *toh; 1177 size_t udi_size; 1178 mblk_t *newmp; 1179 t_scalar_t opt_length = sizeof (struct T_opthdr) + 1180 sizeof (struct ip6_mtuinfo); 1181 sin6_t *sin6; 1182 struct ip6_mtuinfo *mtuinfo; 1183 1184 /* 1185 * If the application has requested to receive path mtu 1186 * information, send up an empty message containing an 1187 * IPV6_PATHMTU ancillary data item. 1188 */ 1189 if (!connp->conn_ipv6_recvpathmtu) 1190 break; 1191 1192 udi_size = sizeof (struct T_unitdata_ind) + sizeof (sin6_t) + 1193 opt_length; 1194 if ((newmp = allocb(udi_size, BPRI_MED)) == NULL) { 1195 UDPS_BUMP_MIB(us, udpInErrors); 1196 break; 1197 } 1198 1199 /* 1200 * newmp->b_cont is left to NULL on purpose. This is an 1201 * empty message containing only ancillary data. 1202 */ 1203 newmp->b_datap->db_type = M_PROTO; 1204 tudi = (struct T_unitdata_ind *)newmp->b_rptr; 1205 newmp->b_wptr = (uchar_t *)tudi + udi_size; 1206 tudi->PRIM_type = T_UNITDATA_IND; 1207 tudi->SRC_length = sizeof (sin6_t); 1208 tudi->SRC_offset = sizeof (struct T_unitdata_ind); 1209 tudi->OPT_offset = tudi->SRC_offset + sizeof (sin6_t); 1210 tudi->OPT_length = opt_length; 1211 1212 sin6 = (sin6_t *)&tudi[1]; 1213 bzero(sin6, sizeof (sin6_t)); 1214 sin6->sin6_family = AF_INET6; 1215 sin6->sin6_addr = connp->conn_faddr_v6; 1216 1217 toh = (struct T_opthdr *)&sin6[1]; 1218 toh->level = IPPROTO_IPV6; 1219 toh->name = IPV6_PATHMTU; 1220 toh->len = opt_length; 1221 toh->status = 0; 1222 1223 mtuinfo = (struct ip6_mtuinfo *)&toh[1]; 1224 bzero(mtuinfo, sizeof (struct ip6_mtuinfo)); 1225 mtuinfo->ip6m_addr.sin6_family = AF_INET6; 1226 mtuinfo->ip6m_addr.sin6_addr = ip6h->ip6_dst; 1227 mtuinfo->ip6m_mtu = icmp6->icmp6_mtu; 1228 /* 1229 * We've consumed everything we need from the original 1230 * message. Free it, then send our empty message. 
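		 * An application that has set IPV6_RECVPATHMTU thus sees a
		 * zero-length datagram whose only content is an IPV6_PATHMTU
		 * ancillary item carrying the struct ip6_mtuinfo built above.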
		 */
		freemsg(mp);
		udp_ulp_recv(connp, newmp, msgdsize(newmp), ira);
		return;
	}
	case ICMP6_TIME_EXCEEDED:
		/* Transient errors */
		break;
	case ICMP6_PARAM_PROB:
		/* If this corresponds to an ICMP_PROTOCOL_UNREACHABLE */
		if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER &&
		    (uchar_t *)ip6h + icmp6->icmp6_pptr ==
		    (uchar_t *)nexthdrp) {
			error = ECONNREFUSED;
			break;
		}
		break;
	}
	if (error == 0) {
		freemsg(mp);
		return;
	}

	/*
	 * Deliver T_UDERROR_IND when the application has asked for it.
	 * The socket layer enables this automatically when connected.
	 */
	if (!connp->conn_dgram_errind) {
		freemsg(mp);
		return;
	}

	sin6 = sin6_null;
	sin6.sin6_family = AF_INET6;
	sin6.sin6_addr = ip6h->ip6_dst;
	sin6.sin6_port = udpha->uha_dst_port;
	sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK;

	if (IPCL_IS_NONSTR(connp)) {
		mutex_enter(&connp->conn_lock);
		if (udp->udp_state == TS_DATA_XFER) {
			if (sin6.sin6_port == connp->conn_fport &&
			    IN6_ARE_ADDR_EQUAL(&sin6.sin6_addr,
			    &connp->conn_faddr_v6)) {
				mutex_exit(&connp->conn_lock);
				(*connp->conn_upcalls->su_set_error)
				    (connp->conn_upper_handle, error);
				goto done;
			}
		} else {
			udp->udp_delayed_error = error;
			*((sin6_t *)&udp->udp_delayed_addr) = sin6;
		}
		mutex_exit(&connp->conn_lock);
	} else {
		mp1 = mi_tpi_uderror_ind((char *)&sin6, sizeof (sin6_t),
		    NULL, 0, error);
		if (mp1 != NULL)
			putnext(connp->conn_rq, mp1);
	}
done:
	freemsg(mp);
}

/*
 * This routine responds to T_ADDR_REQ messages.  It is called by udp_wput.
 * The local address is filled in if the endpoint is bound. The remote
 * address is filled in if the remote address has been specified
 * ("connected endpoint") (The concept of connected CLTS sockets is alien
 * to published TPI but we support it anyway).
 */
static void
udp_addr_req(queue_t *q, mblk_t *mp)
{
	struct sockaddr *sa;
	mblk_t	*ackmp;
	struct T_addr_ack *taa;
	udp_t	*udp = Q_TO_UDP(q);
	conn_t	*connp = udp->udp_connp;
	uint_t	addrlen;

	/* Make it large enough for worst case */
	ackmp = reallocb(mp, sizeof (struct T_addr_ack) +
	    2 * sizeof (sin6_t), 1);
	if (ackmp == NULL) {
		udp_err_ack(q, mp, TSYSERR, ENOMEM);
		return;
	}
	taa = (struct T_addr_ack *)ackmp->b_rptr;

	bzero(taa, sizeof (struct T_addr_ack));
	ackmp->b_wptr = (uchar_t *)&taa[1];

	taa->PRIM_type = T_ADDR_ACK;
	ackmp->b_datap->db_type = M_PCPROTO;

	if (connp->conn_family == AF_INET)
		addrlen = sizeof (sin_t);
	else
		addrlen = sizeof (sin6_t);

	mutex_enter(&connp->conn_lock);
	/*
	 * Note: Following code assumes 32 bit alignment of basic
	 * data structures like sin_t and struct T_addr_ack.
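	 * The ack is laid out as [T_addr_ack][local sockaddr][remote
	 * sockaddr]: LOCADDR_offset is sizeof (*taa) and REMADDR_offset
	 * follows immediately at LOCADDR_offset + LOCADDR_length.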
1336 */ 1337 if (udp->udp_state != TS_UNBND) { 1338 /* 1339 * Fill in local address first 1340 */ 1341 taa->LOCADDR_offset = sizeof (*taa); 1342 taa->LOCADDR_length = addrlen; 1343 sa = (struct sockaddr *)&taa[1]; 1344 (void) conn_getsockname(connp, sa, &addrlen); 1345 ackmp->b_wptr += addrlen; 1346 } 1347 if (udp->udp_state == TS_DATA_XFER) { 1348 /* 1349 * connected, fill remote address too 1350 */ 1351 taa->REMADDR_length = addrlen; 1352 /* assumed 32-bit alignment */ 1353 taa->REMADDR_offset = taa->LOCADDR_offset + taa->LOCADDR_length; 1354 sa = (struct sockaddr *)(ackmp->b_rptr + taa->REMADDR_offset); 1355 (void) conn_getpeername(connp, sa, &addrlen); 1356 ackmp->b_wptr += addrlen; 1357 } 1358 mutex_exit(&connp->conn_lock); 1359 ASSERT(ackmp->b_wptr <= ackmp->b_datap->db_lim); 1360 qreply(q, ackmp); 1361 } 1362 1363 static void 1364 udp_copy_info(struct T_info_ack *tap, udp_t *udp) 1365 { 1366 conn_t *connp = udp->udp_connp; 1367 1368 if (connp->conn_family == AF_INET) { 1369 *tap = udp_g_t_info_ack_ipv4; 1370 } else { 1371 *tap = udp_g_t_info_ack_ipv6; 1372 } 1373 tap->CURRENT_state = udp->udp_state; 1374 tap->OPT_size = udp_max_optsize; 1375 } 1376 1377 static void 1378 udp_do_capability_ack(udp_t *udp, struct T_capability_ack *tcap, 1379 t_uscalar_t cap_bits1) 1380 { 1381 tcap->CAP_bits1 = 0; 1382 1383 if (cap_bits1 & TC1_INFO) { 1384 udp_copy_info(&tcap->INFO_ack, udp); 1385 tcap->CAP_bits1 |= TC1_INFO; 1386 } 1387 } 1388 1389 /* 1390 * This routine responds to T_CAPABILITY_REQ messages. It is called by 1391 * udp_wput. Much of the T_CAPABILITY_ACK information is copied from 1392 * udp_g_t_info_ack. The current state of the stream is copied from 1393 * udp_state. 1394 */ 1395 static void 1396 udp_capability_req(queue_t *q, mblk_t *mp) 1397 { 1398 t_uscalar_t cap_bits1; 1399 struct T_capability_ack *tcap; 1400 udp_t *udp = Q_TO_UDP(q); 1401 1402 cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1; 1403 1404 mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack), 1405 mp->b_datap->db_type, T_CAPABILITY_ACK); 1406 if (!mp) 1407 return; 1408 1409 tcap = (struct T_capability_ack *)mp->b_rptr; 1410 udp_do_capability_ack(udp, tcap, cap_bits1); 1411 1412 qreply(q, mp); 1413 } 1414 1415 /* 1416 * This routine responds to T_INFO_REQ messages. It is called by udp_wput. 1417 * Most of the T_INFO_ACK information is copied from udp_g_t_info_ack. 1418 * The current state of the stream is copied from udp_state. 1419 */ 1420 static void 1421 udp_info_req(queue_t *q, mblk_t *mp) 1422 { 1423 udp_t *udp = Q_TO_UDP(q); 1424 1425 /* Create a T_INFO_ACK message. */ 1426 mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO, 1427 T_INFO_ACK); 1428 if (!mp) 1429 return; 1430 udp_copy_info((struct T_info_ack *)mp->b_rptr, udp); 1431 qreply(q, mp); 1432 } 1433 1434 /* For /dev/udp aka AF_INET open */ 1435 static int 1436 udp_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp) 1437 { 1438 return (udp_open(q, devp, flag, sflag, credp, B_FALSE)); 1439 } 1440 1441 /* For /dev/udp6 aka AF_INET6 open */ 1442 static int 1443 udp_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp) 1444 { 1445 return (udp_open(q, devp, flag, sflag, credp, B_TRUE)); 1446 } 1447 1448 /* 1449 * This is the open routine for udp. It allocates a udp_t structure for 1450 * the stream and, on the first open of the module, creates an ND table. 
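 *
 * Minor number selection below: a socket open (SO_SOCKSTR) first tries the
 * "large" arena ip_minor_arena_la and falls back to the small arena
 * ip_minor_arena_sa; an SO_FALLBACK open only wires up the queue pointers
 * needed by a non-STREAMS socket that is falling back to STREAMS.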
1451 */ 1452 static int 1453 udp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp, 1454 boolean_t isv6) 1455 { 1456 udp_t *udp; 1457 conn_t *connp; 1458 dev_t conn_dev; 1459 vmem_t *minor_arena; 1460 int err; 1461 1462 /* If the stream is already open, return immediately. */ 1463 if (q->q_ptr != NULL) 1464 return (0); 1465 1466 if (sflag == MODOPEN) 1467 return (EINVAL); 1468 1469 if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) && 1470 ((conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) { 1471 minor_arena = ip_minor_arena_la; 1472 } else { 1473 /* 1474 * Either minor numbers in the large arena were exhausted 1475 * or a non socket application is doing the open. 1476 * Try to allocate from the small arena. 1477 */ 1478 if ((conn_dev = inet_minor_alloc(ip_minor_arena_sa)) == 0) 1479 return (EBUSY); 1480 1481 minor_arena = ip_minor_arena_sa; 1482 } 1483 1484 if (flag & SO_FALLBACK) { 1485 /* 1486 * Non streams socket needs a stream to fallback to 1487 */ 1488 RD(q)->q_ptr = (void *)conn_dev; 1489 WR(q)->q_qinfo = &udp_fallback_sock_winit; 1490 WR(q)->q_ptr = (void *)minor_arena; 1491 qprocson(q); 1492 return (0); 1493 } 1494 1495 connp = udp_do_open(credp, isv6, KM_SLEEP, &err); 1496 if (connp == NULL) { 1497 inet_minor_free(minor_arena, conn_dev); 1498 return (err); 1499 } 1500 udp = connp->conn_udp; 1501 1502 *devp = makedevice(getemajor(*devp), (minor_t)conn_dev); 1503 connp->conn_dev = conn_dev; 1504 connp->conn_minor_arena = minor_arena; 1505 1506 /* 1507 * Initialize the udp_t structure for this stream. 1508 */ 1509 q->q_ptr = connp; 1510 WR(q)->q_ptr = connp; 1511 connp->conn_rq = q; 1512 connp->conn_wq = WR(q); 1513 1514 /* 1515 * Since this conn_t/udp_t is not yet visible to anybody else we don't 1516 * need to lock anything. 1517 */ 1518 ASSERT(connp->conn_proto == IPPROTO_UDP); 1519 ASSERT(connp->conn_udp == udp); 1520 ASSERT(udp->udp_connp == connp); 1521 1522 if (flag & SO_SOCKSTR) { 1523 udp->udp_issocket = B_TRUE; 1524 } 1525 1526 WR(q)->q_hiwat = connp->conn_sndbuf; 1527 WR(q)->q_lowat = connp->conn_sndlowat; 1528 1529 qprocson(q); 1530 1531 /* Set the Stream head write offset and high watermark. */ 1532 (void) proto_set_tx_wroff(q, connp, connp->conn_wroff); 1533 (void) proto_set_rx_hiwat(q, connp, 1534 udp_set_rcv_hiwat(udp, connp->conn_rcvbuf)); 1535 1536 mutex_enter(&connp->conn_lock); 1537 connp->conn_state_flags &= ~CONN_INCIPIENT; 1538 mutex_exit(&connp->conn_lock); 1539 return (0); 1540 } 1541 1542 /* 1543 * Which UDP options OK to set through T_UNITDATA_REQ... 
 */
/* ARGSUSED */
static boolean_t
udp_opt_allow_udr_set(t_scalar_t level, t_scalar_t name)
{
	return (B_TRUE);
}

/*
 * This routine gets default values of certain options whose default
 * values are maintained by protocol specific code.
 */
int
udp_opt_default(queue_t *q, t_scalar_t level, t_scalar_t name, uchar_t *ptr)
{
	udp_t		*udp = Q_TO_UDP(q);
	udp_stack_t	*us = udp->udp_us;
	int		*i1 = (int *)ptr;

	switch (level) {
	case IPPROTO_IP:
		switch (name) {
		case IP_MULTICAST_TTL:
			*ptr = (uchar_t)IP_DEFAULT_MULTICAST_TTL;
			return (sizeof (uchar_t));
		case IP_MULTICAST_LOOP:
			*ptr = (uchar_t)IP_DEFAULT_MULTICAST_LOOP;
			return (sizeof (uchar_t));
		}
		break;
	case IPPROTO_IPV6:
		switch (name) {
		case IPV6_MULTICAST_HOPS:
			*i1 = IP_DEFAULT_MULTICAST_TTL;
			return (sizeof (int));
		case IPV6_MULTICAST_LOOP:
			*i1 = IP_DEFAULT_MULTICAST_LOOP;
			return (sizeof (int));
		case IPV6_UNICAST_HOPS:
			*i1 = us->us_ipv6_hoplimit;
			return (sizeof (int));
		}
		break;
	}
	return (-1);
}

/*
 * This routine retrieves the current status of socket options.
 * It returns the size of the option retrieved, or -1.
 */
int
udp_opt_get(conn_t *connp, t_scalar_t level, t_scalar_t name,
    uchar_t *ptr)
{
	int		*i1 = (int *)ptr;
	udp_t		*udp = connp->conn_udp;
	int		len;
	conn_opt_arg_t	coas;
	int		retval;

	coas.coa_connp = connp;
	coas.coa_ixa = connp->conn_ixa;
	coas.coa_ipp = &connp->conn_xmit_ipp;
	coas.coa_ancillary = B_FALSE;
	coas.coa_changed = 0;

	/*
	 * We assume that the optcom framework has checked for the set
	 * of levels and names that are supported, hence we don't worry
	 * about rejecting based on that.
	 * First check for UDP specific handling, then pass to common routine.
	 */
	switch (level) {
	case IPPROTO_IP:
		/*
		 * Only allow IPv4 option processing on IPv4 sockets.
		 */
		if (connp->conn_family != AF_INET)
			return (-1);

		switch (name) {
		case IP_OPTIONS:
		case T_IP_OPTIONS:
			mutex_enter(&connp->conn_lock);
			if (!(udp->udp_recv_ipp.ipp_fields &
			    IPPF_IPV4_OPTIONS)) {
				mutex_exit(&connp->conn_lock);
				return (0);
			}

			len = udp->udp_recv_ipp.ipp_ipv4_options_len;
			ASSERT(len != 0);
			bcopy(udp->udp_recv_ipp.ipp_ipv4_options, ptr, len);
			mutex_exit(&connp->conn_lock);
			return (len);
		}
		break;
	case IPPROTO_UDP:
		switch (name) {
		case UDP_NAT_T_ENDPOINT:
			mutex_enter(&connp->conn_lock);
			*i1 = udp->udp_nat_t_endpoint;
			mutex_exit(&connp->conn_lock);
			return (sizeof (int));
		case UDP_RCVHDR:
			mutex_enter(&connp->conn_lock);
			*i1 = udp->udp_rcvhdr ? 1 : 0;
			mutex_exit(&connp->conn_lock);
			return (sizeof (int));
		}
	}
	mutex_enter(&connp->conn_lock);
	retval = conn_opt_get(&coas, level, name, ptr);
	mutex_exit(&connp->conn_lock);
	return (retval);
}

/*
 * This routine retrieves the current status of socket options.
 * It returns the size of the option retrieved, or -1.
 */
int
udp_tpi_opt_get(queue_t *q, t_scalar_t level, t_scalar_t name, uchar_t *ptr)
{
	conn_t	*connp = Q_TO_CONN(q);
	int	err;

	err = udp_opt_get(connp, level, name, ptr);
	return (err);
}

/*
 * This routine sets socket options.
 */
int
udp_do_opt_set(conn_opt_arg_t *coa, int level, int name,
    uint_t inlen, uchar_t *invalp, cred_t *cr, boolean_t checkonly)
{
	conn_t		*connp = coa->coa_connp;
	ip_xmit_attr_t	*ixa = coa->coa_ixa;
	udp_t		*udp = connp->conn_udp;
	udp_stack_t	*us = udp->udp_us;
	int		*i1 = (int *)invalp;
	boolean_t	onoff = (*i1 == 0) ? 0 : 1;
	int		error;

	ASSERT(MUTEX_NOT_HELD(&coa->coa_connp->conn_lock));
	/*
	 * First do UDP specific sanity checks and handle UDP specific
	 * options. Note that some IPPROTO_UDP options are handled
	 * by conn_opt_set.
	 */
	switch (level) {
	case SOL_SOCKET:
		switch (name) {
		case SO_SNDBUF:
			if (*i1 > us->us_max_buf) {
				return (ENOBUFS);
			}
			break;
		case SO_RCVBUF:
			if (*i1 > us->us_max_buf) {
				return (ENOBUFS);
			}
			break;

		case SCM_UCRED: {
			struct ucred_s *ucr;
			cred_t *newcr;
			ts_label_t *tsl;

			/*
			 * Only sockets that have proper privileges and are
			 * bound to MLPs will have any other value here, so
			 * this implicitly tests for privilege to set label.
			 */
			if (connp->conn_mlp_type == mlptSingle)
				break;

			ucr = (struct ucred_s *)invalp;
			if (inlen < sizeof (*ucr) + sizeof (bslabel_t) ||
			    ucr->uc_labeloff < sizeof (*ucr) ||
			    ucr->uc_labeloff + sizeof (bslabel_t) > inlen)
				return (EINVAL);
			if (!checkonly) {
				/*
				 * Set ixa_tsl to the new label.
				 * We assume that crgetzoneid doesn't change
				 * as part of the SCM_UCRED.
				 */
				ASSERT(cr != NULL);
				if ((tsl = crgetlabel(cr)) == NULL)
					return (EINVAL);
				newcr = copycred_from_bslabel(cr, UCLABEL(ucr),
				    tsl->tsl_doi, KM_NOSLEEP);
				if (newcr == NULL)
					return (ENOSR);
				ASSERT(newcr->cr_label != NULL);
				/*
				 * Move the hold on the cr_label to ixa_tsl by
				 * setting cr_label to NULL. Then release newcr.
				 */
				ip_xmit_attr_replace_tsl(ixa, newcr->cr_label);
				ixa->ixa_flags |= IXAF_UCRED_TSL;
				newcr->cr_label = NULL;
				crfree(newcr);
				coa->coa_changed |= COA_HEADER_CHANGED;
				coa->coa_changed |= COA_WROFF_CHANGED;
			}
			/* Fully handled this option. */
			return (0);
		}
		}
		break;
	case IPPROTO_UDP:
		switch (name) {
		case UDP_NAT_T_ENDPOINT:
			if ((error = secpolicy_ip_config(cr, B_FALSE)) != 0) {
				return (error);
			}

			/*
			 * Use conn_family instead so we can avoid ambiguities
			 * with AF_INET6 sockets that may switch from IPv4
			 * to IPv6.
			 */
			if (connp->conn_family != AF_INET) {
				return (EAFNOSUPPORT);
			}

			if (!checkonly) {
				mutex_enter(&connp->conn_lock);
				udp->udp_nat_t_endpoint = onoff;
				mutex_exit(&connp->conn_lock);
				coa->coa_changed |= COA_HEADER_CHANGED;
				coa->coa_changed |= COA_WROFF_CHANGED;
			}
			/* Fully handled this option.
*/ 1783 return (0); 1784 case UDP_RCVHDR: 1785 mutex_enter(&connp->conn_lock); 1786 udp->udp_rcvhdr = onoff; 1787 mutex_exit(&connp->conn_lock); 1788 return (0); 1789 } 1790 break; 1791 } 1792 error = conn_opt_set(coa, level, name, inlen, invalp, 1793 checkonly, cr); 1794 return (error); 1795 } 1796 1797 /* 1798 * This routine sets socket options. 1799 */ 1800 int 1801 udp_opt_set(conn_t *connp, uint_t optset_context, int level, 1802 int name, uint_t inlen, uchar_t *invalp, uint_t *outlenp, 1803 uchar_t *outvalp, void *thisdg_attrs, cred_t *cr) 1804 { 1805 udp_t *udp = connp->conn_udp; 1806 int err; 1807 conn_opt_arg_t coas, *coa; 1808 boolean_t checkonly; 1809 udp_stack_t *us = udp->udp_us; 1810 1811 switch (optset_context) { 1812 case SETFN_OPTCOM_CHECKONLY: 1813 checkonly = B_TRUE; 1814 /* 1815 * Note: Implies T_CHECK semantics for T_OPTCOM_REQ 1816 * inlen != 0 implies value supplied and 1817 * we have to "pretend" to set it. 1818 * inlen == 0 implies that there is no 1819 * value part in T_CHECK request and just validation 1820 * done elsewhere should be enough, we just return here. 1821 */ 1822 if (inlen == 0) { 1823 *outlenp = 0; 1824 return (0); 1825 } 1826 break; 1827 case SETFN_OPTCOM_NEGOTIATE: 1828 checkonly = B_FALSE; 1829 break; 1830 case SETFN_UD_NEGOTIATE: 1831 case SETFN_CONN_NEGOTIATE: 1832 checkonly = B_FALSE; 1833 /* 1834 * Negotiating local and "association-related" options 1835 * through T_UNITDATA_REQ. 1836 * 1837 * Following routine can filter out ones we do not 1838 * want to be "set" this way. 1839 */ 1840 if (!udp_opt_allow_udr_set(level, name)) { 1841 *outlenp = 0; 1842 return (EINVAL); 1843 } 1844 break; 1845 default: 1846 /* 1847 * We should never get here 1848 */ 1849 *outlenp = 0; 1850 return (EINVAL); 1851 } 1852 1853 ASSERT((optset_context != SETFN_OPTCOM_CHECKONLY) || 1854 (optset_context == SETFN_OPTCOM_CHECKONLY && inlen != 0)); 1855 1856 if (thisdg_attrs != NULL) { 1857 /* Options from T_UNITDATA_REQ */ 1858 coa = (conn_opt_arg_t *)thisdg_attrs; 1859 ASSERT(coa->coa_connp == connp); 1860 ASSERT(coa->coa_ixa != NULL); 1861 ASSERT(coa->coa_ipp != NULL); 1862 ASSERT(coa->coa_ancillary); 1863 } else { 1864 coa = &coas; 1865 coas.coa_connp = connp; 1866 /* Get a reference on conn_ixa to prevent concurrent mods */ 1867 coas.coa_ixa = conn_get_ixa(connp, B_TRUE); 1868 if (coas.coa_ixa == NULL) { 1869 *outlenp = 0; 1870 return (ENOMEM); 1871 } 1872 coas.coa_ipp = &connp->conn_xmit_ipp; 1873 coas.coa_ancillary = B_FALSE; 1874 coas.coa_changed = 0; 1875 } 1876 1877 err = udp_do_opt_set(coa, level, name, inlen, invalp, 1878 cr, checkonly); 1879 if (err != 0) { 1880 errout: 1881 if (!coa->coa_ancillary) 1882 ixa_refrele(coa->coa_ixa); 1883 *outlenp = 0; 1884 return (err); 1885 } 1886 /* Handle DHCPINIT here outside of lock */ 1887 if (level == IPPROTO_IP && name == IP_DHCPINIT_IF) { 1888 uint_t ifindex; 1889 ill_t *ill; 1890 1891 ifindex = *(uint_t *)invalp; 1892 if (ifindex == 0) { 1893 ill = NULL; 1894 } else { 1895 ill = ill_lookup_on_ifindex(ifindex, B_FALSE, 1896 coa->coa_ixa->ixa_ipst); 1897 if (ill == NULL) { 1898 err = ENXIO; 1899 goto errout; 1900 } 1901 1902 mutex_enter(&ill->ill_lock); 1903 if (ill->ill_state_flags & ILL_CONDEMNED) { 1904 mutex_exit(&ill->ill_lock); 1905 ill_refrele(ill); 1906 err = ENXIO; 1907 goto errout; 1908 } 1909 if (IS_VNI(ill)) { 1910 mutex_exit(&ill->ill_lock); 1911 ill_refrele(ill); 1912 err = EINVAL; 1913 goto errout; 1914 } 1915 } 1916 mutex_enter(&connp->conn_lock); 1917 1918 if (connp->conn_dhcpinit_ill != NULL) { 1919 /* 
1920 * We've locked the conn so conn_cleanup_ill() 1921 * cannot clear conn_dhcpinit_ill -- so it's 1922 * safe to access the ill. 1923 */ 1924 ill_t *oill = connp->conn_dhcpinit_ill; 1925 1926 ASSERT(oill->ill_dhcpinit != 0); 1927 atomic_dec_32(&oill->ill_dhcpinit); 1928 ill_set_inputfn(connp->conn_dhcpinit_ill); 1929 connp->conn_dhcpinit_ill = NULL; 1930 } 1931 1932 if (ill != NULL) { 1933 connp->conn_dhcpinit_ill = ill; 1934 atomic_inc_32(&ill->ill_dhcpinit); 1935 ill_set_inputfn(ill); 1936 mutex_exit(&connp->conn_lock); 1937 mutex_exit(&ill->ill_lock); 1938 ill_refrele(ill); 1939 } else { 1940 mutex_exit(&connp->conn_lock); 1941 } 1942 } 1943 1944 /* 1945 * Common case of OK return with outval same as inval. 1946 */ 1947 if (invalp != outvalp) { 1948 /* don't trust bcopy for identical src/dst */ 1949 (void) bcopy(invalp, outvalp, inlen); 1950 } 1951 *outlenp = inlen; 1952 1953 /* 1954 * If this was not ancillary data, then we rebuild the headers, 1955 * update the IRE/NCE, and IPsec as needed. 1956 * Since the label depends on the destination we go through 1957 * ip_set_destination first. 1958 */ 1959 if (coa->coa_ancillary) { 1960 return (0); 1961 } 1962 1963 if (coa->coa_changed & COA_ROUTE_CHANGED) { 1964 in6_addr_t saddr, faddr, nexthop; 1965 in_port_t fport; 1966 1967 /* 1968 * We clear lastdst to make sure we pick up the change 1969 * next time sending. 1970 * If we are connected we re-cache the information. 1971 * We ignore errors to preserve BSD behavior. 1972 * Note that we don't redo IPsec policy lookup here 1973 * since the final destination (or source) didn't change. 1974 */ 1975 mutex_enter(&connp->conn_lock); 1976 connp->conn_v6lastdst = ipv6_all_zeros; 1977 1978 ip_attr_nexthop(coa->coa_ipp, coa->coa_ixa, 1979 &connp->conn_faddr_v6, &nexthop); 1980 saddr = connp->conn_saddr_v6; 1981 faddr = connp->conn_faddr_v6; 1982 fport = connp->conn_fport; 1983 mutex_exit(&connp->conn_lock); 1984 1985 if (!IN6_IS_ADDR_UNSPECIFIED(&faddr) && 1986 !IN6_IS_ADDR_V4MAPPED_ANY(&faddr)) { 1987 (void) ip_attr_connect(connp, coa->coa_ixa, 1988 &saddr, &faddr, &nexthop, fport, NULL, NULL, 1989 IPDF_ALLOW_MCBC | IPDF_VERIFY_DST); 1990 } 1991 } 1992 1993 ixa_refrele(coa->coa_ixa); 1994 1995 if (coa->coa_changed & COA_HEADER_CHANGED) { 1996 /* 1997 * Rebuild the header template if we are connected. 1998 * Otherwise clear conn_v6lastdst so we rebuild the header 1999 * in the data path. 
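* Either way the next send goes out with the updated header
* contents.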
2000 */ 2001 mutex_enter(&connp->conn_lock); 2002 if (!IN6_IS_ADDR_UNSPECIFIED(&connp->conn_faddr_v6) && 2003 !IN6_IS_ADDR_V4MAPPED_ANY(&connp->conn_faddr_v6)) { 2004 err = udp_build_hdr_template(connp, 2005 &connp->conn_saddr_v6, &connp->conn_faddr_v6, 2006 connp->conn_fport, connp->conn_flowinfo); 2007 if (err != 0) { 2008 mutex_exit(&connp->conn_lock); 2009 return (err); 2010 } 2011 } else { 2012 connp->conn_v6lastdst = ipv6_all_zeros; 2013 } 2014 mutex_exit(&connp->conn_lock); 2015 } 2016 if (coa->coa_changed & COA_RCVBUF_CHANGED) { 2017 (void) proto_set_rx_hiwat(connp->conn_rq, connp, 2018 connp->conn_rcvbuf); 2019 } 2020 if ((coa->coa_changed & COA_SNDBUF_CHANGED) && !IPCL_IS_NONSTR(connp)) { 2021 connp->conn_wq->q_hiwat = connp->conn_sndbuf; 2022 } 2023 if (coa->coa_changed & COA_WROFF_CHANGED) { 2024 /* Increase wroff if needed */ 2025 uint_t wroff; 2026 2027 mutex_enter(&connp->conn_lock); 2028 wroff = connp->conn_ht_iphc_allocated + us->us_wroff_extra; 2029 if (udp->udp_nat_t_endpoint) 2030 wroff += sizeof (uint32_t); 2031 if (wroff > connp->conn_wroff) { 2032 connp->conn_wroff = wroff; 2033 mutex_exit(&connp->conn_lock); 2034 (void) proto_set_tx_wroff(connp->conn_rq, connp, wroff); 2035 } else { 2036 mutex_exit(&connp->conn_lock); 2037 } 2038 } 2039 return (err); 2040 } 2041 2042 /* This routine sets socket options. */ 2043 int 2044 udp_tpi_opt_set(queue_t *q, uint_t optset_context, int level, int name, 2045 uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp, 2046 void *thisdg_attrs, cred_t *cr) 2047 { 2048 conn_t *connp = Q_TO_CONN(q); 2049 int error; 2050 2051 error = udp_opt_set(connp, optset_context, level, name, inlen, invalp, 2052 outlenp, outvalp, thisdg_attrs, cr); 2053 return (error); 2054 } 2055 2056 /* 2057 * Setup IP and UDP headers. 2058 * Returns NULL on allocation failure, in which case data_mp is freed. 2059 */ 2060 mblk_t * 2061 udp_prepend_hdr(conn_t *connp, ip_xmit_attr_t *ixa, const ip_pkt_t *ipp, 2062 const in6_addr_t *v6src, const in6_addr_t *v6dst, in_port_t dstport, 2063 uint32_t flowinfo, mblk_t *data_mp, int *errorp) 2064 { 2065 mblk_t *mp; 2066 udpha_t *udpha; 2067 udp_stack_t *us = connp->conn_netstack->netstack_udp; 2068 uint_t data_len; 2069 uint32_t cksum; 2070 udp_t *udp = connp->conn_udp; 2071 boolean_t insert_spi = udp->udp_nat_t_endpoint; 2072 uint_t ulp_hdr_len; 2073 2074 data_len = msgdsize(data_mp); 2075 ulp_hdr_len = UDPH_SIZE; 2076 if (insert_spi) 2077 ulp_hdr_len += sizeof (uint32_t); 2078 2079 mp = conn_prepend_hdr(ixa, ipp, v6src, v6dst, IPPROTO_UDP, flowinfo, 2080 ulp_hdr_len, data_mp, data_len, us->us_wroff_extra, &cksum, errorp); 2081 if (mp == NULL) { 2082 ASSERT(*errorp != 0); 2083 return (NULL); 2084 } 2085 2086 data_len += ulp_hdr_len; 2087 ixa->ixa_pktlen = data_len + ixa->ixa_ip_hdr_length; 2088 2089 udpha = (udpha_t *)(mp->b_rptr + ixa->ixa_ip_hdr_length); 2090 udpha->uha_src_port = connp->conn_lport; 2091 udpha->uha_dst_port = dstport; 2092 udpha->uha_checksum = 0; 2093 udpha->uha_length = htons(data_len); 2094 2095 /* 2096 * If there was a routing option/header then conn_prepend_hdr 2097 * has massaged it and placed the pseudo-header checksum difference 2098 * in the cksum argument. 2099 * 2100 * Setup header length and prepare for ULP checksum done in IP. 2101 * 2102 * We make it easy for IP to include our pseudo header 2103 * by putting our length in uha_checksum. 2104 * The IP source, destination, and length have already been set by 2105 * conn_prepend_hdr. 
2106 */ 2107 cksum += data_len; 2108 cksum = (cksum >> 16) + (cksum & 0xFFFF); 2109 ASSERT(cksum < 0x10000); 2110 2111 if (ixa->ixa_flags & IXAF_IS_IPV4) { 2112 ipha_t *ipha = (ipha_t *)mp->b_rptr; 2113 2114 ASSERT(ntohs(ipha->ipha_length) == ixa->ixa_pktlen); 2115 2116 /* IP does the checksum if uha_checksum is non-zero */ 2117 if (us->us_do_checksum) { 2118 if (cksum == 0) 2119 udpha->uha_checksum = 0xffff; 2120 else 2121 udpha->uha_checksum = htons(cksum); 2122 } else { 2123 udpha->uha_checksum = 0; 2124 } 2125 } else { 2126 ip6_t *ip6h = (ip6_t *)mp->b_rptr; 2127 2128 ASSERT(ntohs(ip6h->ip6_plen) + IPV6_HDR_LEN == ixa->ixa_pktlen); 2129 if (cksum == 0) 2130 udpha->uha_checksum = 0xffff; 2131 else 2132 udpha->uha_checksum = htons(cksum); 2133 } 2134 2135 /* Insert all-0s SPI now. */ 2136 if (insert_spi) 2137 *((uint32_t *)(udpha + 1)) = 0; 2138 2139 return (mp); 2140 } 2141 2142 static int 2143 udp_build_hdr_template(conn_t *connp, const in6_addr_t *v6src, 2144 const in6_addr_t *v6dst, in_port_t dstport, uint32_t flowinfo) 2145 { 2146 udpha_t *udpha; 2147 int error; 2148 2149 ASSERT(MUTEX_HELD(&connp->conn_lock)); 2150 /* 2151 * We clear lastdst to make sure we don't use the lastdst path 2152 * next time sending since we might not have set v6dst yet. 2153 */ 2154 connp->conn_v6lastdst = ipv6_all_zeros; 2155 2156 error = conn_build_hdr_template(connp, UDPH_SIZE, 0, v6src, v6dst, 2157 flowinfo); 2158 if (error != 0) 2159 return (error); 2160 2161 /* 2162 * Any routing header/option has been massaged. The checksum difference 2163 * is stored in conn_sum. 2164 */ 2165 udpha = (udpha_t *)connp->conn_ht_ulp; 2166 udpha->uha_src_port = connp->conn_lport; 2167 udpha->uha_dst_port = dstport; 2168 udpha->uha_checksum = 0; 2169 udpha->uha_length = htons(UDPH_SIZE); /* Filled in later */ 2170 return (0); 2171 } 2172 2173 static mblk_t * 2174 udp_queue_fallback(udp_t *udp, mblk_t *mp) 2175 { 2176 ASSERT(MUTEX_HELD(&udp->udp_recv_lock)); 2177 if (IPCL_IS_NONSTR(udp->udp_connp)) { 2178 /* 2179 * fallback has started but messages have not been moved yet 2180 */ 2181 if (udp->udp_fallback_queue_head == NULL) { 2182 ASSERT(udp->udp_fallback_queue_tail == NULL); 2183 udp->udp_fallback_queue_head = mp; 2184 udp->udp_fallback_queue_tail = mp; 2185 } else { 2186 ASSERT(udp->udp_fallback_queue_tail != NULL); 2187 udp->udp_fallback_queue_tail->b_next = mp; 2188 udp->udp_fallback_queue_tail = mp; 2189 } 2190 return (NULL); 2191 } else { 2192 /* 2193 * Fallback completed, let the caller putnext() the mblk. 2194 */ 2195 return (mp); 2196 } 2197 } 2198 2199 /* 2200 * Deliver data to ULP. In case we have a socket, and it's falling back to 2201 * TPI, then we'll queue the mp for later processing. 
2202 */ 2203 static void 2204 udp_ulp_recv(conn_t *connp, mblk_t *mp, uint_t len, ip_recv_attr_t *ira) 2205 { 2206 if (IPCL_IS_NONSTR(connp)) { 2207 udp_t *udp = connp->conn_udp; 2208 int error; 2209 2210 ASSERT(len == msgdsize(mp)); 2211 if ((*connp->conn_upcalls->su_recv) 2212 (connp->conn_upper_handle, mp, len, 0, &error, NULL) < 0) { 2213 mutex_enter(&udp->udp_recv_lock); 2214 if (error == ENOSPC) { 2215 /* 2216 * let's confirm while holding the lock 2217 */ 2218 if ((*connp->conn_upcalls->su_recv) 2219 (connp->conn_upper_handle, NULL, 0, 0, 2220 &error, NULL) < 0) { 2221 ASSERT(error == ENOSPC); 2222 if (error == ENOSPC) { 2223 connp->conn_flow_cntrld = 2224 B_TRUE; 2225 } 2226 } 2227 mutex_exit(&udp->udp_recv_lock); 2228 } else { 2229 ASSERT(error == EOPNOTSUPP); 2230 mp = udp_queue_fallback(udp, mp); 2231 mutex_exit(&udp->udp_recv_lock); 2232 if (mp != NULL) 2233 putnext(connp->conn_rq, mp); 2234 } 2235 } 2236 ASSERT(MUTEX_NOT_HELD(&udp->udp_recv_lock)); 2237 } else { 2238 if (is_system_labeled()) { 2239 ASSERT(ira->ira_cred != NULL); 2240 /* 2241 * Provide for protocols above UDP such as RPC 2242 * NOPID leaves db_cpid unchanged. 2243 */ 2244 mblk_setcred(mp, ira->ira_cred, NOPID); 2245 } 2246 2247 putnext(connp->conn_rq, mp); 2248 } 2249 } 2250 2251 /* 2252 * This is the inbound data path. 2253 * IP has already pulled up the IP plus UDP headers and verified alignment 2254 * etc. 2255 */ 2256 /* ARGSUSED2 */ 2257 static void 2258 udp_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira) 2259 { 2260 conn_t *connp = (conn_t *)arg1; 2261 struct T_unitdata_ind *tudi; 2262 uchar_t *rptr; /* Pointer to IP header */ 2263 int hdr_length; /* Length of IP+UDP headers */ 2264 int udi_size; /* Size of T_unitdata_ind */ 2265 int pkt_len; 2266 udp_t *udp; 2267 udpha_t *udpha; 2268 ip_pkt_t ipps; 2269 ip6_t *ip6h; 2270 mblk_t *mp1; 2271 uint32_t udp_ipv4_options_len; 2272 crb_t recv_ancillary; 2273 udp_stack_t *us; 2274 conn_t *new = NULL; 2275 2276 ASSERT(connp->conn_flags & IPCL_UDPCONN); 2277 2278 mutex_enter(&connp->conn_lock); 2279 if (connp->conn_reuselist != NULL) { 2280 struct reuselist *reusep = connp->conn_reuselist; 2281 int i; 2282 2283 /* 2284 * we have to balance the request between multiple sockets. 2285 * Currently we do this in a round-robin fashion. In the 2286 * reuselist we maintain a pointer to the last receiver. 2287 * TODO: we can add a check if the conn is full and skip to 2288 * the next. 2289 */ 2290 mutex_enter(&reusep->ru_lock); 2291 i = reusep->ru_next; 2292 new = reusep->ru_conns[i]; 2293 if (++i == reusep->ru_entries) 2294 i = 0; 2295 reusep->ru_next = i; 2296 if (new == connp) 2297 new = NULL; 2298 else 2299 CONN_INC_REF(new); 2300 mutex_exit(&reusep->ru_lock); 2301 mutex_exit(&connp->conn_lock); 2302 if (new != NULL) 2303 connp = new; 2304 } else { 2305 mutex_exit(&connp->conn_lock); 2306 } 2307 2308 udp = connp->conn_udp; 2309 us = udp->udp_us; 2310 rptr = mp->b_rptr; 2311 2312 ASSERT(DB_TYPE(mp) == M_DATA); 2313 ASSERT(OK_32PTR(rptr)); 2314 ASSERT(ira->ira_pktlen == msgdsize(mp)); 2315 pkt_len = ira->ira_pktlen; 2316 2317 /* 2318 * Get a snapshot of these and allow other threads to change 2319 * them after that. We need the same recv_ancillary when determining 2320 * the size as when adding the ancillary data items. 
2321 */ 2322 mutex_enter(&connp->conn_lock); 2323 udp_ipv4_options_len = udp->udp_recv_ipp.ipp_ipv4_options_len; 2324 recv_ancillary = connp->conn_recv_ancillary; 2325 mutex_exit(&connp->conn_lock); 2326 2327 hdr_length = ira->ira_ip_hdr_length; 2328 2329 /* 2330 * IP inspected the UDP header thus all of it must be in the mblk. 2331 * UDP length check is performed for IPv6 packets and IPv4 packets 2332 * to check if the size of the packet as specified 2333 * by the UDP header is the same as the length derived from the IP 2334 * header. 2335 */ 2336 udpha = (udpha_t *)(rptr + hdr_length); 2337 if (pkt_len != ntohs(udpha->uha_length) + hdr_length) 2338 goto tossit; 2339 2340 hdr_length += UDPH_SIZE; 2341 ASSERT(MBLKL(mp) >= hdr_length); /* IP did a pullup */ 2342 2343 /* Initialize regardless of IP version */ 2344 ipps.ipp_fields = 0; 2345 2346 if (((ira->ira_flags & IRAF_IPV4_OPTIONS) || 2347 udp_ipv4_options_len > 0) && 2348 connp->conn_family == AF_INET) { 2349 int err; 2350 2351 /* 2352 * Record/update udp_recv_ipp with the lock 2353 * held. Not needed for AF_INET6 sockets 2354 * since they don't support a getsockopt of IP_OPTIONS. 2355 */ 2356 mutex_enter(&connp->conn_lock); 2357 err = ip_find_hdr_v4((ipha_t *)rptr, &udp->udp_recv_ipp, 2358 B_TRUE); 2359 if (err != 0) { 2360 /* Allocation failed. Drop packet */ 2361 mutex_exit(&connp->conn_lock); 2362 goto tossit; 2363 } 2364 mutex_exit(&connp->conn_lock); 2365 } 2366 2367 if (recv_ancillary.crb_all != 0) { 2368 /* 2369 * Record packet information in the ip_pkt_t 2370 */ 2371 if (ira->ira_flags & IRAF_IS_IPV4) { 2372 ASSERT(IPH_HDR_VERSION(rptr) == IPV4_VERSION); 2373 ASSERT(MBLKL(mp) >= sizeof (ipha_t)); 2374 ASSERT(((ipha_t *)rptr)->ipha_protocol == IPPROTO_UDP); 2375 ASSERT(ira->ira_ip_hdr_length == IPH_HDR_LENGTH(rptr)); 2376 2377 (void) ip_find_hdr_v4((ipha_t *)rptr, &ipps, B_FALSE); 2378 } else { 2379 uint8_t nexthdrp; 2380 2381 ASSERT(IPH_HDR_VERSION(rptr) == IPV6_VERSION); 2382 /* 2383 * IPv6 packets can only be received by applications 2384 * that are prepared to receive IPv6 addresses. 2385 * The IP fanout must ensure this. 2386 */ 2387 ASSERT(connp->conn_family == AF_INET6); 2388 2389 ip6h = (ip6_t *)rptr; 2390 2391 /* We don't care about the length, but need the ipp */ 2392 hdr_length = ip_find_hdr_v6(mp, ip6h, B_TRUE, &ipps, 2393 &nexthdrp); 2394 ASSERT(hdr_length == ira->ira_ip_hdr_length); 2395 /* Restore */ 2396 hdr_length = ira->ira_ip_hdr_length + UDPH_SIZE; 2397 ASSERT(nexthdrp == IPPROTO_UDP); 2398 } 2399 } 2400 2401 /* 2402 * This is the inbound data path. Packets are passed upstream as 2403 * T_UNITDATA_IND messages. 2404 */ 2405 if (connp->conn_family == AF_INET) { 2406 sin_t *sin; 2407 2408 ASSERT(IPH_HDR_VERSION((ipha_t *)rptr) == IPV4_VERSION); 2409 2410 /* 2411 * Normally only send up the source address. 2412 * If any ancillary data items are wanted we add those. 2413 */ 2414 udi_size = sizeof (struct T_unitdata_ind) + sizeof (sin_t); 2415 if (recv_ancillary.crb_all != 0) { 2416 udi_size += conn_recvancillary_size(connp, 2417 recv_ancillary, ira, mp, &ipps); 2418 } 2419 2420 /* Allocate a message block for the T_UNITDATA_IND structure. 
*/ 2421 mp1 = allocb(udi_size, BPRI_MED); 2422 if (mp1 == NULL) 2423 goto tossit; 2424 mp1->b_cont = mp; 2425 mp1->b_datap->db_type = M_PROTO; 2426 tudi = (struct T_unitdata_ind *)mp1->b_rptr; 2427 mp1->b_wptr = (uchar_t *)tudi + udi_size; 2428 tudi->PRIM_type = T_UNITDATA_IND; 2429 tudi->SRC_length = sizeof (sin_t); 2430 tudi->SRC_offset = sizeof (struct T_unitdata_ind); 2431 tudi->OPT_offset = sizeof (struct T_unitdata_ind) + 2432 sizeof (sin_t); 2433 udi_size -= (sizeof (struct T_unitdata_ind) + sizeof (sin_t)); 2434 tudi->OPT_length = udi_size; 2435 sin = (sin_t *)&tudi[1]; 2436 sin->sin_addr.s_addr = ((ipha_t *)rptr)->ipha_src; 2437 sin->sin_port = udpha->uha_src_port; 2438 sin->sin_family = connp->conn_family; 2439 *(uint32_t *)&sin->sin_zero[0] = 0; 2440 *(uint32_t *)&sin->sin_zero[4] = 0; 2441 2442 /* 2443 * Add options if IP_RECVDSTADDR, IP_RECVIF, IP_RECVSLLA or 2444 * IP_RECVTTL has been set. 2445 */ 2446 if (udi_size != 0) { 2447 conn_recvancillary_add(connp, recv_ancillary, ira, 2448 &ipps, (uchar_t *)&sin[1], udi_size); 2449 } 2450 } else { 2451 sin6_t *sin6; 2452 2453 /* 2454 * Handle both IPv4 and IPv6 packets for IPv6 sockets. 2455 * 2456 * Normally we only send up the address. If receiving of any 2457 * optional receive side information is enabled, we also send 2458 * that up as options. 2459 */ 2460 udi_size = sizeof (struct T_unitdata_ind) + sizeof (sin6_t); 2461 2462 if (recv_ancillary.crb_all != 0) { 2463 udi_size += conn_recvancillary_size(connp, 2464 recv_ancillary, ira, mp, &ipps); 2465 } 2466 2467 mp1 = allocb(udi_size, BPRI_MED); 2468 if (mp1 == NULL) 2469 goto tossit; 2470 mp1->b_cont = mp; 2471 mp1->b_datap->db_type = M_PROTO; 2472 tudi = (struct T_unitdata_ind *)mp1->b_rptr; 2473 mp1->b_wptr = (uchar_t *)tudi + udi_size; 2474 tudi->PRIM_type = T_UNITDATA_IND; 2475 tudi->SRC_length = sizeof (sin6_t); 2476 tudi->SRC_offset = sizeof (struct T_unitdata_ind); 2477 tudi->OPT_offset = sizeof (struct T_unitdata_ind) + 2478 sizeof (sin6_t); 2479 udi_size -= (sizeof (struct T_unitdata_ind) + sizeof (sin6_t)); 2480 tudi->OPT_length = udi_size; 2481 sin6 = (sin6_t *)&tudi[1]; 2482 if (ira->ira_flags & IRAF_IS_IPV4) { 2483 in6_addr_t v6dst; 2484 2485 IN6_IPADDR_TO_V4MAPPED(((ipha_t *)rptr)->ipha_src, 2486 &sin6->sin6_addr); 2487 IN6_IPADDR_TO_V4MAPPED(((ipha_t *)rptr)->ipha_dst, 2488 &v6dst); 2489 sin6->sin6_flowinfo = 0; 2490 sin6->sin6_scope_id = 0; 2491 sin6->__sin6_src_id = ip_srcid_find_addr(&v6dst, 2492 IPCL_ZONEID(connp), us->us_netstack); 2493 } else { 2494 ip6h = (ip6_t *)rptr; 2495 2496 sin6->sin6_addr = ip6h->ip6_src; 2497 /* No sin6_flowinfo per API */ 2498 sin6->sin6_flowinfo = 0; 2499 /* For link-scope pass up scope id */ 2500 if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) 2501 sin6->sin6_scope_id = ira->ira_ruifindex; 2502 else 2503 sin6->sin6_scope_id = 0; 2504 sin6->__sin6_src_id = ip_srcid_find_addr( 2505 &ip6h->ip6_dst, IPCL_ZONEID(connp), 2506 us->us_netstack); 2507 } 2508 sin6->sin6_port = udpha->uha_src_port; 2509 sin6->sin6_family = connp->conn_family; 2510 2511 if (udi_size != 0) { 2512 conn_recvancillary_add(connp, recv_ancillary, ira, 2513 &ipps, (uchar_t *)&sin6[1], udi_size); 2514 } 2515 } 2516 2517 /* 2518 * DTrace this UDP input as udp:::receive (this is for IPv4, IPv6 and 2519 * loopback traffic). 2520 */ 2521 DTRACE_UDP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, connp->conn_ixa, 2522 void_ip_t *, rptr, udp_t *, udp, udpha_t *, udpha); 2523 2524 /* Walk past the headers unless IP_RECVHDR was set. 
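When it is set, the IP and UDP headers are left in place and delivered to the application as part of the datagram.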
*/ 2525 if (!udp->udp_rcvhdr) { 2526 mp->b_rptr = rptr + hdr_length; 2527 pkt_len -= hdr_length; 2528 } 2529 2530 UDPS_BUMP_MIB(us, udpHCInDatagrams); 2531 udp_ulp_recv(connp, mp1, pkt_len, ira); 2532 if (new != NULL) 2533 CONN_DEC_REF(new); 2534 return; 2535 2536 tossit: 2537 freemsg(mp); 2538 UDPS_BUMP_MIB(us, udpInErrors); 2539 if (new != NULL) 2540 CONN_DEC_REF(new); 2541 } 2542 2543 /* 2544 * This routine creates a T_UDERROR_IND message and passes it upstream. 2545 * The address and options are copied from the T_UNITDATA_REQ message 2546 * passed in mp. This message is freed. 2547 */ 2548 static void 2549 udp_ud_err(queue_t *q, mblk_t *mp, t_scalar_t err) 2550 { 2551 struct T_unitdata_req *tudr; 2552 mblk_t *mp1; 2553 uchar_t *destaddr; 2554 t_scalar_t destlen; 2555 uchar_t *optaddr; 2556 t_scalar_t optlen; 2557 2558 if ((mp->b_wptr < mp->b_rptr) || 2559 (MBLKL(mp)) < sizeof (struct T_unitdata_req)) { 2560 goto done; 2561 } 2562 tudr = (struct T_unitdata_req *)mp->b_rptr; 2563 destaddr = mp->b_rptr + tudr->DEST_offset; 2564 if (destaddr < mp->b_rptr || destaddr >= mp->b_wptr || 2565 destaddr + tudr->DEST_length < mp->b_rptr || 2566 destaddr + tudr->DEST_length > mp->b_wptr) { 2567 goto done; 2568 } 2569 optaddr = mp->b_rptr + tudr->OPT_offset; 2570 if (optaddr < mp->b_rptr || optaddr >= mp->b_wptr || 2571 optaddr + tudr->OPT_length < mp->b_rptr || 2572 optaddr + tudr->OPT_length > mp->b_wptr) { 2573 goto done; 2574 } 2575 destlen = tudr->DEST_length; 2576 optlen = tudr->OPT_length; 2577 2578 mp1 = mi_tpi_uderror_ind((char *)destaddr, destlen, 2579 (char *)optaddr, optlen, err); 2580 if (mp1 != NULL) 2581 qreply(q, mp1); 2582 2583 done: 2584 freemsg(mp); 2585 } 2586 2587 /* 2588 * This routine removes a port number association from a stream. It 2589 * is called by udp_wput to handle T_UNBIND_REQ messages. 2590 */ 2591 static void 2592 udp_tpi_unbind(queue_t *q, mblk_t *mp) 2593 { 2594 conn_t *connp = Q_TO_CONN(q); 2595 int error; 2596 2597 error = udp_do_unbind(connp); 2598 if (error) { 2599 if (error < 0) 2600 udp_err_ack(q, mp, -error, 0); 2601 else 2602 udp_err_ack(q, mp, TSYSERR, error); 2603 return; 2604 } 2605 2606 mp = mi_tpi_ok_ack_alloc(mp); 2607 ASSERT(mp != NULL); 2608 ASSERT(((struct T_ok_ack *)mp->b_rptr)->PRIM_type == T_OK_ACK); 2609 qreply(q, mp); 2610 } 2611 2612 /* 2613 * Don't let port fall into the privileged range. 2614 * Since the extra privileged ports can be arbitrary we also 2615 * ensure that we exclude those from consideration. 2616 * us->us_epriv_ports is not sorted thus we loop over it until 2617 * there are no changes. 2618 */ 2619 static in_port_t 2620 udp_update_next_port(udp_t *udp, in_port_t port, boolean_t random) 2621 { 2622 int i, bump; 2623 in_port_t nextport; 2624 boolean_t restart = B_FALSE; 2625 udp_stack_t *us = udp->udp_us; 2626 2627 if (random && udp_random_anon_port != 0) { 2628 (void) random_get_pseudo_bytes((uint8_t *)&port, 2629 sizeof (in_port_t)); 2630 /* 2631 * Unless changed by a sys admin, the smallest anon port 2632 * is 32768 and the largest anon port is 65535. It is 2633 * very likely (50%) for the random port to be smaller 2634 * than the smallest anon port. When that happens, 2635 * add port % (anon port range) to the smallest anon 2636 * port to get the random port. It should fall into the 2637 * valid anon port range. 
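* For example, with the default range [32768, 65535] a random
* value of 1234 maps to 32768 + (1234 % 32767) = 34002.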
2638 */ 2639 if ((port < us->us_smallest_anon_port) || 2640 (port > us->us_largest_anon_port)) { 2641 if (us->us_smallest_anon_port == 2642 us->us_largest_anon_port) { 2643 bump = 0; 2644 } else { 2645 bump = port % (us->us_largest_anon_port - 2646 us->us_smallest_anon_port); 2647 } 2648 2649 port = us->us_smallest_anon_port + bump; 2650 } 2651 } 2652 2653 retry: 2654 if (port < us->us_smallest_anon_port) 2655 port = us->us_smallest_anon_port; 2656 2657 if (port > us->us_largest_anon_port) { 2658 port = us->us_smallest_anon_port; 2659 if (restart) 2660 return (0); 2661 restart = B_TRUE; 2662 } 2663 2664 if (port < us->us_smallest_nonpriv_port) 2665 port = us->us_smallest_nonpriv_port; 2666 2667 for (i = 0; i < us->us_num_epriv_ports; i++) { 2668 if (port == us->us_epriv_ports[i]) { 2669 port++; 2670 /* 2671 * Make sure that the port is in the 2672 * valid range. 2673 */ 2674 goto retry; 2675 } 2676 } 2677 2678 if (is_system_labeled() && 2679 (nextport = tsol_next_port(crgetzone(udp->udp_connp->conn_cred), 2680 port, IPPROTO_UDP, B_TRUE)) != 0) { 2681 port = nextport; 2682 goto retry; 2683 } 2684 2685 return (port); 2686 } 2687 2688 /* 2689 * Handle T_UNITDATA_REQ with options. Both IPv4 and IPv6 2690 * Either tudr_mp or msg is set. If tudr_mp we take ancillary data from 2691 * the TPI options, otherwise we take them from msg_control. 2692 * If both sin and sin6 is set it is a connected socket and we use conn_faddr. 2693 * Always consumes mp; never consumes tudr_mp. 2694 */ 2695 static int 2696 udp_output_ancillary(conn_t *connp, sin_t *sin, sin6_t *sin6, mblk_t *mp, 2697 mblk_t *tudr_mp, struct nmsghdr *msg, cred_t *cr, pid_t pid) 2698 { 2699 udp_t *udp = connp->conn_udp; 2700 udp_stack_t *us = udp->udp_us; 2701 int error; 2702 ip_xmit_attr_t *ixa; 2703 ip_pkt_t *ipp; 2704 in6_addr_t v6src; 2705 in6_addr_t v6dst; 2706 in6_addr_t v6nexthop; 2707 in_port_t dstport; 2708 uint32_t flowinfo; 2709 uint_t srcid; 2710 int is_absreq_failure = 0; 2711 conn_opt_arg_t coas, *coa; 2712 2713 ASSERT(tudr_mp != NULL || msg != NULL); 2714 2715 /* 2716 * Get ixa before checking state to handle a disconnect race. 2717 * 2718 * We need an exclusive copy of conn_ixa since the ancillary data 2719 * options might modify it. That copy has no pointers hence we 2720 * need to set them up once we've parsed the ancillary data. 
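* Holding an exclusive copy also lets the option processing below
* update it without taking conn_lock.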
2721 */ 2722 ixa = conn_get_ixa_exclusive(connp); 2723 if (ixa == NULL) { 2724 UDPS_BUMP_MIB(us, udpOutErrors); 2725 freemsg(mp); 2726 return (ENOMEM); 2727 } 2728 ASSERT(cr != NULL); 2729 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 2730 ixa->ixa_cred = cr; 2731 ixa->ixa_cpid = pid; 2732 if (is_system_labeled()) { 2733 /* We need to restart with a label based on the cred */ 2734 ip_xmit_attr_restore_tsl(ixa, ixa->ixa_cred); 2735 } 2736 2737 /* In case previous destination was multicast or multirt */ 2738 ip_attr_newdst(ixa); 2739 2740 /* Get a copy of conn_xmit_ipp since the options might change it */ 2741 ipp = kmem_zalloc(sizeof (*ipp), KM_NOSLEEP); 2742 if (ipp == NULL) { 2743 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 2744 ixa->ixa_cred = connp->conn_cred; /* Restore */ 2745 ixa->ixa_cpid = connp->conn_cpid; 2746 ixa_refrele(ixa); 2747 UDPS_BUMP_MIB(us, udpOutErrors); 2748 freemsg(mp); 2749 return (ENOMEM); 2750 } 2751 mutex_enter(&connp->conn_lock); 2752 error = ip_pkt_copy(&connp->conn_xmit_ipp, ipp, KM_NOSLEEP); 2753 mutex_exit(&connp->conn_lock); 2754 if (error != 0) { 2755 UDPS_BUMP_MIB(us, udpOutErrors); 2756 freemsg(mp); 2757 goto done; 2758 } 2759 2760 /* 2761 * Parse the options and update ixa and ipp as a result. 2762 * Note that ixa_tsl can be updated if SCM_UCRED. 2763 * ixa_refrele/ixa_inactivate will release any reference on ixa_tsl. 2764 */ 2765 2766 coa = &coas; 2767 coa->coa_connp = connp; 2768 coa->coa_ixa = ixa; 2769 coa->coa_ipp = ipp; 2770 coa->coa_ancillary = B_TRUE; 2771 coa->coa_changed = 0; 2772 2773 if (msg != NULL) { 2774 error = process_auxiliary_options(connp, msg->msg_control, 2775 msg->msg_controllen, coa, &udp_opt_obj, udp_opt_set, cr); 2776 } else { 2777 struct T_unitdata_req *tudr; 2778 2779 tudr = (struct T_unitdata_req *)tudr_mp->b_rptr; 2780 ASSERT(tudr->PRIM_type == T_UNITDATA_REQ); 2781 error = tpi_optcom_buf(connp->conn_wq, tudr_mp, 2782 &tudr->OPT_length, tudr->OPT_offset, cr, &udp_opt_obj, 2783 coa, &is_absreq_failure); 2784 } 2785 if (error != 0) { 2786 /* 2787 * Note: No special action needed in this 2788 * module for "is_absreq_failure" 2789 */ 2790 freemsg(mp); 2791 UDPS_BUMP_MIB(us, udpOutErrors); 2792 goto done; 2793 } 2794 ASSERT(is_absreq_failure == 0); 2795 2796 mutex_enter(&connp->conn_lock); 2797 /* 2798 * If laddr is unspecified then we look at sin6_src_id. 2799 * We will give precedence to a source address set with IPV6_PKTINFO 2800 * (aka IPPF_ADDR) but that is handled in build_hdrs. However, we don't 2801 * want ip_attr_connect to select a source (since it can fail) when 2802 * IPV6_PKTINFO is specified. 2803 * If this doesn't result in a source address then we get a source 2804 * from ip_attr_connect() below. 
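* In short: IPV6_PKTINFO wins, then the bound local address, then
* sin6_src_id, then whatever ip_attr_connect() selects.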
2805 */ 2806 v6src = connp->conn_saddr_v6; 2807 if (sin != NULL) { 2808 IN6_IPADDR_TO_V4MAPPED(sin->sin_addr.s_addr, &v6dst); 2809 dstport = sin->sin_port; 2810 flowinfo = 0; 2811 ixa->ixa_flags &= ~IXAF_SCOPEID_SET; 2812 ixa->ixa_flags |= IXAF_IS_IPV4; 2813 } else if (sin6 != NULL) { 2814 boolean_t v4mapped; 2815 2816 v6dst = sin6->sin6_addr; 2817 dstport = sin6->sin6_port; 2818 flowinfo = sin6->sin6_flowinfo; 2819 srcid = sin6->__sin6_src_id; 2820 if (IN6_IS_ADDR_LINKSCOPE(&v6dst) && sin6->sin6_scope_id != 0) { 2821 ixa->ixa_scopeid = sin6->sin6_scope_id; 2822 ixa->ixa_flags |= IXAF_SCOPEID_SET; 2823 } else { 2824 ixa->ixa_flags &= ~IXAF_SCOPEID_SET; 2825 } 2826 v4mapped = IN6_IS_ADDR_V4MAPPED(&v6dst); 2827 if (v4mapped) 2828 ixa->ixa_flags |= IXAF_IS_IPV4; 2829 else 2830 ixa->ixa_flags &= ~IXAF_IS_IPV4; 2831 if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&v6src)) { 2832 if (!ip_srcid_find_id(srcid, &v6src, IPCL_ZONEID(connp), 2833 v4mapped, connp->conn_netstack)) { 2834 /* Mismatch - v4mapped/v6 specified by srcid. */ 2835 mutex_exit(&connp->conn_lock); 2836 error = EADDRNOTAVAIL; 2837 goto failed; /* Does freemsg() and mib. */ 2838 } 2839 } 2840 } else { 2841 /* Connected case */ 2842 v6dst = connp->conn_faddr_v6; 2843 dstport = connp->conn_fport; 2844 flowinfo = connp->conn_flowinfo; 2845 } 2846 mutex_exit(&connp->conn_lock); 2847 2848 /* Handle IP_PKTINFO/IPV6_PKTINFO setting source address. */ 2849 if (ipp->ipp_fields & IPPF_ADDR) { 2850 if (ixa->ixa_flags & IXAF_IS_IPV4) { 2851 if (IN6_IS_ADDR_V4MAPPED(&ipp->ipp_addr)) 2852 v6src = ipp->ipp_addr; 2853 } else { 2854 if (!IN6_IS_ADDR_V4MAPPED(&ipp->ipp_addr)) 2855 v6src = ipp->ipp_addr; 2856 } 2857 } 2858 2859 ip_attr_nexthop(ipp, ixa, &v6dst, &v6nexthop); 2860 error = ip_attr_connect(connp, ixa, &v6src, &v6dst, &v6nexthop, dstport, 2861 &v6src, NULL, IPDF_ALLOW_MCBC | IPDF_VERIFY_DST | IPDF_IPSEC); 2862 2863 switch (error) { 2864 case 0: 2865 break; 2866 case EADDRNOTAVAIL: 2867 /* 2868 * IXAF_VERIFY_SOURCE tells us to pick a better source. 2869 * Don't have the application see that errno 2870 */ 2871 error = ENETUNREACH; 2872 goto failed; 2873 case ENETDOWN: 2874 /* 2875 * Have !ipif_addr_ready address; drop packet silently 2876 * until we can get applications to not send until we 2877 * are ready. 2878 */ 2879 error = 0; 2880 goto failed; 2881 case EHOSTUNREACH: 2882 case ENETUNREACH: 2883 if (ixa->ixa_ire != NULL) { 2884 /* 2885 * Let conn_ip_output/ire_send_noroute return 2886 * the error and send any local ICMP error. 2887 */ 2888 error = 0; 2889 break; 2890 } 2891 /* FALLTHRU */ 2892 default: 2893 failed: 2894 freemsg(mp); 2895 UDPS_BUMP_MIB(us, udpOutErrors); 2896 goto done; 2897 } 2898 2899 /* 2900 * We might be going to a different destination than last time, 2901 * thus check that TX allows the communication and compute any 2902 * needed label. 2903 * 2904 * TSOL Note: We have an exclusive ipp and ixa for this thread so we 2905 * don't have to worry about concurrent threads. 2906 */ 2907 if (is_system_labeled()) { 2908 /* Using UDP MLP requires SCM_UCRED from user */ 2909 if (connp->conn_mlp_type != mlptSingle && 2910 !((ixa->ixa_flags & IXAF_UCRED_TSL))) { 2911 UDPS_BUMP_MIB(us, udpOutErrors); 2912 error = ECONNREFUSED; 2913 freemsg(mp); 2914 goto done; 2915 } 2916 /* 2917 * Check whether Trusted Solaris policy allows communication 2918 * with this host, and pretend that the destination is 2919 * unreachable if not. 2920 * Compute any needed label and place it in ipp_label_v4/v6. 
2921 * 2922 * Later conn_build_hdr_template/conn_prepend_hdr takes 2923 * ipp_label_v4/v6 to form the packet. 2924 * 2925 * Tsol note: We have ipp structure local to this thread so 2926 * no locking is needed. 2927 */ 2928 error = conn_update_label(connp, ixa, &v6dst, ipp); 2929 if (error != 0) { 2930 freemsg(mp); 2931 UDPS_BUMP_MIB(us, udpOutErrors); 2932 goto done; 2933 } 2934 } 2935 mp = udp_prepend_hdr(connp, ixa, ipp, &v6src, &v6dst, dstport, 2936 flowinfo, mp, &error); 2937 if (mp == NULL) { 2938 ASSERT(error != 0); 2939 UDPS_BUMP_MIB(us, udpOutErrors); 2940 goto done; 2941 } 2942 if (ixa->ixa_pktlen > IP_MAXPACKET) { 2943 error = EMSGSIZE; 2944 UDPS_BUMP_MIB(us, udpOutErrors); 2945 freemsg(mp); 2946 goto done; 2947 } 2948 /* We're done. Pass the packet to ip. */ 2949 UDPS_BUMP_MIB(us, udpHCOutDatagrams); 2950 2951 DTRACE_UDP5(send, mblk_t *, NULL, ip_xmit_attr_t *, ixa, 2952 void_ip_t *, mp->b_rptr, udp_t *, udp, udpha_t *, 2953 &mp->b_rptr[ixa->ixa_ip_hdr_length]); 2954 2955 error = conn_ip_output(mp, ixa); 2956 /* No udpOutErrors if an error since IP increases its error counter */ 2957 switch (error) { 2958 case 0: 2959 break; 2960 case EWOULDBLOCK: 2961 (void) ixa_check_drain_insert(connp, ixa); 2962 error = 0; 2963 break; 2964 case EADDRNOTAVAIL: 2965 /* 2966 * IXAF_VERIFY_SOURCE tells us to pick a better source. 2967 * Don't have the application see that errno 2968 */ 2969 error = ENETUNREACH; 2970 /* FALLTHRU */ 2971 default: 2972 mutex_enter(&connp->conn_lock); 2973 /* 2974 * Clear the source and v6lastdst so we call ip_attr_connect 2975 * for the next packet and try to pick a better source. 2976 */ 2977 if (connp->conn_mcbc_bind) 2978 connp->conn_saddr_v6 = ipv6_all_zeros; 2979 else 2980 connp->conn_saddr_v6 = connp->conn_bound_addr_v6; 2981 connp->conn_v6lastdst = ipv6_all_zeros; 2982 mutex_exit(&connp->conn_lock); 2983 break; 2984 } 2985 done: 2986 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 2987 ixa->ixa_cred = connp->conn_cred; /* Restore */ 2988 ixa->ixa_cpid = connp->conn_cpid; 2989 ixa_refrele(ixa); 2990 ip_pkt_free(ipp); 2991 kmem_free(ipp, sizeof (*ipp)); 2992 return (error); 2993 } 2994 2995 /* 2996 * Handle sending an M_DATA for a connected socket. 2997 * Handles both IPv4 and IPv6. 2998 */ 2999 static int 3000 udp_output_connected(conn_t *connp, mblk_t *mp, cred_t *cr, pid_t pid) 3001 { 3002 udp_t *udp = connp->conn_udp; 3003 udp_stack_t *us = udp->udp_us; 3004 int error; 3005 ip_xmit_attr_t *ixa; 3006 3007 /* 3008 * If no other thread is using conn_ixa this just gets a reference to 3009 * conn_ixa. Otherwise we get a safe copy of conn_ixa. 
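* Either way the reference is dropped with ixa_refrele() once the
* packet has been handed to IP.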
3010 */ 3011 ixa = conn_get_ixa(connp, B_FALSE); 3012 if (ixa == NULL) { 3013 UDPS_BUMP_MIB(us, udpOutErrors); 3014 freemsg(mp); 3015 return (ENOMEM); 3016 } 3017 3018 ASSERT(cr != NULL); 3019 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3020 ixa->ixa_cred = cr; 3021 ixa->ixa_cpid = pid; 3022 3023 mutex_enter(&connp->conn_lock); 3024 mp = udp_prepend_header_template(connp, ixa, mp, &connp->conn_saddr_v6, 3025 connp->conn_fport, connp->conn_flowinfo, &error); 3026 3027 if (mp == NULL) { 3028 ASSERT(error != 0); 3029 mutex_exit(&connp->conn_lock); 3030 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3031 ixa->ixa_cred = connp->conn_cred; /* Restore */ 3032 ixa->ixa_cpid = connp->conn_cpid; 3033 ixa_refrele(ixa); 3034 UDPS_BUMP_MIB(us, udpOutErrors); 3035 freemsg(mp); 3036 return (error); 3037 } 3038 3039 /* 3040 * In case we got a safe copy of conn_ixa, or if opt_set made us a new 3041 * safe copy, then we need to fill in any pointers in it. 3042 */ 3043 if (ixa->ixa_ire == NULL) { 3044 in6_addr_t faddr, saddr; 3045 in6_addr_t nexthop; 3046 in_port_t fport; 3047 3048 saddr = connp->conn_saddr_v6; 3049 faddr = connp->conn_faddr_v6; 3050 fport = connp->conn_fport; 3051 ip_attr_nexthop(&connp->conn_xmit_ipp, ixa, &faddr, &nexthop); 3052 mutex_exit(&connp->conn_lock); 3053 3054 error = ip_attr_connect(connp, ixa, &saddr, &faddr, &nexthop, 3055 fport, NULL, NULL, IPDF_ALLOW_MCBC | IPDF_VERIFY_DST | 3056 IPDF_IPSEC); 3057 switch (error) { 3058 case 0: 3059 break; 3060 case EADDRNOTAVAIL: 3061 /* 3062 * IXAF_VERIFY_SOURCE tells us to pick a better source. 3063 * Don't have the application see that errno 3064 */ 3065 error = ENETUNREACH; 3066 goto failed; 3067 case ENETDOWN: 3068 /* 3069 * Have !ipif_addr_ready address; drop packet silently 3070 * until we can get applications to not send until we 3071 * are ready. 3072 */ 3073 error = 0; 3074 goto failed; 3075 case EHOSTUNREACH: 3076 case ENETUNREACH: 3077 if (ixa->ixa_ire != NULL) { 3078 /* 3079 * Let conn_ip_output/ire_send_noroute return 3080 * the error and send any local ICMP error. 3081 */ 3082 error = 0; 3083 break; 3084 } 3085 /* FALLTHRU */ 3086 default: 3087 failed: 3088 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3089 ixa->ixa_cred = connp->conn_cred; /* Restore */ 3090 ixa->ixa_cpid = connp->conn_cpid; 3091 ixa_refrele(ixa); 3092 freemsg(mp); 3093 UDPS_BUMP_MIB(us, udpOutErrors); 3094 return (error); 3095 } 3096 } else { 3097 /* Done with conn_t */ 3098 mutex_exit(&connp->conn_lock); 3099 } 3100 ASSERT(ixa->ixa_ire != NULL); 3101 3102 /* We're done. Pass the packet to ip. */ 3103 UDPS_BUMP_MIB(us, udpHCOutDatagrams); 3104 3105 DTRACE_UDP5(send, mblk_t *, NULL, ip_xmit_attr_t *, ixa, 3106 void_ip_t *, mp->b_rptr, udp_t *, udp, udpha_t *, 3107 &mp->b_rptr[ixa->ixa_ip_hdr_length]); 3108 3109 error = conn_ip_output(mp, ixa); 3110 /* No udpOutErrors if an error since IP increases its error counter */ 3111 switch (error) { 3112 case 0: 3113 break; 3114 case EWOULDBLOCK: 3115 (void) ixa_check_drain_insert(connp, ixa); 3116 error = 0; 3117 break; 3118 case EADDRNOTAVAIL: 3119 /* 3120 * IXAF_VERIFY_SOURCE tells us to pick a better source. 3121 * Don't have the application see that errno 3122 */ 3123 error = ENETUNREACH; 3124 break; 3125 } 3126 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3127 ixa->ixa_cred = connp->conn_cred; /* Restore */ 3128 ixa->ixa_cpid = connp->conn_cpid; 3129 ixa_refrele(ixa); 3130 return (error); 3131 } 3132 3133 /* 3134 * Handle sending an M_DATA to the last destination. 3135 * Handles both IPv4 and IPv6. 
3136 * 3137 * NOTE: The caller must hold conn_lock and we drop it here. 3138 */ 3139 static int 3140 udp_output_lastdst(conn_t *connp, mblk_t *mp, cred_t *cr, pid_t pid, 3141 ip_xmit_attr_t *ixa) 3142 { 3143 udp_t *udp = connp->conn_udp; 3144 udp_stack_t *us = udp->udp_us; 3145 int error; 3146 3147 ASSERT(MUTEX_HELD(&connp->conn_lock)); 3148 ASSERT(ixa != NULL); 3149 3150 ASSERT(cr != NULL); 3151 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3152 ixa->ixa_cred = cr; 3153 ixa->ixa_cpid = pid; 3154 3155 mp = udp_prepend_header_template(connp, ixa, mp, &connp->conn_v6lastsrc, 3156 connp->conn_lastdstport, connp->conn_lastflowinfo, &error); 3157 3158 if (mp == NULL) { 3159 ASSERT(error != 0); 3160 mutex_exit(&connp->conn_lock); 3161 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3162 ixa->ixa_cred = connp->conn_cred; /* Restore */ 3163 ixa->ixa_cpid = connp->conn_cpid; 3164 ixa_refrele(ixa); 3165 UDPS_BUMP_MIB(us, udpOutErrors); 3166 freemsg(mp); 3167 return (error); 3168 } 3169 3170 /* 3171 * In case we got a safe copy of conn_ixa, or if opt_set made us a new 3172 * safe copy, then we need to fill in any pointers in it. 3173 */ 3174 if (ixa->ixa_ire == NULL) { 3175 in6_addr_t lastdst, lastsrc; 3176 in6_addr_t nexthop; 3177 in_port_t lastport; 3178 3179 lastsrc = connp->conn_v6lastsrc; 3180 lastdst = connp->conn_v6lastdst; 3181 lastport = connp->conn_lastdstport; 3182 ip_attr_nexthop(&connp->conn_xmit_ipp, ixa, &lastdst, &nexthop); 3183 mutex_exit(&connp->conn_lock); 3184 3185 error = ip_attr_connect(connp, ixa, &lastsrc, &lastdst, 3186 &nexthop, lastport, NULL, NULL, IPDF_ALLOW_MCBC | 3187 IPDF_VERIFY_DST | IPDF_IPSEC); 3188 switch (error) { 3189 case 0: 3190 break; 3191 case EADDRNOTAVAIL: 3192 /* 3193 * IXAF_VERIFY_SOURCE tells us to pick a better source. 3194 * Don't have the application see that errno 3195 */ 3196 error = ENETUNREACH; 3197 goto failed; 3198 case ENETDOWN: 3199 /* 3200 * Have !ipif_addr_ready address; drop packet silently 3201 * until we can get applications to not send until we 3202 * are ready. 3203 */ 3204 error = 0; 3205 goto failed; 3206 case EHOSTUNREACH: 3207 case ENETUNREACH: 3208 if (ixa->ixa_ire != NULL) { 3209 /* 3210 * Let conn_ip_output/ire_send_noroute return 3211 * the error and send any local ICMP error. 3212 */ 3213 error = 0; 3214 break; 3215 } 3216 /* FALLTHRU */ 3217 default: 3218 failed: 3219 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3220 ixa->ixa_cred = connp->conn_cred; /* Restore */ 3221 ixa->ixa_cpid = connp->conn_cpid; 3222 ixa_refrele(ixa); 3223 freemsg(mp); 3224 UDPS_BUMP_MIB(us, udpOutErrors); 3225 return (error); 3226 } 3227 } else { 3228 /* Done with conn_t */ 3229 mutex_exit(&connp->conn_lock); 3230 } 3231 3232 /* We're done. Pass the packet to ip. */ 3233 UDPS_BUMP_MIB(us, udpHCOutDatagrams); 3234 3235 DTRACE_UDP5(send, mblk_t *, NULL, ip_xmit_attr_t *, ixa, 3236 void_ip_t *, mp->b_rptr, udp_t *, udp, udpha_t *, 3237 &mp->b_rptr[ixa->ixa_ip_hdr_length]); 3238 3239 error = conn_ip_output(mp, ixa); 3240 /* No udpOutErrors if an error since IP increases its error counter */ 3241 switch (error) { 3242 case 0: 3243 break; 3244 case EWOULDBLOCK: 3245 (void) ixa_check_drain_insert(connp, ixa); 3246 error = 0; 3247 break; 3248 case EADDRNOTAVAIL: 3249 /* 3250 * IXAF_VERIFY_SOURCE tells us to pick a better source. 
3251 * Don't have the application see that errno 3252 */ 3253 error = ENETUNREACH; 3254 /* FALLTHRU */ 3255 default: 3256 mutex_enter(&connp->conn_lock); 3257 /* 3258 * Clear the source and v6lastdst so we call ip_attr_connect 3259 * for the next packet and try to pick a better source. 3260 */ 3261 if (connp->conn_mcbc_bind) 3262 connp->conn_saddr_v6 = ipv6_all_zeros; 3263 else 3264 connp->conn_saddr_v6 = connp->conn_bound_addr_v6; 3265 connp->conn_v6lastdst = ipv6_all_zeros; 3266 mutex_exit(&connp->conn_lock); 3267 break; 3268 } 3269 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3270 ixa->ixa_cred = connp->conn_cred; /* Restore */ 3271 ixa->ixa_cpid = connp->conn_cpid; 3272 ixa_refrele(ixa); 3273 return (error); 3274 } 3275 3276 3277 /* 3278 * Prepend the header template and then fill in the source and 3279 * flowinfo. The caller needs to handle the destination address since 3280 * it's setting is different if rthdr or source route. 3281 * 3282 * Returns NULL is allocation failed or if the packet would exceed IP_MAXPACKET. 3283 * When it returns NULL it sets errorp. 3284 */ 3285 static mblk_t * 3286 udp_prepend_header_template(conn_t *connp, ip_xmit_attr_t *ixa, mblk_t *mp, 3287 const in6_addr_t *v6src, in_port_t dstport, uint32_t flowinfo, int *errorp) 3288 { 3289 udp_t *udp = connp->conn_udp; 3290 udp_stack_t *us = udp->udp_us; 3291 boolean_t insert_spi = udp->udp_nat_t_endpoint; 3292 uint_t pktlen; 3293 uint_t alloclen; 3294 uint_t copylen; 3295 uint8_t *iph; 3296 uint_t ip_hdr_length; 3297 udpha_t *udpha; 3298 uint32_t cksum; 3299 ip_pkt_t *ipp; 3300 3301 ASSERT(MUTEX_HELD(&connp->conn_lock)); 3302 3303 /* 3304 * Copy the header template and leave space for an SPI 3305 */ 3306 copylen = connp->conn_ht_iphc_len; 3307 alloclen = copylen + (insert_spi ? sizeof (uint32_t) : 0); 3308 pktlen = alloclen + msgdsize(mp); 3309 if (pktlen > IP_MAXPACKET) { 3310 freemsg(mp); 3311 *errorp = EMSGSIZE; 3312 return (NULL); 3313 } 3314 ixa->ixa_pktlen = pktlen; 3315 3316 /* check/fix buffer config, setup pointers into it */ 3317 iph = mp->b_rptr - alloclen; 3318 if (DB_REF(mp) != 1 || iph < DB_BASE(mp) || !OK_32PTR(iph)) { 3319 mblk_t *mp1; 3320 3321 mp1 = allocb(alloclen + us->us_wroff_extra, BPRI_MED); 3322 if (mp1 == NULL) { 3323 freemsg(mp); 3324 *errorp = ENOMEM; 3325 return (NULL); 3326 } 3327 mp1->b_wptr = DB_LIM(mp1); 3328 mp1->b_cont = mp; 3329 mp = mp1; 3330 iph = (mp->b_wptr - alloclen); 3331 } 3332 mp->b_rptr = iph; 3333 bcopy(connp->conn_ht_iphc, iph, copylen); 3334 ip_hdr_length = (uint_t)(connp->conn_ht_ulp - connp->conn_ht_iphc); 3335 3336 ixa->ixa_ip_hdr_length = ip_hdr_length; 3337 udpha = (udpha_t *)(iph + ip_hdr_length); 3338 3339 /* 3340 * Setup header length and prepare for ULP checksum done in IP. 3341 * udp_build_hdr_template has already massaged any routing header 3342 * and placed the result in conn_sum. 3343 * 3344 * We make it easy for IP to include our pseudo header 3345 * by putting our length in uha_checksum. 
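* For IPv4 the UDP checksum is optional, so it is only filled in
* when us_do_checksum is set; for IPv6 it is mandatory and always
* filled in.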
3346 */ 3347 cksum = pktlen - ip_hdr_length; 3348 udpha->uha_length = htons(cksum); 3349 3350 cksum += connp->conn_sum; 3351 cksum = (cksum >> 16) + (cksum & 0xFFFF); 3352 ASSERT(cksum < 0x10000); 3353 3354 ipp = &connp->conn_xmit_ipp; 3355 if (ixa->ixa_flags & IXAF_IS_IPV4) { 3356 ipha_t *ipha = (ipha_t *)iph; 3357 3358 ipha->ipha_length = htons((uint16_t)pktlen); 3359 3360 /* IP does the checksum if uha_checksum is non-zero */ 3361 if (us->us_do_checksum) 3362 udpha->uha_checksum = htons(cksum); 3363 3364 /* if IP_PKTINFO specified an addres it wins over bind() */ 3365 if ((ipp->ipp_fields & IPPF_ADDR) && 3366 IN6_IS_ADDR_V4MAPPED(&ipp->ipp_addr)) { 3367 ASSERT(ipp->ipp_addr_v4 != INADDR_ANY); 3368 ipha->ipha_src = ipp->ipp_addr_v4; 3369 } else { 3370 IN6_V4MAPPED_TO_IPADDR(v6src, ipha->ipha_src); 3371 } 3372 } else { 3373 ip6_t *ip6h = (ip6_t *)iph; 3374 3375 ip6h->ip6_plen = htons((uint16_t)(pktlen - IPV6_HDR_LEN)); 3376 udpha->uha_checksum = htons(cksum); 3377 3378 /* if IP_PKTINFO specified an addres it wins over bind() */ 3379 if ((ipp->ipp_fields & IPPF_ADDR) && 3380 !IN6_IS_ADDR_V4MAPPED(&ipp->ipp_addr)) { 3381 ASSERT(!IN6_IS_ADDR_UNSPECIFIED(&ipp->ipp_addr)); 3382 ip6h->ip6_src = ipp->ipp_addr; 3383 } else { 3384 ip6h->ip6_src = *v6src; 3385 } 3386 ip6h->ip6_vcf = 3387 (IPV6_DEFAULT_VERS_AND_FLOW & IPV6_VERS_AND_FLOW_MASK) | 3388 (flowinfo & ~IPV6_VERS_AND_FLOW_MASK); 3389 if (ipp->ipp_fields & IPPF_TCLASS) { 3390 /* Overrides the class part of flowinfo */ 3391 ip6h->ip6_vcf = IPV6_TCLASS_FLOW(ip6h->ip6_vcf, 3392 ipp->ipp_tclass); 3393 } 3394 } 3395 3396 /* Insert all-0s SPI now. */ 3397 if (insert_spi) 3398 *((uint32_t *)(udpha + 1)) = 0; 3399 3400 udpha->uha_dst_port = dstport; 3401 return (mp); 3402 } 3403 3404 /* 3405 * Send a T_UDERR_IND in response to an M_DATA 3406 */ 3407 static void 3408 udp_ud_err_connected(conn_t *connp, t_scalar_t error) 3409 { 3410 struct sockaddr_storage ss; 3411 sin_t *sin; 3412 sin6_t *sin6; 3413 struct sockaddr *addr; 3414 socklen_t addrlen; 3415 mblk_t *mp1; 3416 3417 mutex_enter(&connp->conn_lock); 3418 /* Initialize addr and addrlen as if they're passed in */ 3419 if (connp->conn_family == AF_INET) { 3420 sin = (sin_t *)&ss; 3421 *sin = sin_null; 3422 sin->sin_family = AF_INET; 3423 sin->sin_port = connp->conn_fport; 3424 sin->sin_addr.s_addr = connp->conn_faddr_v4; 3425 addr = (struct sockaddr *)sin; 3426 addrlen = sizeof (*sin); 3427 } else { 3428 sin6 = (sin6_t *)&ss; 3429 *sin6 = sin6_null; 3430 sin6->sin6_family = AF_INET6; 3431 sin6->sin6_port = connp->conn_fport; 3432 sin6->sin6_flowinfo = connp->conn_flowinfo; 3433 sin6->sin6_addr = connp->conn_faddr_v6; 3434 if (IN6_IS_ADDR_LINKSCOPE(&connp->conn_faddr_v6) && 3435 (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET)) { 3436 sin6->sin6_scope_id = connp->conn_ixa->ixa_scopeid; 3437 } else { 3438 sin6->sin6_scope_id = 0; 3439 } 3440 sin6->__sin6_src_id = 0; 3441 addr = (struct sockaddr *)sin6; 3442 addrlen = sizeof (*sin6); 3443 } 3444 mutex_exit(&connp->conn_lock); 3445 3446 mp1 = mi_tpi_uderror_ind((char *)addr, addrlen, NULL, 0, error); 3447 if (mp1 != NULL) 3448 putnext(connp->conn_rq, mp1); 3449 } 3450 3451 /* 3452 * This routine handles all messages passed downstream. It either 3453 * consumes the message or passes it downstream; it never queues a 3454 * a message. 3455 * 3456 * Also entry point for sockfs when udp is in "direct sockfs" mode. 
This mode 3457 * is valid when we are directly beneath the stream head, and thus sockfs 3458 * is able to bypass STREAMS and directly call us, passing along the sockaddr 3459 * structure without the cumbersome T_UNITDATA_REQ interface for the case of 3460 * connected endpoints. 3461 */ 3462 void 3463 udp_wput(queue_t *q, mblk_t *mp) 3464 { 3465 sin6_t *sin6; 3466 sin_t *sin = NULL; 3467 uint_t srcid; 3468 conn_t *connp = Q_TO_CONN(q); 3469 udp_t *udp = connp->conn_udp; 3470 int error = 0; 3471 struct sockaddr *addr = NULL; 3472 socklen_t addrlen; 3473 udp_stack_t *us = udp->udp_us; 3474 struct T_unitdata_req *tudr; 3475 mblk_t *data_mp; 3476 ushort_t ipversion; 3477 cred_t *cr; 3478 pid_t pid; 3479 3480 /* 3481 * We directly handle several cases here: T_UNITDATA_REQ message 3482 * coming down as M_PROTO/M_PCPROTO and M_DATA messages for connected 3483 * socket. 3484 */ 3485 switch (DB_TYPE(mp)) { 3486 case M_DATA: 3487 if (!udp->udp_issocket || udp->udp_state != TS_DATA_XFER) { 3488 /* Not connected; address is required */ 3489 UDPS_BUMP_MIB(us, udpOutErrors); 3490 UDP_DBGSTAT(us, udp_data_notconn); 3491 UDP_STAT(us, udp_out_err_notconn); 3492 freemsg(mp); 3493 return; 3494 } 3495 /* 3496 * All Solaris components should pass a db_credp 3497 * for this message, hence we ASSERT. 3498 * On production kernels we return an error to be robust against 3499 * random streams modules sitting on top of us. 3500 */ 3501 cr = msg_getcred(mp, &pid); 3502 ASSERT(cr != NULL); 3503 if (cr == NULL) { 3504 UDPS_BUMP_MIB(us, udpOutErrors); 3505 freemsg(mp); 3506 return; 3507 } 3508 ASSERT(udp->udp_issocket); 3509 UDP_DBGSTAT(us, udp_data_conn); 3510 error = udp_output_connected(connp, mp, cr, pid); 3511 if (error != 0) { 3512 UDP_STAT(us, udp_out_err_output); 3513 if (connp->conn_rq != NULL) 3514 udp_ud_err_connected(connp, (t_scalar_t)error); 3515 #ifdef DEBUG 3516 printf("udp_output_connected returned %d\n", error); 3517 #endif 3518 } 3519 return; 3520 3521 case M_PROTO: 3522 case M_PCPROTO: 3523 tudr = (struct T_unitdata_req *)mp->b_rptr; 3524 if (MBLKL(mp) < sizeof (*tudr) || 3525 ((t_primp_t)mp->b_rptr)->type != T_UNITDATA_REQ) { 3526 udp_wput_other(q, mp); 3527 return; 3528 } 3529 break; 3530 3531 default: 3532 udp_wput_other(q, mp); 3533 return; 3534 } 3535 3536 /* Handle valid T_UNITDATA_REQ here */ 3537 data_mp = mp->b_cont; 3538 if (data_mp == NULL) { 3539 error = EPROTO; 3540 goto ud_error2; 3541 } 3542 mp->b_cont = NULL; 3543 3544 if (!MBLKIN(mp, 0, tudr->DEST_offset + tudr->DEST_length)) { 3545 error = EADDRNOTAVAIL; 3546 goto ud_error2; 3547 } 3548 3549 /* 3550 * All Solaris components should pass a db_credp 3551 * for this TPI message, hence we should ASSERT. 3552 * However, RPC (svc_clts_ksend) does this odd thing where it 3553 * passes the options from a T_UNITDATA_IND unchanged in a 3554 * T_UNITDATA_REQ. While that is the right thing to do for 3555 * some options, SCM_UCRED being the key one, this also makes it 3556 * pass down IP_RECVDSTADDR. Hence we can't ASSERT here. 3557 */ 3558 cr = msg_getcred(mp, &pid); 3559 if (cr == NULL) { 3560 cr = connp->conn_cred; 3561 pid = connp->conn_cpid; 3562 } 3563 3564 /* 3565 * If a port has not been bound to the stream, fail. 3566 * This is not a problem when sockfs is directly 3567 * above us, because it will ensure that the socket 3568 * is first bound before allowing data to be sent. 
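* A T_UNITDATA_REQ on an unbound endpoint therefore fails with
* EPROTO.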
3569 */ 3570 if (udp->udp_state == TS_UNBND) { 3571 error = EPROTO; 3572 goto ud_error2; 3573 } 3574 addr = (struct sockaddr *)&mp->b_rptr[tudr->DEST_offset]; 3575 addrlen = tudr->DEST_length; 3576 3577 switch (connp->conn_family) { 3578 case AF_INET6: 3579 sin6 = (sin6_t *)addr; 3580 if (!OK_32PTR((char *)sin6) || (addrlen != sizeof (sin6_t)) || 3581 (sin6->sin6_family != AF_INET6)) { 3582 error = EADDRNOTAVAIL; 3583 goto ud_error2; 3584 } 3585 3586 srcid = sin6->__sin6_src_id; 3587 if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 3588 /* 3589 * Destination is a non-IPv4-compatible IPv6 address. 3590 * Send out an IPv6 format packet. 3591 */ 3592 3593 /* 3594 * If the local address is a mapped address return 3595 * an error. 3596 * It would be possible to send an IPv6 packet but the 3597 * response would never make it back to the application 3598 * since it is bound to a mapped address. 3599 */ 3600 if (IN6_IS_ADDR_V4MAPPED(&connp->conn_saddr_v6)) { 3601 error = EADDRNOTAVAIL; 3602 goto ud_error2; 3603 } 3604 3605 UDP_DBGSTAT(us, udp_out_ipv6); 3606 3607 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) 3608 sin6->sin6_addr = ipv6_loopback; 3609 ipversion = IPV6_VERSION; 3610 } else { 3611 if (connp->conn_ipv6_v6only) { 3612 error = EADDRNOTAVAIL; 3613 goto ud_error2; 3614 } 3615 3616 /* 3617 * If the local address is not zero or a mapped address 3618 * return an error. It would be possible to send an 3619 * IPv4 packet but the response would never make it 3620 * back to the application since it is bound to a 3621 * non-mapped address. 3622 */ 3623 if (!IN6_IS_ADDR_V4MAPPED(&connp->conn_saddr_v6) && 3624 !IN6_IS_ADDR_UNSPECIFIED(&connp->conn_saddr_v6)) { 3625 error = EADDRNOTAVAIL; 3626 goto ud_error2; 3627 } 3628 UDP_DBGSTAT(us, udp_out_mapped); 3629 3630 if (V4_PART_OF_V6(sin6->sin6_addr) == INADDR_ANY) { 3631 V4_PART_OF_V6(sin6->sin6_addr) = 3632 htonl(INADDR_LOOPBACK); 3633 } 3634 ipversion = IPV4_VERSION; 3635 } 3636 3637 if (tudr->OPT_length != 0) { 3638 /* 3639 * If we are connected then the destination needs to be 3640 * the same as the connected one. 3641 */ 3642 if (udp->udp_state == TS_DATA_XFER && 3643 !conn_same_as_last_v6(connp, sin6)) { 3644 error = EISCONN; 3645 goto ud_error2; 3646 } 3647 UDP_STAT(us, udp_out_opt); 3648 error = udp_output_ancillary(connp, NULL, sin6, 3649 data_mp, mp, NULL, cr, pid); 3650 } else { 3651 ip_xmit_attr_t *ixa; 3652 3653 /* 3654 * We have to allocate an ip_xmit_attr_t before we grab 3655 * conn_lock and we need to hold conn_lock once we've 3656 * checked conn_same_as_last_v6 to handle concurrent 3657 * send* calls on a socket. 
3658 */ 3659 ixa = conn_get_ixa(connp, B_FALSE); 3660 if (ixa == NULL) { 3661 error = ENOMEM; 3662 goto ud_error2; 3663 } 3664 mutex_enter(&connp->conn_lock); 3665 3666 if (conn_same_as_last_v6(connp, sin6) && 3667 connp->conn_lastsrcid == srcid && 3668 ipsec_outbound_policy_current(ixa)) { 3669 UDP_DBGSTAT(us, udp_out_lastdst); 3670 /* udp_output_lastdst drops conn_lock */ 3671 error = udp_output_lastdst(connp, data_mp, cr, 3672 pid, ixa); 3673 } else { 3674 UDP_DBGSTAT(us, udp_out_diffdst); 3675 /* udp_output_newdst drops conn_lock */ 3676 error = udp_output_newdst(connp, data_mp, NULL, 3677 sin6, ipversion, cr, pid, ixa); 3678 } 3679 ASSERT(MUTEX_NOT_HELD(&connp->conn_lock)); 3680 } 3681 if (error == 0) { 3682 freeb(mp); 3683 return; 3684 } 3685 break; 3686 3687 case AF_INET: 3688 sin = (sin_t *)addr; 3689 if ((!OK_32PTR((char *)sin) || addrlen != sizeof (sin_t)) || 3690 (sin->sin_family != AF_INET)) { 3691 error = EADDRNOTAVAIL; 3692 goto ud_error2; 3693 } 3694 UDP_DBGSTAT(us, udp_out_ipv4); 3695 if (sin->sin_addr.s_addr == INADDR_ANY) 3696 sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK); 3697 ipversion = IPV4_VERSION; 3698 3699 srcid = 0; 3700 if (tudr->OPT_length != 0) { 3701 /* 3702 * If we are connected then the destination needs to be 3703 * the same as the connected one. 3704 */ 3705 if (udp->udp_state == TS_DATA_XFER && 3706 !conn_same_as_last_v4(connp, sin)) { 3707 error = EISCONN; 3708 goto ud_error2; 3709 } 3710 UDP_STAT(us, udp_out_opt); 3711 error = udp_output_ancillary(connp, sin, NULL, 3712 data_mp, mp, NULL, cr, pid); 3713 } else { 3714 ip_xmit_attr_t *ixa; 3715 3716 /* 3717 * We have to allocate an ip_xmit_attr_t before we grab 3718 * conn_lock and we need to hold conn_lock once we've 3719 * checked conn_same_as_last_v4 to handle concurrent 3720 * send* calls on a socket. 3721 */ 3722 ixa = conn_get_ixa(connp, B_FALSE); 3723 if (ixa == NULL) { 3724 error = ENOMEM; 3725 goto ud_error2; 3726 } 3727 mutex_enter(&connp->conn_lock); 3728 3729 if (conn_same_as_last_v4(connp, sin) && 3730 ipsec_outbound_policy_current(ixa)) { 3731 UDP_DBGSTAT(us, udp_out_lastdst); 3732 /* udp_output_lastdst drops conn_lock */ 3733 error = udp_output_lastdst(connp, data_mp, cr, 3734 pid, ixa); 3735 } else { 3736 UDP_DBGSTAT(us, udp_out_diffdst); 3737 /* udp_output_newdst drops conn_lock */ 3738 error = udp_output_newdst(connp, data_mp, sin, 3739 NULL, ipversion, cr, pid, ixa); 3740 } 3741 ASSERT(MUTEX_NOT_HELD(&connp->conn_lock)); 3742 } 3743 if (error == 0) { 3744 freeb(mp); 3745 return; 3746 } 3747 break; 3748 } 3749 UDP_STAT(us, udp_out_err_output); 3750 ASSERT(mp != NULL); 3751 /* mp is freed by the following routine */ 3752 udp_ud_err(q, mp, (t_scalar_t)error); 3753 return; 3754 3755 ud_error2: 3756 UDPS_BUMP_MIB(us, udpOutErrors); 3757 freemsg(data_mp); 3758 UDP_STAT(us, udp_out_err_output); 3759 ASSERT(mp != NULL); 3760 /* mp is freed by the following routine */ 3761 udp_ud_err(q, mp, (t_scalar_t)error); 3762 } 3763 3764 /* 3765 * Handle the case of the IP address, port, flow label being different 3766 * for both IPv4 and IPv6. 3767 * 3768 * NOTE: The caller must hold conn_lock and we drop it here. 
3769 */ 3770 static int 3771 udp_output_newdst(conn_t *connp, mblk_t *data_mp, sin_t *sin, sin6_t *sin6, 3772 ushort_t ipversion, cred_t *cr, pid_t pid, ip_xmit_attr_t *ixa) 3773 { 3774 uint_t srcid; 3775 uint32_t flowinfo; 3776 udp_t *udp = connp->conn_udp; 3777 int error = 0; 3778 ip_xmit_attr_t *oldixa; 3779 udp_stack_t *us = udp->udp_us; 3780 in6_addr_t v6src; 3781 in6_addr_t v6dst; 3782 in6_addr_t v6nexthop; 3783 in_port_t dstport; 3784 3785 ASSERT(MUTEX_HELD(&connp->conn_lock)); 3786 ASSERT(ixa != NULL); 3787 /* 3788 * We hold conn_lock across all the use and modifications of 3789 * the conn_lastdst, conn_ixa, and conn_xmit_ipp to ensure that they 3790 * stay consistent. 3791 */ 3792 3793 ASSERT(cr != NULL); 3794 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3795 ixa->ixa_cred = cr; 3796 ixa->ixa_cpid = pid; 3797 if (is_system_labeled()) { 3798 /* We need to restart with a label based on the cred */ 3799 ip_xmit_attr_restore_tsl(ixa, ixa->ixa_cred); 3800 } 3801 3802 /* 3803 * If we are connected then the destination needs to be the 3804 * same as the connected one, which is not the case here since we 3805 * checked for that above. 3806 */ 3807 if (udp->udp_state == TS_DATA_XFER) { 3808 mutex_exit(&connp->conn_lock); 3809 error = EISCONN; 3810 goto ud_error; 3811 } 3812 3813 /* In case previous destination was multicast or multirt */ 3814 ip_attr_newdst(ixa); 3815 3816 /* 3817 * If laddr is unspecified then we look at sin6_src_id. 3818 * We will give precedence to a source address set with IPV6_PKTINFO 3819 * (aka IPPF_ADDR) but that is handled in build_hdrs. However, we don't 3820 * want ip_attr_connect to select a source (since it can fail) when 3821 * IPV6_PKTINFO is specified. 3822 * If this doesn't result in a source address then we get a source 3823 * from ip_attr_connect() below. 3824 */ 3825 v6src = connp->conn_saddr_v6; 3826 if (sin != NULL) { 3827 IN6_IPADDR_TO_V4MAPPED(sin->sin_addr.s_addr, &v6dst); 3828 dstport = sin->sin_port; 3829 flowinfo = 0; 3830 /* Don't bother with ip_srcid_find_id(), but indicate anyway. */ 3831 srcid = 0; 3832 ixa->ixa_flags &= ~IXAF_SCOPEID_SET; 3833 ixa->ixa_flags |= IXAF_IS_IPV4; 3834 } else { 3835 boolean_t v4mapped; 3836 3837 v6dst = sin6->sin6_addr; 3838 dstport = sin6->sin6_port; 3839 flowinfo = sin6->sin6_flowinfo; 3840 srcid = sin6->__sin6_src_id; 3841 if (IN6_IS_ADDR_LINKSCOPE(&v6dst) && sin6->sin6_scope_id != 0) { 3842 ixa->ixa_scopeid = sin6->sin6_scope_id; 3843 ixa->ixa_flags |= IXAF_SCOPEID_SET; 3844 } else { 3845 ixa->ixa_flags &= ~IXAF_SCOPEID_SET; 3846 } 3847 v4mapped = IN6_IS_ADDR_V4MAPPED(&v6dst); 3848 if (v4mapped) 3849 ixa->ixa_flags |= IXAF_IS_IPV4; 3850 else 3851 ixa->ixa_flags &= ~IXAF_IS_IPV4; 3852 if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&v6src)) { 3853 if (!ip_srcid_find_id(srcid, &v6src, IPCL_ZONEID(connp), 3854 v4mapped, connp->conn_netstack)) { 3855 /* Mismatched v4mapped/v6 specified by srcid. */ 3856 mutex_exit(&connp->conn_lock); 3857 error = EADDRNOTAVAIL; 3858 goto ud_error; 3859 } 3860 } 3861 } 3862 /* Handle IP_PKTINFO/IPV6_PKTINFO setting source address. 
*/ 3863 if (connp->conn_xmit_ipp.ipp_fields & IPPF_ADDR) { 3864 ip_pkt_t *ipp = &connp->conn_xmit_ipp; 3865 3866 if (ixa->ixa_flags & IXAF_IS_IPV4) { 3867 if (IN6_IS_ADDR_V4MAPPED(&ipp->ipp_addr)) 3868 v6src = ipp->ipp_addr; 3869 } else { 3870 if (!IN6_IS_ADDR_V4MAPPED(&ipp->ipp_addr)) 3871 v6src = ipp->ipp_addr; 3872 } 3873 } 3874 3875 ip_attr_nexthop(&connp->conn_xmit_ipp, ixa, &v6dst, &v6nexthop); 3876 mutex_exit(&connp->conn_lock); 3877 3878 error = ip_attr_connect(connp, ixa, &v6src, &v6dst, &v6nexthop, dstport, 3879 &v6src, NULL, IPDF_ALLOW_MCBC | IPDF_VERIFY_DST | IPDF_IPSEC); 3880 switch (error) { 3881 case 0: 3882 break; 3883 case EADDRNOTAVAIL: 3884 /* 3885 * IXAF_VERIFY_SOURCE tells us to pick a better source. 3886 * Don't have the application see that errno 3887 */ 3888 error = ENETUNREACH; 3889 goto failed; 3890 case ENETDOWN: 3891 /* 3892 * Have !ipif_addr_ready address; drop packet silently 3893 * until we can get applications to not send until we 3894 * are ready. 3895 */ 3896 error = 0; 3897 goto failed; 3898 case EHOSTUNREACH: 3899 case ENETUNREACH: 3900 if (ixa->ixa_ire != NULL) { 3901 /* 3902 * Let conn_ip_output/ire_send_noroute return 3903 * the error and send any local ICMP error. 3904 */ 3905 error = 0; 3906 break; 3907 } 3908 /* FALLTHRU */ 3909 failed: 3910 default: 3911 goto ud_error; 3912 } 3913 3914 3915 /* 3916 * Cluster note: we let the cluster hook know that we are sending to a 3917 * new address and/or port. 3918 */ 3919 if (cl_inet_connect2 != NULL) { 3920 CL_INET_UDP_CONNECT(connp, B_TRUE, &v6dst, dstport, error); 3921 if (error != 0) { 3922 error = EHOSTUNREACH; 3923 goto ud_error; 3924 } 3925 } 3926 3927 mutex_enter(&connp->conn_lock); 3928 /* 3929 * While we dropped the lock some other thread might have connected 3930 * this socket. If so we bail out with EISCONN to ensure that the 3931 * connecting thread is the one that updates conn_ixa, conn_ht_* 3932 * and conn_*last*. 3933 */ 3934 if (udp->udp_state == TS_DATA_XFER) { 3935 mutex_exit(&connp->conn_lock); 3936 error = EISCONN; 3937 goto ud_error; 3938 } 3939 3940 /* 3941 * We need to rebuild the headers if 3942 * - we are labeling packets (could be different for different 3943 * destinations) 3944 * - we have a source route (or routing header) since we need to 3945 * massage that to get the pseudo-header checksum 3946 * - the IP version is different than the last time 3947 * - a socket option with COA_HEADER_CHANGED has been set which 3948 * set conn_v6lastdst to zero. 3949 * 3950 * Otherwise the prepend function will just update the src, dst, 3951 * dstport, and flow label. 3952 */ 3953 if (is_system_labeled()) { 3954 /* TX MLP requires SCM_UCRED and don't have that here */ 3955 if (connp->conn_mlp_type != mlptSingle) { 3956 mutex_exit(&connp->conn_lock); 3957 error = ECONNREFUSED; 3958 goto ud_error; 3959 } 3960 /* 3961 * Check whether Trusted Solaris policy allows communication 3962 * with this host, and pretend that the destination is 3963 * unreachable if not. 3964 * Compute any needed label and place it in ipp_label_v4/v6. 3965 * 3966 * Later conn_build_hdr_template/conn_prepend_hdr takes 3967 * ipp_label_v4/v6 to form the packet. 3968 * 3969 * Tsol note: Since we hold conn_lock we know no other 3970 * thread manipulates conn_xmit_ipp. 
3971 */ 3972 error = conn_update_label(connp, ixa, &v6dst, 3973 &connp->conn_xmit_ipp); 3974 if (error != 0) { 3975 mutex_exit(&connp->conn_lock); 3976 goto ud_error; 3977 } 3978 /* Rebuild the header template */ 3979 error = udp_build_hdr_template(connp, &v6src, &v6dst, dstport, 3980 flowinfo); 3981 if (error != 0) { 3982 mutex_exit(&connp->conn_lock); 3983 goto ud_error; 3984 } 3985 } else if ((connp->conn_xmit_ipp.ipp_fields & 3986 (IPPF_IPV4_OPTIONS|IPPF_RTHDR)) || 3987 ipversion != connp->conn_lastipversion || 3988 IN6_IS_ADDR_UNSPECIFIED(&connp->conn_v6lastdst)) { 3989 /* Rebuild the header template */ 3990 error = udp_build_hdr_template(connp, &v6src, &v6dst, dstport, 3991 flowinfo); 3992 if (error != 0) { 3993 mutex_exit(&connp->conn_lock); 3994 goto ud_error; 3995 } 3996 } else { 3997 /* Simply update the destination address if no source route */ 3998 if (ixa->ixa_flags & IXAF_IS_IPV4) { 3999 ipha_t *ipha = (ipha_t *)connp->conn_ht_iphc; 4000 4001 IN6_V4MAPPED_TO_IPADDR(&v6dst, ipha->ipha_dst); 4002 if (ixa->ixa_flags & IXAF_PMTU_IPV4_DF) { 4003 ipha->ipha_fragment_offset_and_flags |= 4004 IPH_DF_HTONS; 4005 } else { 4006 ipha->ipha_fragment_offset_and_flags &= 4007 ~IPH_DF_HTONS; 4008 } 4009 } else { 4010 ip6_t *ip6h = (ip6_t *)connp->conn_ht_iphc; 4011 ip6h->ip6_dst = v6dst; 4012 } 4013 } 4014 4015 /* 4016 * Remember the dst/dstport etc which corresponds to the built header 4017 * template and conn_ixa. 4018 */ 4019 oldixa = conn_replace_ixa(connp, ixa); 4020 connp->conn_v6lastdst = v6dst; 4021 connp->conn_lastipversion = ipversion; 4022 connp->conn_lastdstport = dstport; 4023 connp->conn_lastflowinfo = flowinfo; 4024 connp->conn_lastscopeid = ixa->ixa_scopeid; 4025 connp->conn_lastsrcid = srcid; 4026 /* Also remember a source to use together with lastdst */ 4027 connp->conn_v6lastsrc = v6src; 4028 4029 data_mp = udp_prepend_header_template(connp, ixa, data_mp, &v6src, 4030 dstport, flowinfo, &error); 4031 4032 /* Done with conn_t */ 4033 mutex_exit(&connp->conn_lock); 4034 ixa_refrele(oldixa); 4035 4036 if (data_mp == NULL) { 4037 ASSERT(error != 0); 4038 goto ud_error; 4039 } 4040 4041 /* We're done. Pass the packet to ip. */ 4042 UDPS_BUMP_MIB(us, udpHCOutDatagrams); 4043 4044 DTRACE_UDP5(send, mblk_t *, NULL, ip_xmit_attr_t *, ixa, 4045 void_ip_t *, data_mp->b_rptr, udp_t *, udp, udpha_t *, 4046 &data_mp->b_rptr[ixa->ixa_ip_hdr_length]); 4047 4048 error = conn_ip_output(data_mp, ixa); 4049 /* No udpOutErrors if an error since IP increases its error counter */ 4050 switch (error) { 4051 case 0: 4052 break; 4053 case EWOULDBLOCK: 4054 (void) ixa_check_drain_insert(connp, ixa); 4055 error = 0; 4056 break; 4057 case EADDRNOTAVAIL: 4058 /* 4059 * IXAF_VERIFY_SOURCE tells us to pick a better source. 4060 * Don't have the application see that errno 4061 */ 4062 error = ENETUNREACH; 4063 /* FALLTHRU */ 4064 default: 4065 mutex_enter(&connp->conn_lock); 4066 /* 4067 * Clear the source and v6lastdst so we call ip_attr_connect 4068 * for the next packet and try to pick a better source. 
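 * (Zeroing conn_v6lastdst means the conn_same_as_last_v4/v6 checks in
 * the send paths cannot match, so the next datagram goes through
 * udp_output_newdst() and source selection runs again.)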
4069 */ 4070 if (connp->conn_mcbc_bind) 4071 connp->conn_saddr_v6 = ipv6_all_zeros; 4072 else 4073 connp->conn_saddr_v6 = connp->conn_bound_addr_v6; 4074 connp->conn_v6lastdst = ipv6_all_zeros; 4075 mutex_exit(&connp->conn_lock); 4076 break; 4077 } 4078 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 4079 ixa->ixa_cred = connp->conn_cred; /* Restore */ 4080 ixa->ixa_cpid = connp->conn_cpid; 4081 ixa_refrele(ixa); 4082 return (error); 4083 4084 ud_error: 4085 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 4086 ixa->ixa_cred = connp->conn_cred; /* Restore */ 4087 ixa->ixa_cpid = connp->conn_cpid; 4088 ixa_refrele(ixa); 4089 4090 freemsg(data_mp); 4091 UDPS_BUMP_MIB(us, udpOutErrors); 4092 UDP_STAT(us, udp_out_err_output); 4093 return (error); 4094 } 4095 4096 /* ARGSUSED */ 4097 static void 4098 udp_wput_fallback(queue_t *wq, mblk_t *mp) 4099 { 4100 #ifdef DEBUG 4101 cmn_err(CE_CONT, "udp_wput_fallback: Message in fallback \n"); 4102 #endif 4103 freemsg(mp); 4104 } 4105 4106 4107 /* 4108 * Handle special out-of-band ioctl requests (see PSARC/2008/265). 4109 */ 4110 static void 4111 udp_wput_cmdblk(queue_t *q, mblk_t *mp) 4112 { 4113 void *data; 4114 mblk_t *datamp = mp->b_cont; 4115 conn_t *connp = Q_TO_CONN(q); 4116 udp_t *udp = connp->conn_udp; 4117 cmdblk_t *cmdp = (cmdblk_t *)mp->b_rptr; 4118 4119 if (datamp == NULL || MBLKL(datamp) < cmdp->cb_len) { 4120 cmdp->cb_error = EPROTO; 4121 qreply(q, mp); 4122 return; 4123 } 4124 data = datamp->b_rptr; 4125 4126 mutex_enter(&connp->conn_lock); 4127 switch (cmdp->cb_cmd) { 4128 case TI_GETPEERNAME: 4129 if (udp->udp_state != TS_DATA_XFER) 4130 cmdp->cb_error = ENOTCONN; 4131 else 4132 cmdp->cb_error = conn_getpeername(connp, data, 4133 &cmdp->cb_len); 4134 break; 4135 case TI_GETMYNAME: 4136 cmdp->cb_error = conn_getsockname(connp, data, &cmdp->cb_len); 4137 break; 4138 default: 4139 cmdp->cb_error = EINVAL; 4140 break; 4141 } 4142 mutex_exit(&connp->conn_lock); 4143 4144 qreply(q, mp); 4145 } 4146 4147 static void 4148 udp_use_pure_tpi(udp_t *udp) 4149 { 4150 conn_t *connp = udp->udp_connp; 4151 4152 mutex_enter(&connp->conn_lock); 4153 udp->udp_issocket = B_FALSE; 4154 mutex_exit(&connp->conn_lock); 4155 UDP_STAT(udp->udp_us, udp_sock_fallback); 4156 } 4157 4158 static void 4159 udp_wput_other(queue_t *q, mblk_t *mp) 4160 { 4161 uchar_t *rptr = mp->b_rptr; 4162 struct iocblk *iocp; 4163 conn_t *connp = Q_TO_CONN(q); 4164 udp_t *udp = connp->conn_udp; 4165 cred_t *cr; 4166 4167 switch (mp->b_datap->db_type) { 4168 case M_CMD: 4169 udp_wput_cmdblk(q, mp); 4170 return; 4171 4172 case M_PROTO: 4173 case M_PCPROTO: 4174 if (mp->b_wptr - rptr < sizeof (t_scalar_t)) { 4175 /* 4176 * If the message does not contain a PRIM_type, 4177 * throw it away. 4178 */ 4179 freemsg(mp); 4180 return; 4181 } 4182 switch (((t_primp_t)rptr)->type) { 4183 case T_ADDR_REQ: 4184 udp_addr_req(q, mp); 4185 return; 4186 case O_T_BIND_REQ: 4187 case T_BIND_REQ: 4188 udp_tpi_bind(q, mp); 4189 return; 4190 case T_CONN_REQ: 4191 udp_tpi_connect(q, mp); 4192 return; 4193 case T_CAPABILITY_REQ: 4194 udp_capability_req(q, mp); 4195 return; 4196 case T_INFO_REQ: 4197 udp_info_req(q, mp); 4198 return; 4199 case T_UNITDATA_REQ: 4200 /* 4201 * If a T_UNITDATA_REQ gets here, the address must 4202 * be bad. Valid T_UNITDATA_REQs are handled 4203 * in udp_wput. 
4204 */ 4205 udp_ud_err(q, mp, EADDRNOTAVAIL); 4206 return; 4207 case T_UNBIND_REQ: 4208 udp_tpi_unbind(q, mp); 4209 return; 4210 case T_SVR4_OPTMGMT_REQ: 4211 /* 4212 * All Solaris components should pass a db_credp 4213 * for this TPI message, hence we ASSERT. 4214 * But in case there is some other M_PROTO that looks 4215 * like a TPI message sent by some other kernel 4216 * component, we check and return an error. 4217 */ 4218 cr = msg_getcred(mp, NULL); 4219 ASSERT(cr != NULL); 4220 if (cr == NULL) { 4221 udp_err_ack(q, mp, TSYSERR, EINVAL); 4222 return; 4223 } 4224 if (!snmpcom_req(q, mp, udp_snmp_set, ip_snmp_get, 4225 cr)) { 4226 svr4_optcom_req(q, mp, cr, &udp_opt_obj); 4227 } 4228 return; 4229 4230 case T_OPTMGMT_REQ: 4231 /* 4232 * All Solaris components should pass a db_credp 4233 * for this TPI message, hence we ASSERT. 4234 * But in case there is some other M_PROTO that looks 4235 * like a TPI message sent by some other kernel 4236 * component, we check and return an error. 4237 */ 4238 cr = msg_getcred(mp, NULL); 4239 ASSERT(cr != NULL); 4240 if (cr == NULL) { 4241 udp_err_ack(q, mp, TSYSERR, EINVAL); 4242 return; 4243 } 4244 tpi_optcom_req(q, mp, cr, &udp_opt_obj); 4245 return; 4246 4247 case T_DISCON_REQ: 4248 udp_tpi_disconnect(q, mp); 4249 return; 4250 4251 /* The following TPI message is not supported by udp. */ 4252 case O_T_CONN_RES: 4253 case T_CONN_RES: 4254 udp_err_ack(q, mp, TNOTSUPPORT, 0); 4255 return; 4256 4257 /* The following 3 TPI requests are illegal for udp. */ 4258 case T_DATA_REQ: 4259 case T_EXDATA_REQ: 4260 case T_ORDREL_REQ: 4261 udp_err_ack(q, mp, TNOTSUPPORT, 0); 4262 return; 4263 default: 4264 break; 4265 } 4266 break; 4267 case M_FLUSH: 4268 if (*rptr & FLUSHW) 4269 flushq(q, FLUSHDATA); 4270 break; 4271 case M_IOCTL: 4272 iocp = (struct iocblk *)mp->b_rptr; 4273 switch (iocp->ioc_cmd) { 4274 case TI_GETPEERNAME: 4275 if (udp->udp_state != TS_DATA_XFER) { 4276 /* 4277 * If a default destination address has not 4278 * been associated with the stream, then we 4279 * don't know the peer's name. 4280 */ 4281 iocp->ioc_error = ENOTCONN; 4282 iocp->ioc_count = 0; 4283 mp->b_datap->db_type = M_IOCACK; 4284 qreply(q, mp); 4285 return; 4286 } 4287 /* FALLTHRU */ 4288 case TI_GETMYNAME: 4289 /* 4290 * For TI_GETPEERNAME and TI_GETMYNAME, we first 4291 * need to copyin the user's strbuf structure. 4292 * Processing will continue in the M_IOCDATA case 4293 * below. 4294 */ 4295 mi_copyin(q, mp, NULL, 4296 SIZEOF_STRUCT(strbuf, iocp->ioc_flag)); 4297 return; 4298 case _SIOCSOCKFALLBACK: 4299 /* 4300 * Either sockmod is about to be popped and the 4301 * socket would now be treated as a plain stream, 4302 * or a module is about to be pushed so we have 4303 * to follow pure TPI semantics. 4304 */ 4305 if (!udp->udp_issocket) { 4306 DB_TYPE(mp) = M_IOCNAK; 4307 iocp->ioc_error = EINVAL; 4308 } else { 4309 udp_use_pure_tpi(udp); 4310 4311 DB_TYPE(mp) = M_IOCACK; 4312 iocp->ioc_error = 0; 4313 } 4314 iocp->ioc_count = 0; 4315 iocp->ioc_rval = 0; 4316 qreply(q, mp); 4317 return; 4318 default: 4319 break; 4320 } 4321 break; 4322 case M_IOCDATA: 4323 udp_wput_iocdata(q, mp); 4324 return; 4325 default: 4326 /* Unrecognized messages are passed through without change. */ 4327 break; 4328 } 4329 ip_wput_nondata(q, mp); 4330 } 4331 4332 /* 4333 * udp_wput_iocdata is called by udp_wput_other to handle all M_IOCDATA 4334 * messages. 
4335 */ 4336 static void 4337 udp_wput_iocdata(queue_t *q, mblk_t *mp) 4338 { 4339 mblk_t *mp1; 4340 struct iocblk *iocp = (struct iocblk *)mp->b_rptr; 4341 STRUCT_HANDLE(strbuf, sb); 4342 uint_t addrlen; 4343 conn_t *connp = Q_TO_CONN(q); 4344 udp_t *udp = connp->conn_udp; 4345 4346 /* Make sure it is one of ours. */ 4347 switch (iocp->ioc_cmd) { 4348 case TI_GETMYNAME: 4349 case TI_GETPEERNAME: 4350 break; 4351 default: 4352 ip_wput_nondata(q, mp); 4353 return; 4354 } 4355 4356 switch (mi_copy_state(q, mp, &mp1)) { 4357 case -1: 4358 return; 4359 case MI_COPY_CASE(MI_COPY_IN, 1): 4360 break; 4361 case MI_COPY_CASE(MI_COPY_OUT, 1): 4362 /* 4363 * The address has been copied out, so now 4364 * copyout the strbuf. 4365 */ 4366 mi_copyout(q, mp); 4367 return; 4368 case MI_COPY_CASE(MI_COPY_OUT, 2): 4369 /* 4370 * The address and strbuf have been copied out. 4371 * We're done, so just acknowledge the original 4372 * M_IOCTL. 4373 */ 4374 mi_copy_done(q, mp, 0); 4375 return; 4376 default: 4377 /* 4378 * Something strange has happened, so acknowledge 4379 * the original M_IOCTL with an EPROTO error. 4380 */ 4381 mi_copy_done(q, mp, EPROTO); 4382 return; 4383 } 4384 4385 /* 4386 * Now we have the strbuf structure for TI_GETMYNAME 4387 * and TI_GETPEERNAME. Next we copyout the requested 4388 * address and then we'll copyout the strbuf. 4389 */ 4390 STRUCT_SET_HANDLE(sb, iocp->ioc_flag, (void *)mp1->b_rptr); 4391 4392 if (connp->conn_family == AF_INET) 4393 addrlen = sizeof (sin_t); 4394 else 4395 addrlen = sizeof (sin6_t); 4396 4397 if (STRUCT_FGET(sb, maxlen) < addrlen) { 4398 mi_copy_done(q, mp, EINVAL); 4399 return; 4400 } 4401 4402 switch (iocp->ioc_cmd) { 4403 case TI_GETMYNAME: 4404 break; 4405 case TI_GETPEERNAME: 4406 if (udp->udp_state != TS_DATA_XFER) { 4407 mi_copy_done(q, mp, ENOTCONN); 4408 return; 4409 } 4410 break; 4411 } 4412 mp1 = mi_copyout_alloc(q, mp, STRUCT_FGETP(sb, buf), addrlen, B_TRUE); 4413 if (!mp1) 4414 return; 4415 4416 STRUCT_FSET(sb, len, addrlen); 4417 switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) { 4418 case TI_GETMYNAME: 4419 (void) conn_getsockname(connp, (struct sockaddr *)mp1->b_wptr, 4420 &addrlen); 4421 break; 4422 case TI_GETPEERNAME: 4423 (void) conn_getpeername(connp, (struct sockaddr *)mp1->b_wptr, 4424 &addrlen); 4425 break; 4426 } 4427 mp1->b_wptr += addrlen; 4428 /* Copy out the address */ 4429 mi_copyout(q, mp); 4430 } 4431 4432 void 4433 udp_ddi_g_init(void) 4434 { 4435 udp_max_optsize = optcom_max_optsize(udp_opt_obj.odb_opt_des_arr, 4436 udp_opt_obj.odb_opt_arr_cnt); 4437 4438 /* 4439 * We want to be informed each time a stack is created or 4440 * destroyed in the kernel, so we can maintain the 4441 * set of udp_stack_t's. 4442 */ 4443 netstack_register(NS_UDP, udp_stack_init, NULL, udp_stack_fini); 4444 } 4445 4446 void 4447 udp_ddi_g_destroy(void) 4448 { 4449 netstack_unregister(NS_UDP); 4450 } 4451 4452 #define INET_NAME "ip" 4453 4454 /* 4455 * Initialize the UDP stack instance. 
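 * This is the NS_UDP create callback registered through
 * netstack_register() in udp_ddi_g_init() above; it runs once for each
 * netstack (IP instance) as it is created, with udp_stack_fini() as the
 * matching destroy callback.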
4456 */ 4457 static void * 4458 udp_stack_init(netstackid_t stackid, netstack_t *ns) 4459 { 4460 udp_stack_t *us; 4461 int i; 4462 int error = 0; 4463 major_t major; 4464 size_t arrsz; 4465 4466 us = (udp_stack_t *)kmem_zalloc(sizeof (*us), KM_SLEEP); 4467 us->us_netstack = ns; 4468 4469 mutex_init(&us->us_epriv_port_lock, NULL, MUTEX_DEFAULT, NULL); 4470 us->us_num_epriv_ports = UDP_NUM_EPRIV_PORTS; 4471 us->us_epriv_ports[0] = ULP_DEF_EPRIV_PORT1; 4472 us->us_epriv_ports[1] = ULP_DEF_EPRIV_PORT2; 4473 4474 /* 4475 * The smallest anonymous port in the priviledged port range which UDP 4476 * looks for free port. Use in the option UDP_ANONPRIVBIND. 4477 */ 4478 us->us_min_anonpriv_port = 512; 4479 4480 us->us_bind_fanout_size = udp_bind_fanout_size; 4481 4482 /* Roundup variable that might have been modified in /etc/system */ 4483 if (!ISP2(us->us_bind_fanout_size)) { 4484 /* Not a power of two. Round up to nearest power of two */ 4485 for (i = 0; i < 31; i++) { 4486 if (us->us_bind_fanout_size < (1 << i)) 4487 break; 4488 } 4489 us->us_bind_fanout_size = 1 << i; 4490 } 4491 us->us_bind_fanout = kmem_zalloc(us->us_bind_fanout_size * 4492 sizeof (udp_fanout_t), KM_SLEEP); 4493 for (i = 0; i < us->us_bind_fanout_size; i++) { 4494 mutex_init(&us->us_bind_fanout[i].uf_lock, NULL, MUTEX_DEFAULT, 4495 NULL); 4496 } 4497 4498 arrsz = udp_propinfo_count * sizeof (mod_prop_info_t); 4499 us->us_propinfo_tbl = (mod_prop_info_t *)kmem_alloc(arrsz, 4500 KM_SLEEP); 4501 bcopy(udp_propinfo_tbl, us->us_propinfo_tbl, arrsz); 4502 4503 /* Allocate the per netstack stats */ 4504 mutex_enter(&cpu_lock); 4505 us->us_sc_cnt = MAX(ncpus, boot_ncpus); 4506 mutex_exit(&cpu_lock); 4507 us->us_sc = kmem_zalloc(max_ncpus * sizeof (udp_stats_cpu_t *), 4508 KM_SLEEP); 4509 for (i = 0; i < us->us_sc_cnt; i++) { 4510 us->us_sc[i] = kmem_zalloc(sizeof (udp_stats_cpu_t), 4511 KM_SLEEP); 4512 } 4513 4514 us->us_kstat = udp_kstat2_init(stackid); 4515 us->us_mibkp = udp_kstat_init(stackid); 4516 4517 major = mod_name_to_major(INET_NAME); 4518 error = ldi_ident_from_major(major, &us->us_ldi_ident); 4519 ASSERT(error == 0); 4520 return (us); 4521 } 4522 4523 /* 4524 * Free the UDP stack instance. 4525 */ 4526 static void 4527 udp_stack_fini(netstackid_t stackid, void *arg) 4528 { 4529 udp_stack_t *us = (udp_stack_t *)arg; 4530 int i; 4531 4532 for (i = 0; i < us->us_bind_fanout_size; i++) { 4533 mutex_destroy(&us->us_bind_fanout[i].uf_lock); 4534 } 4535 4536 kmem_free(us->us_bind_fanout, us->us_bind_fanout_size * 4537 sizeof (udp_fanout_t)); 4538 4539 us->us_bind_fanout = NULL; 4540 4541 for (i = 0; i < us->us_sc_cnt; i++) 4542 kmem_free(us->us_sc[i], sizeof (udp_stats_cpu_t)); 4543 kmem_free(us->us_sc, max_ncpus * sizeof (udp_stats_cpu_t *)); 4544 4545 kmem_free(us->us_propinfo_tbl, 4546 udp_propinfo_count * sizeof (mod_prop_info_t)); 4547 us->us_propinfo_tbl = NULL; 4548 4549 udp_kstat_fini(stackid, us->us_mibkp); 4550 us->us_mibkp = NULL; 4551 4552 udp_kstat2_fini(stackid, us->us_kstat); 4553 us->us_kstat = NULL; 4554 4555 mutex_destroy(&us->us_epriv_port_lock); 4556 ldi_ident_release(us->us_ldi_ident); 4557 kmem_free(us, sizeof (*us)); 4558 } 4559 4560 static size_t 4561 udp_set_rcv_hiwat(udp_t *udp, size_t size) 4562 { 4563 udp_stack_t *us = udp->udp_us; 4564 4565 /* We add a bit of extra buffering */ 4566 size += size >> 1; 4567 if (size > us->us_max_buf) 4568 size = us->us_max_buf; 4569 4570 udp->udp_rcv_hiwat = size; 4571 return (size); 4572 } 4573 4574 /* 4575 * For the lower queue so that UDP can be a dummy mux. 
4576 * Nobody should be sending 4577 * packets up this stream 4578 */ 4579 static void 4580 udp_lrput(queue_t *q, mblk_t *mp) 4581 { 4582 switch (mp->b_datap->db_type) { 4583 case M_FLUSH: 4584 /* Turn around */ 4585 if (*mp->b_rptr & FLUSHW) { 4586 *mp->b_rptr &= ~FLUSHR; 4587 qreply(q, mp); 4588 return; 4589 } 4590 break; 4591 } 4592 freemsg(mp); 4593 } 4594 4595 /* 4596 * For the lower queue so that UDP can be a dummy mux. 4597 * Nobody should be sending packets down this stream. 4598 */ 4599 /* ARGSUSED */ 4600 void 4601 udp_lwput(queue_t *q, mblk_t *mp) 4602 { 4603 freemsg(mp); 4604 } 4605 4606 /* 4607 * When a CPU is added, we need to allocate the per CPU stats struct. 4608 */ 4609 void 4610 udp_stack_cpu_add(udp_stack_t *us, processorid_t cpu_seqid) 4611 { 4612 int i; 4613 4614 if (cpu_seqid < us->us_sc_cnt) 4615 return; 4616 for (i = us->us_sc_cnt; i <= cpu_seqid; i++) { 4617 ASSERT(us->us_sc[i] == NULL); 4618 us->us_sc[i] = kmem_zalloc(sizeof (udp_stats_cpu_t), 4619 KM_SLEEP); 4620 } 4621 membar_producer(); 4622 us->us_sc_cnt = cpu_seqid + 1; 4623 } 4624 4625 /* 4626 * Below routines for UDP socket module. 4627 */ 4628 4629 static conn_t * 4630 udp_do_open(cred_t *credp, boolean_t isv6, int flags, int *errorp) 4631 { 4632 udp_t *udp; 4633 conn_t *connp; 4634 zoneid_t zoneid; 4635 netstack_t *ns; 4636 udp_stack_t *us; 4637 int len; 4638 4639 ASSERT(errorp != NULL); 4640 4641 if ((*errorp = secpolicy_basic_net_access(credp)) != 0) 4642 return (NULL); 4643 4644 ns = netstack_find_by_cred(credp); 4645 ASSERT(ns != NULL); 4646 us = ns->netstack_udp; 4647 ASSERT(us != NULL); 4648 4649 /* 4650 * For exclusive stacks we set the zoneid to zero 4651 * to make UDP operate as if in the global zone. 4652 */ 4653 if (ns->netstack_stackid != GLOBAL_NETSTACKID) 4654 zoneid = GLOBAL_ZONEID; 4655 else 4656 zoneid = crgetzoneid(credp); 4657 4658 ASSERT(flags == KM_SLEEP || flags == KM_NOSLEEP); 4659 4660 connp = ipcl_conn_create(IPCL_UDPCONN, flags, ns); 4661 if (connp == NULL) { 4662 netstack_rele(ns); 4663 *errorp = ENOMEM; 4664 return (NULL); 4665 } 4666 udp = connp->conn_udp; 4667 4668 /* 4669 * ipcl_conn_create did a netstack_hold. Undo the hold that was 4670 * done by netstack_find_by_cred() 4671 */ 4672 netstack_rele(ns); 4673 4674 /* 4675 * Since this conn_t/udp_t is not yet visible to anybody else we don't 4676 * need to lock anything. 4677 */ 4678 ASSERT(connp->conn_proto == IPPROTO_UDP); 4679 ASSERT(connp->conn_udp == udp); 4680 ASSERT(udp->udp_connp == connp); 4681 4682 /* Set the initial state of the stream and the privilege status. 
*/ 4683 udp->udp_state = TS_UNBND; 4684 connp->conn_ixa->ixa_flags |= IXAF_VERIFY_SOURCE; 4685 if (isv6) { 4686 connp->conn_family = AF_INET6; 4687 connp->conn_ipversion = IPV6_VERSION; 4688 connp->conn_ixa->ixa_flags &= ~IXAF_IS_IPV4; 4689 connp->conn_default_ttl = us->us_ipv6_hoplimit; 4690 len = sizeof (ip6_t) + UDPH_SIZE; 4691 } else { 4692 connp->conn_family = AF_INET; 4693 connp->conn_ipversion = IPV4_VERSION; 4694 connp->conn_ixa->ixa_flags |= IXAF_IS_IPV4; 4695 connp->conn_default_ttl = us->us_ipv4_ttl; 4696 len = sizeof (ipha_t) + UDPH_SIZE; 4697 } 4698 4699 ASSERT(connp->conn_ixa->ixa_protocol == connp->conn_proto); 4700 connp->conn_xmit_ipp.ipp_unicast_hops = connp->conn_default_ttl; 4701 4702 connp->conn_ixa->ixa_multicast_ttl = IP_DEFAULT_MULTICAST_TTL; 4703 connp->conn_ixa->ixa_flags |= IXAF_MULTICAST_LOOP | IXAF_SET_ULP_CKSUM; 4704 /* conn_allzones can not be set this early, hence no IPCL_ZONEID */ 4705 connp->conn_ixa->ixa_zoneid = zoneid; 4706 4707 connp->conn_zoneid = zoneid; 4708 4709 /* 4710 * If the caller has the process-wide flag set, then default to MAC 4711 * exempt mode. This allows read-down to unlabeled hosts. 4712 */ 4713 if (getpflags(NET_MAC_AWARE, credp) != 0) 4714 connp->conn_mac_mode = CONN_MAC_AWARE; 4715 4716 connp->conn_zone_is_global = (crgetzoneid(credp) == GLOBAL_ZONEID); 4717 4718 udp->udp_us = us; 4719 4720 connp->conn_rcvbuf = us->us_recv_hiwat; 4721 connp->conn_sndbuf = us->us_xmit_hiwat; 4722 connp->conn_sndlowat = us->us_xmit_lowat; 4723 connp->conn_rcvlowat = udp_mod_info.mi_lowat; 4724 4725 connp->conn_wroff = len + us->us_wroff_extra; 4726 connp->conn_so_type = SOCK_DGRAM; 4727 4728 connp->conn_recv = udp_input; 4729 connp->conn_recvicmp = udp_icmp_input; 4730 crhold(credp); 4731 connp->conn_cred = credp; 4732 connp->conn_cpid = curproc->p_pid; 4733 connp->conn_open_time = ddi_get_lbolt64(); 4734 /* Cache things in ixa without an extra refhold */ 4735 ASSERT(!(connp->conn_ixa->ixa_free_flags & IXA_FREE_CRED)); 4736 connp->conn_ixa->ixa_cred = connp->conn_cred; 4737 connp->conn_ixa->ixa_cpid = connp->conn_cpid; 4738 if (is_system_labeled()) 4739 connp->conn_ixa->ixa_tsl = crgetlabel(connp->conn_cred); 4740 4741 *((sin6_t *)&udp->udp_delayed_addr) = sin6_null; 4742 4743 if (us->us_pmtu_discovery) 4744 connp->conn_ixa->ixa_flags |= IXAF_PMTU_DISCOVERY; 4745 4746 return (connp); 4747 } 4748 4749 sock_lower_handle_t 4750 udp_create(int family, int type, int proto, sock_downcalls_t **sock_downcalls, 4751 uint_t *smodep, int *errorp, int flags, cred_t *credp) 4752 { 4753 udp_t *udp = NULL; 4754 udp_stack_t *us; 4755 conn_t *connp; 4756 boolean_t isv6; 4757 4758 if (type != SOCK_DGRAM || (family != AF_INET && family != AF_INET6) || 4759 (proto != 0 && proto != IPPROTO_UDP)) { 4760 *errorp = EPROTONOSUPPORT; 4761 return (NULL); 4762 } 4763 4764 if (family == AF_INET6) 4765 isv6 = B_TRUE; 4766 else 4767 isv6 = B_FALSE; 4768 4769 connp = udp_do_open(credp, isv6, flags, errorp); 4770 if (connp == NULL) 4771 return (NULL); 4772 4773 udp = connp->conn_udp; 4774 ASSERT(udp != NULL); 4775 us = udp->udp_us; 4776 ASSERT(us != NULL); 4777 4778 udp->udp_issocket = B_TRUE; 4779 connp->conn_flags |= IPCL_NONSTR; 4780 4781 /* 4782 * Set flow control 4783 * Since this conn_t/udp_t is not yet visible to anybody else we don't 4784 * need to lock anything. 
4785 */ 4786 (void) udp_set_rcv_hiwat(udp, connp->conn_rcvbuf); 4787 udp->udp_rcv_disply_hiwat = connp->conn_rcvbuf; 4788 4789 connp->conn_flow_cntrld = B_FALSE; 4790 4791 mutex_enter(&connp->conn_lock); 4792 connp->conn_state_flags &= ~CONN_INCIPIENT; 4793 mutex_exit(&connp->conn_lock); 4794 4795 *errorp = 0; 4796 *smodep = SM_ATOMIC; 4797 *sock_downcalls = &sock_udp_downcalls; 4798 return ((sock_lower_handle_t)connp); 4799 } 4800 4801 /* ARGSUSED3 */ 4802 void 4803 udp_activate(sock_lower_handle_t proto_handle, sock_upper_handle_t sock_handle, 4804 sock_upcalls_t *sock_upcalls, int flags, cred_t *cr) 4805 { 4806 conn_t *connp = (conn_t *)proto_handle; 4807 struct sock_proto_props sopp; 4808 4809 /* All Solaris components should pass a cred for this operation. */ 4810 ASSERT(cr != NULL); 4811 4812 connp->conn_upcalls = sock_upcalls; 4813 connp->conn_upper_handle = sock_handle; 4814 4815 sopp.sopp_flags = SOCKOPT_WROFF | SOCKOPT_RCVHIWAT | SOCKOPT_RCVLOWAT | 4816 SOCKOPT_MAXBLK | SOCKOPT_MAXPSZ | SOCKOPT_MINPSZ; 4817 sopp.sopp_wroff = connp->conn_wroff; 4818 sopp.sopp_maxblk = INFPSZ; 4819 sopp.sopp_rxhiwat = connp->conn_rcvbuf; 4820 sopp.sopp_rxlowat = connp->conn_rcvlowat; 4821 sopp.sopp_maxaddrlen = sizeof (sin6_t); 4822 sopp.sopp_maxpsz = 4823 (connp->conn_family == AF_INET) ? UDP_MAXPACKET_IPV4 : 4824 UDP_MAXPACKET_IPV6; 4825 sopp.sopp_minpsz = (udp_mod_info.mi_minpsz == 1) ? 0 : 4826 udp_mod_info.mi_minpsz; 4827 4828 (*connp->conn_upcalls->su_set_proto_props)(connp->conn_upper_handle, 4829 &sopp); 4830 } 4831 4832 static void 4833 udp_do_close(conn_t *connp) 4834 { 4835 udp_t *udp; 4836 4837 ASSERT(connp != NULL && IPCL_IS_UDP(connp)); 4838 udp = connp->conn_udp; 4839 4840 if (cl_inet_unbind != NULL && udp->udp_state == TS_IDLE) { 4841 /* 4842 * Running in cluster mode - register unbind information 4843 */ 4844 if (connp->conn_ipversion == IPV4_VERSION) { 4845 (*cl_inet_unbind)( 4846 connp->conn_netstack->netstack_stackid, 4847 IPPROTO_UDP, AF_INET, 4848 (uint8_t *)(&V4_PART_OF_V6(connp->conn_laddr_v6)), 4849 (in_port_t)connp->conn_lport, NULL); 4850 } else { 4851 (*cl_inet_unbind)( 4852 connp->conn_netstack->netstack_stackid, 4853 IPPROTO_UDP, AF_INET6, 4854 (uint8_t *)&(connp->conn_laddr_v6), 4855 (in_port_t)connp->conn_lport, NULL); 4856 } 4857 } 4858 4859 udp_bind_hash_remove(udp, B_FALSE); 4860 mutex_enter(&connp->conn_lock); 4861 udp_reuselist_remove(connp); 4862 mutex_exit(&connp->conn_lock); 4863 4864 ip_quiesce_conn(connp); 4865 4866 if (!IPCL_IS_NONSTR(connp)) { 4867 ASSERT(connp->conn_wq != NULL); 4868 ASSERT(connp->conn_rq != NULL); 4869 qprocsoff(connp->conn_rq); 4870 } 4871 4872 udp_close_free(connp); 4873 4874 /* 4875 * Now we are truly single threaded on this stream, and can 4876 * delete the things hanging off the connp, and finally the connp. 4877 * We removed this connp from the fanout list, it cannot be 4878 * accessed thru the fanouts, and we already waited for the 4879 * conn_ref to drop to 0. We are already in close, so 4880 * there cannot be any other thread from the top. qprocsoff 4881 * has completed, and service has completed or won't run in 4882 * future. 
4883 */ 4884 ASSERT(connp->conn_ref == 1); 4885 4886 if (!IPCL_IS_NONSTR(connp)) { 4887 inet_minor_free(connp->conn_minor_arena, connp->conn_dev); 4888 } else { 4889 ip_free_helper_stream(connp); 4890 } 4891 4892 connp->conn_ref--; 4893 ipcl_conn_destroy(connp); 4894 } 4895 4896 /* ARGSUSED1 */ 4897 int 4898 udp_close(sock_lower_handle_t proto_handle, int flags, cred_t *cr) 4899 { 4900 conn_t *connp = (conn_t *)proto_handle; 4901 4902 /* All Solaris components should pass a cred for this operation. */ 4903 ASSERT(cr != NULL); 4904 4905 udp_do_close(connp); 4906 return (0); 4907 } 4908 4909 static int 4910 udp_do_bind(conn_t *connp, struct sockaddr *sa, socklen_t len, cred_t *cr, 4911 boolean_t bind_to_req_port_only) 4912 { 4913 sin_t *sin; 4914 sin6_t *sin6; 4915 udp_t *udp = connp->conn_udp; 4916 int error = 0; 4917 ip_laddr_t laddr_type = IPVL_UNICAST_UP; /* INADDR_ANY */ 4918 in_port_t port; /* Host byte order */ 4919 in_port_t requested_port; /* Host byte order */ 4920 int count; 4921 ipaddr_t v4src; /* Set if AF_INET */ 4922 in6_addr_t v6src; 4923 int loopmax; 4924 udp_fanout_t *udpf; 4925 in_port_t lport; /* Network byte order */ 4926 uint_t scopeid = 0; 4927 zoneid_t zoneid = IPCL_ZONEID(connp); 4928 ip_stack_t *ipst = connp->conn_netstack->netstack_ip; 4929 boolean_t is_inaddr_any; 4930 mlp_type_t addrtype, mlptype; 4931 udp_stack_t *us = udp->udp_us; 4932 struct reuselist *reusep; 4933 4934 switch (len) { 4935 case sizeof (sin_t): /* Complete IPv4 address */ 4936 sin = (sin_t *)sa; 4937 4938 if (sin == NULL || !OK_32PTR((char *)sin)) 4939 return (EINVAL); 4940 4941 if (connp->conn_family != AF_INET || 4942 sin->sin_family != AF_INET) { 4943 return (EAFNOSUPPORT); 4944 } 4945 v4src = sin->sin_addr.s_addr; 4946 IN6_IPADDR_TO_V4MAPPED(v4src, &v6src); 4947 if (v4src != INADDR_ANY) { 4948 laddr_type = ip_laddr_verify_v4(v4src, zoneid, ipst, 4949 B_TRUE); 4950 } 4951 port = ntohs(sin->sin_port); 4952 break; 4953 4954 case sizeof (sin6_t): /* complete IPv6 address */ 4955 sin6 = (sin6_t *)sa; 4956 4957 if (sin6 == NULL || !OK_32PTR((char *)sin6)) 4958 return (EINVAL); 4959 4960 if (connp->conn_family != AF_INET6 || 4961 sin6->sin6_family != AF_INET6) { 4962 return (EAFNOSUPPORT); 4963 } 4964 v6src = sin6->sin6_addr; 4965 if (IN6_IS_ADDR_V4MAPPED(&v6src)) { 4966 if (connp->conn_ipv6_v6only) 4967 return (EADDRNOTAVAIL); 4968 4969 IN6_V4MAPPED_TO_IPADDR(&v6src, v4src); 4970 if (v4src != INADDR_ANY) { 4971 laddr_type = ip_laddr_verify_v4(v4src, 4972 zoneid, ipst, B_FALSE); 4973 } 4974 } else { 4975 if (!IN6_IS_ADDR_UNSPECIFIED(&v6src)) { 4976 if (IN6_IS_ADDR_LINKSCOPE(&v6src)) 4977 scopeid = sin6->sin6_scope_id; 4978 laddr_type = ip_laddr_verify_v6(&v6src, 4979 zoneid, ipst, B_TRUE, scopeid); 4980 } 4981 } 4982 port = ntohs(sin6->sin6_port); 4983 break; 4984 4985 default: /* Invalid request */ 4986 (void) strlog(UDP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 4987 "udp_bind: bad ADDR_length length %u", len); 4988 return (-TBADADDR); 4989 } 4990 4991 /* Is the local address a valid unicast, multicast, or broadcast? */ 4992 if (laddr_type == IPVL_BAD) 4993 return (EADDRNOTAVAIL); 4994 4995 requested_port = port; 4996 4997 if (requested_port == 0 || !bind_to_req_port_only) 4998 bind_to_req_port_only = B_FALSE; 4999 else /* T_BIND_REQ and requested_port != 0 */ 5000 bind_to_req_port_only = B_TRUE; 5001 5002 if (requested_port == 0) { 5003 /* 5004 * If the application passed in zero for the port number, it 5005 * doesn't care which port number we bind to. Get one in the 5006 * valid range. 
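 * For example (userland view, sketch only):
 *
 *	sin.sin_port = 0;
 *	(void) bind(fd, (struct sockaddr *)&sin, sizeof (sin));
 *
 * lands here, and we pick an anonymous port below: from the privileged
 * range via udp_get_next_priv_port() when UDP_ANONPRIVBIND is set
 * (conn_anon_priv_bind), otherwise starting at us_next_port_to_try via
 * udp_update_next_port().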
5007 */ 5008 if (connp->conn_anon_priv_bind) { 5009 port = udp_get_next_priv_port(udp); 5010 } else { 5011 port = udp_update_next_port(udp, 5012 us->us_next_port_to_try, B_TRUE); 5013 } 5014 } else { 5015 /* 5016 * If the port is in the well-known privileged range, 5017 * make sure the caller was privileged. 5018 */ 5019 int i; 5020 boolean_t priv = B_FALSE; 5021 5022 if (port < us->us_smallest_nonpriv_port) { 5023 priv = B_TRUE; 5024 } else { 5025 for (i = 0; i < us->us_num_epriv_ports; i++) { 5026 if (port == us->us_epriv_ports[i]) { 5027 priv = B_TRUE; 5028 break; 5029 } 5030 } 5031 } 5032 5033 if (priv) { 5034 if (secpolicy_net_privaddr(cr, port, IPPROTO_UDP) != 0) 5035 return (-TACCES); 5036 } 5037 } 5038 5039 if (port == 0) 5040 return (-TNOADDR); 5041 5042 /* 5043 * get some memory we might need later on for reuseport, avoid 5044 * KM_SLEEP under lock 5045 */ 5046 reusep = kmem_zalloc(sizeof (*reusep), KM_SLEEP); 5047 mutex_init(&reusep->ru_lock, NULL, MUTEX_DEFAULT, NULL); 5048 5049 mutex_enter(&connp->conn_lock); 5050 5051 if (!connp->conn_reuseport) { 5052 mutex_destroy(&reusep->ru_lock); 5053 kmem_free(reusep, sizeof (*reusep)); 5054 reusep = NULL; 5055 } 5056 5057 /* 5058 * The state must be TS_UNBND. TPI mandates that users must send 5059 * TPI primitives only 1 at a time and wait for the response before 5060 * sending the next primitive. 5061 */ 5062 if (udp->udp_state != TS_UNBND) { 5063 mutex_exit(&connp->conn_lock); 5064 if (reusep != NULL) { 5065 mutex_destroy(&reusep->ru_lock); 5066 kmem_free(reusep, sizeof (*reusep)); 5067 } 5068 (void) strlog(UDP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 5069 "udp_bind: bad state, %u", udp->udp_state); 5070 return (-TOUTSTATE); 5071 } 5072 /* 5073 * Copy the source address into our udp structure. This address 5074 * may still be zero; if so, IP will fill in the correct address 5075 * each time an outbound packet is passed to it. Since the udp is 5076 * not yet in the bind hash list, we don't grab the uf_lock to 5077 * change conn_ipversion 5078 */ 5079 if (connp->conn_family == AF_INET) { 5080 ASSERT(sin != NULL); 5081 ASSERT(connp->conn_ixa->ixa_flags & IXAF_IS_IPV4); 5082 } else { 5083 if (IN6_IS_ADDR_V4MAPPED(&v6src)) { 5084 /* 5085 * no need to hold the uf_lock to set the conn_ipversion 5086 * since we are not yet in the fanout list 5087 */ 5088 connp->conn_ipversion = IPV4_VERSION; 5089 connp->conn_ixa->ixa_flags |= IXAF_IS_IPV4; 5090 } else { 5091 connp->conn_ipversion = IPV6_VERSION; 5092 connp->conn_ixa->ixa_flags &= ~IXAF_IS_IPV4; 5093 } 5094 } 5095 5096 /* 5097 * If conn_reuseaddr is not set, then we have to make sure that 5098 * the IP address and port number the application requested 5099 * (or we selected for the application) is not being used by 5100 * another stream. If another stream is already using the 5101 * requested IP address and port, the behavior depends on 5102 * "bind_to_req_port_only". If set the bind fails; otherwise we 5103 * search for any an unused port to bind to the stream. 5104 * 5105 * As per the BSD semantics, as modified by the Deering multicast 5106 * changes, if udp_reuseaddr is set, then we allow multiple binds 5107 * to the same port independent of the local IP address. 5108 * 5109 * This is slightly different than in SunOS 4.X which did not 5110 * support IP multicast. Note that the change implemented by the 5111 * Deering multicast code effects all binds - not only binding 5112 * to IP multicast addresses. 
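 * For example, two multicast receivers that both set SO_REUSEADDR can
 * bind the same UDP port (one on INADDR_ANY, one on a group or
 * interface address) at the same time; without SO_REUSEADDR the second
 * bind of that port would fail below with -TADDRBUSY.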
5113 * 5114 * Note that when binding to port zero we ignore SO_REUSEADDR in 5115 * order to guarantee a unique port. 5116 */ 5117 5118 count = 0; 5119 if (connp->conn_anon_priv_bind) { 5120 /* 5121 * loopmax = (IPPORT_RESERVED-1) - 5122 * us->us_min_anonpriv_port + 1 5123 */ 5124 loopmax = IPPORT_RESERVED - us->us_min_anonpriv_port; 5125 } else { 5126 loopmax = us->us_largest_anon_port - 5127 us->us_smallest_anon_port + 1; 5128 } 5129 5130 is_inaddr_any = V6_OR_V4_INADDR_ANY(v6src); 5131 5132 for (;;) { 5133 udp_t *udp1; 5134 boolean_t found_exclbind = B_FALSE; 5135 conn_t *connp1; 5136 5137 /* 5138 * Walk through the list of udp streams bound to 5139 * requested port with the same IP address. 5140 */ 5141 lport = htons(port); 5142 udpf = &us->us_bind_fanout[UDP_BIND_HASH(lport, 5143 us->us_bind_fanout_size)]; 5144 mutex_enter(&udpf->uf_lock); 5145 for (udp1 = udpf->uf_udp; udp1 != NULL; 5146 udp1 = udp1->udp_bind_hash) { 5147 connp1 = udp1->udp_connp; 5148 5149 if (lport != connp1->conn_lport) 5150 continue; 5151 5152 /* 5153 * On a labeled system, we must treat bindings to ports 5154 * on shared IP addresses by sockets with MAC exemption 5155 * privilege as being in all zones, as there's 5156 * otherwise no way to identify the right receiver. 5157 */ 5158 if (!IPCL_BIND_ZONE_MATCH(connp1, connp)) 5159 continue; 5160 5161 /* 5162 * If UDP_EXCLBIND is set for either the bound or 5163 * binding endpoint, the semantics of bind 5164 * is changed according to the following chart. 5165 * 5166 * spec = specified address (v4 or v6) 5167 * unspec = unspecified address (v4 or v6) 5168 * A = specified addresses are different for endpoints 5169 * 5170 * bound bind to allowed? 5171 * ------------------------------------- 5172 * unspec unspec no 5173 * unspec spec no 5174 * spec unspec no 5175 * spec spec yes if A 5176 * 5177 * For labeled systems, SO_MAC_EXEMPT behaves the same 5178 * as UDP_EXCLBIND, except that zoneid is ignored. 5179 */ 5180 if (connp1->conn_exclbind || connp->conn_exclbind || 5181 IPCL_CONNS_MAC(udp1->udp_connp, connp)) { 5182 if (V6_OR_V4_INADDR_ANY( 5183 connp1->conn_bound_addr_v6) || 5184 is_inaddr_any || 5185 IN6_ARE_ADDR_EQUAL( 5186 &connp1->conn_bound_addr_v6, 5187 &v6src)) { 5188 found_exclbind = B_TRUE; 5189 break; 5190 } 5191 continue; 5192 } 5193 5194 /* 5195 * Check ipversion to allow IPv4 and IPv6 sockets to 5196 * have disjoint port number spaces. 5197 */ 5198 if (connp->conn_ipversion != connp1->conn_ipversion) { 5199 5200 /* 5201 * On the first time through the loop, if the 5202 * user intentionally specified a 5203 * particular port number, then ignore any 5204 * bindings of the other protocol that may 5205 * conflict. This allows the user to bind IPv6 5206 * alone and get both v4 and v6, or bind 5207 * both and get each separately. On subsequent 5208 * times through the loop, we're checking a 5209 * port that we chose (not the user) and thus 5210 * we do not allow casual duplicate bindings. 5211 */ 5212 if (count == 0 && requested_port != 0) 5213 continue; 5214 } 5215 5216 /* 5217 * No difference depending on SO_REUSEADDR. 5218 * 5219 * If the existing port is bound to a 5220 * non-wildcard IP address and 5221 * the requesting stream is bound to 5222 * a distinct (also non-wildcard) IP 5223 * address, keep going.
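 * For example (illustrative addresses only): with SO_REUSEADDR clear,
 * a socket bound to 192.0.2.1:5000 does not conflict with one bound to
 * 192.0.2.2:5000, but either of them conflicts with a later bind of
 * INADDR_ANY:5000.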
5224 */ 5225 if (!is_inaddr_any && 5226 !V6_OR_V4_INADDR_ANY(connp1->conn_bound_addr_v6) && 5227 !IN6_ARE_ADDR_EQUAL(&connp1->conn_laddr_v6, 5228 &v6src)) { 5229 continue; 5230 } 5231 5232 /* 5233 * if bound conn has reuseport set and conn requests 5234 * reuseport, check if cred matches. If they match, 5235 * allow conn to proceed. 5236 */ 5237 if (connp->conn_reuseport && connp1->conn_reuseport) { 5238 cred_t *bcred = connp1->conn_cred; 5239 cred_t *ncred = connp->conn_cred; 5240 if (crgetuid(bcred) == crgetuid(ncred) && 5241 crgetzoneid(bcred) == crgetzoneid(ncred)) { 5242 (void) udp_reuselist_add(reusep, connp1, 5243 B_FALSE); 5244 continue; 5245 } 5246 } 5247 5248 break; 5249 } 5250 5251 5252 if (!found_exclbind && 5253 (connp->conn_reuseaddr && requested_port != 0)) { 5254 if (reusep != NULL) { 5255 mutex_destroy(&reusep->ru_lock); 5256 kmem_free(reusep, sizeof (*reusep)); 5257 } 5258 break; 5259 } 5260 5261 if (udp1 == NULL) { 5262 /* 5263 * No other stream has this IP address and port number 5264 * or all have reuseport set. We can use it. 5265 */ 5266 if (connp->conn_reuseport) { 5267 struct reuselist *old = NULL; 5268 5269 if (reusep->ru_entries > 0) { 5270 old = reusep->ru_conns[0]-> 5271 conn_reuselist; 5272 ASSERT(old != NULL); 5273 } 5274 if (old != NULL) { 5275 int i; 5276 5277 mutex_enter(&reusep->ru_lock); 5278 for (i = 0; i < old->ru_entries; ++i) { 5279 ASSERT(old->ru_conns[i]-> 5280 conn_reuselist == old); 5281 old->ru_conns[i]->conn_reuselist 5282 = reusep; 5283 } 5284 mutex_exit(&reusep->ru_lock); 5285 } 5286 if (udp_reuselist_add(reusep, connp, B_TRUE) 5287 < 0) { 5288 /* 5289 * table full, reject request. As we 5290 * have already replaced the table, 5291 * leave the new one in the conns and 5292 * free the old 5293 */ 5294 if (old != NULL) { 5295 mutex_destroy(&old->ru_lock); 5296 kmem_free(old, 5297 sizeof (*reusep)); 5298 } 5299 mutex_exit(&udpf->uf_lock); 5300 mutex_exit(&connp->conn_lock); 5301 return (-TADDRBUSY); 5302 } 5303 connp->conn_reuselist = reusep; 5304 if (old != NULL) { 5305 mutex_destroy(&old->ru_lock); 5306 kmem_free(old, sizeof (*reusep)); 5307 } 5308 } 5309 break; 5310 } 5311 mutex_exit(&udpf->uf_lock); 5312 5313 if (connp->conn_reuseport) { 5314 /* reject for all other cases */ 5315 mutex_exit(&connp->conn_lock); 5316 mutex_destroy(&reusep->ru_lock); 5317 kmem_free(reusep, sizeof (*reusep)); 5318 return (-TADDRBUSY); 5319 } 5320 5321 if (bind_to_req_port_only) { 5322 /* 5323 * We get here only when requested port 5324 * is bound (and only first of the for() 5325 * loop iteration). 5326 * 5327 * The semantics of this bind request 5328 * require it to fail so we return from 5329 * the routine (and exit the loop). 5330 * 5331 */ 5332 mutex_exit(&connp->conn_lock); 5333 return (-TADDRBUSY); 5334 } 5335 5336 if (connp->conn_anon_priv_bind) { 5337 port = udp_get_next_priv_port(udp); 5338 } else { 5339 if ((count == 0) && (requested_port != 0)) { 5340 /* 5341 * If the application wants us to find 5342 * a port, get one to start with. Set 5343 * requested_port to 0, so that we will 5344 * update us->us_next_port_to_try below. 5345 */ 5346 port = udp_update_next_port(udp, 5347 us->us_next_port_to_try, B_TRUE); 5348 requested_port = 0; 5349 } else { 5350 port = udp_update_next_port(udp, port + 1, 5351 B_FALSE); 5352 } 5353 } 5354 5355 if (port == 0 || ++count >= loopmax) { 5356 /* 5357 * We've tried every possible port number and 5358 * there are none available, so send an error 5359 * to the user. 
5360 */ 5361 mutex_exit(&connp->conn_lock); 5362 return (-TNOADDR); 5363 } 5364 } 5365 5366 /* 5367 * Copy the source address into our udp structure. This address 5368 * may still be zero; if so, ip_attr_connect will fill in the correct 5369 * address when a packet is about to be sent. 5370 * If we are binding to a broadcast or multicast address then 5371 * we just set the conn_bound_addr since we don't want to use 5372 * that as the source address when sending. 5373 */ 5374 connp->conn_bound_addr_v6 = v6src; 5375 connp->conn_laddr_v6 = v6src; 5376 if (scopeid != 0) { 5377 connp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET; 5378 connp->conn_ixa->ixa_scopeid = scopeid; 5379 connp->conn_incoming_ifindex = scopeid; 5380 } else { 5381 connp->conn_ixa->ixa_flags &= ~IXAF_SCOPEID_SET; 5382 connp->conn_incoming_ifindex = connp->conn_bound_if; 5383 } 5384 5385 switch (laddr_type) { 5386 case IPVL_UNICAST_UP: 5387 case IPVL_UNICAST_DOWN: 5388 connp->conn_saddr_v6 = v6src; 5389 connp->conn_mcbc_bind = B_FALSE; 5390 break; 5391 case IPVL_MCAST: 5392 case IPVL_BCAST: 5393 /* ip_set_destination will pick a source address later */ 5394 connp->conn_saddr_v6 = ipv6_all_zeros; 5395 connp->conn_mcbc_bind = B_TRUE; 5396 break; 5397 } 5398 5399 /* Any errors after this point should use late_error */ 5400 connp->conn_lport = lport; 5401 5402 /* 5403 * Now reset the next anonymous port if the application requested 5404 * an anonymous port, or we handed out the next anonymous port. 5405 */ 5406 if ((requested_port == 0) && (!connp->conn_anon_priv_bind)) { 5407 us->us_next_port_to_try = port + 1; 5408 } 5409 5410 /* Initialize the T_BIND_ACK. */ 5411 if (connp->conn_family == AF_INET) { 5412 sin->sin_port = connp->conn_lport; 5413 } else { 5414 sin6->sin6_port = connp->conn_lport; 5415 } 5416 udp->udp_state = TS_IDLE; 5417 udp_bind_hash_insert(udpf, udp); 5418 mutex_exit(&udpf->uf_lock); 5419 mutex_exit(&connp->conn_lock); 5420 5421 if (cl_inet_bind) { 5422 /* 5423 * Running in cluster mode - register bind information 5424 */ 5425 if (connp->conn_ipversion == IPV4_VERSION) { 5426 (*cl_inet_bind)(connp->conn_netstack->netstack_stackid, 5427 IPPROTO_UDP, AF_INET, (uint8_t *)&v4src, 5428 (in_port_t)connp->conn_lport, NULL); 5429 } else { 5430 (*cl_inet_bind)(connp->conn_netstack->netstack_stackid, 5431 IPPROTO_UDP, AF_INET6, (uint8_t *)&v6src, 5432 (in_port_t)connp->conn_lport, NULL); 5433 } 5434 } 5435 5436 mutex_enter(&connp->conn_lock); 5437 connp->conn_anon_port = (is_system_labeled() && requested_port == 0); 5438 if (is_system_labeled() && (!connp->conn_anon_port || 5439 connp->conn_anon_mlp)) { 5440 uint16_t mlpport; 5441 zone_t *zone; 5442 5443 zone = crgetzone(cr); 5444 connp->conn_mlp_type = 5445 connp->conn_recv_ancillary.crb_recvucred ? mlptBoth : 5446 mlptSingle; 5447 addrtype = tsol_mlp_addr_type( 5448 connp->conn_allzones ? ALL_ZONES : zone->zone_id, 5449 IPV6_VERSION, &v6src, us->us_netstack->netstack_ip); 5450 if (addrtype == mlptSingle) { 5451 error = -TNOADDR; 5452 mutex_exit(&connp->conn_lock); 5453 goto late_error; 5454 } 5455 mlpport = connp->conn_anon_port ? PMAPPORT : port; 5456 mlptype = tsol_mlp_port_type(zone, IPPROTO_UDP, mlpport, 5457 addrtype); 5458 5459 /* 5460 * It is a coding error to attempt to bind an MLP port 5461 * without first setting SOL_SOCKET/SCM_UCRED. 
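 * (The usual way that option arrives is that the application enables
 * SO_RECVUCRED, which turns on crb_recvucred above and makes
 * conn_mlp_type mlptBoth rather than mlptSingle.)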
5462 */ 5463 if (mlptype != mlptSingle && 5464 connp->conn_mlp_type == mlptSingle) { 5465 error = EINVAL; 5466 mutex_exit(&connp->conn_lock); 5467 goto late_error; 5468 } 5469 5470 /* 5471 * It is an access violation to attempt to bind an MLP port 5472 * without NET_BINDMLP privilege. 5473 */ 5474 if (mlptype != mlptSingle && 5475 secpolicy_net_bindmlp(cr) != 0) { 5476 if (connp->conn_debug) { 5477 (void) strlog(UDP_MOD_ID, 0, 1, 5478 SL_ERROR|SL_TRACE, 5479 "udp_bind: no priv for multilevel port %d", 5480 mlpport); 5481 } 5482 error = -TACCES; 5483 mutex_exit(&connp->conn_lock); 5484 goto late_error; 5485 } 5486 5487 /* 5488 * If we're specifically binding a shared IP address and the 5489 * port is MLP on shared addresses, then check to see if this 5490 * zone actually owns the MLP. Reject if not. 5491 */ 5492 if (mlptype == mlptShared && addrtype == mlptShared) { 5493 /* 5494 * No need to handle exclusive-stack zones since 5495 * ALL_ZONES only applies to the shared stack. 5496 */ 5497 zoneid_t mlpzone; 5498 5499 mlpzone = tsol_mlp_findzone(IPPROTO_UDP, 5500 htons(mlpport)); 5501 if (connp->conn_zoneid != mlpzone) { 5502 if (connp->conn_debug) { 5503 (void) strlog(UDP_MOD_ID, 0, 1, 5504 SL_ERROR|SL_TRACE, 5505 "udp_bind: attempt to bind port " 5506 "%d on shared addr in zone %d " 5507 "(should be %d)", 5508 mlpport, connp->conn_zoneid, 5509 mlpzone); 5510 } 5511 error = -TACCES; 5512 mutex_exit(&connp->conn_lock); 5513 goto late_error; 5514 } 5515 } 5516 if (connp->conn_anon_port) { 5517 error = tsol_mlp_anon(zone, mlptype, connp->conn_proto, 5518 port, B_TRUE); 5519 if (error != 0) { 5520 if (connp->conn_debug) { 5521 (void) strlog(UDP_MOD_ID, 0, 1, 5522 SL_ERROR|SL_TRACE, 5523 "udp_bind: cannot establish anon " 5524 "MLP for port %d", port); 5525 } 5526 error = -TACCES; 5527 mutex_exit(&connp->conn_lock); 5528 goto late_error; 5529 } 5530 } 5531 connp->conn_mlp_type = mlptype; 5532 } 5533 5534 /* 5535 * We create an initial header template here to make a subsequent 5536 * sendto have a starting point. Since conn_last_dst is zero the 5537 * first sendto will always follow the 'dst changed' code path. 5538 * Note that we defer massaging options and the related checksum 5539 * adjustment until we have a destination address. 
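 * At this point the template only reflects the local address and port:
 * it is built from conn_saddr_v6 while conn_faddr_v6/conn_fport are
 * still zero (and are explicitly re-zeroed right below, just in case).
 * The destination is filled in per datagram by the prepend routines.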
5540 */ 5541 error = udp_build_hdr_template(connp, &connp->conn_saddr_v6, 5542 &connp->conn_faddr_v6, connp->conn_fport, connp->conn_flowinfo); 5543 if (error != 0) { 5544 mutex_exit(&connp->conn_lock); 5545 goto late_error; 5546 } 5547 /* Just in case */ 5548 connp->conn_faddr_v6 = ipv6_all_zeros; 5549 connp->conn_fport = 0; 5550 connp->conn_v6lastdst = ipv6_all_zeros; 5551 mutex_exit(&connp->conn_lock); 5552 5553 error = ip_laddr_fanout_insert(connp); 5554 if (error != 0) 5555 goto late_error; 5556 5557 /* Bind succeeded */ 5558 return (0); 5559 5560 late_error: 5561 /* We had already picked the port number, and then the bind failed */ 5562 mutex_enter(&connp->conn_lock); 5563 udpf = &us->us_bind_fanout[ 5564 UDP_BIND_HASH(connp->conn_lport, 5565 us->us_bind_fanout_size)]; 5566 mutex_enter(&udpf->uf_lock); 5567 connp->conn_saddr_v6 = ipv6_all_zeros; 5568 connp->conn_bound_addr_v6 = ipv6_all_zeros; 5569 connp->conn_laddr_v6 = ipv6_all_zeros; 5570 if (scopeid != 0) { 5571 connp->conn_ixa->ixa_flags &= ~IXAF_SCOPEID_SET; 5572 connp->conn_incoming_ifindex = connp->conn_bound_if; 5573 } 5574 udp->udp_state = TS_UNBND; 5575 udp_bind_hash_remove(udp, B_TRUE); 5576 udp_reuselist_remove(connp); 5577 connp->conn_lport = 0; 5578 mutex_exit(&udpf->uf_lock); 5579 connp->conn_anon_port = B_FALSE; 5580 connp->conn_mlp_type = mlptSingle; 5581 5582 connp->conn_v6lastdst = ipv6_all_zeros; 5583 5584 /* Restore the header that was built above - different source address */ 5585 (void) udp_build_hdr_template(connp, &connp->conn_saddr_v6, 5586 &connp->conn_faddr_v6, connp->conn_fport, connp->conn_flowinfo); 5587 mutex_exit(&connp->conn_lock); 5588 return (error); 5589 } 5590 5591 int 5592 udp_bind(sock_lower_handle_t proto_handle, struct sockaddr *sa, 5593 socklen_t len, cred_t *cr) 5594 { 5595 int error; 5596 conn_t *connp; 5597 5598 /* All Solaris components should pass a cred for this operation. */ 5599 ASSERT(cr != NULL); 5600 5601 connp = (conn_t *)proto_handle; 5602 5603 if (sa == NULL) 5604 error = udp_do_unbind(connp); 5605 else 5606 error = udp_do_bind(connp, sa, len, cr, B_TRUE); 5607 5608 if (error < 0) { 5609 if (error == -TOUTSTATE) 5610 error = EINVAL; 5611 else 5612 error = proto_tlitosyserr(-error); 5613 } 5614 5615 return (error); 5616 } 5617 5618 static int 5619 udp_implicit_bind(conn_t *connp, cred_t *cr) 5620 { 5621 sin6_t sin6addr; 5622 sin_t *sin; 5623 sin6_t *sin6; 5624 socklen_t len; 5625 int error; 5626 5627 /* All Solaris components should pass a cred for this operation. */ 5628 ASSERT(cr != NULL); 5629 5630 if (connp->conn_family == AF_INET) { 5631 len = sizeof (struct sockaddr_in); 5632 sin = (sin_t *)&sin6addr; 5633 *sin = sin_null; 5634 sin->sin_family = AF_INET; 5635 sin->sin_addr.s_addr = INADDR_ANY; 5636 } else { 5637 ASSERT(connp->conn_family == AF_INET6); 5638 len = sizeof (sin6_t); 5639 sin6 = (sin6_t *)&sin6addr; 5640 *sin6 = sin6_null; 5641 sin6->sin6_family = AF_INET6; 5642 V6_SET_ZERO(sin6->sin6_addr); 5643 } 5644 5645 error = udp_do_bind(connp, (struct sockaddr *)&sin6addr, len, 5646 cr, B_FALSE); 5647 return ((error < 0) ? proto_tlitosyserr(-error) : error); 5648 } 5649 5650 /* 5651 * This routine removes a port number association from a stream. It 5652 * is called by udp_unbind and udp_tpi_unbind. 
5653 */ 5654 static int 5655 udp_do_unbind(conn_t *connp) 5656 { 5657 udp_t *udp = connp->conn_udp; 5658 udp_fanout_t *udpf; 5659 udp_stack_t *us = udp->udp_us; 5660 5661 if (cl_inet_unbind != NULL) { 5662 /* 5663 * Running in cluster mode - register unbind information 5664 */ 5665 if (connp->conn_ipversion == IPV4_VERSION) { 5666 (*cl_inet_unbind)( 5667 connp->conn_netstack->netstack_stackid, 5668 IPPROTO_UDP, AF_INET, 5669 (uint8_t *)(&V4_PART_OF_V6(connp->conn_laddr_v6)), 5670 (in_port_t)connp->conn_lport, NULL); 5671 } else { 5672 (*cl_inet_unbind)( 5673 connp->conn_netstack->netstack_stackid, 5674 IPPROTO_UDP, AF_INET6, 5675 (uint8_t *)&(connp->conn_laddr_v6), 5676 (in_port_t)connp->conn_lport, NULL); 5677 } 5678 } 5679 5680 mutex_enter(&connp->conn_lock); 5681 /* If a bind has not been done, we can't unbind. */ 5682 if (udp->udp_state == TS_UNBND) { 5683 mutex_exit(&connp->conn_lock); 5684 return (-TOUTSTATE); 5685 } 5686 udpf = &us->us_bind_fanout[UDP_BIND_HASH(connp->conn_lport, 5687 us->us_bind_fanout_size)]; 5688 mutex_enter(&udpf->uf_lock); 5689 udp_bind_hash_remove(udp, B_TRUE); 5690 udp_reuselist_remove(connp); 5691 connp->conn_saddr_v6 = ipv6_all_zeros; 5692 connp->conn_bound_addr_v6 = ipv6_all_zeros; 5693 connp->conn_laddr_v6 = ipv6_all_zeros; 5694 connp->conn_mcbc_bind = B_FALSE; 5695 connp->conn_lport = 0; 5696 /* In case we were also connected */ 5697 connp->conn_faddr_v6 = ipv6_all_zeros; 5698 connp->conn_fport = 0; 5699 mutex_exit(&udpf->uf_lock); 5700 5701 connp->conn_v6lastdst = ipv6_all_zeros; 5702 udp->udp_state = TS_UNBND; 5703 5704 (void) udp_build_hdr_template(connp, &connp->conn_saddr_v6, 5705 &connp->conn_faddr_v6, connp->conn_fport, connp->conn_flowinfo); 5706 mutex_exit(&connp->conn_lock); 5707 5708 ip_unbind(connp); 5709 5710 return (0); 5711 } 5712 5713 /* 5714 * It associates a default destination address with the stream. 5715 */ 5716 static int 5717 udp_do_connect(conn_t *connp, const struct sockaddr *sa, socklen_t len, 5718 cred_t *cr, pid_t pid) 5719 { 5720 sin6_t *sin6; 5721 sin_t *sin; 5722 in6_addr_t v6dst; 5723 ipaddr_t v4dst; 5724 uint16_t dstport; 5725 uint32_t flowinfo; 5726 udp_fanout_t *udpf; 5727 udp_t *udp, *udp1; 5728 ushort_t ipversion; 5729 udp_stack_t *us; 5730 int error; 5731 conn_t *connp1; 5732 ip_xmit_attr_t *ixa; 5733 ip_xmit_attr_t *oldixa; 5734 uint_t scopeid = 0; 5735 uint_t srcid = 0; 5736 in6_addr_t v6src = connp->conn_saddr_v6; 5737 boolean_t v4mapped; 5738 5739 udp = connp->conn_udp; 5740 us = udp->udp_us; 5741 5742 /* 5743 * Address has been verified by the caller 5744 */ 5745 switch (len) { 5746 default: 5747 /* 5748 * Should never happen 5749 */ 5750 return (EINVAL); 5751 5752 case sizeof (sin_t): 5753 sin = (sin_t *)sa; 5754 v4dst = sin->sin_addr.s_addr; 5755 dstport = sin->sin_port; 5756 IN6_IPADDR_TO_V4MAPPED(v4dst, &v6dst); 5757 ASSERT(connp->conn_ipversion == IPV4_VERSION); 5758 ipversion = IPV4_VERSION; 5759 break; 5760 5761 case sizeof (sin6_t): 5762 sin6 = (sin6_t *)sa; 5763 v6dst = sin6->sin6_addr; 5764 dstport = sin6->sin6_port; 5765 srcid = sin6->__sin6_src_id; 5766 v4mapped = IN6_IS_ADDR_V4MAPPED(&v6dst); 5767 if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&v6src)) { 5768 if (!ip_srcid_find_id(srcid, &v6src, IPCL_ZONEID(connp), 5769 v4mapped, connp->conn_netstack)) { 5770 /* Mismatch v4mapped/v6 specified by srcid. */ 5771 return (EADDRNOTAVAIL); 5772 } 5773 } 5774 if (v4mapped) { 5775 if (connp->conn_ipv6_v6only) 5776 return (EADDRNOTAVAIL); 5777 5778 /* 5779 * Destination adress is mapped IPv6 address. 
5780 * Source bound address should be unspecified or 5781 * IPv6 mapped address as well. 5782 */ 5783 if (!IN6_IS_ADDR_UNSPECIFIED( 5784 &connp->conn_bound_addr_v6) && 5785 !IN6_IS_ADDR_V4MAPPED(&connp->conn_bound_addr_v6)) { 5786 return (EADDRNOTAVAIL); 5787 } 5788 IN6_V4MAPPED_TO_IPADDR(&v6dst, v4dst); 5789 ipversion = IPV4_VERSION; 5790 flowinfo = 0; 5791 } else { 5792 ipversion = IPV6_VERSION; 5793 flowinfo = sin6->sin6_flowinfo; 5794 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) 5795 scopeid = sin6->sin6_scope_id; 5796 } 5797 break; 5798 } 5799 5800 if (dstport == 0) 5801 return (-TBADADDR); 5802 5803 /* 5804 * If there is a different thread using conn_ixa then we get a new 5805 * copy and cut the old one loose from conn_ixa. Otherwise we use 5806 * conn_ixa and prevent any other thread from using/changing it. 5807 * Once connect() is done other threads can use conn_ixa since the 5808 * refcnt will be back at one. 5809 * We defer updating conn_ixa until later to handle any concurrent 5810 * conn_ixa_cleanup thread. 5811 */ 5812 ixa = conn_get_ixa(connp, B_FALSE); 5813 if (ixa == NULL) 5814 return (ENOMEM); 5815 5816 mutex_enter(&connp->conn_lock); 5817 /* 5818 * This udp_t must have bound to a port already before doing a connect. 5819 * Reject if a connect is in progress (we drop conn_lock during 5820 * udp_do_connect). 5821 */ 5822 if (udp->udp_state == TS_UNBND || udp->udp_state == TS_WCON_CREQ) { 5823 mutex_exit(&connp->conn_lock); 5824 (void) strlog(UDP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 5825 "udp_connect: bad state, %u", udp->udp_state); 5826 ixa_refrele(ixa); 5827 return (-TOUTSTATE); 5828 } 5829 ASSERT(connp->conn_lport != 0 && udp->udp_ptpbhn != NULL); 5830 5831 udpf = &us->us_bind_fanout[UDP_BIND_HASH(connp->conn_lport, 5832 us->us_bind_fanout_size)]; 5833 5834 mutex_enter(&udpf->uf_lock); 5835 if (udp->udp_state == TS_DATA_XFER) { 5836 /* Already connected - clear out state */ 5837 if (connp->conn_mcbc_bind) 5838 connp->conn_saddr_v6 = ipv6_all_zeros; 5839 else 5840 connp->conn_saddr_v6 = connp->conn_bound_addr_v6; 5841 connp->conn_laddr_v6 = connp->conn_bound_addr_v6; 5842 connp->conn_faddr_v6 = ipv6_all_zeros; 5843 connp->conn_fport = 0; 5844 udp->udp_state = TS_IDLE; 5845 } 5846 5847 connp->conn_fport = dstport; 5848 connp->conn_ipversion = ipversion; 5849 if (ipversion == IPV4_VERSION) { 5850 /* 5851 * Interpret a zero destination to mean loopback. 5852 * Update the T_CONN_REQ (sin/sin6) since it is used to 5853 * generate the T_CONN_CON. 5854 */ 5855 if (v4dst == INADDR_ANY) { 5856 v4dst = htonl(INADDR_LOOPBACK); 5857 IN6_IPADDR_TO_V4MAPPED(v4dst, &v6dst); 5858 if (connp->conn_family == AF_INET) { 5859 sin->sin_addr.s_addr = v4dst; 5860 } else { 5861 sin6->sin6_addr = v6dst; 5862 } 5863 } 5864 connp->conn_faddr_v6 = v6dst; 5865 connp->conn_flowinfo = 0; 5866 } else { 5867 ASSERT(connp->conn_ipversion == IPV6_VERSION); 5868 /* 5869 * Interpret a zero destination to mean loopback. 5870 * Update the T_CONN_REQ (sin/sin6) since it is used to 5871 * generate the T_CONN_CON. 
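 * For example, a connect() to the unspecified address :: behaves like
 * a connect() to ::1, just as 0.0.0.0 maps to 127.0.0.1 in the IPv4
 * branch above.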
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&v6dst)) {
			v6dst = ipv6_loopback;
			sin6->sin6_addr = v6dst;
		}
		connp->conn_faddr_v6 = v6dst;
		connp->conn_flowinfo = flowinfo;
	}
	mutex_exit(&udpf->uf_lock);

	/*
	 * We update our cred/cpid based on the caller of connect
	 */
	if (connp->conn_cred != cr) {
		crhold(cr);
		crfree(connp->conn_cred);
		connp->conn_cred = cr;
	}
	connp->conn_cpid = pid;
	ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED));
	ixa->ixa_cred = cr;
	ixa->ixa_cpid = pid;
	if (is_system_labeled()) {
		/* We need to restart with a label based on the cred */
		ip_xmit_attr_restore_tsl(ixa, ixa->ixa_cred);
	}

	if (scopeid != 0) {
		ixa->ixa_flags |= IXAF_SCOPEID_SET;
		ixa->ixa_scopeid = scopeid;
		connp->conn_incoming_ifindex = scopeid;
	} else {
		ixa->ixa_flags &= ~IXAF_SCOPEID_SET;
		connp->conn_incoming_ifindex = connp->conn_bound_if;
	}
	/*
	 * conn_connect will drop conn_lock and reacquire it.
	 * To prevent a send* from messing with this udp_t while the lock
	 * is dropped we set udp_state and clear conn_v6lastdst.
	 * That will make all send* fail with EISCONN.
	 */
	connp->conn_v6lastdst = ipv6_all_zeros;
	udp->udp_state = TS_WCON_CREQ;

	error = conn_connect(connp, NULL, IPDF_ALLOW_MCBC);
	mutex_exit(&connp->conn_lock);
	if (error != 0)
		goto connect_failed;

	/*
	 * The addresses have been verified. Time to insert in
	 * the correct fanout list.
	 */
	error = ipcl_conn_insert(connp);
	if (error != 0)
		goto connect_failed;

	mutex_enter(&connp->conn_lock);
	error = udp_build_hdr_template(connp, &connp->conn_saddr_v6,
	    &connp->conn_faddr_v6, connp->conn_fport, connp->conn_flowinfo);
	if (error != 0) {
		mutex_exit(&connp->conn_lock);
		goto connect_failed;
	}

	udp->udp_state = TS_DATA_XFER;
	/* Record this as the "last" send even though we haven't sent any */
	connp->conn_v6lastdst = connp->conn_faddr_v6;
	connp->conn_lastipversion = connp->conn_ipversion;
	connp->conn_lastdstport = connp->conn_fport;
	connp->conn_lastflowinfo = connp->conn_flowinfo;
	connp->conn_lastscopeid = scopeid;
	connp->conn_lastsrcid = srcid;
	/* Also remember a source to use together with lastdst */
	connp->conn_v6lastsrc = v6src;

	oldixa = conn_replace_ixa(connp, ixa);
	mutex_exit(&connp->conn_lock);
	ixa_refrele(oldixa);

	/*
	 * We've picked a source address above. Now we can
	 * verify that the src/port/dst/port is unique for all
	 * connections in TS_DATA_XFER, skipping ourselves.
	 */
	mutex_enter(&udpf->uf_lock);
	for (udp1 = udpf->uf_udp; udp1 != NULL; udp1 = udp1->udp_bind_hash) {
		if (udp1->udp_state != TS_DATA_XFER)
			continue;

		if (udp1 == udp)
			continue;

		connp1 = udp1->udp_connp;
		if (connp->conn_lport != connp1->conn_lport ||
		    connp->conn_ipversion != connp1->conn_ipversion ||
		    dstport != connp1->conn_fport ||
		    !IN6_ARE_ADDR_EQUAL(&connp->conn_laddr_v6,
		    &connp1->conn_laddr_v6) ||
		    !IN6_ARE_ADDR_EQUAL(&v6dst, &connp1->conn_faddr_v6) ||
		    !(IPCL_ZONE_MATCH(connp, connp1->conn_zoneid) ||
		    IPCL_ZONE_MATCH(connp1, connp->conn_zoneid)))
			continue;
		mutex_exit(&udpf->uf_lock);
		error = -TBADADDR;
		goto connect_failed;
	}
	if (cl_inet_connect2 != NULL) {
		CL_INET_UDP_CONNECT(connp, B_TRUE, &v6dst, dstport, error);
		if (error != 0) {
			mutex_exit(&udpf->uf_lock);
			error = -TBADADDR;
			goto connect_failed;
		}
	}
	mutex_exit(&udpf->uf_lock);

	ixa_refrele(ixa);
	return (0);

connect_failed:
	if (ixa != NULL)
		ixa_refrele(ixa);
	mutex_enter(&connp->conn_lock);
	mutex_enter(&udpf->uf_lock);
	udp->udp_state = TS_IDLE;
	connp->conn_faddr_v6 = ipv6_all_zeros;
	connp->conn_fport = 0;
	/* In case the source address was set above */
	if (connp->conn_mcbc_bind)
		connp->conn_saddr_v6 = ipv6_all_zeros;
	else
		connp->conn_saddr_v6 = connp->conn_bound_addr_v6;
	connp->conn_laddr_v6 = connp->conn_bound_addr_v6;
	mutex_exit(&udpf->uf_lock);

	connp->conn_v6lastdst = ipv6_all_zeros;
	connp->conn_flowinfo = 0;

	(void) udp_build_hdr_template(connp, &connp->conn_saddr_v6,
	    &connp->conn_faddr_v6, connp->conn_fport, connp->conn_flowinfo);
	mutex_exit(&connp->conn_lock);
	return (error);
}

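/*
 * Connect downcall (sd_connect in sock_udp_downcalls below).  A NULL
 * address means disconnect; otherwise verify the address, perform an
 * implicit bind if the endpoint is still unbound, and hand the work off
 * to udp_do_connect().
 */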
static int
udp_connect(sock_lower_handle_t proto_handle, const struct sockaddr *sa,
    socklen_t len, sock_connid_t *id, cred_t *cr)
{
	conn_t	*connp = (conn_t *)proto_handle;
	udp_t	*udp = connp->conn_udp;
	int	error;
	boolean_t did_bind = B_FALSE;
	pid_t	pid = curproc->p_pid;

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	if (sa == NULL) {
		/*
		 * Disconnect - make sure we are connected.
		 */
		if (udp->udp_state != TS_DATA_XFER)
			return (EINVAL);

		error = udp_disconnect(connp);
		return (error);
	}

	error = proto_verify_ip_addr(connp->conn_family, sa, len);
	if (error != 0)
		goto done;

	/* do an implicit bind if necessary */
	if (udp->udp_state == TS_UNBND) {
		error = udp_implicit_bind(connp, cr);
		/*
		 * We could be racing with an actual bind, in which case
		 * we would see EPROTO. We cross our fingers and try
		 * to connect.
		 */
		if (!(error == 0 || error == EPROTO))
			goto done;
		did_bind = B_TRUE;
	}
	/*
	 * Set SO_DGRAM_ERRIND.
	 */
	connp->conn_dgram_errind = B_TRUE;

	error = udp_do_connect(connp, sa, len, cr, pid);

	if (error != 0 && did_bind) {
		int unbind_err;

		unbind_err = udp_do_unbind(connp);
		ASSERT(unbind_err == 0);
	}

	if (error == 0) {
		*id = 0;
		(*connp->conn_upcalls->su_connected)
		    (connp->conn_upper_handle, 0, NULL, -1);
	} else if (error < 0) {
		error = proto_tlitosyserr(-error);
	}

done:
	if (error != 0 && udp->udp_state == TS_DATA_XFER) {
		/*
		 * No need to hold locks to set the state: after a connect
		 * failure the socket state is undefined.  We set it only
		 * to imitate the old sockfs behavior.
		 */
		udp->udp_state = TS_IDLE;
	}
	return (error);
}

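/*
 * Send downcall (sd_send).  Dispatches the datagram based on whether a
 * destination address was supplied and whether ancillary data is present:
 * connected sends go through udp_output_connected(), sends with control
 * data go through udp_output_ancillary(), repeats to the previous
 * destination take the udp_output_lastdst() fast path, and everything else
 * goes through udp_output_newdst().
 */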
int
udp_send(sock_lower_handle_t proto_handle, mblk_t *mp, struct nmsghdr *msg,
    cred_t *cr)
{
	sin6_t		*sin6;
	sin_t		*sin = NULL;
	uint_t		srcid;
	conn_t		*connp = (conn_t *)proto_handle;
	udp_t		*udp = connp->conn_udp;
	int		error = 0;
	udp_stack_t	*us = udp->udp_us;
	ushort_t	ipversion;
	pid_t		pid = curproc->p_pid;
	ip_xmit_attr_t	*ixa;

	ASSERT(DB_TYPE(mp) == M_DATA);

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	/* do an implicit bind if necessary */
	if (udp->udp_state == TS_UNBND) {
		error = udp_implicit_bind(connp, cr);
		/*
		 * We could be racing with an actual bind, in which case
		 * we would see EPROTO. We cross our fingers and try
		 * to connect.
		 */
		if (!(error == 0 || error == EPROTO)) {
			freemsg(mp);
			return (error);
		}
	}

	/* Connected? */
	if (msg->msg_name == NULL) {
		if (udp->udp_state != TS_DATA_XFER) {
			UDPS_BUMP_MIB(us, udpOutErrors);
			return (EDESTADDRREQ);
		}
		if (msg->msg_controllen != 0) {
			error = udp_output_ancillary(connp, NULL, NULL, mp,
			    NULL, msg, cr, pid);
		} else {
			error = udp_output_connected(connp, mp, cr, pid);
		}
		if (us->us_sendto_ignerr)
			return (0);
		else
			return (error);
	}
	if (udp->udp_state == TS_DATA_XFER) {
		UDPS_BUMP_MIB(us, udpOutErrors);
		return (EISCONN);
	}
	error = proto_verify_ip_addr(connp->conn_family,
	    (struct sockaddr *)msg->msg_name, msg->msg_namelen);
	if (error != 0) {
		UDPS_BUMP_MIB(us, udpOutErrors);
		return (error);
	}
	switch (connp->conn_family) {
	case AF_INET6:
		sin6 = (sin6_t *)msg->msg_name;

		srcid = sin6->__sin6_src_id;

		if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			/*
			 * Destination is a non-IPv4-compatible IPv6 address.
			 * Send out an IPv6 format packet.
			 */

			/*
			 * If the local address is a mapped address return
			 * an error.
			 * It would be possible to send an IPv6 packet but the
			 * response would never make it back to the application
			 * since it is bound to a mapped address.
			 */
			if (IN6_IS_ADDR_V4MAPPED(&connp->conn_saddr_v6)) {
				UDPS_BUMP_MIB(us, udpOutErrors);
				return (EADDRNOTAVAIL);
			}
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				sin6->sin6_addr = ipv6_loopback;
			ipversion = IPV6_VERSION;
		} else {
			if (connp->conn_ipv6_v6only) {
				UDPS_BUMP_MIB(us, udpOutErrors);
				return (EADDRNOTAVAIL);
			}

			/*
			 * If the local address is not zero or a mapped address
			 * return an error. It would be possible to send an
			 * IPv4 packet but the response would never make it
			 * back to the application since it is bound to a
			 * non-mapped address.
			 */
			if (!IN6_IS_ADDR_V4MAPPED(&connp->conn_saddr_v6) &&
			    !IN6_IS_ADDR_UNSPECIFIED(&connp->conn_saddr_v6)) {
				UDPS_BUMP_MIB(us, udpOutErrors);
				return (EADDRNOTAVAIL);
			}

			if (V4_PART_OF_V6(sin6->sin6_addr) == INADDR_ANY) {
				V4_PART_OF_V6(sin6->sin6_addr) =
				    htonl(INADDR_LOOPBACK);
			}
			ipversion = IPV4_VERSION;
		}

		/*
		 * We have to allocate an ip_xmit_attr_t before we grab
		 * conn_lock, and we need to hold conn_lock once we've checked
		 * conn_same_as_last_v6 to handle concurrent send* calls on a
		 * socket.
		 */
		if (msg->msg_controllen == 0) {
			ixa = conn_get_ixa(connp, B_FALSE);
			if (ixa == NULL) {
				UDPS_BUMP_MIB(us, udpOutErrors);
				return (ENOMEM);
			}
		} else {
			ixa = NULL;
		}
		mutex_enter(&connp->conn_lock);
		if (udp->udp_delayed_error != 0) {
			sin6_t	*sin2 = (sin6_t *)&udp->udp_delayed_addr;

			error = udp->udp_delayed_error;
			udp->udp_delayed_error = 0;

			/* Compare IP address, port, and family */

			if (sin6->sin6_port == sin2->sin6_port &&
			    IN6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
			    &sin2->sin6_addr) &&
			    sin6->sin6_family == sin2->sin6_family) {
				mutex_exit(&connp->conn_lock);
				UDPS_BUMP_MIB(us, udpOutErrors);
				if (ixa != NULL)
					ixa_refrele(ixa);
				return (error);
			}
		}

		if (msg->msg_controllen != 0) {
			mutex_exit(&connp->conn_lock);
			ASSERT(ixa == NULL);
			error = udp_output_ancillary(connp, NULL, sin6, mp,
			    NULL, msg, cr, pid);
		} else if (conn_same_as_last_v6(connp, sin6) &&
		    connp->conn_lastsrcid == srcid &&
		    ipsec_outbound_policy_current(ixa)) {
			/* udp_output_lastdst drops conn_lock */
			error = udp_output_lastdst(connp, mp, cr, pid, ixa);
		} else {
			/* udp_output_newdst drops conn_lock */
			error = udp_output_newdst(connp, mp, NULL, sin6,
			    ipversion, cr, pid, ixa);
		}
		ASSERT(MUTEX_NOT_HELD(&connp->conn_lock));
		if (us->us_sendto_ignerr)
			return (0);
		else
			return (error);
	case AF_INET:
		sin = (sin_t *)msg->msg_name;

		ipversion = IPV4_VERSION;

		if (sin->sin_addr.s_addr == INADDR_ANY)
			sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);

		/*
		 * We have to allocate an ip_xmit_attr_t before we grab
		 * conn_lock, and we need to hold conn_lock once we've checked
		 * conn_same_as_last_v4 to handle concurrent send* on a socket.
		 */
		if (msg->msg_controllen == 0) {
			ixa = conn_get_ixa(connp, B_FALSE);
			if (ixa == NULL) {
				UDPS_BUMP_MIB(us, udpOutErrors);
				return (ENOMEM);
			}
		} else {
			ixa = NULL;
		}
		mutex_enter(&connp->conn_lock);
		if (udp->udp_delayed_error != 0) {
			sin_t	*sin2 = (sin_t *)&udp->udp_delayed_addr;

			error = udp->udp_delayed_error;
			udp->udp_delayed_error = 0;

			/* Compare IP address and port */

			if (sin->sin_port == sin2->sin_port &&
			    sin->sin_addr.s_addr == sin2->sin_addr.s_addr) {
				mutex_exit(&connp->conn_lock);
				UDPS_BUMP_MIB(us, udpOutErrors);
				if (ixa != NULL)
					ixa_refrele(ixa);
				return (error);
			}
		}
		if (msg->msg_controllen != 0) {
			mutex_exit(&connp->conn_lock);
			ASSERT(ixa == NULL);
			error = udp_output_ancillary(connp, sin, NULL, mp,
			    NULL, msg, cr, pid);
		} else if (conn_same_as_last_v4(connp, sin) &&
		    ipsec_outbound_policy_current(ixa)) {
			/* udp_output_lastdst drops conn_lock */
			error = udp_output_lastdst(connp, mp, cr, pid, ixa);
		} else {
			/* udp_output_newdst drops conn_lock */
			error = udp_output_newdst(connp, mp, sin, NULL,
			    ipversion, cr, pid, ixa);
		}
		ASSERT(MUTEX_NOT_HELD(&connp->conn_lock));
		if (us->us_sendto_ignerr)
			return (0);
		else
			return (error);
	default:
		return (EINVAL);
	}
}

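/*
 * Fall back from a non-STREAMS socket to a STREAMS stream: attach the
 * conn_t to the supplied queue pair, notify the stream head of its options,
 * sync state with the sonode through the quiesced callback, and then push
 * up any datagrams that were queued while the fallback was in progress.
 */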
int
udp_fallback(sock_lower_handle_t proto_handle, queue_t *q,
    boolean_t issocket, so_proto_quiesced_cb_t quiesced_cb,
    sock_quiesce_arg_t *arg)
{
	conn_t	*connp = (conn_t *)proto_handle;
	udp_t	*udp;
	struct T_capability_ack tca;
	struct sockaddr_in6 laddr, faddr;
	socklen_t laddrlen, faddrlen;
	short opts;
	struct stroptions *stropt;
	mblk_t *mp, *stropt_mp;
	int error;

	udp = connp->conn_udp;

	stropt_mp = allocb_wait(sizeof (*stropt), BPRI_HI, STR_NOSIG, NULL);

	/*
	 * Set up the fallback stream that was allocated.
	 */
	connp->conn_dev = (dev_t)RD(q)->q_ptr;
	connp->conn_minor_arena = WR(q)->q_ptr;

	RD(q)->q_ptr = WR(q)->q_ptr = connp;

	WR(q)->q_qinfo = &udp_winit;

	connp->conn_rq = RD(q);
	connp->conn_wq = WR(q);

	/* Notify stream head about options before sending up data */
	stropt_mp->b_datap->db_type = M_SETOPTS;
	stropt_mp->b_wptr += sizeof (*stropt);
	stropt = (struct stroptions *)stropt_mp->b_rptr;
	stropt->so_flags = SO_WROFF | SO_HIWAT;
	stropt->so_wroff = connp->conn_wroff;
	stropt->so_hiwat = udp->udp_rcv_disply_hiwat;
	putnext(RD(q), stropt_mp);

	/*
	 * Free the helper stream
	 */
	ip_free_helper_stream(connp);

	if (!issocket)
		udp_use_pure_tpi(udp);

	/*
	 * Collect the information needed to sync with the sonode
	 */
	udp_do_capability_ack(udp, &tca, TC1_INFO);

	laddrlen = faddrlen = sizeof (sin6_t);
	(void) udp_getsockname((sock_lower_handle_t)connp,
	    (struct sockaddr *)&laddr, &laddrlen, CRED());
	error = udp_getpeername((sock_lower_handle_t)connp,
	    (struct sockaddr *)&faddr, &faddrlen, CRED());
	if (error != 0)
		faddrlen = 0;

	opts = 0;
	if (connp->conn_dgram_errind)
		opts |= SO_DGRAM_ERRIND;
	if (connp->conn_ixa->ixa_flags & IXAF_DONTROUTE)
		opts |= SO_DONTROUTE;

	mp = (*quiesced_cb)(connp->conn_upper_handle, arg, &tca,
	    (struct sockaddr *)&laddr, laddrlen,
	    (struct sockaddr *)&faddr, faddrlen, opts);

	mutex_enter(&udp->udp_recv_lock);
	/*
	 * Attempts to send data up during fallback will result in it being
	 * queued in udp_t. First push up the datagrams obtained from the
	 * socket, then any packets queued in udp_t.
	 */
	if (mp != NULL) {
		mp->b_next = udp->udp_fallback_queue_head;
		udp->udp_fallback_queue_head = mp;
	}
	while (udp->udp_fallback_queue_head != NULL) {
		mp = udp->udp_fallback_queue_head;
		udp->udp_fallback_queue_head = mp->b_next;
		mutex_exit(&udp->udp_recv_lock);
		mp->b_next = NULL;
		putnext(RD(q), mp);
		mutex_enter(&udp->udp_recv_lock);
	}
	udp->udp_fallback_queue_tail = udp->udp_fallback_queue_head;
	/*
	 * No longer a non-STREAMS socket.
	 */
	mutex_enter(&connp->conn_lock);
	connp->conn_flags &= ~IPCL_NONSTR;
	mutex_exit(&connp->conn_lock);

	mutex_exit(&udp->udp_recv_lock);

	ASSERT(connp->conn_ref >= 1);

	return (0);
}

/* ARGSUSED3 */
int
udp_getpeername(sock_lower_handle_t proto_handle, struct sockaddr *sa,
    socklen_t *salenp, cred_t *cr)
{
	conn_t	*connp = (conn_t *)proto_handle;
	udp_t	*udp = connp->conn_udp;
	int	error;

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	mutex_enter(&connp->conn_lock);
	if (udp->udp_state != TS_DATA_XFER)
		error = ENOTCONN;
	else
		error = conn_getpeername(connp, sa, salenp);
	mutex_exit(&connp->conn_lock);
	return (error);
}

/* ARGSUSED3 */
int
udp_getsockname(sock_lower_handle_t proto_handle, struct sockaddr *sa,
    socklen_t *salenp, cred_t *cr)
{
	conn_t	*connp = (conn_t *)proto_handle;
	int	error;

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	mutex_enter(&connp->conn_lock);
	error = conn_getsockname(connp, sa, salenp);
	mutex_exit(&connp->conn_lock);
	return (error);
}

int
udp_getsockopt(sock_lower_handle_t proto_handle, int level, int option_name,
    void *optvalp, socklen_t *optlen, cred_t *cr)
{
	conn_t		*connp = (conn_t *)proto_handle;
	int		error;
	t_uscalar_t	max_optbuf_len;
	void		*optvalp_buf;
	int		len;

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	error = proto_opt_check(level, option_name, *optlen, &max_optbuf_len,
	    udp_opt_obj.odb_opt_des_arr,
	    udp_opt_obj.odb_opt_arr_cnt,
	    B_FALSE, B_TRUE, cr);
	if (error != 0) {
		if (error < 0)
			error = proto_tlitosyserr(-error);
		return (error);
	}

	optvalp_buf = kmem_alloc(max_optbuf_len, KM_SLEEP);
	len = udp_opt_get(connp, level, option_name, optvalp_buf);
	if (len == -1) {
		kmem_free(optvalp_buf, max_optbuf_len);
		return (EINVAL);
	}

	/*
	 * Update optlen and copy the option value.
	 */
	t_uscalar_t size = MIN(len, *optlen);

	bcopy(optvalp_buf, optvalp, size);
	bcopy(&size, optlen, sizeof (size));

	kmem_free(optvalp_buf, max_optbuf_len);
	return (0);
}

int
udp_setsockopt(sock_lower_handle_t proto_handle, int level, int option_name,
    const void *optvalp, socklen_t optlen, cred_t *cr)
{
	conn_t	*connp = (conn_t *)proto_handle;
	int	error;

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	error = proto_opt_check(level, option_name, optlen, NULL,
	    udp_opt_obj.odb_opt_des_arr,
	    udp_opt_obj.odb_opt_arr_cnt,
	    B_TRUE, B_FALSE, cr);

	if (error != 0) {
		if (error < 0)
			error = proto_tlitosyserr(-error);
		return (error);
	}

	error = udp_opt_set(connp, SETFN_OPTCOM_NEGOTIATE, level, option_name,
	    optlen, (uchar_t *)optvalp, (uint_t *)&optlen, (uchar_t *)optvalp,
	    NULL, cr);

	ASSERT(error >= 0);

	return (error);
}

void
udp_clr_flowctrl(sock_lower_handle_t proto_handle)
{
	conn_t	*connp = (conn_t *)proto_handle;
	udp_t	*udp = connp->conn_udp;

	mutex_enter(&udp->udp_recv_lock);
	connp->conn_flow_cntrld = B_FALSE;
	mutex_exit(&udp->udp_recv_lock);
}

/* ARGSUSED2 */
int
udp_shutdown(sock_lower_handle_t proto_handle, int how, cred_t *cr)
{
	conn_t	*connp = (conn_t *)proto_handle;

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	/* shut down the send side */
	if (how != SHUT_RD)
		(*connp->conn_upcalls->su_opctl)(connp->conn_upper_handle,
		    SOCK_OPCTL_SHUT_SEND, 0);
	/* shut down the recv side */
	if (how != SHUT_WR)
		(*connp->conn_upcalls->su_opctl)(connp->conn_upper_handle,
		    SOCK_OPCTL_SHUT_RECV, 0);
	return (0);
}

int
udp_ioctl(sock_lower_handle_t proto_handle, int cmd, intptr_t arg,
    int mode, int32_t *rvalp, cred_t *cr)
{
	conn_t	*connp = (conn_t *)proto_handle;
	int	error;

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	/*
	 * If we don't have a helper stream then create one.
	 * ip_create_helper_stream takes care of locking the conn_t,
	 * so this check for NULL is just a performance optimization.
	 */
	if (connp->conn_helper_info == NULL) {
		udp_stack_t *us = connp->conn_udp->udp_us;

		ASSERT(us->us_ldi_ident != NULL);

		/*
		 * Create a helper stream for a non-STREAMS socket.
		 */
		error = ip_create_helper_stream(connp, us->us_ldi_ident);
		if (error != 0) {
			ip0dbg(("udp_ioctl: create of IP helper stream "
			    "failed %d\n", error));
			return (error);
		}
	}

	switch (cmd) {
	case _SIOCSOCKFALLBACK:
	case TI_GETPEERNAME:
	case TI_GETMYNAME:
		ip1dbg(("udp_ioctl: cmd 0x%x on non-STREAMS socket",
		    cmd));
		error = EINVAL;
		break;
	default:
		/*
		 * Pass on to IP using helper stream
		 */
		error = ldi_ioctl(connp->conn_helper_info->iphs_handle,
		    cmd, arg, mode, cr, rvalp);
		break;
	}
	return (error);
}

/* ARGSUSED */
int
udp_accept(sock_lower_handle_t lproto_handle,
    sock_lower_handle_t eproto_handle, sock_upper_handle_t sock_handle,
    cred_t *cr)
{
	return (EOPNOTSUPP);
}

/* ARGSUSED */
int
udp_listen(sock_lower_handle_t proto_handle, int backlog, cred_t *cr)
{
	return (EOPNOTSUPP);
}

sock_downcalls_t sock_udp_downcalls = {
	udp_activate,		/* sd_activate */
	udp_accept,		/* sd_accept */
	udp_bind,		/* sd_bind */
	udp_listen,		/* sd_listen */
	udp_connect,		/* sd_connect */
	udp_getpeername,	/* sd_getpeername */
	udp_getsockname,	/* sd_getsockname */
	udp_getsockopt,		/* sd_getsockopt */
	udp_setsockopt,		/* sd_setsockopt */
	udp_send,		/* sd_send */
	NULL,			/* sd_send_uio */
	NULL,			/* sd_recv_uio */
	NULL,			/* sd_poll */
	udp_shutdown,		/* sd_shutdown */
	udp_clr_flowctrl,	/* sd_setflowctrl */
	udp_ioctl,		/* sd_ioctl */
	udp_close		/* sd_close */
};
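
/*
 * Illustrative note (editor's addition, not part of the original source):
 * sock_udp_downcalls above is the table sockfs uses to drive this module for
 * SOCK_DGRAM sockets.  Below is a minimal userland sketch of the connect/send
 * semantics implemented by udp_connect() and udp_send().  The destination
 * port and address are arbitrary example values, and the errnos actually
 * visible to an application can also depend on the socket layer and on the
 * us_sendto_ignerr tunable, which causes send errors to be reported as
 * success:
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <arpa/inet.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *		struct sockaddr_in dst;
 *		char buf[] = "ping";
 *
 *		if (fd == -1)
 *			return (1);
 *
 *		(void) memset(&dst, 0, sizeof (dst));
 *		dst.sin_family = AF_INET;
 *		dst.sin_port = htons(5000);	// example port
 *		dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
 *
 *		// connect() does an implicit bind (udp_implicit_bind) and
 *		// then udp_do_connect() records the default destination;
 *		// a zero destination address would be rewritten to loopback.
 *		if (connect(fd, (struct sockaddr *)&dst, sizeof (dst)) == -1)
 *			perror("connect");
 *
 *		// With the endpoint in TS_DATA_XFER, a plain send() goes
 *		// through udp_output_connected().
 *		if (send(fd, buf, sizeof (buf), 0) == -1)
 *			perror("send");
 *
 *		// Supplying an explicit destination while connected is
 *		// rejected by udp_send() with EISCONN.
 *		if (sendto(fd, buf, sizeof (buf), 0,
 *		    (struct sockaddr *)&dst, sizeof (dst)) == -1)
 *			perror("sendto");
 *
 *		(void) close(fd);
 *		return (0);
 *	}
 */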