/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2014, OmniTI Computer Consulting, Inc. All rights reserved.
 */
/* Copyright (c) 1990 Mentat Inc. */

#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/stream.h>
#include <sys/stropts.h>
#include <sys/strlog.h>
#include <sys/strsun.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/timod.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/strsubr.h>
#include <sys/suntpi.h>
#include <sys/xti_inet.h>
#include <sys/kmem.h>
#include <sys/cred_impl.h>
#include <sys/policy.h>
#include <sys/priv.h>
#include <sys/ucred.h>
#include <sys/zone.h>

#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/vtrace.h>
#include <sys/sdt.h>
#include <sys/debug.h>
#include <sys/isa_defs.h>
#include <sys/random.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet/udp.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip_impl.h>
#include <inet/ipsec_impl.h>
#include <inet/ip6.h>
#include <inet/ip_ire.h>
#include <inet/ip_if.h>
#include <inet/ip_multi.h>
#include <inet/ip_ndp.h>
#include <inet/proto_set.h>
#include <inet/mib2.h>
#include <inet/optcom.h>
#include <inet/snmpcom.h>
#include <inet/kstatcom.h>
#include <inet/ipclassifier.h>
#include <sys/squeue_impl.h>
#include <inet/ipnet.h>
#include <sys/ethernet.h>

#include <sys/tsol/label.h>
#include <sys/tsol/tnet.h>
#include <rpc/pmap_prot.h>

#include <inet/udp_impl.h>

/*
 * Synchronization notes:
 *
 * UDP is MT and uses the usual kernel synchronization primitives. There are
 * 2 locks, the fanout lock (uf_lock) and conn_lock. conn_lock protects the
 * contents of the udp_t. uf_lock protects the address and the fanout
 * information.
 * The lock order is conn_lock -> uf_lock.
 *
 * The fanout lock uf_lock:
 * When a UDP endpoint is bound to a local port, it is inserted into
 * a bind hash list. The list consists of an array of udp_fanout_t buckets.
 * The size of the array is controlled by the udp_bind_fanout_size variable.
 * This variable can be changed in /etc/system if the default value is
 * not large enough. Each bind hash bucket is protected by a per bucket
 * lock. It protects the udp_bind_hash and udp_ptpbhn fields in the udp_t
 * structure and a few other fields in the udp_t.
 * A UDP endpoint is removed
 * from the bind hash list only when it is being unbound or being closed.
 * The per bucket lock also protects a UDP endpoint's state changes.
 *
 * Plumbing notes:
 * UDP is always a device driver. For compatibility with mibopen() code
 * it is possible to I_PUSH "udp", but that results in pushing a passthrough
 * dummy module.
 *
 * The above implies that we don't support any intermediate module to
 * reside in between /dev/ip and udp -- in fact, we never supported such
 * scenario in the past as the inter-layer communication semantics have
 * always been private.
 */

/* For /etc/system control */
uint_t udp_bind_fanout_size = UDP_BIND_FANOUT_SIZE;

static void	udp_addr_req(queue_t *q, mblk_t *mp);
static void	udp_tpi_bind(queue_t *q, mblk_t *mp);
static void	udp_bind_hash_insert(udp_fanout_t *uf, udp_t *udp);
static void	udp_bind_hash_remove(udp_t *udp, boolean_t caller_holds_lock);
static int	udp_build_hdr_template(conn_t *, const in6_addr_t *,
    const in6_addr_t *, in_port_t, uint32_t);
static void	udp_capability_req(queue_t *q, mblk_t *mp);
static int	udp_tpi_close(queue_t *q, int flags);
static void	udp_close_free(conn_t *);
static void	udp_tpi_connect(queue_t *q, mblk_t *mp);
static void	udp_tpi_disconnect(queue_t *q, mblk_t *mp);
static void	udp_err_ack(queue_t *q, mblk_t *mp, t_scalar_t t_error,
    int sys_error);
static void	udp_err_ack_prim(queue_t *q, mblk_t *mp, t_scalar_t primitive,
    t_scalar_t tlierr, int sys_error);
static int	udp_extra_priv_ports_get(queue_t *q, mblk_t *mp, caddr_t cp,
    cred_t *cr);
static int	udp_extra_priv_ports_add(queue_t *q, mblk_t *mp,
    char *value, caddr_t cp, cred_t *cr);
static int	udp_extra_priv_ports_del(queue_t *q, mblk_t *mp,
    char *value, caddr_t cp, cred_t *cr);
static void	udp_icmp_input(void *, mblk_t *, void *, ip_recv_attr_t *);
static void	udp_icmp_error_ipv6(conn_t *connp, mblk_t *mp,
    ip_recv_attr_t *ira);
static void	udp_info_req(queue_t *q, mblk_t *mp);
static void	udp_input(void *, mblk_t *, void *, ip_recv_attr_t *);
static void	udp_lrput(queue_t *, mblk_t *);
static void	udp_lwput(queue_t *, mblk_t *);
static int	udp_open(queue_t *q, dev_t *devp, int flag, int sflag,
    cred_t *credp, boolean_t isv6);
static int	udp_openv4(queue_t *q, dev_t *devp, int flag, int sflag,
    cred_t *credp);
static int	udp_openv6(queue_t *q, dev_t *devp, int flag, int sflag,
    cred_t *credp);
static boolean_t udp_opt_allow_udr_set(t_scalar_t level, t_scalar_t name);
int		udp_opt_set(conn_t *connp, uint_t optset_context,
    int level, int name, uint_t inlen,
    uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp,
    void *thisdg_attrs, cred_t *cr);
int		udp_opt_get(conn_t *connp, int level, int name,
    uchar_t *ptr);
static int	udp_output_connected(conn_t *connp, mblk_t *mp, cred_t *cr,
    pid_t pid);
static int	udp_output_lastdst(conn_t *connp, mblk_t *mp, cred_t *cr,
    pid_t pid, ip_xmit_attr_t *ixa);
static int	udp_output_newdst(conn_t *connp, mblk_t *data_mp, sin_t *sin,
    sin6_t *sin6, ushort_t ipversion, cred_t *cr, pid_t,
    ip_xmit_attr_t *ixa);
static mblk_t	*udp_prepend_hdr(conn_t *, ip_xmit_attr_t *, const ip_pkt_t *,
    const in6_addr_t *, const in6_addr_t *, in_port_t, uint32_t, mblk_t *,
    int *);
static mblk_t	*udp_prepend_header_template(conn_t *, ip_xmit_attr_t *,
    mblk_t *, const in6_addr_t *,
    in_port_t, uint32_t, int *);
static void	udp_ud_err(queue_t *q, mblk_t *mp, t_scalar_t err);
static void	udp_ud_err_connected(conn_t *, t_scalar_t);
static void	udp_tpi_unbind(queue_t *q, mblk_t *mp);
static in_port_t udp_update_next_port(udp_t *udp, in_port_t port,
    boolean_t random);
static void	udp_wput_other(queue_t *q, mblk_t *mp);
static void	udp_wput_iocdata(queue_t *q, mblk_t *mp);
static void	udp_wput_fallback(queue_t *q, mblk_t *mp);
static size_t	udp_set_rcv_hiwat(udp_t *udp, size_t size);

static void	*udp_stack_init(netstackid_t stackid, netstack_t *ns);
static void	udp_stack_fini(netstackid_t stackid, void *arg);

/* Common routines for TPI and socket module */
static void	udp_ulp_recv(conn_t *, mblk_t *, uint_t, ip_recv_attr_t *);

/* Common routine for TPI and socket module */
static conn_t	*udp_do_open(cred_t *, boolean_t, int, int *);
static void	udp_do_close(conn_t *);
static int	udp_do_bind(conn_t *, struct sockaddr *, socklen_t, cred_t *,
    boolean_t);
static int	udp_do_unbind(conn_t *);

int		udp_getsockname(sock_lower_handle_t,
    struct sockaddr *, socklen_t *, cred_t *);
int		udp_getpeername(sock_lower_handle_t,
    struct sockaddr *, socklen_t *, cred_t *);
static int	udp_do_connect(conn_t *, const struct sockaddr *, socklen_t,
    cred_t *, pid_t);

#pragma inline(udp_output_connected, udp_output_newdst, udp_output_lastdst)

/*
 * Checks if the given destination addr/port is allowed out.
 * If allowed, registers the (dest_addr/port, node_ID) mapping at Cluster.
 * Called for each connect() and for sendto()/sendmsg() to a different
 * destination.
 * For connect(), called in udp_connect().
 * For sendto()/sendmsg(), called in udp_output_newdst().
 *
 * This macro assumes that the cl_inet_connect2 hook is not NULL.
 * Please check this before calling this macro.
 *
 * void
 * CL_INET_UDP_CONNECT(conn_t *cp, boolean_t is_outgoing,
 *     in6_addr_t *faddrp, in_port_t (or uint16_t) fport, int err);
 */
#define	CL_INET_UDP_CONNECT(cp, is_outgoing, faddrp, fport, err) {	\
	(err) = 0;							\
	/*								\
	 * Running in cluster mode - check and register active		\
	 * "connection" information					\
	 */								\
	if ((cp)->conn_ipversion == IPV4_VERSION)			\
		(err) = (*cl_inet_connect2)(				\
		    (cp)->conn_netstack->netstack_stackid,		\
		    IPPROTO_UDP, is_outgoing, AF_INET,			\
		    (uint8_t *)&((cp)->conn_laddr_v4),			\
		    (cp)->conn_lport,					\
		    (uint8_t *)&(V4_PART_OF_V6(*faddrp)),		\
		    (in_port_t)(fport), NULL);				\
	else								\
		(err) = (*cl_inet_connect2)(				\
		    (cp)->conn_netstack->netstack_stackid,		\
		    IPPROTO_UDP, is_outgoing, AF_INET6,			\
		    (uint8_t *)&((cp)->conn_laddr_v6),			\
		    (cp)->conn_lport,					\
		    (uint8_t *)(faddrp), (in_port_t)(fport), NULL);	\
}

static struct module_info udp_mod_info =  {
	UDP_MOD_ID, UDP_MOD_NAME, 1, INFPSZ, UDP_RECV_HIWATER, UDP_RECV_LOWATER
};

/*
 * Entry points for UDP as a device.
 * We have separate open functions for the /dev/udp and /dev/udp6 devices.
 */
static struct qinit udp_rinitv4 = {
	NULL, NULL, udp_openv4, udp_tpi_close, NULL, &udp_mod_info, NULL
};

static struct qinit udp_rinitv6 = {
	NULL, NULL, udp_openv6, udp_tpi_close, NULL, &udp_mod_info, NULL
};

static struct qinit udp_winit = {
	(pfi_t)udp_wput, (pfi_t)ip_wsrv, NULL, NULL, NULL, &udp_mod_info
};

/* UDP entry point during fallback */
struct qinit udp_fallback_sock_winit = {
	(pfi_t)udp_wput_fallback, NULL, NULL, NULL, NULL, &udp_mod_info
};

/*
 * UDP needs to handle I_LINK and I_PLINK since ifconfig
 * likes to use it as a place to hang the various streams.
 */
static struct qinit udp_lrinit = {
	(pfi_t)udp_lrput, NULL, udp_openv4, udp_tpi_close, NULL, &udp_mod_info
};

static struct qinit udp_lwinit = {
	(pfi_t)udp_lwput, NULL, udp_openv4, udp_tpi_close, NULL, &udp_mod_info
};

/* For AF_INET aka /dev/udp */
struct streamtab udpinfov4 = {
	&udp_rinitv4, &udp_winit, &udp_lrinit, &udp_lwinit
};

/* For AF_INET6 aka /dev/udp6 */
struct streamtab udpinfov6 = {
	&udp_rinitv6, &udp_winit, &udp_lrinit, &udp_lwinit
};

#define	REUSELIST_MAX	64
struct reuselist {
	conn_t		*ru_conns[REUSELIST_MAX];
	int		ru_entries;	/* number of entries */
	int		ru_next;	/* round-robin pointer */
	kmutex_t	ru_lock;
};

#define	UDP_MAXPACKET_IPV4 (IP_MAXPACKET - UDPH_SIZE - IP_SIMPLE_HDR_LENGTH)

/* Default structure copied into T_INFO_ACK messages */
static struct T_info_ack udp_g_t_info_ack_ipv4 = {
	T_INFO_ACK,
	UDP_MAXPACKET_IPV4,	/* TSDU_size. Excl. headers */
	T_INVALID,	/* ETSU_size.  udp does not support expedited data. */
	T_INVALID,	/* CDATA_size. udp does not support connect data. */
	T_INVALID,	/* DDATA_size. udp does not support disconnect data. */
	sizeof (sin_t),	/* ADDR_size. */
	0,		/* OPT_size - not initialized here */
	UDP_MAXPACKET_IPV4,	/* TIDU_size.  Excl. headers */
	T_CLTS,		/* SERV_type.  udp supports connection-less. */
	TS_UNBND,	/* CURRENT_state.  This is set from udp_state. */
	(XPG4_1|SENDZERO) /* PROVIDER_flag */
};

#define	UDP_MAXPACKET_IPV6 (IP_MAXPACKET - UDPH_SIZE - IPV6_HDR_LEN)

static	struct T_info_ack udp_g_t_info_ack_ipv6 = {
	T_INFO_ACK,
	UDP_MAXPACKET_IPV6,	/* TSDU_size.  Excl. headers */
	T_INVALID,	/* ETSU_size.  udp does not support expedited data. */
	T_INVALID,	/* CDATA_size. udp does not support connect data. */
	T_INVALID,	/* DDATA_size. udp does not support disconnect data. */
	sizeof (sin6_t), /* ADDR_size. */
	0,		/* OPT_size - not initialized here */
	UDP_MAXPACKET_IPV6,	/* TIDU_size. Excl. headers */
	T_CLTS,		/* SERV_type.  udp supports connection-less. */
	TS_UNBND,	/* CURRENT_state.  This is set from udp_state. */
	(XPG4_1|SENDZERO) /* PROVIDER_flag */
};

/*
 * UDP tunables related declarations. Definitions are in udp_tunables.c
 */
extern mod_prop_info_t udp_propinfo_tbl[];
extern int udp_propinfo_count;

/* Setable in /etc/system */
/* If set to 0, pick ephemeral port sequentially; otherwise randomly. */
uint32_t udp_random_anon_port = 1;

/*
 * Hook functions to enable cluster networking.
 * On non-clustered systems these vectors must always be NULL.
 */

void (*cl_inet_bind)(netstackid_t stack_id, uchar_t protocol,
    sa_family_t addr_family, uint8_t *laddrp, in_port_t lport,
    void *args) = NULL;
void (*cl_inet_unbind)(netstackid_t stack_id, uint8_t protocol,
    sa_family_t addr_family, uint8_t *laddrp, in_port_t lport,
    void *args) = NULL;

typedef union T_primitives *t_primp_t;


/*
 * udp_reuselist_add() and udp_reuselist_remove() are protected against
 * concurrent calls by uf_lock.
 * ru_lock protects add/remove against use of the list in udp_input().
 */
static int
udp_reuselist_add(struct reuselist *reusep, conn_t *connp)
{
	ASSERT(MUTEX_HELD(&connp->conn_lock));

	mutex_enter(&reusep->ru_lock);

	if (reusep->ru_entries == REUSELIST_MAX) {
		mutex_exit(&reusep->ru_lock);
		return (-1);
	}

	reusep->ru_conns[reusep->ru_entries++] = connp;
	connp->conn_reuselist = reusep;

	mutex_exit(&reusep->ru_lock);
	return (0);
}

static void
udp_reuselist_remove(conn_t *connp)
{
	int i;
	struct reuselist *reusep = connp->conn_reuselist;

	if (reusep == NULL)
		return;

	ASSERT(MUTEX_HELD(&connp->conn_lock));

	mutex_enter(&reusep->ru_lock);

	for (i = 0; i < reusep->ru_entries; ++i) {
		if (reusep->ru_conns[i] == connp)
			break;
	}
	ASSERT(i < reusep->ru_entries);

	/* move last entry into freed slot */
	if (--reusep->ru_entries == 0) {
		/* last entry, free list */
		mutex_exit(&reusep->ru_lock);
		mutex_destroy(&reusep->ru_lock);
		kmem_free(reusep, sizeof (*reusep));
		connp->conn_reuselist = NULL;
	} else {
		reusep->ru_conns[i] = reusep->ru_conns[reusep->ru_entries];

		/*
		 * reset round-robin pointer, so it doesn't accidentally point
		 * to the last entry
		 */
		reusep->ru_next = 0;
		mutex_exit(&reusep->ru_lock);
	}
}

/*
 * Return the next anonymous port in the privileged port range for
 * bind checking.
 *
 * Trusted Extensions (TX) notes: TX allows the administrator to mark or
 * reserve ports as Multilevel Ports (MLP). An MLP has a special function
 * on TX systems. Once a port is made MLP, it's not available as an
 * ordinary port. This creates "holes" in the port name space. It
 * may be necessary to skip the "holes" to find a suitable anon port.
 */
static in_port_t
udp_get_next_priv_port(udp_t *udp)
{
	static in_port_t next_priv_port = IPPORT_RESERVED - 1;
	in_port_t nextport;
	boolean_t restart = B_FALSE;
	udp_stack_t *us = udp->udp_us;

retry:
	if (next_priv_port < us->us_min_anonpriv_port ||
	    next_priv_port >= IPPORT_RESERVED) {
		next_priv_port = IPPORT_RESERVED - 1;
		if (restart)
			return (0);
		restart = B_TRUE;
	}

	if (is_system_labeled() &&
	    (nextport = tsol_next_port(crgetzone(udp->udp_connp->conn_cred),
	    next_priv_port, IPPROTO_UDP, B_FALSE)) != 0) {
		next_priv_port = nextport;
		goto retry;
	}

	return (next_priv_port--);
}

/*
 * Hash list removal routine for udp_t structures.
 */
static void
udp_bind_hash_remove(udp_t *udp, boolean_t caller_holds_lock)
{
	udp_t	*udpnext;
	kmutex_t *lockp;
	udp_stack_t *us = udp->udp_us;
	conn_t	*connp = udp->udp_connp;

	if (udp->udp_ptpbhn == NULL)
		return;

	/*
	 * Extract the lock pointer in case there are concurrent
	 * hash_remove's for this instance.
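	 * udp_ptpbhn is re-checked once the bucket lock is held, since a
	 * concurrent remover may already have unlinked this udp_t from the
	 * hash chain by the time we acquire uf_lock.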
	 */
	ASSERT(connp->conn_lport != 0);
	if (!caller_holds_lock) {
		lockp = &us->us_bind_fanout[UDP_BIND_HASH(connp->conn_lport,
		    us->us_bind_fanout_size)].uf_lock;
		ASSERT(lockp != NULL);
		mutex_enter(lockp);
	}
	if (udp->udp_ptpbhn != NULL) {
		udpnext = udp->udp_bind_hash;
		if (udpnext != NULL) {
			udpnext->udp_ptpbhn = udp->udp_ptpbhn;
			udp->udp_bind_hash = NULL;
		}
		*udp->udp_ptpbhn = udpnext;
		udp->udp_ptpbhn = NULL;
	}
	udp_reuselist_remove(connp);
	if (!caller_holds_lock) {
		mutex_exit(lockp);
	}
}

static void
udp_bind_hash_insert(udp_fanout_t *uf, udp_t *udp)
{
	conn_t	*connp = udp->udp_connp;
	udp_t	**udpp;
	udp_t	*udpnext;
	conn_t	*connext;

	ASSERT(MUTEX_HELD(&uf->uf_lock));
	ASSERT(udp->udp_ptpbhn == NULL);
	udpp = &uf->uf_udp;
	udpnext = udpp[0];
	if (udpnext != NULL) {
		/*
		 * If the new udp bound to the INADDR_ANY address
		 * and the first one in the list is not bound to
		 * INADDR_ANY we skip all entries until we find the
		 * first one bound to INADDR_ANY.
		 * This makes sure that applications binding to a
		 * specific address get preference over those binding to
		 * INADDR_ANY.
		 */
		connext = udpnext->udp_connp;
		if (V6_OR_V4_INADDR_ANY(connp->conn_bound_addr_v6) &&
		    !V6_OR_V4_INADDR_ANY(connext->conn_bound_addr_v6)) {
			while ((udpnext = udpp[0]) != NULL &&
			    !V6_OR_V4_INADDR_ANY(connext->conn_bound_addr_v6)) {
				udpp = &(udpnext->udp_bind_hash);
			}
			if (udpnext != NULL)
				udpnext->udp_ptpbhn = &udp->udp_bind_hash;
		} else {
			udpnext->udp_ptpbhn = &udp->udp_bind_hash;
		}
	}
	udp->udp_bind_hash = udpnext;
	udp->udp_ptpbhn = udpp;
	udpp[0] = udp;
}

/*
 * This routine is called to handle each O_T_BIND_REQ/T_BIND_REQ message
 * passed to udp_wput.
 * It associates a port number and local address with the stream.
 * It calls IP to verify the local IP address, and calls IP to insert
 * the conn_t in the fanout table.
 * If everything is ok it then sends the T_BIND_ACK back up.
 *
 * Note that UDP over IPv4 and IPv6 sockets can use the same port number
 * without setting SO_REUSEADDR. This is needed so that they
 * can be viewed as two independent transport protocols.
 * However, anonymous ports are allocated from the same range to avoid
 * duplicating the us->us_next_port_to_try.
 */
static void
udp_tpi_bind(queue_t *q, mblk_t *mp)
{
	sin_t	*sin;
	sin6_t	*sin6;
	mblk_t	*mp1;
	struct T_bind_req *tbr;
	conn_t	*connp;
	udp_t	*udp;
	int	error;
	struct sockaddr	*sa;
	cred_t	*cr;

	/*
	 * All Solaris components should pass a db_credp
	 * for this TPI message, hence we ASSERT.
	 * But in case there is some other M_PROTO that looks
	 * like a TPI message sent by some other kernel
	 * component, we check and return an error.
	 */
	cr = msg_getcred(mp, NULL);
	ASSERT(cr != NULL);
	if (cr == NULL) {
		udp_err_ack(q, mp, TSYSERR, EINVAL);
		return;
	}

	connp = Q_TO_CONN(q);
	udp = connp->conn_udp;
	if ((mp->b_wptr - mp->b_rptr) < sizeof (*tbr)) {
		(void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
		    "udp_bind: bad req, len %u",
		    (uint_t)(mp->b_wptr - mp->b_rptr));
		udp_err_ack(q, mp, TPROTO, 0);
		return;
	}
	if (udp->udp_state != TS_UNBND) {
		(void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
		    "udp_bind: bad state, %u", udp->udp_state);
		udp_err_ack(q, mp, TOUTSTATE, 0);
		return;
	}
	/*
	 * Reallocate the message to make sure we have enough room for an
	 * address.
	 */
	mp1 = reallocb(mp, sizeof (struct T_bind_ack) + sizeof (sin6_t), 1);
	if (mp1 == NULL) {
		udp_err_ack(q, mp, TSYSERR, ENOMEM);
		return;
	}

	mp = mp1;

	/* Reset the message type in preparation for shipping it back. */
	DB_TYPE(mp) = M_PCPROTO;

	tbr = (struct T_bind_req *)mp->b_rptr;
	switch (tbr->ADDR_length) {
	case 0:			/* Request for a generic port */
		tbr->ADDR_offset = sizeof (struct T_bind_req);
		if (connp->conn_family == AF_INET) {
			tbr->ADDR_length = sizeof (sin_t);
			sin = (sin_t *)&tbr[1];
			*sin = sin_null;
			sin->sin_family = AF_INET;
			mp->b_wptr = (uchar_t *)&sin[1];
			sa = (struct sockaddr *)sin;
		} else {
			ASSERT(connp->conn_family == AF_INET6);
			tbr->ADDR_length = sizeof (sin6_t);
			sin6 = (sin6_t *)&tbr[1];
			*sin6 = sin6_null;
			sin6->sin6_family = AF_INET6;
			mp->b_wptr = (uchar_t *)&sin6[1];
			sa = (struct sockaddr *)sin6;
		}
		break;

	case sizeof (sin_t):	/* Complete IPv4 address */
		sa = (struct sockaddr *)mi_offset_param(mp, tbr->ADDR_offset,
		    sizeof (sin_t));
		if (sa == NULL || !OK_32PTR((char *)sa)) {
			udp_err_ack(q, mp, TSYSERR, EINVAL);
			return;
		}
		if (connp->conn_family != AF_INET ||
		    sa->sa_family != AF_INET) {
			udp_err_ack(q, mp, TSYSERR, EAFNOSUPPORT);
			return;
		}
		break;

	case sizeof (sin6_t):	/* complete IPv6 address */
		sa = (struct sockaddr *)mi_offset_param(mp, tbr->ADDR_offset,
		    sizeof (sin6_t));
		if (sa == NULL || !OK_32PTR((char *)sa)) {
			udp_err_ack(q, mp, TSYSERR, EINVAL);
			return;
		}
		if (connp->conn_family != AF_INET6 ||
		    sa->sa_family != AF_INET6) {
			udp_err_ack(q, mp, TSYSERR, EAFNOSUPPORT);
			return;
		}
		break;

	default:		/* Invalid request */
		(void) mi_strlog(q, 1, SL_ERROR|SL_TRACE,
		    "udp_bind: bad ADDR_length length %u", tbr->ADDR_length);
		udp_err_ack(q, mp, TBADADDR, 0);
		return;
	}

	error = udp_do_bind(connp, sa, tbr->ADDR_length, cr,
	    tbr->PRIM_type != O_T_BIND_REQ);

	if (error != 0) {
		if (error > 0) {
			udp_err_ack(q, mp, TSYSERR, error);
		} else {
			udp_err_ack(q, mp, -error, 0);
		}
	} else {
		tbr->PRIM_type = T_BIND_ACK;
		qreply(q, mp);
	}
}

/*
 * This routine handles each T_CONN_REQ message passed to udp. It
 * associates a default destination address with the stream.
 *
 * After various error checks are completed, udp_connect() lays
 * the target address and port into the composite header template.
 * Then we ask IP for information, including a source address if we didn't
 * already have one. Finally we send up the T_OK_ACK reply message.
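 * A T_CONN_CON is also sent upstream, since TLI/XTI clients expect a
 * connection confirmation even from a connection-less (CLTS) provider.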
 */
static void
udp_tpi_connect(queue_t *q, mblk_t *mp)
{
	conn_t	*connp = Q_TO_CONN(q);
	int	error;
	socklen_t	len;
	struct sockaddr		*sa;
	struct T_conn_req	*tcr;
	cred_t		*cr;
	pid_t		pid;
	/*
	 * All Solaris components should pass a db_credp
	 * for this TPI message, hence we ASSERT.
	 * But in case there is some other M_PROTO that looks
	 * like a TPI message sent by some other kernel
	 * component, we check and return an error.
	 */
	cr = msg_getcred(mp, &pid);
	ASSERT(cr != NULL);
	if (cr == NULL) {
		udp_err_ack(q, mp, TSYSERR, EINVAL);
		return;
	}

	tcr = (struct T_conn_req *)mp->b_rptr;

	/* A bit of sanity checking */
	if ((mp->b_wptr - mp->b_rptr) < sizeof (struct T_conn_req)) {
		udp_err_ack(q, mp, TPROTO, 0);
		return;
	}

	if (tcr->OPT_length != 0) {
		udp_err_ack(q, mp, TBADOPT, 0);
		return;
	}

	/*
	 * Determine packet type based on type of address passed in
	 * the request should contain an IPv4 or IPv6 address.
	 * Make sure that address family matches the type of
	 * family of the address passed down.
	 */
	len = tcr->DEST_length;
	switch (tcr->DEST_length) {
	default:
		udp_err_ack(q, mp, TBADADDR, 0);
		return;

	case sizeof (sin_t):
		sa = (struct sockaddr *)mi_offset_param(mp, tcr->DEST_offset,
		    sizeof (sin_t));
		break;

	case sizeof (sin6_t):
		sa = (struct sockaddr *)mi_offset_param(mp, tcr->DEST_offset,
		    sizeof (sin6_t));
		break;
	}

	error = proto_verify_ip_addr(connp->conn_family, sa, len);
	if (error != 0) {
		udp_err_ack(q, mp, TSYSERR, error);
		return;
	}

	error = udp_do_connect(connp, sa, len, cr, pid);
	if (error != 0) {
		if (error < 0)
			udp_err_ack(q, mp, -error, 0);
		else
			udp_err_ack(q, mp, TSYSERR, error);
	} else {
		mblk_t	*mp1;
		/*
		 * We have to send a connection confirmation to
		 * keep TLI happy.
		 */
		if (connp->conn_family == AF_INET) {
			mp1 = mi_tpi_conn_con(NULL, (char *)sa,
			    sizeof (sin_t), NULL, 0);
		} else {
			mp1 = mi_tpi_conn_con(NULL, (char *)sa,
			    sizeof (sin6_t), NULL, 0);
		}
		if (mp1 == NULL) {
			udp_err_ack(q, mp, TSYSERR, ENOMEM);
			return;
		}

		/*
		 * Send ok_ack for T_CONN_REQ
		 */
		mp = mi_tpi_ok_ack_alloc(mp);
		if (mp == NULL) {
			/* Unable to reuse the T_CONN_REQ for the ack. */
			udp_err_ack_prim(q, mp1, T_CONN_REQ, TSYSERR, ENOMEM);
			return;
		}

		putnext(connp->conn_rq, mp);
		putnext(connp->conn_rq, mp1);
	}
}

static int
udp_tpi_close(queue_t *q, int flags)
{
	conn_t	*connp;

	if (flags & SO_FALLBACK) {
		/*
		 * stream is being closed while in fallback
		 * simply free the resources that were allocated
		 */
		inet_minor_free(WR(q)->q_ptr, (dev_t)(RD(q)->q_ptr));
		qprocsoff(q);
		goto done;
	}

	connp = Q_TO_CONN(q);
	udp_do_close(connp);
done:
	q->q_ptr = WR(q)->q_ptr = NULL;
	return (0);
}

static void
udp_close_free(conn_t *connp)
{
	udp_t *udp = connp->conn_udp;

	/* If there are any options associated with the stream, free them. */
	if (udp->udp_recv_ipp.ipp_fields != 0)
		ip_pkt_free(&udp->udp_recv_ipp);

	/*
	 * Clear any fields which the kmem_cache constructor clears.
	 * Only udp_connp needs to be preserved.
	 * TBD: We should make this more efficient to avoid clearing
	 * everything.
	 */
	ASSERT(udp->udp_connp == connp);
	bzero(udp, sizeof (udp_t));
	udp->udp_connp = connp;
}

static int
udp_do_disconnect(conn_t *connp)
{
	udp_t	*udp;
	udp_fanout_t *udpf;
	udp_stack_t *us;
	int	error;

	udp = connp->conn_udp;
	us = udp->udp_us;
	mutex_enter(&connp->conn_lock);
	if (udp->udp_state != TS_DATA_XFER) {
		mutex_exit(&connp->conn_lock);
		return (-TOUTSTATE);
	}
	udpf = &us->us_bind_fanout[UDP_BIND_HASH(connp->conn_lport,
	    us->us_bind_fanout_size)];
	mutex_enter(&udpf->uf_lock);
	if (connp->conn_mcbc_bind)
		connp->conn_saddr_v6 = ipv6_all_zeros;
	else
		connp->conn_saddr_v6 = connp->conn_bound_addr_v6;
	connp->conn_laddr_v6 = connp->conn_bound_addr_v6;
	connp->conn_faddr_v6 = ipv6_all_zeros;
	connp->conn_fport = 0;
	udp->udp_state = TS_IDLE;
	mutex_exit(&udpf->uf_lock);

	/* Remove any remnants of mapped address binding */
	if (connp->conn_family == AF_INET6)
		connp->conn_ipversion = IPV6_VERSION;

	connp->conn_v6lastdst = ipv6_all_zeros;
	error = udp_build_hdr_template(connp, &connp->conn_saddr_v6,
	    &connp->conn_faddr_v6, connp->conn_fport, connp->conn_flowinfo);
	mutex_exit(&connp->conn_lock);
	if (error != 0)
		return (error);

	/*
	 * Tell IP to remove the full binding and revert
	 * to the local address binding.
	 */
	return (ip_laddr_fanout_insert(connp));
}

static void
udp_tpi_disconnect(queue_t *q, mblk_t *mp)
{
	conn_t	*connp = Q_TO_CONN(q);
	int	error;

	/*
	 * Allocate the largest primitive we need to send back
	 * T_error_ack is > than T_ok_ack
	 */
	mp = reallocb(mp, sizeof (struct T_error_ack), 1);
	if (mp == NULL) {
		/* Unable to reuse the T_DISCON_REQ for the ack. */
		udp_err_ack_prim(q, mp, T_DISCON_REQ, TSYSERR, ENOMEM);
		return;
	}

	error = udp_do_disconnect(connp);

	if (error != 0) {
		if (error < 0) {
			udp_err_ack(q, mp, -error, 0);
		} else {
			udp_err_ack(q, mp, TSYSERR, error);
		}
	} else {
		mp = mi_tpi_ok_ack_alloc(mp);
		ASSERT(mp != NULL);
		qreply(q, mp);
	}
}

int
udp_disconnect(conn_t *connp)
{
	int error;

	connp->conn_dgram_errind = B_FALSE;
	error = udp_do_disconnect(connp);
	if (error < 0)
		error = proto_tlitosyserr(-error);

	return (error);
}

/* This routine creates a T_ERROR_ACK message and passes it upstream. */
static void
udp_err_ack(queue_t *q, mblk_t *mp, t_scalar_t t_error, int sys_error)
{
	if ((mp = mi_tpi_err_ack_alloc(mp, t_error, sys_error)) != NULL)
		qreply(q, mp);
}

/* Shorthand to generate and send TPI error acks to our client */
static void
udp_err_ack_prim(queue_t *q, mblk_t *mp, t_scalar_t primitive,
    t_scalar_t t_error, int sys_error)
{
	struct T_error_ack	*teackp;

	if ((mp = tpi_ack_alloc(mp, sizeof (struct T_error_ack),
	    M_PCPROTO, T_ERROR_ACK)) != NULL) {
		teackp = (struct T_error_ack *)mp->b_rptr;
		teackp->ERROR_prim = primitive;
		teackp->TLI_error = t_error;
		teackp->UNIX_error = sys_error;
		qreply(q, mp);
	}
}

/* At minimum we need 4 bytes of UDP header */
#define	ICMP_MIN_UDP_HDR	4

/*
 * udp_icmp_input is called as conn_recvicmp to process ICMP messages.
 * Generates the appropriate T_UDERROR_IND for permanent (non-transient)
 * errors.
 * Assumes that IP has pulled up everything up to and including the
 * ICMP header.
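 * In outline: an ICMP "fragmentation needed" updates the DF flag to match
 * the current path MTU state, port/protocol unreachable are mapped to
 * ECONNREFUSED, and all other types/codes are treated as transient and
 * simply dropped.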
 */
/* ARGSUSED2 */
static void
udp_icmp_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira)
{
	conn_t		*connp = (conn_t *)arg1;
	icmph_t		*icmph;
	ipha_t		*ipha;
	int		iph_hdr_length;
	udpha_t		*udpha;
	sin_t		sin;
	sin6_t		sin6;
	mblk_t		*mp1;
	int		error = 0;
	udp_t		*udp = connp->conn_udp;

	ipha = (ipha_t *)mp->b_rptr;

	ASSERT(OK_32PTR(mp->b_rptr));

	if (IPH_HDR_VERSION(ipha) != IPV4_VERSION) {
		ASSERT(IPH_HDR_VERSION(ipha) == IPV6_VERSION);
		udp_icmp_error_ipv6(connp, mp, ira);
		return;
	}
	ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION);

	/* Skip past the outer IP and ICMP headers */
	ASSERT(IPH_HDR_LENGTH(ipha) == ira->ira_ip_hdr_length);
	iph_hdr_length = ira->ira_ip_hdr_length;
	icmph = (icmph_t *)&mp->b_rptr[iph_hdr_length];
	ipha = (ipha_t *)&icmph[1];	/* Inner IP header */

	/* Skip past the inner IP and find the ULP header */
	iph_hdr_length = IPH_HDR_LENGTH(ipha);
	udpha = (udpha_t *)((char *)ipha + iph_hdr_length);

	switch (icmph->icmph_type) {
	case ICMP_DEST_UNREACHABLE:
		switch (icmph->icmph_code) {
		case ICMP_FRAGMENTATION_NEEDED: {
			ipha_t		*ipha;
			ip_xmit_attr_t	*ixa;
			/*
			 * IP has already adjusted the path MTU.
			 * But we need to adjust DF for IPv4.
			 */
			if (connp->conn_ipversion != IPV4_VERSION)
				break;

			ixa = conn_get_ixa(connp, B_FALSE);
			if (ixa == NULL || ixa->ixa_ire == NULL) {
				/*
				 * Some other thread holds conn_ixa. We will
				 * redo this on the next ICMP too big.
				 */
				if (ixa != NULL)
					ixa_refrele(ixa);
				break;
			}
			(void) ip_get_pmtu(ixa);

			mutex_enter(&connp->conn_lock);
			ipha = (ipha_t *)connp->conn_ht_iphc;
			if (ixa->ixa_flags & IXAF_PMTU_IPV4_DF) {
				ipha->ipha_fragment_offset_and_flags |=
				    IPH_DF_HTONS;
			} else {
				ipha->ipha_fragment_offset_and_flags &=
				    ~IPH_DF_HTONS;
			}
			mutex_exit(&connp->conn_lock);
			ixa_refrele(ixa);
			break;
		}
		case ICMP_PORT_UNREACHABLE:
		case ICMP_PROTOCOL_UNREACHABLE:
			error = ECONNREFUSED;
			break;
		default:
			/* Transient errors */
			break;
		}
		break;
	default:
		/* Transient errors */
		break;
	}
	if (error == 0) {
		freemsg(mp);
		return;
	}

	/*
	 * Deliver T_UDERROR_IND when the application has asked for it.
	 * The socket layer enables this automatically when connected.
	 */
	if (!connp->conn_dgram_errind) {
		freemsg(mp);
		return;
	}

	switch (connp->conn_family) {
	case AF_INET:
		sin = sin_null;
		sin.sin_family = AF_INET;
		sin.sin_addr.s_addr = ipha->ipha_dst;
		sin.sin_port = udpha->uha_dst_port;
		if (IPCL_IS_NONSTR(connp)) {
			mutex_enter(&connp->conn_lock);
			if (udp->udp_state == TS_DATA_XFER) {
				if (sin.sin_port == connp->conn_fport &&
				    sin.sin_addr.s_addr ==
				    connp->conn_faddr_v4) {
					mutex_exit(&connp->conn_lock);
					(*connp->conn_upcalls->su_set_error)
					    (connp->conn_upper_handle, error);
					goto done;
				}
			} else {
				udp->udp_delayed_error = error;
				*((sin_t *)&udp->udp_delayed_addr) = sin;
			}
			mutex_exit(&connp->conn_lock);
		} else {
			mp1 = mi_tpi_uderror_ind((char *)&sin, sizeof (sin_t),
			    NULL, 0, error);
			if (mp1 != NULL)
				putnext(connp->conn_rq, mp1);
		}
		break;
	case AF_INET6:
		sin6 = sin6_null;
		sin6.sin6_family = AF_INET6;
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &sin6.sin6_addr);
		sin6.sin6_port = udpha->uha_dst_port;
		if (IPCL_IS_NONSTR(connp)) {
			mutex_enter(&connp->conn_lock);
			if (udp->udp_state == TS_DATA_XFER) {
				if (sin6.sin6_port == connp->conn_fport &&
				    IN6_ARE_ADDR_EQUAL(&sin6.sin6_addr,
				    &connp->conn_faddr_v6)) {
					mutex_exit(&connp->conn_lock);
					(*connp->conn_upcalls->su_set_error)
					    (connp->conn_upper_handle, error);
					goto done;
				}
			} else {
				udp->udp_delayed_error = error;
				*((sin6_t *)&udp->udp_delayed_addr) = sin6;
			}
			mutex_exit(&connp->conn_lock);
		} else {
			mp1 = mi_tpi_uderror_ind((char *)&sin6,
			    sizeof (sin6_t), NULL, 0, error);
			if (mp1 != NULL)
				putnext(connp->conn_rq, mp1);
		}
		break;
	}
done:
	freemsg(mp);
}

/*
 * udp_icmp_error_ipv6 is called by udp_icmp_input to process ICMP for IPv6.
 * Generates the appropriate T_UDERROR_IND for permanent (non-transient)
 * errors.
 * Assumes that IP has pulled up all the extension headers as well as the
 * ICMPv6 header.
 */
static void
udp_icmp_error_ipv6(conn_t *connp, mblk_t *mp, ip_recv_attr_t *ira)
{
	icmp6_t		*icmp6;
	ip6_t		*ip6h, *outer_ip6h;
	uint16_t	iph_hdr_length;
	uint8_t		*nexthdrp;
	udpha_t		*udpha;
	sin6_t		sin6;
	mblk_t		*mp1;
	int		error = 0;
	udp_t		*udp = connp->conn_udp;
	udp_stack_t	*us = udp->udp_us;

	outer_ip6h = (ip6_t *)mp->b_rptr;
#ifdef DEBUG
	if (outer_ip6h->ip6_nxt != IPPROTO_ICMPV6)
		iph_hdr_length = ip_hdr_length_v6(mp, outer_ip6h);
	else
		iph_hdr_length = IPV6_HDR_LEN;
	ASSERT(iph_hdr_length == ira->ira_ip_hdr_length);
#endif
	/* Skip past the outer IP and ICMP headers */
	iph_hdr_length = ira->ira_ip_hdr_length;
	icmp6 = (icmp6_t *)&mp->b_rptr[iph_hdr_length];

	/* Skip past the inner IP and find the ULP header */
	ip6h = (ip6_t *)&icmp6[1];	/* Inner IP header */
	if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &iph_hdr_length, &nexthdrp)) {
		freemsg(mp);
		return;
	}
	udpha = (udpha_t *)((char *)ip6h + iph_hdr_length);

	switch (icmp6->icmp6_type) {
	case ICMP6_DST_UNREACH:
		switch (icmp6->icmp6_code) {
		case ICMP6_DST_UNREACH_NOPORT:
			error = ECONNREFUSED;
			break;
		case ICMP6_DST_UNREACH_ADMIN:
		case ICMP6_DST_UNREACH_NOROUTE:
		case ICMP6_DST_UNREACH_BEYONDSCOPE:
		case ICMP6_DST_UNREACH_ADDR:
			/* Transient errors */
			break;
		default:
			break;
		}
		break;
	case ICMP6_PACKET_TOO_BIG: {
		struct T_unitdata_ind	*tudi;
		struct T_opthdr		*toh;
		size_t			udi_size;
		mblk_t			*newmp;
		t_scalar_t		opt_length = sizeof (struct T_opthdr) +
		    sizeof (struct ip6_mtuinfo);
		sin6_t			*sin6;
		struct ip6_mtuinfo	*mtuinfo;

		/*
		 * If the application has requested to receive path mtu
		 * information, send up an empty message containing an
		 * IPV6_PATHMTU ancillary data item.
		 */
		if (!connp->conn_ipv6_recvpathmtu)
			break;

		udi_size = sizeof (struct T_unitdata_ind) + sizeof (sin6_t) +
		    opt_length;
		if ((newmp = allocb(udi_size, BPRI_MED)) == NULL) {
			UDPS_BUMP_MIB(us, udpInErrors);
			break;
		}

		/*
		 * newmp->b_cont is left to NULL on purpose. This is an
		 * empty message containing only ancillary data.
		 */
		newmp->b_datap->db_type = M_PROTO;
		tudi = (struct T_unitdata_ind *)newmp->b_rptr;
		newmp->b_wptr = (uchar_t *)tudi + udi_size;
		tudi->PRIM_type = T_UNITDATA_IND;
		tudi->SRC_length = sizeof (sin6_t);
		tudi->SRC_offset = sizeof (struct T_unitdata_ind);
		tudi->OPT_offset = tudi->SRC_offset + sizeof (sin6_t);
		tudi->OPT_length = opt_length;

		sin6 = (sin6_t *)&tudi[1];
		bzero(sin6, sizeof (sin6_t));
		sin6->sin6_family = AF_INET6;
		sin6->sin6_addr = connp->conn_faddr_v6;

		toh = (struct T_opthdr *)&sin6[1];
		toh->level = IPPROTO_IPV6;
		toh->name = IPV6_PATHMTU;
		toh->len = opt_length;
		toh->status = 0;

		mtuinfo = (struct ip6_mtuinfo *)&toh[1];
		bzero(mtuinfo, sizeof (struct ip6_mtuinfo));
		mtuinfo->ip6m_addr.sin6_family = AF_INET6;
		mtuinfo->ip6m_addr.sin6_addr = ip6h->ip6_dst;
		mtuinfo->ip6m_mtu = icmp6->icmp6_mtu;
		/*
		 * We've consumed everything we need from the original
		 * message. Free it, then send our empty message.
		 */
		freemsg(mp);
		udp_ulp_recv(connp, newmp, msgdsize(newmp), ira);
		return;
	}
	case ICMP6_TIME_EXCEEDED:
		/* Transient errors */
		break;
	case ICMP6_PARAM_PROB:
		/* If this corresponds to an ICMP_PROTOCOL_UNREACHABLE */
		if (icmp6->icmp6_code == ICMP6_PARAMPROB_NEXTHEADER &&
		    (uchar_t *)ip6h + icmp6->icmp6_pptr ==
		    (uchar_t *)nexthdrp) {
			error = ECONNREFUSED;
			break;
		}
		break;
	}
	if (error == 0) {
		freemsg(mp);
		return;
	}

	/*
	 * Deliver T_UDERROR_IND when the application has asked for it.
	 * The socket layer enables this automatically when connected.
	 */
	if (!connp->conn_dgram_errind) {
		freemsg(mp);
		return;
	}

	sin6 = sin6_null;
	sin6.sin6_family = AF_INET6;
	sin6.sin6_addr = ip6h->ip6_dst;
	sin6.sin6_port = udpha->uha_dst_port;
	sin6.sin6_flowinfo = ip6h->ip6_vcf & ~IPV6_VERS_AND_FLOW_MASK;

	if (IPCL_IS_NONSTR(connp)) {
		mutex_enter(&connp->conn_lock);
		if (udp->udp_state == TS_DATA_XFER) {
			if (sin6.sin6_port == connp->conn_fport &&
			    IN6_ARE_ADDR_EQUAL(&sin6.sin6_addr,
			    &connp->conn_faddr_v6)) {
				mutex_exit(&connp->conn_lock);
				(*connp->conn_upcalls->su_set_error)
				    (connp->conn_upper_handle, error);
				goto done;
			}
		} else {
			udp->udp_delayed_error = error;
			*((sin6_t *)&udp->udp_delayed_addr) = sin6;
		}
		mutex_exit(&connp->conn_lock);
	} else {
		mp1 = mi_tpi_uderror_ind((char *)&sin6, sizeof (sin6_t),
		    NULL, 0, error);
		if (mp1 != NULL)
			putnext(connp->conn_rq, mp1);
	}
done:
	freemsg(mp);
}

/*
 * This routine responds to T_ADDR_REQ messages. It is called by udp_wput.
 * The local address is filled in if the endpoint is bound. The remote
 * address is filled in if the remote address has been specified
 * ("connected endpoint").
 * (The concept of connected CLTS sockets is alien to published TPI
 * but we support it anyway).
 */
static void
udp_addr_req(queue_t *q, mblk_t *mp)
{
	struct sockaddr *sa;
	mblk_t	*ackmp;
	struct T_addr_ack *taa;
	udp_t	*udp = Q_TO_UDP(q);
	conn_t	*connp = udp->udp_connp;
	uint_t	addrlen;

	/* Make it large enough for worst case */
	ackmp = reallocb(mp, sizeof (struct T_addr_ack) +
	    2 * sizeof (sin6_t), 1);
	if (ackmp == NULL) {
		udp_err_ack(q, mp, TSYSERR, ENOMEM);
		return;
	}
	taa = (struct T_addr_ack *)ackmp->b_rptr;

	bzero(taa, sizeof (struct T_addr_ack));
	ackmp->b_wptr = (uchar_t *)&taa[1];

	taa->PRIM_type = T_ADDR_ACK;
	ackmp->b_datap->db_type = M_PCPROTO;

	if (connp->conn_family == AF_INET)
		addrlen = sizeof (sin_t);
	else
		addrlen = sizeof (sin6_t);

	mutex_enter(&connp->conn_lock);
	/*
	 * Note: Following code assumes 32 bit alignment of basic
	 * data structures like sin_t and struct T_addr_ack.
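	 * The local address, when bound, is laid out immediately after the
	 * T_addr_ack, and the remote address, when connected, directly
	 * follows the local one (see the offset arithmetic below).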
	 */
	if (udp->udp_state != TS_UNBND) {
		/*
		 * Fill in local address first
		 */
		taa->LOCADDR_offset = sizeof (*taa);
		taa->LOCADDR_length = addrlen;
		sa = (struct sockaddr *)&taa[1];
		(void) conn_getsockname(connp, sa, &addrlen);
		ackmp->b_wptr += addrlen;
	}
	if (udp->udp_state == TS_DATA_XFER) {
		/*
		 * connected, fill remote address too
		 */
		taa->REMADDR_length = addrlen;
		/* assumed 32-bit alignment */
		taa->REMADDR_offset = taa->LOCADDR_offset + taa->LOCADDR_length;
		sa = (struct sockaddr *)(ackmp->b_rptr + taa->REMADDR_offset);
		(void) conn_getpeername(connp, sa, &addrlen);
		ackmp->b_wptr += addrlen;
	}
	mutex_exit(&connp->conn_lock);
	ASSERT(ackmp->b_wptr <= ackmp->b_datap->db_lim);
	qreply(q, ackmp);
}

static void
udp_copy_info(struct T_info_ack *tap, udp_t *udp)
{
	conn_t *connp = udp->udp_connp;

	if (connp->conn_family == AF_INET) {
		*tap = udp_g_t_info_ack_ipv4;
	} else {
		*tap = udp_g_t_info_ack_ipv6;
	}
	tap->CURRENT_state = udp->udp_state;
	tap->OPT_size = udp_max_optsize;
}

static void
udp_do_capability_ack(udp_t *udp, struct T_capability_ack *tcap,
    t_uscalar_t cap_bits1)
{
	tcap->CAP_bits1 = 0;

	if (cap_bits1 & TC1_INFO) {
		udp_copy_info(&tcap->INFO_ack, udp);
		tcap->CAP_bits1 |= TC1_INFO;
	}
}

/*
 * This routine responds to T_CAPABILITY_REQ messages. It is called by
 * udp_wput. Much of the T_CAPABILITY_ACK information is copied from
 * udp_g_t_info_ack. The current state of the stream is copied from
 * udp_state.
 */
static void
udp_capability_req(queue_t *q, mblk_t *mp)
{
	t_uscalar_t		cap_bits1;
	struct T_capability_ack	*tcap;
	udp_t	*udp = Q_TO_UDP(q);

	cap_bits1 = ((struct T_capability_req *)mp->b_rptr)->CAP_bits1;

	mp = tpi_ack_alloc(mp, sizeof (struct T_capability_ack),
	    mp->b_datap->db_type, T_CAPABILITY_ACK);
	if (!mp)
		return;

	tcap = (struct T_capability_ack *)mp->b_rptr;
	udp_do_capability_ack(udp, tcap, cap_bits1);

	qreply(q, mp);
}

/*
 * This routine responds to T_INFO_REQ messages. It is called by udp_wput.
 * Most of the T_INFO_ACK information is copied from udp_g_t_info_ack.
 * The current state of the stream is copied from udp_state.
 */
static void
udp_info_req(queue_t *q, mblk_t *mp)
{
	udp_t *udp = Q_TO_UDP(q);

	/* Create a T_INFO_ACK message. */
	mp = tpi_ack_alloc(mp, sizeof (struct T_info_ack), M_PCPROTO,
	    T_INFO_ACK);
	if (!mp)
		return;
	udp_copy_info((struct T_info_ack *)mp->b_rptr, udp);
	qreply(q, mp);
}

/* For /dev/udp aka AF_INET open */
static int
udp_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	return (udp_open(q, devp, flag, sflag, credp, B_FALSE));
}

/* For /dev/udp6 aka AF_INET6 open */
static int
udp_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp)
{
	return (udp_open(q, devp, flag, sflag, credp, B_TRUE));
}

/*
 * This is the open routine for udp. It allocates a udp_t structure for
 * the stream and, on the first open of the module, creates an ND table.
 */
static int
udp_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp,
    boolean_t isv6)
{
	udp_t		*udp;
	conn_t		*connp;
	dev_t		conn_dev;
	vmem_t		*minor_arena;
	int		err;

	/* If the stream is already open, return immediately. */
	if (q->q_ptr != NULL)
		return (0);

	if (sflag == MODOPEN)
		return (EINVAL);

	if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) &&
	    ((conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) {
		minor_arena = ip_minor_arena_la;
	} else {
		/*
		 * Either minor numbers in the large arena were exhausted
		 * or a non socket application is doing the open.
		 * Try to allocate from the small arena.
		 */
		if ((conn_dev = inet_minor_alloc(ip_minor_arena_sa)) == 0)
			return (EBUSY);

		minor_arena = ip_minor_arena_sa;
	}

	if (flag & SO_FALLBACK) {
		/*
		 * Non streams socket needs a stream to fallback to
		 */
		RD(q)->q_ptr = (void *)conn_dev;
		WR(q)->q_qinfo = &udp_fallback_sock_winit;
		WR(q)->q_ptr = (void *)minor_arena;
		qprocson(q);
		return (0);
	}

	connp = udp_do_open(credp, isv6, KM_SLEEP, &err);
	if (connp == NULL) {
		inet_minor_free(minor_arena, conn_dev);
		return (err);
	}
	udp = connp->conn_udp;

	*devp = makedevice(getemajor(*devp), (minor_t)conn_dev);
	connp->conn_dev = conn_dev;
	connp->conn_minor_arena = minor_arena;

	/*
	 * Initialize the udp_t structure for this stream.
	 */
	q->q_ptr = connp;
	WR(q)->q_ptr = connp;
	connp->conn_rq = q;
	connp->conn_wq = WR(q);

	/*
	 * Since this conn_t/udp_t is not yet visible to anybody else we don't
	 * need to lock anything.
	 */
	ASSERT(connp->conn_proto == IPPROTO_UDP);
	ASSERT(connp->conn_udp == udp);
	ASSERT(udp->udp_connp == connp);

	if (flag & SO_SOCKSTR) {
		udp->udp_issocket = B_TRUE;
	}

	WR(q)->q_hiwat = connp->conn_sndbuf;
	WR(q)->q_lowat = connp->conn_sndlowat;

	qprocson(q);

	/* Set the Stream head write offset and high watermark. */
	(void) proto_set_tx_wroff(q, connp, connp->conn_wroff);
	(void) proto_set_rx_hiwat(q, connp,
	    udp_set_rcv_hiwat(udp, connp->conn_rcvbuf));

	mutex_enter(&connp->conn_lock);
	connp->conn_state_flags &= ~CONN_INCIPIENT;
	mutex_exit(&connp->conn_lock);
	return (0);
}

/*
 * Which UDP options OK to set through T_UNITDATA_REQ...
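 * Currently everything is allowed; per-option validation still happens
 * later in udp_do_opt_set()/conn_opt_set().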
 */
/* ARGSUSED */
static boolean_t
udp_opt_allow_udr_set(t_scalar_t level, t_scalar_t name)
{
	return (B_TRUE);
}

/*
 * This routine gets default values of certain options whose default
 * values are maintained by protocol specific code
 */
int
udp_opt_default(queue_t *q, t_scalar_t level, t_scalar_t name, uchar_t *ptr)
{
	udp_t		*udp = Q_TO_UDP(q);
	udp_stack_t	*us = udp->udp_us;
	int		*i1 = (int *)ptr;

	switch (level) {
	case IPPROTO_IP:
		switch (name) {
		case IP_MULTICAST_TTL:
			*ptr = (uchar_t)IP_DEFAULT_MULTICAST_TTL;
			return (sizeof (uchar_t));
		case IP_MULTICAST_LOOP:
			*ptr = (uchar_t)IP_DEFAULT_MULTICAST_LOOP;
			return (sizeof (uchar_t));
		}
		break;
	case IPPROTO_IPV6:
		switch (name) {
		case IPV6_MULTICAST_HOPS:
			*i1 = IP_DEFAULT_MULTICAST_TTL;
			return (sizeof (int));
		case IPV6_MULTICAST_LOOP:
			*i1 = IP_DEFAULT_MULTICAST_LOOP;
			return (sizeof (int));
		case IPV6_UNICAST_HOPS:
			*i1 = us->us_ipv6_hoplimit;
			return (sizeof (int));
		}
		break;
	}
	return (-1);
}

/*
 * This routine retrieves the current status of socket options.
 * It returns the size of the option retrieved, or -1.
 */
int
udp_opt_get(conn_t *connp, t_scalar_t level, t_scalar_t name,
    uchar_t *ptr)
{
	int		*i1 = (int *)ptr;
	udp_t		*udp = connp->conn_udp;
	int		len;
	conn_opt_arg_t	coas;
	int		retval;

	coas.coa_connp = connp;
	coas.coa_ixa = connp->conn_ixa;
	coas.coa_ipp = &connp->conn_xmit_ipp;
	coas.coa_ancillary = B_FALSE;
	coas.coa_changed = 0;

	/*
	 * We assume that the optcom framework has checked for the set
	 * of levels and names that are supported, hence we don't worry
	 * about rejecting based on that.
	 * First check for UDP specific handling, then pass to common routine.
	 */
	switch (level) {
	case IPPROTO_IP:
		/*
		 * Only allow IPv4 option processing on IPv4 sockets.
		 */
		if (connp->conn_family != AF_INET)
			return (-1);

		switch (name) {
		case IP_OPTIONS:
		case T_IP_OPTIONS:
			mutex_enter(&connp->conn_lock);
			if (!(udp->udp_recv_ipp.ipp_fields &
			    IPPF_IPV4_OPTIONS)) {
				mutex_exit(&connp->conn_lock);
				return (0);
			}

			len = udp->udp_recv_ipp.ipp_ipv4_options_len;
			ASSERT(len != 0);
			bcopy(udp->udp_recv_ipp.ipp_ipv4_options, ptr, len);
			mutex_exit(&connp->conn_lock);
			return (len);
		}
		break;
	case IPPROTO_UDP:
		switch (name) {
		case UDP_NAT_T_ENDPOINT:
			mutex_enter(&connp->conn_lock);
			*i1 = udp->udp_nat_t_endpoint;
			mutex_exit(&connp->conn_lock);
			return (sizeof (int));
		case UDP_RCVHDR:
			mutex_enter(&connp->conn_lock);
			*i1 = udp->udp_rcvhdr ? 1 : 0;
			mutex_exit(&connp->conn_lock);
			return (sizeof (int));
		}
	}
	mutex_enter(&connp->conn_lock);
	retval = conn_opt_get(&coas, level, name, ptr);
	mutex_exit(&connp->conn_lock);
	return (retval);
}

/*
 * This routine retrieves the current status of socket options.
 * It returns the size of the option retrieved, or -1.
 */
int
udp_tpi_opt_get(queue_t *q, t_scalar_t level, t_scalar_t name, uchar_t *ptr)
{
	conn_t		*connp = Q_TO_CONN(q);
	int		err;

	err = udp_opt_get(connp, level, name, ptr);
	return (err);
}

/*
 * This routine sets socket options.
 */
int
udp_do_opt_set(conn_opt_arg_t *coa, int level, int name,
    uint_t inlen, uchar_t *invalp, cred_t *cr, boolean_t checkonly)
{
	conn_t		*connp = coa->coa_connp;
	ip_xmit_attr_t	*ixa = coa->coa_ixa;
	udp_t		*udp = connp->conn_udp;
	udp_stack_t	*us = udp->udp_us;
	int		*i1 = (int *)invalp;
	boolean_t	onoff = (*i1 == 0) ? 0 : 1;
	int		error;

	ASSERT(MUTEX_NOT_HELD(&coa->coa_connp->conn_lock));
	/*
	 * First do UDP specific sanity checks and handle UDP specific
	 * options. Note that some IPPROTO_UDP options are handled
	 * by conn_opt_set.
	 */
	switch (level) {
	case SOL_SOCKET:
		switch (name) {
		case SO_SNDBUF:
			if (*i1 > us->us_max_buf) {
				return (ENOBUFS);
			}
			break;
		case SO_RCVBUF:
			if (*i1 > us->us_max_buf) {
				return (ENOBUFS);
			}
			break;

		case SCM_UCRED: {
			struct ucred_s *ucr;
			cred_t *newcr;
			ts_label_t *tsl;

			/*
			 * Only sockets that have proper privileges and are
			 * bound to MLPs will have any other value here, so
			 * this implicitly tests for privilege to set label.
			 */
			if (connp->conn_mlp_type == mlptSingle)
				break;

			ucr = (struct ucred_s *)invalp;
			if (inlen < sizeof (*ucr) + sizeof (bslabel_t) ||
			    ucr->uc_labeloff < sizeof (*ucr) ||
			    ucr->uc_labeloff + sizeof (bslabel_t) > inlen)
				return (EINVAL);
			if (!checkonly) {
				/*
				 * Set ixa_tsl to the new label.
				 * We assume that crgetzoneid doesn't change
				 * as part of the SCM_UCRED.
				 */
				ASSERT(cr != NULL);
				if ((tsl = crgetlabel(cr)) == NULL)
					return (EINVAL);
				newcr = copycred_from_bslabel(cr, UCLABEL(ucr),
				    tsl->tsl_doi, KM_NOSLEEP);
				if (newcr == NULL)
					return (ENOSR);
				ASSERT(newcr->cr_label != NULL);
				/*
				 * Move the hold on the cr_label to ixa_tsl by
				 * setting cr_label to NULL. Then release newcr.
				 */
				ip_xmit_attr_replace_tsl(ixa, newcr->cr_label);
				ixa->ixa_flags |= IXAF_UCRED_TSL;
				newcr->cr_label = NULL;
				crfree(newcr);
				coa->coa_changed |= COA_HEADER_CHANGED;
				coa->coa_changed |= COA_WROFF_CHANGED;
			}
			/* Fully handled this option. */
			return (0);
		}
		}
		break;
	case IPPROTO_UDP:
		switch (name) {
		case UDP_NAT_T_ENDPOINT:
			if ((error = secpolicy_ip_config(cr, B_FALSE)) != 0) {
				return (error);
			}

			/*
			 * Use conn_family instead so we can avoid ambiguities
			 * with AF_INET6 sockets that may switch from IPv4
			 * to IPv6.
			 */
			if (connp->conn_family != AF_INET) {
				return (EAFNOSUPPORT);
			}

			if (!checkonly) {
				mutex_enter(&connp->conn_lock);
				udp->udp_nat_t_endpoint = onoff;
				mutex_exit(&connp->conn_lock);
				coa->coa_changed |= COA_HEADER_CHANGED;
				coa->coa_changed |= COA_WROFF_CHANGED;
			}
			/* Fully handled this option. */
			return (0);
		case UDP_RCVHDR:
			mutex_enter(&connp->conn_lock);
			udp->udp_rcvhdr = onoff;
			mutex_exit(&connp->conn_lock);
			return (0);
		}
		break;
	}
	error = conn_opt_set(coa, level, name, inlen, invalp,
	    checkonly, cr);
	return (error);
}

/*
 * This routine sets socket options.
 */
int
udp_opt_set(conn_t *connp, uint_t optset_context, int level,
    int name, uint_t inlen, uchar_t *invalp, uint_t *outlenp,
    uchar_t *outvalp, void *thisdg_attrs, cred_t *cr)
{
	udp_t		*udp = connp->conn_udp;
	int		err;
	conn_opt_arg_t	coas, *coa;
	boolean_t	checkonly;
	udp_stack_t	*us = udp->udp_us;

	switch (optset_context) {
	case SETFN_OPTCOM_CHECKONLY:
		checkonly = B_TRUE;
		/*
		 * Note: Implies T_CHECK semantics for T_OPTCOM_REQ
		 * inlen != 0 implies value supplied and
		 * 	we have to "pretend" to set it.
		 * inlen == 0 implies that there is no
		 * 	value part in T_CHECK request and just validation
		 * done elsewhere should be enough, we just return here.
		 */
		if (inlen == 0) {
			*outlenp = 0;
			return (0);
		}
		break;
	case SETFN_OPTCOM_NEGOTIATE:
		checkonly = B_FALSE;
		break;
	case SETFN_UD_NEGOTIATE:
	case SETFN_CONN_NEGOTIATE:
		checkonly = B_FALSE;
		/*
		 * Negotiating local and "association-related" options
		 * through T_UNITDATA_REQ.
		 *
		 * Following routine can filter out ones we do not
		 * want to be "set" this way.
		 */
		if (!udp_opt_allow_udr_set(level, name)) {
			*outlenp = 0;
			return (EINVAL);
		}
		break;
	default:
		/*
		 * We should never get here
		 */
		*outlenp = 0;
		return (EINVAL);
	}

	ASSERT((optset_context != SETFN_OPTCOM_CHECKONLY) ||
	    (optset_context == SETFN_OPTCOM_CHECKONLY && inlen != 0));

	if (thisdg_attrs != NULL) {
		/* Options from T_UNITDATA_REQ */
		coa = (conn_opt_arg_t *)thisdg_attrs;
		ASSERT(coa->coa_connp == connp);
		ASSERT(coa->coa_ixa != NULL);
		ASSERT(coa->coa_ipp != NULL);
		ASSERT(coa->coa_ancillary);
	} else {
		coa = &coas;
		coas.coa_connp = connp;
		/* Get a reference on conn_ixa to prevent concurrent mods */
		coas.coa_ixa = conn_get_ixa(connp, B_TRUE);
		if (coas.coa_ixa == NULL) {
			*outlenp = 0;
			return (ENOMEM);
		}
		coas.coa_ipp = &connp->conn_xmit_ipp;
		coas.coa_ancillary = B_FALSE;
		coas.coa_changed = 0;
	}

	err = udp_do_opt_set(coa, level, name, inlen, invalp,
	    cr, checkonly);
	if (err != 0) {
errout:
		if (!coa->coa_ancillary)
			ixa_refrele(coa->coa_ixa);
		*outlenp = 0;
		return (err);
	}
	/* Handle DHCPINIT here outside of lock */
	if (level == IPPROTO_IP && name == IP_DHCPINIT_IF) {
		uint_t	ifindex;
		ill_t	*ill;

		ifindex = *(uint_t *)invalp;
		if (ifindex == 0) {
			ill = NULL;
		} else {
			ill = ill_lookup_on_ifindex(ifindex, B_FALSE,
			    coa->coa_ixa->ixa_ipst);
			if (ill == NULL) {
				err = ENXIO;
				goto errout;
			}

			mutex_enter(&ill->ill_lock);
			if (ill->ill_state_flags & ILL_CONDEMNED) {
				mutex_exit(&ill->ill_lock);
				ill_refrele(ill);
				err = ENXIO;
				goto errout;
			}
			if (IS_VNI(ill)) {
				mutex_exit(&ill->ill_lock);
				ill_refrele(ill);
				err = EINVAL;
				goto errout;
			}
		}
		mutex_enter(&connp->conn_lock);

		if (connp->conn_dhcpinit_ill != NULL) {
			/*
1924 * We've locked the conn so conn_cleanup_ill() 1925 * cannot clear conn_dhcpinit_ill -- so it's 1926 * safe to access the ill. 1927 */ 1928 ill_t *oill = connp->conn_dhcpinit_ill; 1929 1930 ASSERT(oill->ill_dhcpinit != 0); 1931 atomic_dec_32(&oill->ill_dhcpinit); 1932 ill_set_inputfn(connp->conn_dhcpinit_ill); 1933 connp->conn_dhcpinit_ill = NULL; 1934 } 1935 1936 if (ill != NULL) { 1937 connp->conn_dhcpinit_ill = ill; 1938 atomic_inc_32(&ill->ill_dhcpinit); 1939 ill_set_inputfn(ill); 1940 mutex_exit(&connp->conn_lock); 1941 mutex_exit(&ill->ill_lock); 1942 ill_refrele(ill); 1943 } else { 1944 mutex_exit(&connp->conn_lock); 1945 } 1946 } 1947 1948 /* 1949 * Common case of OK return with outval same as inval. 1950 */ 1951 if (invalp != outvalp) { 1952 /* don't trust bcopy for identical src/dst */ 1953 (void) bcopy(invalp, outvalp, inlen); 1954 } 1955 *outlenp = inlen; 1956 1957 /* 1958 * If this was not ancillary data, then we rebuild the headers, 1959 * update the IRE/NCE, and IPsec as needed. 1960 * Since the label depends on the destination we go through 1961 * ip_set_destination first. 1962 */ 1963 if (coa->coa_ancillary) { 1964 return (0); 1965 } 1966 1967 if (coa->coa_changed & COA_ROUTE_CHANGED) { 1968 in6_addr_t saddr, faddr, nexthop; 1969 in_port_t fport; 1970 1971 /* 1972 * We clear lastdst to make sure we pick up the change 1973 * next time sending. 1974 * If we are connected we re-cache the information. 1975 * We ignore errors to preserve BSD behavior. 1976 * Note that we don't redo IPsec policy lookup here 1977 * since the final destination (or source) didn't change. 1978 */ 1979 mutex_enter(&connp->conn_lock); 1980 connp->conn_v6lastdst = ipv6_all_zeros; 1981 1982 ip_attr_nexthop(coa->coa_ipp, coa->coa_ixa, 1983 &connp->conn_faddr_v6, &nexthop); 1984 saddr = connp->conn_saddr_v6; 1985 faddr = connp->conn_faddr_v6; 1986 fport = connp->conn_fport; 1987 mutex_exit(&connp->conn_lock); 1988 1989 if (!IN6_IS_ADDR_UNSPECIFIED(&faddr) && 1990 !IN6_IS_ADDR_V4MAPPED_ANY(&faddr)) { 1991 (void) ip_attr_connect(connp, coa->coa_ixa, 1992 &saddr, &faddr, &nexthop, fport, NULL, NULL, 1993 IPDF_ALLOW_MCBC | IPDF_VERIFY_DST); 1994 } 1995 } 1996 1997 ixa_refrele(coa->coa_ixa); 1998 1999 if (coa->coa_changed & COA_HEADER_CHANGED) { 2000 /* 2001 * Rebuild the header template if we are connected. 2002 * Otherwise clear conn_v6lastdst so we rebuild the header 2003 * in the data path. 
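 *
 * Clearing conn_v6lastdst defeats the "same destination as last time"
 * fast path, so the next send falls into udp_output_newdst() and picks
 * up the new header contents there.  Illustrative example (assuming the
 * option in question is one that marks COA_HEADER_CHANGED, e.g. IP_TOS):
 *	setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof (tos));
 * takes effect immediately on a connected socket, but only at the next
 * sendto() on an unconnected one.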
2004 */ 2005 mutex_enter(&connp->conn_lock); 2006 if (!IN6_IS_ADDR_UNSPECIFIED(&connp->conn_faddr_v6) && 2007 !IN6_IS_ADDR_V4MAPPED_ANY(&connp->conn_faddr_v6)) { 2008 err = udp_build_hdr_template(connp, 2009 &connp->conn_saddr_v6, &connp->conn_faddr_v6, 2010 connp->conn_fport, connp->conn_flowinfo); 2011 if (err != 0) { 2012 mutex_exit(&connp->conn_lock); 2013 return (err); 2014 } 2015 } else { 2016 connp->conn_v6lastdst = ipv6_all_zeros; 2017 } 2018 mutex_exit(&connp->conn_lock); 2019 } 2020 if (coa->coa_changed & COA_RCVBUF_CHANGED) { 2021 (void) proto_set_rx_hiwat(connp->conn_rq, connp, 2022 connp->conn_rcvbuf); 2023 } 2024 if ((coa->coa_changed & COA_SNDBUF_CHANGED) && !IPCL_IS_NONSTR(connp)) { 2025 connp->conn_wq->q_hiwat = connp->conn_sndbuf; 2026 } 2027 if (coa->coa_changed & COA_WROFF_CHANGED) { 2028 /* Increase wroff if needed */ 2029 uint_t wroff; 2030 2031 mutex_enter(&connp->conn_lock); 2032 wroff = connp->conn_ht_iphc_allocated + us->us_wroff_extra; 2033 if (udp->udp_nat_t_endpoint) 2034 wroff += sizeof (uint32_t); 2035 if (wroff > connp->conn_wroff) { 2036 connp->conn_wroff = wroff; 2037 mutex_exit(&connp->conn_lock); 2038 (void) proto_set_tx_wroff(connp->conn_rq, connp, wroff); 2039 } else { 2040 mutex_exit(&connp->conn_lock); 2041 } 2042 } 2043 return (err); 2044 } 2045 2046 /* This routine sets socket options. */ 2047 int 2048 udp_tpi_opt_set(queue_t *q, uint_t optset_context, int level, int name, 2049 uint_t inlen, uchar_t *invalp, uint_t *outlenp, uchar_t *outvalp, 2050 void *thisdg_attrs, cred_t *cr) 2051 { 2052 conn_t *connp = Q_TO_CONN(q); 2053 int error; 2054 2055 error = udp_opt_set(connp, optset_context, level, name, inlen, invalp, 2056 outlenp, outvalp, thisdg_attrs, cr); 2057 return (error); 2058 } 2059 2060 /* 2061 * Setup IP and UDP headers. 2062 * Returns NULL on allocation failure, in which case data_mp is freed. 2063 */ 2064 mblk_t * 2065 udp_prepend_hdr(conn_t *connp, ip_xmit_attr_t *ixa, const ip_pkt_t *ipp, 2066 const in6_addr_t *v6src, const in6_addr_t *v6dst, in_port_t dstport, 2067 uint32_t flowinfo, mblk_t *data_mp, int *errorp) 2068 { 2069 mblk_t *mp; 2070 udpha_t *udpha; 2071 udp_stack_t *us = connp->conn_netstack->netstack_udp; 2072 uint_t data_len; 2073 uint32_t cksum; 2074 udp_t *udp = connp->conn_udp; 2075 boolean_t insert_spi = udp->udp_nat_t_endpoint; 2076 uint_t ulp_hdr_len; 2077 2078 data_len = msgdsize(data_mp); 2079 ulp_hdr_len = UDPH_SIZE; 2080 if (insert_spi) 2081 ulp_hdr_len += sizeof (uint32_t); 2082 2083 mp = conn_prepend_hdr(ixa, ipp, v6src, v6dst, IPPROTO_UDP, flowinfo, 2084 ulp_hdr_len, data_mp, data_len, us->us_wroff_extra, &cksum, errorp); 2085 if (mp == NULL) { 2086 ASSERT(*errorp != 0); 2087 return (NULL); 2088 } 2089 2090 data_len += ulp_hdr_len; 2091 ixa->ixa_pktlen = data_len + ixa->ixa_ip_hdr_length; 2092 2093 udpha = (udpha_t *)(mp->b_rptr + ixa->ixa_ip_hdr_length); 2094 udpha->uha_src_port = connp->conn_lport; 2095 udpha->uha_dst_port = dstport; 2096 udpha->uha_checksum = 0; 2097 udpha->uha_length = htons(data_len); 2098 2099 /* 2100 * If there was a routing option/header then conn_prepend_hdr 2101 * has massaged it and placed the pseudo-header checksum difference 2102 * in the cksum argument. 2103 * 2104 * Setup header length and prepare for ULP checksum done in IP. 2105 * 2106 * We make it easy for IP to include our pseudo header 2107 * by putting our length in uha_checksum. 2108 * The IP source, destination, and length have already been set by 2109 * conn_prepend_hdr. 
2110 */ 2111 cksum += data_len; 2112 cksum = (cksum >> 16) + (cksum & 0xFFFF); 2113 ASSERT(cksum < 0x10000); 2114 2115 if (ixa->ixa_flags & IXAF_IS_IPV4) { 2116 ipha_t *ipha = (ipha_t *)mp->b_rptr; 2117 2118 ASSERT(ntohs(ipha->ipha_length) == ixa->ixa_pktlen); 2119 2120 /* IP does the checksum if uha_checksum is non-zero */ 2121 if (us->us_do_checksum) { 2122 if (cksum == 0) 2123 udpha->uha_checksum = 0xffff; 2124 else 2125 udpha->uha_checksum = htons(cksum); 2126 } else { 2127 udpha->uha_checksum = 0; 2128 } 2129 } else { 2130 ip6_t *ip6h = (ip6_t *)mp->b_rptr; 2131 2132 ASSERT(ntohs(ip6h->ip6_plen) + IPV6_HDR_LEN == ixa->ixa_pktlen); 2133 if (cksum == 0) 2134 udpha->uha_checksum = 0xffff; 2135 else 2136 udpha->uha_checksum = htons(cksum); 2137 } 2138 2139 /* Insert all-0s SPI now. */ 2140 if (insert_spi) 2141 *((uint32_t *)(udpha + 1)) = 0; 2142 2143 return (mp); 2144 } 2145 2146 static int 2147 udp_build_hdr_template(conn_t *connp, const in6_addr_t *v6src, 2148 const in6_addr_t *v6dst, in_port_t dstport, uint32_t flowinfo) 2149 { 2150 udpha_t *udpha; 2151 int error; 2152 2153 ASSERT(MUTEX_HELD(&connp->conn_lock)); 2154 /* 2155 * We clear lastdst to make sure we don't use the lastdst path 2156 * next time sending since we might not have set v6dst yet. 2157 */ 2158 connp->conn_v6lastdst = ipv6_all_zeros; 2159 2160 error = conn_build_hdr_template(connp, UDPH_SIZE, 0, v6src, v6dst, 2161 flowinfo); 2162 if (error != 0) 2163 return (error); 2164 2165 /* 2166 * Any routing header/option has been massaged. The checksum difference 2167 * is stored in conn_sum. 2168 */ 2169 udpha = (udpha_t *)connp->conn_ht_ulp; 2170 udpha->uha_src_port = connp->conn_lport; 2171 udpha->uha_dst_port = dstport; 2172 udpha->uha_checksum = 0; 2173 udpha->uha_length = htons(UDPH_SIZE); /* Filled in later */ 2174 return (0); 2175 } 2176 2177 static mblk_t * 2178 udp_queue_fallback(udp_t *udp, mblk_t *mp) 2179 { 2180 ASSERT(MUTEX_HELD(&udp->udp_recv_lock)); 2181 if (IPCL_IS_NONSTR(udp->udp_connp)) { 2182 /* 2183 * fallback has started but messages have not been moved yet 2184 */ 2185 if (udp->udp_fallback_queue_head == NULL) { 2186 ASSERT(udp->udp_fallback_queue_tail == NULL); 2187 udp->udp_fallback_queue_head = mp; 2188 udp->udp_fallback_queue_tail = mp; 2189 } else { 2190 ASSERT(udp->udp_fallback_queue_tail != NULL); 2191 udp->udp_fallback_queue_tail->b_next = mp; 2192 udp->udp_fallback_queue_tail = mp; 2193 } 2194 return (NULL); 2195 } else { 2196 /* 2197 * Fallback completed, let the caller putnext() the mblk. 2198 */ 2199 return (mp); 2200 } 2201 } 2202 2203 /* 2204 * Deliver data to ULP. In case we have a socket, and it's falling back to 2205 * TPI, then we'll queue the mp for later processing. 
2206 */ 2207 static void 2208 udp_ulp_recv(conn_t *connp, mblk_t *mp, uint_t len, ip_recv_attr_t *ira) 2209 { 2210 if (IPCL_IS_NONSTR(connp)) { 2211 udp_t *udp = connp->conn_udp; 2212 int error; 2213 2214 ASSERT(len == msgdsize(mp)); 2215 if ((*connp->conn_upcalls->su_recv) 2216 (connp->conn_upper_handle, mp, len, 0, &error, NULL) < 0) { 2217 mutex_enter(&udp->udp_recv_lock); 2218 if (error == ENOSPC) { 2219 /* 2220 * let's confirm while holding the lock 2221 */ 2222 if ((*connp->conn_upcalls->su_recv) 2223 (connp->conn_upper_handle, NULL, 0, 0, 2224 &error, NULL) < 0) { 2225 ASSERT(error == ENOSPC); 2226 if (error == ENOSPC) { 2227 connp->conn_flow_cntrld = 2228 B_TRUE; 2229 } 2230 } 2231 mutex_exit(&udp->udp_recv_lock); 2232 } else { 2233 ASSERT(error == EOPNOTSUPP); 2234 mp = udp_queue_fallback(udp, mp); 2235 mutex_exit(&udp->udp_recv_lock); 2236 if (mp != NULL) 2237 putnext(connp->conn_rq, mp); 2238 } 2239 } 2240 ASSERT(MUTEX_NOT_HELD(&udp->udp_recv_lock)); 2241 } else { 2242 if (is_system_labeled()) { 2243 ASSERT(ira->ira_cred != NULL); 2244 /* 2245 * Provide for protocols above UDP such as RPC 2246 * NOPID leaves db_cpid unchanged. 2247 */ 2248 mblk_setcred(mp, ira->ira_cred, NOPID); 2249 } 2250 2251 putnext(connp->conn_rq, mp); 2252 } 2253 } 2254 2255 /* 2256 * This is the inbound data path. 2257 * IP has already pulled up the IP plus UDP headers and verified alignment 2258 * etc. 2259 */ 2260 /* ARGSUSED2 */ 2261 static void 2262 udp_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira) 2263 { 2264 conn_t *connp = (conn_t *)arg1; 2265 struct T_unitdata_ind *tudi; 2266 uchar_t *rptr; /* Pointer to IP header */ 2267 int hdr_length; /* Length of IP+UDP headers */ 2268 int udi_size; /* Size of T_unitdata_ind */ 2269 int pkt_len; 2270 udp_t *udp; 2271 udpha_t *udpha; 2272 ip_pkt_t ipps; 2273 ip6_t *ip6h; 2274 mblk_t *mp1; 2275 uint32_t udp_ipv4_options_len; 2276 crb_t recv_ancillary; 2277 udp_stack_t *us; 2278 conn_t *new = NULL; 2279 2280 ASSERT(connp->conn_flags & IPCL_UDPCONN); 2281 2282 mutex_enter(&connp->conn_lock); 2283 if (connp->conn_reuselist != NULL) { 2284 struct reuselist *reusep = connp->conn_reuselist; 2285 int i; 2286 2287 /* 2288 * we have to balance the request between multiple sockets. 2289 * Currently we do this in a round-robin fashion. In the 2290 * reuselist we maintain a pointer to the last receiver. 2291 * TODO: we can add a check if the conn is full and skip to 2292 * the next. 2293 */ 2294 mutex_enter(&reusep->ru_lock); 2295 i = reusep->ru_next; 2296 reuse_again: 2297 new = reusep->ru_conns[i]; 2298 if (++i == reusep->ru_entries) 2299 i = 0; 2300 reusep->ru_next = i; 2301 if (new == connp) { 2302 new = NULL; 2303 } else { 2304 /* 2305 * Locking 'new' violates the lock order conn -> 2306 * reuselist, use tryenter to not cause a deadlock 2307 * panic. If the lock can't be obtained go to next conn. 2308 * Loop terminates because connp must be part of 2309 * reuselist. 
2310 */ 2311 if (!mutex_tryenter(&new->conn_lock)) 2312 goto reuse_again; 2313 CONN_INC_REF_LOCKED(new); 2314 mutex_exit(&new->conn_lock); 2315 } 2316 mutex_exit(&reusep->ru_lock); 2317 mutex_exit(&connp->conn_lock); 2318 if (new != NULL) 2319 connp = new; 2320 } else { 2321 mutex_exit(&connp->conn_lock); 2322 } 2323 2324 udp = connp->conn_udp; 2325 us = udp->udp_us; 2326 rptr = mp->b_rptr; 2327 2328 ASSERT(DB_TYPE(mp) == M_DATA); 2329 ASSERT(OK_32PTR(rptr)); 2330 ASSERT(ira->ira_pktlen == msgdsize(mp)); 2331 pkt_len = ira->ira_pktlen; 2332 2333 /* 2334 * Get a snapshot of these and allow other threads to change 2335 * them after that. We need the same recv_ancillary when determining 2336 * the size as when adding the ancillary data items. 2337 */ 2338 mutex_enter(&connp->conn_lock); 2339 udp_ipv4_options_len = udp->udp_recv_ipp.ipp_ipv4_options_len; 2340 recv_ancillary = connp->conn_recv_ancillary; 2341 mutex_exit(&connp->conn_lock); 2342 2343 hdr_length = ira->ira_ip_hdr_length; 2344 2345 /* 2346 * IP inspected the UDP header thus all of it must be in the mblk. 2347 * UDP length check is performed for IPv6 packets and IPv4 packets 2348 * to check if the size of the packet as specified 2349 * by the UDP header is the same as the length derived from the IP 2350 * header. 2351 */ 2352 udpha = (udpha_t *)(rptr + hdr_length); 2353 if (pkt_len != ntohs(udpha->uha_length) + hdr_length) 2354 goto tossit; 2355 2356 hdr_length += UDPH_SIZE; 2357 ASSERT(MBLKL(mp) >= hdr_length); /* IP did a pullup */ 2358 2359 /* Initialize regardless of IP version */ 2360 ipps.ipp_fields = 0; 2361 2362 if (((ira->ira_flags & IRAF_IPV4_OPTIONS) || 2363 udp_ipv4_options_len > 0) && 2364 connp->conn_family == AF_INET) { 2365 int err; 2366 2367 /* 2368 * Record/update udp_recv_ipp with the lock 2369 * held. Not needed for AF_INET6 sockets 2370 * since they don't support a getsockopt of IP_OPTIONS. 2371 */ 2372 mutex_enter(&connp->conn_lock); 2373 err = ip_find_hdr_v4((ipha_t *)rptr, &udp->udp_recv_ipp, 2374 B_TRUE); 2375 if (err != 0) { 2376 /* Allocation failed. Drop packet */ 2377 mutex_exit(&connp->conn_lock); 2378 goto tossit; 2379 } 2380 mutex_exit(&connp->conn_lock); 2381 } 2382 2383 if (recv_ancillary.crb_all != 0) { 2384 /* 2385 * Record packet information in the ip_pkt_t 2386 */ 2387 if (ira->ira_flags & IRAF_IS_IPV4) { 2388 ASSERT(IPH_HDR_VERSION(rptr) == IPV4_VERSION); 2389 ASSERT(MBLKL(mp) >= sizeof (ipha_t)); 2390 ASSERT(((ipha_t *)rptr)->ipha_protocol == IPPROTO_UDP); 2391 ASSERT(ira->ira_ip_hdr_length == IPH_HDR_LENGTH(rptr)); 2392 2393 (void) ip_find_hdr_v4((ipha_t *)rptr, &ipps, B_FALSE); 2394 } else { 2395 uint8_t nexthdrp; 2396 2397 ASSERT(IPH_HDR_VERSION(rptr) == IPV6_VERSION); 2398 /* 2399 * IPv6 packets can only be received by applications 2400 * that are prepared to receive IPv6 addresses. 2401 * The IP fanout must ensure this. 2402 */ 2403 ASSERT(connp->conn_family == AF_INET6); 2404 2405 ip6h = (ip6_t *)rptr; 2406 2407 /* We don't care about the length, but need the ipp */ 2408 hdr_length = ip_find_hdr_v6(mp, ip6h, B_TRUE, &ipps, 2409 &nexthdrp); 2410 ASSERT(hdr_length == ira->ira_ip_hdr_length); 2411 /* Restore */ 2412 hdr_length = ira->ira_ip_hdr_length + UDPH_SIZE; 2413 ASSERT(nexthdrp == IPPROTO_UDP); 2414 } 2415 } 2416 2417 /* 2418 * This is the inbound data path. Packets are passed upstream as 2419 * T_UNITDATA_IND messages. 
2420 */ 2421 if (connp->conn_family == AF_INET) { 2422 sin_t *sin; 2423 2424 ASSERT(IPH_HDR_VERSION((ipha_t *)rptr) == IPV4_VERSION); 2425 2426 /* 2427 * Normally only send up the source address. 2428 * If any ancillary data items are wanted we add those. 2429 */ 2430 udi_size = sizeof (struct T_unitdata_ind) + sizeof (sin_t); 2431 if (recv_ancillary.crb_all != 0) { 2432 udi_size += conn_recvancillary_size(connp, 2433 recv_ancillary, ira, mp, &ipps); 2434 } 2435 2436 /* Allocate a message block for the T_UNITDATA_IND structure. */ 2437 mp1 = allocb(udi_size, BPRI_MED); 2438 if (mp1 == NULL) 2439 goto tossit; 2440 mp1->b_cont = mp; 2441 mp1->b_datap->db_type = M_PROTO; 2442 tudi = (struct T_unitdata_ind *)mp1->b_rptr; 2443 mp1->b_wptr = (uchar_t *)tudi + udi_size; 2444 tudi->PRIM_type = T_UNITDATA_IND; 2445 tudi->SRC_length = sizeof (sin_t); 2446 tudi->SRC_offset = sizeof (struct T_unitdata_ind); 2447 tudi->OPT_offset = sizeof (struct T_unitdata_ind) + 2448 sizeof (sin_t); 2449 udi_size -= (sizeof (struct T_unitdata_ind) + sizeof (sin_t)); 2450 tudi->OPT_length = udi_size; 2451 sin = (sin_t *)&tudi[1]; 2452 sin->sin_addr.s_addr = ((ipha_t *)rptr)->ipha_src; 2453 sin->sin_port = udpha->uha_src_port; 2454 sin->sin_family = connp->conn_family; 2455 *(uint32_t *)&sin->sin_zero[0] = 0; 2456 *(uint32_t *)&sin->sin_zero[4] = 0; 2457 2458 /* 2459 * Add options if IP_RECVDSTADDR, IP_RECVIF, IP_RECVSLLA or 2460 * IP_RECVTTL has been set. 2461 */ 2462 if (udi_size != 0) { 2463 conn_recvancillary_add(connp, recv_ancillary, ira, 2464 &ipps, (uchar_t *)&sin[1], udi_size); 2465 } 2466 } else { 2467 sin6_t *sin6; 2468 2469 /* 2470 * Handle both IPv4 and IPv6 packets for IPv6 sockets. 2471 * 2472 * Normally we only send up the address. If receiving of any 2473 * optional receive side information is enabled, we also send 2474 * that up as options. 
2475 */ 2476 udi_size = sizeof (struct T_unitdata_ind) + sizeof (sin6_t); 2477 2478 if (recv_ancillary.crb_all != 0) { 2479 udi_size += conn_recvancillary_size(connp, 2480 recv_ancillary, ira, mp, &ipps); 2481 } 2482 2483 mp1 = allocb(udi_size, BPRI_MED); 2484 if (mp1 == NULL) 2485 goto tossit; 2486 mp1->b_cont = mp; 2487 mp1->b_datap->db_type = M_PROTO; 2488 tudi = (struct T_unitdata_ind *)mp1->b_rptr; 2489 mp1->b_wptr = (uchar_t *)tudi + udi_size; 2490 tudi->PRIM_type = T_UNITDATA_IND; 2491 tudi->SRC_length = sizeof (sin6_t); 2492 tudi->SRC_offset = sizeof (struct T_unitdata_ind); 2493 tudi->OPT_offset = sizeof (struct T_unitdata_ind) + 2494 sizeof (sin6_t); 2495 udi_size -= (sizeof (struct T_unitdata_ind) + sizeof (sin6_t)); 2496 tudi->OPT_length = udi_size; 2497 sin6 = (sin6_t *)&tudi[1]; 2498 if (ira->ira_flags & IRAF_IS_IPV4) { 2499 in6_addr_t v6dst; 2500 2501 IN6_IPADDR_TO_V4MAPPED(((ipha_t *)rptr)->ipha_src, 2502 &sin6->sin6_addr); 2503 IN6_IPADDR_TO_V4MAPPED(((ipha_t *)rptr)->ipha_dst, 2504 &v6dst); 2505 sin6->sin6_flowinfo = 0; 2506 sin6->sin6_scope_id = 0; 2507 sin6->__sin6_src_id = ip_srcid_find_addr(&v6dst, 2508 IPCL_ZONEID(connp), us->us_netstack); 2509 } else { 2510 ip6h = (ip6_t *)rptr; 2511 2512 sin6->sin6_addr = ip6h->ip6_src; 2513 /* No sin6_flowinfo per API */ 2514 sin6->sin6_flowinfo = 0; 2515 /* For link-scope pass up scope id */ 2516 if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_src)) 2517 sin6->sin6_scope_id = ira->ira_ruifindex; 2518 else 2519 sin6->sin6_scope_id = 0; 2520 sin6->__sin6_src_id = ip_srcid_find_addr( 2521 &ip6h->ip6_dst, IPCL_ZONEID(connp), 2522 us->us_netstack); 2523 } 2524 sin6->sin6_port = udpha->uha_src_port; 2525 sin6->sin6_family = connp->conn_family; 2526 2527 if (udi_size != 0) { 2528 conn_recvancillary_add(connp, recv_ancillary, ira, 2529 &ipps, (uchar_t *)&sin6[1], udi_size); 2530 } 2531 } 2532 2533 /* 2534 * DTrace this UDP input as udp:::receive (this is for IPv4, IPv6 and 2535 * loopback traffic). 2536 */ 2537 DTRACE_UDP5(receive, mblk_t *, NULL, ip_xmit_attr_t *, connp->conn_ixa, 2538 void_ip_t *, rptr, udp_t *, udp, udpha_t *, udpha); 2539 2540 /* Walk past the headers unless IP_RECVHDR was set. */ 2541 if (!udp->udp_rcvhdr) { 2542 mp->b_rptr = rptr + hdr_length; 2543 pkt_len -= hdr_length; 2544 } 2545 2546 UDPS_BUMP_MIB(us, udpHCInDatagrams); 2547 udp_ulp_recv(connp, mp1, pkt_len, ira); 2548 if (new != NULL) 2549 CONN_DEC_REF(new); 2550 return; 2551 2552 tossit: 2553 freemsg(mp); 2554 UDPS_BUMP_MIB(us, udpInErrors); 2555 if (new != NULL) 2556 CONN_DEC_REF(new); 2557 } 2558 2559 /* 2560 * This routine creates a T_UDERROR_IND message and passes it upstream. 2561 * The address and options are copied from the T_UNITDATA_REQ message 2562 * passed in mp. This message is freed. 
2563 */ 2564 static void 2565 udp_ud_err(queue_t *q, mblk_t *mp, t_scalar_t err) 2566 { 2567 struct T_unitdata_req *tudr; 2568 mblk_t *mp1; 2569 uchar_t *destaddr; 2570 t_scalar_t destlen; 2571 uchar_t *optaddr; 2572 t_scalar_t optlen; 2573 2574 if ((mp->b_wptr < mp->b_rptr) || 2575 (MBLKL(mp)) < sizeof (struct T_unitdata_req)) { 2576 goto done; 2577 } 2578 tudr = (struct T_unitdata_req *)mp->b_rptr; 2579 destaddr = mp->b_rptr + tudr->DEST_offset; 2580 if (destaddr < mp->b_rptr || destaddr >= mp->b_wptr || 2581 destaddr + tudr->DEST_length < mp->b_rptr || 2582 destaddr + tudr->DEST_length > mp->b_wptr) { 2583 goto done; 2584 } 2585 optaddr = mp->b_rptr + tudr->OPT_offset; 2586 if (optaddr < mp->b_rptr || optaddr >= mp->b_wptr || 2587 optaddr + tudr->OPT_length < mp->b_rptr || 2588 optaddr + tudr->OPT_length > mp->b_wptr) { 2589 goto done; 2590 } 2591 destlen = tudr->DEST_length; 2592 optlen = tudr->OPT_length; 2593 2594 mp1 = mi_tpi_uderror_ind((char *)destaddr, destlen, 2595 (char *)optaddr, optlen, err); 2596 if (mp1 != NULL) 2597 qreply(q, mp1); 2598 2599 done: 2600 freemsg(mp); 2601 } 2602 2603 /* 2604 * This routine removes a port number association from a stream. It 2605 * is called by udp_wput to handle T_UNBIND_REQ messages. 2606 */ 2607 static void 2608 udp_tpi_unbind(queue_t *q, mblk_t *mp) 2609 { 2610 conn_t *connp = Q_TO_CONN(q); 2611 int error; 2612 2613 error = udp_do_unbind(connp); 2614 if (error) { 2615 if (error < 0) 2616 udp_err_ack(q, mp, -error, 0); 2617 else 2618 udp_err_ack(q, mp, TSYSERR, error); 2619 return; 2620 } 2621 2622 mp = mi_tpi_ok_ack_alloc(mp); 2623 ASSERT(mp != NULL); 2624 ASSERT(((struct T_ok_ack *)mp->b_rptr)->PRIM_type == T_OK_ACK); 2625 qreply(q, mp); 2626 } 2627 2628 /* 2629 * Don't let port fall into the privileged range. 2630 * Since the extra privileged ports can be arbitrary we also 2631 * ensure that we exclude those from consideration. 2632 * us->us_epriv_ports is not sorted thus we loop over it until 2633 * there are no changes. 2634 */ 2635 static in_port_t 2636 udp_update_next_port(udp_t *udp, in_port_t port, boolean_t random) 2637 { 2638 int i, bump; 2639 in_port_t nextport; 2640 boolean_t restart = B_FALSE; 2641 udp_stack_t *us = udp->udp_us; 2642 2643 if (random && udp_random_anon_port != 0) { 2644 (void) random_get_pseudo_bytes((uint8_t *)&port, 2645 sizeof (in_port_t)); 2646 /* 2647 * Unless changed by a sys admin, the smallest anon port 2648 * is 32768 and the largest anon port is 65535. It is 2649 * very likely (50%) for the random port to be smaller 2650 * than the smallest anon port. When that happens, 2651 * add port % (anon port range) to the smallest anon 2652 * port to get the random port. It should fall into the 2653 * valid anon port range. 
2654 */ 2655 if ((port < us->us_smallest_anon_port) || 2656 (port > us->us_largest_anon_port)) { 2657 if (us->us_smallest_anon_port == 2658 us->us_largest_anon_port) { 2659 bump = 0; 2660 } else { 2661 bump = port % (us->us_largest_anon_port - 2662 us->us_smallest_anon_port); 2663 } 2664 2665 port = us->us_smallest_anon_port + bump; 2666 } 2667 } 2668 2669 retry: 2670 if (port < us->us_smallest_anon_port) 2671 port = us->us_smallest_anon_port; 2672 2673 if (port > us->us_largest_anon_port) { 2674 port = us->us_smallest_anon_port; 2675 if (restart) 2676 return (0); 2677 restart = B_TRUE; 2678 } 2679 2680 if (port < us->us_smallest_nonpriv_port) 2681 port = us->us_smallest_nonpriv_port; 2682 2683 for (i = 0; i < us->us_num_epriv_ports; i++) { 2684 if (port == us->us_epriv_ports[i]) { 2685 port++; 2686 /* 2687 * Make sure that the port is in the 2688 * valid range. 2689 */ 2690 goto retry; 2691 } 2692 } 2693 2694 if (is_system_labeled() && 2695 (nextport = tsol_next_port(crgetzone(udp->udp_connp->conn_cred), 2696 port, IPPROTO_UDP, B_TRUE)) != 0) { 2697 port = nextport; 2698 goto retry; 2699 } 2700 2701 return (port); 2702 } 2703 2704 /* 2705 * Handle T_UNITDATA_REQ with options. Both IPv4 and IPv6 2706 * Either tudr_mp or msg is set. If tudr_mp we take ancillary data from 2707 * the TPI options, otherwise we take them from msg_control. 2708 * If both sin and sin6 is set it is a connected socket and we use conn_faddr. 2709 * Always consumes mp; never consumes tudr_mp. 2710 */ 2711 static int 2712 udp_output_ancillary(conn_t *connp, sin_t *sin, sin6_t *sin6, mblk_t *mp, 2713 mblk_t *tudr_mp, struct nmsghdr *msg, cred_t *cr, pid_t pid) 2714 { 2715 udp_t *udp = connp->conn_udp; 2716 udp_stack_t *us = udp->udp_us; 2717 int error; 2718 ip_xmit_attr_t *ixa; 2719 ip_pkt_t *ipp; 2720 in6_addr_t v6src; 2721 in6_addr_t v6dst; 2722 in6_addr_t v6nexthop; 2723 in_port_t dstport; 2724 uint32_t flowinfo; 2725 uint_t srcid; 2726 int is_absreq_failure = 0; 2727 conn_opt_arg_t coas, *coa; 2728 2729 ASSERT(tudr_mp != NULL || msg != NULL); 2730 2731 /* 2732 * Get ixa before checking state to handle a disconnect race. 2733 * 2734 * We need an exclusive copy of conn_ixa since the ancillary data 2735 * options might modify it. That copy has no pointers hence we 2736 * need to set them up once we've parsed the ancillary data. 
2737 */ 2738 ixa = conn_get_ixa_exclusive(connp); 2739 if (ixa == NULL) { 2740 UDPS_BUMP_MIB(us, udpOutErrors); 2741 freemsg(mp); 2742 return (ENOMEM); 2743 } 2744 ASSERT(cr != NULL); 2745 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 2746 ixa->ixa_cred = cr; 2747 ixa->ixa_cpid = pid; 2748 if (is_system_labeled()) { 2749 /* We need to restart with a label based on the cred */ 2750 ip_xmit_attr_restore_tsl(ixa, ixa->ixa_cred); 2751 } 2752 2753 /* In case previous destination was multicast or multirt */ 2754 ip_attr_newdst(ixa); 2755 2756 /* Get a copy of conn_xmit_ipp since the options might change it */ 2757 ipp = kmem_zalloc(sizeof (*ipp), KM_NOSLEEP); 2758 if (ipp == NULL) { 2759 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 2760 ixa->ixa_cred = connp->conn_cred; /* Restore */ 2761 ixa->ixa_cpid = connp->conn_cpid; 2762 ixa_refrele(ixa); 2763 UDPS_BUMP_MIB(us, udpOutErrors); 2764 freemsg(mp); 2765 return (ENOMEM); 2766 } 2767 mutex_enter(&connp->conn_lock); 2768 error = ip_pkt_copy(&connp->conn_xmit_ipp, ipp, KM_NOSLEEP); 2769 mutex_exit(&connp->conn_lock); 2770 if (error != 0) { 2771 UDPS_BUMP_MIB(us, udpOutErrors); 2772 freemsg(mp); 2773 goto done; 2774 } 2775 2776 /* 2777 * Parse the options and update ixa and ipp as a result. 2778 * Note that ixa_tsl can be updated if SCM_UCRED. 2779 * ixa_refrele/ixa_inactivate will release any reference on ixa_tsl. 2780 */ 2781 2782 coa = &coas; 2783 coa->coa_connp = connp; 2784 coa->coa_ixa = ixa; 2785 coa->coa_ipp = ipp; 2786 coa->coa_ancillary = B_TRUE; 2787 coa->coa_changed = 0; 2788 2789 if (msg != NULL) { 2790 error = process_auxiliary_options(connp, msg->msg_control, 2791 msg->msg_controllen, coa, &udp_opt_obj, udp_opt_set, cr); 2792 } else { 2793 struct T_unitdata_req *tudr; 2794 2795 tudr = (struct T_unitdata_req *)tudr_mp->b_rptr; 2796 ASSERT(tudr->PRIM_type == T_UNITDATA_REQ); 2797 error = tpi_optcom_buf(connp->conn_wq, tudr_mp, 2798 &tudr->OPT_length, tudr->OPT_offset, cr, &udp_opt_obj, 2799 coa, &is_absreq_failure); 2800 } 2801 if (error != 0) { 2802 /* 2803 * Note: No special action needed in this 2804 * module for "is_absreq_failure" 2805 */ 2806 freemsg(mp); 2807 UDPS_BUMP_MIB(us, udpOutErrors); 2808 goto done; 2809 } 2810 ASSERT(is_absreq_failure == 0); 2811 2812 mutex_enter(&connp->conn_lock); 2813 /* 2814 * If laddr is unspecified then we look at sin6_src_id. 2815 * We will give precedence to a source address set with IPV6_PKTINFO 2816 * (aka IPPF_ADDR) but that is handled in build_hdrs. However, we don't 2817 * want ip_attr_connect to select a source (since it can fail) when 2818 * IPV6_PKTINFO is specified. 2819 * If this doesn't result in a source address then we get a source 2820 * from ip_attr_connect() below. 
2821 */ 2822 v6src = connp->conn_saddr_v6; 2823 if (sin != NULL) { 2824 IN6_IPADDR_TO_V4MAPPED(sin->sin_addr.s_addr, &v6dst); 2825 dstport = sin->sin_port; 2826 flowinfo = 0; 2827 ixa->ixa_flags &= ~IXAF_SCOPEID_SET; 2828 ixa->ixa_flags |= IXAF_IS_IPV4; 2829 } else if (sin6 != NULL) { 2830 boolean_t v4mapped; 2831 2832 v6dst = sin6->sin6_addr; 2833 dstport = sin6->sin6_port; 2834 flowinfo = sin6->sin6_flowinfo; 2835 srcid = sin6->__sin6_src_id; 2836 if (IN6_IS_ADDR_LINKSCOPE(&v6dst) && sin6->sin6_scope_id != 0) { 2837 ixa->ixa_scopeid = sin6->sin6_scope_id; 2838 ixa->ixa_flags |= IXAF_SCOPEID_SET; 2839 } else { 2840 ixa->ixa_flags &= ~IXAF_SCOPEID_SET; 2841 } 2842 v4mapped = IN6_IS_ADDR_V4MAPPED(&v6dst); 2843 if (v4mapped) 2844 ixa->ixa_flags |= IXAF_IS_IPV4; 2845 else 2846 ixa->ixa_flags &= ~IXAF_IS_IPV4; 2847 if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&v6src)) { 2848 if (!ip_srcid_find_id(srcid, &v6src, IPCL_ZONEID(connp), 2849 v4mapped, connp->conn_netstack)) { 2850 /* Mismatch - v4mapped/v6 specified by srcid. */ 2851 mutex_exit(&connp->conn_lock); 2852 error = EADDRNOTAVAIL; 2853 goto failed; /* Does freemsg() and mib. */ 2854 } 2855 } 2856 } else { 2857 /* Connected case */ 2858 v6dst = connp->conn_faddr_v6; 2859 dstport = connp->conn_fport; 2860 flowinfo = connp->conn_flowinfo; 2861 } 2862 mutex_exit(&connp->conn_lock); 2863 2864 /* Handle IP_PKTINFO/IPV6_PKTINFO setting source address. */ 2865 if (ipp->ipp_fields & IPPF_ADDR) { 2866 if (ixa->ixa_flags & IXAF_IS_IPV4) { 2867 if (IN6_IS_ADDR_V4MAPPED(&ipp->ipp_addr)) 2868 v6src = ipp->ipp_addr; 2869 } else { 2870 if (!IN6_IS_ADDR_V4MAPPED(&ipp->ipp_addr)) 2871 v6src = ipp->ipp_addr; 2872 } 2873 } 2874 2875 ip_attr_nexthop(ipp, ixa, &v6dst, &v6nexthop); 2876 error = ip_attr_connect(connp, ixa, &v6src, &v6dst, &v6nexthop, dstport, 2877 &v6src, NULL, IPDF_ALLOW_MCBC | IPDF_VERIFY_DST | IPDF_IPSEC); 2878 2879 switch (error) { 2880 case 0: 2881 break; 2882 case EADDRNOTAVAIL: 2883 /* 2884 * IXAF_VERIFY_SOURCE tells us to pick a better source. 2885 * Don't have the application see that errno 2886 */ 2887 error = ENETUNREACH; 2888 goto failed; 2889 case ENETDOWN: 2890 /* 2891 * Have !ipif_addr_ready address; drop packet silently 2892 * until we can get applications to not send until we 2893 * are ready. 2894 */ 2895 error = 0; 2896 goto failed; 2897 case EHOSTUNREACH: 2898 case ENETUNREACH: 2899 if (ixa->ixa_ire != NULL) { 2900 /* 2901 * Let conn_ip_output/ire_send_noroute return 2902 * the error and send any local ICMP error. 2903 */ 2904 error = 0; 2905 break; 2906 } 2907 /* FALLTHRU */ 2908 default: 2909 failed: 2910 freemsg(mp); 2911 UDPS_BUMP_MIB(us, udpOutErrors); 2912 goto done; 2913 } 2914 2915 /* 2916 * We might be going to a different destination than last time, 2917 * thus check that TX allows the communication and compute any 2918 * needed label. 2919 * 2920 * TSOL Note: We have an exclusive ipp and ixa for this thread so we 2921 * don't have to worry about concurrent threads. 2922 */ 2923 if (is_system_labeled()) { 2924 /* Using UDP MLP requires SCM_UCRED from user */ 2925 if (connp->conn_mlp_type != mlptSingle && 2926 !((ixa->ixa_flags & IXAF_UCRED_TSL))) { 2927 UDPS_BUMP_MIB(us, udpOutErrors); 2928 error = ECONNREFUSED; 2929 freemsg(mp); 2930 goto done; 2931 } 2932 /* 2933 * Check whether Trusted Solaris policy allows communication 2934 * with this host, and pretend that the destination is 2935 * unreachable if not. 2936 * Compute any needed label and place it in ipp_label_v4/v6. 
2937 * 2938 * Later conn_build_hdr_template/conn_prepend_hdr takes 2939 * ipp_label_v4/v6 to form the packet. 2940 * 2941 * Tsol note: We have ipp structure local to this thread so 2942 * no locking is needed. 2943 */ 2944 error = conn_update_label(connp, ixa, &v6dst, ipp); 2945 if (error != 0) { 2946 freemsg(mp); 2947 UDPS_BUMP_MIB(us, udpOutErrors); 2948 goto done; 2949 } 2950 } 2951 mp = udp_prepend_hdr(connp, ixa, ipp, &v6src, &v6dst, dstport, 2952 flowinfo, mp, &error); 2953 if (mp == NULL) { 2954 ASSERT(error != 0); 2955 UDPS_BUMP_MIB(us, udpOutErrors); 2956 goto done; 2957 } 2958 if (ixa->ixa_pktlen > IP_MAXPACKET) { 2959 error = EMSGSIZE; 2960 UDPS_BUMP_MIB(us, udpOutErrors); 2961 freemsg(mp); 2962 goto done; 2963 } 2964 /* We're done. Pass the packet to ip. */ 2965 UDPS_BUMP_MIB(us, udpHCOutDatagrams); 2966 2967 DTRACE_UDP5(send, mblk_t *, NULL, ip_xmit_attr_t *, ixa, 2968 void_ip_t *, mp->b_rptr, udp_t *, udp, udpha_t *, 2969 &mp->b_rptr[ixa->ixa_ip_hdr_length]); 2970 2971 error = conn_ip_output(mp, ixa); 2972 /* No udpOutErrors if an error since IP increases its error counter */ 2973 switch (error) { 2974 case 0: 2975 break; 2976 case EWOULDBLOCK: 2977 (void) ixa_check_drain_insert(connp, ixa); 2978 error = 0; 2979 break; 2980 case EADDRNOTAVAIL: 2981 /* 2982 * IXAF_VERIFY_SOURCE tells us to pick a better source. 2983 * Don't have the application see that errno 2984 */ 2985 error = ENETUNREACH; 2986 /* FALLTHRU */ 2987 default: 2988 mutex_enter(&connp->conn_lock); 2989 /* 2990 * Clear the source and v6lastdst so we call ip_attr_connect 2991 * for the next packet and try to pick a better source. 2992 */ 2993 if (connp->conn_mcbc_bind) 2994 connp->conn_saddr_v6 = ipv6_all_zeros; 2995 else 2996 connp->conn_saddr_v6 = connp->conn_bound_addr_v6; 2997 connp->conn_v6lastdst = ipv6_all_zeros; 2998 mutex_exit(&connp->conn_lock); 2999 break; 3000 } 3001 done: 3002 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3003 ixa->ixa_cred = connp->conn_cred; /* Restore */ 3004 ixa->ixa_cpid = connp->conn_cpid; 3005 ixa_refrele(ixa); 3006 ip_pkt_free(ipp); 3007 kmem_free(ipp, sizeof (*ipp)); 3008 return (error); 3009 } 3010 3011 /* 3012 * Handle sending an M_DATA for a connected socket. 3013 * Handles both IPv4 and IPv6. 3014 */ 3015 static int 3016 udp_output_connected(conn_t *connp, mblk_t *mp, cred_t *cr, pid_t pid) 3017 { 3018 udp_t *udp = connp->conn_udp; 3019 udp_stack_t *us = udp->udp_us; 3020 int error; 3021 ip_xmit_attr_t *ixa; 3022 3023 /* 3024 * If no other thread is using conn_ixa this just gets a reference to 3025 * conn_ixa. Otherwise we get a safe copy of conn_ixa. 
3026 */ 3027 ixa = conn_get_ixa(connp, B_FALSE); 3028 if (ixa == NULL) { 3029 UDPS_BUMP_MIB(us, udpOutErrors); 3030 freemsg(mp); 3031 return (ENOMEM); 3032 } 3033 3034 ASSERT(cr != NULL); 3035 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3036 ixa->ixa_cred = cr; 3037 ixa->ixa_cpid = pid; 3038 3039 mutex_enter(&connp->conn_lock); 3040 mp = udp_prepend_header_template(connp, ixa, mp, &connp->conn_saddr_v6, 3041 connp->conn_fport, connp->conn_flowinfo, &error); 3042 3043 if (mp == NULL) { 3044 ASSERT(error != 0); 3045 mutex_exit(&connp->conn_lock); 3046 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3047 ixa->ixa_cred = connp->conn_cred; /* Restore */ 3048 ixa->ixa_cpid = connp->conn_cpid; 3049 ixa_refrele(ixa); 3050 UDPS_BUMP_MIB(us, udpOutErrors); 3051 freemsg(mp); 3052 return (error); 3053 } 3054 3055 /* 3056 * In case we got a safe copy of conn_ixa, or if opt_set made us a new 3057 * safe copy, then we need to fill in any pointers in it. 3058 */ 3059 if (ixa->ixa_ire == NULL) { 3060 in6_addr_t faddr, saddr; 3061 in6_addr_t nexthop; 3062 in_port_t fport; 3063 3064 saddr = connp->conn_saddr_v6; 3065 faddr = connp->conn_faddr_v6; 3066 fport = connp->conn_fport; 3067 ip_attr_nexthop(&connp->conn_xmit_ipp, ixa, &faddr, &nexthop); 3068 mutex_exit(&connp->conn_lock); 3069 3070 error = ip_attr_connect(connp, ixa, &saddr, &faddr, &nexthop, 3071 fport, NULL, NULL, IPDF_ALLOW_MCBC | IPDF_VERIFY_DST | 3072 IPDF_IPSEC); 3073 switch (error) { 3074 case 0: 3075 break; 3076 case EADDRNOTAVAIL: 3077 /* 3078 * IXAF_VERIFY_SOURCE tells us to pick a better source. 3079 * Don't have the application see that errno 3080 */ 3081 error = ENETUNREACH; 3082 goto failed; 3083 case ENETDOWN: 3084 /* 3085 * Have !ipif_addr_ready address; drop packet silently 3086 * until we can get applications to not send until we 3087 * are ready. 3088 */ 3089 error = 0; 3090 goto failed; 3091 case EHOSTUNREACH: 3092 case ENETUNREACH: 3093 if (ixa->ixa_ire != NULL) { 3094 /* 3095 * Let conn_ip_output/ire_send_noroute return 3096 * the error and send any local ICMP error. 3097 */ 3098 error = 0; 3099 break; 3100 } 3101 /* FALLTHRU */ 3102 default: 3103 failed: 3104 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3105 ixa->ixa_cred = connp->conn_cred; /* Restore */ 3106 ixa->ixa_cpid = connp->conn_cpid; 3107 ixa_refrele(ixa); 3108 freemsg(mp); 3109 UDPS_BUMP_MIB(us, udpOutErrors); 3110 return (error); 3111 } 3112 } else { 3113 /* Done with conn_t */ 3114 mutex_exit(&connp->conn_lock); 3115 } 3116 ASSERT(ixa->ixa_ire != NULL); 3117 3118 /* We're done. Pass the packet to ip. */ 3119 UDPS_BUMP_MIB(us, udpHCOutDatagrams); 3120 3121 DTRACE_UDP5(send, mblk_t *, NULL, ip_xmit_attr_t *, ixa, 3122 void_ip_t *, mp->b_rptr, udp_t *, udp, udpha_t *, 3123 &mp->b_rptr[ixa->ixa_ip_hdr_length]); 3124 3125 error = conn_ip_output(mp, ixa); 3126 /* No udpOutErrors if an error since IP increases its error counter */ 3127 switch (error) { 3128 case 0: 3129 break; 3130 case EWOULDBLOCK: 3131 (void) ixa_check_drain_insert(connp, ixa); 3132 error = 0; 3133 break; 3134 case EADDRNOTAVAIL: 3135 /* 3136 * IXAF_VERIFY_SOURCE tells us to pick a better source. 3137 * Don't have the application see that errno 3138 */ 3139 error = ENETUNREACH; 3140 break; 3141 } 3142 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3143 ixa->ixa_cred = connp->conn_cred; /* Restore */ 3144 ixa->ixa_cpid = connp->conn_cpid; 3145 ixa_refrele(ixa); 3146 return (error); 3147 } 3148 3149 /* 3150 * Handle sending an M_DATA to the last destination. 3151 * Handles both IPv4 and IPv6. 
3152 * 3153 * NOTE: The caller must hold conn_lock and we drop it here. 3154 */ 3155 static int 3156 udp_output_lastdst(conn_t *connp, mblk_t *mp, cred_t *cr, pid_t pid, 3157 ip_xmit_attr_t *ixa) 3158 { 3159 udp_t *udp = connp->conn_udp; 3160 udp_stack_t *us = udp->udp_us; 3161 int error; 3162 3163 ASSERT(MUTEX_HELD(&connp->conn_lock)); 3164 ASSERT(ixa != NULL); 3165 3166 ASSERT(cr != NULL); 3167 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3168 ixa->ixa_cred = cr; 3169 ixa->ixa_cpid = pid; 3170 3171 mp = udp_prepend_header_template(connp, ixa, mp, &connp->conn_v6lastsrc, 3172 connp->conn_lastdstport, connp->conn_lastflowinfo, &error); 3173 3174 if (mp == NULL) { 3175 ASSERT(error != 0); 3176 mutex_exit(&connp->conn_lock); 3177 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3178 ixa->ixa_cred = connp->conn_cred; /* Restore */ 3179 ixa->ixa_cpid = connp->conn_cpid; 3180 ixa_refrele(ixa); 3181 UDPS_BUMP_MIB(us, udpOutErrors); 3182 freemsg(mp); 3183 return (error); 3184 } 3185 3186 /* 3187 * In case we got a safe copy of conn_ixa, or if opt_set made us a new 3188 * safe copy, then we need to fill in any pointers in it. 3189 */ 3190 if (ixa->ixa_ire == NULL) { 3191 in6_addr_t lastdst, lastsrc; 3192 in6_addr_t nexthop; 3193 in_port_t lastport; 3194 3195 lastsrc = connp->conn_v6lastsrc; 3196 lastdst = connp->conn_v6lastdst; 3197 lastport = connp->conn_lastdstport; 3198 ip_attr_nexthop(&connp->conn_xmit_ipp, ixa, &lastdst, &nexthop); 3199 mutex_exit(&connp->conn_lock); 3200 3201 error = ip_attr_connect(connp, ixa, &lastsrc, &lastdst, 3202 &nexthop, lastport, NULL, NULL, IPDF_ALLOW_MCBC | 3203 IPDF_VERIFY_DST | IPDF_IPSEC); 3204 switch (error) { 3205 case 0: 3206 break; 3207 case EADDRNOTAVAIL: 3208 /* 3209 * IXAF_VERIFY_SOURCE tells us to pick a better source. 3210 * Don't have the application see that errno 3211 */ 3212 error = ENETUNREACH; 3213 goto failed; 3214 case ENETDOWN: 3215 /* 3216 * Have !ipif_addr_ready address; drop packet silently 3217 * until we can get applications to not send until we 3218 * are ready. 3219 */ 3220 error = 0; 3221 goto failed; 3222 case EHOSTUNREACH: 3223 case ENETUNREACH: 3224 if (ixa->ixa_ire != NULL) { 3225 /* 3226 * Let conn_ip_output/ire_send_noroute return 3227 * the error and send any local ICMP error. 3228 */ 3229 error = 0; 3230 break; 3231 } 3232 /* FALLTHRU */ 3233 default: 3234 failed: 3235 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3236 ixa->ixa_cred = connp->conn_cred; /* Restore */ 3237 ixa->ixa_cpid = connp->conn_cpid; 3238 ixa_refrele(ixa); 3239 freemsg(mp); 3240 UDPS_BUMP_MIB(us, udpOutErrors); 3241 return (error); 3242 } 3243 } else { 3244 /* Done with conn_t */ 3245 mutex_exit(&connp->conn_lock); 3246 } 3247 3248 /* We're done. Pass the packet to ip. */ 3249 UDPS_BUMP_MIB(us, udpHCOutDatagrams); 3250 3251 DTRACE_UDP5(send, mblk_t *, NULL, ip_xmit_attr_t *, ixa, 3252 void_ip_t *, mp->b_rptr, udp_t *, udp, udpha_t *, 3253 &mp->b_rptr[ixa->ixa_ip_hdr_length]); 3254 3255 error = conn_ip_output(mp, ixa); 3256 /* No udpOutErrors if an error since IP increases its error counter */ 3257 switch (error) { 3258 case 0: 3259 break; 3260 case EWOULDBLOCK: 3261 (void) ixa_check_drain_insert(connp, ixa); 3262 error = 0; 3263 break; 3264 case EADDRNOTAVAIL: 3265 /* 3266 * IXAF_VERIFY_SOURCE tells us to pick a better source. 
* Don't have the application see that errno
3268 */
3269 error = ENETUNREACH;
3270 /* FALLTHRU */
3271 default:
3272 mutex_enter(&connp->conn_lock);
3273 /*
3274 * Clear the source and v6lastdst so we call ip_attr_connect
3275 * for the next packet and try to pick a better source.
3276 */
3277 if (connp->conn_mcbc_bind)
3278 connp->conn_saddr_v6 = ipv6_all_zeros;
3279 else
3280 connp->conn_saddr_v6 = connp->conn_bound_addr_v6;
3281 connp->conn_v6lastdst = ipv6_all_zeros;
3282 mutex_exit(&connp->conn_lock);
3283 break;
3284 }
3285 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED));
3286 ixa->ixa_cred = connp->conn_cred; /* Restore */
3287 ixa->ixa_cpid = connp->conn_cpid;
3288 ixa_refrele(ixa);
3289 return (error);
3290 }
3291
3292
3293 /*
3294 * Prepend the header template and then fill in the source and
3295 * flowinfo. The caller needs to handle the destination address since
3296 * its setting is different if rthdr or source route.
3297 *
3298 * Returns NULL if allocation failed or if the packet would exceed IP_MAXPACKET.
3299 * When it returns NULL it sets errorp.
3300 */
3301 static mblk_t *
3302 udp_prepend_header_template(conn_t *connp, ip_xmit_attr_t *ixa, mblk_t *mp,
3303 const in6_addr_t *v6src, in_port_t dstport, uint32_t flowinfo, int *errorp)
3304 {
3305 udp_t *udp = connp->conn_udp;
3306 udp_stack_t *us = udp->udp_us;
3307 boolean_t insert_spi = udp->udp_nat_t_endpoint;
3308 uint_t pktlen;
3309 uint_t alloclen;
3310 uint_t copylen;
3311 uint8_t *iph;
3312 uint_t ip_hdr_length;
3313 udpha_t *udpha;
3314 uint32_t cksum;
3315 ip_pkt_t *ipp;
3316
3317 ASSERT(MUTEX_HELD(&connp->conn_lock));
3318
3319 /*
3320 * Copy the header template and leave space for an SPI
3321 */
3322 copylen = connp->conn_ht_iphc_len;
3323 alloclen = copylen + (insert_spi ? sizeof (uint32_t) : 0);
3324 pktlen = alloclen + msgdsize(mp);
3325 if (pktlen > IP_MAXPACKET) {
3326 freemsg(mp);
3327 *errorp = EMSGSIZE;
3328 return (NULL);
3329 }
3330 ixa->ixa_pktlen = pktlen;
3331
3332 /* check/fix buffer config, setup pointers into it */
3333 iph = mp->b_rptr - alloclen;
3334 if (DB_REF(mp) != 1 || iph < DB_BASE(mp) || !OK_32PTR(iph)) {
3335 mblk_t *mp1;
3336
3337 mp1 = allocb(alloclen + us->us_wroff_extra, BPRI_MED);
3338 if (mp1 == NULL) {
3339 freemsg(mp);
3340 *errorp = ENOMEM;
3341 return (NULL);
3342 }
3343 mp1->b_wptr = DB_LIM(mp1);
3344 mp1->b_cont = mp;
3345 mp = mp1;
3346 iph = (mp->b_wptr - alloclen);
3347 }
3348 mp->b_rptr = iph;
3349 bcopy(connp->conn_ht_iphc, iph, copylen);
3350 ip_hdr_length = (uint_t)(connp->conn_ht_ulp - connp->conn_ht_iphc);
3351
3352 ixa->ixa_ip_hdr_length = ip_hdr_length;
3353 udpha = (udpha_t *)(iph + ip_hdr_length);
3354
3355 /*
3356 * Setup header length and prepare for ULP checksum done in IP.
3357 * udp_build_hdr_template has already massaged any routing header
3358 * and placed the result in conn_sum.
3359 *
3360 * We make it easy for IP to include our pseudo header
3361 * by putting our length in uha_checksum.
3362 */ 3363 cksum = pktlen - ip_hdr_length; 3364 udpha->uha_length = htons(cksum); 3365 3366 cksum += connp->conn_sum; 3367 cksum = (cksum >> 16) + (cksum & 0xFFFF); 3368 ASSERT(cksum < 0x10000); 3369 3370 ipp = &connp->conn_xmit_ipp; 3371 if (ixa->ixa_flags & IXAF_IS_IPV4) { 3372 ipha_t *ipha = (ipha_t *)iph; 3373 3374 ipha->ipha_length = htons((uint16_t)pktlen); 3375 3376 /* IP does the checksum if uha_checksum is non-zero */ 3377 if (us->us_do_checksum) 3378 udpha->uha_checksum = htons(cksum); 3379 3380 /* if IP_PKTINFO specified an addres it wins over bind() */ 3381 if ((ipp->ipp_fields & IPPF_ADDR) && 3382 IN6_IS_ADDR_V4MAPPED(&ipp->ipp_addr)) { 3383 ASSERT(ipp->ipp_addr_v4 != INADDR_ANY); 3384 ipha->ipha_src = ipp->ipp_addr_v4; 3385 } else { 3386 IN6_V4MAPPED_TO_IPADDR(v6src, ipha->ipha_src); 3387 } 3388 } else { 3389 ip6_t *ip6h = (ip6_t *)iph; 3390 3391 ip6h->ip6_plen = htons((uint16_t)(pktlen - IPV6_HDR_LEN)); 3392 udpha->uha_checksum = htons(cksum); 3393 3394 /* if IP_PKTINFO specified an addres it wins over bind() */ 3395 if ((ipp->ipp_fields & IPPF_ADDR) && 3396 !IN6_IS_ADDR_V4MAPPED(&ipp->ipp_addr)) { 3397 ASSERT(!IN6_IS_ADDR_UNSPECIFIED(&ipp->ipp_addr)); 3398 ip6h->ip6_src = ipp->ipp_addr; 3399 } else { 3400 ip6h->ip6_src = *v6src; 3401 } 3402 ip6h->ip6_vcf = 3403 (IPV6_DEFAULT_VERS_AND_FLOW & IPV6_VERS_AND_FLOW_MASK) | 3404 (flowinfo & ~IPV6_VERS_AND_FLOW_MASK); 3405 if (ipp->ipp_fields & IPPF_TCLASS) { 3406 /* Overrides the class part of flowinfo */ 3407 ip6h->ip6_vcf = IPV6_TCLASS_FLOW(ip6h->ip6_vcf, 3408 ipp->ipp_tclass); 3409 } 3410 } 3411 3412 /* Insert all-0s SPI now. */ 3413 if (insert_spi) 3414 *((uint32_t *)(udpha + 1)) = 0; 3415 3416 udpha->uha_dst_port = dstport; 3417 return (mp); 3418 } 3419 3420 /* 3421 * Send a T_UDERR_IND in response to an M_DATA 3422 */ 3423 static void 3424 udp_ud_err_connected(conn_t *connp, t_scalar_t error) 3425 { 3426 struct sockaddr_storage ss; 3427 sin_t *sin; 3428 sin6_t *sin6; 3429 struct sockaddr *addr; 3430 socklen_t addrlen; 3431 mblk_t *mp1; 3432 3433 mutex_enter(&connp->conn_lock); 3434 /* Initialize addr and addrlen as if they're passed in */ 3435 if (connp->conn_family == AF_INET) { 3436 sin = (sin_t *)&ss; 3437 *sin = sin_null; 3438 sin->sin_family = AF_INET; 3439 sin->sin_port = connp->conn_fport; 3440 sin->sin_addr.s_addr = connp->conn_faddr_v4; 3441 addr = (struct sockaddr *)sin; 3442 addrlen = sizeof (*sin); 3443 } else { 3444 sin6 = (sin6_t *)&ss; 3445 *sin6 = sin6_null; 3446 sin6->sin6_family = AF_INET6; 3447 sin6->sin6_port = connp->conn_fport; 3448 sin6->sin6_flowinfo = connp->conn_flowinfo; 3449 sin6->sin6_addr = connp->conn_faddr_v6; 3450 if (IN6_IS_ADDR_LINKSCOPE(&connp->conn_faddr_v6) && 3451 (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET)) { 3452 sin6->sin6_scope_id = connp->conn_ixa->ixa_scopeid; 3453 } else { 3454 sin6->sin6_scope_id = 0; 3455 } 3456 sin6->__sin6_src_id = 0; 3457 addr = (struct sockaddr *)sin6; 3458 addrlen = sizeof (*sin6); 3459 } 3460 mutex_exit(&connp->conn_lock); 3461 3462 mp1 = mi_tpi_uderror_ind((char *)addr, addrlen, NULL, 0, error); 3463 if (mp1 != NULL) 3464 putnext(connp->conn_rq, mp1); 3465 } 3466 3467 /* 3468 * This routine handles all messages passed downstream. It either 3469 * consumes the message or passes it downstream; it never queues a 3470 * a message. 3471 * 3472 * Also entry point for sockfs when udp is in "direct sockfs" mode. 
This mode 3473 * is valid when we are directly beneath the stream head, and thus sockfs 3474 * is able to bypass STREAMS and directly call us, passing along the sockaddr 3475 * structure without the cumbersome T_UNITDATA_REQ interface for the case of 3476 * connected endpoints. 3477 */ 3478 void 3479 udp_wput(queue_t *q, mblk_t *mp) 3480 { 3481 sin6_t *sin6; 3482 sin_t *sin = NULL; 3483 uint_t srcid; 3484 conn_t *connp = Q_TO_CONN(q); 3485 udp_t *udp = connp->conn_udp; 3486 int error = 0; 3487 struct sockaddr *addr = NULL; 3488 socklen_t addrlen; 3489 udp_stack_t *us = udp->udp_us; 3490 struct T_unitdata_req *tudr; 3491 mblk_t *data_mp; 3492 ushort_t ipversion; 3493 cred_t *cr; 3494 pid_t pid; 3495 3496 /* 3497 * We directly handle several cases here: T_UNITDATA_REQ message 3498 * coming down as M_PROTO/M_PCPROTO and M_DATA messages for connected 3499 * socket. 3500 */ 3501 switch (DB_TYPE(mp)) { 3502 case M_DATA: 3503 if (!udp->udp_issocket || udp->udp_state != TS_DATA_XFER) { 3504 /* Not connected; address is required */ 3505 UDPS_BUMP_MIB(us, udpOutErrors); 3506 UDP_DBGSTAT(us, udp_data_notconn); 3507 UDP_STAT(us, udp_out_err_notconn); 3508 freemsg(mp); 3509 return; 3510 } 3511 /* 3512 * All Solaris components should pass a db_credp 3513 * for this message, hence we ASSERT. 3514 * On production kernels we return an error to be robust against 3515 * random streams modules sitting on top of us. 3516 */ 3517 cr = msg_getcred(mp, &pid); 3518 ASSERT(cr != NULL); 3519 if (cr == NULL) { 3520 UDPS_BUMP_MIB(us, udpOutErrors); 3521 freemsg(mp); 3522 return; 3523 } 3524 ASSERT(udp->udp_issocket); 3525 UDP_DBGSTAT(us, udp_data_conn); 3526 error = udp_output_connected(connp, mp, cr, pid); 3527 if (error != 0) { 3528 UDP_STAT(us, udp_out_err_output); 3529 if (connp->conn_rq != NULL) 3530 udp_ud_err_connected(connp, (t_scalar_t)error); 3531 #ifdef DEBUG 3532 printf("udp_output_connected returned %d\n", error); 3533 #endif 3534 } 3535 return; 3536 3537 case M_PROTO: 3538 case M_PCPROTO: 3539 tudr = (struct T_unitdata_req *)mp->b_rptr; 3540 if (MBLKL(mp) < sizeof (*tudr) || 3541 ((t_primp_t)mp->b_rptr)->type != T_UNITDATA_REQ) { 3542 udp_wput_other(q, mp); 3543 return; 3544 } 3545 break; 3546 3547 default: 3548 udp_wput_other(q, mp); 3549 return; 3550 } 3551 3552 /* Handle valid T_UNITDATA_REQ here */ 3553 data_mp = mp->b_cont; 3554 if (data_mp == NULL) { 3555 error = EPROTO; 3556 goto ud_error2; 3557 } 3558 mp->b_cont = NULL; 3559 3560 if (!MBLKIN(mp, 0, tudr->DEST_offset + tudr->DEST_length)) { 3561 error = EADDRNOTAVAIL; 3562 goto ud_error2; 3563 } 3564 3565 /* 3566 * All Solaris components should pass a db_credp 3567 * for this TPI message, hence we should ASSERT. 3568 * However, RPC (svc_clts_ksend) does this odd thing where it 3569 * passes the options from a T_UNITDATA_IND unchanged in a 3570 * T_UNITDATA_REQ. While that is the right thing to do for 3571 * some options, SCM_UCRED being the key one, this also makes it 3572 * pass down IP_RECVDSTADDR. Hence we can't ASSERT here. 3573 */ 3574 cr = msg_getcred(mp, &pid); 3575 if (cr == NULL) { 3576 cr = connp->conn_cred; 3577 pid = connp->conn_cpid; 3578 } 3579 3580 /* 3581 * If a port has not been bound to the stream, fail. 3582 * This is not a problem when sockfs is directly 3583 * above us, because it will ensure that the socket 3584 * is first bound before allowing data to be sent. 
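 * A raw TPI consumer that issues T_UNITDATA_REQ before binding is
 * answered with a T_UDERROR_IND carrying EPROTO (see udp_ud_err()).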
3585 */ 3586 if (udp->udp_state == TS_UNBND) { 3587 error = EPROTO; 3588 goto ud_error2; 3589 } 3590 addr = (struct sockaddr *)&mp->b_rptr[tudr->DEST_offset]; 3591 addrlen = tudr->DEST_length; 3592 3593 switch (connp->conn_family) { 3594 case AF_INET6: 3595 sin6 = (sin6_t *)addr; 3596 if (!OK_32PTR((char *)sin6) || (addrlen != sizeof (sin6_t)) || 3597 (sin6->sin6_family != AF_INET6)) { 3598 error = EADDRNOTAVAIL; 3599 goto ud_error2; 3600 } 3601 3602 srcid = sin6->__sin6_src_id; 3603 if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) { 3604 /* 3605 * Destination is a non-IPv4-compatible IPv6 address. 3606 * Send out an IPv6 format packet. 3607 */ 3608 3609 /* 3610 * If the local address is a mapped address return 3611 * an error. 3612 * It would be possible to send an IPv6 packet but the 3613 * response would never make it back to the application 3614 * since it is bound to a mapped address. 3615 */ 3616 if (IN6_IS_ADDR_V4MAPPED(&connp->conn_saddr_v6)) { 3617 error = EADDRNOTAVAIL; 3618 goto ud_error2; 3619 } 3620 3621 UDP_DBGSTAT(us, udp_out_ipv6); 3622 3623 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) 3624 sin6->sin6_addr = ipv6_loopback; 3625 ipversion = IPV6_VERSION; 3626 } else { 3627 if (connp->conn_ipv6_v6only) { 3628 error = EADDRNOTAVAIL; 3629 goto ud_error2; 3630 } 3631 3632 /* 3633 * If the local address is not zero or a mapped address 3634 * return an error. It would be possible to send an 3635 * IPv4 packet but the response would never make it 3636 * back to the application since it is bound to a 3637 * non-mapped address. 3638 */ 3639 if (!IN6_IS_ADDR_V4MAPPED(&connp->conn_saddr_v6) && 3640 !IN6_IS_ADDR_UNSPECIFIED(&connp->conn_saddr_v6)) { 3641 error = EADDRNOTAVAIL; 3642 goto ud_error2; 3643 } 3644 UDP_DBGSTAT(us, udp_out_mapped); 3645 3646 if (V4_PART_OF_V6(sin6->sin6_addr) == INADDR_ANY) { 3647 V4_PART_OF_V6(sin6->sin6_addr) = 3648 htonl(INADDR_LOOPBACK); 3649 } 3650 ipversion = IPV4_VERSION; 3651 } 3652 3653 if (tudr->OPT_length != 0) { 3654 /* 3655 * If we are connected then the destination needs to be 3656 * the same as the connected one. 3657 */ 3658 if (udp->udp_state == TS_DATA_XFER && 3659 !conn_same_as_last_v6(connp, sin6)) { 3660 error = EISCONN; 3661 goto ud_error2; 3662 } 3663 UDP_STAT(us, udp_out_opt); 3664 error = udp_output_ancillary(connp, NULL, sin6, 3665 data_mp, mp, NULL, cr, pid); 3666 } else { 3667 ip_xmit_attr_t *ixa; 3668 3669 /* 3670 * We have to allocate an ip_xmit_attr_t before we grab 3671 * conn_lock and we need to hold conn_lock once we've 3672 * checked conn_same_as_last_v6 to handle concurrent 3673 * send* calls on a socket. 
3674 */ 3675 ixa = conn_get_ixa(connp, B_FALSE); 3676 if (ixa == NULL) { 3677 error = ENOMEM; 3678 goto ud_error2; 3679 } 3680 mutex_enter(&connp->conn_lock); 3681 3682 if (conn_same_as_last_v6(connp, sin6) && 3683 connp->conn_lastsrcid == srcid && 3684 ipsec_outbound_policy_current(ixa)) { 3685 UDP_DBGSTAT(us, udp_out_lastdst); 3686 /* udp_output_lastdst drops conn_lock */ 3687 error = udp_output_lastdst(connp, data_mp, cr, 3688 pid, ixa); 3689 } else { 3690 UDP_DBGSTAT(us, udp_out_diffdst); 3691 /* udp_output_newdst drops conn_lock */ 3692 error = udp_output_newdst(connp, data_mp, NULL, 3693 sin6, ipversion, cr, pid, ixa); 3694 } 3695 ASSERT(MUTEX_NOT_HELD(&connp->conn_lock)); 3696 } 3697 if (error == 0) { 3698 freeb(mp); 3699 return; 3700 } 3701 break; 3702 3703 case AF_INET: 3704 sin = (sin_t *)addr; 3705 if ((!OK_32PTR((char *)sin) || addrlen != sizeof (sin_t)) || 3706 (sin->sin_family != AF_INET)) { 3707 error = EADDRNOTAVAIL; 3708 goto ud_error2; 3709 } 3710 UDP_DBGSTAT(us, udp_out_ipv4); 3711 if (sin->sin_addr.s_addr == INADDR_ANY) 3712 sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK); 3713 ipversion = IPV4_VERSION; 3714 3715 srcid = 0; 3716 if (tudr->OPT_length != 0) { 3717 /* 3718 * If we are connected then the destination needs to be 3719 * the same as the connected one. 3720 */ 3721 if (udp->udp_state == TS_DATA_XFER && 3722 !conn_same_as_last_v4(connp, sin)) { 3723 error = EISCONN; 3724 goto ud_error2; 3725 } 3726 UDP_STAT(us, udp_out_opt); 3727 error = udp_output_ancillary(connp, sin, NULL, 3728 data_mp, mp, NULL, cr, pid); 3729 } else { 3730 ip_xmit_attr_t *ixa; 3731 3732 /* 3733 * We have to allocate an ip_xmit_attr_t before we grab 3734 * conn_lock and we need to hold conn_lock once we've 3735 * checked conn_same_as_last_v4 to handle concurrent 3736 * send* calls on a socket. 3737 */ 3738 ixa = conn_get_ixa(connp, B_FALSE); 3739 if (ixa == NULL) { 3740 error = ENOMEM; 3741 goto ud_error2; 3742 } 3743 mutex_enter(&connp->conn_lock); 3744 3745 if (conn_same_as_last_v4(connp, sin) && 3746 ipsec_outbound_policy_current(ixa)) { 3747 UDP_DBGSTAT(us, udp_out_lastdst); 3748 /* udp_output_lastdst drops conn_lock */ 3749 error = udp_output_lastdst(connp, data_mp, cr, 3750 pid, ixa); 3751 } else { 3752 UDP_DBGSTAT(us, udp_out_diffdst); 3753 /* udp_output_newdst drops conn_lock */ 3754 error = udp_output_newdst(connp, data_mp, sin, 3755 NULL, ipversion, cr, pid, ixa); 3756 } 3757 ASSERT(MUTEX_NOT_HELD(&connp->conn_lock)); 3758 } 3759 if (error == 0) { 3760 freeb(mp); 3761 return; 3762 } 3763 break; 3764 } 3765 UDP_STAT(us, udp_out_err_output); 3766 ASSERT(mp != NULL); 3767 /* mp is freed by the following routine */ 3768 udp_ud_err(q, mp, (t_scalar_t)error); 3769 return; 3770 3771 ud_error2: 3772 UDPS_BUMP_MIB(us, udpOutErrors); 3773 freemsg(data_mp); 3774 UDP_STAT(us, udp_out_err_output); 3775 ASSERT(mp != NULL); 3776 /* mp is freed by the following routine */ 3777 udp_ud_err(q, mp, (t_scalar_t)error); 3778 } 3779 3780 /* 3781 * Handle the case of the IP address, port, flow label being different 3782 * for both IPv4 and IPv6. 3783 * 3784 * NOTE: The caller must hold conn_lock and we drop it here. 
3785 */ 3786 static int 3787 udp_output_newdst(conn_t *connp, mblk_t *data_mp, sin_t *sin, sin6_t *sin6, 3788 ushort_t ipversion, cred_t *cr, pid_t pid, ip_xmit_attr_t *ixa) 3789 { 3790 uint_t srcid; 3791 uint32_t flowinfo; 3792 udp_t *udp = connp->conn_udp; 3793 int error = 0; 3794 ip_xmit_attr_t *oldixa; 3795 udp_stack_t *us = udp->udp_us; 3796 in6_addr_t v6src; 3797 in6_addr_t v6dst; 3798 in6_addr_t v6nexthop; 3799 in_port_t dstport; 3800 3801 ASSERT(MUTEX_HELD(&connp->conn_lock)); 3802 ASSERT(ixa != NULL); 3803 /* 3804 * We hold conn_lock across all the use and modifications of 3805 * the conn_lastdst, conn_ixa, and conn_xmit_ipp to ensure that they 3806 * stay consistent. 3807 */ 3808 3809 ASSERT(cr != NULL); 3810 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 3811 ixa->ixa_cred = cr; 3812 ixa->ixa_cpid = pid; 3813 if (is_system_labeled()) { 3814 /* We need to restart with a label based on the cred */ 3815 ip_xmit_attr_restore_tsl(ixa, ixa->ixa_cred); 3816 } 3817 3818 /* 3819 * If we are connected then the destination needs to be the 3820 * same as the connected one, which is not the case here since we 3821 * checked for that above. 3822 */ 3823 if (udp->udp_state == TS_DATA_XFER) { 3824 mutex_exit(&connp->conn_lock); 3825 error = EISCONN; 3826 goto ud_error; 3827 } 3828 3829 /* In case previous destination was multicast or multirt */ 3830 ip_attr_newdst(ixa); 3831 3832 /* 3833 * If laddr is unspecified then we look at sin6_src_id. 3834 * We will give precedence to a source address set with IPV6_PKTINFO 3835 * (aka IPPF_ADDR) but that is handled in build_hdrs. However, we don't 3836 * want ip_attr_connect to select a source (since it can fail) when 3837 * IPV6_PKTINFO is specified. 3838 * If this doesn't result in a source address then we get a source 3839 * from ip_attr_connect() below. 3840 */ 3841 v6src = connp->conn_saddr_v6; 3842 if (sin != NULL) { 3843 IN6_IPADDR_TO_V4MAPPED(sin->sin_addr.s_addr, &v6dst); 3844 dstport = sin->sin_port; 3845 flowinfo = 0; 3846 /* Don't bother with ip_srcid_find_id(), but indicate anyway. */ 3847 srcid = 0; 3848 ixa->ixa_flags &= ~IXAF_SCOPEID_SET; 3849 ixa->ixa_flags |= IXAF_IS_IPV4; 3850 } else { 3851 boolean_t v4mapped; 3852 3853 v6dst = sin6->sin6_addr; 3854 dstport = sin6->sin6_port; 3855 flowinfo = sin6->sin6_flowinfo; 3856 srcid = sin6->__sin6_src_id; 3857 if (IN6_IS_ADDR_LINKSCOPE(&v6dst) && sin6->sin6_scope_id != 0) { 3858 ixa->ixa_scopeid = sin6->sin6_scope_id; 3859 ixa->ixa_flags |= IXAF_SCOPEID_SET; 3860 } else { 3861 ixa->ixa_flags &= ~IXAF_SCOPEID_SET; 3862 } 3863 v4mapped = IN6_IS_ADDR_V4MAPPED(&v6dst); 3864 if (v4mapped) 3865 ixa->ixa_flags |= IXAF_IS_IPV4; 3866 else 3867 ixa->ixa_flags &= ~IXAF_IS_IPV4; 3868 if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&v6src)) { 3869 if (!ip_srcid_find_id(srcid, &v6src, IPCL_ZONEID(connp), 3870 v4mapped, connp->conn_netstack)) { 3871 /* Mismatched v4mapped/v6 specified by srcid. */ 3872 mutex_exit(&connp->conn_lock); 3873 error = EADDRNOTAVAIL; 3874 goto ud_error; 3875 } 3876 } 3877 } 3878 /* Handle IP_PKTINFO/IPV6_PKTINFO setting source address. 
*/ 3879 if (connp->conn_xmit_ipp.ipp_fields & IPPF_ADDR) { 3880 ip_pkt_t *ipp = &connp->conn_xmit_ipp; 3881 3882 if (ixa->ixa_flags & IXAF_IS_IPV4) { 3883 if (IN6_IS_ADDR_V4MAPPED(&ipp->ipp_addr)) 3884 v6src = ipp->ipp_addr; 3885 } else { 3886 if (!IN6_IS_ADDR_V4MAPPED(&ipp->ipp_addr)) 3887 v6src = ipp->ipp_addr; 3888 } 3889 } 3890 3891 ip_attr_nexthop(&connp->conn_xmit_ipp, ixa, &v6dst, &v6nexthop); 3892 mutex_exit(&connp->conn_lock); 3893 3894 error = ip_attr_connect(connp, ixa, &v6src, &v6dst, &v6nexthop, dstport, 3895 &v6src, NULL, IPDF_ALLOW_MCBC | IPDF_VERIFY_DST | IPDF_IPSEC); 3896 switch (error) { 3897 case 0: 3898 break; 3899 case EADDRNOTAVAIL: 3900 /* 3901 * IXAF_VERIFY_SOURCE tells us to pick a better source. 3902 * Don't have the application see that errno 3903 */ 3904 error = ENETUNREACH; 3905 goto failed; 3906 case ENETDOWN: 3907 /* 3908 * Have !ipif_addr_ready address; drop packet silently 3909 * until we can get applications to not send until we 3910 * are ready. 3911 */ 3912 error = 0; 3913 goto failed; 3914 case EHOSTUNREACH: 3915 case ENETUNREACH: 3916 if (ixa->ixa_ire != NULL) { 3917 /* 3918 * Let conn_ip_output/ire_send_noroute return 3919 * the error and send any local ICMP error. 3920 */ 3921 error = 0; 3922 break; 3923 } 3924 /* FALLTHRU */ 3925 failed: 3926 default: 3927 goto ud_error; 3928 } 3929 3930 3931 /* 3932 * Cluster note: we let the cluster hook know that we are sending to a 3933 * new address and/or port. 3934 */ 3935 if (cl_inet_connect2 != NULL) { 3936 CL_INET_UDP_CONNECT(connp, B_TRUE, &v6dst, dstport, error); 3937 if (error != 0) { 3938 error = EHOSTUNREACH; 3939 goto ud_error; 3940 } 3941 } 3942 3943 mutex_enter(&connp->conn_lock); 3944 /* 3945 * While we dropped the lock some other thread might have connected 3946 * this socket. If so we bail out with EISCONN to ensure that the 3947 * connecting thread is the one that updates conn_ixa, conn_ht_* 3948 * and conn_*last*. 3949 */ 3950 if (udp->udp_state == TS_DATA_XFER) { 3951 mutex_exit(&connp->conn_lock); 3952 error = EISCONN; 3953 goto ud_error; 3954 } 3955 3956 /* 3957 * We need to rebuild the headers if 3958 * - we are labeling packets (could be different for different 3959 * destinations) 3960 * - we have a source route (or routing header) since we need to 3961 * massage that to get the pseudo-header checksum 3962 * - the IP version is different than the last time 3963 * - a socket option with COA_HEADER_CHANGED has been set which 3964 * set conn_v6lastdst to zero. 3965 * 3966 * Otherwise the prepend function will just update the src, dst, 3967 * dstport, and flow label. 3968 */ 3969 if (is_system_labeled()) { 3970 /* TX MLP requires SCM_UCRED and don't have that here */ 3971 if (connp->conn_mlp_type != mlptSingle) { 3972 mutex_exit(&connp->conn_lock); 3973 error = ECONNREFUSED; 3974 goto ud_error; 3975 } 3976 /* 3977 * Check whether Trusted Solaris policy allows communication 3978 * with this host, and pretend that the destination is 3979 * unreachable if not. 3980 * Compute any needed label and place it in ipp_label_v4/v6. 3981 * 3982 * Later conn_build_hdr_template/conn_prepend_hdr takes 3983 * ipp_label_v4/v6 to form the packet. 3984 * 3985 * Tsol note: Since we hold conn_lock we know no other 3986 * thread manipulates conn_xmit_ipp. 
3987 */ 3988 error = conn_update_label(connp, ixa, &v6dst, 3989 &connp->conn_xmit_ipp); 3990 if (error != 0) { 3991 mutex_exit(&connp->conn_lock); 3992 goto ud_error; 3993 } 3994 /* Rebuild the header template */ 3995 error = udp_build_hdr_template(connp, &v6src, &v6dst, dstport, 3996 flowinfo); 3997 if (error != 0) { 3998 mutex_exit(&connp->conn_lock); 3999 goto ud_error; 4000 } 4001 } else if ((connp->conn_xmit_ipp.ipp_fields & 4002 (IPPF_IPV4_OPTIONS|IPPF_RTHDR)) || 4003 ipversion != connp->conn_lastipversion || 4004 IN6_IS_ADDR_UNSPECIFIED(&connp->conn_v6lastdst)) { 4005 /* Rebuild the header template */ 4006 error = udp_build_hdr_template(connp, &v6src, &v6dst, dstport, 4007 flowinfo); 4008 if (error != 0) { 4009 mutex_exit(&connp->conn_lock); 4010 goto ud_error; 4011 } 4012 } else { 4013 /* Simply update the destination address if no source route */ 4014 if (ixa->ixa_flags & IXAF_IS_IPV4) { 4015 ipha_t *ipha = (ipha_t *)connp->conn_ht_iphc; 4016 4017 IN6_V4MAPPED_TO_IPADDR(&v6dst, ipha->ipha_dst); 4018 if (ixa->ixa_flags & IXAF_PMTU_IPV4_DF) { 4019 ipha->ipha_fragment_offset_and_flags |= 4020 IPH_DF_HTONS; 4021 } else { 4022 ipha->ipha_fragment_offset_and_flags &= 4023 ~IPH_DF_HTONS; 4024 } 4025 } else { 4026 ip6_t *ip6h = (ip6_t *)connp->conn_ht_iphc; 4027 ip6h->ip6_dst = v6dst; 4028 } 4029 } 4030 4031 /* 4032 * Remember the dst/dstport etc which corresponds to the built header 4033 * template and conn_ixa. 4034 */ 4035 oldixa = conn_replace_ixa(connp, ixa); 4036 connp->conn_v6lastdst = v6dst; 4037 connp->conn_lastipversion = ipversion; 4038 connp->conn_lastdstport = dstport; 4039 connp->conn_lastflowinfo = flowinfo; 4040 connp->conn_lastscopeid = ixa->ixa_scopeid; 4041 connp->conn_lastsrcid = srcid; 4042 /* Also remember a source to use together with lastdst */ 4043 connp->conn_v6lastsrc = v6src; 4044 4045 data_mp = udp_prepend_header_template(connp, ixa, data_mp, &v6src, 4046 dstport, flowinfo, &error); 4047 4048 /* Done with conn_t */ 4049 mutex_exit(&connp->conn_lock); 4050 ixa_refrele(oldixa); 4051 4052 if (data_mp == NULL) { 4053 ASSERT(error != 0); 4054 goto ud_error; 4055 } 4056 4057 /* We're done. Pass the packet to ip. */ 4058 UDPS_BUMP_MIB(us, udpHCOutDatagrams); 4059 4060 DTRACE_UDP5(send, mblk_t *, NULL, ip_xmit_attr_t *, ixa, 4061 void_ip_t *, data_mp->b_rptr, udp_t *, udp, udpha_t *, 4062 &data_mp->b_rptr[ixa->ixa_ip_hdr_length]); 4063 4064 error = conn_ip_output(data_mp, ixa); 4065 /* No udpOutErrors if an error since IP increases its error counter */ 4066 switch (error) { 4067 case 0: 4068 break; 4069 case EWOULDBLOCK: 4070 (void) ixa_check_drain_insert(connp, ixa); 4071 error = 0; 4072 break; 4073 case EADDRNOTAVAIL: 4074 /* 4075 * IXAF_VERIFY_SOURCE tells us to pick a better source. 4076 * Don't have the application see that errno 4077 */ 4078 error = ENETUNREACH; 4079 /* FALLTHRU */ 4080 default: 4081 mutex_enter(&connp->conn_lock); 4082 /* 4083 * Clear the source and v6lastdst so we call ip_attr_connect 4084 * for the next packet and try to pick a better source. 
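 * Clearing conn_v6lastdst ensures that conn_same_as_last_v4/v6 fails on
 * the next send, so it goes through udp_output_newdst() again; the source
 * reverts to the bound address (or stays unspecified for a
 * multicast/broadcast bind).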
4085 */ 4086 if (connp->conn_mcbc_bind) 4087 connp->conn_saddr_v6 = ipv6_all_zeros; 4088 else 4089 connp->conn_saddr_v6 = connp->conn_bound_addr_v6; 4090 connp->conn_v6lastdst = ipv6_all_zeros; 4091 mutex_exit(&connp->conn_lock); 4092 break; 4093 } 4094 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 4095 ixa->ixa_cred = connp->conn_cred; /* Restore */ 4096 ixa->ixa_cpid = connp->conn_cpid; 4097 ixa_refrele(ixa); 4098 return (error); 4099 4100 ud_error: 4101 ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED)); 4102 ixa->ixa_cred = connp->conn_cred; /* Restore */ 4103 ixa->ixa_cpid = connp->conn_cpid; 4104 ixa_refrele(ixa); 4105 4106 freemsg(data_mp); 4107 UDPS_BUMP_MIB(us, udpOutErrors); 4108 UDP_STAT(us, udp_out_err_output); 4109 return (error); 4110 } 4111 4112 /* ARGSUSED */ 4113 static void 4114 udp_wput_fallback(queue_t *wq, mblk_t *mp) 4115 { 4116 #ifdef DEBUG 4117 cmn_err(CE_CONT, "udp_wput_fallback: Message in fallback \n"); 4118 #endif 4119 freemsg(mp); 4120 } 4121 4122 4123 /* 4124 * Handle special out-of-band ioctl requests (see PSARC/2008/265). 4125 */ 4126 static void 4127 udp_wput_cmdblk(queue_t *q, mblk_t *mp) 4128 { 4129 void *data; 4130 mblk_t *datamp = mp->b_cont; 4131 conn_t *connp = Q_TO_CONN(q); 4132 udp_t *udp = connp->conn_udp; 4133 cmdblk_t *cmdp = (cmdblk_t *)mp->b_rptr; 4134 4135 if (datamp == NULL || MBLKL(datamp) < cmdp->cb_len) { 4136 cmdp->cb_error = EPROTO; 4137 qreply(q, mp); 4138 return; 4139 } 4140 data = datamp->b_rptr; 4141 4142 mutex_enter(&connp->conn_lock); 4143 switch (cmdp->cb_cmd) { 4144 case TI_GETPEERNAME: 4145 if (udp->udp_state != TS_DATA_XFER) 4146 cmdp->cb_error = ENOTCONN; 4147 else 4148 cmdp->cb_error = conn_getpeername(connp, data, 4149 &cmdp->cb_len); 4150 break; 4151 case TI_GETMYNAME: 4152 cmdp->cb_error = conn_getsockname(connp, data, &cmdp->cb_len); 4153 break; 4154 default: 4155 cmdp->cb_error = EINVAL; 4156 break; 4157 } 4158 mutex_exit(&connp->conn_lock); 4159 4160 qreply(q, mp); 4161 } 4162 4163 static void 4164 udp_use_pure_tpi(udp_t *udp) 4165 { 4166 conn_t *connp = udp->udp_connp; 4167 4168 mutex_enter(&connp->conn_lock); 4169 udp->udp_issocket = B_FALSE; 4170 mutex_exit(&connp->conn_lock); 4171 UDP_STAT(udp->udp_us, udp_sock_fallback); 4172 } 4173 4174 static void 4175 udp_wput_other(queue_t *q, mblk_t *mp) 4176 { 4177 uchar_t *rptr = mp->b_rptr; 4178 struct iocblk *iocp; 4179 conn_t *connp = Q_TO_CONN(q); 4180 udp_t *udp = connp->conn_udp; 4181 cred_t *cr; 4182 4183 switch (mp->b_datap->db_type) { 4184 case M_CMD: 4185 udp_wput_cmdblk(q, mp); 4186 return; 4187 4188 case M_PROTO: 4189 case M_PCPROTO: 4190 if (mp->b_wptr - rptr < sizeof (t_scalar_t)) { 4191 /* 4192 * If the message does not contain a PRIM_type, 4193 * throw it away. 4194 */ 4195 freemsg(mp); 4196 return; 4197 } 4198 switch (((t_primp_t)rptr)->type) { 4199 case T_ADDR_REQ: 4200 udp_addr_req(q, mp); 4201 return; 4202 case O_T_BIND_REQ: 4203 case T_BIND_REQ: 4204 udp_tpi_bind(q, mp); 4205 return; 4206 case T_CONN_REQ: 4207 udp_tpi_connect(q, mp); 4208 return; 4209 case T_CAPABILITY_REQ: 4210 udp_capability_req(q, mp); 4211 return; 4212 case T_INFO_REQ: 4213 udp_info_req(q, mp); 4214 return; 4215 case T_UNITDATA_REQ: 4216 /* 4217 * If a T_UNITDATA_REQ gets here, the address must 4218 * be bad. Valid T_UNITDATA_REQs are handled 4219 * in udp_wput. 
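 * We bounce the request back to the user with EADDRNOTAVAIL via
 * udp_ud_err() below rather than failing the whole stream.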
4220 */ 4221 udp_ud_err(q, mp, EADDRNOTAVAIL); 4222 return; 4223 case T_UNBIND_REQ: 4224 udp_tpi_unbind(q, mp); 4225 return; 4226 case T_SVR4_OPTMGMT_REQ: 4227 /* 4228 * All Solaris components should pass a db_credp 4229 * for this TPI message, hence we ASSERT. 4230 * But in case there is some other M_PROTO that looks 4231 * like a TPI message sent by some other kernel 4232 * component, we check and return an error. 4233 */ 4234 cr = msg_getcred(mp, NULL); 4235 ASSERT(cr != NULL); 4236 if (cr == NULL) { 4237 udp_err_ack(q, mp, TSYSERR, EINVAL); 4238 return; 4239 } 4240 if (!snmpcom_req(q, mp, udp_snmp_set, ip_snmp_get, 4241 cr)) { 4242 svr4_optcom_req(q, mp, cr, &udp_opt_obj); 4243 } 4244 return; 4245 4246 case T_OPTMGMT_REQ: 4247 /* 4248 * All Solaris components should pass a db_credp 4249 * for this TPI message, hence we ASSERT. 4250 * But in case there is some other M_PROTO that looks 4251 * like a TPI message sent by some other kernel 4252 * component, we check and return an error. 4253 */ 4254 cr = msg_getcred(mp, NULL); 4255 ASSERT(cr != NULL); 4256 if (cr == NULL) { 4257 udp_err_ack(q, mp, TSYSERR, EINVAL); 4258 return; 4259 } 4260 tpi_optcom_req(q, mp, cr, &udp_opt_obj); 4261 return; 4262 4263 case T_DISCON_REQ: 4264 udp_tpi_disconnect(q, mp); 4265 return; 4266 4267 /* The following TPI message is not supported by udp. */ 4268 case O_T_CONN_RES: 4269 case T_CONN_RES: 4270 udp_err_ack(q, mp, TNOTSUPPORT, 0); 4271 return; 4272 4273 /* The following 3 TPI requests are illegal for udp. */ 4274 case T_DATA_REQ: 4275 case T_EXDATA_REQ: 4276 case T_ORDREL_REQ: 4277 udp_err_ack(q, mp, TNOTSUPPORT, 0); 4278 return; 4279 default: 4280 break; 4281 } 4282 break; 4283 case M_FLUSH: 4284 if (*rptr & FLUSHW) 4285 flushq(q, FLUSHDATA); 4286 break; 4287 case M_IOCTL: 4288 iocp = (struct iocblk *)mp->b_rptr; 4289 switch (iocp->ioc_cmd) { 4290 case TI_GETPEERNAME: 4291 if (udp->udp_state != TS_DATA_XFER) { 4292 /* 4293 * If a default destination address has not 4294 * been associated with the stream, then we 4295 * don't know the peer's name. 4296 */ 4297 iocp->ioc_error = ENOTCONN; 4298 iocp->ioc_count = 0; 4299 mp->b_datap->db_type = M_IOCACK; 4300 qreply(q, mp); 4301 return; 4302 } 4303 /* FALLTHRU */ 4304 case TI_GETMYNAME: 4305 /* 4306 * For TI_GETPEERNAME and TI_GETMYNAME, we first 4307 * need to copyin the user's strbuf structure. 4308 * Processing will continue in the M_IOCDATA case 4309 * below. 4310 */ 4311 mi_copyin(q, mp, NULL, 4312 SIZEOF_STRUCT(strbuf, iocp->ioc_flag)); 4313 return; 4314 case _SIOCSOCKFALLBACK: 4315 /* 4316 * Either sockmod is about to be popped and the 4317 * socket would now be treated as a plain stream, 4318 * or a module is about to be pushed so we have 4319 * to follow pure TPI semantics. 4320 */ 4321 if (!udp->udp_issocket) { 4322 DB_TYPE(mp) = M_IOCNAK; 4323 iocp->ioc_error = EINVAL; 4324 } else { 4325 udp_use_pure_tpi(udp); 4326 4327 DB_TYPE(mp) = M_IOCACK; 4328 iocp->ioc_error = 0; 4329 } 4330 iocp->ioc_count = 0; 4331 iocp->ioc_rval = 0; 4332 qreply(q, mp); 4333 return; 4334 default: 4335 break; 4336 } 4337 break; 4338 case M_IOCDATA: 4339 udp_wput_iocdata(q, mp); 4340 return; 4341 default: 4342 /* Unrecognized messages are passed through without change. */ 4343 break; 4344 } 4345 ip_wput_nondata(q, mp); 4346 } 4347 4348 /* 4349 * udp_wput_iocdata is called by udp_wput_other to handle all M_IOCDATA 4350 * messages. 
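 * TI_GETMYNAME and TI_GETPEERNAME are handled as a small mi_copy state
 * machine: udp_wput_other copied in the user's strbuf, the MI_COPY_IN
 * state below copies the sockaddr out to the user's buffer, the next
 * state copies out the updated strbuf, and the final state acks the
 * original M_IOCTL.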
4351 */ 4352 static void 4353 udp_wput_iocdata(queue_t *q, mblk_t *mp) 4354 { 4355 mblk_t *mp1; 4356 struct iocblk *iocp = (struct iocblk *)mp->b_rptr; 4357 STRUCT_HANDLE(strbuf, sb); 4358 uint_t addrlen; 4359 conn_t *connp = Q_TO_CONN(q); 4360 udp_t *udp = connp->conn_udp; 4361 4362 /* Make sure it is one of ours. */ 4363 switch (iocp->ioc_cmd) { 4364 case TI_GETMYNAME: 4365 case TI_GETPEERNAME: 4366 break; 4367 default: 4368 ip_wput_nondata(q, mp); 4369 return; 4370 } 4371 4372 switch (mi_copy_state(q, mp, &mp1)) { 4373 case -1: 4374 return; 4375 case MI_COPY_CASE(MI_COPY_IN, 1): 4376 break; 4377 case MI_COPY_CASE(MI_COPY_OUT, 1): 4378 /* 4379 * The address has been copied out, so now 4380 * copyout the strbuf. 4381 */ 4382 mi_copyout(q, mp); 4383 return; 4384 case MI_COPY_CASE(MI_COPY_OUT, 2): 4385 /* 4386 * The address and strbuf have been copied out. 4387 * We're done, so just acknowledge the original 4388 * M_IOCTL. 4389 */ 4390 mi_copy_done(q, mp, 0); 4391 return; 4392 default: 4393 /* 4394 * Something strange has happened, so acknowledge 4395 * the original M_IOCTL with an EPROTO error. 4396 */ 4397 mi_copy_done(q, mp, EPROTO); 4398 return; 4399 } 4400 4401 /* 4402 * Now we have the strbuf structure for TI_GETMYNAME 4403 * and TI_GETPEERNAME. Next we copyout the requested 4404 * address and then we'll copyout the strbuf. 4405 */ 4406 STRUCT_SET_HANDLE(sb, iocp->ioc_flag, (void *)mp1->b_rptr); 4407 4408 if (connp->conn_family == AF_INET) 4409 addrlen = sizeof (sin_t); 4410 else 4411 addrlen = sizeof (sin6_t); 4412 4413 if (STRUCT_FGET(sb, maxlen) < addrlen) { 4414 mi_copy_done(q, mp, EINVAL); 4415 return; 4416 } 4417 4418 switch (iocp->ioc_cmd) { 4419 case TI_GETMYNAME: 4420 break; 4421 case TI_GETPEERNAME: 4422 if (udp->udp_state != TS_DATA_XFER) { 4423 mi_copy_done(q, mp, ENOTCONN); 4424 return; 4425 } 4426 break; 4427 } 4428 mp1 = mi_copyout_alloc(q, mp, STRUCT_FGETP(sb, buf), addrlen, B_TRUE); 4429 if (!mp1) 4430 return; 4431 4432 STRUCT_FSET(sb, len, addrlen); 4433 switch (((struct iocblk *)mp->b_rptr)->ioc_cmd) { 4434 case TI_GETMYNAME: 4435 (void) conn_getsockname(connp, (struct sockaddr *)mp1->b_wptr, 4436 &addrlen); 4437 break; 4438 case TI_GETPEERNAME: 4439 (void) conn_getpeername(connp, (struct sockaddr *)mp1->b_wptr, 4440 &addrlen); 4441 break; 4442 } 4443 mp1->b_wptr += addrlen; 4444 /* Copy out the address */ 4445 mi_copyout(q, mp); 4446 } 4447 4448 void 4449 udp_ddi_g_init(void) 4450 { 4451 udp_max_optsize = optcom_max_optsize(udp_opt_obj.odb_opt_des_arr, 4452 udp_opt_obj.odb_opt_arr_cnt); 4453 4454 /* 4455 * We want to be informed each time a stack is created or 4456 * destroyed in the kernel, so we can maintain the 4457 * set of udp_stack_t's. 4458 */ 4459 netstack_register(NS_UDP, udp_stack_init, NULL, udp_stack_fini); 4460 } 4461 4462 void 4463 udp_ddi_g_destroy(void) 4464 { 4465 netstack_unregister(NS_UDP); 4466 } 4467 4468 #define INET_NAME "ip" 4469 4470 /* 4471 * Initialize the UDP stack instance. 
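 * This is the NS_UDP netstack create callback registered by
 * udp_ddi_g_init() above; it runs once for each netstack. It allocates
 * the bind fanout (rounded up to a power of two if needed), a private
 * copy of the property table, the per-CPU statistics array and the
 * kstats; udp_stack_fini() below tears all of this down.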
4472 */ 4473 static void * 4474 udp_stack_init(netstackid_t stackid, netstack_t *ns) 4475 { 4476 udp_stack_t *us; 4477 int i; 4478 int error = 0; 4479 major_t major; 4480 size_t arrsz; 4481 4482 us = (udp_stack_t *)kmem_zalloc(sizeof (*us), KM_SLEEP); 4483 us->us_netstack = ns; 4484 4485 mutex_init(&us->us_epriv_port_lock, NULL, MUTEX_DEFAULT, NULL); 4486 us->us_num_epriv_ports = UDP_NUM_EPRIV_PORTS; 4487 us->us_epriv_ports[0] = ULP_DEF_EPRIV_PORT1; 4488 us->us_epriv_ports[1] = ULP_DEF_EPRIV_PORT2; 4489 4490 /* 4491 * The smallest anonymous port in the privileged port range in which 4492 * UDP looks for a free port. Used with the UDP_ANONPRIVBIND option. 4493 */ 4494 us->us_min_anonpriv_port = 512; 4495 4496 us->us_bind_fanout_size = udp_bind_fanout_size; 4497 4498 /* Round up a variable that might have been modified in /etc/system */ 4499 if (!ISP2(us->us_bind_fanout_size)) { 4500 /* Not a power of two. Round up to the nearest power of two */ 4501 for (i = 0; i < 31; i++) { 4502 if (us->us_bind_fanout_size < (1 << i)) 4503 break; 4504 } 4505 us->us_bind_fanout_size = 1 << i; 4506 } 4507 us->us_bind_fanout = kmem_zalloc(us->us_bind_fanout_size * 4508 sizeof (udp_fanout_t), KM_SLEEP); 4509 for (i = 0; i < us->us_bind_fanout_size; i++) { 4510 mutex_init(&us->us_bind_fanout[i].uf_lock, NULL, MUTEX_DEFAULT, 4511 NULL); 4512 } 4513 4514 arrsz = udp_propinfo_count * sizeof (mod_prop_info_t); 4515 us->us_propinfo_tbl = (mod_prop_info_t *)kmem_alloc(arrsz, 4516 KM_SLEEP); 4517 bcopy(udp_propinfo_tbl, us->us_propinfo_tbl, arrsz); 4518 4519 /* Allocate the per netstack stats */ 4520 mutex_enter(&cpu_lock); 4521 us->us_sc_cnt = MAX(ncpus, boot_ncpus); 4522 mutex_exit(&cpu_lock); 4523 us->us_sc = kmem_zalloc(max_ncpus * sizeof (udp_stats_cpu_t *), 4524 KM_SLEEP); 4525 for (i = 0; i < us->us_sc_cnt; i++) { 4526 us->us_sc[i] = kmem_zalloc(sizeof (udp_stats_cpu_t), 4527 KM_SLEEP); 4528 } 4529 4530 us->us_kstat = udp_kstat2_init(stackid); 4531 us->us_mibkp = udp_kstat_init(stackid); 4532 4533 major = mod_name_to_major(INET_NAME); 4534 error = ldi_ident_from_major(major, &us->us_ldi_ident); 4535 ASSERT(error == 0); 4536 return (us); 4537 } 4538 4539 /* 4540 * Free the UDP stack instance. 4541 */ 4542 static void 4543 udp_stack_fini(netstackid_t stackid, void *arg) 4544 { 4545 udp_stack_t *us = (udp_stack_t *)arg; 4546 int i; 4547 4548 for (i = 0; i < us->us_bind_fanout_size; i++) { 4549 mutex_destroy(&us->us_bind_fanout[i].uf_lock); 4550 } 4551 4552 kmem_free(us->us_bind_fanout, us->us_bind_fanout_size * 4553 sizeof (udp_fanout_t)); 4554 4555 us->us_bind_fanout = NULL; 4556 4557 for (i = 0; i < us->us_sc_cnt; i++) 4558 kmem_free(us->us_sc[i], sizeof (udp_stats_cpu_t)); 4559 kmem_free(us->us_sc, max_ncpus * sizeof (udp_stats_cpu_t *)); 4560 4561 kmem_free(us->us_propinfo_tbl, 4562 udp_propinfo_count * sizeof (mod_prop_info_t)); 4563 us->us_propinfo_tbl = NULL; 4564 4565 udp_kstat_fini(stackid, us->us_mibkp); 4566 us->us_mibkp = NULL; 4567 4568 udp_kstat2_fini(stackid, us->us_kstat); 4569 us->us_kstat = NULL; 4570 4571 mutex_destroy(&us->us_epriv_port_lock); 4572 ldi_ident_release(us->us_ldi_ident); 4573 kmem_free(us, sizeof (*us)); 4574 } 4575 4576 static size_t 4577 udp_set_rcv_hiwat(udp_t *udp, size_t size) 4578 { 4579 udp_stack_t *us = udp->udp_us; 4580 4581 /* We add a bit of extra buffering */ 4582 size += size >> 1; 4583 if (size > us->us_max_buf) 4584 size = us->us_max_buf; 4585 4586 udp->udp_rcv_hiwat = size; 4587 return (size); 4588 } 4589 4590 /* 4591 * For the lower queue so that UDP can be a dummy mux.
4592 * Nobody should be sending 4593 * packets up this stream 4594 */ 4595 static void 4596 udp_lrput(queue_t *q, mblk_t *mp) 4597 { 4598 switch (mp->b_datap->db_type) { 4599 case M_FLUSH: 4600 /* Turn around */ 4601 if (*mp->b_rptr & FLUSHW) { 4602 *mp->b_rptr &= ~FLUSHR; 4603 qreply(q, mp); 4604 return; 4605 } 4606 break; 4607 } 4608 freemsg(mp); 4609 } 4610 4611 /* 4612 * For the lower queue so that UDP can be a dummy mux. 4613 * Nobody should be sending packets down this stream. 4614 */ 4615 /* ARGSUSED */ 4616 void 4617 udp_lwput(queue_t *q, mblk_t *mp) 4618 { 4619 freemsg(mp); 4620 } 4621 4622 /* 4623 * When a CPU is added, we need to allocate the per CPU stats struct. 4624 */ 4625 void 4626 udp_stack_cpu_add(udp_stack_t *us, processorid_t cpu_seqid) 4627 { 4628 int i; 4629 4630 if (cpu_seqid < us->us_sc_cnt) 4631 return; 4632 for (i = us->us_sc_cnt; i <= cpu_seqid; i++) { 4633 ASSERT(us->us_sc[i] == NULL); 4634 us->us_sc[i] = kmem_zalloc(sizeof (udp_stats_cpu_t), 4635 KM_SLEEP); 4636 } 4637 membar_producer(); 4638 us->us_sc_cnt = cpu_seqid + 1; 4639 } 4640 4641 /* 4642 * Below routines for UDP socket module. 4643 */ 4644 4645 static conn_t * 4646 udp_do_open(cred_t *credp, boolean_t isv6, int flags, int *errorp) 4647 { 4648 udp_t *udp; 4649 conn_t *connp; 4650 zoneid_t zoneid; 4651 netstack_t *ns; 4652 udp_stack_t *us; 4653 int len; 4654 4655 ASSERT(errorp != NULL); 4656 4657 if ((*errorp = secpolicy_basic_net_access(credp)) != 0) 4658 return (NULL); 4659 4660 ns = netstack_find_by_cred(credp); 4661 ASSERT(ns != NULL); 4662 us = ns->netstack_udp; 4663 ASSERT(us != NULL); 4664 4665 /* 4666 * For exclusive stacks we set the zoneid to zero 4667 * to make UDP operate as if in the global zone. 4668 */ 4669 if (ns->netstack_stackid != GLOBAL_NETSTACKID) 4670 zoneid = GLOBAL_ZONEID; 4671 else 4672 zoneid = crgetzoneid(credp); 4673 4674 ASSERT(flags == KM_SLEEP || flags == KM_NOSLEEP); 4675 4676 connp = ipcl_conn_create(IPCL_UDPCONN, flags, ns); 4677 if (connp == NULL) { 4678 netstack_rele(ns); 4679 *errorp = ENOMEM; 4680 return (NULL); 4681 } 4682 udp = connp->conn_udp; 4683 4684 /* 4685 * ipcl_conn_create did a netstack_hold. Undo the hold that was 4686 * done by netstack_find_by_cred() 4687 */ 4688 netstack_rele(ns); 4689 4690 /* 4691 * Since this conn_t/udp_t is not yet visible to anybody else we don't 4692 * need to lock anything. 4693 */ 4694 ASSERT(connp->conn_proto == IPPROTO_UDP); 4695 ASSERT(connp->conn_udp == udp); 4696 ASSERT(udp->udp_connp == connp); 4697 4698 /* Set the initial state of the stream and the privilege status. 
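 * The endpoint starts out in TS_UNBND with IXAF_VERIFY_SOURCE set; the
 * address family, IP version, default TTL/hoplimit and header length all
 * follow from isv6, and the MAC-aware mode is inherited from the opener's
 * NET_MAC_AWARE process flag.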
*/ 4699 udp->udp_state = TS_UNBND; 4700 connp->conn_ixa->ixa_flags |= IXAF_VERIFY_SOURCE; 4701 if (isv6) { 4702 connp->conn_family = AF_INET6; 4703 connp->conn_ipversion = IPV6_VERSION; 4704 connp->conn_ixa->ixa_flags &= ~IXAF_IS_IPV4; 4705 connp->conn_default_ttl = us->us_ipv6_hoplimit; 4706 len = sizeof (ip6_t) + UDPH_SIZE; 4707 } else { 4708 connp->conn_family = AF_INET; 4709 connp->conn_ipversion = IPV4_VERSION; 4710 connp->conn_ixa->ixa_flags |= IXAF_IS_IPV4; 4711 connp->conn_default_ttl = us->us_ipv4_ttl; 4712 len = sizeof (ipha_t) + UDPH_SIZE; 4713 } 4714 4715 ASSERT(connp->conn_ixa->ixa_protocol == connp->conn_proto); 4716 connp->conn_xmit_ipp.ipp_unicast_hops = connp->conn_default_ttl; 4717 4718 connp->conn_ixa->ixa_multicast_ttl = IP_DEFAULT_MULTICAST_TTL; 4719 connp->conn_ixa->ixa_flags |= IXAF_MULTICAST_LOOP | IXAF_SET_ULP_CKSUM; 4720 /* conn_allzones can not be set this early, hence no IPCL_ZONEID */ 4721 connp->conn_ixa->ixa_zoneid = zoneid; 4722 4723 connp->conn_zoneid = zoneid; 4724 4725 /* 4726 * If the caller has the process-wide flag set, then default to MAC 4727 * exempt mode. This allows read-down to unlabeled hosts. 4728 */ 4729 if (getpflags(NET_MAC_AWARE, credp) != 0) 4730 connp->conn_mac_mode = CONN_MAC_AWARE; 4731 4732 connp->conn_zone_is_global = (crgetzoneid(credp) == GLOBAL_ZONEID); 4733 4734 udp->udp_us = us; 4735 4736 connp->conn_rcvbuf = us->us_recv_hiwat; 4737 connp->conn_sndbuf = us->us_xmit_hiwat; 4738 connp->conn_sndlowat = us->us_xmit_lowat; 4739 connp->conn_rcvlowat = udp_mod_info.mi_lowat; 4740 4741 connp->conn_wroff = len + us->us_wroff_extra; 4742 connp->conn_so_type = SOCK_DGRAM; 4743 4744 connp->conn_recv = udp_input; 4745 connp->conn_recvicmp = udp_icmp_input; 4746 crhold(credp); 4747 connp->conn_cred = credp; 4748 connp->conn_cpid = curproc->p_pid; 4749 connp->conn_open_time = ddi_get_lbolt64(); 4750 /* Cache things in ixa without an extra refhold */ 4751 ASSERT(!(connp->conn_ixa->ixa_free_flags & IXA_FREE_CRED)); 4752 connp->conn_ixa->ixa_cred = connp->conn_cred; 4753 connp->conn_ixa->ixa_cpid = connp->conn_cpid; 4754 if (is_system_labeled()) 4755 connp->conn_ixa->ixa_tsl = crgetlabel(connp->conn_cred); 4756 4757 *((sin6_t *)&udp->udp_delayed_addr) = sin6_null; 4758 4759 if (us->us_pmtu_discovery) 4760 connp->conn_ixa->ixa_flags |= IXAF_PMTU_DISCOVERY; 4761 4762 return (connp); 4763 } 4764 4765 sock_lower_handle_t 4766 udp_create(int family, int type, int proto, sock_downcalls_t **sock_downcalls, 4767 uint_t *smodep, int *errorp, int flags, cred_t *credp) 4768 { 4769 udp_t *udp = NULL; 4770 udp_stack_t *us; 4771 conn_t *connp; 4772 boolean_t isv6; 4773 4774 if (type != SOCK_DGRAM || (family != AF_INET && family != AF_INET6) || 4775 (proto != 0 && proto != IPPROTO_UDP)) { 4776 *errorp = EPROTONOSUPPORT; 4777 return (NULL); 4778 } 4779 4780 if (family == AF_INET6) 4781 isv6 = B_TRUE; 4782 else 4783 isv6 = B_FALSE; 4784 4785 connp = udp_do_open(credp, isv6, flags, errorp); 4786 if (connp == NULL) 4787 return (NULL); 4788 4789 udp = connp->conn_udp; 4790 ASSERT(udp != NULL); 4791 us = udp->udp_us; 4792 ASSERT(us != NULL); 4793 4794 udp->udp_issocket = B_TRUE; 4795 connp->conn_flags |= IPCL_NONSTR; 4796 4797 /* 4798 * Set flow control 4799 * Since this conn_t/udp_t is not yet visible to anybody else we don't 4800 * need to lock anything. 
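 * Note that udp_set_rcv_hiwat() pads the requested size by half
 * (size + size/2) and caps the result at us_max_buf, so the effective
 * receive high water mark may differ from conn_rcvbuf.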
4801 */ 4802 (void) udp_set_rcv_hiwat(udp, connp->conn_rcvbuf); 4803 udp->udp_rcv_disply_hiwat = connp->conn_rcvbuf; 4804 4805 connp->conn_flow_cntrld = B_FALSE; 4806 4807 mutex_enter(&connp->conn_lock); 4808 connp->conn_state_flags &= ~CONN_INCIPIENT; 4809 mutex_exit(&connp->conn_lock); 4810 4811 *errorp = 0; 4812 *smodep = SM_ATOMIC; 4813 *sock_downcalls = &sock_udp_downcalls; 4814 return ((sock_lower_handle_t)connp); 4815 } 4816 4817 /* ARGSUSED3 */ 4818 void 4819 udp_activate(sock_lower_handle_t proto_handle, sock_upper_handle_t sock_handle, 4820 sock_upcalls_t *sock_upcalls, int flags, cred_t *cr) 4821 { 4822 conn_t *connp = (conn_t *)proto_handle; 4823 struct sock_proto_props sopp; 4824 4825 /* All Solaris components should pass a cred for this operation. */ 4826 ASSERT(cr != NULL); 4827 4828 connp->conn_upcalls = sock_upcalls; 4829 connp->conn_upper_handle = sock_handle; 4830 4831 sopp.sopp_flags = SOCKOPT_WROFF | SOCKOPT_RCVHIWAT | SOCKOPT_RCVLOWAT | 4832 SOCKOPT_MAXBLK | SOCKOPT_MAXPSZ | SOCKOPT_MINPSZ; 4833 sopp.sopp_wroff = connp->conn_wroff; 4834 sopp.sopp_maxblk = INFPSZ; 4835 sopp.sopp_rxhiwat = connp->conn_rcvbuf; 4836 sopp.sopp_rxlowat = connp->conn_rcvlowat; 4837 sopp.sopp_maxaddrlen = sizeof (sin6_t); 4838 sopp.sopp_maxpsz = 4839 (connp->conn_family == AF_INET) ? UDP_MAXPACKET_IPV4 : 4840 UDP_MAXPACKET_IPV6; 4841 sopp.sopp_minpsz = (udp_mod_info.mi_minpsz == 1) ? 0 : 4842 udp_mod_info.mi_minpsz; 4843 4844 (*connp->conn_upcalls->su_set_proto_props)(connp->conn_upper_handle, 4845 &sopp); 4846 } 4847 4848 static void 4849 udp_do_close(conn_t *connp) 4850 { 4851 udp_t *udp; 4852 4853 ASSERT(connp != NULL && IPCL_IS_UDP(connp)); 4854 udp = connp->conn_udp; 4855 4856 if (cl_inet_unbind != NULL && udp->udp_state == TS_IDLE) { 4857 /* 4858 * Running in cluster mode - register unbind information 4859 */ 4860 if (connp->conn_ipversion == IPV4_VERSION) { 4861 (*cl_inet_unbind)( 4862 connp->conn_netstack->netstack_stackid, 4863 IPPROTO_UDP, AF_INET, 4864 (uint8_t *)(&V4_PART_OF_V6(connp->conn_laddr_v6)), 4865 (in_port_t)connp->conn_lport, NULL); 4866 } else { 4867 (*cl_inet_unbind)( 4868 connp->conn_netstack->netstack_stackid, 4869 IPPROTO_UDP, AF_INET6, 4870 (uint8_t *)&(connp->conn_laddr_v6), 4871 (in_port_t)connp->conn_lport, NULL); 4872 } 4873 } 4874 4875 mutex_enter(&connp->conn_lock); 4876 udp_bind_hash_remove(udp, B_FALSE); 4877 mutex_exit(&connp->conn_lock); 4878 4879 ip_quiesce_conn(connp); 4880 4881 if (!IPCL_IS_NONSTR(connp)) { 4882 ASSERT(connp->conn_wq != NULL); 4883 ASSERT(connp->conn_rq != NULL); 4884 qprocsoff(connp->conn_rq); 4885 } 4886 4887 udp_close_free(connp); 4888 4889 /* 4890 * Now we are truly single threaded on this stream, and can 4891 * delete the things hanging off the connp, and finally the connp. 4892 * We removed this connp from the fanout list, it cannot be 4893 * accessed thru the fanouts, and we already waited for the 4894 * conn_ref to drop to 0. We are already in close, so 4895 * there cannot be any other thread from the top. qprocsoff 4896 * has completed, and service has completed or won't run in 4897 * future. 
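 * The single remaining reference is ours: after releasing the minor
 * number (STREAMS opens) or the IP helper stream (non-STREAMS opens) we
 * drop it and let ipcl_conn_destroy() free the conn_t.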
4898 */ 4899 ASSERT(connp->conn_ref == 1); 4900 4901 if (!IPCL_IS_NONSTR(connp)) { 4902 inet_minor_free(connp->conn_minor_arena, connp->conn_dev); 4903 } else { 4904 ip_free_helper_stream(connp); 4905 } 4906 4907 connp->conn_ref--; 4908 ipcl_conn_destroy(connp); 4909 } 4910 4911 /* ARGSUSED1 */ 4912 int 4913 udp_close(sock_lower_handle_t proto_handle, int flags, cred_t *cr) 4914 { 4915 conn_t *connp = (conn_t *)proto_handle; 4916 4917 /* All Solaris components should pass a cred for this operation. */ 4918 ASSERT(cr != NULL); 4919 4920 udp_do_close(connp); 4921 return (0); 4922 } 4923 4924 static int 4925 udp_do_bind(conn_t *connp, struct sockaddr *sa, socklen_t len, cred_t *cr, 4926 boolean_t bind_to_req_port_only) 4927 { 4928 sin_t *sin; 4929 sin6_t *sin6; 4930 udp_t *udp = connp->conn_udp; 4931 int error = 0; 4932 ip_laddr_t laddr_type = IPVL_UNICAST_UP; /* INADDR_ANY */ 4933 in_port_t port; /* Host byte order */ 4934 in_port_t requested_port; /* Host byte order */ 4935 int count; 4936 ipaddr_t v4src; /* Set if AF_INET */ 4937 in6_addr_t v6src; 4938 int loopmax; 4939 udp_fanout_t *udpf; 4940 in_port_t lport; /* Network byte order */ 4941 uint_t scopeid = 0; 4942 zoneid_t zoneid = IPCL_ZONEID(connp); 4943 ip_stack_t *ipst = connp->conn_netstack->netstack_ip; 4944 boolean_t is_inaddr_any; 4945 mlp_type_t addrtype, mlptype; 4946 udp_stack_t *us = udp->udp_us; 4947 struct reuselist *reusep; 4948 4949 switch (len) { 4950 case sizeof (sin_t): /* Complete IPv4 address */ 4951 sin = (sin_t *)sa; 4952 4953 if (sin == NULL || !OK_32PTR((char *)sin)) 4954 return (EINVAL); 4955 4956 if (connp->conn_family != AF_INET || 4957 sin->sin_family != AF_INET) { 4958 return (EAFNOSUPPORT); 4959 } 4960 v4src = sin->sin_addr.s_addr; 4961 IN6_IPADDR_TO_V4MAPPED(v4src, &v6src); 4962 if (v4src != INADDR_ANY) { 4963 laddr_type = ip_laddr_verify_v4(v4src, zoneid, ipst, 4964 B_TRUE); 4965 } 4966 port = ntohs(sin->sin_port); 4967 break; 4968 4969 case sizeof (sin6_t): /* complete IPv6 address */ 4970 sin6 = (sin6_t *)sa; 4971 4972 if (sin6 == NULL || !OK_32PTR((char *)sin6)) 4973 return (EINVAL); 4974 4975 if (connp->conn_family != AF_INET6 || 4976 sin6->sin6_family != AF_INET6) { 4977 return (EAFNOSUPPORT); 4978 } 4979 v6src = sin6->sin6_addr; 4980 if (IN6_IS_ADDR_V4MAPPED(&v6src)) { 4981 if (connp->conn_ipv6_v6only) 4982 return (EADDRNOTAVAIL); 4983 4984 IN6_V4MAPPED_TO_IPADDR(&v6src, v4src); 4985 if (v4src != INADDR_ANY) { 4986 laddr_type = ip_laddr_verify_v4(v4src, 4987 zoneid, ipst, B_FALSE); 4988 } 4989 } else { 4990 if (!IN6_IS_ADDR_UNSPECIFIED(&v6src)) { 4991 if (IN6_IS_ADDR_LINKSCOPE(&v6src)) 4992 scopeid = sin6->sin6_scope_id; 4993 laddr_type = ip_laddr_verify_v6(&v6src, 4994 zoneid, ipst, B_TRUE, scopeid); 4995 } 4996 } 4997 port = ntohs(sin6->sin6_port); 4998 break; 4999 5000 default: /* Invalid request */ 5001 (void) strlog(UDP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 5002 "udp_bind: bad ADDR_length length %u", len); 5003 return (-TBADADDR); 5004 } 5005 5006 /* Is the local address a valid unicast, multicast, or broadcast? */ 5007 if (laddr_type == IPVL_BAD) 5008 return (EADDRNOTAVAIL); 5009 5010 requested_port = port; 5011 5012 if (requested_port == 0 || !bind_to_req_port_only) 5013 bind_to_req_port_only = B_FALSE; 5014 else /* T_BIND_REQ and requested_port != 0 */ 5015 bind_to_req_port_only = B_TRUE; 5016 5017 if (requested_port == 0) { 5018 /* 5019 * If the application passed in zero for the port number, it 5020 * doesn't care which port number we bind to. Get one in the 5021 * valid range. 
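 * With UDP_ANONPRIVBIND (conn_anon_priv_bind) we pick from the privileged
 * anonymous range, us_min_anonpriv_port up to IPPORT_RESERVED - 1;
 * otherwise we start from us_next_port_to_try within the normal anonymous
 * range, us_smallest_anon_port through us_largest_anon_port.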
5022 */ 5023 if (connp->conn_anon_priv_bind) { 5024 port = udp_get_next_priv_port(udp); 5025 } else { 5026 port = udp_update_next_port(udp, 5027 us->us_next_port_to_try, B_TRUE); 5028 } 5029 } else { 5030 /* 5031 * If the port is in the well-known privileged range, 5032 * make sure the caller was privileged. 5033 */ 5034 int i; 5035 boolean_t priv = B_FALSE; 5036 5037 if (port < us->us_smallest_nonpriv_port) { 5038 priv = B_TRUE; 5039 } else { 5040 for (i = 0; i < us->us_num_epriv_ports; i++) { 5041 if (port == us->us_epriv_ports[i]) { 5042 priv = B_TRUE; 5043 break; 5044 } 5045 } 5046 } 5047 5048 if (priv) { 5049 if (secpolicy_net_privaddr(cr, port, IPPROTO_UDP) != 0) 5050 return (-TACCES); 5051 } 5052 } 5053 5054 if (port == 0) 5055 return (-TNOADDR); 5056 5057 /* 5058 * Get some memory we might need later on for reuseport; do it now to 5059 * avoid KM_SLEEP while holding a lock. 5060 */ 5061 reusep = kmem_zalloc(sizeof (*reusep), KM_SLEEP); 5062 5063 mutex_enter(&connp->conn_lock); 5064 5065 if (!connp->conn_reuseport) { 5066 kmem_free(reusep, sizeof (*reusep)); 5067 reusep = NULL; 5068 } 5069 5070 /* 5071 * The state must be TS_UNBND. TPI mandates that users must send 5072 * TPI primitives only one at a time and wait for the response before 5073 * sending the next primitive. 5074 */ 5075 if (udp->udp_state != TS_UNBND) { 5076 mutex_exit(&connp->conn_lock); 5077 if (reusep != NULL) 5078 kmem_free(reusep, sizeof (*reusep)); 5079 (void) strlog(UDP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE, 5080 "udp_bind: bad state, %u", udp->udp_state); 5081 return (-TOUTSTATE); 5082 } 5083 /* 5084 * Copy the source address into our udp structure. This address 5085 * may still be zero; if so, IP will fill in the correct address 5086 * each time an outbound packet is passed to it. Since the udp is 5087 * not yet in the bind hash list, we don't grab the uf_lock to 5088 * change conn_ipversion. 5089 */ 5090 if (connp->conn_family == AF_INET) { 5091 ASSERT(sin != NULL); 5092 ASSERT(connp->conn_ixa->ixa_flags & IXAF_IS_IPV4); 5093 } else { 5094 if (IN6_IS_ADDR_V4MAPPED(&v6src)) { 5095 /* 5096 * No need to hold the uf_lock to set the conn_ipversion 5097 * since we are not yet in the fanout list. 5098 */ 5099 connp->conn_ipversion = IPV4_VERSION; 5100 connp->conn_ixa->ixa_flags |= IXAF_IS_IPV4; 5101 } else { 5102 connp->conn_ipversion = IPV6_VERSION; 5103 connp->conn_ixa->ixa_flags &= ~IXAF_IS_IPV4; 5104 } 5105 } 5106 5107 /* 5108 * If conn_reuseaddr is not set, then we have to make sure that 5109 * the IP address and port number the application requested 5110 * (or we selected for the application) is not being used by 5111 * another stream. If another stream is already using the 5112 * requested IP address and port, the behavior depends on 5113 * "bind_to_req_port_only". If set the bind fails; otherwise we 5114 * search for an unused port to bind to the stream. 5115 * 5116 * As per the BSD semantics, as modified by the Deering multicast 5117 * changes, if conn_reuseaddr is set, then we allow multiple binds 5118 * to the same port independent of the local IP address. 5119 * 5120 * This is slightly different from SunOS 4.X, which did not 5121 * support IP multicast. Note that the change implemented by the 5122 * Deering multicast code affects all binds - not only binding 5123 * to IP multicast addresses. 5124 * 5125 * Note that when binding to port zero we ignore SO_REUSEADDR in 5126 * order to guarantee a unique port.
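 *
 * The loop below walks the fanout bucket for the candidate port. A
 * conflict fails immediately with TADDRBUSY if the caller asked for this
 * exact port (bind_to_req_port_only) or if SO_REUSEPORT was requested but
 * the existing binder does not qualify; otherwise another candidate port
 * is tried, and we give up with TNOADDR after loopmax attempts.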
5127 */ 5128 5129 count = 0; 5130 if (connp->conn_anon_priv_bind) { 5131 /* 5132 * loopmax = (IPPORT_RESERVED-1) - 5133 * us->us_min_anonpriv_port + 1 5134 */ 5135 loopmax = IPPORT_RESERVED - us->us_min_anonpriv_port; 5136 } else { 5137 loopmax = us->us_largest_anon_port - 5138 us->us_smallest_anon_port + 1; 5139 } 5140 5141 is_inaddr_any = V6_OR_V4_INADDR_ANY(v6src); 5142 5143 for (;;) { 5144 udp_t *udp1; 5145 boolean_t found_exclbind = B_FALSE; 5146 conn_t *connp1; 5147 5148 /* 5149 * Walk through the list of udp streams bound to the 5150 * requested port with the same IP address. 5151 */ 5152 lport = htons(port); 5153 udpf = &us->us_bind_fanout[UDP_BIND_HASH(lport, 5154 us->us_bind_fanout_size)]; 5155 mutex_enter(&udpf->uf_lock); 5156 for (udp1 = udpf->uf_udp; udp1 != NULL; 5157 udp1 = udp1->udp_bind_hash) { 5158 connp1 = udp1->udp_connp; 5159 5160 if (lport != connp1->conn_lport) 5161 continue; 5162 5163 /* 5164 * On a labeled system, we must treat bindings to ports 5165 * on shared IP addresses by sockets with MAC exemption 5166 * privilege as being in all zones, as there's 5167 * otherwise no way to identify the right receiver. 5168 */ 5169 if (!IPCL_BIND_ZONE_MATCH(connp1, connp)) 5170 continue; 5171 5172 /* 5173 * If UDP_EXCLBIND is set for either the bound or 5174 * binding endpoint, the semantics of bind 5175 * are changed according to the following chart. 5176 * 5177 * spec = specified address (v4 or v6) 5178 * unspec = unspecified address (v4 or v6) 5179 * A = specified addresses are different for endpoints 5180 * 5181 * bound bind to allowed? 5182 * ------------------------------------- 5183 * unspec unspec no 5184 * unspec spec no 5185 * spec unspec no 5186 * spec spec yes if A 5187 * 5188 * For labeled systems, SO_MAC_EXEMPT behaves the same 5189 * as UDP_EXCLBIND, except that zoneid is ignored. 5190 */ 5191 if (connp1->conn_exclbind || connp->conn_exclbind || 5192 IPCL_CONNS_MAC(udp1->udp_connp, connp)) { 5193 if (V6_OR_V4_INADDR_ANY( 5194 connp1->conn_bound_addr_v6) || 5195 is_inaddr_any || 5196 IN6_ARE_ADDR_EQUAL( 5197 &connp1->conn_bound_addr_v6, 5198 &v6src)) { 5199 found_exclbind = B_TRUE; 5200 break; 5201 } 5202 continue; 5203 } 5204 5205 /* 5206 * Check ipversion to allow IPv4 and IPv6 sockets to 5207 * have disjoint port number spaces. 5208 */ 5209 if (connp->conn_ipversion != connp1->conn_ipversion) { 5210 5211 /* 5212 * On the first time through the loop, if the user 5213 * intentionally specified a 5214 * particular port number, then ignore any 5215 * bindings of the other protocol that may 5216 * conflict. This allows the user to bind IPv6 5217 * alone and get both v4 and v6, or bind both 5218 * and get each separately. On subsequent 5219 * times through the loop, we're checking a 5220 * port that we chose (not the user) and thus 5221 * we do not allow casual duplicate bindings. 5222 */ 5223 if (count == 0 && requested_port != 0) 5224 continue; 5225 } 5226 5227 /* 5228 * No difference depending on SO_REUSEADDR. 5229 * 5230 * If the existing port is bound to a 5231 * non-wildcard IP address and 5232 * the requesting stream is bound to 5233 * a distinct (also non-wildcard) 5234 * IP address, keep going. 5235 */ 5236 if (!is_inaddr_any && 5237 !V6_OR_V4_INADDR_ANY(connp1->conn_bound_addr_v6) && 5238 !IN6_ARE_ADDR_EQUAL(&connp1->conn_laddr_v6, 5239 &v6src)) { 5240 continue; 5241 } 5242 5243 /* 5244 * If the bound conn has reuseport set and this conn requests 5245 * reuseport, check whether the creds match. If they do, 5246 * allow the bind to proceed.
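 * We remember one member of the matching group in reusep->ru_conns[0] so
 * that, if the rest of the bucket scan also passes, we can join that
 * conn's existing reuselist below instead of creating a new one.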
5247 */ 5248 if (connp->conn_reuseport && connp1->conn_reuseport) { 5249 cred_t *bcred = connp1->conn_cred; 5250 cred_t *ncred = connp->conn_cred; 5251 if (crgetuid(bcred) == crgetuid(ncred) && 5252 crgetzoneid(bcred) == crgetzoneid(ncred)) { 5253 /* just memorize one of the conns */ 5254 reusep->ru_conns[0] = connp1; 5255 reusep->ru_entries = 1; 5256 continue; 5257 } 5258 } 5259 5260 break; 5261 } 5262 5263 5264 if (!found_exclbind && 5265 (connp->conn_reuseaddr && requested_port != 0)) { 5266 if (reusep != NULL) 5267 kmem_free(reusep, sizeof (*reusep)); 5268 break; 5269 } 5270 5271 if (udp1 == NULL) { 5272 /* 5273 * No other stream has this IP address and port number 5274 * or all have reuseport set. We can use it. 5275 */ 5276 if (connp->conn_reuseport) { 5277 if (reusep->ru_entries > 0) { 5278 /* add to a present reuselist */ 5279 struct reuselist *lp = reusep-> 5280 ru_conns[0]->conn_reuselist; 5281 5282 ASSERT(lp != NULL); 5283 kmem_free(reusep, sizeof (*reusep)); 5284 if (udp_reuselist_add(lp, connp) < 0) { 5285 /* table full */ 5286 mutex_exit(&udpf->uf_lock); 5287 mutex_exit(&connp->conn_lock); 5288 return (-TADDRBUSY); 5289 } 5290 } else { 5291 /* use own new reuselist */ 5292 reusep->ru_conns[0] = connp; 5293 reusep->ru_entries = 1; 5294 mutex_init(&reusep->ru_lock, NULL, 5295 MUTEX_DEFAULT, NULL); 5296 connp->conn_reuselist = reusep; 5297 } 5298 } 5299 break; 5300 } 5301 mutex_exit(&udpf->uf_lock); 5302 5303 if (connp->conn_reuseport) { 5304 /* reject for all other cases */ 5305 mutex_exit(&connp->conn_lock); 5306 kmem_free(reusep, sizeof (*reusep)); 5307 return (-TADDRBUSY); 5308 } 5309 5310 if (bind_to_req_port_only) { 5311 /* 5312 * We get here only when requested port 5313 * is bound (and only first of the for() 5314 * loop iteration). 5315 * 5316 * The semantics of this bind request 5317 * require it to fail so we return from 5318 * the routine (and exit the loop). 5319 * 5320 */ 5321 mutex_exit(&connp->conn_lock); 5322 return (-TADDRBUSY); 5323 } 5324 5325 if (connp->conn_anon_priv_bind) { 5326 port = udp_get_next_priv_port(udp); 5327 } else { 5328 if ((count == 0) && (requested_port != 0)) { 5329 /* 5330 * If the application wants us to find 5331 * a port, get one to start with. Set 5332 * requested_port to 0, so that we will 5333 * update us->us_next_port_to_try below. 5334 */ 5335 port = udp_update_next_port(udp, 5336 us->us_next_port_to_try, B_TRUE); 5337 requested_port = 0; 5338 } else { 5339 port = udp_update_next_port(udp, port + 1, 5340 B_FALSE); 5341 } 5342 } 5343 5344 if (port == 0 || ++count >= loopmax) { 5345 /* 5346 * We've tried every possible port number and 5347 * there are none available, so send an error 5348 * to the user. 5349 */ 5350 mutex_exit(&connp->conn_lock); 5351 return (-TNOADDR); 5352 } 5353 } 5354 5355 /* 5356 * Copy the source address into our udp structure. This address 5357 * may still be zero; if so, ip_attr_connect will fill in the correct 5358 * address when a packet is about to be sent. 5359 * If we are binding to a broadcast or multicast address then 5360 * we just set the conn_bound_addr since we don't want to use 5361 * that as the source address when sending. 
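 * conn_mcbc_bind records the multicast/broadcast case so that later code
 * (sends, connect and unbind) knows conn_saddr_v6 was deliberately left
 * unspecified and a per-packet source will be chosen by IP.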
5362 */ 5363 connp->conn_bound_addr_v6 = v6src; 5364 connp->conn_laddr_v6 = v6src; 5365 if (scopeid != 0) { 5366 connp->conn_ixa->ixa_flags |= IXAF_SCOPEID_SET; 5367 connp->conn_ixa->ixa_scopeid = scopeid; 5368 connp->conn_incoming_ifindex = scopeid; 5369 } else { 5370 connp->conn_ixa->ixa_flags &= ~IXAF_SCOPEID_SET; 5371 connp->conn_incoming_ifindex = connp->conn_bound_if; 5372 } 5373 5374 switch (laddr_type) { 5375 case IPVL_UNICAST_UP: 5376 case IPVL_UNICAST_DOWN: 5377 connp->conn_saddr_v6 = v6src; 5378 connp->conn_mcbc_bind = B_FALSE; 5379 break; 5380 case IPVL_MCAST: 5381 case IPVL_BCAST: 5382 /* ip_set_destination will pick a source address later */ 5383 connp->conn_saddr_v6 = ipv6_all_zeros; 5384 connp->conn_mcbc_bind = B_TRUE; 5385 break; 5386 } 5387 5388 /* Any errors after this point should use late_error */ 5389 connp->conn_lport = lport; 5390 5391 /* 5392 * Now reset the next anonymous port if the application requested 5393 * an anonymous port, or we handed out the next anonymous port. 5394 */ 5395 if ((requested_port == 0) && (!connp->conn_anon_priv_bind)) { 5396 us->us_next_port_to_try = port + 1; 5397 } 5398 5399 /* Initialize the T_BIND_ACK. */ 5400 if (connp->conn_family == AF_INET) { 5401 sin->sin_port = connp->conn_lport; 5402 } else { 5403 sin6->sin6_port = connp->conn_lport; 5404 } 5405 udp->udp_state = TS_IDLE; 5406 udp_bind_hash_insert(udpf, udp); 5407 mutex_exit(&udpf->uf_lock); 5408 mutex_exit(&connp->conn_lock); 5409 5410 if (cl_inet_bind) { 5411 /* 5412 * Running in cluster mode - register bind information 5413 */ 5414 if (connp->conn_ipversion == IPV4_VERSION) { 5415 (*cl_inet_bind)(connp->conn_netstack->netstack_stackid, 5416 IPPROTO_UDP, AF_INET, (uint8_t *)&v4src, 5417 (in_port_t)connp->conn_lport, NULL); 5418 } else { 5419 (*cl_inet_bind)(connp->conn_netstack->netstack_stackid, 5420 IPPROTO_UDP, AF_INET6, (uint8_t *)&v6src, 5421 (in_port_t)connp->conn_lport, NULL); 5422 } 5423 } 5424 5425 mutex_enter(&connp->conn_lock); 5426 connp->conn_anon_port = (is_system_labeled() && requested_port == 0); 5427 if (is_system_labeled() && (!connp->conn_anon_port || 5428 connp->conn_anon_mlp)) { 5429 uint16_t mlpport; 5430 zone_t *zone; 5431 5432 zone = crgetzone(cr); 5433 connp->conn_mlp_type = 5434 connp->conn_recv_ancillary.crb_recvucred ? mlptBoth : 5435 mlptSingle; 5436 addrtype = tsol_mlp_addr_type( 5437 connp->conn_allzones ? ALL_ZONES : zone->zone_id, 5438 IPV6_VERSION, &v6src, us->us_netstack->netstack_ip); 5439 if (addrtype == mlptSingle) { 5440 error = -TNOADDR; 5441 mutex_exit(&connp->conn_lock); 5442 goto late_error; 5443 } 5444 mlpport = connp->conn_anon_port ? PMAPPORT : port; 5445 mlptype = tsol_mlp_port_type(zone, IPPROTO_UDP, mlpport, 5446 addrtype); 5447 5448 /* 5449 * It is a coding error to attempt to bind an MLP port 5450 * without first setting SOL_SOCKET/SCM_UCRED. 5451 */ 5452 if (mlptype != mlptSingle && 5453 connp->conn_mlp_type == mlptSingle) { 5454 error = EINVAL; 5455 mutex_exit(&connp->conn_lock); 5456 goto late_error; 5457 } 5458 5459 /* 5460 * It is an access violation to attempt to bind an MLP port 5461 * without NET_BINDMLP privilege. 
5462 */ 5463 if (mlptype != mlptSingle && 5464 secpolicy_net_bindmlp(cr) != 0) { 5465 if (connp->conn_debug) { 5466 (void) strlog(UDP_MOD_ID, 0, 1, 5467 SL_ERROR|SL_TRACE, 5468 "udp_bind: no priv for multilevel port %d", 5469 mlpport); 5470 } 5471 error = -TACCES; 5472 mutex_exit(&connp->conn_lock); 5473 goto late_error; 5474 } 5475 5476 /* 5477 * If we're specifically binding a shared IP address and the 5478 * port is MLP on shared addresses, then check to see if this 5479 * zone actually owns the MLP. Reject if not. 5480 */ 5481 if (mlptype == mlptShared && addrtype == mlptShared) { 5482 /* 5483 * No need to handle exclusive-stack zones since 5484 * ALL_ZONES only applies to the shared stack. 5485 */ 5486 zoneid_t mlpzone; 5487 5488 mlpzone = tsol_mlp_findzone(IPPROTO_UDP, 5489 htons(mlpport)); 5490 if (connp->conn_zoneid != mlpzone) { 5491 if (connp->conn_debug) { 5492 (void) strlog(UDP_MOD_ID, 0, 1, 5493 SL_ERROR|SL_TRACE, 5494 "udp_bind: attempt to bind port " 5495 "%d on shared addr in zone %d " 5496 "(should be %d)", 5497 mlpport, connp->conn_zoneid, 5498 mlpzone); 5499 } 5500 error = -TACCES; 5501 mutex_exit(&connp->conn_lock); 5502 goto late_error; 5503 } 5504 } 5505 if (connp->conn_anon_port) { 5506 error = tsol_mlp_anon(zone, mlptype, connp->conn_proto, 5507 port, B_TRUE); 5508 if (error != 0) { 5509 if (connp->conn_debug) { 5510 (void) strlog(UDP_MOD_ID, 0, 1, 5511 SL_ERROR|SL_TRACE, 5512 "udp_bind: cannot establish anon " 5513 "MLP for port %d", port); 5514 } 5515 error = -TACCES; 5516 mutex_exit(&connp->conn_lock); 5517 goto late_error; 5518 } 5519 } 5520 connp->conn_mlp_type = mlptype; 5521 } 5522 5523 /* 5524 * We create an initial header template here to make a subsequent 5525 * sendto have a starting point. Since conn_last_dst is zero the 5526 * first sendto will always follow the 'dst changed' code path. 5527 * Note that we defer massaging options and the related checksum 5528 * adjustment until we have a destination address. 
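 * udp_build_hdr_template() caches a ready-made IP + UDP header in
 * conn_ht_iphc; udp_prepend_header_template() later copies it in front of
 * each datagram and only the fields that change per destination (src,
 * dst, dstport, flow label) are patched on the fast path.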
5529 */ 5530 error = udp_build_hdr_template(connp, &connp->conn_saddr_v6, 5531 &connp->conn_faddr_v6, connp->conn_fport, connp->conn_flowinfo); 5532 if (error != 0) { 5533 mutex_exit(&connp->conn_lock); 5534 goto late_error; 5535 } 5536 /* Just in case */ 5537 connp->conn_faddr_v6 = ipv6_all_zeros; 5538 connp->conn_fport = 0; 5539 connp->conn_v6lastdst = ipv6_all_zeros; 5540 mutex_exit(&connp->conn_lock); 5541 5542 error = ip_laddr_fanout_insert(connp); 5543 if (error != 0) 5544 goto late_error; 5545 5546 /* Bind succeeded */ 5547 return (0); 5548 5549 late_error: 5550 /* We had already picked the port number, and then the bind failed */ 5551 mutex_enter(&connp->conn_lock); 5552 udpf = &us->us_bind_fanout[ 5553 UDP_BIND_HASH(connp->conn_lport, 5554 us->us_bind_fanout_size)]; 5555 mutex_enter(&udpf->uf_lock); 5556 connp->conn_saddr_v6 = ipv6_all_zeros; 5557 connp->conn_bound_addr_v6 = ipv6_all_zeros; 5558 connp->conn_laddr_v6 = ipv6_all_zeros; 5559 if (scopeid != 0) { 5560 connp->conn_ixa->ixa_flags &= ~IXAF_SCOPEID_SET; 5561 connp->conn_incoming_ifindex = connp->conn_bound_if; 5562 } 5563 udp->udp_state = TS_UNBND; 5564 udp_bind_hash_remove(udp, B_TRUE); 5565 connp->conn_lport = 0; 5566 mutex_exit(&udpf->uf_lock); 5567 connp->conn_anon_port = B_FALSE; 5568 connp->conn_mlp_type = mlptSingle; 5569 5570 connp->conn_v6lastdst = ipv6_all_zeros; 5571 5572 /* Restore the header that was built above - different source address */ 5573 (void) udp_build_hdr_template(connp, &connp->conn_saddr_v6, 5574 &connp->conn_faddr_v6, connp->conn_fport, connp->conn_flowinfo); 5575 mutex_exit(&connp->conn_lock); 5576 return (error); 5577 } 5578 5579 int 5580 udp_bind(sock_lower_handle_t proto_handle, struct sockaddr *sa, 5581 socklen_t len, cred_t *cr) 5582 { 5583 int error; 5584 conn_t *connp; 5585 5586 /* All Solaris components should pass a cred for this operation. */ 5587 ASSERT(cr != NULL); 5588 5589 connp = (conn_t *)proto_handle; 5590 5591 if (sa == NULL) 5592 error = udp_do_unbind(connp); 5593 else 5594 error = udp_do_bind(connp, sa, len, cr, B_TRUE); 5595 5596 if (error < 0) { 5597 if (error == -TOUTSTATE) 5598 error = EINVAL; 5599 else 5600 error = proto_tlitosyserr(-error); 5601 } 5602 5603 return (error); 5604 } 5605 5606 static int 5607 udp_implicit_bind(conn_t *connp, cred_t *cr) 5608 { 5609 sin6_t sin6addr; 5610 sin_t *sin; 5611 sin6_t *sin6; 5612 socklen_t len; 5613 int error; 5614 5615 /* All Solaris components should pass a cred for this operation. */ 5616 ASSERT(cr != NULL); 5617 5618 if (connp->conn_family == AF_INET) { 5619 len = sizeof (struct sockaddr_in); 5620 sin = (sin_t *)&sin6addr; 5621 *sin = sin_null; 5622 sin->sin_family = AF_INET; 5623 sin->sin_addr.s_addr = INADDR_ANY; 5624 } else { 5625 ASSERT(connp->conn_family == AF_INET6); 5626 len = sizeof (sin6_t); 5627 sin6 = (sin6_t *)&sin6addr; 5628 *sin6 = sin6_null; 5629 sin6->sin6_family = AF_INET6; 5630 V6_SET_ZERO(sin6->sin6_addr); 5631 } 5632 5633 error = udp_do_bind(connp, (struct sockaddr *)&sin6addr, len, 5634 cr, B_FALSE); 5635 return ((error < 0) ? proto_tlitosyserr(-error) : error); 5636 } 5637 5638 /* 5639 * This routine removes a port number association from a stream. It 5640 * is called by udp_unbind and udp_tpi_unbind. 
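 * The order is: tell the cluster hook (if any), pull the endpoint out of
 * the bind fanout under uf_lock, clear the local (and any foreign)
 * address and port, go back to TS_UNBND, rebuild the header template and
 * finally let IP know via ip_unbind().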
5641 */ 5642 static int 5643 udp_do_unbind(conn_t *connp) 5644 { 5645 udp_t *udp = connp->conn_udp; 5646 udp_fanout_t *udpf; 5647 udp_stack_t *us = udp->udp_us; 5648 5649 if (cl_inet_unbind != NULL) { 5650 /* 5651 * Running in cluster mode - register unbind information 5652 */ 5653 if (connp->conn_ipversion == IPV4_VERSION) { 5654 (*cl_inet_unbind)( 5655 connp->conn_netstack->netstack_stackid, 5656 IPPROTO_UDP, AF_INET, 5657 (uint8_t *)(&V4_PART_OF_V6(connp->conn_laddr_v6)), 5658 (in_port_t)connp->conn_lport, NULL); 5659 } else { 5660 (*cl_inet_unbind)( 5661 connp->conn_netstack->netstack_stackid, 5662 IPPROTO_UDP, AF_INET6, 5663 (uint8_t *)&(connp->conn_laddr_v6), 5664 (in_port_t)connp->conn_lport, NULL); 5665 } 5666 } 5667 5668 mutex_enter(&connp->conn_lock); 5669 /* If a bind has not been done, we can't unbind. */ 5670 if (udp->udp_state == TS_UNBND) { 5671 mutex_exit(&connp->conn_lock); 5672 return (-TOUTSTATE); 5673 } 5674 udpf = &us->us_bind_fanout[UDP_BIND_HASH(connp->conn_lport, 5675 us->us_bind_fanout_size)]; 5676 mutex_enter(&udpf->uf_lock); 5677 udp_bind_hash_remove(udp, B_TRUE); 5678 connp->conn_saddr_v6 = ipv6_all_zeros; 5679 connp->conn_bound_addr_v6 = ipv6_all_zeros; 5680 connp->conn_laddr_v6 = ipv6_all_zeros; 5681 connp->conn_mcbc_bind = B_FALSE; 5682 connp->conn_lport = 0; 5683 /* In case we were also connected */ 5684 connp->conn_faddr_v6 = ipv6_all_zeros; 5685 connp->conn_fport = 0; 5686 mutex_exit(&udpf->uf_lock); 5687 5688 connp->conn_v6lastdst = ipv6_all_zeros; 5689 udp->udp_state = TS_UNBND; 5690 5691 (void) udp_build_hdr_template(connp, &connp->conn_saddr_v6, 5692 &connp->conn_faddr_v6, connp->conn_fport, connp->conn_flowinfo); 5693 mutex_exit(&connp->conn_lock); 5694 5695 ip_unbind(connp); 5696 5697 return (0); 5698 } 5699 5700 /* 5701 * This routine associates a default destination address with the stream. 5702 */ 5703 static int 5704 udp_do_connect(conn_t *connp, const struct sockaddr *sa, socklen_t len, 5705 cred_t *cr, pid_t pid) 5706 { 5707 sin6_t *sin6; 5708 sin_t *sin; 5709 in6_addr_t v6dst; 5710 ipaddr_t v4dst; 5711 uint16_t dstport; 5712 uint32_t flowinfo; 5713 udp_fanout_t *udpf; 5714 udp_t *udp, *udp1; 5715 ushort_t ipversion; 5716 udp_stack_t *us; 5717 int error; 5718 conn_t *connp1; 5719 ip_xmit_attr_t *ixa; 5720 ip_xmit_attr_t *oldixa; 5721 uint_t scopeid = 0; 5722 uint_t srcid = 0; 5723 in6_addr_t v6src = connp->conn_saddr_v6; 5724 boolean_t v4mapped; 5725 5726 udp = connp->conn_udp; 5727 us = udp->udp_us; 5728 5729 /* 5730 * The address has been verified by the caller. 5731 */ 5732 switch (len) { 5733 default: 5734 /* 5735 * Should never happen 5736 */ 5737 return (EINVAL); 5738 5739 case sizeof (sin_t): 5740 sin = (sin_t *)sa; 5741 v4dst = sin->sin_addr.s_addr; 5742 dstport = sin->sin_port; 5743 IN6_IPADDR_TO_V4MAPPED(v4dst, &v6dst); 5744 ASSERT(connp->conn_ipversion == IPV4_VERSION); 5745 ipversion = IPV4_VERSION; 5746 break; 5747 5748 case sizeof (sin6_t): 5749 sin6 = (sin6_t *)sa; 5750 v6dst = sin6->sin6_addr; 5751 dstport = sin6->sin6_port; 5752 srcid = sin6->__sin6_src_id; 5753 v4mapped = IN6_IS_ADDR_V4MAPPED(&v6dst); 5754 if (srcid != 0 && IN6_IS_ADDR_UNSPECIFIED(&v6src)) { 5755 if (!ip_srcid_find_id(srcid, &v6src, IPCL_ZONEID(connp), 5756 v4mapped, connp->conn_netstack)) { 5757 /* Mismatched v4mapped/v6 specified by srcid. */ 5758 return (EADDRNOTAVAIL); 5759 } 5760 } 5761 if (v4mapped) { 5762 if (connp->conn_ipv6_v6only) 5763 return (EADDRNOTAVAIL); 5764 5765 /* 5766 * The destination address is a mapped IPv6 address.
			 * The bound source address must therefore be
			 * unspecified or an IPv4-mapped address as well.
			 */
			if (!IN6_IS_ADDR_UNSPECIFIED(
			    &connp->conn_bound_addr_v6) &&
			    !IN6_IS_ADDR_V4MAPPED(&connp->conn_bound_addr_v6)) {
				return (EADDRNOTAVAIL);
			}
			IN6_V4MAPPED_TO_IPADDR(&v6dst, v4dst);
			ipversion = IPV4_VERSION;
			flowinfo = 0;
		} else {
			ipversion = IPV6_VERSION;
			flowinfo = sin6->sin6_flowinfo;
			if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr))
				scopeid = sin6->sin6_scope_id;
		}
		break;
	}

	if (dstport == 0)
		return (-TBADADDR);

	/*
	 * If there is a different thread using conn_ixa then we get a new
	 * copy and cut the old one loose from conn_ixa. Otherwise we use
	 * conn_ixa and prevent any other thread from using/changing it.
	 * Once connect() is done other threads can use conn_ixa since the
	 * refcnt will be back at one.
	 * We defer updating conn_ixa until later to handle any concurrent
	 * conn_ixa_cleanup thread.
	 */
	ixa = conn_get_ixa(connp, B_FALSE);
	if (ixa == NULL)
		return (ENOMEM);

	mutex_enter(&connp->conn_lock);
	/*
	 * This udp_t must have been bound to a port already before doing
	 * a connect. Reject if a connect is in progress (we drop conn_lock
	 * during udp_do_connect).
	 */
	if (udp->udp_state == TS_UNBND || udp->udp_state == TS_WCON_CREQ) {
		mutex_exit(&connp->conn_lock);
		(void) strlog(UDP_MOD_ID, 0, 1, SL_ERROR|SL_TRACE,
		    "udp_connect: bad state, %u", udp->udp_state);
		ixa_refrele(ixa);
		return (-TOUTSTATE);
	}
	ASSERT(connp->conn_lport != 0 && udp->udp_ptpbhn != NULL);

	udpf = &us->us_bind_fanout[UDP_BIND_HASH(connp->conn_lport,
	    us->us_bind_fanout_size)];

	mutex_enter(&udpf->uf_lock);
	if (udp->udp_state == TS_DATA_XFER) {
		/* Already connected - clear out state */
		if (connp->conn_mcbc_bind)
			connp->conn_saddr_v6 = ipv6_all_zeros;
		else
			connp->conn_saddr_v6 = connp->conn_bound_addr_v6;
		connp->conn_laddr_v6 = connp->conn_bound_addr_v6;
		connp->conn_faddr_v6 = ipv6_all_zeros;
		connp->conn_fport = 0;
		udp->udp_state = TS_IDLE;
	}

	connp->conn_fport = dstport;
	connp->conn_ipversion = ipversion;
	if (ipversion == IPV4_VERSION) {
		/*
		 * Interpret a zero destination to mean loopback.
		 * Update the T_CONN_REQ (sin/sin6) since it is used to
		 * generate the T_CONN_CON.
		 */
		if (v4dst == INADDR_ANY) {
			v4dst = htonl(INADDR_LOOPBACK);
			IN6_IPADDR_TO_V4MAPPED(v4dst, &v6dst);
			if (connp->conn_family == AF_INET) {
				sin->sin_addr.s_addr = v4dst;
			} else {
				sin6->sin6_addr = v6dst;
			}
		}
		connp->conn_faddr_v6 = v6dst;
		connp->conn_flowinfo = 0;
	} else {
		ASSERT(connp->conn_ipversion == IPV6_VERSION);
		/*
		 * Interpret a zero destination to mean loopback.
		 * Update the T_CONN_REQ (sin/sin6) since it is used to
		 * generate the T_CONN_CON.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&v6dst)) {
			v6dst = ipv6_loopback;
			sin6->sin6_addr = v6dst;
		}
		connp->conn_faddr_v6 = v6dst;
		connp->conn_flowinfo = flowinfo;
	}
	mutex_exit(&udpf->uf_lock);

	/*
	 * We update our cred/cpid based on the caller of connect
	 */
	if (connp->conn_cred != cr) {
		crhold(cr);
		crfree(connp->conn_cred);
		connp->conn_cred = cr;
	}
	connp->conn_cpid = pid;
	ASSERT(!(ixa->ixa_free_flags & IXA_FREE_CRED));
	ixa->ixa_cred = cr;
	ixa->ixa_cpid = pid;
	if (is_system_labeled()) {
		/* We need to restart with a label based on the cred */
		ip_xmit_attr_restore_tsl(ixa, ixa->ixa_cred);
	}

	if (scopeid != 0) {
		ixa->ixa_flags |= IXAF_SCOPEID_SET;
		ixa->ixa_scopeid = scopeid;
		connp->conn_incoming_ifindex = scopeid;
	} else {
		ixa->ixa_flags &= ~IXAF_SCOPEID_SET;
		connp->conn_incoming_ifindex = connp->conn_bound_if;
	}
	/*
	 * conn_connect will drop conn_lock and reacquire it.
	 * To prevent a send* from messing with this udp_t while the lock
	 * is dropped we set udp_state and clear conn_v6lastdst.
	 * That will make all send* fail with EISCONN.
	 */
	connp->conn_v6lastdst = ipv6_all_zeros;
	udp->udp_state = TS_WCON_CREQ;

	error = conn_connect(connp, NULL, IPDF_ALLOW_MCBC);
	mutex_exit(&connp->conn_lock);
	if (error != 0)
		goto connect_failed;

	/*
	 * The addresses have been verified. Time to insert in
	 * the correct fanout list.
	 */
	error = ipcl_conn_insert(connp);
	if (error != 0)
		goto connect_failed;

	mutex_enter(&connp->conn_lock);
	error = udp_build_hdr_template(connp, &connp->conn_saddr_v6,
	    &connp->conn_faddr_v6, connp->conn_fport, connp->conn_flowinfo);
	if (error != 0) {
		mutex_exit(&connp->conn_lock);
		goto connect_failed;
	}

	udp->udp_state = TS_DATA_XFER;
	/* Record this as the "last" send even though we haven't sent any */
	connp->conn_v6lastdst = connp->conn_faddr_v6;
	connp->conn_lastipversion = connp->conn_ipversion;
	connp->conn_lastdstport = connp->conn_fport;
	connp->conn_lastflowinfo = connp->conn_flowinfo;
	connp->conn_lastscopeid = scopeid;
	connp->conn_lastsrcid = srcid;
	/* Also remember a source to use together with lastdst */
	connp->conn_v6lastsrc = v6src;

	oldixa = conn_replace_ixa(connp, ixa);
	mutex_exit(&connp->conn_lock);
	ixa_refrele(oldixa);

	/*
	 * We've picked a source address above. Now we can
	 * verify that the src/port/dst/port tuple is unique for all
	 * connections in TS_DATA_XFER, skipping ourselves.
	 */
	mutex_enter(&udpf->uf_lock);
	for (udp1 = udpf->uf_udp; udp1 != NULL; udp1 = udp1->udp_bind_hash) {
		if (udp1->udp_state != TS_DATA_XFER)
			continue;

		if (udp1 == udp)
			continue;

		connp1 = udp1->udp_connp;
		if (connp->conn_lport != connp1->conn_lport ||
		    connp->conn_ipversion != connp1->conn_ipversion ||
		    dstport != connp1->conn_fport ||
		    !IN6_ARE_ADDR_EQUAL(&connp->conn_laddr_v6,
		    &connp1->conn_laddr_v6) ||
		    !IN6_ARE_ADDR_EQUAL(&v6dst, &connp1->conn_faddr_v6) ||
		    !(IPCL_ZONE_MATCH(connp, connp1->conn_zoneid) ||
		    IPCL_ZONE_MATCH(connp1, connp->conn_zoneid)))
			continue;
		mutex_exit(&udpf->uf_lock);
		error = -TBADADDR;
		goto connect_failed;
	}
	if (cl_inet_connect2 != NULL) {
		CL_INET_UDP_CONNECT(connp, B_TRUE, &v6dst, dstport, error);
		if (error != 0) {
			mutex_exit(&udpf->uf_lock);
			error = -TBADADDR;
			goto connect_failed;
		}
	}
	mutex_exit(&udpf->uf_lock);

	ixa_refrele(ixa);
	return (0);

connect_failed:
	if (ixa != NULL)
		ixa_refrele(ixa);
	mutex_enter(&connp->conn_lock);
	mutex_enter(&udpf->uf_lock);
	udp->udp_state = TS_IDLE;
	connp->conn_faddr_v6 = ipv6_all_zeros;
	connp->conn_fport = 0;
	/* In case the source address was set above */
	if (connp->conn_mcbc_bind)
		connp->conn_saddr_v6 = ipv6_all_zeros;
	else
		connp->conn_saddr_v6 = connp->conn_bound_addr_v6;
	connp->conn_laddr_v6 = connp->conn_bound_addr_v6;
	mutex_exit(&udpf->uf_lock);

	connp->conn_v6lastdst = ipv6_all_zeros;
	connp->conn_flowinfo = 0;

	(void) udp_build_hdr_template(connp, &connp->conn_saddr_v6,
	    &connp->conn_faddr_v6, connp->conn_fport, connp->conn_flowinfo);
	mutex_exit(&connp->conn_lock);
	return (error);
}
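
/*
 * Illustrative example (an editorial sketch, not part of the kernel build):
 * the zero-destination handling in udp_do_connect() means that a user-level
 * connect(3SOCKET) to INADDR_ANY on an IPv4 UDP socket is treated as a
 * connect to the loopback address.  Assuming the usual <sys/socket.h>,
 * <netinet/in.h> and <string.h> environment:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct sockaddr_in dst;
 *
 *	(void) memset(&dst, 0, sizeof (dst));
 *	dst.sin_family = AF_INET;
 *	dst.sin_port = htons(7);
 *	dst.sin_addr.s_addr = INADDR_ANY;	(rewritten to loopback above)
 *	(void) connect(fd, (struct sockaddr *)&dst, sizeof (dst));
 *
 * A subsequent getpeername(3SOCKET) on fd would report 127.0.0.1, port 7.
 */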

static int
udp_connect(sock_lower_handle_t proto_handle, const struct sockaddr *sa,
    socklen_t len, sock_connid_t *id, cred_t *cr)
{
	conn_t		*connp = (conn_t *)proto_handle;
	udp_t		*udp = connp->conn_udp;
	int		error;
	boolean_t	did_bind = B_FALSE;
	pid_t		pid = curproc->p_pid;

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	if (sa == NULL) {
		/*
		 * Disconnect - make sure we are connected first.
		 */
		if (udp->udp_state != TS_DATA_XFER)
			return (EINVAL);

		error = udp_disconnect(connp);
		return (error);
	}

	error = proto_verify_ip_addr(connp->conn_family, sa, len);
	if (error != 0)
		goto done;

	/* do an implicit bind if necessary */
	if (udp->udp_state == TS_UNBND) {
		error = udp_implicit_bind(connp, cr);
		/*
		 * We could be racing with an actual bind, in which case
		 * we would see EPROTO. We cross our fingers and try
		 * to connect.
		 */
		if (!(error == 0 || error == EPROTO))
			goto done;
		did_bind = B_TRUE;
	}
	/*
	 * set SO_DGRAM_ERRIND
	 */
	connp->conn_dgram_errind = B_TRUE;

	error = udp_do_connect(connp, sa, len, cr, pid);

	if (error != 0 && did_bind) {
		int unbind_err;

		unbind_err = udp_do_unbind(connp);
		ASSERT(unbind_err == 0);
	}

	if (error == 0) {
		*id = 0;
		(*connp->conn_upcalls->su_connected)
		    (connp->conn_upper_handle, 0, NULL, -1);
	} else if (error < 0) {
		error = proto_tlitosyserr(-error);
	}

done:
	if (error != 0 && udp->udp_state == TS_DATA_XFER) {
		/*
		 * No need to hold locks to set the state: after a connect
		 * failure the socket state is undefined. We set the state
		 * only to imitate old sockfs behavior.
		 */
		udp->udp_state = TS_IDLE;
	}
	return (error);
}
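
/*
 * Illustrative example (an editorial sketch, not part of the kernel build):
 * udp_connect() performs an implicit bind when the endpoint is still in
 * TS_UNBND, so an application may connect a datagram socket without calling
 * bind(3SOCKET) first and then discover the kernel-chosen local port:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct sockaddr_in dst, local;
 *	socklen_t len = sizeof (local);
 *
 *	... fill in dst with the peer address and port ...
 *	if (connect(fd, (struct sockaddr *)&dst, sizeof (dst)) == 0) {
 *		(void) getsockname(fd, (struct sockaddr *)&local, &len);
 *		... local.sin_port now holds the implicitly bound port ...
 *	}
 *
 * When sockfs passes a null address down this path (the sa == NULL case
 * above), an existing association is dissolved via udp_disconnect() instead.
 */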

int
udp_send(sock_lower_handle_t proto_handle, mblk_t *mp, struct nmsghdr *msg,
    cred_t *cr)
{
	sin6_t		*sin6;
	sin_t		*sin = NULL;
	uint_t		srcid;
	conn_t		*connp = (conn_t *)proto_handle;
	udp_t		*udp = connp->conn_udp;
	int		error = 0;
	udp_stack_t	*us = udp->udp_us;
	ushort_t	ipversion;
	pid_t		pid = curproc->p_pid;
	ip_xmit_attr_t	*ixa;

	ASSERT(DB_TYPE(mp) == M_DATA);

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	/* do an implicit bind if necessary */
	if (udp->udp_state == TS_UNBND) {
		error = udp_implicit_bind(connp, cr);
		/*
		 * We could be racing with an actual bind, in which case
		 * we would see EPROTO. We cross our fingers and try
		 * to send.
		 */
		if (!(error == 0 || error == EPROTO)) {
			freemsg(mp);
			return (error);
		}
	}

	/* Connected? */
	if (msg->msg_name == NULL) {
		if (udp->udp_state != TS_DATA_XFER) {
			UDPS_BUMP_MIB(us, udpOutErrors);
			return (EDESTADDRREQ);
		}
		if (msg->msg_controllen != 0) {
			error = udp_output_ancillary(connp, NULL, NULL, mp,
			    NULL, msg, cr, pid);
		} else {
			error = udp_output_connected(connp, mp, cr, pid);
		}
		if (us->us_sendto_ignerr)
			return (0);
		else
			return (error);
	}
	if (udp->udp_state == TS_DATA_XFER) {
		UDPS_BUMP_MIB(us, udpOutErrors);
		return (EISCONN);
	}
	error = proto_verify_ip_addr(connp->conn_family,
	    (struct sockaddr *)msg->msg_name, msg->msg_namelen);
	if (error != 0) {
		UDPS_BUMP_MIB(us, udpOutErrors);
		return (error);
	}
	switch (connp->conn_family) {
	case AF_INET6:
		sin6 = (sin6_t *)msg->msg_name;

		srcid = sin6->__sin6_src_id;

		if (!IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			/*
			 * Destination is not an IPv4-mapped IPv6 address.
			 * Send out an IPv6 format packet.
			 */

			/*
			 * If the local address is a mapped address return
			 * an error.
			 * It would be possible to send an IPv6 packet but the
			 * response would never make it back to the application
			 * since it is bound to a mapped address.
			 */
			if (IN6_IS_ADDR_V4MAPPED(&connp->conn_saddr_v6)) {
				UDPS_BUMP_MIB(us, udpOutErrors);
				return (EADDRNOTAVAIL);
			}
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
				sin6->sin6_addr = ipv6_loopback;
			ipversion = IPV6_VERSION;
		} else {
			if (connp->conn_ipv6_v6only) {
				UDPS_BUMP_MIB(us, udpOutErrors);
				return (EADDRNOTAVAIL);
			}

			/*
			 * If the local address is not zero or a mapped address
			 * return an error. It would be possible to send an
			 * IPv4 packet but the response would never make it
			 * back to the application since it is bound to a
			 * non-mapped address.
			 */
			if (!IN6_IS_ADDR_V4MAPPED(&connp->conn_saddr_v6) &&
			    !IN6_IS_ADDR_UNSPECIFIED(&connp->conn_saddr_v6)) {
				UDPS_BUMP_MIB(us, udpOutErrors);
				return (EADDRNOTAVAIL);
			}

			if (V4_PART_OF_V6(sin6->sin6_addr) == INADDR_ANY) {
				V4_PART_OF_V6(sin6->sin6_addr) =
				    htonl(INADDR_LOOPBACK);
			}
			ipversion = IPV4_VERSION;
		}

		/*
		 * We have to allocate an ip_xmit_attr_t before we grab
		 * conn_lock, and we need to hold conn_lock once we've checked
		 * conn_same_as_last_v6 to handle concurrent send* calls on a
		 * socket.
		 */
		if (msg->msg_controllen == 0) {
			ixa = conn_get_ixa(connp, B_FALSE);
			if (ixa == NULL) {
				UDPS_BUMP_MIB(us, udpOutErrors);
				return (ENOMEM);
			}
		} else {
			ixa = NULL;
		}
		mutex_enter(&connp->conn_lock);
		if (udp->udp_delayed_error != 0) {
			sin6_t	*sin2 = (sin6_t *)&udp->udp_delayed_addr;

			error = udp->udp_delayed_error;
			udp->udp_delayed_error = 0;

			/* Compare IP address, port, and family */

			if (sin6->sin6_port == sin2->sin6_port &&
			    IN6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
			    &sin2->sin6_addr) &&
			    sin6->sin6_family == sin2->sin6_family) {
				mutex_exit(&connp->conn_lock);
				UDPS_BUMP_MIB(us, udpOutErrors);
				if (ixa != NULL)
					ixa_refrele(ixa);
				return (error);
			}
		}

		if (msg->msg_controllen != 0) {
			mutex_exit(&connp->conn_lock);
			ASSERT(ixa == NULL);
			error = udp_output_ancillary(connp, NULL, sin6, mp,
			    NULL, msg, cr, pid);
		} else if (conn_same_as_last_v6(connp, sin6) &&
		    connp->conn_lastsrcid == srcid &&
		    ipsec_outbound_policy_current(ixa)) {
			/* udp_output_lastdst drops conn_lock */
			error = udp_output_lastdst(connp, mp, cr, pid, ixa);
		} else {
			/* udp_output_newdst drops conn_lock */
			error = udp_output_newdst(connp, mp, NULL, sin6,
			    ipversion, cr, pid, ixa);
		}
		ASSERT(MUTEX_NOT_HELD(&connp->conn_lock));
		if (us->us_sendto_ignerr)
			return (0);
		else
			return (error);
	case AF_INET:
		sin = (sin_t *)msg->msg_name;

		ipversion = IPV4_VERSION;

		if (sin->sin_addr.s_addr == INADDR_ANY)
			sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);

		/*
		 * We have to allocate an ip_xmit_attr_t before we grab
		 * conn_lock, and we need to hold conn_lock once we've checked
		 * conn_same_as_last_v4 to handle concurrent send* on a socket.
		 */
		if (msg->msg_controllen == 0) {
			ixa = conn_get_ixa(connp, B_FALSE);
			if (ixa == NULL) {
				UDPS_BUMP_MIB(us, udpOutErrors);
				return (ENOMEM);
			}
		} else {
			ixa = NULL;
		}
		mutex_enter(&connp->conn_lock);
		if (udp->udp_delayed_error != 0) {
			sin_t	*sin2 = (sin_t *)&udp->udp_delayed_addr;

			error = udp->udp_delayed_error;
			udp->udp_delayed_error = 0;

			/* Compare IP address and port */

			if (sin->sin_port == sin2->sin_port &&
			    sin->sin_addr.s_addr == sin2->sin_addr.s_addr) {
				mutex_exit(&connp->conn_lock);
				UDPS_BUMP_MIB(us, udpOutErrors);
				if (ixa != NULL)
					ixa_refrele(ixa);
				return (error);
			}
		}
		if (msg->msg_controllen != 0) {
			mutex_exit(&connp->conn_lock);
			ASSERT(ixa == NULL);
			error = udp_output_ancillary(connp, sin, NULL, mp,
			    NULL, msg, cr, pid);
		} else if (conn_same_as_last_v4(connp, sin) &&
		    ipsec_outbound_policy_current(ixa)) {
			/* udp_output_lastdst drops conn_lock */
			error = udp_output_lastdst(connp, mp, cr, pid, ixa);
		} else {
			/* udp_output_newdst drops conn_lock */
			error = udp_output_newdst(connp, mp, sin, NULL,
			    ipversion, cr, pid, ixa);
		}
		ASSERT(MUTEX_NOT_HELD(&connp->conn_lock));
		if (us->us_sendto_ignerr)
			return (0);
		else
			return (error);
	default:
		return (EINVAL);
	}
}
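
/*
 * Illustrative example (an editorial sketch, not part of the kernel build):
 * udp_send() enforces the usual datagram semantics at this layer - a send
 * with no destination address (msg_name == NULL) requires a connected
 * endpoint and is otherwise rejected with EDESTADDRREQ, while a send that
 * carries an explicit destination is rejected with EISCONN once the endpoint
 * is connected.  From user level that corresponds roughly to:
 *
 *	char buf[] = "hello";
 *	struct sockaddr_in dst;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	... fill in dst ...
 *	(void) send(fd, buf, sizeof (buf), 0);		rejected, unconnected
 *	(void) sendto(fd, buf, sizeof (buf), 0,
 *	    (struct sockaddr *)&dst, sizeof (dst));	unconnected send, ok
 *	(void) connect(fd, (struct sockaddr *)&dst, sizeof (dst));
 *	(void) send(fd, buf, sizeof (buf), 0);		connected send, ok
 *	(void) sendto(fd, buf, sizeof (buf), 0,
 *	    (struct sockaddr *)&dst, sizeof (dst));	rejected, EISCONN
 *
 * Note that when us_sendto_ignerr is set, errors from the output paths
 * themselves are suppressed and 0 is returned to the caller.
 */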

int
udp_fallback(sock_lower_handle_t proto_handle, queue_t *q,
    boolean_t issocket, so_proto_quiesced_cb_t quiesced_cb,
    sock_quiesce_arg_t *arg)
{
	conn_t	*connp = (conn_t *)proto_handle;
	udp_t	*udp;
	struct T_capability_ack tca;
	struct sockaddr_in6 laddr, faddr;
	socklen_t laddrlen, faddrlen;
	short opts;
	struct stroptions *stropt;
	mblk_t *mp, *stropt_mp;
	int error;

	udp = connp->conn_udp;

	stropt_mp = allocb_wait(sizeof (*stropt), BPRI_HI, STR_NOSIG, NULL);

	/*
	 * Set up the fallback stream that was allocated
	 */
	connp->conn_dev = (dev_t)RD(q)->q_ptr;
	connp->conn_minor_arena = WR(q)->q_ptr;

	RD(q)->q_ptr = WR(q)->q_ptr = connp;

	WR(q)->q_qinfo = &udp_winit;

	connp->conn_rq = RD(q);
	connp->conn_wq = WR(q);

	/* Notify stream head about options before sending up data */
	stropt_mp->b_datap->db_type = M_SETOPTS;
	stropt_mp->b_wptr += sizeof (*stropt);
	stropt = (struct stroptions *)stropt_mp->b_rptr;
	stropt->so_flags = SO_WROFF | SO_HIWAT;
	stropt->so_wroff = connp->conn_wroff;
	stropt->so_hiwat = udp->udp_rcv_disply_hiwat;
	putnext(RD(q), stropt_mp);

	/*
	 * Free the helper stream
	 */
	ip_free_helper_stream(connp);

	if (!issocket)
		udp_use_pure_tpi(udp);

	/*
	 * Collect the information needed to sync with the sonode
	 */
	udp_do_capability_ack(udp, &tca, TC1_INFO);

	laddrlen = faddrlen = sizeof (sin6_t);
	(void) udp_getsockname((sock_lower_handle_t)connp,
	    (struct sockaddr *)&laddr, &laddrlen, CRED());
	error = udp_getpeername((sock_lower_handle_t)connp,
	    (struct sockaddr *)&faddr, &faddrlen, CRED());
	if (error != 0)
		faddrlen = 0;

	opts = 0;
	if (connp->conn_dgram_errind)
		opts |= SO_DGRAM_ERRIND;
	if (connp->conn_ixa->ixa_flags & IXAF_DONTROUTE)
		opts |= SO_DONTROUTE;

	mp = (*quiesced_cb)(connp->conn_upper_handle, arg, &tca,
	    (struct sockaddr *)&laddr, laddrlen,
	    (struct sockaddr *)&faddr, faddrlen, opts);

	mutex_enter(&udp->udp_recv_lock);
	/*
	 * Attempts to send data up during fallback will result in it being
	 * queued in udp_t. First push up the datagrams obtained from the
	 * socket, then any packets queued in udp_t.
	 */
	if (mp != NULL) {
		mp->b_next = udp->udp_fallback_queue_head;
		udp->udp_fallback_queue_head = mp;
	}
	while (udp->udp_fallback_queue_head != NULL) {
		mp = udp->udp_fallback_queue_head;
		udp->udp_fallback_queue_head = mp->b_next;
		mutex_exit(&udp->udp_recv_lock);
		mp->b_next = NULL;
		putnext(RD(q), mp);
		mutex_enter(&udp->udp_recv_lock);
	}
	udp->udp_fallback_queue_tail = udp->udp_fallback_queue_head;
	/*
	 * No longer a non-STREAMS socket
	 */
	mutex_enter(&connp->conn_lock);
	connp->conn_flags &= ~IPCL_NONSTR;
	mutex_exit(&connp->conn_lock);

	mutex_exit(&udp->udp_recv_lock);

	ASSERT(connp->conn_ref >= 1);

	return (0);
}
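
/*
 * Editorial note (an assumption, not taken from this file): udp_fallback()
 * is invoked by sockfs when a socket created in the faster non-STREAMS mode
 * has to be converted to a full TPI stream, for example when an application
 * performs a STREAMS operation such as pushing a module onto the socket:
 *
 *	#include <stropts.h>
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	(void) ioctl(fd, I_PUSH, "mymod");	"mymod" is only a placeholder
 *
 * Sockfs quiesces the socket, allocates the fallback stream, and hands its
 * queue pair to udp_fallback() together with the quiesced_cb used above to
 * retrieve any datagrams still held by the socket layer.
 */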

/* ARGSUSED3 */
int
udp_getpeername(sock_lower_handle_t proto_handle, struct sockaddr *sa,
    socklen_t *salenp, cred_t *cr)
{
	conn_t	*connp = (conn_t *)proto_handle;
	udp_t	*udp = connp->conn_udp;
	int	error;

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	mutex_enter(&connp->conn_lock);
	if (udp->udp_state != TS_DATA_XFER)
		error = ENOTCONN;
	else
		error = conn_getpeername(connp, sa, salenp);
	mutex_exit(&connp->conn_lock);
	return (error);
}

/* ARGSUSED3 */
int
udp_getsockname(sock_lower_handle_t proto_handle, struct sockaddr *sa,
    socklen_t *salenp, cred_t *cr)
{
	conn_t	*connp = (conn_t *)proto_handle;
	int	error;

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	mutex_enter(&connp->conn_lock);
	error = conn_getsockname(connp, sa, salenp);
	mutex_exit(&connp->conn_lock);
	return (error);
}

int
udp_getsockopt(sock_lower_handle_t proto_handle, int level, int option_name,
    void *optvalp, socklen_t *optlen, cred_t *cr)
{
	conn_t		*connp = (conn_t *)proto_handle;
	int		error;
	t_uscalar_t	max_optbuf_len;
	void		*optvalp_buf;
	int		len;

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	error = proto_opt_check(level, option_name, *optlen, &max_optbuf_len,
	    udp_opt_obj.odb_opt_des_arr,
	    udp_opt_obj.odb_opt_arr_cnt,
	    B_FALSE, B_TRUE, cr);
	if (error != 0) {
		if (error < 0)
			error = proto_tlitosyserr(-error);
		return (error);
	}

	optvalp_buf = kmem_alloc(max_optbuf_len, KM_SLEEP);
	len = udp_opt_get(connp, level, option_name, optvalp_buf);
	if (len == -1) {
		kmem_free(optvalp_buf, max_optbuf_len);
		return (EINVAL);
	}

	/*
	 * update optlen and copy option value
	 */
	t_uscalar_t size = MIN(len, *optlen);

	bcopy(optvalp_buf, optvalp, size);
	bcopy(&size, optlen, sizeof (size));

	kmem_free(optvalp_buf, max_optbuf_len);
	return (0);
}

int
udp_setsockopt(sock_lower_handle_t proto_handle, int level, int option_name,
    const void *optvalp, socklen_t optlen, cred_t *cr)
{
	conn_t	*connp = (conn_t *)proto_handle;
	int	error;

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	error = proto_opt_check(level, option_name, optlen, NULL,
	    udp_opt_obj.odb_opt_des_arr,
	    udp_opt_obj.odb_opt_arr_cnt,
	    B_TRUE, B_FALSE, cr);

	if (error != 0) {
		if (error < 0)
			error = proto_tlitosyserr(-error);
		return (error);
	}

	error = udp_opt_set(connp, SETFN_OPTCOM_NEGOTIATE, level, option_name,
	    optlen, (uchar_t *)optvalp, (uint_t *)&optlen, (uchar_t *)optvalp,
	    NULL, cr);

	ASSERT(error >= 0);

	return (error);
}

void
udp_clr_flowctrl(sock_lower_handle_t proto_handle)
{
	conn_t	*connp = (conn_t *)proto_handle;
	udp_t	*udp = connp->conn_udp;

	mutex_enter(&udp->udp_recv_lock);
	connp->conn_flow_cntrld = B_FALSE;
	mutex_exit(&udp->udp_recv_lock);
}

/* ARGSUSED2 */
int
udp_shutdown(sock_lower_handle_t proto_handle, int how, cred_t *cr)
{
	conn_t	*connp = (conn_t *)proto_handle;

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	/* shut down the send side */
	if (how != SHUT_RD)
		(*connp->conn_upcalls->su_opctl)(connp->conn_upper_handle,
		    SOCK_OPCTL_SHUT_SEND, 0);
	/* shut down the recv side */
	if (how != SHUT_WR)
		(*connp->conn_upcalls->su_opctl)(connp->conn_upper_handle,
		    SOCK_OPCTL_SHUT_RECV, 0);
	return (0);
}
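
/*
 * Illustrative example (an editorial sketch, not part of the kernel build):
 * the name and option downcalls above behave like their socket-level
 * counterparts; in particular udp_getpeername() fails with ENOTCONN unless
 * the endpoint is in TS_DATA_XFER, and a setsockopt/getsockopt round trip
 * goes through udp_opt_set()/udp_opt_get():
 *
 *	int rcv = 65536;
 *	socklen_t olen = sizeof (rcv);
 *	struct sockaddr_in peer;
 *	socklen_t plen = sizeof (peer);
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	(void) setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof (rcv));
 *	(void) getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, &olen);
 *	if (getpeername(fd, (struct sockaddr *)&peer, &plen) == -1)
 *		... errno is ENOTCONN until the socket is connected ...
 */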

int
udp_ioctl(sock_lower_handle_t proto_handle, int cmd, intptr_t arg,
    int mode, int32_t *rvalp, cred_t *cr)
{
	conn_t	*connp = (conn_t *)proto_handle;
	int	error;

	/* All Solaris components should pass a cred for this operation. */
	ASSERT(cr != NULL);

	/*
	 * If we don't have a helper stream then create one.
	 * ip_create_helper_stream takes care of locking the conn_t,
	 * so this check for NULL is just a performance optimization.
	 */
	if (connp->conn_helper_info == NULL) {
		udp_stack_t *us = connp->conn_udp->udp_us;

		ASSERT(us->us_ldi_ident != NULL);

		/*
		 * Create a helper stream for non-STREAMS socket.
		 */
		error = ip_create_helper_stream(connp, us->us_ldi_ident);
		if (error != 0) {
			ip0dbg(("udp_ioctl: create of IP helper stream "
			    "failed %d\n", error));
			return (error);
		}
	}

	switch (cmd) {
	case _SIOCSOCKFALLBACK:
	case TI_GETPEERNAME:
	case TI_GETMYNAME:
		ip1dbg(("udp_ioctl: cmd 0x%x on non-STREAMS socket",
		    cmd));
		error = EINVAL;
		break;
	default:
		/*
		 * Pass on to IP using helper stream
		 */
		error = ldi_ioctl(connp->conn_helper_info->iphs_handle,
		    cmd, arg, mode, cr, rvalp);
		break;
	}
	return (error);
}

/* ARGSUSED */
int
udp_accept(sock_lower_handle_t lproto_handle,
    sock_lower_handle_t eproto_handle, sock_upper_handle_t sock_handle,
    cred_t *cr)
{
	return (EOPNOTSUPP);
}

/* ARGSUSED */
int
udp_listen(sock_lower_handle_t proto_handle, int backlog, cred_t *cr)
{
	return (EOPNOTSUPP);
}

sock_downcalls_t sock_udp_downcalls = {
	udp_activate,		/* sd_activate */
	udp_accept,		/* sd_accept */
	udp_bind,		/* sd_bind */
	udp_listen,		/* sd_listen */
	udp_connect,		/* sd_connect */
	udp_getpeername,	/* sd_getpeername */
	udp_getsockname,	/* sd_getsockname */
	udp_getsockopt,		/* sd_getsockopt */
	udp_setsockopt,		/* sd_setsockopt */
	udp_send,		/* sd_send */
	NULL,			/* sd_send_uio */
	NULL,			/* sd_recv_uio */
	NULL,			/* sd_poll */
	udp_shutdown,		/* sd_shutdown */
	udp_clr_flowctrl,	/* sd_setflowctrl */
	udp_ioctl,		/* sd_ioctl */
	udp_close		/* sd_close */
};
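
/*
 * Editorial sketch (an assumption, not taken from this file): sockfs keeps a
 * pointer to sock_udp_downcalls for each non-STREAMS UDP socket and
 * dispatches socket system calls through it, conceptually along the lines
 * of:
 *
 *	sock_downcalls_t *dc = so->so_downcalls;	the sonode field names
 *	sock_lower_handle_t lh = so->so_proto_handle;	used here are assumed
 *
 *	error = (*dc->sd_connect)(lh, name, namelen, &id, cr);
 *
 * The NULL entries above mean UDP supplies no uio-based send/receive and no
 * poll downcall, so sockfs presumably falls back to its generic handling for
 * those operations.
 */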