/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/* Copyright (c) 1990 Mentat Inc. */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/dlpi.h>
#include <sys/stropts.h>
#include <sys/sysmacros.h>
#include <sys/strsubr.h>
#include <sys/strlog.h>
#include <sys/strsun.h>
#include <sys/zone.h>
#define	_SUN_TPI_VERSION 2
#include <sys/tihdr.h>
#include <sys/xti_inet.h>
#include <sys/ddi.h>
#include <sys/suntpi.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/kobj.h>
#include <sys/modctl.h>
#include <sys/atomic.h>
#include <sys/policy.h>
#include <sys/priv.h>
#include <sys/taskq.h>

#include <sys/systm.h>
#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/sdt.h>
#include <sys/socket.h>
#include <sys/vtrace.h>
#include <sys/isa_defs.h>
#include <sys/mac.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/route.h>
#include <sys/sockio.h>
#include <netinet/in.h>
#include <net/if_dl.h>

#include <inet/common.h>
#include <inet/mi.h>
#include <inet/mib2.h>
#include <inet/nd.h>
#include <inet/arp.h>
#include <inet/snmpcom.h>
#include <inet/optcom.h>
#include <inet/kstatcom.h>

#include <netinet/igmp_var.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet/sctp.h>

#include <inet/ip.h>
#include <inet/ip_impl.h>
#include <inet/ip6.h>
#include <inet/ip6_asp.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/ip_multi.h>
#include <inet/ip_if.h>
#include <inet/ip_ire.h>
#include <inet/ip_ftable.h>
#include <inet/ip_rts.h>
#include <inet/ip_ndp.h>
#include <inet/ip_listutils.h>
#include <netinet/igmp.h>
#include <netinet/ip_mroute.h>
#include <inet/ipp_common.h>

#include <net/pfkeyv2.h>
#include <inet/sadb.h>
#include <inet/ipsec_impl.h>
#include <inet/iptun/iptun_impl.h>
#include <inet/ipdrop.h>
#include <inet/ip_netinfo.h>
#include <inet/ilb_ip.h>

#include <sys/ethernet.h>
#include <net/if_types.h>
#include <sys/cpuvar.h>

#include <ipp/ipp.h>
#include <ipp/ipp_impl.h>
#include <ipp/ipgpc/ipgpc.h>

#include <sys/pattr.h>
#include <inet/ipclassifier.h>
#include <inet/sctp_ip.h>
#include <inet/sctp/sctp_impl.h>
#include <inet/udp_impl.h>
#include <inet/rawip_impl.h>
#include <inet/rts_impl.h>

#include <sys/tsol/label.h>
#include <sys/tsol/tnet.h>

#include <sys/squeue_impl.h>
#include <inet/ip_arp.h>
#include <sys/clock_impl.h>	/* For LBOLT_FASTPATH{,64} */

/*
 * Values for squeue switch:
 * IP_SQUEUE_ENTER_NODRAIN: SQ_NODRAIN
 * IP_SQUEUE_ENTER: SQ_PROCESS
 * IP_SQUEUE_FILL: SQ_FILL
 */
int ip_squeue_enter = IP_SQUEUE_ENTER;	/* Settable in /etc/system */

int ip_squeue_flag;

/*
 * Settable in /etc/system
 */
int ip_poll_normal_ms = 100;
int ip_poll_normal_ticks = 0;
int ip_modclose_ackwait_ms = 3000;

/*
 * It would be nice to have these present only in DEBUG systems, but the
 * current design of the global symbol checking logic requires them to be
 * unconditionally present.
 */
uint_t ip_thread_data;			/* TSD key for debug support */
krwlock_t ip_thread_rwlock;
list_t	ip_thread_list;

/*
 * Structure to represent a linked list of msgblks. Used by ip_snmp_ functions.
 */

struct listptr_s {
	mblk_t	*lp_head;	/* pointer to the head of the list */
	mblk_t	*lp_tail;	/* pointer to the tail of the list */
};

typedef struct listptr_s listptr_t;

/*
 * This is used by ip_snmp_get_mib2_ip_route_media and
 * ip_snmp_get_mib2_ip6_route_media to carry the lists of return data.
 */
typedef struct iproutedata_s {
	uint_t		ird_idx;
	uint_t		ird_flags;	/* see below */
	listptr_t	ird_route;	/* ipRouteEntryTable */
	listptr_t	ird_netmedia;	/* ipNetToMediaEntryTable */
	listptr_t	ird_attrs;	/* ipRouteAttributeTable */
} iproutedata_t;

/* Include ire_testhidden and IRE_IF_CLONE routes */
#define	IRD_REPORT_ALL	0x01

/*
 * Cluster specific hooks. These should be NULL when booted as a non-cluster.
 */

/*
 * Hook functions to enable cluster networking.
 * On non-clustered systems these vectors must always be NULL.
 *
 * Hook function to check whether a specified IP address is a shared IP
 * address in the cluster.
 */
int (*cl_inet_isclusterwide)(netstackid_t stack_id, uint8_t protocol,
    sa_family_t addr_family, uint8_t *laddrp, void *args) = NULL;

/*
 * Hook function to generate a cluster wide IP fragment identifier.
 */
uint32_t (*cl_inet_ipident)(netstackid_t stack_id, uint8_t protocol,
    sa_family_t addr_family, uint8_t *laddrp, uint8_t *faddrp,
    void *args) = NULL;

/*
 * Hook function to generate a cluster wide SPI.
 */
void (*cl_inet_getspi)(netstackid_t, uint8_t, uint8_t *, size_t,
    void *) = NULL;

/*
 * Hook function to verify whether the SPI is already utilized.
 */

int (*cl_inet_checkspi)(netstackid_t, uint8_t, uint32_t, void *) = NULL;

/*
 * Hook function to delete the SPI from the cluster wide repository.
 */

void (*cl_inet_deletespi)(netstackid_t, uint8_t, uint32_t, void *) = NULL;

/*
 * Hook function to inform the cluster when a packet is received on an IDLE SA.
 */

void (*cl_inet_idlesa)(netstackid_t, uint8_t, uint32_t, sa_family_t,
    in6_addr_t, in6_addr_t, void *) = NULL;

/*
 * Synchronization notes:
 *
 * IP is a fully D_MP STREAMS module/driver. Thus it does not depend on any
 * MT level protection given by STREAMS. IP uses a combination of its own
 * internal serialization mechanism and standard Solaris locking techniques.
 * The internal serialization is per phyint. This is used to serialize
 * plumbing operations, IPMP operations, most set ioctls, etc.
 *
 * Plumbing is a long sequence of operations involving message
 * exchanges between IP, ARP and device drivers. Many set ioctls are typically
 * involved in plumbing operations. A natural model is to serialize these
 * ioctls one per ill. For example plumbing of hme0 and qfe0 can go on in
 * parallel without any interference. But various set ioctls on hme0 are best
 * serialized, along with IPMP operations and processing of DLPI control
 * messages received from drivers on a per phyint basis. This serialization is
 * provided by the ipsq_t and primitives operating on this. Details can
 * be found in ip_if.c above the core primitives operating on ipsq_t.
 *
 * Lookups of an ipif or ill by a thread return a refheld ipif / ill.
 * Similarly, lookup of an ire by a thread also returns a refheld ire.
 * In addition ipif's and ill's referenced by the ire are also indirectly
 * refheld. Thus no ipif or ill can vanish as long as an ipif is refheld
 * directly or indirectly. For example an SIOCSLIFADDR ioctl that changes the
 * address of an ipif has to go through the ipsq_t. This ensures that only
 * one such exclusive operation proceeds at any time on the ipif. It then
 * waits for all refcnts associated with this ipif to come down to zero.
 * The address is changed only after the ipif has been quiesced. Then the
 * ipif is brought up again. More details are described above the comment
 * in ip_sioctl_flags.
 *
 * Packet processing is based mostly on IREs and is fully multi-threaded
 * using standard Solaris MT techniques.
 *
 * There are explicit locks in IP to handle:
 * - The ip_g_head list maintained by mi_open_link() and friends.
 *
 * - The reassembly data structures (one lock per hash bucket)
 *
 * - conn_lock is meant to protect conn_t fields. The fields actually
 *   protected by conn_lock are documented in the conn_t definition.
 *
 * - ire_lock to protect some of the fields of the ire, IRE tables
 *   (one lock per hash bucket). Refer to ip_ire.c for details.
 *
 * - ndp_g_lock and ncec_lock for protecting NCEs.
 *
 * - ill_lock protects fields of the ill and ipif. Details in ip.h
 *
 * - ill_g_lock: This is a global reader/writer lock. Protects the following
 *	* The AVL tree based global multi list of all ills.
 *	* The linked list of all ipifs of an ill
 *	* The <ipsq-xop> mapping
 *	* <ill-phyint> association
 *   Insertion/deletion of an ill in the system, insertion/deletion of an ipif
 *   into an ill, changing the <ipsq-xop> mapping of an ill, changing the
 *   <ill-phyint> assoc of an ill will all have to hold the ill_g_lock as
 *   writer for the actual duration of the insertion/deletion/change.
 *
 * - ill_lock: This is a per ill mutex.
 *   It protects some members of the ill_t struct; see ip.h for details.
 *   It also protects the <ill-phyint> assoc.
 *   It also protects the list of ipifs hanging off the ill.
 *
 * - ipsq_lock: This is a per ipsq_t mutex lock.
 *   This protects some members of the ipsq_t struct; see ip.h for details.
 *   It also protects the <ipsq-ipxop> mapping.
 *
 * - ipx_lock: This is a per ipxop_t mutex lock.
 *   This protects some members of the ipxop_t struct; see ip.h for details.
 *
 * - phyint_lock: This is a per phyint mutex lock. Protects just the
 *   phyint_flags.
 *
 * - ip_g_nd_lock: This is a global reader/writer lock.
 *   Any call to nd_load to load a new parameter to the ND table must hold
 *   the lock as writer. ND_GET/ND_SET routines that read the ND table hold
 *   the lock as reader.
 *
 * - ip_addr_avail_lock: This is used to ensure the uniqueness of IP
 *   addresses. This lock is held in ipif_up_done so that marking the ipif
 *   IPIF_UP and the uniqueness check are done atomically.
 *
 * - ill_g_usesrc_lock: This readers/writer lock protects the usesrc
 *   group list linked by ill_usesrc_grp_next. It also protects the
 *   ill_usesrc_ifindex field. It is taken as a writer when a member of the
 *   group is being added or deleted. This lock is taken as a reader when
 *   walking the list/group (e.g. to get the number of members in a usesrc
 *   group). Note that it is only necessary to take this lock if the
 *   ill_usesrc_grp_next field is changing state, i.e. from NULL to non-NULL
 *   or vice-versa. For example, it is not necessary to take this lock in
 *   the initial portion of ip_sioctl_slifusesrc or at all in
 *   ip_sioctl_flags since these operations are executed exclusively and
 *   that ensures that the "usesrc group state" cannot change. The "usesrc
 *   group state" change can happen only in the latter part of
 *   ip_sioctl_slifusesrc and in ill_delete.
 *
 * Changing <ill-phyint>, <ipsq-xop> associations:
 *
 * To change the <ill-phyint> association, the ill_g_lock must be held
 * as writer, and the ill_locks of both the v4 and v6 instance of the ill
 * must be held.
 *
 * To change the <ipsq-xop> association, the ill_g_lock must be held as
 * writer, the ipsq_lock must be held, and one must be writer on the ipsq.
 * This is only done when ills are added or removed from IPMP groups.
 *
 * To add or delete an ipif from the list of ipifs hanging off the ill,
 * ill_g_lock (writer) and ill_lock must be held and the thread must be
 * a writer on the associated ipsq.
 *
 * To add or delete an ill to the system, the ill_g_lock must be held as
 * writer and the thread must be a writer on the associated ipsq.
 *
 * To add or delete an ilm to an ill, the ill_lock must be held and the thread
 * must be a writer on the associated ipsq.
 *
 * Lock hierarchy
 *
 * Some lock hierarchy scenarios are listed below.
 *
 * ill_g_lock -> conn_lock -> ill_lock -> ipsq_lock -> ipx_lock
 * ill_g_lock -> ill_lock(s) -> phyint_lock
 * ill_g_lock -> ndp_g_lock -> ill_lock -> ncec_lock
 * ill_g_lock -> ip_addr_avail_lock
 * conn_lock -> irb_lock -> ill_lock -> ire_lock
 * ill_g_lock -> ip_g_nd_lock
 * ill_g_lock -> ips_ipmp_lock -> ill_lock -> nce_lock
 * ill_g_lock -> ndp_g_lock -> ill_lock -> ncec_lock -> nce_lock
 * arl_lock -> ill_lock
 * ips_ire_dep_lock -> irb_lock
 *
 * When more than one ill lock needs to be held, the ill locks are
 * sorted on address and acquired starting from the highest addressed lock
 * downward.
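 *
 * For illustration only, a minimal sketch of the address-ordered
 * acquisition rule described above. The helper below is hypothetical
 * (it is not part of this file) and assumes both locks are ill_t
 * ill_locks:
 */
#if 0	/* illustrative sketch, not compiled */
static void
ill_lock_pair(ill_t *a, ill_t *b)
{
	/* Acquire the higher-addressed ill_lock first, then the lower. */
	if (&a->ill_lock > &b->ill_lock) {
		mutex_enter(&a->ill_lock);
		mutex_enter(&b->ill_lock);
	} else {
		mutex_enter(&b->ill_lock);
		mutex_enter(&a->ill_lock);
	}
}
#endif
/*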
 *
 * Multicast scenarios
 * ips_ill_g_lock -> ill_mcast_lock
 * conn_ilg_lock -> ips_ill_g_lock -> ill_lock
 * ill_mcast_serializer -> ill_mcast_lock -> ips_ipmp_lock -> ill_lock
 * ill_mcast_serializer -> ill_mcast_lock -> connf_lock -> conn_lock
 * ill_mcast_serializer -> ill_mcast_lock -> conn_ilg_lock
 * ill_mcast_serializer -> ill_mcast_lock -> ips_igmp_timer_lock
 *
 * IPsec scenarios
 *
 * ipsa_lock -> ill_g_lock -> ill_lock
 * ill_g_usesrc_lock -> ill_g_lock -> ill_lock
 *
 * Trusted Solaris scenarios
 *
 * igsa_lock -> gcgrp_rwlock -> gcgrp_lock
 * igsa_lock -> gcdb_lock
 * gcgrp_rwlock -> ire_lock
 * gcgrp_rwlock -> gcdb_lock
 *
 * squeue(sq_lock), flow related (ft_lock, fe_lock) locking
 *
 * cpu_lock --> ill_lock --> sqset_lock --> sq_lock
 * sq_lock -> conn_lock -> QLOCK(q)
 * ill_lock -> ft_lock -> fe_lock
 *
 * Routing/forwarding table locking notes:
 *
 * Lock acquisition order: Radix tree lock, irb_lock.
 * Requirements:
 * i.   Walker must not hold any locks during the walker callback.
 * ii.  Walker must not see a truncated tree during the walk because of any
 *      node deletion.
 * iii. Existing code assumes ire_bucket is valid if it is non-null and is
 *      used in many places in the code to walk the irb list. Thus even if
 *      all the ires in a bucket have been deleted, we still can't free the
 *      radix node until the ires have actually been inactive'd (freed).
 *
 * Tree traversal - Need to hold the global tree lock in read mode.
 * Before dropping the global tree lock, need to increment the ire_refcnt
 * to ensure that the radix node can't be deleted.
 *
 * Tree add - Need to hold the global tree lock in write mode to add a
 * radix node. To prevent the node from being deleted, increment the
 * irb_refcnt, after the node is added to the tree. The ire itself is
 * added later while holding the irb_lock, but not the tree lock.
 *
 * Tree delete - Need to hold the global tree lock and irb_lock in write mode.
 * All associated ires must be inactive (i.e. freed), and irb_refcnt
 * must be zero.
 *
 * Walker - Increment irb_refcnt before calling the walker callback. Hold the
 * global tree lock (read mode) for traversal.
 *
 * IRE dependencies - In some cases we hold ips_ire_dep_lock across
 * ire_refrele; hence we will acquire irb_lock while holding
 * ips_ire_dep_lock.
 *
 * IPsec notes :
 *
 * IP interacts with the IPsec code (AH/ESP) by storing IPsec attributes
 * in the ip_xmit_attr_t and ip_recv_attr_t. For outbound datagrams, the
 * ip_xmit_attr_t has the information used by the IPsec code for applying
 * the right level of protection. The information initialized by IP in the
 * ip_xmit_attr_t is determined by the per-socket policy or global policy
 * in the system. For inbound datagrams, the ip_recv_attr_t starts out with
 * nothing in it. It gets filled with the right information if it goes
 * through the AH/ESP code, which happens if the incoming packet is secure.
 * The information initialized by AH/ESP is later used by IP (during fanouts
 * to ULP) to see whether the policy requirements needed by per-socket
 * policy or global policy are met or not.
 *
 * For fully connected sockets, i.e. where dst and src [addr, port] are
 * known, conn_policy_cached is set, indicating that policy has been cached.
 * conn_in_enforce_policy may or may not be set depending on whether
 * there is a global policy match or per-socket policy match.
 * Policy inheriting happens in ip_policy_set once the destination is known.
 * Once the right policy is set on the conn_t, policy cannot change for
 * this socket. This makes life simpler for TCP (UDP ?) where
 * re-transmissions go out with the same policy. For symmetry, policy
 * is cached for fully connected UDP sockets also. Thus if policy is cached,
 * it also implies that policy is latched, i.e. policy cannot change
 * on these sockets. As we have the right policy on the conn, we don't
 * have to lookup global policy for every outbound and inbound datagram,
 * which serves as an optimization. Note that a global policy change
 * does not affect fully connected sockets if they have policy. If fully
 * connected sockets did not have any policy associated with them, a global
 * policy change may affect them.
 *
 * IP Flow control notes:
 * ---------------------
 * Non-TCP streams are flow controlled by IP. The way this is accomplished
 * differs when ILL_CAPAB_DLD_DIRECT is enabled for that IP instance. When
 * ILL_DIRECT_CAPABLE(ill) is TRUE, IP can do direct function calls into
 * GLDv3. Otherwise packets are sent down to lower layers using STREAMS
 * functions.
 *
 * Per Tx ring udp flow control:
 * This is applicable only when ILL_CAPAB_DLD_DIRECT capability is set in
 * the ill (i.e. ILL_DIRECT_CAPABLE(ill) is true).
 *
 * The underlying link can expose multiple Tx rings to the GLDv3 mac layer.
 * To achieve best performance, outgoing traffic needs to be fanned out
 * among these Tx rings. mac_tx() is called (via str_mdata_fastpath_put())
 * to send traffic out of the NIC and it takes a fanout hint. UDP
 * connections pass the address of connp as the fanout hint to mac_tx().
 * Under flow-controlled conditions, mac_tx() returns a non-NULL cookie
 * (ip_mac_tx_cookie_t). This cookie points to a specific Tx ring that is
 * blocked. The cookie is used to hash into an entry in the idl_tx_list[]
 * array. Each idl_tx_list_t points to drain lists (idl_t's) which store
 * the blocked UDP connp's. The drain list is not a single list but a
 * configurable number of lists.
 *
 * The diagram below shows idl_tx_list_t's and their drain_lists. ip_stack_t
 * has an array of idl_tx_list_t. The size of the array is TX_FANOUT_SIZE
 * which is equal to 128. This array in turn contains a pointer to idl_t[],
 * the ip drain list. The idl_t[] array size is MIN(max_ncpus, 8). The drain
 * list will point to the list of connp's that are flow controlled.
 *
 *                      ---------------   -------   -------   -------
 *                   |->|drain_list[0]|-->|connp|-->|connp|-->|connp|-->
 *                   |  ---------------   -------   -------   -------
 *                   |  ---------------   -------   -------   -------
 *                   |->|drain_list[1]|-->|connp|-->|connp|-->|connp|-->
 * ----------------  |  ---------------   -------   -------   -------
 * |idl_tx_list[0]|->|  ---------------   -------   -------   -------
 * ----------------  |->|drain_list[2]|-->|connp|-->|connp|-->|connp|-->
 *                   |  ---------------   -------   -------   -------
 *                   .       .              .         .         .
 *                   |  ---------------   -------   -------   -------
 *                   |->|drain_list[n]|-->|connp|-->|connp|-->|connp|-->
 *                      ---------------   -------   -------   -------
 *                      ---------------   -------   -------   -------
 *                   |->|drain_list[0]|-->|connp|-->|connp|-->|connp|-->
 *                   |  ---------------   -------   -------   -------
 *                   |  ---------------   -------   -------   -------
 * ----------------  |->|drain_list[1]|-->|connp|-->|connp|-->|connp|-->
 * |idl_tx_list[1]|->|  ---------------   -------   -------   -------
 * ----------------  |       .              .         .         .
 *                   |  ---------------   -------   -------   -------
 *                   |->|drain_list[n]|-->|connp|-->|connp|-->|connp|-->
 *                      ---------------   -------   -------   -------
 *     .....
 * ----------------
 * |idl_tx_list[n]|-> ...
 * ----------------
 *
 * When mac_tx() returns a cookie, the cookie is used to hash into an
 * idl_tx_list in the ips_idl_tx_list[] array. Then conn_drain_insert() is
 * called, passing the idl_tx_list. The connp gets inserted in a drain list
 * pointed to by the idl_tx_list. conn_drain_insert() asserts flow control
 * for the sockets (non stream based) and sets the QFULL condition on the
 * conn_wq of streams sockets, or the su_txqfull for non-streams sockets.
 * connp->conn_direct_blocked will be set to indicate the blocked
 * condition.
 *
 * GLDv3 mac layer calls ill_flow_enable() when flow control is relieved.
 * A cookie is passed in the call to ill_flow_enable() that identifies the
 * blocked Tx ring. This cookie is used to get to the idl_tx_list that
 * contains the blocked connp's. conn_walk_drain() uses the idl_tx_list_t
 * and goes through each conn in the drain list and calls conn_idl_remove
 * for the conn to clear the qfull condition for the conn, as well as to
 * remove the conn from the idl list. In addition, streams based sockets
 * will have the conn_wq enabled, causing ip_wsrv to run for the
 * conn. ip_wsrv drains the queued messages, and removes the conn from the
 * drain list, if all messages were drained. It also notifies the
 * conn_upcalls for the conn to signal that flow-control has opened up.
 *
 * In reality the drain list is not a single list, but a configurable number
 * of lists. conn_walk_drain() in the IP module notifies the conn_upcalls
 * for each conn in the list. conn_drain_insert and conn_drain_tail are the
 * only functions that manipulate this drain list. conn_drain_insert is
 * called from the protocol layer when conn_ip_output returns EWOULDBLOCK
 * (as opposed to from ip_wsrv context for the STREAMS
 * case -- see below). The synchronization between drain insertion and flow
 * control wakeup is handled by using the idl_txl->txl_lock.
 *
 * Flow control using STREAMS:
 * When ILL_DIRECT_CAPABLE() is not TRUE, the STREAMS flow control mechanism
 * is used. On the send side, if the packet cannot be sent down to the
 * driver by IP, because of a canput failure, ip_xmit drops the packet
 * and returns EWOULDBLOCK to the caller, who may then invoke
 * ixa_check_drain_insert to insert the conn on the 0'th drain list.
 * When ip_wsrv runs on the ill_wq because flow control has been relieved,
 * the blocked conns in the 0'th drain list are drained as in the
 * non-STREAMS case.
 *
 * In both the STREAMS and non-STREAMS cases, the sockfs upcall to set
 * QFULL is done when the conn is inserted into the drain list
 * (conn_drain_insert()) and cleared when the conn is removed from the drain
 * list (conn_idl_remove()).
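 *
 * As a concrete illustration of the cookie hashing described above, a
 * minimal sketch follows. The helper name and the modulo hash are
 * assumptions for illustration only, not necessarily the exact hash IP
 * uses:
 */
#if 0	/* illustrative sketch, not compiled */
static idl_tx_list_t *
tx_cookie_to_list(ip_stack_t *ipst, ip_mac_tx_cookie_t cookie)
{
	/* Hash the blocked Tx ring cookie into one of the Tx drain lists. */
	return (&ipst->ips_idl_tx_list[(uintptr_t)cookie % TX_FANOUT_SIZE]);
}
#endif
/*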
 *
 * IPQOS notes:
 *
 * IPQoS Policies are applied to packets using IPPF (IP Policy framework)
 * and IPQoS modules. IPPF includes hooks in IP at different control points
 * (callout positions) which direct packets to IPQoS modules for policy
 * processing. Policies, if present, are global.
 *
 * The callout positions are located in the following paths:
 * o local_in (packets destined for this host)
 * o local_out (packets originating from this host)
 * o fwd_in (packets forwarded by this m/c - inbound)
 * o fwd_out (packets forwarded by this m/c - outbound)
 * Hooks at these callout points can be enabled/disabled using the ndd
 * variable ip_policy_mask (a bit mask with the 4 LSB indicating the callout
 * positions). By default all the callout positions are enabled.
 *
 * Outbound (local_out)
 * Hooks are placed in ire_send_wire_v4 and ire_send_wire_v6.
 *
 * Inbound (local_in)
 * Hooks are placed in ip_fanout_v4 and ip_fanout_v6.
 *
 * Forwarding (in and out)
 * Hooks are placed in ire_recv_forward_v4/v6.
 *
 * IP Policy Framework processing (IPPF processing)
 * Policy processing for a packet is initiated by ip_process, which
 * ascertains that the classifier (ipgpc) is loaded and configured, failing
 * which the packet resumes normal processing in IP. If the classifier is
 * present, the packet is acted upon by one or more IPQoS modules (action
 * instances), per filters configured in ipgpc, and resumes normal IP
 * processing thereafter. An action instance can drop a packet in the course
 * of its processing.
 *
 * Zones notes:
 *
 * The partitioning rules for networking are as follows:
 * 1) Packets coming from a zone must have a source address belonging to that
 * zone.
 * 2) Packets coming from a zone can only be sent on a physical interface on
 * which the zone has an IP address.
 * 3) Between two zones on the same machine, packet delivery is only allowed
 * if there's a matching route for the destination and zone in the forwarding
 * table.
 * 4) The TCP and UDP port spaces are per-zone; that is, two processes in
 * different zones can bind to the same port with the wildcard address
 * (INADDR_ANY).
 *
 * The granularity of interface partitioning is at the logical interface
 * level. Therefore, every zone has its own IP addresses, and incoming
 * packets can be attributed to a zone unambiguously. A logical interface is
 * placed into a zone using the SIOCSLIFZONE ioctl; this sets the
 * ipif_zoneid field in the ipif_t structure. Rule (1) is implemented by
 * modifying the source address selection algorithm so that the list of
 * eligible addresses is filtered based on the sending process zone.
 *
 * The Internet Routing Entries (IREs) are either exclusive to a zone or
 * shared across all zones, depending on their type. Here is the break-up:
 *
 * IRE type				Shared/exclusive
 * --------				----------------
 * IRE_BROADCAST			Exclusive
 * IRE_DEFAULT (default routes)		Shared (*)
 * IRE_LOCAL				Exclusive (x)
 * IRE_LOOPBACK				Exclusive
 * IRE_PREFIX (net routes)		Shared (*)
 * IRE_IF_NORESOLVER (interface routes)	Exclusive
 * IRE_IF_RESOLVER (interface routes)	Exclusive
 * IRE_IF_CLONE (interface routes)	Exclusive
 * IRE_HOST (host routes)		Shared (*)
 *
 * (*) A zone can only use a default or off-subnet route if the gateway is
 * directly reachable from the zone, that is, if the gateway's address
 * matches one of the zone's logical interfaces.
 *
 * (x) IRE_LOCAL are handled a bit differently.
 * When ip_restrict_interzone_loopback is set (the default),
 * ire_route_recursive restricts loopback using an IRE_LOCAL
 * between zones to the case when L2 would have conceptually looped the
 * packet back, i.e. the loopback which is required since neither Ethernet
 * drivers nor Ethernet hardware loops them back. This is the case when the
 * normal routes (ignoring IREs with different zoneids) would send out the
 * packet on the same ill as the ill with which the IRE_LOCAL is associated.
 *
 * Multiple zones can share a common broadcast address; typically all zones
 * share the 255.255.255.255 address. Incoming as well as locally originated
 * broadcast packets must be dispatched to all the zones on the broadcast
 * network. For directed broadcasts (e.g. 10.16.72.255) this is not trivial
 * since some zones may not be on the 10.16.72/24 network. To handle this,
 * each zone has its own set of IRE_BROADCAST entries; then, broadcast
 * packets are sent to every zone that has an IRE_BROADCAST entry for the
 * destination address on the input ill, see ip_input_broadcast().
 *
 * Applications in different zones can join the same multicast group address.
 * The same logic applies for multicast as for broadcast. ip_input_multicast
 * dispatches packets to all zones that have members on the physical
 * interface.
 */

/*
 * Squeue Fanout flags:
 *	0: No fanout.
 *	1: Fanout across all squeues
 */
boolean_t	ip_squeue_fanout = 0;
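
/*
 * For example (an illustrative /etc/system fragment; the specific values
 * shown are assumptions, not recommendations), the squeue tunables above
 * can be set at boot time:
 *
 *	set ip:ip_squeue_enter = 3
 *	set ip:ip_squeue_fanout = 1
 */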
/*
 * Maximum dups allowed per packet.
 */
uint_t ip_max_frag_dups = 10;

/* RFC 1122 Conformance */
#define	IP_FORWARD_DEFAULT	IP_FORWARD_NEVER

#define	ILL_MAX_NAMELEN			LIFNAMSIZ

static int	ip_open(queue_t *q, dev_t *devp, int flag, int sflag,
		    cred_t *credp, boolean_t isv6);
static mblk_t	*ip_xmit_attach_llhdr(mblk_t *, nce_t *);

static boolean_t icmp_inbound_verify_v4(mblk_t *, icmph_t *, ip_recv_attr_t *);
static void	icmp_inbound_too_big_v4(icmph_t *, ip_recv_attr_t *);
static void	icmp_inbound_error_fanout_v4(mblk_t *, icmph_t *,
    ip_recv_attr_t *);
static void	icmp_options_update(ipha_t *);
static void	icmp_param_problem(mblk_t *, uint8_t, ip_recv_attr_t *);
static void	icmp_pkt(mblk_t *, void *, size_t, ip_recv_attr_t *);
static mblk_t	*icmp_pkt_err_ok(mblk_t *, ip_recv_attr_t *);
static void	icmp_redirect_v4(mblk_t *mp, ipha_t *, icmph_t *,
    ip_recv_attr_t *);
static void	icmp_send_redirect(mblk_t *, ipaddr_t, ip_recv_attr_t *);
static void	icmp_send_reply_v4(mblk_t *, ipha_t *, icmph_t *,
    ip_recv_attr_t *);

mblk_t		*ip_dlpi_alloc(size_t, t_uscalar_t);
char		*ip_dot_addr(ipaddr_t, char *);
mblk_t		*ip_carve_mp(mblk_t **, ssize_t);
int		ip_close(queue_t *, int);
static char	*ip_dot_saddr(uchar_t *, char *);
static void	ip_lrput(queue_t *, mblk_t *);
ipaddr_t	ip_net_mask(ipaddr_t);
char		*ip_nv_lookup(nv_t *, int);
static int	ip_param_get(queue_t *, mblk_t *, caddr_t, cred_t *);
static int	ip_param_generic_get(queue_t *, mblk_t *, caddr_t, cred_t *);
static boolean_t	ip_param_register(IDP *ndp, ipparam_t *, size_t,
    ipndp_t *, size_t);
static int	ip_param_set(queue_t *, mblk_t *, char *, caddr_t, cred_t *);
void		ip_rput(queue_t *, mblk_t *);
static void	ip_rput_dlpi_writer(ipsq_t *dummy_sq, queue_t *q, mblk_t *mp,
    void *dummy_arg);
int		ip_snmp_get(queue_t *, mblk_t *, int);
static mblk_t	*ip_snmp_get_mib2_ip(queue_t *, mblk_t *,
    mib2_ipIfStatsEntry_t *, ip_stack_t *);
static mblk_t	*ip_snmp_get_mib2_ip_traffic_stats(queue_t *, mblk_t *,
    ip_stack_t *);
static mblk_t	*ip_snmp_get_mib2_ip6(queue_t *, mblk_t *, ip_stack_t *);
static mblk_t	*ip_snmp_get_mib2_icmp(queue_t *, mblk_t *, ip_stack_t *ipst);
static mblk_t	*ip_snmp_get_mib2_icmp6(queue_t *, mblk_t *, ip_stack_t *ipst);
static mblk_t	*ip_snmp_get_mib2_igmp(queue_t *, mblk_t *, ip_stack_t *ipst);
static mblk_t	*ip_snmp_get_mib2_multi(queue_t *, mblk_t *, ip_stack_t *ipst);
static mblk_t	*ip_snmp_get_mib2_ip_addr(queue_t *, mblk_t *,
    ip_stack_t *ipst);
static mblk_t	*ip_snmp_get_mib2_ip6_addr(queue_t *, mblk_t *,
    ip_stack_t *ipst);
static mblk_t	*ip_snmp_get_mib2_ip_group_src(queue_t *, mblk_t *,
    ip_stack_t *ipst);
static mblk_t	*ip_snmp_get_mib2_ip6_group_src(queue_t *, mblk_t *,
    ip_stack_t *ipst);
static mblk_t	*ip_snmp_get_mib2_ip_group_mem(queue_t *, mblk_t *,
    ip_stack_t *ipst);
static mblk_t	*ip_snmp_get_mib2_ip6_group_mem(queue_t *, mblk_t *,
    ip_stack_t *ipst);
static mblk_t	*ip_snmp_get_mib2_virt_multi(queue_t *, mblk_t *,
    ip_stack_t *ipst);
static mblk_t	*ip_snmp_get_mib2_multi_rtable(queue_t *, mblk_t *,
    ip_stack_t *ipst);
static mblk_t	*ip_snmp_get_mib2_ip_route_media(queue_t *, mblk_t *, int,
    ip_stack_t *ipst);
static mblk_t	*ip_snmp_get_mib2_ip6_route_media(queue_t *, mblk_t *, int,
    ip_stack_t *ipst);
static void	ip_snmp_get2_v4(ire_t *, iproutedata_t *);
static void	ip_snmp_get2_v6_route(ire_t *, iproutedata_t *);
static int	ip_snmp_get2_v4_media(ncec_t *, iproutedata_t *);
static int	ip_snmp_get2_v6_media(ncec_t *, iproutedata_t *);
int		ip_snmp_set(queue_t *, int, int, uchar_t *, int);

static mblk_t	*ip_fragment_copyhdr(uchar_t *, int, int, ip_stack_t *,
		    mblk_t *);

static void	conn_drain_init(ip_stack_t *);
static void	conn_drain_fini(ip_stack_t *);
static void	conn_drain_tail(conn_t *connp, boolean_t closing);

static void	conn_walk_drain(ip_stack_t *, idl_tx_list_t *);
static void	conn_walk_sctp(pfv_t, void *, zoneid_t, netstack_t *);

static void	*ip_stack_init(netstackid_t stackid, netstack_t *ns);
static void	ip_stack_shutdown(netstackid_t stackid, void *arg);
static void	ip_stack_fini(netstackid_t stackid, void *arg);

static int	ip_forward_set(queue_t *, mblk_t *, char *, caddr_t, cred_t *);

static int	ip_multirt_apply_membership(int (*fn)(conn_t *, boolean_t,
    const in6_addr_t *, ipaddr_t, uint_t, mcast_record_t, const in6_addr_t *),
    ire_t *, conn_t *, boolean_t, const in6_addr_t *, mcast_record_t,
    const in6_addr_t *);

static int	ip_cgtp_filter_get(queue_t *, mblk_t *, caddr_t, cred_t *);
static int	ip_cgtp_filter_set(queue_t *, mblk_t *, char *,
    caddr_t, cred_t *);
static int	ip_input_proc_set(queue_t *q, mblk_t *mp, char *value,
    caddr_t cp, cred_t *cr);
static int	ip_int_set(queue_t *, mblk_t *, char *, caddr_t,
    cred_t *);
static int	ip_squeue_switch(int);

static void	*ip_kstat_init(netstackid_t, ip_stack_t *);
static void	ip_kstat_fini(netstackid_t, kstat_t *);
static int	ip_kstat_update(kstat_t *kp, int rw);
static void	*icmp_kstat_init(netstackid_t);
static void	icmp_kstat_fini(netstackid_t, kstat_t *);
static int	icmp_kstat_update(kstat_t *kp, int rw);
static void	*ip_kstat2_init(netstackid_t, ip_stat_t *);
static void	ip_kstat2_fini(netstackid_t, kstat_t *);

static void	ipobs_init(ip_stack_t *);
static void	ipobs_fini(ip_stack_t *);

ipaddr_t	ip_g_all_ones = IP_HOST_MASK;

/* How long, in seconds, we allow frags to hang around. */
#define	IP_FRAG_TIMEOUT		15
#define	IPV6_FRAG_TIMEOUT	60

static long ip_rput_pullups;
int	dohwcksum = 1;	/* use h/w cksum if supported by the hardware */

vmem_t *ip_minor_arena_sa; /* for minor nos. from INET_MIN_DEV+2 thru 2^^18-1 */
vmem_t *ip_minor_arena_la; /* for minor nos. from 2^^18 thru 2^^32-1 */

int	ip_debug;

/*
 * Multirouting/CGTP stuff
 */
int	ip_cgtp_filter_rev = CGTP_FILTER_REV;	/* CGTP hooks version */

/*
 * Named Dispatch Parameter Table.
 * All of these are alterable, within the min/max values given, at run time.
 */
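
/*
 * For example (illustrative usage, not part of this file), these parameters
 * can typically be inspected and changed at run time with ndd(1M):
 *
 *	ndd -get /dev/ip ip_def_ttl
 *	ndd -set /dev/ip ip_def_ttl 255
 *
 * The parameter names are the strings in the table below.
 */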
static ipparam_t	lcl_param_arr[] = {
	/* min	max	value	name */
	{  0,	1,	0,	"ip_respond_to_address_mask_broadcast"},
	{  0,	1,	1,	"ip_respond_to_echo_broadcast"},
	{  0,	1,	1,	"ip_respond_to_echo_multicast"},
	{  0,	1,	0,	"ip_respond_to_timestamp"},
	{  0,	1,	0,	"ip_respond_to_timestamp_broadcast"},
	{  0,	1,	1,	"ip_send_redirects"},
	{  0,	1,	0,	"ip_forward_directed_broadcasts"},
	{  0,	10,	0,	"ip_mrtdebug"},
	{  1,	8,	3,	"ip_ire_reclaim_fraction" },
	{  1,	8,	3,	"ip_nce_reclaim_fraction" },
	{  1,	8,	3,	"ip_dce_reclaim_fraction" },
	{  1,	255,	255,	"ip_def_ttl" },
	{  0,	1,	0,	"ip_forward_src_routed"},
	{  0,	256,	32,	"ip_wroff_extra" },
	{  2,	999999999, 60*20, "ip_pathmtu_interval" },	/* In seconds */
	{  8,	65536,	64,	"ip_icmp_return_data_bytes" },
	{  0,	1,	1,	"ip_path_mtu_discovery" },
	{  68,	65535,	576,	"ip_pmtu_min" },
	{  0,	1,	0,	"ip_ignore_redirect" },
	{  0,	1,	0,	"ip_arp_icmp_error" },
	{  1,	254,	1,	"ip_broadcast_ttl" },
	{  0,	99999,	100,	"ip_icmp_err_interval" },
	{  1,	99999,	10,	"ip_icmp_err_burst" },
	{  0,	999999999, 1000000, "ip_reass_queue_bytes" },
	{  0,	1,	0,	"ip_strict_dst_multihoming" },
	{  1,	MAX_ADDRS_PER_IF, 256, "ip_addrs_per_if"},
	{  0,	1,	0,	"ipsec_override_persocket_policy" },
	{  0,	1,	1,	"icmp_accept_clear_messages" },
	{  0,	1,	1,	"igmp_accept_clear_messages" },
	{  2,	999999999, ND_DELAY_FIRST_PROBE_TIME,
				"ip_ndp_delay_first_probe_time"},
	{  1,	999999999, ND_MAX_UNICAST_SOLICIT,
				"ip_ndp_max_unicast_solicit"},
	{  1,	255,	IPV6_MAX_HOPS,	"ip6_def_hops" },
	{  8,	IPV6_MIN_MTU,	IPV6_MIN_MTU, "ip6_icmp_return_data_bytes" },
	{  0,	1,	0,	"ip6_forward_src_routed"},
	{  0,	1,	1,	"ip6_respond_to_echo_multicast"},
	{  0,	1,	1,	"ip6_send_redirects"},
	{  0,	1,	0,	"ip6_ignore_redirect" },
	{  0,	1,	0,	"ip6_strict_dst_multihoming" },

	{  0,	2,	2,	"ip_src_check" },

	{  0,	999999,	1000,	"ipsec_policy_log_interval" },

	{  0,	1,	1,	"pim_accept_clear_messages" },
	{  1000, 20000,	2000,	"ip_ndp_unsolicit_interval" },
	{  1,	20,	3,	"ip_ndp_unsolicit_count" },
	{  0,	1,	1,	"ip6_ignore_home_address_opt" },
	{  0,	15,	0,	"ip_policy_mask" },
	{  0,	2,	2,	"ip_ecmp_behavior" },
	{  0,	255,	1,	"ip_multirt_ttl" },
	{  0,	3600,	60,	"ip_ire_badcnt_lifetime" },	/* In seconds */
	{  0,	999999,	60*60*24, "ip_max_temp_idle" },
	{  0,	1000,	1,	"ip_max_temp_defend" },
	/*
	 * when a conflict of an active address is detected,
	 * defend up to ip_max_defend times, within any
	 * ip_defend_interval span.
	 */
	{  0,	1000,	3,	"ip_max_defend" },
	{  0,	999999,	30,	"ip_defend_interval" },
	{  0,	3600000, 300000, "ip_dup_recovery" },
	{  0,	1,	1,	"ip_restrict_interzone_loopback" },
	{  0,	1,	1,	"ip_lso_outbound" },
	{  IGMP_V1_ROUTER, IGMP_V3_ROUTER, IGMP_V3_ROUTER, "igmp_max_version" },
	{  MLD_V1_ROUTER, MLD_V2_ROUTER, MLD_V2_ROUTER, "mld_max_version" },
#ifdef DEBUG
	{  0,	1,	0,	"ip6_drop_inbound_icmpv6" },
#else
	{  0,	0,	0,	"" },
#endif
	/* delay before sending first probe: */
	{  0,	20000,	1000,	"arp_probe_delay" },
	{  0,	20000,	100,	"arp_fastprobe_delay" },
	/* interval at which DAD probes are sent: */
	{  10,	20000,	1500,	"arp_probe_interval" },
	{  10,	20000,	150,	"arp_fastprobe_interval" },
	/* setting probe count to 0 will disable ARP probing for DAD. */
	{  0,	20,	3,	"arp_probe_count" },
	{  0,	20,	3,	"arp_fastprobe_count" },

	{  0,	3600000, 15000,	"ipv4_dad_announce_interval"},
	{  0,	3600000, 15000,	"ipv6_dad_announce_interval"},
	/*
	 * Rate limiting parameters for DAD defense used in
	 * ill_defend_rate_limit():
	 * defend_rate : pkts/hour permitted
	 * defend_interval : time that can elapse before we send out a
	 *			DAD defense.
	 * defend_period: denominator for defend_rate (in seconds).
	 */
	{  0,	3600000, 300000, "arp_defend_interval"},
	{  0,	20000,	100,	"arp_defend_rate"},
	{  0,	3600000, 300000, "ndp_defend_interval"},
	{  0,	20000,	100,	"ndp_defend_rate"},
	{  5,	86400,	3600,	"arp_defend_period"},
	{  5,	86400,	3600,	"ndp_defend_period"},
	{  0,	1,	1,	"ipv4_icmp_return_pmtu" },
	{  0,	1,	1,	"ipv6_icmp_return_pmtu" },
	/*
	 * publish count/interval values used to announce local addresses
	 * for IPv4, IPv6.
	 */
	{  1,	20,	5,	"ip_arp_publish_count" },
	{  1000, 20000,	2000,	"ip_arp_publish_interval" },
};

/*
 * Extended NDP table
 * The addresses for the first two are filled in to be ips_ip_g_forward
 * and ips_ipv6_forward at init time.
 */
static ipndp_t	lcl_ndp_arr[] = {
	/* getf			setf		data		name */
#define	IPNDP_IP_FORWARDING_OFFSET	0
	{  ip_param_generic_get,	ip_forward_set,	NULL,
	    "ip_forwarding" },
#define	IPNDP_IP6_FORWARDING_OFFSET	1
	{  ip_param_generic_get,	ip_forward_set,	NULL,
	    "ip6_forwarding" },
	{  ip_param_generic_get,	ip_input_proc_set,
	    (caddr_t)&ip_squeue_enter, "ip_squeue_enter" },
	{  ip_param_generic_get,	ip_int_set,
	    (caddr_t)&ip_squeue_fanout, "ip_squeue_fanout" },
#define	IPNDP_CGTP_FILTER_OFFSET	4
	{  ip_cgtp_filter_get,	ip_cgtp_filter_set,	NULL,
	    "ip_cgtp_filter" },
	{  ip_param_generic_get, ip_int_set, (caddr_t)&ip_debug,
	    "ip_debug" },
};

/*
 * Table of IP ioctls encoding the various properties of the ioctl and
 * indexed based on the last byte of the ioctl command. Occasionally there
 * is a clash, and there is more than 1 ioctl with the same last byte.
 * In such a case 1 ioctl is encoded in the ndx table and the remaining
 * ioctls are encoded in the misc table. An entry in the ndx table is
 * retrieved by indexing on the last byte of the ioctl command and comparing
 * the ioctl command with the value in the ndx table. In the event of a
 * mismatch the misc table is then searched sequentially for the desired
 * ioctl command.
 *
 * Entry: <command> <copyin_size> <flags> <cmd_type> <function> <restart_func>
 */
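
/*
 * For illustration only, a minimal sketch of the two-level lookup described
 * above: index the ndx table by the low byte of the command and, on a
 * mismatch, fall back to a linear scan of the misc table. The helper name
 * is hypothetical; it is not the function IP actually uses.
 */
#if 0	/* illustrative sketch, not compiled */
static ip_ioctl_cmd_t *
ip_ioctl_lookup(int cmd)
{
	int ndx = cmd & 0xff;
	int i;

	if (ndx < ip_ndx_ioctl_count &&
	    ip_ndx_ioctl_table[ndx].ipi_cmd == cmd)
		return (&ip_ndx_ioctl_table[ndx]);

	/* Mismatch: search the misc table sequentially. */
	for (i = 0; i < ip_misc_ioctl_count; i++) {
		if (ip_misc_ioctl_table[i].ipi_cmd == cmd)
			return (&ip_misc_ioctl_table[i]);
	}
	return (NULL);
}
#endif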
ip_ioctl_cmd_t	ip_ndx_ioctl_table[] = {
	/* 000 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 001 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 002 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 003 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 004 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 005 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 006 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 007 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 008 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 009 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 010 */ { SIOCADDRT, sizeof (struct rtentry), IPI_PRIV,
			MISC_CMD, ip_siocaddrt, NULL },
	/* 011 */ { SIOCDELRT, sizeof (struct rtentry), IPI_PRIV,
			MISC_CMD, ip_siocdelrt, NULL },

	/* 012 */ { SIOCSIFADDR, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
			IF_CMD, ip_sioctl_addr, ip_sioctl_addr_restart },
	/* 013 */ { SIOCGIFADDR, sizeof (struct ifreq), IPI_GET_CMD,
			IF_CMD, ip_sioctl_get_addr, NULL },

	/* 014 */ { SIOCSIFDSTADDR, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
			IF_CMD, ip_sioctl_dstaddr, ip_sioctl_dstaddr_restart },
	/* 015 */ { SIOCGIFDSTADDR, sizeof (struct ifreq),
			IPI_GET_CMD, IF_CMD, ip_sioctl_get_dstaddr, NULL },

	/* 016 */ { SIOCSIFFLAGS, sizeof (struct ifreq),
			IPI_PRIV | IPI_WR,
			IF_CMD, ip_sioctl_flags, ip_sioctl_flags_restart },
	/* 017 */ { SIOCGIFFLAGS, sizeof (struct ifreq),
			IPI_MODOK | IPI_GET_CMD,
			IF_CMD, ip_sioctl_get_flags, NULL },

	/* 018 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 019 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* copyin size cannot be coded for SIOCGIFCONF */
	/* 020 */ { O_SIOCGIFCONF, 0, IPI_GET_CMD,
			MISC_CMD, ip_sioctl_get_ifconf, NULL },

	/* 021 */ { SIOCSIFMTU, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
			IF_CMD, ip_sioctl_mtu, NULL },
	/* 022 */ { SIOCGIFMTU, sizeof (struct ifreq), IPI_GET_CMD,
			IF_CMD, ip_sioctl_get_mtu, NULL },
	/* 023 */ { SIOCGIFBRDADDR, sizeof (struct ifreq),
			IPI_GET_CMD, IF_CMD, ip_sioctl_get_brdaddr, NULL },
	/* 024 */ { SIOCSIFBRDADDR, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
			IF_CMD, ip_sioctl_brdaddr, NULL },
	/* 025 */ { SIOCGIFNETMASK, sizeof (struct ifreq),
			IPI_GET_CMD, IF_CMD, ip_sioctl_get_netmask, NULL },
	/* 026 */ { SIOCSIFNETMASK, sizeof (struct ifreq), IPI_PRIV | IPI_WR,
			IF_CMD, ip_sioctl_netmask, ip_sioctl_netmask_restart },
	/* 027 */ { SIOCGIFMETRIC, sizeof (struct ifreq),
			IPI_GET_CMD, IF_CMD, ip_sioctl_get_metric, NULL },
	/* 028 */ { SIOCSIFMETRIC, sizeof (struct ifreq), IPI_PRIV,
			IF_CMD, ip_sioctl_metric, NULL },
	/* 029 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* See 166-168 below for extended SIOC*XARP ioctls */
	/* 030 */ { SIOCSARP, sizeof (struct arpreq), IPI_PRIV | IPI_WR,
			ARP_CMD, ip_sioctl_arp, NULL },
	/* 031 */ { SIOCGARP, sizeof (struct arpreq), IPI_GET_CMD,
			ARP_CMD, ip_sioctl_arp, NULL },
	/* 032 */ { SIOCDARP, sizeof (struct arpreq), IPI_PRIV | IPI_WR,
			ARP_CMD, ip_sioctl_arp, NULL },

	/* 033 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 034 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 035 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 036 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 037 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 038 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 039 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 040 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 041 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 042 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 043 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 044 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 045 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 046 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 047 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 048 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 049 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 050 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 051 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 052 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 053 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 054 */ { IF_UNITSEL, sizeof (int), IPI_PRIV | IPI_WR | IPI_MODOK,
			MISC_CMD, if_unitsel, if_unitsel_restart },

	/* 055 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 056 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 057 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 058 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 059 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 060 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 061 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 062 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 063 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 064 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 065 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 066 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 067 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 068 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 069 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 070 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 071 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 072 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 073 */ { SIOCSIFNAME, sizeof (struct ifreq),
			IPI_PRIV | IPI_WR | IPI_MODOK,
			IF_CMD, ip_sioctl_sifname, NULL },

	/* 074 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 075 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 076 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 077 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 078 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 079 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 080 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 081 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 082 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 083 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 084 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 085 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 086 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 087 */ { SIOCGIFNUM, sizeof (int), IPI_GET_CMD,
			MISC_CMD, ip_sioctl_get_ifnum, NULL },
	/* 088 */ { SIOCGIFMUXID, sizeof (struct ifreq), IPI_GET_CMD,
			IF_CMD, ip_sioctl_get_muxid, NULL },
	/* 089 */ { SIOCSIFMUXID, sizeof (struct ifreq),
			IPI_PRIV | IPI_WR, IF_CMD, ip_sioctl_muxid, NULL },

	/* Both if and lif variants share same func */
	/* 090 */ { SIOCGIFINDEX, sizeof (struct ifreq), IPI_GET_CMD,
			IF_CMD, ip_sioctl_get_lifindex, NULL },
	/* Both if and lif variants share same func */
	/* 091 */ { SIOCSIFINDEX, sizeof (struct ifreq),
			IPI_PRIV | IPI_WR, IF_CMD, ip_sioctl_slifindex, NULL },

	/* copyin size cannot be coded for SIOCGIFCONF */
	/* 092 */ { SIOCGIFCONF, 0, IPI_GET_CMD,
			MISC_CMD, ip_sioctl_get_ifconf, NULL },
	/* 093 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 094 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 095 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 096 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 097 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 098 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 099 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 100 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 101 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 102 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 103 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 104 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 105 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 106 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 107 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 108 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 109 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 110 */ { SIOCLIFREMOVEIF, sizeof (struct lifreq),
			IPI_PRIV | IPI_WR, LIF_CMD, ip_sioctl_removeif,
			ip_sioctl_removeif_restart },
	/* 111 */ { SIOCLIFADDIF, sizeof (struct lifreq),
			IPI_GET_CMD | IPI_PRIV | IPI_WR,
			LIF_CMD, ip_sioctl_addif, NULL },
#define	SIOCLIFADDR_NDX 112
	/* 112 */ { SIOCSLIFADDR, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
			LIF_CMD, ip_sioctl_addr, ip_sioctl_addr_restart },
	/* 113 */ { SIOCGLIFADDR, sizeof (struct lifreq),
			IPI_GET_CMD, LIF_CMD, ip_sioctl_get_addr, NULL },
	/* 114 */ { SIOCSLIFDSTADDR, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
			LIF_CMD, ip_sioctl_dstaddr, ip_sioctl_dstaddr_restart },
	/* 115 */ { SIOCGLIFDSTADDR, sizeof (struct lifreq),
			IPI_GET_CMD, LIF_CMD, ip_sioctl_get_dstaddr, NULL },
	/* 116 */ { SIOCSLIFFLAGS, sizeof (struct lifreq),
			IPI_PRIV | IPI_WR,
			LIF_CMD, ip_sioctl_flags, ip_sioctl_flags_restart },
	/* 117 */ { SIOCGLIFFLAGS, sizeof (struct lifreq),
			IPI_GET_CMD | IPI_MODOK,
			LIF_CMD, ip_sioctl_get_flags, NULL },

	/* 118 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 119 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 120 */ { O_SIOCGLIFCONF, 0, IPI_GET_CMD, MISC_CMD,
			ip_sioctl_get_lifconf, NULL },
	/* 121 */ { SIOCSLIFMTU, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
			LIF_CMD, ip_sioctl_mtu, NULL },
	/* 122 */ { SIOCGLIFMTU, sizeof (struct lifreq), IPI_GET_CMD,
			LIF_CMD, ip_sioctl_get_mtu, NULL },
	/* 123 */ { SIOCGLIFBRDADDR, sizeof (struct lifreq),
			IPI_GET_CMD, LIF_CMD, ip_sioctl_get_brdaddr, NULL },
	/* 124 */ { SIOCSLIFBRDADDR, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
			LIF_CMD, ip_sioctl_brdaddr, NULL },
	/* 125 */ { SIOCGLIFNETMASK, sizeof (struct lifreq),
			IPI_GET_CMD, LIF_CMD, ip_sioctl_get_netmask, NULL },
	/* 126 */ { SIOCSLIFNETMASK, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
			LIF_CMD, ip_sioctl_netmask, ip_sioctl_netmask_restart },
	/* 127 */ { SIOCGLIFMETRIC, sizeof (struct lifreq),
			IPI_GET_CMD, LIF_CMD, ip_sioctl_get_metric, NULL },
	/* 128 */ { SIOCSLIFMETRIC, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
			LIF_CMD, ip_sioctl_metric, NULL },
	/* 129 */ { SIOCSLIFNAME, sizeof (struct lifreq),
			IPI_PRIV | IPI_WR | IPI_MODOK,
			LIF_CMD, ip_sioctl_slifname,
			ip_sioctl_slifname_restart },

	/* 130 */ { SIOCGLIFNUM, sizeof (struct lifnum), IPI_GET_CMD,
			MISC_CMD, ip_sioctl_get_lifnum, NULL },
	/* 131 */ { SIOCGLIFMUXID, sizeof (struct lifreq),
			IPI_GET_CMD, LIF_CMD, ip_sioctl_get_muxid, NULL },
	/* 132 */ { SIOCSLIFMUXID, sizeof (struct lifreq),
			IPI_PRIV | IPI_WR, LIF_CMD, ip_sioctl_muxid, NULL },
	/* 133 */ { SIOCGLIFINDEX, sizeof (struct lifreq),
			IPI_GET_CMD, LIF_CMD, ip_sioctl_get_lifindex, 0 },
	/* 134 */ { SIOCSLIFINDEX, sizeof (struct lifreq),
			IPI_PRIV | IPI_WR, LIF_CMD, ip_sioctl_slifindex, 0 },
	/* 135 */ { SIOCSLIFTOKEN, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
			LIF_CMD, ip_sioctl_token, NULL },
	/* 136 */ { SIOCGLIFTOKEN, sizeof (struct lifreq),
			IPI_GET_CMD, LIF_CMD, ip_sioctl_get_token, NULL },
	/* 137 */ { SIOCSLIFSUBNET, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
			LIF_CMD, ip_sioctl_subnet, ip_sioctl_subnet_restart },
	/* 138 */ { SIOCGLIFSUBNET, sizeof (struct lifreq),
			IPI_GET_CMD, LIF_CMD, ip_sioctl_get_subnet, NULL },
	/* 139 */ { SIOCSLIFLNKINFO, sizeof (struct lifreq), IPI_PRIV | IPI_WR,
			LIF_CMD, ip_sioctl_lnkinfo, NULL },

	/* 140 */ { SIOCGLIFLNKINFO, sizeof (struct lifreq),
			IPI_GET_CMD, LIF_CMD, ip_sioctl_get_lnkinfo, NULL },
	/* 141 */ { SIOCLIFDELND, sizeof (struct lifreq), IPI_PRIV,
			LIF_CMD, ip_siocdelndp_v6, NULL },
	/* 142 */ { SIOCLIFGETND, sizeof (struct lifreq), IPI_GET_CMD,
			LIF_CMD, ip_siocqueryndp_v6, NULL },
	/* 143 */ { SIOCLIFSETND, sizeof (struct lifreq), IPI_PRIV,
			LIF_CMD, ip_siocsetndp_v6, NULL },
	/* 144 */ { SIOCTMYADDR, sizeof (struct sioc_addrreq), IPI_GET_CMD,
			MISC_CMD, ip_sioctl_tmyaddr, NULL },
	/* 145 */ { SIOCTONLINK, sizeof (struct sioc_addrreq), IPI_GET_CMD,
			MISC_CMD, ip_sioctl_tonlink, NULL },
	/* 146 */ { SIOCTMYSITE, sizeof (struct sioc_addrreq), 0,
			MISC_CMD, ip_sioctl_tmysite, NULL },
	/* 147 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 148 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* IPSEC ioctls handled in ip_sioctl_copyin_setup itself */
	/* 149 */ { SIOCFIPSECONFIG, 0, IPI_PRIV, MISC_CMD, NULL, NULL },
	/* 150 */ { SIOCSIPSECONFIG, 0, IPI_PRIV, MISC_CMD, NULL, NULL },
	/* 151 */ { SIOCDIPSECONFIG, 0, IPI_PRIV, MISC_CMD, NULL, NULL },
	/* 152 */ { SIOCLIPSECONFIG, 0, IPI_PRIV, MISC_CMD, NULL, NULL },

	/* 153 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 154 */ { SIOCGLIFBINDING, sizeof (struct lifreq), IPI_GET_CMD,
			LIF_CMD, ip_sioctl_get_binding, NULL },
	/* 155 */ { SIOCSLIFGROUPNAME, sizeof (struct lifreq),
			IPI_PRIV | IPI_WR,
			LIF_CMD, ip_sioctl_groupname, ip_sioctl_groupname },
	/* 156 */ { SIOCGLIFGROUPNAME, sizeof (struct lifreq),
			IPI_GET_CMD, LIF_CMD, ip_sioctl_get_groupname, NULL },
	/* 157 */ { SIOCGLIFGROUPINFO, sizeof (lifgroupinfo_t),
			IPI_GET_CMD, MISC_CMD, ip_sioctl_groupinfo, NULL },

	/* Leave 158-160 unused; used to be SIOC*IFARP ioctls */
	/* 158 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 159 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 160 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* 161 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },

	/* These are handled in ip_sioctl_copyin_setup itself */
	/* 162 */ { SIOCGIP6ADDRPOLICY, 0, IPI_NULL_BCONT,
			MISC_CMD, NULL, NULL },
	/* 163 */ { SIOCSIP6ADDRPOLICY, 0, IPI_PRIV | IPI_NULL_BCONT,
			MISC_CMD, NULL, NULL },
	/* 164 */ { SIOCGDSTINFO, 0, IPI_GET_CMD, MISC_CMD, NULL, NULL },

	/* 165 */ { SIOCGLIFCONF, 0, IPI_GET_CMD, MISC_CMD,
			ip_sioctl_get_lifconf, NULL },

	/* 166 */ { SIOCSXARP, sizeof (struct xarpreq), IPI_PRIV | IPI_WR,
			XARP_CMD, ip_sioctl_arp, NULL },
	/* 167 */ { SIOCGXARP, sizeof (struct xarpreq), IPI_GET_CMD,
			XARP_CMD, ip_sioctl_arp, NULL },
	/* 168 */ { SIOCDXARP, sizeof (struct xarpreq), IPI_PRIV | IPI_WR,
			XARP_CMD, ip_sioctl_arp, NULL },

	/* SIOCPOPSOCKFS is not handled by IP */
	/* 169 */ { IPI_DONTCARE /* SIOCPOPSOCKFS */, 0, 0, 0, NULL, NULL },

	/* 170 */ { SIOCGLIFZONE, sizeof (struct lifreq),
			IPI_GET_CMD, LIF_CMD, ip_sioctl_get_lifzone, NULL },
	/* 171 */ { SIOCSLIFZONE, sizeof (struct lifreq),
			IPI_PRIV | IPI_WR, LIF_CMD, ip_sioctl_slifzone,
			ip_sioctl_slifzone_restart },
	/* 172-174 are SCTP ioctls and not handled by IP */
	/* 172 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 173 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 174 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* 175 */ { SIOCGLIFUSESRC, sizeof (struct lifreq),
			IPI_GET_CMD, LIF_CMD,
			ip_sioctl_get_lifusesrc, 0 },
	/* 176 */ { SIOCSLIFUSESRC, sizeof (struct lifreq),
			IPI_PRIV | IPI_WR,
			LIF_CMD, ip_sioctl_slifusesrc,
			NULL },
	/* 177 */ { SIOCGLIFSRCOF, 0, IPI_GET_CMD, MISC_CMD,
			ip_sioctl_get_lifsrcof, NULL },
	/* 178 */ { SIOCGMSFILTER, sizeof (struct group_filter), IPI_GET_CMD,
			MSFILT_CMD, ip_sioctl_msfilter, NULL },
	/* 179 */ { SIOCSMSFILTER, sizeof (struct group_filter), 0,
			MSFILT_CMD, ip_sioctl_msfilter, NULL },
	/* 180 */ { SIOCGIPMSFILTER, sizeof (struct ip_msfilter), IPI_GET_CMD,
			MSFILT_CMD, ip_sioctl_msfilter, NULL },
	/* 181 */ { SIOCSIPMSFILTER, sizeof (struct ip_msfilter), 0,
			MSFILT_CMD, ip_sioctl_msfilter, NULL },
	/* 182 */ { IPI_DONTCARE, 0, 0, 0, NULL, NULL },
	/* SIOCSENABLESDP is handled by SDP */
	/* 183 */ { IPI_DONTCARE /* SIOCSENABLESDP */, 0, 0, 0, NULL, NULL },
	/* 184 */ { IPI_DONTCARE /* SIOCSQPTR */, 0, 0, 0, NULL, NULL },
	/* 185 */ { IPI_DONTCARE /* SIOCGIFHWADDR */, 0, 0, 0, NULL, NULL },
	/* 186 */ { IPI_DONTCARE /* SIOCGSTAMP */, 0, 0, 0, NULL, NULL },
	/* 187 */ { SIOCILB, 0, IPI_PRIV | IPI_GET_CMD, MISC_CMD,
			ip_sioctl_ilb_cmd, NULL },
};

int ip_ndx_ioctl_count = sizeof (ip_ndx_ioctl_table) / sizeof (ip_ioctl_cmd_t);

ip_ioctl_cmd_t ip_misc_ioctl_table[] = {
	{ I_LINK,	0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
	{ I_UNLINK,	0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
	{ I_PLINK,	0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
	{ I_PUNLINK,	0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
	{ ND_GET,	0, 0, 0, NULL, NULL },
	{ ND_SET,	0, IPI_PRIV | IPI_WR, 0, NULL, NULL },
	{ IP_IOCTL,	0, 0, 0, NULL, NULL },
	{ SIOCGETVIFCNT, sizeof (struct sioc_vif_req), IPI_GET_CMD,
		MISC_CMD, mrt_ioctl},
	{ SIOCGETSGCNT,	sizeof (struct sioc_sg_req), IPI_GET_CMD,
		MISC_CMD, mrt_ioctl},
	{ SIOCGETLSGCNT, sizeof (struct sioc_lsg_req), IPI_GET_CMD,
		MISC_CMD, mrt_ioctl}
};

int ip_misc_ioctl_count =
    sizeof (ip_misc_ioctl_table) / sizeof (ip_ioctl_cmd_t);

int	conn_drain_nthreads;		/* Number of drainers reqd. */
int conn_drain_nthreads;	/* Number of drainers reqd. */
				/* Settable in /etc/system */
/* Defined in ip_ire.c */
extern uint32_t ip_ire_max_bucket_cnt, ip6_ire_max_bucket_cnt;
extern uint32_t ip_ire_min_bucket_cnt, ip6_ire_min_bucket_cnt;
extern uint32_t ip_ire_mem_ratio, ip_ire_cpu_ratio;

static nv_t	ire_nv_arr[] = {
	{ IRE_BROADCAST, "BROADCAST" },
	{ IRE_LOCAL, "LOCAL" },
	{ IRE_LOOPBACK, "LOOPBACK" },
	{ IRE_DEFAULT, "DEFAULT" },
	{ IRE_PREFIX, "PREFIX" },
	{ IRE_IF_NORESOLVER, "IF_NORESOL" },
	{ IRE_IF_RESOLVER, "IF_RESOLV" },
	{ IRE_IF_CLONE, "IF_CLONE" },
	{ IRE_HOST, "HOST" },
	{ IRE_MULTICAST, "MULTICAST" },
	{ IRE_NOROUTE, "NOROUTE" },
	{ 0 }
};

nv_t *ire_nv_tbl = ire_nv_arr;

/* Simple ICMP IP Header Template */
static ipha_t icmp_ipha = {
	IP_SIMPLE_HDR_VERSION, 0, 0, 0, 0, 0, IPPROTO_ICMP
};

struct module_info ip_mod_info = {
	IP_MOD_ID, IP_MOD_NAME, IP_MOD_MINPSZ, IP_MOD_MAXPSZ, IP_MOD_HIWAT,
	IP_MOD_LOWAT
};

/*
 * Duplicate static symbols within a module confuse mdb, so we avoid the
 * problem by making the symbols here distinct from those in udp.c.
 */

/*
 * Entry points for IP as a device and as a module.
 * We have separate open functions for the /dev/ip and /dev/ip6 devices.
 */
static struct qinit iprinitv4 = {
	(pfi_t)ip_rput, NULL, ip_openv4, ip_close, NULL,
	&ip_mod_info
};

struct qinit iprinitv6 = {
	(pfi_t)ip_rput_v6, NULL, ip_openv6, ip_close, NULL,
	&ip_mod_info
};

static struct qinit ipwinit = {
	(pfi_t)ip_wput_nondata, (pfi_t)ip_wsrv, NULL, NULL, NULL,
	&ip_mod_info
};

static struct qinit iplrinit = {
	(pfi_t)ip_lrput, NULL, ip_openv4, ip_close, NULL,
	&ip_mod_info
};

static struct qinit iplwinit = {
	(pfi_t)ip_lwput, NULL, NULL, NULL, NULL,
	&ip_mod_info
};

/* For AF_INET aka /dev/ip */
struct streamtab ipinfov4 = {
	&iprinitv4, &ipwinit, &iplrinit, &iplwinit
};

/* For AF_INET6 aka /dev/ip6 */
struct streamtab ipinfov6 = {
	&iprinitv6, &ipwinit, &iplrinit, &iplwinit
};

#ifdef	DEBUG
boolean_t skip_sctp_cksum = B_FALSE;
#endif

/*
 * Generate an ICMP fragmentation needed message.
 * When called from the ip_output side, a minimal ip_recv_attr_t needs to be
 * constructed by the caller.
 */
void
icmp_frag_needed(mblk_t *mp, int mtu, ip_recv_attr_t *ira)
{
	icmph_t	icmph;
	ip_stack_t	*ipst = ira->ira_ill->ill_ipst;

	mp = icmp_pkt_err_ok(mp, ira);
	if (mp == NULL)
		return;

	bzero(&icmph, sizeof (icmph_t));
	icmph.icmph_type = ICMP_DEST_UNREACHABLE;
	icmph.icmph_code = ICMP_FRAGMENTATION_NEEDED;
	icmph.icmph_du_mtu = htons((uint16_t)mtu);
	BUMP_MIB(&ipst->ips_icmp_mib, icmpOutFragNeeded);
	BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDestUnreachs);

	icmp_pkt(mp, &icmph, sizeof (icmph_t), ira);
}
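
/*
 * A minimal sketch of the contract mentioned above for output-side
 * callers; hypothetical caller code, shown only to make the contract
 * concrete (the real construction lives in callers such as
 * ire_send_wire, per the comments further below). Only the fields this
 * file dereferences (ira_ill/ira_rill, ira_zoneid, ira_tsl, ira_flags)
 * are assumed to matter:
 *
 *	ip_recv_attr_t iras;
 *
 *	bzero(&iras, sizeof (iras));
 *	iras.ira_ill = iras.ira_rill = ill;	(ill the packet would use)
 *	iras.ira_zoneid = ALL_ZONES;
 *	iras.ira_tsl = NULL;
 *	icmp_frag_needed(mp, new_mtu, &iras);	(consumes mp)
 */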

/*
 * icmp_inbound_v4 deals with ICMP messages that are handled by IP.
 * If the ICMP message is consumed by IP, i.e., it should not be delivered
 * to any IPPROTO_ICMP raw sockets, then it returns NULL.
 * Likewise, if the ICMP error is malformed (too short, etc.), then it
 * returns NULL. The caller uses this to determine whether or not to send
 * to raw sockets.
 *
 * All error messages are passed to the matching transport stream.
 *
 * The following cases are handled by icmp_inbound:
 * 1) It needs to send a reply back and possibly deliver it
 *    to the "interested" upper clients.
 * 2) Return the mblk so that the caller can pass it to the RAW socket clients.
 * 3) It needs to change some values in IP only.
 * 4) It needs to change some values in IP and upper layers, e.g., TCP,
 *    by delivering an error to the upper layers.
 *
 * We handle the above four cases in the context of IPsec in the
 * following way:
 *
 * 1) Send the reply back in the same way as the request came in.
 *    If it came in encrypted, it goes out encrypted. If it came in
 *    clear, it goes out in clear. Thus, this will prevent chosen
 *    plaintext attacks.
 * 2) The client may or may not expect things to come in secure.
 *    If it comes in secure, the policy constraints are checked
 *    before delivering it to the upper layers. If it comes in
 *    clear, ipsec_inbound_accept_clear will decide whether to
 *    accept this in clear or not. In both the cases, if the returned
 *    message (IP header + 8 bytes) that caused the icmp message has
 *    AH/ESP headers, it is sent up to AH/ESP for validation before
 *    sending up. If there are only 8 bytes of returned message, then
 *    the upper client will not be notified.
 * 3) Check with global policy to see whether it matches the constraints.
 *    But this will be done only if icmp_accept_messages_in_clear is
 *    zero.
 * 4) If we need to change both in IP and ULP, then the decision taken
 *    while affecting the values in IP and while delivering up to TCP
 *    should be the same.
 *
 *	There are two cases.
 *
 *	a) If we reject data at the IP layer (ipsec_check_global_policy()
 *	   failed), we will not deliver it to the ULP, even though they
 *	   are *willing* to accept in *clear*. This is fine as our global
 *	   disposition to icmp messages asks us to reject the datagram.
 *
 *	b) If we accept data at the IP layer (ipsec_check_global_policy()
 *	   succeeded or icmp_accept_messages_in_clear is 1), but are not
 *	   able to deliver it to the ULP (policy failed), it can lead to
 *	   consistency problems. The cases known at this time are
 *	   ICMP_DESTINATION_UNREACHABLE messages with the following code
 *	   values:
 *
 *	   - ICMP_FRAGMENTATION_NEEDED : IP adapts to the new value
 *	     and the upper layer rejects. Then the communication will
 *	     come to a stop. This is solved by making similar decisions
 *	     at both levels. Currently, when we are unable to deliver
 *	     to the upper layer (due to policy failures) while IP has
 *	     adjusted dce_pmtu, the next outbound datagram would
 *	     generate a local ICMP_FRAGMENTATION_NEEDED message - which
 *	     will be with the right level of protection. Thus the right
 *	     value will be communicated even if we are not able to
 *	     communicate it when we get it from the wire initially. But
 *	     this assumes there would be at least one outbound datagram
 *	     after IP has adjusted its dce_pmtu value. To make things
 *	     simpler, we accept in clear after the validation of
 *	     AH/ESP headers.
 *
 *	   - Other ICMP ERRORS : We may not be able to deliver it to the
 *	     upper layer depending on the level of protection the upper
 *	     layer expects and the disposition in ipsec_inbound_accept_clear().
 *	     ipsec_inbound_accept_clear() decides whether a given ICMP error
 *	     should be accepted in clear when the upper layer expects secure.
 *	     Thus the communication may get aborted by some bad ICMP
 *	     packets.
 */
mblk_t *
icmp_inbound_v4(mblk_t *mp, ip_recv_attr_t *ira)
{
	icmph_t		*icmph;
	ipha_t		*ipha;		/* Outer header */
	int		ip_hdr_length;	/* Outer header length */
	boolean_t	interested;
	ipif_t		*ipif;
	uint32_t	ts;
	uint32_t	*tsp;
	timestruc_t	now;
	ill_t		*ill = ira->ira_ill;
	ip_stack_t	*ipst = ill->ill_ipst;
	zoneid_t	zoneid = ira->ira_zoneid;
	int		len_needed;
	mblk_t		*mp_ret = NULL;

	ipha = (ipha_t *)mp->b_rptr;

	BUMP_MIB(&ipst->ips_icmp_mib, icmpInMsgs);

	ip_hdr_length = ira->ira_ip_hdr_length;
	if ((mp->b_wptr - mp->b_rptr) < (ip_hdr_length + ICMPH_SIZE)) {
		if (ira->ira_pktlen < (ip_hdr_length + ICMPH_SIZE)) {
			BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
			ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
			freemsg(mp);
			return (NULL);
		}
		/* Last chance to get real. */
		ipha = ip_pullup(mp, ip_hdr_length + ICMPH_SIZE, ira);
		if (ipha == NULL) {
			BUMP_MIB(&ipst->ips_icmp_mib, icmpInErrors);
			freemsg(mp);
			return (NULL);
		}
	}

	/* The IP header will always be a multiple of four bytes */
	icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
	ip2dbg(("icmp_inbound_v4: type %d code %d\n", icmph->icmph_type,
	    icmph->icmph_code));

	/*
	 * We will set "interested" to "true" if we should pass a copy to
	 * the transport or if we handle the packet locally.
	 */
	interested = B_FALSE;
	switch (icmph->icmph_type) {
	case ICMP_ECHO_REPLY:
		BUMP_MIB(&ipst->ips_icmp_mib, icmpInEchoReps);
		break;
	case ICMP_DEST_UNREACHABLE:
		if (icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED)
			BUMP_MIB(&ipst->ips_icmp_mib, icmpInFragNeeded);
		interested = B_TRUE;	/* Pass up to transport */
		BUMP_MIB(&ipst->ips_icmp_mib, icmpInDestUnreachs);
		break;
	case ICMP_SOURCE_QUENCH:
		interested = B_TRUE;	/* Pass up to transport */
		BUMP_MIB(&ipst->ips_icmp_mib, icmpInSrcQuenchs);
		break;
	case ICMP_REDIRECT:
		if (!ipst->ips_ip_ignore_redirect)
			interested = B_TRUE;
		BUMP_MIB(&ipst->ips_icmp_mib, icmpInRedirects);
		break;
	case ICMP_ECHO_REQUEST:
		/*
		 * Whether to respond to echo requests that come in as IP
		 * broadcasts or as IP multicast is subject to debate
		 * (what isn't?). We aim to please, you pick it.
		 * Default is do it.
		 */
		if (ira->ira_flags & IRAF_MULTICAST) {
			/* multicast: respond based on tunable */
			interested = ipst->ips_ip_g_resp_to_echo_mcast;
		} else if (ira->ira_flags & IRAF_BROADCAST) {
			/* broadcast: respond based on tunable */
			interested = ipst->ips_ip_g_resp_to_echo_bcast;
		} else {
			/* unicast: always respond */
			interested = B_TRUE;
		}
		BUMP_MIB(&ipst->ips_icmp_mib, icmpInEchos);
		if (!interested) {
			/* We never pass these to RAW sockets */
			freemsg(mp);
			return (NULL);
		}

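		/*
		 * Note on the copy-on-write idiom used below (and again for
		 * the timestamp and address-mask replies): mblk data blocks
		 * are reference counted, so db_ref > 1 means another
		 * reference holder may still read the dblk and the bytes
		 * must be treated as read-only. The pattern is:
		 *
		 *	if (mp->b_datap->db_ref > 1) {
		 *		mp1 = copymsg(mp);	(private copy)
		 *		freemsg(mp);		(drop the shared one)
		 *		mp = mp1;
		 *	}
		 *
		 * after which ipha and icmph must be re-derived from the new
		 * mp; the old pointers still reference the shared dblk.
		 */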
		/* Check db_ref to make sure we can modify the packet. */
		if (mp->b_datap->db_ref > 1) {
			mblk_t	*mp1;

			mp1 = copymsg(mp);
			freemsg(mp);
			if (!mp1) {
				BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
				return (NULL);
			}
			mp = mp1;
			ipha = (ipha_t *)mp->b_rptr;
			icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
		}
		icmph->icmph_type = ICMP_ECHO_REPLY;
		BUMP_MIB(&ipst->ips_icmp_mib, icmpOutEchoReps);
		icmp_send_reply_v4(mp, ipha, icmph, ira);
		return (NULL);

	case ICMP_ROUTER_ADVERTISEMENT:
	case ICMP_ROUTER_SOLICITATION:
		break;
	case ICMP_TIME_EXCEEDED:
		interested = B_TRUE;	/* Pass up to transport */
		BUMP_MIB(&ipst->ips_icmp_mib, icmpInTimeExcds);
		break;
	case ICMP_PARAM_PROBLEM:
		interested = B_TRUE;	/* Pass up to transport */
		BUMP_MIB(&ipst->ips_icmp_mib, icmpInParmProbs);
		break;
	case ICMP_TIME_STAMP_REQUEST:
		/* Response to Time Stamp Requests is local policy. */
		if (ipst->ips_ip_g_resp_to_timestamp) {
			if (ira->ira_flags & IRAF_MULTIBROADCAST)
				interested =
				    ipst->ips_ip_g_resp_to_timestamp_bcast;
			else
				interested = B_TRUE;
		}
		if (!interested) {
			/* We never pass these to RAW sockets */
			freemsg(mp);
			return (NULL);
		}

		/* Make sure we have enough of the packet */
		len_needed = ip_hdr_length + ICMPH_SIZE +
		    3 * sizeof (uint32_t);

		if (mp->b_wptr - mp->b_rptr < len_needed) {
			ipha = ip_pullup(mp, len_needed, ira);
			if (ipha == NULL) {
				BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
				ip_drop_input("ipIfStatsInDiscards - ip_pullup",
				    mp, ill);
				freemsg(mp);
				return (NULL);
			}
			/* Refresh following the pullup. */
			icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
		}
		BUMP_MIB(&ipst->ips_icmp_mib, icmpInTimestamps);
		/* Check db_ref to make sure we can modify the packet. */
		if (mp->b_datap->db_ref > 1) {
			mblk_t	*mp1;

			mp1 = copymsg(mp);
			freemsg(mp);
			if (!mp1) {
				BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
				return (NULL);
			}
			mp = mp1;
			ipha = (ipha_t *)mp->b_rptr;
			icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
		}
		icmph->icmph_type = ICMP_TIME_STAMP_REPLY;
		tsp = (uint32_t *)&icmph[1];
		tsp++;		/* Skip past 'originate time' */
		/* Compute # of milliseconds since midnight */
		gethrestime(&now);
		ts = (now.tv_sec % (24 * 60 * 60)) * 1000 +
		    now.tv_nsec / (NANOSEC / MILLISEC);
		*tsp++ = htonl(ts);	/* Lay in 'receive time' */
		*tsp++ = htonl(ts);	/* Lay in 'send time' */
		BUMP_MIB(&ipst->ips_icmp_mib, icmpOutTimestampReps);
		icmp_send_reply_v4(mp, ipha, icmph, ira);
		return (NULL);

	case ICMP_TIME_STAMP_REPLY:
		BUMP_MIB(&ipst->ips_icmp_mib, icmpInTimestampReps);
		break;
	case ICMP_INFO_REQUEST:
		/* Per RFC 1122 3.2.2.7, ignore this. */
	case ICMP_INFO_REPLY:
		break;
	case ICMP_ADDRESS_MASK_REQUEST:
		if (ira->ira_flags & IRAF_MULTIBROADCAST) {
			interested =
			    ipst->ips_ip_respond_to_address_mask_broadcast;
		} else {
			interested = B_TRUE;
		}
		if (!interested) {
			/* We never pass these to RAW sockets */
			freemsg(mp);
			return (NULL);
		}
		len_needed = ip_hdr_length + ICMPH_SIZE + IP_ADDR_LEN;
		if (mp->b_wptr - mp->b_rptr < len_needed) {
			ipha = ip_pullup(mp, len_needed, ira);
			if (ipha == NULL) {
				BUMP_MIB(ill->ill_ip_mib,
				    ipIfStatsInTruncatedPkts);
				ip_drop_input("ipIfStatsInTruncatedPkts", mp,
				    ill);
				freemsg(mp);
				return (NULL);
			}
			/* Refresh following the pullup. */
			icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
		}
		BUMP_MIB(&ipst->ips_icmp_mib, icmpInAddrMasks);
		/* Check db_ref to make sure we can modify the packet. */
		if (mp->b_datap->db_ref > 1) {
			mblk_t	*mp1;

			mp1 = copymsg(mp);
			freemsg(mp);
			if (!mp1) {
				BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops);
				return (NULL);
			}
			mp = mp1;
			ipha = (ipha_t *)mp->b_rptr;
			icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
		}
		/*
		 * Need the ipif with the mask to be the same as the source
		 * address of the mask reply. For unicast we have a specific
		 * ipif. For multicast/broadcast we only handle onlink
		 * senders, and use the source address to pick an ipif.
		 */
		ipif = ipif_lookup_addr(ipha->ipha_dst, ill, zoneid, ipst);
		if (ipif == NULL) {
			/* Broadcast or multicast */
			ipif = ipif_lookup_remote(ill, ipha->ipha_src, zoneid);
			if (ipif == NULL) {
				freemsg(mp);
				return (NULL);
			}
		}
		icmph->icmph_type = ICMP_ADDRESS_MASK_REPLY;
		bcopy(&ipif->ipif_net_mask, &icmph[1], IP_ADDR_LEN);
		ipif_refrele(ipif);
		BUMP_MIB(&ipst->ips_icmp_mib, icmpOutAddrMaskReps);
		icmp_send_reply_v4(mp, ipha, icmph, ira);
		return (NULL);

	case ICMP_ADDRESS_MASK_REPLY:
		BUMP_MIB(&ipst->ips_icmp_mib, icmpInAddrMaskReps);
		break;
	default:
		interested = B_TRUE;	/* Pass up to transport */
		BUMP_MIB(&ipst->ips_icmp_mib, icmpInUnknowns);
		break;
	}
	/*
	 * See if there is an ICMP client to avoid an extra copymsg/freemsg
	 * if there isn't one.
	 */
	if (ipst->ips_ipcl_proto_fanout_v4[IPPROTO_ICMP].connf_head != NULL) {
		/* If there is an ICMP client and we want one too, copy it. */

		if (!interested) {
			/* Caller will deliver to RAW sockets */
			return (mp);
		}
		mp_ret = copymsg(mp);
		if (mp_ret == NULL) {
			BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
			ip_drop_input("ipIfStatsInDiscards - copymsg", mp, ill);
		}
	} else if (!interested) {
		/* Neither we nor raw sockets are interested. Drop packet now */
		freemsg(mp);
		return (NULL);
	}

	/*
	 * ICMP error or redirect packet. Make sure we have enough of
	 * the header and that db_ref == 1 since we might end up modifying
	 * the packet.
	 */
	if (mp->b_cont != NULL) {
		if (ip_pullup(mp, -1, ira) == NULL) {
			BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
			ip_drop_input("ipIfStatsInDiscards - ip_pullup",
			    mp, ill);
			freemsg(mp);
			return (mp_ret);
		}
	}

	if (mp->b_datap->db_ref > 1) {
		mblk_t	*mp1;

		mp1 = copymsg(mp);
		if (mp1 == NULL) {
			BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
			ip_drop_input("ipIfStatsInDiscards - copymsg", mp, ill);
			freemsg(mp);
			return (mp_ret);
		}
		freemsg(mp);
		mp = mp1;
	}

	/*
	 * In case mp has changed, verify the message before any further
	 * processing.
	 */
	ipha = (ipha_t *)mp->b_rptr;
	icmph = (icmph_t *)&mp->b_rptr[ip_hdr_length];
	if (!icmp_inbound_verify_v4(mp, icmph, ira)) {
		freemsg(mp);
		return (mp_ret);
	}

	switch (icmph->icmph_type) {
	case ICMP_REDIRECT:
		icmp_redirect_v4(mp, ipha, icmph, ira);
		break;
	case ICMP_DEST_UNREACHABLE:
		if (icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED) {
			/* Update DCE and adjust MTU in icmp header if needed */
			icmp_inbound_too_big_v4(icmph, ira);
		}
		/* FALLTHRU */
	default:
		icmp_inbound_error_fanout_v4(mp, icmph, ira);
		break;
	}
	return (mp_ret);
}

/*
 * Send an ICMP echo, timestamp or address mask reply.
 * The caller has already updated the payload part of the packet.
 * We handle the ICMP checksum, IP source address selection and feed
 * the packet into ip_output_simple.
 */
static void
icmp_send_reply_v4(mblk_t *mp, ipha_t *ipha, icmph_t *icmph,
    ip_recv_attr_t *ira)
{
	uint_t		ip_hdr_length = ira->ira_ip_hdr_length;
	ill_t		*ill = ira->ira_ill;
	ip_stack_t	*ipst = ill->ill_ipst;
	ip_xmit_attr_t	ixas;

	/* Send out an ICMP packet */
	icmph->icmph_checksum = 0;
	icmph->icmph_checksum = IP_CSUM(mp, ip_hdr_length, 0);
	/* Reset time to live. */
	ipha->ipha_ttl = ipst->ips_ip_def_ttl;
	{
		/* Swap source and destination addresses */
		ipaddr_t tmp;

		tmp = ipha->ipha_src;
		ipha->ipha_src = ipha->ipha_dst;
		ipha->ipha_dst = tmp;
	}
	ipha->ipha_ident = 0;
	if (!IS_SIMPLE_IPH(ipha))
		icmp_options_update(ipha);

	bzero(&ixas, sizeof (ixas));
	ixas.ixa_flags = IXAF_BASIC_SIMPLE_V4;
	ixas.ixa_zoneid = ira->ira_zoneid;
	ixas.ixa_cred = kcred;
	ixas.ixa_cpid = NOPID;
	ixas.ixa_tsl = ira->ira_tsl;	/* Behave as a multi-level responder */
	ixas.ixa_ifindex = 0;
	ixas.ixa_ipst = ipst;
	ixas.ixa_multicast_ttl = IP_DEFAULT_MULTICAST_TTL;

	if (!(ira->ira_flags & IRAF_IPSEC_SECURE)) {
		/*
		 * This packet should go out the same way as it
		 * came in, i.e., in clear, independent of the IPsec policy
		 * for transmitting packets.
		 */
		ixas.ixa_flags |= IXAF_NO_IPSEC;
	} else {
		if (!ipsec_in_to_out(ira, &ixas, mp, ipha, NULL)) {
			BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
			/* Note: mp already consumed and ip_drop_packet done */
			return;
		}
	}
	if (ira->ira_flags & IRAF_MULTIBROADCAST) {
		/*
		 * Not one of our addresses (IRE_LOCALs), thus we let
		 * ip_output_simple pick the source.
		 */
		ipha->ipha_src = INADDR_ANY;
		ixas.ixa_flags |= IXAF_SET_SOURCE;
	}
	/* Should we send with DF and use dce_pmtu? */
	if (ipst->ips_ipv4_icmp_return_pmtu) {
		ixas.ixa_flags |= IXAF_PMTU_DISCOVERY;
		ipha->ipha_fragment_offset_and_flags |= IPH_DF_HTONS;
	}

	BUMP_MIB(&ipst->ips_icmp_mib, icmpOutMsgs);

	(void) ip_output_simple(mp, &ixas);
	ixa_cleanup(&ixas);
}
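
/*
 * The transmit-attribute idiom used above, and again in icmp_pkt() below,
 * is the minimal "simple output" pattern in this file (sketch only):
 *
 *	ip_xmit_attr_t ixas;
 *
 *	bzero(&ixas, sizeof (ixas));
 *	ixas.ixa_flags = IXAF_BASIC_SIMPLE_V4;	(plus IXAF_NO_IPSEC,
 *						IXAF_SET_SOURCE, ... as needed)
 *	ixas.ixa_zoneid = ...;
 *	ixas.ixa_ipst = ipst;
 *	(void) ip_output_simple(mp, &ixas);	(consumes mp)
 *	ixa_cleanup(&ixas);
 *
 * ixa_cleanup() is always paired with the bzero() since the attributes
 * may pick up references (e.g. an ire) during transmission.
 */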

/*
 * Verify the ICMP message, for either an ICMP error or a redirect packet.
 * The caller should have fully pulled up the message. If it's a redirect
 * packet, only basic checks on the IP header will be done; otherwise, verify
 * the packet by looking at the included ULP header.
 *
 * Called before icmp_inbound_error_fanout_v4 is called.
 */
static boolean_t
icmp_inbound_verify_v4(mblk_t *mp, icmph_t *icmph, ip_recv_attr_t *ira)
{
	ill_t		*ill = ira->ira_ill;
	int		hdr_length;
	ip_stack_t	*ipst = ira->ira_ill->ill_ipst;
	conn_t		*connp;
	ipha_t		*ipha;	/* Inner IP header */

	ipha = (ipha_t *)&icmph[1];
	if ((uchar_t *)ipha + IP_SIMPLE_HDR_LENGTH > mp->b_wptr)
		goto truncated;

	hdr_length = IPH_HDR_LENGTH(ipha);

	if ((IPH_HDR_VERSION(ipha) != IPV4_VERSION))
		goto discard_pkt;

	if (hdr_length < sizeof (ipha_t))
		goto truncated;

	if ((uchar_t *)ipha + hdr_length > mp->b_wptr)
		goto truncated;

	/*
	 * Stop here for ICMP_REDIRECT.
	 */
	if (icmph->icmph_type == ICMP_REDIRECT)
		return (B_TRUE);

	/*
	 * ICMP errors only.
	 */
	switch (ipha->ipha_protocol) {
	case IPPROTO_UDP:
		/*
		 * Verify we have at least ICMP_MIN_TP_HDR_LEN bytes of
		 * transport header.
		 */
		if ((uchar_t *)ipha + hdr_length + ICMP_MIN_TP_HDR_LEN >
		    mp->b_wptr)
			goto truncated;
		break;
	case IPPROTO_TCP: {
		tcpha_t	*tcpha;

		/*
		 * Verify we have at least ICMP_MIN_TP_HDR_LEN bytes of
		 * transport header.
		 */
		if ((uchar_t *)ipha + hdr_length + ICMP_MIN_TP_HDR_LEN >
		    mp->b_wptr)
			goto truncated;

		tcpha = (tcpha_t *)((uchar_t *)ipha + hdr_length);
		connp = ipcl_tcp_lookup_reversed_ipv4(ipha, tcpha, TCPS_LISTEN,
		    ipst);
		if (connp == NULL)
			goto discard_pkt;

		if ((connp->conn_verifyicmp != NULL) &&
		    !connp->conn_verifyicmp(connp, tcpha, icmph, NULL, ira)) {
			CONN_DEC_REF(connp);
			goto discard_pkt;
		}
		CONN_DEC_REF(connp);
		break;
	}
	case IPPROTO_SCTP:
		/*
		 * Verify we have at least ICMP_MIN_TP_HDR_LEN bytes of
		 * transport header.
		 */
		if ((uchar_t *)ipha + hdr_length + ICMP_MIN_TP_HDR_LEN >
		    mp->b_wptr)
			goto truncated;
		break;
	case IPPROTO_ESP:
	case IPPROTO_AH:
		break;
	case IPPROTO_ENCAP:
		if ((uchar_t *)ipha + hdr_length + sizeof (ipha_t) >
		    mp->b_wptr)
			goto truncated;
		break;
	default:
		break;
	}

	return (B_TRUE);

discard_pkt:
	/* Bogus ICMP error. */
	BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
	return (B_FALSE);

truncated:
	/* We pulled up everything already. Must be truncated */
	BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts);
	ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill);
	return (B_FALSE);
}

/* Table from RFC 1191 */
static int icmp_frag_size_table[] =
{ 32000, 17914, 8166, 4352, 2002, 1496, 1006, 508, 296, 68 };
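
/*
 * Worked example for the table above: if a pre-RFC-1191 router omits the
 * next-hop MTU (icmph_du_zero != 0) and the returned inner header claims
 * ipha_length == 1500, the loop in icmp_inbound_too_big_v4() below picks
 * 1496, the largest plateau strictly smaller than 1500. A length at or
 * below 68 runs off the end of the table; in that case PMTU discovery is
 * disabled for the destination (DCEF_TOO_SMALL_PMTU) and the MTU is
 * clamped to ips_ip_pmtu_min.
 */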

/*
 * Process received ICMP Packet too big.
 * Just handles the DCE create/update, including using the above table of
 * PMTU guesses. The caller is responsible for validating the packet before
 * passing it in and also for fanning out the ICMP error to any matching
 * transport conns. Assumes the message has been fully pulled up and
 * verified.
 *
 * Before getting here, the caller has called icmp_inbound_verify_v4()
 * that should have verified with the ULP to prevent undoing the changes
 * we're going to make to the DCE. For example, TCP might have verified that
 * the packet which generated the error is in the send window.
 *
 * In some cases we modify the MTU in the ICMP header packet; the caller
 * should pass the packet to the matching ULP after this returns.
 */
static void
icmp_inbound_too_big_v4(icmph_t *icmph, ip_recv_attr_t *ira)
{
	dce_t		*dce;
	int		old_mtu;
	int		mtu, orig_mtu;
	ipaddr_t	dst;
	boolean_t	disable_pmtud;
	ill_t		*ill = ira->ira_ill;
	ip_stack_t	*ipst = ill->ill_ipst;
	uint_t		hdr_length;
	ipha_t		*ipha;

	/* Caller already pulled up everything. */
	ipha = (ipha_t *)&icmph[1];
	ASSERT(icmph->icmph_type == ICMP_DEST_UNREACHABLE &&
	    icmph->icmph_code == ICMP_FRAGMENTATION_NEEDED);
	ASSERT(ill != NULL);

	hdr_length = IPH_HDR_LENGTH(ipha);

	/*
	 * We handle path MTU for source routed packets since the DCE
	 * is looked up using the final destination.
	 */
	dst = ip_get_dst(ipha);

	dce = dce_lookup_and_add_v4(dst, ipst);
	if (dce == NULL) {
		/* Couldn't add a unique one - ENOMEM */
		ip1dbg(("icmp_inbound_too_big_v4: no dce for 0x%x\n",
		    ntohl(dst)));
		return;
	}

	/* Check for MTU discovery advice as described in RFC 1191 */
	mtu = ntohs(icmph->icmph_du_mtu);
	orig_mtu = mtu;
	disable_pmtud = B_FALSE;

	mutex_enter(&dce->dce_lock);
	if (dce->dce_flags & DCEF_PMTU)
		old_mtu = dce->dce_pmtu;
	else
		old_mtu = ill->ill_mtu;

	if (icmph->icmph_du_zero != 0 || mtu < ipst->ips_ip_pmtu_min) {
		uint32_t length;
		int	i;

		/*
		 * Use the table from RFC 1191 to figure out
		 * the next "plateau" based on the length in
		 * the original IP packet.
		 */
		length = ntohs(ipha->ipha_length);
		DTRACE_PROBE2(ip4__pmtu__guess, dce_t *, dce,
		    uint32_t, length);
		if (old_mtu <= length &&
		    old_mtu >= length - hdr_length) {
			/*
			 * Handle broken BSD 4.2 systems that
			 * return the wrong ipha_length in ICMP
			 * errors.
			 */
			ip1dbg(("Wrong mtu: sent %d, dce %d\n",
			    length, old_mtu));
			length -= hdr_length;
		}
		for (i = 0; i < A_CNT(icmp_frag_size_table); i++) {
			if (length > icmp_frag_size_table[i])
				break;
		}
		if (i == A_CNT(icmp_frag_size_table)) {
			/* Smaller than IP_MIN_MTU! */
			ip1dbg(("Too big for packet size %d\n",
			    length));
			disable_pmtud = B_TRUE;
			mtu = ipst->ips_ip_pmtu_min;
		} else {
			mtu = icmp_frag_size_table[i];
			ip1dbg(("Calculated mtu %d, packet size %d, "
			    "before %d\n", mtu, length, old_mtu));
			if (mtu < ipst->ips_ip_pmtu_min) {
				mtu = ipst->ips_ip_pmtu_min;
				disable_pmtud = B_TRUE;
			}
		}
	}
	if (disable_pmtud)
		dce->dce_flags |= DCEF_TOO_SMALL_PMTU;
	else
		dce->dce_flags &= ~DCEF_TOO_SMALL_PMTU;

	dce->dce_pmtu = MIN(old_mtu, mtu);
	/* Prepare to send the new max frag size for the ULP. */
	icmph->icmph_du_zero = 0;
	icmph->icmph_du_mtu = htons((uint16_t)dce->dce_pmtu);
	DTRACE_PROBE4(ip4__pmtu__change, icmph_t *, icmph, dce_t *,
	    dce, int, orig_mtu, int, mtu);

	/* We now have a PMTU for sure */
	dce->dce_flags |= DCEF_PMTU;
	dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64());
	mutex_exit(&dce->dce_lock);
	/*
	 * After dropping the lock the new value is visible to everyone.
	 * Then we bump the generation number so any cached values reinspect
	 * the dce_t.
	 */
	dce_increment_generation(dce);
	dce_refrele(dce);
}

/*
 * If the packet in error is Self-Encapsulated, icmp_inbound_error_fanout_v4
 * calls this function.
 */
static mblk_t *
icmp_inbound_self_encap_error_v4(mblk_t *mp, ipha_t *ipha, ipha_t *in_ipha)
{
	int length;

	ASSERT(mp->b_datap->db_type == M_DATA);

	/* icmp_inbound_v4 has already pulled up the whole error packet */
	ASSERT(mp->b_cont == NULL);

	/*
	 * The length that we want to overlay is the inner header
	 * and what follows it.
	 */
	length = msgdsize(mp) - ((uchar_t *)in_ipha - mp->b_rptr);

	/*
	 * Overlay the inner header and whatever follows it over the
	 * outer header.
	 */
	bcopy((uchar_t *)in_ipha, (uchar_t *)ipha, length);

	/* Adjust for what we removed */
	mp->b_wptr -= (uchar_t *)in_ipha - (uchar_t *)ipha;
	return (mp);
}

/*
 * Try to pass the ICMP message upstream in case the ULP cares.
 *
 * If the packet that caused the ICMP error is secure, we send
 * it to AH/ESP to make sure that the attached packet has a
 * valid association. ipha in the code below points to the
 * IP header of the packet that caused the error.
 *
 * For IPsec cases, we let the next-layer-up (which has access to
 * cached policy on the conn_t, or can query the SPD directly)
 * subtract out any IPsec overhead if they must. We therefore make no
 * adjustments here for IPsec overhead.
 *
 * IFN could have been generated locally or by some router.
 *
 * LOCAL : ire_send_wire (before calling ipsec_out_process) can call
 * icmp_frag_needed/icmp_pkt2big_v6 to generate a local IFN.
 * This happens because IP adjusted its value of MTU on an
 * earlier IFN message and could not tell the upper layer the
 * new adjusted value of the MTU, e.g., because the packet was
 * encrypted or there was not enough information to fan the error
 * out to the upper layers. Thus on the next outbound datagram,
 * ire_send_wire generates the IFN, where IPsec processing has
 * *not* been done.
 *
 * Note that we retain ixa_fragsize across IPsec; thus once
 * we have picked ixa_fragsize and entered ipsec_out_process we do
 * not change the fragsize even if the path MTU changes before
 * we reach ip_output_post_ipsec.
 *
 * In the local case, IRAF_LOOPBACK will be set indicating
 * that IFN was generated locally.
 *
 * ROUTER : IFN could be secure or non-secure.
 *
 * * SECURE : We use the IPSEC_IN to fanout to AH/ESP if the
 *   packet in error has AH/ESP headers to validate the AH/ESP
 *   headers. AH/ESP will verify whether there is a valid SA or
 *   not and send it back. We will fanout again if we have more
 *   data in the packet.
 *
 *   If the packet in error does not have AH/ESP, we handle it
 *   like any other case.
 *
 * * NON_SECURE : If the packet in error has AH/ESP headers, we send it
 *   up to AH/ESP for validation. AH/ESP will verify whether there is a
 *   valid SA or not and send it back. We will fanout again if
 *   we have more data in the packet.
 *
 *   If the packet in error does not have AH/ESP, we handle it
 *   like any other case.
 *
 * The caller must have called icmp_inbound_verify_v4.
 */
static void
icmp_inbound_error_fanout_v4(mblk_t *mp, icmph_t *icmph, ip_recv_attr_t *ira)
{
	uint16_t	*up;	/* Pointer to ports in ULP header */
	uint32_t	ports;	/* reversed ports for fanout */
	ipha_t		ripha;	/* With reversed addresses */
	ipha_t		*ipha;	/* Inner IP header */
	uint_t		hdr_length;	/* Inner IP header length */
	tcpha_t		*tcpha;
	conn_t		*connp;
	ill_t		*ill = ira->ira_ill;
	ip_stack_t	*ipst = ill->ill_ipst;
	ipsec_stack_t	*ipss = ipst->ips_netstack->netstack_ipsec;
	ill_t		*rill = ira->ira_rill;

	/* Caller already pulled up everything. */
	ipha = (ipha_t *)&icmph[1];
	ASSERT((uchar_t *)&ipha[1] <= mp->b_wptr);
	ASSERT(mp->b_cont == NULL);

	hdr_length = IPH_HDR_LENGTH(ipha);
	ira->ira_protocol = ipha->ipha_protocol;

	/*
	 * We need a separate IP header with the source and destination
	 * addresses reversed to do fanout/classification because the ipha in
	 * the ICMP error is in the form we sent it out.
	 */
	ripha.ipha_src = ipha->ipha_dst;
	ripha.ipha_dst = ipha->ipha_src;
	ripha.ipha_protocol = ipha->ipha_protocol;
	ripha.ipha_version_and_hdr_length = ipha->ipha_version_and_hdr_length;

	ip2dbg(("icmp_inbound_error_v4: proto %d %x to %x: %d/%d\n",
	    ripha.ipha_protocol, ntohl(ipha->ipha_src),
	    ntohl(ipha->ipha_dst),
	    icmph->icmph_type, icmph->icmph_code));

	switch (ipha->ipha_protocol) {
	case IPPROTO_UDP:
		up = (uint16_t *)((uchar_t *)ipha + hdr_length);

		/* Attempt to find a client stream based on port. */
		ip2dbg(("icmp_inbound_error_v4: UDP ports %d to %d\n",
		    ntohs(up[0]), ntohs(up[1])));

		/* Note that we send error to all matches. */
		ira->ira_flags |= IRAF_ICMP_ERROR;
		ip_fanout_udp_multi_v4(mp, &ripha, up[0], up[1], ira);
		ira->ira_flags &= ~IRAF_ICMP_ERROR;
		return;

	case IPPROTO_TCP:
		/*
		 * Find a TCP client stream for this packet.
		 * Note that we do a reverse lookup since the header is
		 * in the form we sent it out.
		 */
2287 */ 2288 tcpha = (tcpha_t *)((uchar_t *)ipha + hdr_length); 2289 connp = ipcl_tcp_lookup_reversed_ipv4(ipha, tcpha, TCPS_LISTEN, 2290 ipst); 2291 if (connp == NULL) 2292 goto discard_pkt; 2293 2294 if (CONN_INBOUND_POLICY_PRESENT(connp, ipss) || 2295 (ira->ira_flags & IRAF_IPSEC_SECURE)) { 2296 mp = ipsec_check_inbound_policy(mp, connp, 2297 ipha, NULL, ira); 2298 if (mp == NULL) { 2299 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards); 2300 /* Note that mp is NULL */ 2301 ip_drop_input("ipIfStatsInDiscards", mp, ill); 2302 CONN_DEC_REF(connp); 2303 return; 2304 } 2305 } 2306 2307 ira->ira_flags |= IRAF_ICMP_ERROR; 2308 ira->ira_ill = ira->ira_rill = NULL; 2309 if (IPCL_IS_TCP(connp)) { 2310 SQUEUE_ENTER_ONE(connp->conn_sqp, mp, 2311 connp->conn_recvicmp, connp, ira, SQ_FILL, 2312 SQTAG_TCP_INPUT_ICMP_ERR); 2313 } else { 2314 /* Not TCP; must be SOCK_RAW, IPPROTO_TCP */ 2315 (connp->conn_recv)(connp, mp, NULL, ira); 2316 CONN_DEC_REF(connp); 2317 } 2318 ira->ira_ill = ill; 2319 ira->ira_rill = rill; 2320 ira->ira_flags &= ~IRAF_ICMP_ERROR; 2321 return; 2322 2323 case IPPROTO_SCTP: 2324 up = (uint16_t *)((uchar_t *)ipha + hdr_length); 2325 /* Find a SCTP client stream for this packet. */ 2326 ((uint16_t *)&ports)[0] = up[1]; 2327 ((uint16_t *)&ports)[1] = up[0]; 2328 2329 ira->ira_flags |= IRAF_ICMP_ERROR; 2330 ip_fanout_sctp(mp, &ripha, NULL, ports, ira); 2331 ira->ira_flags &= ~IRAF_ICMP_ERROR; 2332 return; 2333 2334 case IPPROTO_ESP: 2335 case IPPROTO_AH: 2336 if (!ipsec_loaded(ipss)) { 2337 ip_proto_not_sup(mp, ira); 2338 return; 2339 } 2340 2341 if (ipha->ipha_protocol == IPPROTO_ESP) 2342 mp = ipsecesp_icmp_error(mp, ira); 2343 else 2344 mp = ipsecah_icmp_error(mp, ira); 2345 if (mp == NULL) 2346 return; 2347 2348 /* Just in case ipsec didn't preserve the NULL b_cont */ 2349 if (mp->b_cont != NULL) { 2350 if (!pullupmsg(mp, -1)) 2351 goto discard_pkt; 2352 } 2353 2354 /* 2355 * Note that ira_pktlen and ira_ip_hdr_length are no longer 2356 * correct, but we don't use them any more here. 2357 * 2358 * If succesful, the mp has been modified to not include 2359 * the ESP/AH header so we can fanout to the ULP's icmp 2360 * error handler. 2361 */ 2362 if (mp->b_wptr - mp->b_rptr < IP_SIMPLE_HDR_LENGTH) 2363 goto truncated; 2364 2365 /* Verify the modified message before any further processes. */ 2366 ipha = (ipha_t *)mp->b_rptr; 2367 hdr_length = IPH_HDR_LENGTH(ipha); 2368 icmph = (icmph_t *)&mp->b_rptr[hdr_length]; 2369 if (!icmp_inbound_verify_v4(mp, icmph, ira)) { 2370 freemsg(mp); 2371 return; 2372 } 2373 2374 icmp_inbound_error_fanout_v4(mp, icmph, ira); 2375 return; 2376 2377 case IPPROTO_ENCAP: { 2378 /* Look for self-encapsulated packets that caused an error */ 2379 ipha_t *in_ipha; 2380 2381 /* 2382 * Caller has verified that length has to be 2383 * at least the size of IP header. 2384 */ 2385 ASSERT(hdr_length >= sizeof (ipha_t)); 2386 /* 2387 * Check the sanity of the inner IP header like 2388 * we did for the outer header. 
2389 */ 2390 in_ipha = (ipha_t *)((uchar_t *)ipha + hdr_length); 2391 if ((IPH_HDR_VERSION(in_ipha) != IPV4_VERSION)) { 2392 goto discard_pkt; 2393 } 2394 if (IPH_HDR_LENGTH(in_ipha) < sizeof (ipha_t)) { 2395 goto discard_pkt; 2396 } 2397 /* Check for Self-encapsulated tunnels */ 2398 if (in_ipha->ipha_src == ipha->ipha_src && 2399 in_ipha->ipha_dst == ipha->ipha_dst) { 2400 2401 mp = icmp_inbound_self_encap_error_v4(mp, ipha, 2402 in_ipha); 2403 if (mp == NULL) 2404 goto discard_pkt; 2405 2406 /* 2407 * Just in case self_encap didn't preserve the NULL 2408 * b_cont 2409 */ 2410 if (mp->b_cont != NULL) { 2411 if (!pullupmsg(mp, -1)) 2412 goto discard_pkt; 2413 } 2414 /* 2415 * Note that ira_pktlen and ira_ip_hdr_length are no 2416 * longer correct, but we don't use them any more here. 2417 */ 2418 if (mp->b_wptr - mp->b_rptr < IP_SIMPLE_HDR_LENGTH) 2419 goto truncated; 2420 2421 /* 2422 * Verify the modified message before any further 2423 * processes. 2424 */ 2425 ipha = (ipha_t *)mp->b_rptr; 2426 hdr_length = IPH_HDR_LENGTH(ipha); 2427 icmph = (icmph_t *)&mp->b_rptr[hdr_length]; 2428 if (!icmp_inbound_verify_v4(mp, icmph, ira)) { 2429 freemsg(mp); 2430 return; 2431 } 2432 2433 /* 2434 * The packet in error is self-encapsualted. 2435 * And we are finding it further encapsulated 2436 * which we could not have possibly generated. 2437 */ 2438 if (ipha->ipha_protocol == IPPROTO_ENCAP) { 2439 goto discard_pkt; 2440 } 2441 icmp_inbound_error_fanout_v4(mp, icmph, ira); 2442 return; 2443 } 2444 /* No self-encapsulated */ 2445 /* FALLTHRU */ 2446 } 2447 case IPPROTO_IPV6: 2448 if ((connp = ipcl_iptun_classify_v4(&ripha.ipha_src, 2449 &ripha.ipha_dst, ipst)) != NULL) { 2450 ira->ira_flags |= IRAF_ICMP_ERROR; 2451 connp->conn_recvicmp(connp, mp, NULL, ira); 2452 CONN_DEC_REF(connp); 2453 ira->ira_flags &= ~IRAF_ICMP_ERROR; 2454 return; 2455 } 2456 /* 2457 * No IP tunnel is interested, fallthrough and see 2458 * if a raw socket will want it. 2459 */ 2460 /* FALLTHRU */ 2461 default: 2462 ira->ira_flags |= IRAF_ICMP_ERROR; 2463 ip_fanout_proto_v4(mp, &ripha, ira); 2464 ira->ira_flags &= ~IRAF_ICMP_ERROR; 2465 return; 2466 } 2467 /* NOTREACHED */ 2468 discard_pkt: 2469 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards); 2470 ip1dbg(("icmp_inbound_error_fanout_v4: drop pkt\n")); 2471 ip_drop_input("ipIfStatsInDiscards", mp, ill); 2472 freemsg(mp); 2473 return; 2474 2475 truncated: 2476 /* We pulled up everthing already. Must be truncated */ 2477 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts); 2478 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill); 2479 freemsg(mp); 2480 } 2481 2482 /* 2483 * Common IP options parser. 2484 * 2485 * Setup routine: fill in *optp with options-parsing state, then 2486 * tail-call ipoptp_next to return the first option. 
2487 */ 2488 uint8_t 2489 ipoptp_first(ipoptp_t *optp, ipha_t *ipha) 2490 { 2491 uint32_t totallen; /* total length of all options */ 2492 2493 totallen = ipha->ipha_version_and_hdr_length - 2494 (uint8_t)((IP_VERSION << 4) + IP_SIMPLE_HDR_LENGTH_IN_WORDS); 2495 totallen <<= 2; 2496 optp->ipoptp_next = (uint8_t *)(&ipha[1]); 2497 optp->ipoptp_end = optp->ipoptp_next + totallen; 2498 optp->ipoptp_flags = 0; 2499 return (ipoptp_next(optp)); 2500 } 2501 2502 /* Like above but without an ipha_t */ 2503 uint8_t 2504 ipoptp_first2(ipoptp_t *optp, uint32_t totallen, uint8_t *opt) 2505 { 2506 optp->ipoptp_next = opt; 2507 optp->ipoptp_end = optp->ipoptp_next + totallen; 2508 optp->ipoptp_flags = 0; 2509 return (ipoptp_next(optp)); 2510 } 2511 2512 /* 2513 * Common IP options parser: extract next option. 2514 */ 2515 uint8_t 2516 ipoptp_next(ipoptp_t *optp) 2517 { 2518 uint8_t *end = optp->ipoptp_end; 2519 uint8_t *cur = optp->ipoptp_next; 2520 uint8_t opt, len, pointer; 2521 2522 /* 2523 * If cur > end already, then the ipoptp_end or ipoptp_next pointer 2524 * has been corrupted. 2525 */ 2526 ASSERT(cur <= end); 2527 2528 if (cur == end) 2529 return (IPOPT_EOL); 2530 2531 opt = cur[IPOPT_OPTVAL]; 2532 2533 /* 2534 * Skip any NOP options. 2535 */ 2536 while (opt == IPOPT_NOP) { 2537 cur++; 2538 if (cur == end) 2539 return (IPOPT_EOL); 2540 opt = cur[IPOPT_OPTVAL]; 2541 } 2542 2543 if (opt == IPOPT_EOL) 2544 return (IPOPT_EOL); 2545 2546 /* 2547 * Option requiring a length. 2548 */ 2549 if ((cur + 1) >= end) { 2550 optp->ipoptp_flags |= IPOPTP_ERROR; 2551 return (IPOPT_EOL); 2552 } 2553 len = cur[IPOPT_OLEN]; 2554 if (len < 2) { 2555 optp->ipoptp_flags |= IPOPTP_ERROR; 2556 return (IPOPT_EOL); 2557 } 2558 optp->ipoptp_cur = cur; 2559 optp->ipoptp_len = len; 2560 optp->ipoptp_next = cur + len; 2561 if (cur + len > end) { 2562 optp->ipoptp_flags |= IPOPTP_ERROR; 2563 return (IPOPT_EOL); 2564 } 2565 2566 /* 2567 * For the options which require a pointer field, make sure 2568 * its there, and make sure it points to either something 2569 * inside this option, or the end of the option. 2570 */ 2571 switch (opt) { 2572 case IPOPT_RR: 2573 case IPOPT_TS: 2574 case IPOPT_LSRR: 2575 case IPOPT_SSRR: 2576 if (len <= IPOPT_OFFSET) { 2577 optp->ipoptp_flags |= IPOPTP_ERROR; 2578 return (opt); 2579 } 2580 pointer = cur[IPOPT_OFFSET]; 2581 if (pointer - 1 > len) { 2582 optp->ipoptp_flags |= IPOPTP_ERROR; 2583 return (opt); 2584 } 2585 break; 2586 } 2587 2588 /* 2589 * Sanity check the pointer field based on the type of the 2590 * option. 2591 */ 2592 switch (opt) { 2593 case IPOPT_RR: 2594 case IPOPT_SSRR: 2595 case IPOPT_LSRR: 2596 if (pointer < IPOPT_MINOFF_SR) 2597 optp->ipoptp_flags |= IPOPTP_ERROR; 2598 break; 2599 case IPOPT_TS: 2600 if (pointer < IPOPT_MINOFF_IT) 2601 optp->ipoptp_flags |= IPOPTP_ERROR; 2602 /* 2603 * Note that the Internet Timestamp option also 2604 * contains two four bit fields (the Overflow field, 2605 * and the Flag field), which follow the pointer 2606 * field. We don't need to check that these fields 2607 * fall within the length of the option because this 2608 * was implicitely done above. We've checked that the 2609 * pointer value is at least IPOPT_MINOFF_IT, and that 2610 * it falls within the option. Since IPOPT_MINOFF_IT > 2611 * IPOPT_POS_OV_FLG, we don't need the explicit check. 
2612 */ 2613 ASSERT(len > IPOPT_POS_OV_FLG); 2614 break; 2615 } 2616 2617 return (opt); 2618 } 2619 2620 /* 2621 * Use the outgoing IP header to create an IP_OPTIONS option the way 2622 * it was passed down from the application. 2623 * 2624 * This is compatible with BSD in that it returns 2625 * the reverse source route with the final destination 2626 * as the last entry. The first 4 bytes of the option 2627 * will contain the final destination. 2628 */ 2629 int 2630 ip_opt_get_user(conn_t *connp, uchar_t *buf) 2631 { 2632 ipoptp_t opts; 2633 uchar_t *opt; 2634 uint8_t optval; 2635 uint8_t optlen; 2636 uint32_t len = 0; 2637 uchar_t *buf1 = buf; 2638 uint32_t totallen; 2639 ipaddr_t dst; 2640 ip_pkt_t *ipp = &connp->conn_xmit_ipp; 2641 2642 if (!(ipp->ipp_fields & IPPF_IPV4_OPTIONS)) 2643 return (0); 2644 2645 totallen = ipp->ipp_ipv4_options_len; 2646 if (totallen & 0x3) 2647 return (0); 2648 2649 buf += IP_ADDR_LEN; /* Leave room for final destination */ 2650 len += IP_ADDR_LEN; 2651 bzero(buf1, IP_ADDR_LEN); 2652 2653 dst = connp->conn_faddr_v4; 2654 2655 for (optval = ipoptp_first2(&opts, totallen, ipp->ipp_ipv4_options); 2656 optval != IPOPT_EOL; 2657 optval = ipoptp_next(&opts)) { 2658 int off; 2659 2660 opt = opts.ipoptp_cur; 2661 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) { 2662 break; 2663 } 2664 optlen = opts.ipoptp_len; 2665 2666 switch (optval) { 2667 case IPOPT_SSRR: 2668 case IPOPT_LSRR: 2669 2670 /* 2671 * Insert destination as the first entry in the source 2672 * route and move down the entries on step. 2673 * The last entry gets placed at buf1. 2674 */ 2675 buf[IPOPT_OPTVAL] = optval; 2676 buf[IPOPT_OLEN] = optlen; 2677 buf[IPOPT_OFFSET] = optlen; 2678 2679 off = optlen - IP_ADDR_LEN; 2680 if (off < 0) { 2681 /* No entries in source route */ 2682 break; 2683 } 2684 /* Last entry in source route if not already set */ 2685 if (dst == INADDR_ANY) 2686 bcopy(opt + off, buf1, IP_ADDR_LEN); 2687 off -= IP_ADDR_LEN; 2688 2689 while (off > 0) { 2690 bcopy(opt + off, 2691 buf + off + IP_ADDR_LEN, 2692 IP_ADDR_LEN); 2693 off -= IP_ADDR_LEN; 2694 } 2695 /* ipha_dst into first slot */ 2696 bcopy(&dst, buf + off + IP_ADDR_LEN, 2697 IP_ADDR_LEN); 2698 buf += optlen; 2699 len += optlen; 2700 break; 2701 2702 default: 2703 bcopy(opt, buf, optlen); 2704 buf += optlen; 2705 len += optlen; 2706 break; 2707 } 2708 } 2709 done: 2710 /* Pad the resulting options */ 2711 while (len & 0x3) { 2712 *buf++ = IPOPT_EOL; 2713 len++; 2714 } 2715 return (len); 2716 } 2717 2718 /* 2719 * Update any record route or timestamp options to include this host. 2720 * Reverse any source route option. 2721 * This routine assumes that the options are well formed i.e. that they 2722 * have already been checked. 2723 */ 2724 static void 2725 icmp_options_update(ipha_t *ipha) 2726 { 2727 ipoptp_t opts; 2728 uchar_t *opt; 2729 uint8_t optval; 2730 ipaddr_t src; /* Our local address */ 2731 ipaddr_t dst; 2732 2733 ip2dbg(("icmp_options_update\n")); 2734 src = ipha->ipha_src; 2735 dst = ipha->ipha_dst; 2736 2737 for (optval = ipoptp_first(&opts, ipha); 2738 optval != IPOPT_EOL; 2739 optval = ipoptp_next(&opts)) { 2740 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0); 2741 opt = opts.ipoptp_cur; 2742 ip2dbg(("icmp_options_update: opt %d, len %d\n", 2743 optval, opts.ipoptp_len)); 2744 switch (optval) { 2745 int off1, off2; 2746 case IPOPT_SSRR: 2747 case IPOPT_LSRR: 2748 /* 2749 * Reverse the source route. 

/*
 * Update any record route or timestamp options to include this host.
 * Reverse any source route option.
 * This routine assumes that the options are well formed, i.e., that they
 * have already been checked.
 */
static void
icmp_options_update(ipha_t *ipha)
{
	ipoptp_t	opts;
	uchar_t		*opt;
	uint8_t		optval;
	ipaddr_t	src;		/* Our local address */
	ipaddr_t	dst;

	ip2dbg(("icmp_options_update\n"));
	src = ipha->ipha_src;
	dst = ipha->ipha_dst;

	for (optval = ipoptp_first(&opts, ipha);
	    optval != IPOPT_EOL;
	    optval = ipoptp_next(&opts)) {
		ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
		opt = opts.ipoptp_cur;
		ip2dbg(("icmp_options_update: opt %d, len %d\n",
		    optval, opts.ipoptp_len));
		switch (optval) {
			int off1, off2;
		case IPOPT_SSRR:
		case IPOPT_LSRR:
			/*
			 * Reverse the source route. The first entry
			 * should be the next to last one in the current
			 * source route (the last entry is our address).
			 * The last entry should be the final destination.
			 */
			off1 = IPOPT_MINOFF_SR - 1;
			off2 = opt[IPOPT_OFFSET] - IP_ADDR_LEN - 1;
			if (off2 < 0) {
				/* No entries in source route */
				ip1dbg((
				    "icmp_options_update: bad src route\n"));
				break;
			}
			bcopy((char *)opt + off2, &dst, IP_ADDR_LEN);
			bcopy(&ipha->ipha_dst, (char *)opt + off2, IP_ADDR_LEN);
			bcopy(&dst, &ipha->ipha_dst, IP_ADDR_LEN);
			off2 -= IP_ADDR_LEN;

			while (off1 < off2) {
				bcopy((char *)opt + off1, &src, IP_ADDR_LEN);
				bcopy((char *)opt + off2, (char *)opt + off1,
				    IP_ADDR_LEN);
				bcopy(&src, (char *)opt + off2, IP_ADDR_LEN);
				off1 += IP_ADDR_LEN;
				off2 -= IP_ADDR_LEN;
			}
			opt[IPOPT_OFFSET] = IPOPT_MINOFF_SR;
			break;
		}
	}
}

/*
 * Process received ICMP Redirect messages.
 * Assumes the caller has verified that the headers are in the pulled up mblk.
 * Consumes mp.
 */
static void
icmp_redirect_v4(mblk_t *mp, ipha_t *ipha, icmph_t *icmph, ip_recv_attr_t *ira)
{
	ire_t		*ire, *nire;
	ire_t		*prev_ire;
	ipaddr_t	src, dst, gateway;
	ip_stack_t	*ipst = ira->ira_ill->ill_ipst;
	ipha_t		*inner_ipha;	/* Inner IP header */

	/* Caller already pulled up everything. */
	inner_ipha = (ipha_t *)&icmph[1];
	src = ipha->ipha_src;
	dst = inner_ipha->ipha_dst;
	gateway = icmph->icmph_rd_gateway;
	/* Make sure the new gateway is reachable somehow. */
	ire = ire_ftable_lookup_v4(gateway, 0, 0, IRE_ONLINK, NULL,
	    ALL_ZONES, NULL, MATCH_IRE_TYPE, 0, ipst, NULL);
	/*
	 * Make sure we had a route for the dest in question and that
	 * that route was pointing to the old gateway (the source of the
	 * redirect packet.)
	 * Note: this merely says that there is some IRE which matches that
	 * gateway; not that the longest match matches that gateway.
	 */
	prev_ire = ire_ftable_lookup_v4(dst, 0, src, 0, NULL, ALL_ZONES,
	    NULL, MATCH_IRE_GW, 0, ipst, NULL);
	/*
	 * Check that
	 *	the redirect was not from ourselves
	 *	the new gateway and the old gateway are directly reachable
	 */
	if (prev_ire == NULL || ire == NULL ||
	    (prev_ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK)) ||
	    (prev_ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE)) ||
	    !(ire->ire_type & IRE_IF_ALL)) {
		BUMP_MIB(&ipst->ips_icmp_mib, icmpInBadRedirects);
		ip_drop_input("icmpInBadRedirects - ire", mp, ira->ira_ill);
		freemsg(mp);
		if (ire != NULL)
			ire_refrele(ire);
		if (prev_ire != NULL)
			ire_refrele(prev_ire);
		return;
	}

	ire_refrele(prev_ire);
	ire_refrele(ire);

	/*
	 * TODO: more precise handling for cases 0, 2, 3, the latter two
	 * require TOS routing
	 */
	switch (icmph->icmph_code) {
	case 0:
	case 1:
		/* TODO: TOS specificity for cases 2 and 3 */
	case 2:
	case 3:
		break;
	default:
		BUMP_MIB(&ipst->ips_icmp_mib, icmpInBadRedirects);
		ip_drop_input("icmpInBadRedirects - code", mp, ira->ira_ill);
		freemsg(mp);
		return;
	}
	/*
	 * Create a Route Association. This will allow us to remember that
	 * someone we believe told us to use the particular gateway.
	 */
2854 */ 2855 ire = ire_create( 2856 (uchar_t *)&dst, /* dest addr */ 2857 (uchar_t *)&ip_g_all_ones, /* mask */ 2858 (uchar_t *)&gateway, /* gateway addr */ 2859 IRE_HOST, 2860 NULL, /* ill */ 2861 ALL_ZONES, 2862 (RTF_DYNAMIC | RTF_GATEWAY | RTF_HOST), 2863 NULL, /* tsol_gc_t */ 2864 ipst); 2865 2866 if (ire == NULL) { 2867 freemsg(mp); 2868 return; 2869 } 2870 nire = ire_add(ire); 2871 /* Check if it was a duplicate entry */ 2872 if (nire != NULL && nire != ire) { 2873 ASSERT(nire->ire_identical_ref > 1); 2874 ire_delete(nire); 2875 ire_refrele(nire); 2876 nire = NULL; 2877 } 2878 ire = nire; 2879 if (ire != NULL) { 2880 ire_refrele(ire); /* Held in ire_add */ 2881 2882 /* tell routing sockets that we received a redirect */ 2883 ip_rts_change(RTM_REDIRECT, dst, gateway, IP_HOST_MASK, 0, src, 2884 (RTF_DYNAMIC | RTF_GATEWAY | RTF_HOST), 0, 2885 (RTA_DST | RTA_GATEWAY | RTA_NETMASK | RTA_AUTHOR), ipst); 2886 } 2887 2888 /* 2889 * Delete any existing IRE_HOST type redirect ires for this destination. 2890 * This together with the added IRE has the effect of 2891 * modifying an existing redirect. 2892 */ 2893 prev_ire = ire_ftable_lookup_v4(dst, 0, src, IRE_HOST, NULL, 2894 ALL_ZONES, NULL, (MATCH_IRE_GW | MATCH_IRE_TYPE), 0, ipst, NULL); 2895 if (prev_ire != NULL) { 2896 if (prev_ire ->ire_flags & RTF_DYNAMIC) 2897 ire_delete(prev_ire); 2898 ire_refrele(prev_ire); 2899 } 2900 2901 freemsg(mp); 2902 } 2903 2904 /* 2905 * Generate an ICMP parameter problem message. 2906 * When called from ip_output side a minimal ip_recv_attr_t needs to be 2907 * constructed by the caller. 2908 */ 2909 static void 2910 icmp_param_problem(mblk_t *mp, uint8_t ptr, ip_recv_attr_t *ira) 2911 { 2912 icmph_t icmph; 2913 ip_stack_t *ipst = ira->ira_ill->ill_ipst; 2914 2915 mp = icmp_pkt_err_ok(mp, ira); 2916 if (mp == NULL) 2917 return; 2918 2919 bzero(&icmph, sizeof (icmph_t)); 2920 icmph.icmph_type = ICMP_PARAM_PROBLEM; 2921 icmph.icmph_pp_ptr = ptr; 2922 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutParmProbs); 2923 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira); 2924 } 2925 2926 /* 2927 * Build and ship an IPv4 ICMP message using the packet data in mp, and 2928 * the ICMP header pointed to by "stuff". (May be called as writer.) 2929 * Note: assumes that icmp_pkt_err_ok has been called to verify that 2930 * an icmp error packet can be sent. 2931 * Assigns an appropriate source address to the packet. If ipha_dst is 2932 * one of our addresses use it for source. Otherwise let ip_output_simple 2933 * pick the source address. 2934 */ 2935 static void 2936 icmp_pkt(mblk_t *mp, void *stuff, size_t len, ip_recv_attr_t *ira) 2937 { 2938 ipaddr_t dst; 2939 icmph_t *icmph; 2940 ipha_t *ipha; 2941 uint_t len_needed; 2942 size_t msg_len; 2943 mblk_t *mp1; 2944 ipaddr_t src; 2945 ire_t *ire; 2946 ip_xmit_attr_t ixas; 2947 ip_stack_t *ipst = ira->ira_ill->ill_ipst; 2948 2949 ipha = (ipha_t *)mp->b_rptr; 2950 2951 bzero(&ixas, sizeof (ixas)); 2952 ixas.ixa_flags = IXAF_BASIC_SIMPLE_V4; 2953 ixas.ixa_zoneid = ira->ira_zoneid; 2954 ixas.ixa_ifindex = 0; 2955 ixas.ixa_ipst = ipst; 2956 ixas.ixa_cred = kcred; 2957 ixas.ixa_cpid = NOPID; 2958 ixas.ixa_tsl = ira->ira_tsl; /* Behave as a multi-level responder */ 2959 ixas.ixa_multicast_ttl = IP_DEFAULT_MULTICAST_TTL; 2960 2961 if (ira->ira_flags & IRAF_IPSEC_SECURE) { 2962 /* 2963 * Apply IPsec based on how IPsec was applied to 2964 * the packet that had the error. 
2965 * 2966 * If it was an outbound packet that caused the ICMP 2967 * error, then the caller will have setup the IRA 2968 * appropriately. 2969 */ 2970 if (!ipsec_in_to_out(ira, &ixas, mp, ipha, NULL)) { 2971 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards); 2972 /* Note: mp already consumed and ip_drop_packet done */ 2973 return; 2974 } 2975 } else { 2976 /* 2977 * This is in clear. The icmp message we are building 2978 * here should go out in clear, independent of our policy. 2979 */ 2980 ixas.ixa_flags |= IXAF_NO_IPSEC; 2981 } 2982 2983 /* Remember our eventual destination */ 2984 dst = ipha->ipha_src; 2985 2986 /* 2987 * If the packet was for one of our unicast addresses, make 2988 * sure we respond with that as the source. Otherwise 2989 * have ip_output_simple pick the source address. 2990 */ 2991 ire = ire_ftable_lookup_v4(ipha->ipha_dst, 0, 0, 2992 (IRE_LOCAL|IRE_LOOPBACK), NULL, ira->ira_zoneid, NULL, 2993 MATCH_IRE_TYPE|MATCH_IRE_ZONEONLY, 0, ipst, NULL); 2994 if (ire != NULL) { 2995 ire_refrele(ire); 2996 src = ipha->ipha_dst; 2997 } else { 2998 src = INADDR_ANY; 2999 ixas.ixa_flags |= IXAF_SET_SOURCE; 3000 } 3001 3002 /* 3003 * Check if we can send back more then 8 bytes in addition to 3004 * the IP header. We try to send 64 bytes of data and the internal 3005 * header in the special cases of ipv4 encapsulated ipv4 or ipv6. 3006 */ 3007 len_needed = IPH_HDR_LENGTH(ipha); 3008 if (ipha->ipha_protocol == IPPROTO_ENCAP || 3009 ipha->ipha_protocol == IPPROTO_IPV6) { 3010 if (!pullupmsg(mp, -1)) { 3011 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards); 3012 ip_drop_output("ipIfStatsOutDiscards", mp, NULL); 3013 freemsg(mp); 3014 return; 3015 } 3016 ipha = (ipha_t *)mp->b_rptr; 3017 3018 if (ipha->ipha_protocol == IPPROTO_ENCAP) { 3019 len_needed += IPH_HDR_LENGTH(((uchar_t *)ipha + 3020 len_needed)); 3021 } else { 3022 ip6_t *ip6h = (ip6_t *)((uchar_t *)ipha + len_needed); 3023 3024 ASSERT(ipha->ipha_protocol == IPPROTO_IPV6); 3025 len_needed += ip_hdr_length_v6(mp, ip6h); 3026 } 3027 } 3028 len_needed += ipst->ips_ip_icmp_return; 3029 msg_len = msgdsize(mp); 3030 if (msg_len > len_needed) { 3031 (void) adjmsg(mp, len_needed - msg_len); 3032 msg_len = len_needed; 3033 } 3034 mp1 = allocb(sizeof (icmp_ipha) + len, BPRI_MED); 3035 if (mp1 == NULL) { 3036 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutErrors); 3037 freemsg(mp); 3038 return; 3039 } 3040 mp1->b_cont = mp; 3041 mp = mp1; 3042 3043 /* 3044 * Set IXAF_TRUSTED_ICMP so we can let the ICMP messages this 3045 * node generates be accepted in peace by all on-host destinations. 3046 * If we do NOT assume that all on-host destinations trust 3047 * self-generated ICMP messages, then rework here, ip6.c, and spd.c. 3048 * (Look for IXAF_TRUSTED_ICMP). 
3049 */ 3050 ixas.ixa_flags |= IXAF_TRUSTED_ICMP; 3051 3052 ipha = (ipha_t *)mp->b_rptr; 3053 mp1->b_wptr = (uchar_t *)ipha + (sizeof (icmp_ipha) + len); 3054 *ipha = icmp_ipha; 3055 ipha->ipha_src = src; 3056 ipha->ipha_dst = dst; 3057 ipha->ipha_ttl = ipst->ips_ip_def_ttl; 3058 msg_len += sizeof (icmp_ipha) + len; 3059 if (msg_len > IP_MAXPACKET) { 3060 (void) adjmsg(mp, IP_MAXPACKET - msg_len); 3061 msg_len = IP_MAXPACKET; 3062 } 3063 ipha->ipha_length = htons((uint16_t)msg_len); 3064 icmph = (icmph_t *)&ipha[1]; 3065 bcopy(stuff, icmph, len); 3066 icmph->icmph_checksum = 0; 3067 icmph->icmph_checksum = IP_CSUM(mp, (int32_t)sizeof (ipha_t), 0); 3068 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutMsgs); 3069 3070 (void) ip_output_simple(mp, &ixas); 3071 ixa_cleanup(&ixas); 3072 } 3073 3074 /* 3075 * Determine if an ICMP error packet can be sent given the rate limit. 3076 * The limit consists of an average frequency (icmp_pkt_err_interval measured 3077 * in milliseconds) and a burst size. Burst size number of packets can 3078 * be sent arbitrarely closely spaced. 3079 * The state is tracked using two variables to implement an approximate 3080 * token bucket filter: 3081 * icmp_pkt_err_last - lbolt value when the last burst started 3082 * icmp_pkt_err_sent - number of packets sent in current burst 3083 */ 3084 boolean_t 3085 icmp_err_rate_limit(ip_stack_t *ipst) 3086 { 3087 clock_t now = TICK_TO_MSEC(ddi_get_lbolt()); 3088 uint_t refilled; /* Number of packets refilled in tbf since last */ 3089 /* Guard against changes by loading into local variable */ 3090 uint_t err_interval = ipst->ips_ip_icmp_err_interval; 3091 3092 if (err_interval == 0) 3093 return (B_FALSE); 3094 3095 if (ipst->ips_icmp_pkt_err_last > now) { 3096 /* 100HZ lbolt in ms for 32bit arch wraps every 49.7 days */ 3097 ipst->ips_icmp_pkt_err_last = 0; 3098 ipst->ips_icmp_pkt_err_sent = 0; 3099 } 3100 /* 3101 * If we are in a burst update the token bucket filter. 3102 * Update the "last" time to be close to "now" but make sure 3103 * we don't loose precision. 3104 */ 3105 if (ipst->ips_icmp_pkt_err_sent != 0) { 3106 refilled = (now - ipst->ips_icmp_pkt_err_last)/err_interval; 3107 if (refilled > ipst->ips_icmp_pkt_err_sent) { 3108 ipst->ips_icmp_pkt_err_sent = 0; 3109 } else { 3110 ipst->ips_icmp_pkt_err_sent -= refilled; 3111 ipst->ips_icmp_pkt_err_last += refilled * err_interval; 3112 } 3113 } 3114 if (ipst->ips_icmp_pkt_err_sent == 0) { 3115 /* Start of new burst */ 3116 ipst->ips_icmp_pkt_err_last = now; 3117 } 3118 if (ipst->ips_icmp_pkt_err_sent < ipst->ips_ip_icmp_err_burst) { 3119 ipst->ips_icmp_pkt_err_sent++; 3120 ip1dbg(("icmp_err_rate_limit: %d sent in burst\n", 3121 ipst->ips_icmp_pkt_err_sent)); 3122 return (B_FALSE); 3123 } 3124 ip1dbg(("icmp_err_rate_limit: dropped\n")); 3125 return (B_TRUE); 3126 } 3127 3128 /* 3129 * Check if it is ok to send an IPv4 ICMP error packet in 3130 * response to the IPv4 packet in mp. 3131 * Free the message and return null if no 3132 * ICMP error packet should be sent. 
3133 */ 3134 static mblk_t * 3135 icmp_pkt_err_ok(mblk_t *mp, ip_recv_attr_t *ira) 3136 { 3137 ip_stack_t *ipst = ira->ira_ill->ill_ipst; 3138 icmph_t *icmph; 3139 ipha_t *ipha; 3140 uint_t len_needed; 3141 3142 if (!mp) 3143 return (NULL); 3144 ipha = (ipha_t *)mp->b_rptr; 3145 if (ip_csum_hdr(ipha)) { 3146 BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsInCksumErrs); 3147 ip_drop_input("ipIfStatsInCksumErrs", mp, NULL); 3148 freemsg(mp); 3149 return (NULL); 3150 } 3151 if (ip_type_v4(ipha->ipha_dst, ipst) == IRE_BROADCAST || 3152 ip_type_v4(ipha->ipha_src, ipst) == IRE_BROADCAST || 3153 CLASSD(ipha->ipha_dst) || 3154 CLASSD(ipha->ipha_src) || 3155 (ntohs(ipha->ipha_fragment_offset_and_flags) & IPH_OFFSET)) { 3156 /* Note: only errors to the fragment with offset 0 */ 3157 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops); 3158 freemsg(mp); 3159 return (NULL); 3160 } 3161 if (ipha->ipha_protocol == IPPROTO_ICMP) { 3162 /* 3163 * Check the ICMP type. RFC 1122 sez: don't send ICMP 3164 * errors in response to any ICMP errors. 3165 */ 3166 len_needed = IPH_HDR_LENGTH(ipha) + ICMPH_SIZE; 3167 if (mp->b_wptr - mp->b_rptr < len_needed) { 3168 if (!pullupmsg(mp, len_needed)) { 3169 BUMP_MIB(&ipst->ips_icmp_mib, icmpInErrors); 3170 freemsg(mp); 3171 return (NULL); 3172 } 3173 ipha = (ipha_t *)mp->b_rptr; 3174 } 3175 icmph = (icmph_t *) 3176 (&((char *)ipha)[IPH_HDR_LENGTH(ipha)]); 3177 switch (icmph->icmph_type) { 3178 case ICMP_DEST_UNREACHABLE: 3179 case ICMP_SOURCE_QUENCH: 3180 case ICMP_TIME_EXCEEDED: 3181 case ICMP_PARAM_PROBLEM: 3182 case ICMP_REDIRECT: 3183 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops); 3184 freemsg(mp); 3185 return (NULL); 3186 default: 3187 break; 3188 } 3189 } 3190 /* 3191 * If this is a labeled system, then check to see if we're allowed to 3192 * send a response to this particular sender. If not, then just drop. 3193 */ 3194 if (is_system_labeled() && !tsol_can_reply_error(mp, ira)) { 3195 ip2dbg(("icmp_pkt_err_ok: can't respond to packet\n")); 3196 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDrops); 3197 freemsg(mp); 3198 return (NULL); 3199 } 3200 if (icmp_err_rate_limit(ipst)) { 3201 /* 3202 * Only send ICMP error packets every so often. 3203 * This should be done on a per port/source basis, 3204 * but for now this will suffice. 3205 */ 3206 freemsg(mp); 3207 return (NULL); 3208 } 3209 return (mp); 3210 } 3211 3212 /* 3213 * Called when a packet was sent out the same link that it arrived on. 3214 * Check if it is ok to send a redirect and then send it. 3215 */ 3216 void 3217 ip_send_potential_redirect_v4(mblk_t *mp, ipha_t *ipha, ire_t *ire, 3218 ip_recv_attr_t *ira) 3219 { 3220 ip_stack_t *ipst = ira->ira_ill->ill_ipst; 3221 ipaddr_t src, nhop; 3222 mblk_t *mp1; 3223 ire_t *nhop_ire; 3224 3225 /* 3226 * Check the source address to see if it originated 3227 * on the same logical subnet it is going back out on. 3228 * If so, we should be able to send it a redirect. 3229 * Avoid sending a redirect if the destination 3230 * is directly connected (i.e., we matched an IRE_ONLINK), 3231 * or if the packet was source routed out this interface. 3232 * 3233 * We avoid sending a redirect if the 3234 * destination is directly connected 3235 * because it is possible that multiple 3236 * IP subnets may have been configured on 3237 * the link, and the source may not 3238 * be on the same subnet as ip destination, 3239 * even though they are on the same 3240 * physical link. 
3241 */ 3242 if ((ire->ire_type & IRE_ONLINK) || 3243 ip_source_routed(ipha, ipst)) 3244 return; 3245 3246 nhop_ire = ire_nexthop(ire); 3247 if (nhop_ire == NULL) 3248 return; 3249 3250 nhop = nhop_ire->ire_addr; 3251 3252 if (nhop_ire->ire_type & IRE_IF_CLONE) { 3253 ire_t *ire2; 3254 3255 /* Follow ire_dep_parent to find non-clone IRE_INTERFACE */ 3256 mutex_enter(&nhop_ire->ire_lock); 3257 ire2 = nhop_ire->ire_dep_parent; 3258 if (ire2 != NULL) 3259 ire_refhold(ire2); 3260 mutex_exit(&nhop_ire->ire_lock); 3261 ire_refrele(nhop_ire); 3262 nhop_ire = ire2; 3263 } 3264 if (nhop_ire == NULL) 3265 return; 3266 3267 ASSERT(!(nhop_ire->ire_type & IRE_IF_CLONE)); 3268 3269 src = ipha->ipha_src; 3270 3271 /* 3272 * We look at the interface ire for the nexthop, 3273 * to see if ipha_src is in the same subnet 3274 * as the nexthop. 3275 */ 3276 if ((src & nhop_ire->ire_mask) == (nhop & nhop_ire->ire_mask)) { 3277 /* 3278 * The source is directly connected. 3279 */ 3280 mp1 = copymsg(mp); 3281 if (mp1 != NULL) { 3282 icmp_send_redirect(mp1, nhop, ira); 3283 } 3284 } 3285 ire_refrele(nhop_ire); 3286 } 3287 3288 /* 3289 * Generate an ICMP redirect message. 3290 */ 3291 static void 3292 icmp_send_redirect(mblk_t *mp, ipaddr_t gateway, ip_recv_attr_t *ira) 3293 { 3294 icmph_t icmph; 3295 ip_stack_t *ipst = ira->ira_ill->ill_ipst; 3296 3297 mp = icmp_pkt_err_ok(mp, ira); 3298 if (mp == NULL) 3299 return; 3300 3301 bzero(&icmph, sizeof (icmph_t)); 3302 icmph.icmph_type = ICMP_REDIRECT; 3303 icmph.icmph_code = 1; 3304 icmph.icmph_rd_gateway = gateway; 3305 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutRedirects); 3306 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira); 3307 } 3308 3309 /* 3310 * Generate an ICMP time exceeded message. 3311 */ 3312 void 3313 icmp_time_exceeded(mblk_t *mp, uint8_t code, ip_recv_attr_t *ira) 3314 { 3315 icmph_t icmph; 3316 ip_stack_t *ipst = ira->ira_ill->ill_ipst; 3317 3318 mp = icmp_pkt_err_ok(mp, ira); 3319 if (mp == NULL) 3320 return; 3321 3322 bzero(&icmph, sizeof (icmph_t)); 3323 icmph.icmph_type = ICMP_TIME_EXCEEDED; 3324 icmph.icmph_code = code; 3325 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutTimeExcds); 3326 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira); 3327 } 3328 3329 /* 3330 * Generate an ICMP unreachable message. 3331 * When called from ip_output side a minimal ip_recv_attr_t needs to be 3332 * constructed by the caller. 3333 */ 3334 void 3335 icmp_unreachable(mblk_t *mp, uint8_t code, ip_recv_attr_t *ira) 3336 { 3337 icmph_t icmph; 3338 ip_stack_t *ipst = ira->ira_ill->ill_ipst; 3339 3340 mp = icmp_pkt_err_ok(mp, ira); 3341 if (mp == NULL) 3342 return; 3343 3344 bzero(&icmph, sizeof (icmph_t)); 3345 icmph.icmph_type = ICMP_DEST_UNREACHABLE; 3346 icmph.icmph_code = code; 3347 BUMP_MIB(&ipst->ips_icmp_mib, icmpOutDestUnreachs); 3348 icmp_pkt(mp, &icmph, sizeof (icmph_t), ira); 3349 } 3350 3351 /* 3352 * Latch in the IPsec state for a stream based the policy in the listener 3353 * and the actions in the ip_recv_attr_t. 3354 * Called directly from TCP and SCTP. 
3355 */ 3356 boolean_t 3357 ip_ipsec_policy_inherit(conn_t *connp, conn_t *lconnp, ip_recv_attr_t *ira) 3358 { 3359 ASSERT(lconnp->conn_policy != NULL); 3360 ASSERT(connp->conn_policy == NULL); 3361 3362 IPPH_REFHOLD(lconnp->conn_policy); 3363 connp->conn_policy = lconnp->conn_policy; 3364 3365 if (ira->ira_ipsec_action != NULL) { 3366 if (connp->conn_latch == NULL) { 3367 connp->conn_latch = iplatch_create(); 3368 if (connp->conn_latch == NULL) 3369 return (B_FALSE); 3370 } 3371 ipsec_latch_inbound(connp, ira); 3372 } 3373 return (B_TRUE); 3374 } 3375 3376 /* 3377 * Verify whether or not the IP address is a valid local address. 3378 * Could be a unicast, including one for a down interface. 3379 * If allow_mcbc then a multicast or broadcast address is also 3380 * acceptable. 3381 * 3382 * In the case of a broadcast/multicast address, however, the 3383 * upper protocol is expected to reset the src address 3384 * to zero when we return IPVL_MCAST/IPVL_BCAST so that 3385 * no packets are emitted with broadcast/multicast address as 3386 * source address (that violates hosts requirements RFC 1122) 3387 * The addresses valid for bind are: 3388 * (1) - INADDR_ANY (0) 3389 * (2) - IP address of an UP interface 3390 * (3) - IP address of a DOWN interface 3391 * (4) - valid local IP broadcast addresses. In this case 3392 * the conn will only receive packets destined to 3393 * the specified broadcast address. 3394 * (5) - a multicast address. In this case 3395 * the conn will only receive packets destined to 3396 * the specified multicast address. Note: the 3397 * application still has to issue an 3398 * IP_ADD_MEMBERSHIP socket option. 3399 * 3400 * In all the above cases, the bound address must be valid in the current zone. 3401 * When the address is loopback, multicast or broadcast, there might be many 3402 * matching IREs so bind has to look up based on the zone. 3403 */ 3404 ip_laddr_t 3405 ip_laddr_verify_v4(ipaddr_t src_addr, zoneid_t zoneid, 3406 ip_stack_t *ipst, boolean_t allow_mcbc) 3407 { 3408 ire_t *src_ire; 3409 3410 ASSERT(src_addr != INADDR_ANY); 3411 3412 src_ire = ire_ftable_lookup_v4(src_addr, 0, 0, 0, 3413 NULL, zoneid, NULL, MATCH_IRE_ZONEONLY, 0, ipst, NULL); 3414 3415 /* 3416 * If an address other than in6addr_any is requested, 3417 * we verify that it is a valid address for bind 3418 * Note: Following code is in if-else-if form for 3419 * readability compared to a condition check. 3420 */ 3421 if (src_ire != NULL && (src_ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK))) { 3422 /* 3423 * (2) Bind to address of local UP interface 3424 */ 3425 ire_refrele(src_ire); 3426 return (IPVL_UNICAST_UP); 3427 } else if (src_ire != NULL && src_ire->ire_type & IRE_BROADCAST) { 3428 /* 3429 * (4) Bind to broadcast address 3430 */ 3431 ire_refrele(src_ire); 3432 if (allow_mcbc) 3433 return (IPVL_BCAST); 3434 else 3435 return (IPVL_BAD); 3436 } else if (CLASSD(src_addr)) { 3437 /* (5) bind to multicast address. */ 3438 if (src_ire != NULL) 3439 ire_refrele(src_ire); 3440 3441 if (allow_mcbc) 3442 return (IPVL_MCAST); 3443 else 3444 return (IPVL_BAD); 3445 } else { 3446 ipif_t *ipif; 3447 3448 /* 3449 * (3) Bind to address of local DOWN interface? 3450 * (ipif_lookup_addr() looks up all interfaces 3451 * but we do not get here for UP interfaces 3452 * - case (2) above) 3453 */ 3454 if (src_ire != NULL) 3455 ire_refrele(src_ire); 3456 3457 ipif = ipif_lookup_addr(src_addr, NULL, zoneid, ipst); 3458 if (ipif == NULL) 3459 return (IPVL_BAD); 3460 3461 /* Not a useful source? 
*/ 3462 if (ipif->ipif_flags & (IPIF_NOLOCAL | IPIF_ANYCAST)) { 3463 ipif_refrele(ipif); 3464 return (IPVL_BAD); 3465 } 3466 ipif_refrele(ipif); 3467 return (IPVL_UNICAST_DOWN); 3468 } 3469 } 3470 3471 /* 3472 * Insert in the bind fanout for IPv4 and IPv6. 3473 * The caller should already have used ip_laddr_verify_v*() before calling 3474 * this. 3475 */ 3476 int 3477 ip_laddr_fanout_insert(conn_t *connp) 3478 { 3479 int error; 3480 3481 /* 3482 * Allow setting new policies. For example, disconnects result 3483 * in us being called. As we would have set conn_policy_cached 3484 * to B_TRUE before, we should set it to B_FALSE, so that policy 3485 * can change after the disconnect. 3486 */ 3487 connp->conn_policy_cached = B_FALSE; 3488 3489 error = ipcl_bind_insert(connp); 3490 if (error != 0) { 3491 if (connp->conn_anon_port) { 3492 (void) tsol_mlp_anon(crgetzone(connp->conn_cred), 3493 connp->conn_mlp_type, connp->conn_proto, 3494 ntohs(connp->conn_lport), B_FALSE); 3495 } 3496 connp->conn_mlp_type = mlptSingle; 3497 } 3498 return (error); 3499 } 3500 3501 /* 3502 * Verify that both the source and destination addresses are valid. If 3503 * IPDF_VERIFY_DST is not set, then the destination address may be unreachable, 3504 * i.e. have no route to it. Protocols like TCP want to verify destination 3505 * reachability, while tunnels do not. 3506 * 3507 * Determine the route, the interface, and (optionally) the source address 3508 * to use to reach a given destination. 3509 * Note that we allow connect to broadcast and multicast addresses when 3510 * IPDF_ALLOW_MCBC is set. 3511 * first_hop and dst_addr are normally the same, but if source routing 3512 * they will differ; in that case the first_hop is what we'll use for the 3513 * routing lookup but the dce and label checks will be done on dst_addr, 3514 * 3515 * If uinfo is set, then we fill in the best available information 3516 * we have for the destination. This is based on (in priority order) any 3517 * metrics and path MTU stored in a dce_t, route metrics, and finally the 3518 * ill_mtu. 3519 * 3520 * Tsol note: If we have a source route then dst_addr != firsthop. But we 3521 * always do the label check on dst_addr. 3522 */ 3523 int 3524 ip_set_destination_v4(ipaddr_t *src_addrp, ipaddr_t dst_addr, ipaddr_t firsthop, 3525 ip_xmit_attr_t *ixa, iulp_t *uinfo, uint32_t flags, uint_t mac_mode) 3526 { 3527 ire_t *ire = NULL; 3528 int error = 0; 3529 ipaddr_t setsrc; /* RTF_SETSRC */ 3530 zoneid_t zoneid = ixa->ixa_zoneid; /* Honors SO_ALLZONES */ 3531 ip_stack_t *ipst = ixa->ixa_ipst; 3532 dce_t *dce; 3533 uint_t pmtu; 3534 uint_t generation; 3535 nce_t *nce; 3536 ill_t *ill = NULL; 3537 boolean_t multirt = B_FALSE; 3538 3539 ASSERT(ixa->ixa_flags & IXAF_IS_IPV4); 3540 3541 /* 3542 * We never send to zero; the ULPs map it to the loopback address. 3543 * We can't allow it since we use zero to mean unitialized in some 3544 * places. 3545 */ 3546 ASSERT(dst_addr != INADDR_ANY); 3547 3548 if (is_system_labeled()) { 3549 ts_label_t *tsl = NULL; 3550 3551 error = tsol_check_dest(ixa->ixa_tsl, &dst_addr, IPV4_VERSION, 3552 mac_mode, (flags & IPDF_ZONE_IS_GLOBAL) != 0, &tsl); 3553 if (error != 0) 3554 return (error); 3555 if (tsl != NULL) { 3556 /* Update the label */ 3557 ip_xmit_attr_replace_tsl(ixa, tsl); 3558 } 3559 } 3560 3561 setsrc = INADDR_ANY; 3562 /* 3563 * Select a route; For IPMP interfaces, we would only select 3564 * a "hidden" route (i.e., going through a specific under_ill) 3565 * if ixa_ifindex has been specified. 
3566 */ 3567 ire = ip_select_route_v4(firsthop, ixa, &generation, &setsrc, &error, 3568 &multirt); 3569 ASSERT(ire != NULL); /* IRE_NOROUTE if none found */ 3570 if (error != 0) 3571 goto bad_addr; 3572 3573 /* 3574 * ire can't be a broadcast or multicast unless IPDF_ALLOW_MCBC is set. 3575 * If IPDF_VERIFY_DST is set, the destination must be reachable; 3576 * Otherwise the destination needn't be reachable. 3577 * 3578 * If we match on a reject or black hole, then we've got a 3579 * local failure. May as well fail out the connect() attempt, 3580 * since it's never going to succeed. 3581 */ 3582 if (ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE)) { 3583 /* 3584 * If we're verifying destination reachability, we always want 3585 * to complain here. 3586 * 3587 * If we're not verifying destination reachability but the 3588 * destination has a route, we still want to fail on the 3589 * temporary address and broadcast address tests. 3590 * 3591 * In both cases do we let the code continue so some reasonable 3592 * information is returned to the caller. That enables the 3593 * caller to use (and even cache) the IRE. conn_ip_ouput will 3594 * use the generation mismatch path to check for the unreachable 3595 * case thereby avoiding any specific check in the main path. 3596 */ 3597 ASSERT(generation == IRE_GENERATION_VERIFY); 3598 if (flags & IPDF_VERIFY_DST) { 3599 /* 3600 * Set errno but continue to set up ixa_ire to be 3601 * the RTF_REJECT|RTF_BLACKHOLE IRE. 3602 * That allows callers to use ip_output to get an 3603 * ICMP error back. 3604 */ 3605 if (!(ire->ire_type & IRE_HOST)) 3606 error = ENETUNREACH; 3607 else 3608 error = EHOSTUNREACH; 3609 } 3610 } 3611 3612 if ((ire->ire_type & (IRE_BROADCAST|IRE_MULTICAST)) && 3613 !(flags & IPDF_ALLOW_MCBC)) { 3614 ire_refrele(ire); 3615 ire = ire_reject(ipst, B_FALSE); 3616 generation = IRE_GENERATION_VERIFY; 3617 error = ENETUNREACH; 3618 } 3619 3620 /* Cache things */ 3621 if (ixa->ixa_ire != NULL) 3622 ire_refrele_notr(ixa->ixa_ire); 3623 #ifdef DEBUG 3624 ire_refhold_notr(ire); 3625 ire_refrele(ire); 3626 #endif 3627 ixa->ixa_ire = ire; 3628 ixa->ixa_ire_generation = generation; 3629 3630 /* 3631 * For multicast with multirt we have a flag passed back from 3632 * ire_lookup_multi_ill_v4 since we don't have an IRE for each 3633 * possible multicast address. 3634 * We also need a flag for multicast since we can't check 3635 * whether RTF_MULTIRT is set in ixa_ire for multicast. 3636 */ 3637 if (multirt) { 3638 ixa->ixa_postfragfn = ip_postfrag_multirt_v4; 3639 ixa->ixa_flags |= IXAF_MULTIRT_MULTICAST; 3640 } else { 3641 ixa->ixa_postfragfn = ire->ire_postfragfn; 3642 ixa->ixa_flags &= ~IXAF_MULTIRT_MULTICAST; 3643 } 3644 if (!(ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))) { 3645 /* Get an nce to cache. */ 3646 nce = ire_to_nce(ire, firsthop, NULL); 3647 if (nce == NULL) { 3648 /* Allocation failure? */ 3649 ixa->ixa_ire_generation = IRE_GENERATION_VERIFY; 3650 } else { 3651 if (ixa->ixa_nce != NULL) 3652 nce_refrele(ixa->ixa_nce); 3653 ixa->ixa_nce = nce; 3654 } 3655 } 3656 3657 /* 3658 * If the source address is a loopback address, the 3659 * destination had best be local or multicast. 3660 * If we are sending to an IRE_LOCAL using a loopback source then 3661 * it had better be the same zoneid. 
3662 */ 3663 if (*src_addrp == htonl(INADDR_LOOPBACK)) { 3664 if ((ire->ire_type & IRE_LOCAL) && ire->ire_zoneid != zoneid) { 3665 ire = NULL; /* Stored in ixa_ire */ 3666 error = EADDRNOTAVAIL; 3667 goto bad_addr; 3668 } 3669 if (!(ire->ire_type & (IRE_LOOPBACK|IRE_LOCAL|IRE_MULTICAST))) { 3670 ire = NULL; /* Stored in ixa_ire */ 3671 error = EADDRNOTAVAIL; 3672 goto bad_addr; 3673 } 3674 } 3675 if (ire->ire_type & IRE_BROADCAST) { 3676 /* 3677 * If the ULP didn't have a specified source, then we 3678 * make sure we reselect the source when sending 3679 * broadcasts out different interfaces. 3680 */ 3681 if (flags & IPDF_SELECT_SRC) 3682 ixa->ixa_flags |= IXAF_SET_SOURCE; 3683 else 3684 ixa->ixa_flags &= ~IXAF_SET_SOURCE; 3685 } 3686 3687 /* 3688 * Does the caller want us to pick a source address? 3689 */ 3690 if (flags & IPDF_SELECT_SRC) { 3691 ipaddr_t src_addr; 3692 3693 /* 3694 * We use use ire_nexthop_ill to avoid the under ipmp 3695 * interface for source address selection. Note that for ipmp 3696 * probe packets, ixa_ifindex would have been specified, and 3697 * the ip_select_route() invocation would have picked an ire 3698 * will ire_ill pointing at an under interface. 3699 */ 3700 ill = ire_nexthop_ill(ire); 3701 3702 /* If unreachable we have no ill but need some source */ 3703 if (ill == NULL) { 3704 src_addr = htonl(INADDR_LOOPBACK); 3705 /* Make sure we look for a better source address */ 3706 generation = SRC_GENERATION_VERIFY; 3707 } else { 3708 error = ip_select_source_v4(ill, setsrc, dst_addr, 3709 ixa->ixa_multicast_ifaddr, zoneid, 3710 ipst, &src_addr, &generation, NULL); 3711 if (error != 0) { 3712 ire = NULL; /* Stored in ixa_ire */ 3713 goto bad_addr; 3714 } 3715 } 3716 3717 /* 3718 * We allow the source address to to down. 3719 * However, we check that we don't use the loopback address 3720 * as a source when sending out on the wire. 3721 */ 3722 if ((src_addr == htonl(INADDR_LOOPBACK)) && 3723 !(ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK|IRE_MULTICAST)) && 3724 !(ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE))) { 3725 ire = NULL; /* Stored in ixa_ire */ 3726 error = EADDRNOTAVAIL; 3727 goto bad_addr; 3728 } 3729 3730 *src_addrp = src_addr; 3731 ixa->ixa_src_generation = generation; 3732 } 3733 3734 if (flags & IPDF_UNIQUE_DCE) { 3735 /* Fallback to the default dce if allocation fails */ 3736 dce = dce_lookup_and_add_v4(dst_addr, ipst); 3737 if (dce != NULL) 3738 generation = dce->dce_generation; 3739 else 3740 dce = dce_lookup_v4(dst_addr, ipst, &generation); 3741 } else { 3742 dce = dce_lookup_v4(dst_addr, ipst, &generation); 3743 } 3744 ASSERT(dce != NULL); 3745 if (ixa->ixa_dce != NULL) 3746 dce_refrele_notr(ixa->ixa_dce); 3747 #ifdef DEBUG 3748 dce_refhold_notr(dce); 3749 dce_refrele(dce); 3750 #endif 3751 ixa->ixa_dce = dce; 3752 ixa->ixa_dce_generation = generation; 3753 3754 /* 3755 * Make sure we don't leave an unreachable ixa_nce in place 3756 * since ip_select_route is used when we unplumb i.e., remove 3757 * references on ixa_ire, ixa_nce, and ixa_dce. 3758 */ 3759 nce = ixa->ixa_nce; 3760 if (nce != NULL && nce->nce_is_condemned) { 3761 nce_refrele(nce); 3762 ixa->ixa_nce = NULL; 3763 ixa->ixa_ire_generation = IRE_GENERATION_VERIFY; 3764 } 3765 3766 /* 3767 * The caller has set IXAF_PMTU_DISCOVERY if path MTU is desired. 3768 * However, we can't do it for IPv4 multicast or broadcast. 3769 */ 3770 if (ire->ire_type & (IRE_BROADCAST|IRE_MULTICAST)) 3771 ixa->ixa_flags &= ~IXAF_PMTU_DISCOVERY; 3772 3773 /* 3774 * Set initial value for fragmentation limit. 
Either conn_ip_output 3775 * or ULP might update it when there are routing changes. 3776 * Handles a NULL ixa_ire->ire_ill or a NULL ixa_nce for RTF_REJECT. 3777 */ 3778 pmtu = ip_get_pmtu(ixa); 3779 ixa->ixa_fragsize = pmtu; 3780 /* Make sure ixa_fragsize and ixa_pmtu remain identical */ 3781 if (ixa->ixa_flags & IXAF_VERIFY_PMTU) 3782 ixa->ixa_pmtu = pmtu; 3783 3784 /* 3785 * Extract information useful for some transports. 3786 * First we look for DCE metrics. Then we take what we have in 3787 * the metrics in the route, where the offlink is used if we have 3788 * one. 3789 */ 3790 if (uinfo != NULL) { 3791 bzero(uinfo, sizeof (*uinfo)); 3792 3793 if (dce->dce_flags & DCEF_UINFO) 3794 *uinfo = dce->dce_uinfo; 3795 3796 rts_merge_metrics(uinfo, &ire->ire_metrics); 3797 3798 /* Allow ire_metrics to decrease the path MTU from above */ 3799 if (uinfo->iulp_mtu == 0 || uinfo->iulp_mtu > pmtu) 3800 uinfo->iulp_mtu = pmtu; 3801 3802 uinfo->iulp_localnet = (ire->ire_type & IRE_ONLINK) != 0; 3803 uinfo->iulp_loopback = (ire->ire_type & IRE_LOOPBACK) != 0; 3804 uinfo->iulp_local = (ire->ire_type & IRE_LOCAL) != 0; 3805 } 3806 3807 if (ill != NULL) 3808 ill_refrele(ill); 3809 3810 return (error); 3811 3812 bad_addr: 3813 if (ire != NULL) 3814 ire_refrele(ire); 3815 3816 if (ill != NULL) 3817 ill_refrele(ill); 3818 3819 /* 3820 * Make sure we don't leave an unreachable ixa_nce in place 3821 * since ip_select_route is used when we unplumb i.e., remove 3822 * references on ixa_ire, ixa_nce, and ixa_dce. 3823 */ 3824 nce = ixa->ixa_nce; 3825 if (nce != NULL && nce->nce_is_condemned) { 3826 nce_refrele(nce); 3827 ixa->ixa_nce = NULL; 3828 ixa->ixa_ire_generation = IRE_GENERATION_VERIFY; 3829 } 3830 3831 return (error); 3832 } 3833 3834 3835 /* 3836 * Get the base MTU for the case when path MTU discovery is not used. 3837 * Takes the MTU of the IRE into account. 3838 */ 3839 uint_t 3840 ip_get_base_mtu(ill_t *ill, ire_t *ire) 3841 { 3842 uint_t mtu = ill->ill_mtu; 3843 uint_t iremtu = ire->ire_metrics.iulp_mtu; 3844 3845 if (iremtu != 0 && iremtu < mtu) 3846 mtu = iremtu; 3847 3848 return (mtu); 3849 } 3850 3851 /* 3852 * Get the PMTU for the attributes. Handles both IPv4 and IPv6. 3853 * Assumes that ixa_ire, dce, and nce have already been set up. 3854 * 3855 * The caller has set IXAF_PMTU_DISCOVERY if path MTU discovery is desired. 3856 * We avoid path MTU discovery if it is disabled with ndd. 3857 * Furthermore, if the path MTU is too small, then we don't set DF for IPv4. 3858 * 3859 * NOTE: We also used to turn it off for source routed packets. That 3860 * is no longer required since the dce is per final destination. 3861 */ 3862 uint_t 3863 ip_get_pmtu(ip_xmit_attr_t *ixa) 3864 { 3865 ip_stack_t *ipst = ixa->ixa_ipst; 3866 dce_t *dce; 3867 nce_t *nce; 3868 ire_t *ire; 3869 uint_t pmtu; 3870 3871 ire = ixa->ixa_ire; 3872 dce = ixa->ixa_dce; 3873 nce = ixa->ixa_nce; 3874 3875 /* 3876 * If path MTU discovery has been turned off by ndd, then we ignore 3877 * any dce_pmtu and for IPv4 we will not set DF. 3878
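 *
 * (In outline, the value computed below is approximately
 *
 *	pmtu = min(IP_MAXPACKET, ire_metrics.iulp_mtu, dce_pmtu,
 *	    interface or loopback mtu)
 *
 * where dce_pmtu only participates when IXAF_PMTU_DISCOVERY is set
 * and the entry is fresh; IXAF_PMTU_IPV4_DF and IXAF_PMTU_TOO_SMALL
 * are adjusted along the way.)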
3878 */ 3879 if (!ipst->ips_ip_path_mtu_discovery) 3880 ixa->ixa_flags &= ~IXAF_PMTU_DISCOVERY; 3881 3882 pmtu = IP_MAXPACKET; 3883 /* 3884 * Decide whether whether IPv4 sets DF 3885 * For IPv6 "no DF" means to use the 1280 mtu 3886 */ 3887 if (ixa->ixa_flags & IXAF_PMTU_DISCOVERY) { 3888 ixa->ixa_flags |= IXAF_PMTU_IPV4_DF; 3889 } else { 3890 ixa->ixa_flags &= ~IXAF_PMTU_IPV4_DF; 3891 if (!(ixa->ixa_flags & IXAF_IS_IPV4)) 3892 pmtu = IPV6_MIN_MTU; 3893 } 3894 3895 /* Check if the PMTU is to old before we use it */ 3896 if ((dce->dce_flags & DCEF_PMTU) && 3897 TICK_TO_SEC(ddi_get_lbolt64()) - dce->dce_last_change_time > 3898 ipst->ips_ip_pathmtu_interval) { 3899 /* 3900 * Older than 20 minutes. Drop the path MTU information. 3901 */ 3902 mutex_enter(&dce->dce_lock); 3903 dce->dce_flags &= ~(DCEF_PMTU|DCEF_TOO_SMALL_PMTU); 3904 dce->dce_last_change_time = TICK_TO_SEC(ddi_get_lbolt64()); 3905 mutex_exit(&dce->dce_lock); 3906 dce_increment_generation(dce); 3907 } 3908 3909 /* The metrics on the route can lower the path MTU */ 3910 if (ire->ire_metrics.iulp_mtu != 0 && 3911 ire->ire_metrics.iulp_mtu < pmtu) 3912 pmtu = ire->ire_metrics.iulp_mtu; 3913 3914 /* 3915 * If the path MTU is smaller than some minimum, we still use dce_pmtu 3916 * above (would be 576 for IPv4 and 1280 for IPv6), but we clear 3917 * IXAF_PMTU_IPV4_DF so that we avoid setting DF for IPv4. 3918 */ 3919 if (ixa->ixa_flags & IXAF_PMTU_DISCOVERY) { 3920 if (dce->dce_flags & DCEF_PMTU) { 3921 if (dce->dce_pmtu < pmtu) 3922 pmtu = dce->dce_pmtu; 3923 3924 if (dce->dce_flags & DCEF_TOO_SMALL_PMTU) { 3925 ixa->ixa_flags |= IXAF_PMTU_TOO_SMALL; 3926 ixa->ixa_flags &= ~IXAF_PMTU_IPV4_DF; 3927 } else { 3928 ixa->ixa_flags &= ~IXAF_PMTU_TOO_SMALL; 3929 ixa->ixa_flags |= IXAF_PMTU_IPV4_DF; 3930 } 3931 } else { 3932 ixa->ixa_flags &= ~IXAF_PMTU_TOO_SMALL; 3933 ixa->ixa_flags |= IXAF_PMTU_IPV4_DF; 3934 } 3935 } 3936 3937 /* 3938 * If we have an IRE_LOCAL we use the loopback mtu instead of 3939 * the ill for going out the wire i.e., IRE_LOCAL gets the same 3940 * mtu as IRE_LOOPBACK. 3941 */ 3942 if (ire->ire_type & (IRE_LOCAL|IRE_LOOPBACK)) { 3943 uint_t loopback_mtu; 3944 3945 loopback_mtu = (ire->ire_ipversion == IPV6_VERSION) ? 3946 ip_loopback_mtu_v6plus : ip_loopback_mtuplus; 3947 3948 if (loopback_mtu < pmtu) 3949 pmtu = loopback_mtu; 3950 } else if (nce != NULL) { 3951 /* 3952 * Make sure we don't exceed the interface MTU. 3953 * In the case of RTF_REJECT or RTF_BLACKHOLE we might not have 3954 * an ill. We'd use the above IP_MAXPACKET in that case just 3955 * to tell the transport something larger than zero. 3956 */ 3957 if (nce->nce_common->ncec_ill->ill_mtu < pmtu) 3958 pmtu = nce->nce_common->ncec_ill->ill_mtu; 3959 if (nce->nce_common->ncec_ill != nce->nce_ill && 3960 nce->nce_ill->ill_mtu < pmtu) { 3961 /* 3962 * for interfaces in an IPMP group, the mtu of 3963 * the nce_ill (under_ill) could be different 3964 * from the mtu of the ncec_ill, so we take the 3965 * min of the two. 3966 */ 3967 pmtu = nce->nce_ill->ill_mtu; 3968 } 3969 } 3970 3971 /* 3972 * Handle the IPV6_USE_MIN_MTU socket option or ancillary data. 3973 * Only applies to IPv6. 
3974 */ 3975 if (!(ixa->ixa_flags & IXAF_IS_IPV4)) { 3976 if (ixa->ixa_flags & IXAF_USE_MIN_MTU) { 3977 switch (ixa->ixa_use_min_mtu) { 3978 case IPV6_USE_MIN_MTU_MULTICAST: 3979 if (ire->ire_type & IRE_MULTICAST) 3980 pmtu = IPV6_MIN_MTU; 3981 break; 3982 case IPV6_USE_MIN_MTU_ALWAYS: 3983 pmtu = IPV6_MIN_MTU; 3984 break; 3985 case IPV6_USE_MIN_MTU_NEVER: 3986 break; 3987 } 3988 } else { 3989 /* Default is IPV6_USE_MIN_MTU_MULTICAST */ 3990 if (ire->ire_type & IRE_MULTICAST) 3991 pmtu = IPV6_MIN_MTU; 3992 } 3993 } 3994 3995 /* 3996 * After receiving an ICMPv6 "packet too big" message with a 3997 * MTU < 1280, and for multirouted IPv6 packets, the IP layer 3998 * will insert a 8-byte fragment header in every packet. We compensate 3999 * for those cases by returning a smaller path MTU to the ULP. 4000 * 4001 * In the case of CGTP then ip_output will add a fragment header. 4002 * Make sure there is room for it by telling a smaller number 4003 * to the transport. 4004 * 4005 * When IXAF_IPV6_ADDR_FRAGHDR we subtract the frag hdr here 4006 * so the ULPs consistently see a iulp_pmtu and ip_get_pmtu() 4007 * which is the size of the packets it can send. 4008 */ 4009 if (!(ixa->ixa_flags & IXAF_IS_IPV4)) { 4010 if ((dce->dce_flags & DCEF_TOO_SMALL_PMTU) || 4011 (ire->ire_flags & RTF_MULTIRT) || 4012 (ixa->ixa_flags & IXAF_MULTIRT_MULTICAST)) { 4013 pmtu -= sizeof (ip6_frag_t); 4014 ixa->ixa_flags |= IXAF_IPV6_ADD_FRAGHDR; 4015 } 4016 } 4017 4018 return (pmtu); 4019 } 4020 4021 /* 4022 * Carve "len" bytes out of an mblk chain, consuming any we empty, and duping 4023 * the final piece where we don't. Return a pointer to the first mblk in the 4024 * result, and update the pointer to the next mblk to chew on. If anything 4025 * goes wrong (i.e., dupb fails), we waste everything in sight and return a 4026 * NULL pointer. 4027 */ 4028 mblk_t * 4029 ip_carve_mp(mblk_t **mpp, ssize_t len) 4030 { 4031 mblk_t *mp0; 4032 mblk_t *mp1; 4033 mblk_t *mp2; 4034 4035 if (!len || !mpp || !(mp0 = *mpp)) 4036 return (NULL); 4037 /* If we aren't going to consume the first mblk, we need a dup. */ 4038 if (mp0->b_wptr - mp0->b_rptr > len) { 4039 mp1 = dupb(mp0); 4040 if (mp1) { 4041 /* Partition the data between the two mblks. */ 4042 mp1->b_wptr = mp1->b_rptr + len; 4043 mp0->b_rptr = mp1->b_wptr; 4044 /* 4045 * after adjustments if mblk not consumed is now 4046 * unaligned, try to align it. If this fails free 4047 * all messages and let upper layer recover. 4048 */ 4049 if (!OK_32PTR(mp0->b_rptr)) { 4050 if (!pullupmsg(mp0, -1)) { 4051 freemsg(mp0); 4052 freemsg(mp1); 4053 *mpp = NULL; 4054 return (NULL); 4055 } 4056 } 4057 } 4058 return (mp1); 4059 } 4060 /* Eat through as many mblks as we need to get len bytes. */ 4061 len -= mp0->b_wptr - mp0->b_rptr; 4062 for (mp2 = mp1 = mp0; (mp2 = mp2->b_cont) != 0 && len; mp1 = mp2) { 4063 if (mp2->b_wptr - mp2->b_rptr > len) { 4064 /* 4065 * We won't consume the entire last mblk. Like 4066 * above, dup and partition it. 4067 */ 4068 mp1->b_cont = dupb(mp2); 4069 mp1 = mp1->b_cont; 4070 if (!mp1) { 4071 /* 4072 * Trouble. Rather than go to a lot of 4073 * trouble to clean up, we free the messages. 4074 * This won't be any worse than losing it on 4075 * the wire. 4076 */ 4077 freemsg(mp0); 4078 freemsg(mp2); 4079 *mpp = NULL; 4080 return (NULL); 4081 } 4082 mp1->b_wptr = mp1->b_rptr + len; 4083 mp2->b_rptr = mp1->b_wptr; 4084 /* 4085 * after adjustments if mblk not consumed is now 4086 * unaligned, try to align it. 
If this fails free 4087 * all messages and let upper layer recover. 4088 */ 4089 if (!OK_32PTR(mp2->b_rptr)) { 4090 if (!pullupmsg(mp2, -1)) { 4091 freemsg(mp0); 4092 freemsg(mp2); 4093 *mpp = NULL; 4094 return (NULL); 4095 } 4096 } 4097 *mpp = mp2; 4098 return (mp0); 4099 } 4100 /* Decrement len by the amount we just got. */ 4101 len -= mp2->b_wptr - mp2->b_rptr; 4102 } 4103 /* 4104 * len should be reduced to zero now. If not our caller has 4105 * screwed up. 4106 */ 4107 if (len) { 4108 /* Shouldn't happen! */ 4109 freemsg(mp0); 4110 *mpp = NULL; 4111 return (NULL); 4112 } 4113 /* 4114 * We consumed up to exactly the end of an mblk. Detach the part 4115 * we are returning from the rest of the chain. 4116 */ 4117 mp1->b_cont = NULL; 4118 *mpp = mp2; 4119 return (mp0); 4120 } 4121 4122 /* The ill stream is being unplumbed. Called from ip_close */ 4123 int 4124 ip_modclose(ill_t *ill) 4125 { 4126 boolean_t success; 4127 ipsq_t *ipsq; 4128 ipif_t *ipif; 4129 queue_t *q = ill->ill_rq; 4130 ip_stack_t *ipst = ill->ill_ipst; 4131 int i; 4132 arl_ill_common_t *ai = ill->ill_common; 4133 4134 /* 4135 * The punlink prior to this may have initiated a capability 4136 * negotiation. But ipsq_enter will block until that finishes or 4137 * times out. 4138 */ 4139 success = ipsq_enter(ill, B_FALSE, NEW_OP); 4140 4141 /* 4142 * Open/close/push/pop is guaranteed to be single threaded 4143 * per stream by STREAMS. FS guarantees that all references 4144 * from top are gone before close is called. So there can't 4145 * be another close thread that has set CONDEMNED on this ill 4146 * and caused ipsq_enter to return failure. 4147 */ 4148 ASSERT(success); 4149 ipsq = ill->ill_phyint->phyint_ipsq; 4150 4151 /* 4152 * Mark it condemned. No new reference will be made to this ill. 4153 * Lookup functions will return an error. Threads that try to 4154 * increment the refcnt must check for ILL_CAN_LOOKUP. This ensures 4155 * that the refcnt will drop down to zero. 4156 */ 4157 mutex_enter(&ill->ill_lock); 4158 ill->ill_state_flags |= ILL_CONDEMNED; 4159 for (ipif = ill->ill_ipif; ipif != NULL; 4160 ipif = ipif->ipif_next) { 4161 ipif->ipif_state_flags |= IPIF_CONDEMNED; 4162 } 4163 /* 4164 * Wake up anybody waiting to enter the ipsq. ipsq_enter 4165 * returns error if ILL_CONDEMNED is set 4166 */ 4167 cv_broadcast(&ill->ill_cv); 4168 mutex_exit(&ill->ill_lock); 4169 4170 /* 4171 * Send all the deferred DLPI messages downstream which came in 4172 * during the small window right before ipsq_enter(). We do this 4173 * without waiting for the ACKs because all the ACKs for M_PROTO 4174 * messages are ignored in ip_rput() when ILL_CONDEMNED is set. 4175 */ 4176 ill_dlpi_send_deferred(ill); 4177 4178 /* 4179 * Shut down fragmentation reassembly. 4180 * ill_frag_timer won't start a timer again. 4181 * Now cancel any existing timer 4182 */ 4183 (void) untimeout(ill->ill_frag_timer_id); 4184 (void) ill_frag_timeout(ill, 0); 4185 4186 /* 4187 * Call ill_delete to bring down the ipifs, ilms and ill on 4188 * this ill. Then wait for the refcnts to drop to zero. 4189 * ill_is_freeable checks whether the ill is really quiescent. 4190 * Then make sure that threads that are waiting to enter the 4191 * ipsq have seen the error returned by ipsq_enter and have 4192 * gone away. Then we call ill_delete_tail which does the 4193 * DL_UNBIND_REQ with the driver and then qprocsoff. 4194
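 *
 * In outline, the teardown sequence implemented below is:
 *
 *	1. ill_delete()			- bring down ipifs and ilms
 *	2. wait on ill_cv		- until ill_is_freeable()
 *	3. ill_delete_tail()		- DL_UNBIND_REQ, qprocsoff
 *	4. arp_unbind_complete()	- wait for the ARP stream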
4194 */ 4195 ill_delete(ill); 4196 mutex_enter(&ill->ill_lock); 4197 while (!ill_is_freeable(ill)) 4198 cv_wait(&ill->ill_cv, &ill->ill_lock); 4199 4200 while (ill->ill_waiters) 4201 cv_wait(&ill->ill_cv, &ill->ill_lock); 4202 4203 mutex_exit(&ill->ill_lock); 4204 4205 /* 4206 * ill_delete_tail drops reference on ill_ipst, but we need to keep 4207 * it held until the end of the function since the cleanup 4208 * below needs to be able to use the ip_stack_t. 4209 */ 4210 netstack_hold(ipst->ips_netstack); 4211 4212 /* qprocsoff is done via ill_delete_tail */ 4213 ill_delete_tail(ill); 4214 /* 4215 * synchronously wait for arp stream to unbind. After this, we 4216 * cannot get any data packets up from the driver. 4217 */ 4218 arp_unbind_complete(ill); 4219 ASSERT(ill->ill_ipst == NULL); 4220 4221 /* 4222 * Walk through all conns and qenable those that have queued data. 4223 * Close synchronization needs this to 4224 * be done to ensure that all upper layers blocked 4225 * due to flow control to the closing device 4226 * get unblocked. 4227 */ 4228 ip1dbg(("ip_wsrv: walking\n")); 4229 for (i = 0; i < TX_FANOUT_SIZE; i++) { 4230 conn_walk_drain(ipst, &ipst->ips_idl_tx_list[i]); 4231 } 4232 4233 /* 4234 * ai can be null if this is an IPv6 ill, or if the IPv4 4235 * stream is being torn down before ARP was plumbed (e.g., 4236 * /sbin/ifconfig plumbing a stream twice, and encountering 4237 * an error 4238 */ 4239 if (ai != NULL) { 4240 ASSERT(!ill->ill_isv6); 4241 mutex_enter(&ai->ai_lock); 4242 ai->ai_ill = NULL; 4243 if (ai->ai_arl == NULL) { 4244 mutex_destroy(&ai->ai_lock); 4245 kmem_free(ai, sizeof (*ai)); 4246 } else { 4247 cv_signal(&ai->ai_ill_unplumb_done); 4248 mutex_exit(&ai->ai_lock); 4249 } 4250 } 4251 4252 mutex_enter(&ipst->ips_ip_mi_lock); 4253 mi_close_unlink(&ipst->ips_ip_g_head, (IDP)ill); 4254 mutex_exit(&ipst->ips_ip_mi_lock); 4255 4256 /* 4257 * credp could be null if the open didn't succeed and ip_modopen 4258 * itself calls ip_close. 4259 */ 4260 if (ill->ill_credp != NULL) 4261 crfree(ill->ill_credp); 4262 4263 mutex_destroy(&ill->ill_saved_ire_lock); 4264 mutex_destroy(&ill->ill_lock); 4265 rw_destroy(&ill->ill_mcast_lock); 4266 mutex_destroy(&ill->ill_mcast_serializer); 4267 list_destroy(&ill->ill_nce); 4268 4269 /* 4270 * Now we are done with the module close pieces that 4271 * need the netstack_t. 4272 */ 4273 netstack_rele(ipst->ips_netstack); 4274 4275 mi_close_free((IDP)ill); 4276 q->q_ptr = WR(q)->q_ptr = NULL; 4277 4278 ipsq_exit(ipsq); 4279 4280 return (0); 4281 } 4282 4283 /* 4284 * This is called as part of close() for IP, UDP, ICMP, and RTS 4285 * in order to quiesce the conn. 4286 */ 4287 void 4288 ip_quiesce_conn(conn_t *connp) 4289 { 4290 boolean_t drain_cleanup_reqd = B_FALSE; 4291 boolean_t conn_ioctl_cleanup_reqd = B_FALSE; 4292 boolean_t ilg_cleanup_reqd = B_FALSE; 4293 ip_stack_t *ipst; 4294 4295 ASSERT(!IPCL_IS_TCP(connp)); 4296 ipst = connp->conn_netstack->netstack_ip; 4297 4298 /* 4299 * Mark the conn as closing, and this conn must not be 4300 * inserted in future into any list. Eg. conn_drain_insert(), 4301 * won't insert this conn into the conn_drain_list. 4302 * 4303 * conn_idl, and conn_ilg cannot get set henceforth. 
4304 */ 4305 mutex_enter(&connp->conn_lock); 4306 ASSERT(!(connp->conn_state_flags & CONN_QUIESCED)); 4307 connp->conn_state_flags |= CONN_CLOSING; 4308 if (connp->conn_idl != NULL) 4309 drain_cleanup_reqd = B_TRUE; 4310 if (connp->conn_oper_pending_ill != NULL) 4311 conn_ioctl_cleanup_reqd = B_TRUE; 4312 if (connp->conn_dhcpinit_ill != NULL) { 4313 ASSERT(connp->conn_dhcpinit_ill->ill_dhcpinit != 0); 4314 atomic_dec_32(&connp->conn_dhcpinit_ill->ill_dhcpinit); 4315 ill_set_inputfn(connp->conn_dhcpinit_ill); 4316 connp->conn_dhcpinit_ill = NULL; 4317 } 4318 if (connp->conn_ilg != NULL) 4319 ilg_cleanup_reqd = B_TRUE; 4320 mutex_exit(&connp->conn_lock); 4321 4322 if (conn_ioctl_cleanup_reqd) 4323 conn_ioctl_cleanup(connp); 4324 4325 if (is_system_labeled() && connp->conn_anon_port) { 4326 (void) tsol_mlp_anon(crgetzone(connp->conn_cred), 4327 connp->conn_mlp_type, connp->conn_proto, 4328 ntohs(connp->conn_lport), B_FALSE); 4329 connp->conn_anon_port = 0; 4330 } 4331 connp->conn_mlp_type = mlptSingle; 4332 4333 /* 4334 * Remove this conn from any fanout list it is on. 4335 * and then wait for any threads currently operating 4336 * on this endpoint to finish 4337 */ 4338 ipcl_hash_remove(connp); 4339 4340 /* 4341 * Remove this conn from the drain list, and do 4342 * any other cleanup that may be required. 4343 * (Only non-tcp conns may have a non-null conn_idl. 4344 * TCP conns are never flow controlled, and 4345 * conn_idl will be null) 4346 */ 4347 if (drain_cleanup_reqd && connp->conn_idl != NULL) { 4348 mutex_enter(&connp->conn_idl->idl_lock); 4349 conn_drain_tail(connp, B_TRUE); 4350 mutex_exit(&connp->conn_idl->idl_lock); 4351 } 4352 4353 if (connp == ipst->ips_ip_g_mrouter) 4354 (void) ip_mrouter_done(ipst); 4355 4356 if (ilg_cleanup_reqd) 4357 ilg_delete_all(connp); 4358 4359 /* 4360 * Now conn refcnt can increase only thru CONN_INC_REF_LOCKED. 4361 * callers from write side can't be there now because close 4362 * is in progress. The only other caller is ipcl_walk 4363 * which checks for the condemned flag. 4364 */ 4365 mutex_enter(&connp->conn_lock); 4366 connp->conn_state_flags |= CONN_CONDEMNED; 4367 while (connp->conn_ref != 1) 4368 cv_wait(&connp->conn_cv, &connp->conn_lock); 4369 connp->conn_state_flags |= CONN_QUIESCED; 4370 mutex_exit(&connp->conn_lock); 4371 } 4372 4373 /* ARGSUSED */ 4374 int 4375 ip_close(queue_t *q, int flags) 4376 { 4377 conn_t *connp; 4378 4379 /* 4380 * Call the appropriate delete routine depending on whether this is 4381 * a module or device. 4382 */ 4383 if (WR(q)->q_next != NULL) { 4384 /* This is a module close */ 4385 return (ip_modclose((ill_t *)q->q_ptr)); 4386 } 4387 4388 connp = q->q_ptr; 4389 ip_quiesce_conn(connp); 4390 4391 qprocsoff(q); 4392 4393 /* 4394 * Now we are truly single threaded on this stream, and can 4395 * delete the things hanging off the connp, and finally the connp. 4396 * We removed this connp from the fanout list, it cannot be 4397 * accessed thru the fanouts, and we already waited for the 4398 * conn_ref to drop to 0. We are already in close, so 4399 * there cannot be any other thread from the top. qprocsoff 4400 * has completed, and service has completed or won't run in 4401 * future. 4402 */ 4403 ASSERT(connp->conn_ref == 1); 4404 4405 inet_minor_free(connp->conn_minor_arena, connp->conn_dev); 4406 4407 connp->conn_ref--; 4408 ipcl_conn_destroy(connp); 4409 4410 q->q_ptr = WR(q)->q_ptr = NULL; 4411 return (0); 4412 } 4413 4414 /* 4415 * Wapper around putnext() so that ip_rts_request can merely use 4416 * conn_recv. 
4417 */ 4418 /*ARGSUSED2*/ 4419 static void 4420 ip_conn_input(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira) 4421 { 4422 conn_t *connp = (conn_t *)arg1; 4423 4424 putnext(connp->conn_rq, mp); 4425 } 4426 4427 /* Dummy in case ICMP error delivery is attempted to a /dev/ip instance */ 4428 /* ARGSUSED */ 4429 static void 4430 ip_conn_input_icmp(void *arg1, mblk_t *mp, void *arg2, ip_recv_attr_t *ira) 4431 { 4432 freemsg(mp); 4433 } 4434 4435 /* 4436 * Called when the module is about to be unloaded 4437 */ 4438 void 4439 ip_ddi_destroy(void) 4440 { 4441 tnet_fini(); 4442 4443 icmp_ddi_g_destroy(); 4444 rts_ddi_g_destroy(); 4445 udp_ddi_g_destroy(); 4446 sctp_ddi_g_destroy(); 4447 tcp_ddi_g_destroy(); 4448 ilb_ddi_g_destroy(); 4449 dce_g_destroy(); 4450 ipsec_policy_g_destroy(); 4451 ipcl_g_destroy(); 4452 ip_net_g_destroy(); 4453 ip_ire_g_fini(); 4454 inet_minor_destroy(ip_minor_arena_sa); 4455 #if defined(_LP64) 4456 inet_minor_destroy(ip_minor_arena_la); 4457 #endif 4458 4459 #ifdef DEBUG 4460 list_destroy(&ip_thread_list); 4461 rw_destroy(&ip_thread_rwlock); 4462 tsd_destroy(&ip_thread_data); 4463 #endif 4464 4465 netstack_unregister(NS_IP); 4466 } 4467 4468 /* 4469 * First step in cleanup. 4470 */ 4471 /* ARGSUSED */ 4472 static void 4473 ip_stack_shutdown(netstackid_t stackid, void *arg) 4474 { 4475 ip_stack_t *ipst = (ip_stack_t *)arg; 4476 4477 #ifdef NS_DEBUG 4478 printf("ip_stack_shutdown(%p, stack %d)\n", (void *)ipst, stackid); 4479 #endif 4480 4481 /* 4482 * Perform cleanup for special interfaces (loopback and IPMP). 4483 */ 4484 ip_interface_cleanup(ipst); 4485 4486 /* 4487 * The *_hook_shutdown()s start the process of notifying any 4488 * consumers that things are going away.... nothing is destroyed. 4489 */ 4490 ipv4_hook_shutdown(ipst); 4491 ipv6_hook_shutdown(ipst); 4492 arp_hook_shutdown(ipst); 4493 4494 mutex_enter(&ipst->ips_capab_taskq_lock); 4495 ipst->ips_capab_taskq_quit = B_TRUE; 4496 cv_signal(&ipst->ips_capab_taskq_cv); 4497 mutex_exit(&ipst->ips_capab_taskq_lock); 4498 } 4499 4500 /* 4501 * Free the IP stack instance. 4502 */ 4503 static void 4504 ip_stack_fini(netstackid_t stackid, void *arg) 4505 { 4506 ip_stack_t *ipst = (ip_stack_t *)arg; 4507 int ret; 4508 4509 #ifdef NS_DEBUG 4510 printf("ip_stack_fini(%p, stack %d)\n", (void *)ipst, stackid); 4511 #endif 4512 /* 4513 * At this point, all of the notifications that the events and 4514 * protocols are going away have been run, meaning that we can 4515 * now set about starting to clean things up. 
4516 */ 4517 ipobs_fini(ipst); 4518 ipv4_hook_destroy(ipst); 4519 ipv6_hook_destroy(ipst); 4520 arp_hook_destroy(ipst); 4521 ip_net_destroy(ipst); 4522 4523 mutex_destroy(&ipst->ips_capab_taskq_lock); 4524 cv_destroy(&ipst->ips_capab_taskq_cv); 4525 4526 ipmp_destroy(ipst); 4527 rw_destroy(&ipst->ips_srcid_lock); 4528 4529 ip_kstat_fini(stackid, ipst->ips_ip_mibkp); 4530 ipst->ips_ip_mibkp = NULL; 4531 icmp_kstat_fini(stackid, ipst->ips_icmp_mibkp); 4532 ipst->ips_icmp_mibkp = NULL; 4533 ip_kstat2_fini(stackid, ipst->ips_ip_kstat); 4534 ipst->ips_ip_kstat = NULL; 4535 bzero(&ipst->ips_ip_statistics, sizeof (ipst->ips_ip_statistics)); 4536 ip6_kstat_fini(stackid, ipst->ips_ip6_kstat); 4537 ipst->ips_ip6_kstat = NULL; 4538 bzero(&ipst->ips_ip6_statistics, sizeof (ipst->ips_ip6_statistics)); 4539 4540 nd_free(&ipst->ips_ip_g_nd); 4541 kmem_free(ipst->ips_param_arr, sizeof (lcl_param_arr)); 4542 ipst->ips_param_arr = NULL; 4543 kmem_free(ipst->ips_ndp_arr, sizeof (lcl_ndp_arr)); 4544 ipst->ips_ndp_arr = NULL; 4545 4546 dce_stack_destroy(ipst); 4547 ip_mrouter_stack_destroy(ipst); 4548 4549 mutex_destroy(&ipst->ips_ip_mi_lock); 4550 rw_destroy(&ipst->ips_ill_g_usesrc_lock); 4551 rw_destroy(&ipst->ips_ip_g_nd_lock); 4552 4553 ret = untimeout(ipst->ips_igmp_timeout_id); 4554 if (ret == -1) { 4555 ASSERT(ipst->ips_igmp_timeout_id == 0); 4556 } else { 4557 ASSERT(ipst->ips_igmp_timeout_id != 0); 4558 ipst->ips_igmp_timeout_id = 0; 4559 } 4560 ret = untimeout(ipst->ips_igmp_slowtimeout_id); 4561 if (ret == -1) { 4562 ASSERT(ipst->ips_igmp_slowtimeout_id == 0); 4563 } else { 4564 ASSERT(ipst->ips_igmp_slowtimeout_id != 0); 4565 ipst->ips_igmp_slowtimeout_id = 0; 4566 } 4567 ret = untimeout(ipst->ips_mld_timeout_id); 4568 if (ret == -1) { 4569 ASSERT(ipst->ips_mld_timeout_id == 0); 4570 } else { 4571 ASSERT(ipst->ips_mld_timeout_id != 0); 4572 ipst->ips_mld_timeout_id = 0; 4573 } 4574 ret = untimeout(ipst->ips_mld_slowtimeout_id); 4575 if (ret == -1) { 4576 ASSERT(ipst->ips_mld_slowtimeout_id == 0); 4577 } else { 4578 ASSERT(ipst->ips_mld_slowtimeout_id != 0); 4579 ipst->ips_mld_slowtimeout_id = 0; 4580 } 4581 4582 mutex_destroy(&ipst->ips_igmp_timer_lock); 4583 mutex_destroy(&ipst->ips_mld_timer_lock); 4584 mutex_destroy(&ipst->ips_igmp_slowtimeout_lock); 4585 mutex_destroy(&ipst->ips_mld_slowtimeout_lock); 4586 mutex_destroy(&ipst->ips_ip_addr_avail_lock); 4587 rw_destroy(&ipst->ips_ill_g_lock); 4588 4589 ip_ire_fini(ipst); 4590 ip6_asp_free(ipst); 4591 conn_drain_fini(ipst); 4592 ipcl_destroy(ipst); 4593 4594 mutex_destroy(&ipst->ips_ndp4->ndp_g_lock); 4595 mutex_destroy(&ipst->ips_ndp6->ndp_g_lock); 4596 kmem_free(ipst->ips_ndp4, sizeof (ndp_g_t)); 4597 ipst->ips_ndp4 = NULL; 4598 kmem_free(ipst->ips_ndp6, sizeof (ndp_g_t)); 4599 ipst->ips_ndp6 = NULL; 4600 4601 if (ipst->ips_loopback_ksp != NULL) { 4602 kstat_delete_netstack(ipst->ips_loopback_ksp, stackid); 4603 ipst->ips_loopback_ksp = NULL; 4604 } 4605 4606 kmem_free(ipst->ips_phyint_g_list, sizeof (phyint_list_t)); 4607 ipst->ips_phyint_g_list = NULL; 4608 kmem_free(ipst->ips_ill_g_heads, sizeof (ill_g_head_t) * MAX_G_HEADS); 4609 ipst->ips_ill_g_heads = NULL; 4610 4611 ldi_ident_release(ipst->ips_ldi_ident); 4612 kmem_free(ipst, sizeof (*ipst)); 4613 } 4614 4615 /* 4616 * This function is called from the TSD destructor, and is used to debug 4617 * reference count issues in IP. See block comment in <inet/ip_if.h> for 4618 * details. 
4619 */ 4620 static void 4621 ip_thread_exit(void *phash) 4622 { 4623 th_hash_t *thh = phash; 4624 4625 rw_enter(&ip_thread_rwlock, RW_WRITER); 4626 list_remove(&ip_thread_list, thh); 4627 rw_exit(&ip_thread_rwlock); 4628 mod_hash_destroy_hash(thh->thh_hash); 4629 kmem_free(thh, sizeof (*thh)); 4630 } 4631 4632 /* 4633 * Called when the IP kernel module is loaded into the kernel 4634 */ 4635 void 4636 ip_ddi_init(void) 4637 { 4638 ip_squeue_flag = ip_squeue_switch(ip_squeue_enter); 4639 4640 /* 4641 * For IP and TCP the minor numbers should start from 2 since we have 4 4642 * initial devices: ip, ip6, tcp, tcp6. 4643 */ 4644 /* 4645 * If this is a 64-bit kernel, then create two separate arenas - 4646 * one for TLIs in the range of INET_MIN_DEV+2 through 2^^18-1, and the 4647 * other for socket apps in the range 2^^18 through 2^^32-1. 4648 */ 4649 ip_minor_arena_la = NULL; 4650 ip_minor_arena_sa = NULL; 4651 #if defined(_LP64) 4652 if ((ip_minor_arena_sa = inet_minor_create("ip_minor_arena_sa", 4653 INET_MIN_DEV + 2, MAXMIN32, KM_SLEEP)) == NULL) { 4654 cmn_err(CE_PANIC, 4655 "ip_ddi_init: ip_minor_arena_sa creation failed\n"); 4656 } 4657 if ((ip_minor_arena_la = inet_minor_create("ip_minor_arena_la", 4658 MAXMIN32 + 1, MAXMIN64, KM_SLEEP)) == NULL) { 4659 cmn_err(CE_PANIC, 4660 "ip_ddi_init: ip_minor_arena_la creation failed\n"); 4661 } 4662 #else 4663 if ((ip_minor_arena_sa = inet_minor_create("ip_minor_arena_sa", 4664 INET_MIN_DEV + 2, MAXMIN, KM_SLEEP)) == NULL) { 4665 cmn_err(CE_PANIC, 4666 "ip_ddi_init: ip_minor_arena_sa creation failed\n"); 4667 } 4668 #endif 4669 ip_poll_normal_ticks = MSEC_TO_TICK_ROUNDUP(ip_poll_normal_ms); 4670 4671 ipcl_g_init(); 4672 ip_ire_g_init(); 4673 ip_net_g_init(); 4674 4675 #ifdef DEBUG 4676 tsd_create(&ip_thread_data, ip_thread_exit); 4677 rw_init(&ip_thread_rwlock, NULL, RW_DEFAULT, NULL); 4678 list_create(&ip_thread_list, sizeof (th_hash_t), 4679 offsetof(th_hash_t, thh_link)); 4680 #endif 4681 ipsec_policy_g_init(); 4682 tcp_ddi_g_init(); 4683 sctp_ddi_g_init(); 4684 dce_g_init(); 4685 4686 /* 4687 * We want to be informed each time a stack is created or 4688 * destroyed in the kernel, so we can maintain the 4689 * set of udp_stack_t's. 4690 */ 4691 netstack_register(NS_IP, ip_stack_init, ip_stack_shutdown, 4692 ip_stack_fini); 4693 4694 tnet_init(); 4695 4696 udp_ddi_g_init(); 4697 rts_ddi_g_init(); 4698 icmp_ddi_g_init(); 4699 ilb_ddi_g_init(); 4700 } 4701 4702 /* 4703 * Initialize the IP stack instance. 
4704 */ 4705 static void * 4706 ip_stack_init(netstackid_t stackid, netstack_t *ns) 4707 { 4708 ip_stack_t *ipst; 4709 ipparam_t *pa; 4710 ipndp_t *na; 4711 major_t major; 4712 4713 #ifdef NS_DEBUG 4714 printf("ip_stack_init(stack %d)\n", stackid); 4715 #endif 4716 4717 ipst = (ip_stack_t *)kmem_zalloc(sizeof (*ipst), KM_SLEEP); 4718 ipst->ips_netstack = ns; 4719 4720 ipst->ips_ill_g_heads = kmem_zalloc(sizeof (ill_g_head_t) * MAX_G_HEADS, 4721 KM_SLEEP); 4722 ipst->ips_phyint_g_list = kmem_zalloc(sizeof (phyint_list_t), 4723 KM_SLEEP); 4724 ipst->ips_ndp4 = kmem_zalloc(sizeof (ndp_g_t), KM_SLEEP); 4725 ipst->ips_ndp6 = kmem_zalloc(sizeof (ndp_g_t), KM_SLEEP); 4726 mutex_init(&ipst->ips_ndp4->ndp_g_lock, NULL, MUTEX_DEFAULT, NULL); 4727 mutex_init(&ipst->ips_ndp6->ndp_g_lock, NULL, MUTEX_DEFAULT, NULL); 4728 4729 rw_init(&ipst->ips_ip_g_nd_lock, NULL, RW_DEFAULT, NULL); 4730 mutex_init(&ipst->ips_igmp_timer_lock, NULL, MUTEX_DEFAULT, NULL); 4731 ipst->ips_igmp_deferred_next = INFINITY; 4732 mutex_init(&ipst->ips_mld_timer_lock, NULL, MUTEX_DEFAULT, NULL); 4733 ipst->ips_mld_deferred_next = INFINITY; 4734 mutex_init(&ipst->ips_igmp_slowtimeout_lock, NULL, MUTEX_DEFAULT, NULL); 4735 mutex_init(&ipst->ips_mld_slowtimeout_lock, NULL, MUTEX_DEFAULT, NULL); 4736 mutex_init(&ipst->ips_ip_mi_lock, NULL, MUTEX_DEFAULT, NULL); 4737 mutex_init(&ipst->ips_ip_addr_avail_lock, NULL, MUTEX_DEFAULT, NULL); 4738 rw_init(&ipst->ips_ill_g_lock, NULL, RW_DEFAULT, NULL); 4739 rw_init(&ipst->ips_ill_g_usesrc_lock, NULL, RW_DEFAULT, NULL); 4740 4741 ipcl_init(ipst); 4742 ip_ire_init(ipst); 4743 ip6_asp_init(ipst); 4744 ipif_init(ipst); 4745 conn_drain_init(ipst); 4746 ip_mrouter_stack_init(ipst); 4747 dce_stack_init(ipst); 4748 4749 ipst->ips_ip_g_frag_timeout = IP_FRAG_TIMEOUT; 4750 ipst->ips_ip_g_frag_timo_ms = IP_FRAG_TIMEOUT * 1000; 4751 ipst->ips_ipv6_frag_timeout = IPV6_FRAG_TIMEOUT; 4752 ipst->ips_ipv6_frag_timo_ms = IPV6_FRAG_TIMEOUT * 1000; 4753 4754 ipst->ips_ip_multirt_log_interval = 1000; 4755 4756 ipst->ips_ip_g_forward = IP_FORWARD_DEFAULT; 4757 ipst->ips_ipv6_forward = IP_FORWARD_DEFAULT; 4758 ipst->ips_ill_index = 1; 4759 4760 ipst->ips_saved_ip_g_forward = -1; 4761 ipst->ips_reg_vif_num = ALL_VIFS; /* Index to Register vif */ 4762 4763 pa = (ipparam_t *)kmem_alloc(sizeof (lcl_param_arr), KM_SLEEP); 4764 ipst->ips_param_arr = pa; 4765 bcopy(lcl_param_arr, ipst->ips_param_arr, sizeof (lcl_param_arr)); 4766 4767 na = (ipndp_t *)kmem_alloc(sizeof (lcl_ndp_arr), KM_SLEEP); 4768 ipst->ips_ndp_arr = na; 4769 bcopy(lcl_ndp_arr, ipst->ips_ndp_arr, sizeof (lcl_ndp_arr)); 4770 ipst->ips_ndp_arr[IPNDP_IP_FORWARDING_OFFSET].ip_ndp_data = 4771 (caddr_t)&ipst->ips_ip_g_forward; 4772 ipst->ips_ndp_arr[IPNDP_IP6_FORWARDING_OFFSET].ip_ndp_data = 4773 (caddr_t)&ipst->ips_ipv6_forward; 4774 ASSERT(strcmp(ipst->ips_ndp_arr[IPNDP_CGTP_FILTER_OFFSET].ip_ndp_name, 4775 "ip_cgtp_filter") == 0); 4776 ipst->ips_ndp_arr[IPNDP_CGTP_FILTER_OFFSET].ip_ndp_data = 4777 (caddr_t)&ipst->ips_ip_cgtp_filter; 4778 4779 (void) ip_param_register(&ipst->ips_ip_g_nd, 4780 ipst->ips_param_arr, A_CNT(lcl_param_arr), 4781 ipst->ips_ndp_arr, A_CNT(lcl_ndp_arr)); 4782 4783 ipst->ips_ip_mibkp = ip_kstat_init(stackid, ipst); 4784 ipst->ips_icmp_mibkp = icmp_kstat_init(stackid); 4785 ipst->ips_ip_kstat = ip_kstat2_init(stackid, &ipst->ips_ip_statistics); 4786 ipst->ips_ip6_kstat = 4787 ip6_kstat_init(stackid, &ipst->ips_ip6_statistics); 4788 4789 ipst->ips_ip_src_id = 1; 4790 rw_init(&ipst->ips_srcid_lock, NULL, RW_DEFAULT, NULL); 4791 4792 
ipst->ips_src_generation = SRC_GENERATION_INITIAL; 4793 4794 ip_net_init(ipst, ns); 4795 ipv4_hook_init(ipst); 4796 ipv6_hook_init(ipst); 4797 arp_hook_init(ipst); 4798 ipmp_init(ipst); 4799 ipobs_init(ipst); 4800 4801 /* 4802 * Create the taskq dispatcher thread and initialize related stuff. 4803 */ 4804 ipst->ips_capab_taskq_thread = thread_create(NULL, 0, 4805 ill_taskq_dispatch, ipst, 0, &p0, TS_RUN, minclsyspri); 4806 mutex_init(&ipst->ips_capab_taskq_lock, NULL, MUTEX_DEFAULT, NULL); 4807 cv_init(&ipst->ips_capab_taskq_cv, NULL, CV_DEFAULT, NULL); 4808 4809 major = mod_name_to_major(INET_NAME); 4810 (void) ldi_ident_from_major(major, &ipst->ips_ldi_ident); 4811 return (ipst); 4812 } 4813 4814 /* 4815 * Allocate and initialize a DLPI template of the specified length. (May be 4816 * called as writer.) 4817 */ 4818 mblk_t * 4819 ip_dlpi_alloc(size_t len, t_uscalar_t prim) 4820 { 4821 mblk_t *mp; 4822 4823 mp = allocb(len, BPRI_MED); 4824 if (!mp) 4825 return (NULL); 4826 4827 /* 4828 * DLPIv2 says that DL_INFO_REQ and DL_TOKEN_REQ (the latter 4829 * of which we don't seem to use) are sent with M_PCPROTO, and 4830 * that other DLPI are M_PROTO. 4831 */ 4832 if (prim == DL_INFO_REQ) { 4833 mp->b_datap->db_type = M_PCPROTO; 4834 } else { 4835 mp->b_datap->db_type = M_PROTO; 4836 } 4837 4838 mp->b_wptr = mp->b_rptr + len; 4839 bzero(mp->b_rptr, len); 4840 ((dl_unitdata_req_t *)mp->b_rptr)->dl_primitive = prim; 4841 return (mp); 4842 } 4843 4844 /* 4845 * Allocate and initialize a DLPI notification. (May be called as writer.) 4846 */ 4847 mblk_t * 4848 ip_dlnotify_alloc(uint_t notification, uint_t data) 4849 { 4850 dl_notify_ind_t *notifyp; 4851 mblk_t *mp; 4852 4853 if ((mp = ip_dlpi_alloc(DL_NOTIFY_IND_SIZE, DL_NOTIFY_IND)) == NULL) 4854 return (NULL); 4855 4856 notifyp = (dl_notify_ind_t *)mp->b_rptr; 4857 notifyp->dl_notification = notification; 4858 notifyp->dl_data = data; 4859 return (mp); 4860 } 4861 4862 /* 4863 * Debug formatting routine. Returns a character string representation of the 4864 * addr in buf, of the form xxx.xxx.xxx.xxx. This routine takes the address 4865 * in the form of a ipaddr_t and calls ip_dot_saddr with a pointer. 4866 * 4867 * Once the ndd table-printing interfaces are removed, this can be changed to 4868 * standard dotted-decimal form. 4869 */ 4870 char * 4871 ip_dot_addr(ipaddr_t addr, char *buf) 4872 { 4873 uint8_t *ap = (uint8_t *)&addr; 4874 4875 (void) mi_sprintf(buf, "%03d.%03d.%03d.%03d", 4876 ap[0] & 0xFF, ap[1] & 0xFF, ap[2] & 0xFF, ap[3] & 0xFF); 4877 return (buf); 4878 } 4879 4880 /* 4881 * Write the given MAC address as a printable string in the usual colon- 4882 * separated format. 4883 */ 4884 const char * 4885 mac_colon_addr(const uint8_t *addr, size_t alen, char *buf, size_t buflen) 4886 { 4887 char *bp; 4888 4889 if (alen == 0 || buflen < 4) 4890 return ("?"); 4891 bp = buf; 4892 for (;;) { 4893 /* 4894 * If there are more MAC address bytes available, but we won't 4895 * have any room to print them, then add "..." to the string 4896 * instead. See below for the 'magic number' explanation. 4897 */ 4898 if ((alen == 2 && buflen < 6) || (alen > 2 && buflen < 7)) { 4899 (void) strcpy(bp, "..."); 4900 break; 4901 } 4902 (void) sprintf(bp, "%02x", *addr++); 4903 bp += 2; 4904 if (--alen == 0) 4905 break; 4906 *bp++ = ':'; 4907 buflen -= 3; 4908 /* 4909 * At this point, based on the first 'if' statement above, 4910 * either alen == 1 and buflen >= 3, or alen > 1 and 4911 * buflen >= 4. 
The first case leaves room for the final "xx" 4912 * number and trailing NUL byte. The second leaves room for at 4913 * least "...". Thus the apparently 'magic' numbers chosen for 4914 * that statement. 4915 */ 4916 } 4917 return (buf); 4918 } 4919 4920 /* 4921 * Called when it is conceptually a ULP that would send the packet 4922 * e.g., port unreachable and protocol unreachable. Check that the packet 4923 * would have passed the IPsec global policy before sending the error. 4924 * 4925 * Send an ICMP error after patching up the packet appropriately. 4926 * Uses ip_drop_input and bumps the appropriate MIB. 4927 */ 4928 void 4929 ip_fanout_send_icmp_v4(mblk_t *mp, uint_t icmp_type, uint_t icmp_code, 4930 ip_recv_attr_t *ira) 4931 { 4932 ipha_t *ipha; 4933 boolean_t secure; 4934 ill_t *ill = ira->ira_ill; 4935 ip_stack_t *ipst = ill->ill_ipst; 4936 netstack_t *ns = ipst->ips_netstack; 4937 ipsec_stack_t *ipss = ns->netstack_ipsec; 4938 4939 secure = ira->ira_flags & IRAF_IPSEC_SECURE; 4940 4941 /* 4942 * We are generating an icmp error for some inbound packet. 4943 * Called from all ip_fanout_(udp, tcp, proto) functions. 4944 * Before we generate an error, check with global policy 4945 * to see whether this is allowed to enter the system. As 4946 * there is no "conn", we are checking with global policy. 4947 */ 4948 ipha = (ipha_t *)mp->b_rptr; 4949 if (secure || ipss->ipsec_inbound_v4_policy_present) { 4950 mp = ipsec_check_global_policy(mp, NULL, ipha, NULL, ira, ns); 4951 if (mp == NULL) 4952 return; 4953 } 4954 4955 /* We never send errors for protocols that we do implement */ 4956 if (ira->ira_protocol == IPPROTO_ICMP || 4957 ira->ira_protocol == IPPROTO_IGMP) { 4958 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards); 4959 ip_drop_input("ip_fanout_send_icmp_v4", mp, ill); 4960 freemsg(mp); 4961 return; 4962 } 4963 /* 4964 * Have to correct checksum since 4965 * the packet might have been 4966 * fragmented and the reassembly code in ip_rput 4967 * does not restore the IP checksum. 4968 */ 4969 ipha->ipha_hdr_checksum = 0; 4970 ipha->ipha_hdr_checksum = ip_csum_hdr(ipha); 4971 4972 switch (icmp_type) { 4973 case ICMP_DEST_UNREACHABLE: 4974 switch (icmp_code) { 4975 case ICMP_PROTOCOL_UNREACHABLE: 4976 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInUnknownProtos); 4977 ip_drop_input("ipIfStatsInUnknownProtos", mp, ill); 4978 break; 4979 case ICMP_PORT_UNREACHABLE: 4980 BUMP_MIB(ill->ill_ip_mib, udpIfStatsNoPorts); 4981 ip_drop_input("ipIfStatsNoPorts", mp, ill); 4982 break; 4983 } 4984 4985 icmp_unreachable(mp, icmp_code, ira); 4986 break; 4987 default: 4988 #ifdef DEBUG 4989 panic("ip_fanout_send_icmp_v4: wrong type"); 4990 /*NOTREACHED*/ 4991 #else 4992 freemsg(mp); 4993 break; 4994 #endif 4995 } 4996 } 4997 4998 /* 4999 * Used to send an ICMP error message when a packet is received for 5000 * a protocol that is not supported. The mblk passed as argument 5001 * is consumed by this function. 5002 */ 5003 void 5004 ip_proto_not_sup(mblk_t *mp, ip_recv_attr_t *ira) 5005 { 5006 ipha_t *ipha; 5007 5008 ipha = (ipha_t *)mp->b_rptr; 5009 if (ira->ira_flags & IRAF_IS_IPV4) { 5010 ASSERT(IPH_HDR_VERSION(ipha) == IP_VERSION); 5011 ip_fanout_send_icmp_v4(mp, ICMP_DEST_UNREACHABLE, 5012 ICMP_PROTOCOL_UNREACHABLE, ira); 5013 } else { 5014 ASSERT(IPH_HDR_VERSION(ipha) == IPV6_VERSION); 5015 ip_fanout_send_icmp_v6(mp, ICMP6_PARAM_PROB, 5016 ICMP6_PARAMPROB_NEXTHEADER, ira); 5017 } 5018 } 5019 5020 /* 5021 * Deliver a rawip packet to the given conn, possibly applying ipsec policy. 5022 * Handles IPv4 and IPv6. 5023
5023 * We are responsible for disposing of mp, such as by freemsg() or putnext() 5024 * Caller is responsible for dropping references to the conn. 5025 */ 5026 void 5027 ip_fanout_proto_conn(conn_t *connp, mblk_t *mp, ipha_t *ipha, ip6_t *ip6h, 5028 ip_recv_attr_t *ira) 5029 { 5030 ill_t *ill = ira->ira_ill; 5031 ip_stack_t *ipst = ill->ill_ipst; 5032 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec; 5033 boolean_t secure; 5034 uint_t protocol = ira->ira_protocol; 5035 iaflags_t iraflags = ira->ira_flags; 5036 queue_t *rq; 5037 5038 secure = iraflags & IRAF_IPSEC_SECURE; 5039 5040 rq = connp->conn_rq; 5041 if (IPCL_IS_NONSTR(connp) ? connp->conn_flow_cntrld : !canputnext(rq)) { 5042 switch (protocol) { 5043 case IPPROTO_ICMPV6: 5044 BUMP_MIB(ill->ill_icmp6_mib, ipv6IfIcmpInOverflows); 5045 break; 5046 case IPPROTO_ICMP: 5047 BUMP_MIB(&ipst->ips_icmp_mib, icmpInOverflows); 5048 break; 5049 default: 5050 BUMP_MIB(ill->ill_ip_mib, rawipIfStatsInOverflows); 5051 break; 5052 } 5053 freemsg(mp); 5054 return; 5055 } 5056 5057 ASSERT(!(IPCL_IS_IPTUN(connp))); 5058 5059 if (((iraflags & IRAF_IS_IPV4) ? 5060 CONN_INBOUND_POLICY_PRESENT(connp, ipss) : 5061 CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss)) || 5062 secure) { 5063 mp = ipsec_check_inbound_policy(mp, connp, ipha, 5064 ip6h, ira); 5065 if (mp == NULL) { 5066 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards); 5067 /* Note that mp is NULL */ 5068 ip_drop_input("ipIfStatsInDiscards", mp, ill); 5069 return; 5070 } 5071 } 5072 5073 if (iraflags & IRAF_ICMP_ERROR) { 5074 (connp->conn_recvicmp)(connp, mp, NULL, ira); 5075 } else { 5076 ill_t *rill = ira->ira_rill; 5077 5078 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCInDelivers); 5079 ira->ira_ill = ira->ira_rill = NULL; 5080 /* Send it upstream */ 5081 (connp->conn_recv)(connp, mp, NULL, ira); 5082 ira->ira_ill = ill; 5083 ira->ira_rill = rill; 5084 } 5085 } 5086 5087 /* 5088 * Handle protocols with which IP is less intimate. There 5089 * can be more than one stream bound to a particular 5090 * protocol. When this is the case, normally each one gets a copy 5091 * of any incoming packets. 5092 * 5093 * IPsec NOTE : 5094 * 5095 * Don't allow a secure packet going up a non-secure connection. 5096 * We don't allow this because 5097 * 5098 * 1) Reply might go out in clear which will be dropped at 5099 * the sending side. 5100 * 2) If the reply goes out in clear it will give the 5101 * adversary enough information for getting the key in 5102 * most of the cases. 5103 * 5104 * Moreover getting a secure packet when we expect clear 5105 * implies that SA's were added without checking for 5106 * policy on both ends. This should not happen once ISAKMP 5107 * is used to negotiate SAs as SAs will be added only after 5108 * verifying the policy. 5109 * 5110 * Zones notes: 5111 * Earlier in ip_input on a system with multiple shared-IP zones we 5112 * duplicate the multicast and broadcast packets and send them up 5113 * with each explicit zoneid that exists on that ill. 5114 * This means that here we can match the zoneid with SO_ALLZONES being special. 
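 *
 * As an illustrative aside (not part of the original code): the fanout
 * loop below hands a duplicate of the packet to every matching conn
 * except the last one, which consumes the original. A minimal sketch of
 * that duplicate-then-deliver pattern, where next_match() and deliver()
 * are hypothetical stand-ins for the conn walk and for
 * ip_fanout_proto_conn():
 *
 *	while (connp != NULL && next_match(connp) != NULL) {
 *		mblk_t *mp1;
 *
 *		if ((mp1 = dupmsg(mp)) == NULL &&
 *		    (mp1 = copymsg(mp)) == NULL)
 *			break;			(out of memory; stop copying)
 *		deliver(connp, mp1);
 *		connp = next_match(connp);
 *	}
 *	if (connp != NULL)
 *		deliver(connp, mp);		(last match gets the original)
 *
 * dupmsg() merely shares the underlying data blocks and is tried first;
 * copymsg() is the slower fallback that copies the data when the
 * message cannot be duplicated.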
5115 */ 5116 void 5117 ip_fanout_proto_v4(mblk_t *mp, ipha_t *ipha, ip_recv_attr_t *ira) 5118 { 5119 mblk_t *mp1; 5120 ipaddr_t laddr; 5121 conn_t *connp, *first_connp, *next_connp; 5122 connf_t *connfp; 5123 ill_t *ill = ira->ira_ill; 5124 ip_stack_t *ipst = ill->ill_ipst; 5125 5126 laddr = ipha->ipha_dst; 5127 5128 connfp = &ipst->ips_ipcl_proto_fanout_v4[ira->ira_protocol]; 5129 mutex_enter(&connfp->connf_lock); 5130 5131 for (connp = connfp->connf_head; connp != NULL; 5132 connp = connp->conn_next) { 5133 /* Note: IPCL_PROTO_MATCH includes conn_wantpacket */ 5134 if (IPCL_PROTO_MATCH(connp, ira, ipha) && 5135 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) || 5136 tsol_receive_local(mp, &laddr, IPV4_VERSION, ira, connp))) { 5137 break; 5138 } 5139 } 5140 5141 if (connp == NULL) { 5142 /* 5143 * No one bound to these addresses. Is 5144 * there a client that wants all 5145 * unclaimed datagrams? 5146 */ 5147 mutex_exit(&connfp->connf_lock); 5148 ip_fanout_send_icmp_v4(mp, ICMP_DEST_UNREACHABLE, 5149 ICMP_PROTOCOL_UNREACHABLE, ira); 5150 return; 5151 } 5152 5153 ASSERT(IPCL_IS_NONSTR(connp) || connp->conn_rq != NULL); 5154 5155 CONN_INC_REF(connp); 5156 first_connp = connp; 5157 connp = connp->conn_next; 5158 5159 for (;;) { 5160 while (connp != NULL) { 5161 /* Note: IPCL_PROTO_MATCH includes conn_wantpacket */ 5162 if (IPCL_PROTO_MATCH(connp, ira, ipha) && 5163 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) || 5164 tsol_receive_local(mp, &laddr, IPV4_VERSION, 5165 ira, connp))) 5166 break; 5167 connp = connp->conn_next; 5168 } 5169 5170 if (connp == NULL) { 5171 /* No more interested clients */ 5172 connp = first_connp; 5173 break; 5174 } 5175 if (((mp1 = dupmsg(mp)) == NULL) && 5176 ((mp1 = copymsg(mp)) == NULL)) { 5177 /* Memory allocation failed */ 5178 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards); 5179 ip_drop_input("ipIfStatsInDiscards", mp, ill); 5180 connp = first_connp; 5181 break; 5182 } 5183 5184 CONN_INC_REF(connp); 5185 mutex_exit(&connfp->connf_lock); 5186 5187 ip_fanout_proto_conn(connp, mp1, (ipha_t *)mp1->b_rptr, NULL, 5188 ira); 5189 5190 mutex_enter(&connfp->connf_lock); 5191 /* Follow the next pointer before releasing the conn. */ 5192 next_connp = connp->conn_next; 5193 CONN_DEC_REF(connp); 5194 connp = next_connp; 5195 } 5196 5197 /* Last one. Send it upstream. */ 5198 mutex_exit(&connfp->connf_lock); 5199 5200 ip_fanout_proto_conn(connp, mp, ipha, NULL, ira); 5201 5202 CONN_DEC_REF(connp); 5203 } 5204 5205 /* 5206 * If we have an IPsec NAT-Traversal packet, strip the zero-SPI or 5207 * pass it along to ESP if the SPI is non-zero. Returns the mblk if it 5208 * is not consumed. 5209 * 5210 * One of three things can happen, all of which affect the passed-in mblk: 5211 * 5212 * 1.) The packet is stock UDP and gets its zero-SPI stripped. Return the mblk. 5213 * 5214 * 2.) The packet is ESP-in-UDP, gets transformed into an equivalent 5215 * ESP packet, and is passed along to ESP for consumption. Return NULL. 5216 * 5217 * 3.) The packet is an ESP-in-UDP Keepalive. Drop it and return NULL.
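 *
 * Caller-side sketch (illustrative only, not from this file): the
 * return value tells the caller whether it still owns the mblk.
 *
 *	mp = zero_spi_check(mp, ira);
 *	if (mp == NULL)
 *		return;		(case 2 or 3: consumed by ESP or dropped)
 *	(case 1: mp is plain UDP with the zero SPI already stripped)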
5218 */ 5219 mblk_t * 5220 zero_spi_check(mblk_t *mp, ip_recv_attr_t *ira) 5221 { 5222 int shift, plen, iph_len; 5223 ipha_t *ipha; 5224 udpha_t *udpha; 5225 uint32_t *spi; 5226 uint32_t esp_ports; 5227 uint8_t *orptr; 5228 ip_stack_t *ipst = ira->ira_ill->ill_ipst; 5229 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec; 5230 5231 ipha = (ipha_t *)mp->b_rptr; 5232 iph_len = ira->ira_ip_hdr_length; 5233 plen = ira->ira_pktlen; 5234 5235 if (plen - iph_len - sizeof (udpha_t) < sizeof (uint32_t)) { 5236 /* 5237 * Most likely a keepalive for the benefit of an intervening 5238 * NAT. These aren't for us, per se, so drop it. 5239 * 5240 * RFC 3947/8 doesn't say for sure what to do for 2-3 5241 * byte packets (keepalives are 1-byte), but we'll drop them 5242 * also. 5243 */ 5244 ip_drop_packet(mp, B_TRUE, ira->ira_ill, 5245 DROPPER(ipss, ipds_esp_nat_t_ka), &ipss->ipsec_dropper); 5246 return (NULL); 5247 } 5248 5249 if (MBLKL(mp) < iph_len + sizeof (udpha_t) + sizeof (*spi)) { 5250 /* might as well pull it all up - it might be ESP. */ 5251 if (!pullupmsg(mp, -1)) { 5252 ip_drop_packet(mp, B_TRUE, ira->ira_ill, 5253 DROPPER(ipss, ipds_esp_nomem), 5254 &ipss->ipsec_dropper); 5255 return (NULL); 5256 } 5257 5258 ipha = (ipha_t *)mp->b_rptr; 5259 } 5260 spi = (uint32_t *)(mp->b_rptr + iph_len + sizeof (udpha_t)); 5261 if (*spi == 0) { 5262 /* UDP packet - remove 0-spi. */ 5263 shift = sizeof (uint32_t); 5264 } else { 5265 /* ESP-in-UDP packet - reduce to ESP. */ 5266 ipha->ipha_protocol = IPPROTO_ESP; 5267 shift = sizeof (udpha_t); 5268 } 5269 5270 /* Fix IP header */ 5271 ira->ira_pktlen = (plen - shift); 5272 ipha->ipha_length = htons(ira->ira_pktlen); 5273 ipha->ipha_hdr_checksum = 0; 5274 5275 orptr = mp->b_rptr; 5276 mp->b_rptr += shift; 5277 5278 udpha = (udpha_t *)(orptr + iph_len); 5279 if (*spi == 0) { 5280 ASSERT((uint8_t *)ipha == orptr); 5281 udpha->uha_length = htons(plen - shift - iph_len); 5282 iph_len += sizeof (udpha_t); /* For the call to ovbcopy(). */ 5283 esp_ports = 0; 5284 } else { 5285 esp_ports = *((uint32_t *)udpha); 5286 ASSERT(esp_ports != 0); 5287 } 5288 ovbcopy(orptr, orptr + shift, iph_len); 5289 if (esp_ports != 0) /* Punt up for ESP processing. */ { 5290 ipha = (ipha_t *)(orptr + shift); 5291 5292 ira->ira_flags |= IRAF_ESP_UDP_PORTS; 5293 ira->ira_esp_udp_ports = esp_ports; 5294 ip_fanout_v4(mp, ipha, ira); 5295 return (NULL); 5296 } 5297 return (mp); 5298 } 5299 5300 /* 5301 * Deliver a udp packet to the given conn, possibly applying ipsec policy. 5302 * Handles IPv4 and IPv6. 5303 * We are responsible for disposing of mp, such as by freemsg() or putnext() 5304 * Caller is responsible for dropping references to the conn. 5305 */ 5306 void 5307 ip_fanout_udp_conn(conn_t *connp, mblk_t *mp, ipha_t *ipha, ip6_t *ip6h, 5308 ip_recv_attr_t *ira) 5309 { 5310 ill_t *ill = ira->ira_ill; 5311 ip_stack_t *ipst = ill->ill_ipst; 5312 ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec; 5313 boolean_t secure; 5314 iaflags_t iraflags = ira->ira_flags; 5315 5316 secure = iraflags & IRAF_IPSEC_SECURE; 5317 5318 if (IPCL_IS_NONSTR(connp) ? connp->conn_flow_cntrld : 5319 !canputnext(connp->conn_rq)) { 5320 BUMP_MIB(ill->ill_ip_mib, udpIfStatsInOverflows); 5321 freemsg(mp); 5322 return; 5323 } 5324 5325 if (((iraflags & IRAF_IS_IPV4) ? 
5326 CONN_INBOUND_POLICY_PRESENT(connp, ipss) : 5327 CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss)) || 5328 secure) { 5329 mp = ipsec_check_inbound_policy(mp, connp, ipha, 5330 ip6h, ira); 5331 if (mp == NULL) { 5332 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards); 5333 /* Note that mp is NULL */ 5334 ip_drop_input("ipIfStatsInDiscards", mp, ill); 5335 return; 5336 } 5337 } 5338 5339 /* 5340 * Since this code is not used for UDP unicast, we don't need a NAT_T 5341 * check. Only ip_fanout_v4 has that check. 5342 */ 5343 if (ira->ira_flags & IRAF_ICMP_ERROR) { 5344 (connp->conn_recvicmp)(connp, mp, NULL, ira); 5345 } else { 5346 ill_t *rill = ira->ira_rill; 5347 5348 BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCInDelivers); 5349 ira->ira_ill = ira->ira_rill = NULL; 5350 /* Send it upstream */ 5351 (connp->conn_recv)(connp, mp, NULL, ira); 5352 ira->ira_ill = ill; 5353 ira->ira_rill = rill; 5354 } 5355 } 5356 5357 /* 5358 * Fanout for UDP packets that are multicast or broadcast, and ICMP errors. 5359 * (Unicast fanout is handled in ip_input_v4.) 5360 * 5361 * If SO_REUSEADDR is set, all multicast and broadcast packets 5362 * will be delivered to all conns bound to the same port. 5363 * 5364 * If there is at least one matching AF_INET receiver, then we will 5365 * ignore any AF_INET6 receivers. 5366 * In the special case where an AF_INET socket binds to 0.0.0.0/<port> and an 5367 * AF_INET6 socket binds to ::/<port>, only the AF_INET socket receives the IPv4 5368 * packets. 5369 * 5370 * Zones notes: 5371 * Earlier in ip_input on a system with multiple shared-IP zones we 5372 * duplicate the multicast and broadcast packets and send them up 5373 * with each explicit zoneid that exists on that ill. 5374 * This means that here we can match the zoneid with SO_ALLZONES being special. 5375 */ 5376 void 5377 ip_fanout_udp_multi_v4(mblk_t *mp, ipha_t *ipha, uint16_t lport, uint16_t fport, 5378 ip_recv_attr_t *ira) 5379 { 5380 ipaddr_t laddr; 5381 in6_addr_t v6faddr; 5382 conn_t *connp; 5383 connf_t *connfp; 5384 ipaddr_t faddr; 5385 ill_t *ill = ira->ira_ill; 5386 ip_stack_t *ipst = ill->ill_ipst; 5387 5388 ASSERT(ira->ira_flags & (IRAF_MULTIBROADCAST|IRAF_ICMP_ERROR)); 5389 5390 laddr = ipha->ipha_dst; 5391 faddr = ipha->ipha_src; 5392 5393 connfp = &ipst->ips_ipcl_udp_fanout[IPCL_UDP_HASH(lport, ipst)]; 5394 mutex_enter(&connfp->connf_lock); 5395 connp = connfp->connf_head; 5396 5397 /* 5398 * If SO_REUSEADDR has been set on the first conn, we send the 5399 * packet to all clients that have joined the group and 5400 * match the port.
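 *
 * Illustrative userland counterpart (a sketch; error handling elided,
 * and the group address is an example value): the receivers that this
 * loop fans out to are typically set up as
 *
 *	int on = 1;
 *	struct ip_mreq mr;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof (on));
 *	mr.imr_multiaddr.s_addr = inet_addr("224.1.2.3");
 *	mr.imr_interface.s_addr = htonl(INADDR_ANY);
 *	setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mr, sizeof (mr));
 *
 * followed by a bind to the shared port. Without SO_REUSEADDR only the
 * first matching conn found below receives the datagram.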
5401 */ 5402 while (connp != NULL) { 5403 if ((IPCL_UDP_MATCH(connp, lport, laddr, fport, faddr)) && 5404 conn_wantpacket(connp, ira, ipha) && 5405 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) || 5406 tsol_receive_local(mp, &laddr, IPV4_VERSION, ira, connp))) 5407 break; 5408 connp = connp->conn_next; 5409 } 5410 5411 if (connp == NULL) 5412 goto notfound; 5413 5414 CONN_INC_REF(connp); 5415 5416 if (connp->conn_reuseaddr) { 5417 conn_t *first_connp = connp; 5418 conn_t *next_connp; 5419 mblk_t *mp1; 5420 5421 connp = connp->conn_next; 5422 for (;;) { 5423 while (connp != NULL) { 5424 if (IPCL_UDP_MATCH(connp, lport, laddr, 5425 fport, faddr) && 5426 conn_wantpacket(connp, ira, ipha) && 5427 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) || 5428 tsol_receive_local(mp, &laddr, IPV4_VERSION, 5429 ira, connp))) 5430 break; 5431 connp = connp->conn_next; 5432 } 5433 if (connp == NULL) { 5434 /* No more interested clients */ 5435 connp = first_connp; 5436 break; 5437 } 5438 if (((mp1 = dupmsg(mp)) == NULL) && 5439 ((mp1 = copymsg(mp)) == NULL)) { 5440 /* Memory allocation failed */ 5441 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards); 5442 ip_drop_input("ipIfStatsInDiscards", mp, ill); 5443 connp = first_connp; 5444 break; 5445 } 5446 CONN_INC_REF(connp); 5447 mutex_exit(&connfp->connf_lock); 5448 5449 IP_STAT(ipst, ip_udp_fanmb); 5450 ip_fanout_udp_conn(connp, mp1, (ipha_t *)mp1->b_rptr, 5451 NULL, ira); 5452 mutex_enter(&connfp->connf_lock); 5453 /* Follow the next pointer before releasing the conn */ 5454 next_connp = connp->conn_next; 5455 CONN_DEC_REF(connp); 5456 connp = next_connp; 5457 } 5458 } 5459 5460 /* Last one. Send it upstream. */ 5461 mutex_exit(&connfp->connf_lock); 5462 IP_STAT(ipst, ip_udp_fanmb); 5463 ip_fanout_udp_conn(connp, mp, ipha, NULL, ira); 5464 CONN_DEC_REF(connp); 5465 return; 5466 5467 notfound: 5468 mutex_exit(&connfp->connf_lock); 5469 /* 5470 * IPv6 endpoints bound to multicast IPv4-mapped addresses 5471 * have already been matched above, since they live in the IPv4 5472 * fanout tables. This implies we only need to 5473 * check for IPv6 in6addr_any endpoints here. 5474 * Thus we compare using ipv6_all_zeros instead of the destination 5475 * address, except for the multicast group membership lookup which 5476 * uses the IPv4 destination. 5477 */ 5478 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &v6faddr); 5479 connfp = &ipst->ips_ipcl_udp_fanout[IPCL_UDP_HASH(lport, ipst)]; 5480 mutex_enter(&connfp->connf_lock); 5481 connp = connfp->connf_head; 5482 /* 5483 * IPv4 multicast packet being delivered to an AF_INET6 5484 * in6addr_any endpoint. 5485 * Need to check conn_wantpacket(). Note that we use conn_wantpacket() 5486 * and not conn_wantpacket_v6() since any multicast membership is 5487 * for an IPv4-mapped multicast address. 5488 */ 5489 while (connp != NULL) { 5490 if (IPCL_UDP_MATCH_V6(connp, lport, ipv6_all_zeros, 5491 fport, v6faddr) && 5492 conn_wantpacket(connp, ira, ipha) && 5493 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) || 5494 tsol_receive_local(mp, &laddr, IPV4_VERSION, ira, connp))) 5495 break; 5496 connp = connp->conn_next; 5497 } 5498 5499 if (connp == NULL) { 5500 /* 5501 * No one bound to this port. Is 5502 * there a client that wants all 5503 * unclaimed datagrams? 
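 *
 * (Aside, illustrative only: such a client is typically a raw socket
 * bound to the protocol, e.g.
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_UDP);
 *
 * which is reached through ips_ipcl_proto_fanout_v4[IPPROTO_UDP],
 * checked just below.)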
5504 */ 5505 mutex_exit(&connfp->connf_lock); 5506 5507 if (ipst->ips_ipcl_proto_fanout_v4[IPPROTO_UDP].connf_head != 5508 NULL) { 5509 ASSERT(ira->ira_protocol == IPPROTO_UDP); 5510 ip_fanout_proto_v4(mp, ipha, ira); 5511 } else { 5512 /* 5513 * We used to attempt to send an icmp error here, but 5514 * since this is known to be a multicast packet 5515 * and we don't send icmp errors in response to 5516 * multicast, just drop the packet and give up sooner. 5517 */ 5518 BUMP_MIB(ill->ill_ip_mib, udpIfStatsNoPorts); 5519 freemsg(mp); 5520 } 5521 return; 5522 } 5523 ASSERT(IPCL_IS_NONSTR(connp) || connp->conn_rq != NULL); 5524 5525 /* 5526 * If SO_REUSEADDR has been set on the first conn, we send the 5527 * packet to all clients that have joined the group and 5528 * match the port. 5529 */ 5530 if (connp->conn_reuseaddr) { 5531 conn_t *first_connp = connp; 5532 conn_t *next_connp; 5533 mblk_t *mp1; 5534 5535 CONN_INC_REF(connp); 5536 connp = connp->conn_next; 5537 for (;;) { 5538 while (connp != NULL) { 5539 if (IPCL_UDP_MATCH_V6(connp, lport, 5540 ipv6_all_zeros, fport, v6faddr) && 5541 conn_wantpacket(connp, ira, ipha) && 5542 (!(ira->ira_flags & IRAF_SYSTEM_LABELED) || 5543 tsol_receive_local(mp, &laddr, IPV4_VERSION, 5544 ira, connp))) 5545 break; 5546 connp = connp->conn_next; 5547 } 5548 if (connp == NULL) { 5549 /* No more interested clients */ 5550 connp = first_connp; 5551 break; 5552 } 5553 if (((mp1 = dupmsg(mp)) == NULL) && 5554 ((mp1 = copymsg(mp)) == NULL)) { 5555 /* Memory allocation failed */ 5556 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards); 5557 ip_drop_input("ipIfStatsInDiscards", mp, ill); 5558 connp = first_connp; 5559 break; 5560 } 5561 CONN_INC_REF(connp); 5562 mutex_exit(&connfp->connf_lock); 5563 5564 IP_STAT(ipst, ip_udp_fanmb); 5565 ip_fanout_udp_conn(connp, mp1, (ipha_t *)mp1->b_rptr, 5566 NULL, ira); 5567 mutex_enter(&connfp->connf_lock); 5568 /* Follow the next pointer before releasing the conn */ 5569 next_connp = connp->conn_next; 5570 CONN_DEC_REF(connp); 5571 connp = next_connp; 5572 } 5573 } 5574 5575 /* Last one. Send it upstream. */ 5576 mutex_exit(&connfp->connf_lock); 5577 IP_STAT(ipst, ip_udp_fanmb); 5578 ip_fanout_udp_conn(connp, mp, ipha, NULL, ira); 5579 CONN_DEC_REF(connp); 5580 } 5581 5582 /* 5583 * Split an incoming packet's IPv4 options into the label and the other options. 5584 * If 'allocate' is set, it does memory allocation for the ip_pkt_t, including 5585 * clearing out any leftover label or options. 5586 * Otherwise it just makes ipp point into the packet. 5587 * 5588 * Returns zero if ok; ENOMEM if the buffer couldn't be allocated. 5589 */ 5590 int 5591 ip_find_hdr_v4(ipha_t *ipha, ip_pkt_t *ipp, boolean_t allocate) 5592 { 5593 uchar_t *opt; 5594 uint32_t totallen; 5595 uint32_t optval; 5596 uint32_t optlen; 5597 5598 ipp->ipp_fields |= IPPF_HOPLIMIT | IPPF_TCLASS | IPPF_ADDR; 5599 ipp->ipp_hoplimit = ipha->ipha_ttl; 5600 ipp->ipp_type_of_service = ipha->ipha_type_of_service; 5601 IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &ipp->ipp_addr); 5602 5603 /* 5604 * Get the length (in 32-bit words) of the IP header options.
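 *
 * Worked example (illustrative): a header carrying two words of options
 * has ipha_version_and_hdr_length == 0x47 (version 4, 7 words).
 * Subtracting the encoded simple header value 0x45 leaves 2 words of
 * options, and the shift below converts that to 8 bytes.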
5605 */ 5606 totallen = ipha->ipha_version_and_hdr_length - 5607 (uint8_t)((IP_VERSION << 4) + IP_SIMPLE_HDR_LENGTH_IN_WORDS); 5608 5609 if (totallen == 0) { 5610 if (!allocate) 5611 return (0); 5612 5613 /* Clear out anything from a previous packet */ 5614 if (ipp->ipp_fields & IPPF_IPV4_OPTIONS) { 5615 kmem_free(ipp->ipp_ipv4_options, 5616 ipp->ipp_ipv4_options_len); 5617 ipp->ipp_ipv4_options = NULL; 5618 ipp->ipp_ipv4_options_len = 0; 5619 ipp->ipp_fields &= ~IPPF_IPV4_OPTIONS; 5620 } 5621 if (ipp->ipp_fields & IPPF_LABEL_V4) { 5622 kmem_free(ipp->ipp_label_v4, ipp->ipp_label_len_v4); 5623 ipp->ipp_label_v4 = NULL; 5624 ipp->ipp_label_len_v4 = 0; 5625 ipp->ipp_fields &= ~IPPF_LABEL_V4; 5626 } 5627 return (0); 5628 } 5629 5630 totallen <<= 2; 5631 opt = (uchar_t *)&ipha[1]; 5632 if (!is_system_labeled()) { 5633 5634 copyall: 5635 if (!allocate) { 5636 if (totallen != 0) { 5637 ipp->ipp_ipv4_options = opt; 5638 ipp->ipp_ipv4_options_len = totallen; 5639 ipp->ipp_fields |= IPPF_IPV4_OPTIONS; 5640 } 5641 return (0); 5642 } 5643 /* Just copy all of options */ 5644 if (ipp->ipp_fields & IPPF_IPV4_OPTIONS) { 5645 if (totallen == ipp->ipp_ipv4_options_len) { 5646 bcopy(opt, ipp->ipp_ipv4_options, totallen); 5647 return (0); 5648 } 5649 kmem_free(ipp->ipp_ipv4_options, 5650 ipp->ipp_ipv4_options_len); 5651 ipp->ipp_ipv4_options = NULL; 5652 ipp->ipp_ipv4_options_len = 0; 5653 ipp->ipp_fields &= ~IPPF_IPV4_OPTIONS; 5654 } 5655 if (totallen == 0) 5656 return (0); 5657 5658 ipp->ipp_ipv4_options = kmem_alloc(totallen, KM_NOSLEEP); 5659 if (ipp->ipp_ipv4_options == NULL) 5660 return (ENOMEM); 5661 ipp->ipp_ipv4_options_len = totallen; 5662 ipp->ipp_fields |= IPPF_IPV4_OPTIONS; 5663 bcopy(opt, ipp->ipp_ipv4_options, totallen); 5664 return (0); 5665 } 5666 5667 if (allocate && (ipp->ipp_fields & IPPF_LABEL_V4)) { 5668 kmem_free(ipp->ipp_label_v4, ipp->ipp_label_len_v4); 5669 ipp->ipp_label_v4 = NULL; 5670 ipp->ipp_label_len_v4 = 0; 5671 ipp->ipp_fields &= ~IPPF_LABEL_V4; 5672 } 5673 5674 /* 5675 * Search for CIPSO option. 5676 * We assume CIPSO is first in options if it is present. 5677 * If it isn't, then ipp_opt_ipv4_options will not include the options 5678 * prior to the CIPSO option. 
5679 */ 5680 while (totallen != 0) { 5681 switch (optval = opt[IPOPT_OPTVAL]) { 5682 case IPOPT_EOL: 5683 return (0); 5684 case IPOPT_NOP: 5685 optlen = 1; 5686 break; 5687 default: 5688 if (totallen <= IPOPT_OLEN) 5689 return (EINVAL); 5690 optlen = opt[IPOPT_OLEN]; 5691 if (optlen < 2) 5692 return (EINVAL); 5693 } 5694 if (optlen > totallen) 5695 return (EINVAL); 5696 5697 switch (optval) { 5698 case IPOPT_COMSEC: 5699 if (!allocate) { 5700 ipp->ipp_label_v4 = opt; 5701 ipp->ipp_label_len_v4 = optlen; 5702 ipp->ipp_fields |= IPPF_LABEL_V4; 5703 } else { 5704 ipp->ipp_label_v4 = kmem_alloc(optlen, 5705 KM_NOSLEEP); 5706 if (ipp->ipp_label_v4 == NULL) 5707 return (ENOMEM); 5708 ipp->ipp_label_len_v4 = optlen; 5709 ipp->ipp_fields |= IPPF_LABEL_V4; 5710 bcopy(opt, ipp->ipp_label_v4, optlen); 5711 } 5712 totallen -= optlen; 5713 opt += optlen; 5714 5715 /* Skip padding bytes until we get to a multiple of 4 */ 5716 while ((totallen & 3) != 0 && opt[0] == IPOPT_NOP) { 5717 totallen--; 5718 opt++; 5719 } 5720 /* Remaining as ipp_ipv4_options */ 5721 goto copyall; 5722 } 5723 totallen -= optlen; 5724 opt += optlen; 5725 } 5726 /* No CIPSO found; return everything as ipp_ipv4_options */ 5727 totallen = ipha->ipha_version_and_hdr_length - 5728 (uint8_t)((IP_VERSION << 4) + IP_SIMPLE_HDR_LENGTH_IN_WORDS); 5729 totallen <<= 2; 5730 opt = (uchar_t *)&ipha[1]; 5731 goto copyall; 5732 } 5733 5734 /* 5735 * Efficient versions of lookup for an IRE when we only 5736 * match the address. 5737 * For RTF_REJECT or BLACKHOLE we return IRE_NOROUTE. 5738 * Does not handle multicast addresses. 5739 */ 5740 uint_t 5741 ip_type_v4(ipaddr_t addr, ip_stack_t *ipst) 5742 { 5743 ire_t *ire; 5744 uint_t result; 5745 5746 ire = ire_ftable_lookup_simple_v4(addr, 0, ipst, NULL); 5747 ASSERT(ire != NULL); 5748 if (ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE)) 5749 result = IRE_NOROUTE; 5750 else 5751 result = ire->ire_type; 5752 ire_refrele(ire); 5753 return (result); 5754 } 5755 5756 /* 5757 * Efficient versions of lookup for an IRE when we only 5758 * match the address. 5759 * For RTF_REJECT or BLACKHOLE we return IRE_NOROUTE. 5760 * Does not handle multicast addresses. 5761 */ 5762 uint_t 5763 ip_type_v6(const in6_addr_t *addr, ip_stack_t *ipst) 5764 { 5765 ire_t *ire; 5766 uint_t result; 5767 5768 ire = ire_ftable_lookup_simple_v6(addr, 0, ipst, NULL); 5769 ASSERT(ire != NULL); 5770 if (ire->ire_flags & (RTF_REJECT|RTF_BLACKHOLE)) 5771 result = IRE_NOROUTE; 5772 else 5773 result = ire->ire_type; 5774 ire_refrele(ire); 5775 return (result); 5776 } 5777 5778 /* 5779 * Nobody should be sending 5780 * packets up this stream 5781 */ 5782 static void 5783 ip_lrput(queue_t *q, mblk_t *mp) 5784 { 5785 switch (mp->b_datap->db_type) { 5786 case M_FLUSH: 5787 /* Turn around */ 5788 if (*mp->b_rptr & FLUSHW) { 5789 *mp->b_rptr &= ~FLUSHR; 5790 qreply(q, mp); 5791 return; 5792 } 5793 break; 5794 } 5795 freemsg(mp); 5796 } 5797 5798 /* Nobody should be sending packets down this stream */ 5799 /* ARGSUSED */ 5800 void 5801 ip_lwput(queue_t *q, mblk_t *mp) 5802 { 5803 freemsg(mp); 5804 } 5805 5806 /* 5807 * Move the first hop in any source route to ipha_dst and remove that part of 5808 * the source route. Called by other protocols. Errors in option formatting 5809 * are ignored - will be handled by ip_output_options. Return the final 5810 * destination (either ipha_dst or the last entry in a source route.) 
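 *
 * Illustrative example (not from the original source): given
 * ipha_dst = D and an LSRR option <type, len=11, ptr=4, A, B>, this
 * routine sets ipha_dst = A and rewrites the option in place as
 * <NOP, NOP, NOP, NOP, type, len=7, ptr=4, B>, returning the final
 * destination B. The four NOPs replace the consumed hop so the total
 * header length does not change.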
5811 */ 5812 ipaddr_t 5813 ip_massage_options(ipha_t *ipha, netstack_t *ns) 5814 { 5815 ipoptp_t opts; 5816 uchar_t *opt; 5817 uint8_t optval; 5818 uint8_t optlen; 5819 ipaddr_t dst; 5820 int i; 5821 ip_stack_t *ipst = ns->netstack_ip; 5822 5823 ip2dbg(("ip_massage_options\n")); 5824 dst = ipha->ipha_dst; 5825 for (optval = ipoptp_first(&opts, ipha); 5826 optval != IPOPT_EOL; 5827 optval = ipoptp_next(&opts)) { 5828 opt = opts.ipoptp_cur; 5829 switch (optval) { 5830 uint8_t off; 5831 case IPOPT_SSRR: 5832 case IPOPT_LSRR: 5833 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) { 5834 ip1dbg(("ip_massage_options: bad src route\n")); 5835 break; 5836 } 5837 optlen = opts.ipoptp_len; 5838 off = opt[IPOPT_OFFSET]; 5839 off--; 5840 redo_srr: 5841 if (optlen < IP_ADDR_LEN || 5842 off > optlen - IP_ADDR_LEN) { 5843 /* End of source route */ 5844 ip1dbg(("ip_massage_options: end of SR\n")); 5845 break; 5846 } 5847 bcopy((char *)opt + off, &dst, IP_ADDR_LEN); 5848 ip1dbg(("ip_massage_options: next hop 0x%x\n", 5849 ntohl(dst))); 5850 /* 5851 * Check if our address is present more than 5852 * once as consecutive hops in source route. 5853 * XXX verify per-interface ip_forwarding 5854 * for source route? 5855 */ 5856 if (ip_type_v4(dst, ipst) == IRE_LOCAL) { 5857 off += IP_ADDR_LEN; 5858 goto redo_srr; 5859 } 5860 if (dst == htonl(INADDR_LOOPBACK)) { 5861 ip1dbg(("ip_massage_options: loopback addr in " 5862 "source route!\n")); 5863 break; 5864 } 5865 /* 5866 * Update ipha_dst to be the first hop and remove the 5867 * first hop from the source route (by overwriting 5868 * part of the option with NOP options). 5869 */ 5870 ipha->ipha_dst = dst; 5871 /* Put the last entry in dst */ 5872 off = ((optlen - IP_ADDR_LEN - 3) & ~(IP_ADDR_LEN-1)) + 5873 3; 5874 bcopy(&opt[off], &dst, IP_ADDR_LEN); 5875 5876 ip1dbg(("ip_massage_options: last hop 0x%x\n", 5877 ntohl(dst))); 5878 /* Move down and overwrite */ 5879 opt[IP_ADDR_LEN] = opt[0]; 5880 opt[IP_ADDR_LEN+1] = opt[IPOPT_OLEN] - IP_ADDR_LEN; 5881 opt[IP_ADDR_LEN+2] = opt[IPOPT_OFFSET]; 5882 for (i = 0; i < IP_ADDR_LEN; i++) 5883 opt[i] = IPOPT_NOP; 5884 break; 5885 } 5886 } 5887 return (dst); 5888 } 5889 5890 /* 5891 * Return the network mask 5892 * associated with the specified address. 
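 *
 * Illustrative results (classful defaults):
 *
 *	ip_net_mask(10.0.0.1)    -> 255.0.0.0		(class A)
 *	ip_net_mask(128.32.0.1)  -> 255.255.0.0		(class B)
 *	ip_net_mask(192.168.1.1) -> 255.255.255.0	(class C)
 *	ip_net_mask(224.0.0.1)   -> 240.0.0.0		(class D)
 *	ip_net_mask(240.0.0.1)   -> 255.255.255.255	(class E)
 *
 * (Addresses shown in dotted form for readability; the function takes
 * and returns ipaddr_t values in network byte order.)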
5893 */ 5894 ipaddr_t 5895 ip_net_mask(ipaddr_t addr) 5896 { 5897 uchar_t *up = (uchar_t *)&addr; 5898 ipaddr_t mask = 0; 5899 uchar_t *maskp = (uchar_t *)&mask; 5900 5901 #if defined(__i386) || defined(__amd64) 5902 #define TOTALLY_BRAIN_DAMAGED_C_COMPILER 5903 #endif 5904 #ifdef TOTALLY_BRAIN_DAMAGED_C_COMPILER 5905 maskp[0] = maskp[1] = maskp[2] = maskp[3] = 0; 5906 #endif 5907 if (CLASSD(addr)) { 5908 maskp[0] = 0xF0; 5909 return (mask); 5910 } 5911 5912 /* We assume a default netmask of /32 for Class E */ 5913 if (CLASSE(addr)) 5914 return (0xffffffffU); 5915 5916 if (addr == 0) 5917 return (0); 5918 maskp[0] = 0xFF; 5919 if ((up[0] & 0x80) == 0) 5920 return (mask); 5921 5922 maskp[1] = 0xFF; 5923 if ((up[0] & 0xC0) == 0x80) 5924 return (mask); 5925 5926 maskp[2] = 0xFF; 5927 if ((up[0] & 0xE0) == 0xC0) 5928 return (mask); 5929 5930 /* Otherwise return no mask */ 5931 return ((ipaddr_t)0); 5932 } 5933 5934 /* Name/Value Table Lookup Routine */ 5935 char * 5936 ip_nv_lookup(nv_t *nv, int value) 5937 { 5938 if (!nv) 5939 return (NULL); 5940 for (; nv->nv_name; nv++) { 5941 if (nv->nv_value == value) 5942 return (nv->nv_name); 5943 } 5944 return ("unknown"); 5945 } 5946 5947 static int 5948 ip_wait_for_info_ack(ill_t *ill) 5949 { 5950 int err; 5951 5952 mutex_enter(&ill->ill_lock); 5953 while (ill->ill_state_flags & ILL_LL_SUBNET_PENDING) { 5954 /* 5955 * Return value of 0 indicates a pending signal. 5956 */ 5957 err = cv_wait_sig(&ill->ill_cv, &ill->ill_lock); 5958 if (err == 0) { 5959 mutex_exit(&ill->ill_lock); 5960 return (EINTR); 5961 } 5962 } 5963 mutex_exit(&ill->ill_lock); 5964 /* 5965 * ip_rput_other could have set an error in ill_error on 5966 * receipt of M_ERROR. 5967 */ 5968 return (ill->ill_error); 5969 } 5970 5971 /* 5972 * This is a module open, i.e. this is a control stream for access 5973 * to a DLPI device. We allocate an ill_t as the instance data in 5974 * this case. 5975 */ 5976 static int 5977 ip_modopen(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp) 5978 { 5979 ill_t *ill; 5980 int err; 5981 zoneid_t zoneid; 5982 netstack_t *ns; 5983 ip_stack_t *ipst; 5984 5985 /* 5986 * Prevent unprivileged processes from pushing IP so that 5987 * they can't send raw IP. 5988 */ 5989 if (secpolicy_net_rawaccess(credp) != 0) 5990 return (EPERM); 5991 5992 ns = netstack_find_by_cred(credp); 5993 ASSERT(ns != NULL); 5994 ipst = ns->netstack_ip; 5995 ASSERT(ipst != NULL); 5996 5997 /* 5998 * For exclusive stacks we set the zoneid to zero 5999 * to make IP operate as if in the global zone. 6000 */ 6001 if (ipst->ips_netstack->netstack_stackid != GLOBAL_NETSTACKID) 6002 zoneid = GLOBAL_ZONEID; 6003 else 6004 zoneid = crgetzoneid(credp); 6005 6006 ill = (ill_t *)mi_open_alloc_sleep(sizeof (ill_t)); 6007 q->q_ptr = WR(q)->q_ptr = ill; 6008 ill->ill_ipst = ipst; 6009 ill->ill_zoneid = zoneid; 6010 6011 /* 6012 * ill_init initializes the ill fields and then sends 6013 * down a DL_INFO_REQ after calling qprocson. 6014 */ 6015 err = ill_init(q, ill); 6016 6017 if (err != 0) { 6018 mi_free(ill); 6019 netstack_rele(ipst->ips_netstack); 6020 q->q_ptr = NULL; 6021 WR(q)->q_ptr = NULL; 6022 return (err); 6023 } 6024 6025 /* 6026 * Wait for the DL_INFO_ACK if a DL_INFO_REQ was sent.
6027 * 6028 * ill_init initializes the ipsq, marking this thread as 6029 * writer. 6030 */ 6031 ipsq_exit(ill->ill_phyint->phyint_ipsq); 6032 err = ip_wait_for_info_ack(ill); 6033 if (err == 0) 6034 ill->ill_credp = credp; 6035 else 6036 goto fail; 6037 6038 crhold(credp); 6039 6040 mutex_enter(&ipst->ips_ip_mi_lock); 6041 err = mi_open_link(&ipst->ips_ip_g_head, (IDP)q->q_ptr, devp, flag, 6042 sflag, credp); 6043 mutex_exit(&ipst->ips_ip_mi_lock); 6044 fail: 6045 if (err) { 6046 (void) ip_close(q, 0); 6047 return (err); 6048 } 6049 return (0); 6050 } 6051 6052 /* For /dev/ip aka AF_INET open */ 6053 int 6054 ip_openv4(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp) 6055 { 6056 return (ip_open(q, devp, flag, sflag, credp, B_FALSE)); 6057 } 6058 6059 /* For /dev/ip6 aka AF_INET6 open */ 6060 int 6061 ip_openv6(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp) 6062 { 6063 return (ip_open(q, devp, flag, sflag, credp, B_TRUE)); 6064 } 6065 6066 /* IP open routine. */ 6067 int 6068 ip_open(queue_t *q, dev_t *devp, int flag, int sflag, cred_t *credp, 6069 boolean_t isv6) 6070 { 6071 conn_t *connp; 6072 major_t maj; 6073 zoneid_t zoneid; 6074 netstack_t *ns; 6075 ip_stack_t *ipst; 6076 6077 /* Allow reopen. */ 6078 if (q->q_ptr != NULL) 6079 return (0); 6080 6081 if (sflag & MODOPEN) { 6082 /* This is a module open */ 6083 return (ip_modopen(q, devp, flag, sflag, credp)); 6084 } 6085 6086 if ((flag & ~(FKLYR)) == IP_HELPER_STR) { 6087 /* 6088 * Non-STREAMS socket looking for a stream 6089 * to access IP. 6090 */ 6091 return (ip_helper_stream_setup(q, devp, flag, sflag, 6092 credp, isv6)); 6093 } 6094 6095 ns = netstack_find_by_cred(credp); 6096 ASSERT(ns != NULL); 6097 ipst = ns->netstack_ip; 6098 ASSERT(ipst != NULL); 6099 6100 /* 6101 * For exclusive stacks we set the zoneid to zero 6102 * to make IP operate as if in the global zone. 6103 */ 6104 if (ipst->ips_netstack->netstack_stackid != GLOBAL_NETSTACKID) 6105 zoneid = GLOBAL_ZONEID; 6106 else 6107 zoneid = crgetzoneid(credp); 6108 6109 /* 6110 * We are opening as a device. This is an IP client stream, and we 6111 * allocate a conn_t as the instance data. 6112 */ 6113 connp = ipcl_conn_create(IPCL_IPCCONN, KM_SLEEP, ipst->ips_netstack); 6114 6115 /* 6116 * ipcl_conn_create did a netstack_hold. Undo the hold that was 6117 * done by netstack_find_by_cred(). 6118 */ 6119 netstack_rele(ipst->ips_netstack); 6120 6121 connp->conn_ixa->ixa_flags |= IXAF_MULTICAST_LOOP | IXAF_SET_ULP_CKSUM; 6122 /* conn_allzones cannot be set this early, hence no IPCL_ZONEID */ 6123 connp->conn_ixa->ixa_zoneid = zoneid; 6124 connp->conn_zoneid = zoneid; 6125 6126 connp->conn_rq = q; 6127 q->q_ptr = WR(q)->q_ptr = connp; 6128 6129 /* Minor tells us which /dev entry was opened */ 6130 if (isv6) { 6131 connp->conn_family = AF_INET6; 6132 connp->conn_ipversion = IPV6_VERSION; 6133 connp->conn_ixa->ixa_flags &= ~IXAF_IS_IPV4; 6134 connp->conn_ixa->ixa_src_preferences = IPV6_PREFER_SRC_DEFAULT; 6135 } else { 6136 connp->conn_family = AF_INET; 6137 connp->conn_ipversion = IPV4_VERSION; 6138 connp->conn_ixa->ixa_flags |= IXAF_IS_IPV4; 6139 } 6140 6141 if ((ip_minor_arena_la != NULL) && (flag & SO_SOCKSTR) && 6142 ((connp->conn_dev = inet_minor_alloc(ip_minor_arena_la)) != 0)) { 6143 connp->conn_minor_arena = ip_minor_arena_la; 6144 } else { 6145 /* 6146 * Either minor numbers in the large arena were exhausted 6147 * or a non-socket application is doing the open. 6148 * Try to allocate from the small arena.
6149 */ 6150 if ((connp->conn_dev = 6151 inet_minor_alloc(ip_minor_arena_sa)) == 0) { 6152 /* CONN_DEC_REF takes care of netstack_rele() */ 6153 q->q_ptr = WR(q)->q_ptr = NULL; 6154 CONN_DEC_REF(connp); 6155 return (EBUSY); 6156 } 6157 connp->conn_minor_arena = ip_minor_arena_sa; 6158 } 6159 6160 maj = getemajor(*devp); 6161 *devp = makedevice(maj, (minor_t)connp->conn_dev); 6162 6163 /* 6164 * connp->conn_cred is crfree()ed in ipcl_conn_destroy() 6165 */ 6166 connp->conn_cred = credp; 6167 /* Cache things in ixa without an extra refhold */ 6168 connp->conn_ixa->ixa_cred = connp->conn_cred; 6169 connp->conn_ixa->ixa_cpid = connp->conn_cpid; 6170 if (is_system_labeled()) 6171 connp->conn_ixa->ixa_tsl = crgetlabel(connp->conn_cred); 6172 6173 /* 6174 * Handle IP_IOC_RTS_REQUEST and other ioctls which use conn_recv 6175 */ 6176 connp->conn_recv = ip_conn_input; 6177 connp->conn_recvicmp = ip_conn_input_icmp; 6178 6179 crhold(connp->conn_cred); 6180 6181 /* 6182 * If the caller has the process-wide flag set, then default to MAC 6183 * exempt mode. This allows read-down to unlabeled hosts. 6184 */ 6185 if (getpflags(NET_MAC_AWARE, credp) != 0) 6186 connp->conn_mac_mode = CONN_MAC_AWARE; 6187 6188 connp->conn_zone_is_global = (crgetzoneid(credp) == GLOBAL_ZONEID); 6189 6190 connp->conn_rq = q; 6191 connp->conn_wq = WR(q); 6192 6193 /* Non-zero default values */ 6194 connp->conn_ixa->ixa_flags |= IXAF_MULTICAST_LOOP; 6195 6196 /* 6197 * Make the conn globally visible to walkers 6198 */ 6199 ASSERT(connp->conn_ref == 1); 6200 mutex_enter(&connp->conn_lock); 6201 connp->conn_state_flags &= ~CONN_INCIPIENT; 6202 mutex_exit(&connp->conn_lock); 6203 6204 qprocson(q); 6205 6206 return (0); 6207 } 6208 6209 /* 6210 * Set IPsec policy from an ipsec_req_t. If the req is valid and not "zero", 6211 * its values are copied to the conn_t. If the req is "zero", the policy is 6212 * zeroed out. A "zero" policy has zero ipsr_{ah,esp,self_encap}_req 6213 * fields. 6214 * We keep only the latest setting of the policy and thus policy setting 6215 * is not incremental/cumulative. 6216 * 6217 * Requests to set policies with multiple alternative actions will 6218 * go through a different API. 6219 */ 6220 int 6221 ipsec_set_req(cred_t *cr, conn_t *connp, ipsec_req_t *req) 6222 { 6223 uint_t ah_req = 0; 6224 uint_t esp_req = 0; 6225 uint_t se_req = 0; 6226 ipsec_act_t *actp = NULL; 6227 uint_t nact; 6228 ipsec_policy_head_t *ph; 6229 boolean_t is_pol_reset, is_pol_inserted = B_FALSE; 6230 int error = 0; 6231 netstack_t *ns = connp->conn_netstack; 6232 ip_stack_t *ipst = ns->netstack_ip; 6233 ipsec_stack_t *ipss = ns->netstack_ipsec; 6234 6235 #define REQ_MASK (IPSEC_PREF_REQUIRED|IPSEC_PREF_NEVER) 6236 6237 /* 6238 * The IP_SEC_OPT option does not allow variable length parameters, 6239 * hence a request cannot be NULL. 6240 */ 6241 if (req == NULL) 6242 return (EINVAL); 6243 6244 ah_req = req->ipsr_ah_req; 6245 esp_req = req->ipsr_esp_req; 6246 se_req = req->ipsr_self_encap_req; 6247 6248 /* Don't allow setting self-encap without one or more of AH/ESP. */ 6249 if (se_req != 0 && esp_req == 0 && ah_req == 0) 6250 return (EINVAL); 6251 6252 /* 6253 * Are we dealing with a request to reset the policy (i.e. 6254 * zero requests)? 6255 */ 6256 is_pol_reset = ((ah_req & REQ_MASK) == 0 && 6257 (esp_req & REQ_MASK) == 0 && 6258 (se_req & REQ_MASK) == 0); 6259 6260 if (!is_pol_reset) { 6261 /* 6262 * If we couldn't load IPsec, fail with "protocol 6263 * not supported".
6264 * IPsec may not have been loaded for a request with zero 6265 * policies, so we don't fail in this case. 6266 */ 6267 mutex_enter(&ipss->ipsec_loader_lock); 6268 if (ipss->ipsec_loader_state != IPSEC_LOADER_SUCCEEDED) { 6269 mutex_exit(&ipss->ipsec_loader_lock); 6270 return (EPROTONOSUPPORT); 6271 } 6272 mutex_exit(&ipss->ipsec_loader_lock); 6273 6274 /* 6275 * Test for valid requests. Invalid algorithms 6276 * need to be tested by IPsec code because new 6277 * algorithms can be added dynamically. 6278 */ 6279 if ((ah_req & ~(REQ_MASK|IPSEC_PREF_UNIQUE)) != 0 || 6280 (esp_req & ~(REQ_MASK|IPSEC_PREF_UNIQUE)) != 0 || 6281 (se_req & ~(REQ_MASK|IPSEC_PREF_UNIQUE)) != 0) { 6282 return (EINVAL); 6283 } 6284 6285 /* 6286 * Only privileged users can issue these 6287 * requests. 6288 */ 6289 if (((ah_req & IPSEC_PREF_NEVER) || 6290 (esp_req & IPSEC_PREF_NEVER) || 6291 (se_req & IPSEC_PREF_NEVER)) && 6292 secpolicy_ip_config(cr, B_FALSE) != 0) { 6293 return (EPERM); 6294 } 6295 6296 /* 6297 * The IPSEC_PREF_REQUIRED and IPSEC_PREF_NEVER 6298 * are mutually exclusive. 6299 */ 6300 if (((ah_req & REQ_MASK) == REQ_MASK) || 6301 ((esp_req & REQ_MASK) == REQ_MASK) || 6302 ((se_req & REQ_MASK) == REQ_MASK)) { 6303 /* Both of them are set */ 6304 return (EINVAL); 6305 } 6306 } 6307 6308 ASSERT(MUTEX_HELD(&connp->conn_lock)); 6309 6310 /* 6311 * If we have already cached policies in conn_connect(), don't 6312 * let them change now. We cache policies for connections 6313 * whose src,dst [addr, port] is known. 6314 */ 6315 if (connp->conn_policy_cached) { 6316 return (EINVAL); 6317 } 6318 6319 /* 6320 * We have zero policies; reset the connection policy if already 6321 * set. This will cause the connection to inherit the 6322 * global policy, if any. 6323 */ 6324 if (is_pol_reset) { 6325 if (connp->conn_policy != NULL) { 6326 IPPH_REFRELE(connp->conn_policy, ipst->ips_netstack); 6327 connp->conn_policy = NULL; 6328 } 6329 connp->conn_in_enforce_policy = B_FALSE; 6330 connp->conn_out_enforce_policy = B_FALSE; 6331 return (0); 6332 } 6333 6334 ph = connp->conn_policy = ipsec_polhead_split(connp->conn_policy, 6335 ipst->ips_netstack); 6336 if (ph == NULL) 6337 goto enomem; 6338 6339 ipsec_actvec_from_req(req, &actp, &nact, ipst->ips_netstack); 6340 if (actp == NULL) 6341 goto enomem; 6342 6343 /* 6344 * Always insert IPv4 policy entries, since they can also apply to 6345 * ipv6 sockets being used in ipv4-compat mode. 6346 */ 6347 if (!ipsec_polhead_insert(ph, actp, nact, IPSEC_AF_V4, 6348 IPSEC_TYPE_INBOUND, ns)) 6349 goto enomem; 6350 is_pol_inserted = B_TRUE; 6351 if (!ipsec_polhead_insert(ph, actp, nact, IPSEC_AF_V4, 6352 IPSEC_TYPE_OUTBOUND, ns)) 6353 goto enomem; 6354 6355 /* 6356 * We're looking at a v6 socket, also insert the v6-specific 6357 * entries. 6358 */ 6359 if (connp->conn_family == AF_INET6) { 6360 if (!ipsec_polhead_insert(ph, actp, nact, IPSEC_AF_V6, 6361 IPSEC_TYPE_INBOUND, ns)) 6362 goto enomem; 6363 if (!ipsec_polhead_insert(ph, actp, nact, IPSEC_AF_V6, 6364 IPSEC_TYPE_OUTBOUND, ns)) 6365 goto enomem; 6366 } 6367 6368 ipsec_actvec_free(actp, nact); 6369 6370 /* 6371 * If the requests need security, set enforce_policy. 6372 * If the requests are IPSEC_PREF_NEVER, one should 6373 * still set conn_out_enforce_policy so that ip_set_destination 6374 * marks the ip_xmit_attr_t appropriately. This is needed so that 6375 * for connections that we don't cache policy in at connect time, 6376 * if global policy matches in ip_output_attach_policy, we 6377 * don't wrongly inherit global policy.
Similarly, we need 6378 * to set conn_in_enforce_policy also so that we don't verify 6379 * policy wrongly. 6380 */ 6381 if ((ah_req & REQ_MASK) != 0 || 6382 (esp_req & REQ_MASK) != 0 || 6383 (se_req & REQ_MASK) != 0) { 6384 connp->conn_in_enforce_policy = B_TRUE; 6385 connp->conn_out_enforce_policy = B_TRUE; 6386 } 6387 6388 return (error); 6389 #undef REQ_MASK 6390 6391 /* 6392 * Common memory-allocation-failure exit path. 6393 */ 6394 enomem: 6395 if (actp != NULL) 6396 ipsec_actvec_free(actp, nact); 6397 if (is_pol_inserted) 6398 ipsec_polhead_flush(ph, ns); 6399 return (ENOMEM); 6400 } 6401 6402 /* 6403 * Set socket options for joining and leaving multicast groups. 6404 * Common to IPv4 and IPv6; inet6 indicates the type of socket. 6405 * The caller has already checked that the option name is consistent with 6406 * the address family of the socket. 6407 */ 6408 int 6409 ip_opt_set_multicast_group(conn_t *connp, t_scalar_t name, 6410 uchar_t *invalp, boolean_t inet6, boolean_t checkonly) 6411 { 6412 int *i1 = (int *)invalp; 6413 int error = 0; 6414 ip_stack_t *ipst = connp->conn_netstack->netstack_ip; 6415 struct ip_mreq *v4_mreqp; 6416 struct ipv6_mreq *v6_mreqp; 6417 struct group_req *greqp; 6418 ire_t *ire; 6419 boolean_t done = B_FALSE; 6420 ipaddr_t ifaddr; 6421 in6_addr_t v6group; 6422 uint_t ifindex; 6423 boolean_t mcast_opt = B_TRUE; 6424 mcast_record_t fmode; 6425 int (*optfn)(conn_t *, boolean_t, const in6_addr_t *, 6426 ipaddr_t, uint_t, mcast_record_t, const in6_addr_t *); 6427 6428 switch (name) { 6429 case IP_ADD_MEMBERSHIP: 6430 case IPV6_JOIN_GROUP: 6431 mcast_opt = B_FALSE; 6432 /* FALLTHRU */ 6433 case MCAST_JOIN_GROUP: 6434 fmode = MODE_IS_EXCLUDE; 6435 optfn = ip_opt_add_group; 6436 break; 6437 6438 case IP_DROP_MEMBERSHIP: 6439 case IPV6_LEAVE_GROUP: 6440 mcast_opt = B_FALSE; 6441 /* FALLTHRU */ 6442 case MCAST_LEAVE_GROUP: 6443 fmode = MODE_IS_INCLUDE; 6444 optfn = ip_opt_delete_group; 6445 break; 6446 default: 6447 ASSERT(0); 6448 } 6449 6450 if (mcast_opt) { 6451 struct sockaddr_in *sin; 6452 struct sockaddr_in6 *sin6; 6453 6454 greqp = (struct group_req *)i1; 6455 if (greqp->gr_group.ss_family == AF_INET) { 6456 sin = (struct sockaddr_in *)&(greqp->gr_group); 6457 IN6_INADDR_TO_V4MAPPED(&sin->sin_addr, &v6group); 6458 } else { 6459 if (!inet6) 6460 return (EINVAL); /* Not on INET socket */ 6461 6462 sin6 = (struct sockaddr_in6 *)&(greqp->gr_group); 6463 v6group = sin6->sin6_addr; 6464 } 6465 ifaddr = INADDR_ANY; 6466 ifindex = greqp->gr_interface; 6467 } else if (inet6) { 6468 v6_mreqp = (struct ipv6_mreq *)i1; 6469 v6group = v6_mreqp->ipv6mr_multiaddr; 6470 ifaddr = INADDR_ANY; 6471 ifindex = v6_mreqp->ipv6mr_interface; 6472 } else { 6473 v4_mreqp = (struct ip_mreq *)i1; 6474 IN6_INADDR_TO_V4MAPPED(&v4_mreqp->imr_multiaddr, &v6group); 6475 ifaddr = (ipaddr_t)v4_mreqp->imr_interface.s_addr; 6476 ifindex = 0; 6477 } 6478 6479 /* 6480 * In the multirouting case, we need to replicate 6481 * the request on all interfaces that will take part 6482 * in replication. We do so because multirouting is 6483 * reflective; thus we will probably receive multi- 6484 * casts on those interfaces. 6485 * The ip_multirt_apply_membership() succeeds if 6486 * the operation succeeds on at least one interface.
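 *
 * (Illustrative aside, not from this file: the protocol-independent
 * join handled above is driven from userland roughly as follows;
 * the interface name and group address are example values and error
 * handling is elided:
 *
 *	struct group_req gr;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&gr.gr_group;
 *
 *	gr.gr_interface = if_nametoindex("net0");
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("224.1.2.3");
 *	setsockopt(fd, IPPROTO_IP, MCAST_JOIN_GROUP, &gr, sizeof (gr));
 *
 * End of aside.)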
6487 */ 6488 if (IN6_IS_ADDR_V4MAPPED(&v6group)) { 6489 ipaddr_t group; 6490 6491 IN6_V4MAPPED_TO_IPADDR(&v6group, group); 6492 6493 ire = ire_ftable_lookup_v4(group, IP_HOST_MASK, 0, 6494 IRE_HOST | IRE_INTERFACE, NULL, ALL_ZONES, NULL, 6495 MATCH_IRE_MASK | MATCH_IRE_TYPE, 0, ipst, NULL); 6496 } else { 6497 ire = ire_ftable_lookup_v6(&v6group, &ipv6_all_ones, 0, 6498 IRE_HOST | IRE_INTERFACE, NULL, ALL_ZONES, NULL, 6499 MATCH_IRE_MASK | MATCH_IRE_TYPE, 0, ipst, NULL); 6500 } 6501 if (ire != NULL) { 6502 if (ire->ire_flags & RTF_MULTIRT) { 6503 error = ip_multirt_apply_membership(optfn, ire, connp, 6504 checkonly, &v6group, fmode, &ipv6_all_zeros); 6505 done = B_TRUE; 6506 } 6507 ire_refrele(ire); 6508 } 6509 6510 if (!done) { 6511 error = optfn(connp, checkonly, &v6group, ifaddr, ifindex, 6512 fmode, &ipv6_all_zeros); 6513 } 6514 return (error); 6515 } 6516 6517 /* 6518 * Set socket options for joining and leaving multicast groups 6519 * for specific sources. 6520 * Common to IPv4 and IPv6; inet6 indicates the type of socket. 6521 * The caller has already checked that the option name is consistent with 6522 * the address family of the socket. 6523 */ 6524 int 6525 ip_opt_set_multicast_sources(conn_t *connp, t_scalar_t name, 6526 uchar_t *invalp, boolean_t inet6, boolean_t checkonly) 6527 { 6528 int *i1 = (int *)invalp; 6529 int error = 0; 6530 ip_stack_t *ipst = connp->conn_netstack->netstack_ip; 6531 struct ip_mreq_source *imreqp; 6532 struct group_source_req *gsreqp; 6533 in6_addr_t v6group, v6src; 6534 uint32_t ifindex; 6535 ipaddr_t ifaddr; 6536 boolean_t mcast_opt = B_TRUE; 6537 mcast_record_t fmode; 6538 ire_t *ire; 6539 boolean_t done = B_FALSE; 6540 int (*optfn)(conn_t *, boolean_t, const in6_addr_t *, 6541 ipaddr_t, uint_t, mcast_record_t, const in6_addr_t *); 6542 6543 switch (name) { 6544 case IP_BLOCK_SOURCE: 6545 mcast_opt = B_FALSE; 6546 /* FALLTHRU */ 6547 case MCAST_BLOCK_SOURCE: 6548 fmode = MODE_IS_EXCLUDE; 6549 optfn = ip_opt_add_group; 6550 break; 6551 6552 case IP_UNBLOCK_SOURCE: 6553 mcast_opt = B_FALSE; 6554 /* FALLTHRU */ 6555 case MCAST_UNBLOCK_SOURCE: 6556 fmode = MODE_IS_EXCLUDE; 6557 optfn = ip_opt_delete_group; 6558 break; 6559 6560 case IP_ADD_SOURCE_MEMBERSHIP: 6561 mcast_opt = B_FALSE; 6562 /* FALLTHRU */ 6563 case MCAST_JOIN_SOURCE_GROUP: 6564 fmode = MODE_IS_INCLUDE; 6565 optfn = ip_opt_add_group; 6566 break; 6567 6568 case IP_DROP_SOURCE_MEMBERSHIP: 6569 mcast_opt = B_FALSE; 6570 /* FALLTHRU */ 6571 case MCAST_LEAVE_SOURCE_GROUP: 6572 fmode = MODE_IS_INCLUDE; 6573 optfn = ip_opt_delete_group; 6574 break; 6575 default: 6576 ASSERT(0); 6577 } 6578 6579 if (mcast_opt) { 6580 gsreqp = (struct group_source_req *)i1; 6581 ifindex = gsreqp->gsr_interface; 6582 if (gsreqp->gsr_group.ss_family == AF_INET) { 6583 struct sockaddr_in *s; 6584 s = (struct sockaddr_in *)&gsreqp->gsr_group; 6585 IN6_INADDR_TO_V4MAPPED(&s->sin_addr, &v6group); 6586 s = (struct sockaddr_in *)&gsreqp->gsr_source; 6587 IN6_INADDR_TO_V4MAPPED(&s->sin_addr, &v6src); 6588 } else { 6589 struct sockaddr_in6 *s6; 6590 6591 if (!inet6) 6592 return (EINVAL); /* Not on INET socket */ 6593 6594 s6 = (struct sockaddr_in6 *)&gsreqp->gsr_group; 6595 v6group = s6->sin6_addr; 6596 s6 = (struct sockaddr_in6 *)&gsreqp->gsr_source; 6597 v6src = s6->sin6_addr; 6598 } 6599 ifaddr = INADDR_ANY; 6600 } else { 6601 imreqp = (struct ip_mreq_source *)i1; 6602 IN6_INADDR_TO_V4MAPPED(&imreqp->imr_multiaddr, &v6group); 6603 IN6_INADDR_TO_V4MAPPED(&imreqp->imr_sourceaddr, &v6src); 6604 ifaddr =
(ipaddr_t)imreqp->imr_interface.s_addr; 6605 ifindex = 0; 6606 } 6607 6608 /* 6609 * Handle src being mapped INADDR_ANY by changing it to unspecified. 6610 */ 6611 if (IN6_IS_ADDR_V4MAPPED_ANY(&v6src)) 6612 v6src = ipv6_all_zeros; 6613 6614 /* 6615 * In the multirouting case, we need to replicate 6616 * the request as noted in the mcast cases above. 6617 */ 6618 if (IN6_IS_ADDR_V4MAPPED(&v6group)) { 6619 ipaddr_t group; 6620 6621 IN6_V4MAPPED_TO_IPADDR(&v6group, group); 6622 6623 ire = ire_ftable_lookup_v4(group, IP_HOST_MASK, 0, 6624 IRE_HOST | IRE_INTERFACE, NULL, ALL_ZONES, NULL, 6625 MATCH_IRE_MASK | MATCH_IRE_TYPE, 0, ipst, NULL); 6626 } else { 6627 ire = ire_ftable_lookup_v6(&v6group, &ipv6_all_ones, 0, 6628 IRE_HOST | IRE_INTERFACE, NULL, ALL_ZONES, NULL, 6629 MATCH_IRE_MASK | MATCH_IRE_TYPE, 0, ipst, NULL); 6630 } 6631 if (ire != NULL) { 6632 if (ire->ire_flags & RTF_MULTIRT) { 6633 error = ip_multirt_apply_membership(optfn, ire, connp, 6634 checkonly, &v6group, fmode, &v6src); 6635 done = B_TRUE; 6636 } 6637 ire_refrele(ire); 6638 } 6639 if (!done) { 6640 error = optfn(connp, checkonly, &v6group, ifaddr, ifindex, 6641 fmode, &v6src); 6642 } 6643 return (error); 6644 } 6645 6646 /* 6647 * Given a destination address and a pointer to where to put the information 6648 * this routine fills in the mtuinfo. 6649 * The socket must be connected. 6650 * For sctp conn_faddr is the primary address. 6651 */ 6652 int 6653 ip_fill_mtuinfo(conn_t *connp, ip_xmit_attr_t *ixa, struct ip6_mtuinfo *mtuinfo) 6654 { 6655 uint32_t pmtu = IP_MAXPACKET; 6656 uint_t scopeid; 6657 6658 if (IN6_IS_ADDR_UNSPECIFIED(&connp->conn_faddr_v6)) 6659 return (-1); 6660 6661 /* In case we never sent or called ip_set_destination_v4/v6 */ 6662 if (ixa->ixa_ire != NULL) 6663 pmtu = ip_get_pmtu(ixa); 6664 6665 if (ixa->ixa_flags & IXAF_SCOPEID_SET) 6666 scopeid = ixa->ixa_scopeid; 6667 else 6668 scopeid = 0; 6669 6670 bzero(mtuinfo, sizeof (*mtuinfo)); 6671 mtuinfo->ip6m_addr.sin6_family = AF_INET6; 6672 mtuinfo->ip6m_addr.sin6_port = connp->conn_fport; 6673 mtuinfo->ip6m_addr.sin6_addr = connp->conn_faddr_v6; 6674 mtuinfo->ip6m_addr.sin6_scope_id = scopeid; 6675 mtuinfo->ip6m_mtu = pmtu; 6676 6677 return (sizeof (struct ip6_mtuinfo)); 6678 } 6679 6680 /* Named Dispatch routine to get a current value out of our parameter table. */ 6681 /* ARGSUSED */ 6682 static int 6683 ip_param_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *ioc_cr) 6684 { 6685 ipparam_t *ippa = (ipparam_t *)cp; 6686 6687 (void) mi_mpprintf(mp, "%d", ippa->ip_param_value); 6688 return (0); 6689 } 6690 6691 /* ARGSUSED */ 6692 static int 6693 ip_param_generic_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *ioc_cr) 6694 { 6695 6696 (void) mi_mpprintf(mp, "%d", *(int *)cp); 6697 return (0); 6698 } 6699 6700 /* 6701 * Set ip{,6}_forwarding values. This means walking through all of the 6702 * ill's and toggling their forwarding values. 
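 *
 * For illustration: these forwarding values are the ones historically
 * driven from userland through ndd(1M), e.g.
 *
 *	ndd -set /dev/ip ip_forwarding 1
 *	ndd -get /dev/ip6 ip6_forwarding
 *
 * which reach this setter (and the getters above) via the Named
 * Dispatch table.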
6703 */ 6704 /* ARGSUSED */ 6705 static int 6706 ip_forward_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *ioc_cr) 6707 { 6708 long new_value; 6709 int *forwarding_value = (int *)cp; 6710 ill_t *ill; 6711 boolean_t isv6; 6712 ill_walk_context_t ctx; 6713 ip_stack_t *ipst = CONNQ_TO_IPST(q); 6714 6715 isv6 = (forwarding_value == &ipst->ips_ipv6_forward); 6716 6717 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || 6718 new_value < 0 || new_value > 1) { 6719 return (EINVAL); 6720 } 6721 6722 *forwarding_value = new_value; 6723 6724 /* 6725 * Regardless of the current value of ip_forwarding, set all per-ill 6726 * values of ip_forwarding to the value being set. 6727 * 6728 * Bring all the ill's up to date with the new global value. 6729 */ 6730 rw_enter(&ipst->ips_ill_g_lock, RW_READER); 6731 6732 if (isv6) 6733 ill = ILL_START_WALK_V6(&ctx, ipst); 6734 else 6735 ill = ILL_START_WALK_V4(&ctx, ipst); 6736 6737 for (; ill != NULL; ill = ill_next(&ctx, ill)) 6738 (void) ill_forward_set(ill, new_value != 0); 6739 6740 rw_exit(&ipst->ips_ill_g_lock); 6741 return (0); 6742 } 6743 6744 /* 6745 * Walk through the specified param array, registering each element with the 6746 * Named Dispatch handler. This is called only during init, so it is OK 6747 * not to acquire any locks. 6748 */ 6749 static boolean_t 6750 ip_param_register(IDP *ndp, ipparam_t *ippa, size_t ippa_cnt, 6751 ipndp_t *ipnd, size_t ipnd_cnt) 6752 { 6753 for (; ippa_cnt-- > 0; ippa++) { 6754 if (ippa->ip_param_name && ippa->ip_param_name[0]) { 6755 if (!nd_load(ndp, ippa->ip_param_name, 6756 ip_param_get, ip_param_set, (caddr_t)ippa)) { 6757 nd_free(ndp); 6758 return (B_FALSE); 6759 } 6760 } 6761 } 6762 6763 for (; ipnd_cnt-- > 0; ipnd++) { 6764 if (ipnd->ip_ndp_name && ipnd->ip_ndp_name[0]) { 6765 if (!nd_load(ndp, ipnd->ip_ndp_name, 6766 ipnd->ip_ndp_getf, ipnd->ip_ndp_setf, 6767 ipnd->ip_ndp_data)) { 6768 nd_free(ndp); 6769 return (B_FALSE); 6770 } 6771 } 6772 } 6773 6774 return (B_TRUE); 6775 } 6776 6777 /* Named Dispatch routine to negotiate a new value for one of our parameters. */ 6778 /* ARGSUSED */ 6779 static int 6780 ip_param_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, cred_t *ioc_cr) 6781 { 6782 long new_value; 6783 ipparam_t *ippa = (ipparam_t *)cp; 6784 6785 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || 6786 new_value < ippa->ip_param_min || new_value > ippa->ip_param_max) { 6787 return (EINVAL); 6788 } 6789 ippa->ip_param_value = new_value; 6790 return (0); 6791 } 6792 6793 /* 6794 * Handles both IPv4 and IPv6 reassembly - doing the out-of-order cases. 6795 * When an ipf is passed here for the first time, if 6796 * we already have in-order fragments on the queue, we convert from the fast- 6797 * path reassembly scheme to the hard-case scheme. From then on, additional 6798 * fragments are reassembled here. We keep track of the start and end offsets 6799 * of each piece, and the number of holes in the chain. When the hole count 6800 * goes to zero, we are done! 6801 * 6802 * The ipf_count will be updated to account for any mblk(s) added (pointed to 6803 * by mp) or subtracted (freeb()ed dups); upon return the caller must update 6804 * ipfb_count and ill_frag_count by the difference of ipf_count before and 6805 * after the call to ip_reassemble().
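 *
 * Worked example (illustrative, schematic): a 4000-byte payload split
 * into fragments A [0..1399], B [1400..2799] and C [2800..3999]
 * arriving in the order A, C, B is accounted for roughly as
 *
 *	A arrives (MF set):	hole count = 1	(tail hole after A)
 *	C arrives (MF clear):	hole count = 1	(the tail hole becomes
 *						 the B-sized gap; no tail
 *						 hole remains after the
 *						 last fragment)
 *	B arrives (MF set):	hole count = 0	-> IP_REASS_COMPLETE
 *
 * The invariant is simply that a completed datagram has no holes left.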
6806 */ 6807 int 6808 ip_reassemble(mblk_t *mp, ipf_t *ipf, uint_t start, boolean_t more, ill_t *ill, 6809 size_t msg_len) 6810 { 6811 uint_t end; 6812 mblk_t *next_mp; 6813 mblk_t *mp1; 6814 uint_t offset; 6815 boolean_t incr_dups = B_TRUE; 6816 boolean_t offset_zero_seen = B_FALSE; 6817 boolean_t pkt_boundary_checked = B_FALSE; 6818 6819 /* If start == 0 then ipf_nf_hdr_len has to be set. */ 6820 ASSERT(start != 0 || ipf->ipf_nf_hdr_len != 0); 6821 6822 /* Add in byte count */ 6823 ipf->ipf_count += msg_len; 6824 if (ipf->ipf_end) { 6825 /* 6826 * We were part way through in-order reassembly, but now there 6827 * is a hole. We walk through messages already queued, and 6828 * mark them for hard case reassembly. We know that up till 6829 * now they were in order starting from offset zero. 6830 */ 6831 offset = 0; 6832 for (mp1 = ipf->ipf_mp->b_cont; mp1; mp1 = mp1->b_cont) { 6833 IP_REASS_SET_START(mp1, offset); 6834 if (offset == 0) { 6835 ASSERT(ipf->ipf_nf_hdr_len != 0); 6836 offset = -ipf->ipf_nf_hdr_len; 6837 } 6838 offset += mp1->b_wptr - mp1->b_rptr; 6839 IP_REASS_SET_END(mp1, offset); 6840 } 6841 /* One hole at the end. */ 6842 ipf->ipf_hole_cnt = 1; 6843 /* Brand it as a hard case, forever. */ 6844 ipf->ipf_end = 0; 6845 } 6846 /* Walk through all the new pieces. */ 6847 do { 6848 end = start + (mp->b_wptr - mp->b_rptr); 6849 /* 6850 * If start is 0, decrease 'end' only for the first mblk of 6851 * the fragment. Otherwise 'end' can get a wrong value in the 6852 * second pass of the loop if the first mblk is exactly the 6853 * size of ipf_nf_hdr_len. 6854 */ 6855 if (start == 0 && !offset_zero_seen) { 6856 /* First segment */ 6857 ASSERT(ipf->ipf_nf_hdr_len != 0); 6858 end -= ipf->ipf_nf_hdr_len; 6859 offset_zero_seen = B_TRUE; 6860 } 6861 next_mp = mp->b_cont; 6862 /* 6863 * We are checking to see if there is any interesting data 6864 * to process. If there isn't and the mblk isn't the 6865 * one which carries the unfragmentable header then we 6866 * drop it. It's possible to have just the unfragmentable 6867 * header come through without any data. That needs to be 6868 * saved. 6869 * 6870 * If the assert at the top of this function holds then the 6871 * term "ipf->ipf_nf_hdr_len != 0" isn't needed. This code 6872 * is infrequently traveled enough that the test is left in 6873 * to protect against future code changes which break that 6874 * invariant. 6875 */ 6876 if (start == end && start != 0 && ipf->ipf_nf_hdr_len != 0) { 6877 /* Empty. Blast it. */ 6878 IP_REASS_SET_START(mp, 0); 6879 IP_REASS_SET_END(mp, 0); 6880 /* 6881 * If the ipf points to the mblk we are about to free, 6882 * update ipf to point to the next mblk (or NULL 6883 * if none). 6884 */ 6885 if (ipf->ipf_mp->b_cont == mp) 6886 ipf->ipf_mp->b_cont = next_mp; 6887 freeb(mp); 6888 continue; 6889 } 6890 mp->b_cont = NULL; 6891 IP_REASS_SET_START(mp, start); 6892 IP_REASS_SET_END(mp, end); 6893 if (!ipf->ipf_tail_mp) { 6894 ipf->ipf_tail_mp = mp; 6895 ipf->ipf_mp->b_cont = mp; 6896 if (start == 0 || !more) { 6897 ipf->ipf_hole_cnt = 1; 6898 /* 6899 * If the first fragment comes in more than one 6900 * mblk, this loop will be executed for each 6901 * mblk. Need to adjust hole count so exiting 6902 * this routine will leave hole count at 1.
6903 */ 6904 if (next_mp) 6905 ipf->ipf_hole_cnt++; 6906 } else 6907 ipf->ipf_hole_cnt = 2; 6908 continue; 6909 } else if (ipf->ipf_last_frag_seen && !more && 6910 !pkt_boundary_checked) { 6911 /* 6912 * We check datagram boundary only if this fragment 6913 * claims to be the last fragment and we have seen a 6914 * last fragment in the past too. We do this only 6915 * once for a given fragment. 6916 * 6917 * start cannot be 0 here as fragments with start=0 6918 * and MF=0 get handled as complete packets. These 6919 * fragments should not reach here. 6920 */ 6921 6922 if (start + msgdsize(mp) != 6923 IP_REASS_END(ipf->ipf_tail_mp)) { 6924 /* 6925 * We have two fragments both of which claim 6926 * to be the last fragment but give conflicting 6927 * information about the whole datagram size. 6928 * Something fishy is going on. Drop the 6929 * fragment and free up the reassembly list. 6930 */ 6931 return (IP_REASS_FAILED); 6932 } 6933 6934 /* 6935 * We shouldn't come to this code block again for this 6936 * particular fragment. 6937 */ 6938 pkt_boundary_checked = B_TRUE; 6939 } 6940 6941 /* New stuff at or beyond tail? */ 6942 offset = IP_REASS_END(ipf->ipf_tail_mp); 6943 if (start >= offset) { 6944 if (ipf->ipf_last_frag_seen) { 6945 /* current fragment is beyond last fragment */ 6946 return (IP_REASS_FAILED); 6947 } 6948 /* Link it on end. */ 6949 ipf->ipf_tail_mp->b_cont = mp; 6950 ipf->ipf_tail_mp = mp; 6951 if (more) { 6952 if (start != offset) 6953 ipf->ipf_hole_cnt++; 6954 } else if (start == offset && next_mp == NULL) 6955 ipf->ipf_hole_cnt--; 6956 continue; 6957 } 6958 mp1 = ipf->ipf_mp->b_cont; 6959 offset = IP_REASS_START(mp1); 6960 /* New stuff at the front? */ 6961 if (start < offset) { 6962 if (start == 0) { 6963 if (end >= offset) { 6964 /* Nailed the hole at the beginning. */ 6965 ipf->ipf_hole_cnt--; 6966 } 6967 } else if (end < offset) { 6968 /* 6969 * A hole, stuff, and a hole where there used 6970 * to be just a hole. 6971 */ 6972 ipf->ipf_hole_cnt++; 6973 } 6974 mp->b_cont = mp1; 6975 /* Check for overlap. */ 6976 while (end > offset) { 6977 if (end < IP_REASS_END(mp1)) { 6978 mp->b_wptr -= end - offset; 6979 IP_REASS_SET_END(mp, offset); 6980 BUMP_MIB(ill->ill_ip_mib, 6981 ipIfStatsReasmPartDups); 6982 break; 6983 } 6984 /* Did we cover another hole? */ 6985 if ((mp1->b_cont && 6986 IP_REASS_END(mp1) != 6987 IP_REASS_START(mp1->b_cont) && 6988 end >= IP_REASS_START(mp1->b_cont)) || 6989 (!ipf->ipf_last_frag_seen && !more)) { 6990 ipf->ipf_hole_cnt--; 6991 } 6992 /* Clip out mp1. */ 6993 if ((mp->b_cont = mp1->b_cont) == NULL) { 6994 /* 6995 * After clipping out mp1, this guy 6996 * is now hanging off the end. 6997 */ 6998 ipf->ipf_tail_mp = mp; 6999 } 7000 IP_REASS_SET_START(mp1, 0); 7001 IP_REASS_SET_END(mp1, 0); 7002 /* Subtract byte count */ 7003 ipf->ipf_count -= mp1->b_datap->db_lim - 7004 mp1->b_datap->db_base; 7005 freeb(mp1); 7006 BUMP_MIB(ill->ill_ip_mib, 7007 ipIfStatsReasmPartDups); 7008 mp1 = mp->b_cont; 7009 if (!mp1) 7010 break; 7011 offset = IP_REASS_START(mp1); 7012 } 7013 ipf->ipf_mp->b_cont = mp; 7014 continue; 7015 } 7016 /* 7017 * The new piece starts somewhere between the start of the head 7018 * and before the end of the tail. 7019 */ 7020 for (; mp1; mp1 = mp1->b_cont) { 7021 offset = IP_REASS_END(mp1); 7022 if (start < offset) { 7023 if (end <= offset) { 7024 /* Nothing new. 
*/ 7025 IP_REASS_SET_START(mp, 0); 7026 IP_REASS_SET_END(mp, 0); 7027 /* Subtract byte count */ 7028 ipf->ipf_count -= mp->b_datap->db_lim - 7029 mp->b_datap->db_base; 7030 if (incr_dups) { 7031 ipf->ipf_num_dups++; 7032 incr_dups = B_FALSE; 7033 } 7034 freeb(mp); 7035 BUMP_MIB(ill->ill_ip_mib, 7036 ipIfStatsReasmDuplicates); 7037 break; 7038 } 7039 /* 7040 * Trim redundant stuff off beginning of new 7041 * piece. 7042 */ 7043 IP_REASS_SET_START(mp, offset); 7044 mp->b_rptr += offset - start; 7045 BUMP_MIB(ill->ill_ip_mib, 7046 ipIfStatsReasmPartDups); 7047 start = offset; 7048 if (!mp1->b_cont) { 7049 /* 7050 * After trimming, this guy is now 7051 * hanging off the end. 7052 */ 7053 mp1->b_cont = mp; 7054 ipf->ipf_tail_mp = mp; 7055 if (!more) { 7056 ipf->ipf_hole_cnt--; 7057 } 7058 break; 7059 } 7060 } 7061 if (start >= IP_REASS_START(mp1->b_cont)) 7062 continue; 7063 /* Fill a hole */ 7064 if (start > offset) 7065 ipf->ipf_hole_cnt++; 7066 mp->b_cont = mp1->b_cont; 7067 mp1->b_cont = mp; 7068 mp1 = mp->b_cont; 7069 offset = IP_REASS_START(mp1); 7070 if (end >= offset) { 7071 ipf->ipf_hole_cnt--; 7072 /* Check for overlap. */ 7073 while (end > offset) { 7074 if (end < IP_REASS_END(mp1)) { 7075 mp->b_wptr -= end - offset; 7076 IP_REASS_SET_END(mp, offset); 7077 /* 7078 * TODO we might bump 7079 * this up twice if there is 7080 * overlap at both ends. 7081 */ 7082 BUMP_MIB(ill->ill_ip_mib, 7083 ipIfStatsReasmPartDups); 7084 break; 7085 } 7086 /* Did we cover another hole? */ 7087 if ((mp1->b_cont && 7088 IP_REASS_END(mp1) 7089 != IP_REASS_START(mp1->b_cont) && 7090 end >= 7091 IP_REASS_START(mp1->b_cont)) || 7092 (!ipf->ipf_last_frag_seen && 7093 !more)) { 7094 ipf->ipf_hole_cnt--; 7095 } 7096 /* Clip out mp1. */ 7097 if ((mp->b_cont = mp1->b_cont) == 7098 NULL) { 7099 /* 7100 * After clipping out mp1, 7101 * this guy is now hanging 7102 * off the end. 7103 */ 7104 ipf->ipf_tail_mp = mp; 7105 } 7106 IP_REASS_SET_START(mp1, 0); 7107 IP_REASS_SET_END(mp1, 0); 7108 /* Subtract byte count */ 7109 ipf->ipf_count -= 7110 mp1->b_datap->db_lim - 7111 mp1->b_datap->db_base; 7112 freeb(mp1); 7113 BUMP_MIB(ill->ill_ip_mib, 7114 ipIfStatsReasmPartDups); 7115 mp1 = mp->b_cont; 7116 if (!mp1) 7117 break; 7118 offset = IP_REASS_START(mp1); 7119 } 7120 } 7121 break; 7122 } 7123 } while (start = end, mp = next_mp); 7124 7125 /* Fragment just processed could be the last one. Remember this fact */ 7126 if (!more) 7127 ipf->ipf_last_frag_seen = B_TRUE; 7128 7129 /* Still got holes? */ 7130 if (ipf->ipf_hole_cnt) 7131 return (IP_REASS_PARTIAL); 7132 /* Clean up overloaded fields to avoid upstream disasters. */ 7133 for (mp1 = ipf->ipf_mp->b_cont; mp1; mp1 = mp1->b_cont) { 7134 IP_REASS_SET_START(mp1, 0); 7135 IP_REASS_SET_END(mp1, 0); 7136 } 7137 return (IP_REASS_COMPLETE); 7138 } 7139 7140 /* 7141 * Fragmentation reassembly. Each ILL has a hash table for 7142 * queuing packets undergoing reassembly for all IPIFs 7143 * associated with the ILL. The hash is based on the packet 7144 * IP ident field. The ILL frag hash table was allocated 7145 * as a timer block at the time the ILL was created. Whenever 7146 * there is anything on the reassembly queue, the timer will 7147 * be running. Returns the reassembled packet if reassembly completes. 
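 * Returns NULL when the fragment was consumed (queued for later, pruned, or dropped); a packet that is not actually fragmented is returned unchanged.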
7148 */ 7149 mblk_t * 7150 ip_input_fragment(mblk_t *mp, ipha_t *ipha, ip_recv_attr_t *ira) 7151 { 7152 uint32_t frag_offset_flags; 7153 mblk_t *t_mp; 7154 ipaddr_t dst; 7155 uint8_t proto = ipha->ipha_protocol; 7156 uint32_t sum_val; 7157 uint16_t sum_flags; 7158 ipf_t *ipf; 7159 ipf_t **ipfp; 7160 ipfb_t *ipfb; 7161 uint16_t ident; 7162 uint32_t offset; 7163 ipaddr_t src; 7164 uint_t hdr_length; 7165 uint32_t end; 7166 mblk_t *mp1; 7167 mblk_t *tail_mp; 7168 size_t count; 7169 size_t msg_len; 7170 uint8_t ecn_info = 0; 7171 uint32_t packet_size; 7172 boolean_t pruned = B_FALSE; 7173 ill_t *ill = ira->ira_ill; 7174 ip_stack_t *ipst = ill->ill_ipst; 7175 7176 /* 7177 * Drop the fragmented packet as early as possible, if 7178 * we don't have the resource(s) to re-assemble it. 7179 */ 7180 if (ipst->ips_ip_reass_queue_bytes == 0) { 7181 freemsg(mp); 7182 return (NULL); 7183 } 7184 7185 /* Check for fragmentation offset; return if there's none */ 7186 if ((frag_offset_flags = ntohs(ipha->ipha_fragment_offset_and_flags) & 7187 (IPH_MF | IPH_OFFSET)) == 0) 7188 return (mp); 7189 7190 /* 7191 * We utilize hardware computed checksum info only for UDP since 7192 * IP fragmentation is a normal occurrence for the protocol. In 7193 * addition, checksum offload support for IP fragments carrying 7194 * UDP payload is commonly implemented across network adapters. 7195 */ 7196 ASSERT(ira->ira_rill != NULL); 7197 if (proto == IPPROTO_UDP && dohwcksum && 7198 ILL_HCKSUM_CAPABLE(ira->ira_rill) && 7199 (DB_CKSUMFLAGS(mp) & (HCK_FULLCKSUM | HCK_PARTIALCKSUM))) { 7200 mblk_t *mp1 = mp->b_cont; 7201 int32_t len; 7202 7203 /* Record checksum information from the packet */ 7204 sum_val = (uint32_t)DB_CKSUM16(mp); 7205 sum_flags = DB_CKSUMFLAGS(mp); 7206 7207 /* IP payload offset from beginning of mblk */ 7208 offset = ((uchar_t *)ipha + IPH_HDR_LENGTH(ipha)) - mp->b_rptr; 7209 7210 if ((sum_flags & HCK_PARTIALCKSUM) && 7211 (mp1 == NULL || mp1->b_cont == NULL) && 7212 offset >= DB_CKSUMSTART(mp) && 7213 ((len = offset - DB_CKSUMSTART(mp)) & 1) == 0) { 7214 uint32_t adj; 7215 /* 7216 * Partial checksum has been calculated by hardware 7217 * and attached to the packet; in addition, any 7218 * prepended extraneous data is even byte aligned. 7219 * If any such data exists, we adjust the checksum; 7220 * this would also handle any postpended data. 7221 */ 7222 IP_ADJCKSUM_PARTIAL(mp->b_rptr + DB_CKSUMSTART(mp), 7223 mp, mp1, len, adj); 7224 7225 /* One's complement subtract extraneous checksum */ 7226 if (adj >= sum_val) 7227 sum_val = ~(adj - sum_val) & 0xFFFF; 7228 else 7229 sum_val -= adj; 7230 } 7231 } else { 7232 sum_val = 0; 7233 sum_flags = 0; 7234 } 7235 7236 /* Clear hardware checksumming flag */ 7237 DB_CKSUMFLAGS(mp) = 0; 7238 7239 ident = ipha->ipha_ident; 7240 offset = (frag_offset_flags << 3) & 0xFFFF; 7241 src = ipha->ipha_src; 7242 dst = ipha->ipha_dst; 7243 hdr_length = IPH_HDR_LENGTH(ipha); 7244 end = ntohs(ipha->ipha_length) - hdr_length; 7245 7246 /* If end == 0 then we have a packet with no data, so just free it */ 7247 if (end == 0) { 7248 freemsg(mp); 7249 return (NULL); 7250 } 7251 7252 /* Record the ECN field info. */ 7253 ecn_info = (ipha->ipha_type_of_service & 0x3); 7254 if (offset != 0) { 7255 /* 7256 * If this isn't the first piece, strip the header, and 7257 * add the offset to the end value.
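 * For example, a second fragment at offset 1480 carrying 1480 payload bytes arrives with end == 1480 and leaves this block with end == 2960.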
*/ 7259 mp->b_rptr += hdr_length; 7260 end += offset; 7261 } 7262 7263 /* Handle vnic loopback of fragments */ 7264 if (mp->b_datap->db_ref > 2) 7265 msg_len = 0; 7266 else 7267 msg_len = MBLKSIZE(mp); 7268 7269 tail_mp = mp; 7270 while (tail_mp->b_cont != NULL) { 7271 tail_mp = tail_mp->b_cont; 7272 if (tail_mp->b_datap->db_ref <= 2) 7273 msg_len += MBLKSIZE(tail_mp); 7274 } 7275 7276 /* If the reassembly list for this ILL will get too big, prune it */ 7277 if ((msg_len + sizeof (*ipf) + ill->ill_frag_count) >= 7278 ipst->ips_ip_reass_queue_bytes) { 7279 DTRACE_PROBE3(ip_reass_queue_bytes, uint_t, msg_len, 7280 uint_t, ill->ill_frag_count, 7281 uint_t, ipst->ips_ip_reass_queue_bytes); 7282 ill_frag_prune(ill, 7283 (ipst->ips_ip_reass_queue_bytes < msg_len) ? 0 : 7284 (ipst->ips_ip_reass_queue_bytes - msg_len)); 7285 pruned = B_TRUE; 7286 } 7287 7288 ipfb = &ill->ill_frag_hash_tbl[ILL_FRAG_HASH(src, ident)]; 7289 mutex_enter(&ipfb->ipfb_lock); 7290 7291 ipfp = &ipfb->ipfb_ipf; 7292 /* Try to find an existing fragment queue for this packet. */ 7293 for (;;) { 7294 ipf = ipfp[0]; 7295 if (ipf != NULL) { 7296 /* 7297 * It has to match on ident and src/dst address. 7298 */ 7299 if (ipf->ipf_ident == ident && 7300 ipf->ipf_src == src && 7301 ipf->ipf_dst == dst && 7302 ipf->ipf_protocol == proto) { 7303 /* 7304 * If we have received too many 7305 * duplicate fragments for this packet, 7306 * free it. 7307 */ 7308 if (ipf->ipf_num_dups > ip_max_frag_dups) { 7309 ill_frag_free_pkts(ill, ipfb, ipf, 1); 7310 freemsg(mp); 7311 mutex_exit(&ipfb->ipfb_lock); 7312 return (NULL); 7313 } 7314 /* Found it. */ 7315 break; 7316 } 7317 ipfp = &ipf->ipf_hash_next; 7318 continue; 7319 } 7320 7321 /* 7322 * If we pruned the list, do we want to store this new 7323 * fragment? We apply an optimization here based on the 7324 * fact that most fragments will be received in order. 7325 * So if the offset of this incoming fragment is zero, 7326 * it is the first fragment of a new packet. We will 7327 * keep it. Otherwise drop the fragment, as we have 7328 * probably pruned the packet already (since the 7329 * packet cannot be found). 7330 */ 7331 if (pruned && offset != 0) { 7332 mutex_exit(&ipfb->ipfb_lock); 7333 freemsg(mp); 7334 return (NULL); 7335 } 7336 7337 if (ipfb->ipfb_frag_pkts >= MAX_FRAG_PKTS(ipst)) { 7338 /* 7339 * Too many fragmented packets in this hash 7340 * bucket. Free the oldest. 7341 */ 7342 ill_frag_free_pkts(ill, ipfb, ipfb->ipfb_ipf, 1); 7343 } 7344 7345 /* New guy. Allocate a frag message. */ 7346 mp1 = allocb(sizeof (*ipf), BPRI_MED); 7347 if (mp1 == NULL) { 7348 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards); 7349 ip_drop_input("ipIfStatsInDiscards", mp, ill); 7350 freemsg(mp); 7351 reass_done: 7352 mutex_exit(&ipfb->ipfb_lock); 7353 return (NULL); 7354 } 7355 7356 BUMP_MIB(ill->ill_ip_mib, ipIfStatsReasmReqds); 7357 mp1->b_cont = mp; 7358 7359 /* Initialize the fragment header. */ 7360 ipf = (ipf_t *)mp1->b_rptr; 7361 ipf->ipf_mp = mp1; 7362 ipf->ipf_ptphn = ipfp; 7363 ipfp[0] = ipf; 7364 ipf->ipf_hash_next = NULL; 7365 ipf->ipf_ident = ident; 7366 ipf->ipf_protocol = proto; 7367 ipf->ipf_src = src; 7368 ipf->ipf_dst = dst; 7369 ipf->ipf_nf_hdr_len = 0; 7370 /* Record reassembly start time.
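 * (so the per-ill frag timer can age out reassembly queues that linger too long).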
*/ 7371 ipf->ipf_timestamp = gethrestime_sec(); 7372 /* Record ipf generation and account for frag header */ 7373 ipf->ipf_gen = ill->ill_ipf_gen++; 7374 ipf->ipf_count = MBLKSIZE(mp1); 7375 ipf->ipf_last_frag_seen = B_FALSE; 7376 ipf->ipf_ecn = ecn_info; 7377 ipf->ipf_num_dups = 0; 7378 ipfb->ipfb_frag_pkts++; 7379 ipf->ipf_checksum = 0; 7380 ipf->ipf_checksum_flags = 0; 7381 7382 /* Store checksum value in fragment header */ 7383 if (sum_flags != 0) { 7384 sum_val = (sum_val & 0xFFFF) + (sum_val >> 16); 7385 sum_val = (sum_val & 0xFFFF) + (sum_val >> 16); 7386 ipf->ipf_checksum = sum_val; 7387 ipf->ipf_checksum_flags = sum_flags; 7388 } 7389 7390 /* 7391 * We handle reassembly two ways. In the easy case, 7392 * where all the fragments show up in order, we do 7393 * minimal bookkeeping, and just clip new pieces on 7394 * the end. If we ever see a hole, then we go off 7395 * to ip_reassemble which has to mark the pieces and 7396 * keep track of the number of holes, etc. Obviously, 7397 * the point of having both mechanisms is so we can 7398 * handle the easy case as efficiently as possible. 7399 */ 7400 if (offset == 0) { 7401 /* Easy case, in-order reassembly so far. */ 7402 ipf->ipf_count += msg_len; 7403 ipf->ipf_tail_mp = tail_mp; 7404 /* 7405 * Keep track of next expected offset in 7406 * ipf_end. 7407 */ 7408 ipf->ipf_end = end; 7409 ipf->ipf_nf_hdr_len = hdr_length; 7410 } else { 7411 /* Hard case, hole at the beginning. */ 7412 ipf->ipf_tail_mp = NULL; 7413 /* 7414 * ipf_end == 0 means that we have given up 7415 * on easy reassembly. 7416 */ 7417 ipf->ipf_end = 0; 7418 7419 /* Forget checksum offload from now on */ 7420 ipf->ipf_checksum_flags = 0; 7421 7422 /* 7423 * ipf_hole_cnt is set by ip_reassemble. 7424 * ipf_count is updated by ip_reassemble. 7425 * No need to check for return value here 7426 * as we don't expect reassembly to complete 7427 * or fail for the first fragment itself. 7428 */ 7429 (void) ip_reassemble(mp, ipf, 7430 (frag_offset_flags & IPH_OFFSET) << 3, 7431 (frag_offset_flags & IPH_MF), ill, msg_len); 7432 } 7433 /* Update per ipfb and ill byte counts */ 7434 ipfb->ipfb_count += ipf->ipf_count; 7435 ASSERT(ipfb->ipfb_count > 0); /* Wraparound */ 7436 atomic_add_32(&ill->ill_frag_count, ipf->ipf_count); 7437 /* If the frag timer wasn't already going, start it. */ 7438 mutex_enter(&ill->ill_lock); 7439 ill_frag_timer_start(ill); 7440 mutex_exit(&ill->ill_lock); 7441 goto reass_done; 7442 } 7443 7444 /* 7445 * If the packet's flag has changed (it could be coming up 7446 * from an interface different than the previous, therefore 7447 * possibly different checksum capability), then forget about 7448 * any stored checksum states. Otherwise add the value to 7449 * the existing one stored in the fragment header. 7450 */ 7451 if (sum_flags != 0 && sum_flags == ipf->ipf_checksum_flags) { 7452 sum_val += ipf->ipf_checksum; 7453 sum_val = (sum_val & 0xFFFF) + (sum_val >> 16); 7454 sum_val = (sum_val & 0xFFFF) + (sum_val >> 16); 7455 ipf->ipf_checksum = sum_val; 7456 } else if (ipf->ipf_checksum_flags != 0) { 7457 /* Forget checksum offload from now on */ 7458 ipf->ipf_checksum_flags = 0; 7459 } 7460 7461 /* 7462 * We have a new piece of a datagram which is already being 7463 * reassembled. Update the ECN info if all IP fragments 7464 * are ECN capable. If there is one which is not, clear 7465 * all the info. If there is at least one which has CE 7466 * code point, IP needs to report that up to transport. 
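 * In short: a Not-ECT fragment clears the info for the whole datagram, while a CE mark on any fragment of an all-ECN-capable datagram marks the reassembled packet CE.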
7467 */ 7468 if (ecn_info != IPH_ECN_NECT && ipf->ipf_ecn != IPH_ECN_NECT) { 7469 if (ecn_info == IPH_ECN_CE) 7470 ipf->ipf_ecn = IPH_ECN_CE; 7471 } else { 7472 ipf->ipf_ecn = IPH_ECN_NECT; 7473 } 7474 if (offset && ipf->ipf_end == offset) { 7475 /* The new fragment fits at the end */ 7476 ipf->ipf_tail_mp->b_cont = mp; 7477 /* Update the byte count */ 7478 ipf->ipf_count += msg_len; 7479 /* Update per ipfb and ill byte counts */ 7480 ipfb->ipfb_count += msg_len; 7481 ASSERT(ipfb->ipfb_count > 0); /* Wraparound */ 7482 atomic_add_32(&ill->ill_frag_count, msg_len); 7483 if (frag_offset_flags & IPH_MF) { 7484 /* More to come. */ 7485 ipf->ipf_end = end; 7486 ipf->ipf_tail_mp = tail_mp; 7487 goto reass_done; 7488 } 7489 } else { 7490 /* Go do the hard cases. */ 7491 int ret; 7492 7493 if (offset == 0) 7494 ipf->ipf_nf_hdr_len = hdr_length; 7495 7496 /* Save current byte count */ 7497 count = ipf->ipf_count; 7498 ret = ip_reassemble(mp, ipf, 7499 (frag_offset_flags & IPH_OFFSET) << 3, 7500 (frag_offset_flags & IPH_MF), ill, msg_len); 7501 /* Count of bytes added and subtracted (freeb()ed) */ 7502 count = ipf->ipf_count - count; 7503 if (count) { 7504 /* Update per ipfb and ill byte counts */ 7505 ipfb->ipfb_count += count; 7506 ASSERT(ipfb->ipfb_count > 0); /* Wraparound */ 7507 atomic_add_32(&ill->ill_frag_count, count); 7508 } 7509 if (ret == IP_REASS_PARTIAL) { 7510 goto reass_done; 7511 } else if (ret == IP_REASS_FAILED) { 7512 /* Reassembly failed. Free up all resources */ 7513 ill_frag_free_pkts(ill, ipfb, ipf, 1); 7514 for (t_mp = mp; t_mp != NULL; t_mp = t_mp->b_cont) { 7515 IP_REASS_SET_START(t_mp, 0); 7516 IP_REASS_SET_END(t_mp, 0); 7517 } 7518 freemsg(mp); 7519 goto reass_done; 7520 } 7521 /* We will reach here iff 'ret' is IP_REASS_COMPLETE */ 7522 } 7523 /* 7524 * We have completed reassembly. Unhook the frag header from 7525 * the reassembly list. 7526 * 7527 * Before we free the frag header, record the ECN info 7528 * to report back to the transport. 7529 */ 7530 ecn_info = ipf->ipf_ecn; 7531 BUMP_MIB(ill->ill_ip_mib, ipIfStatsReasmOKs); 7532 ipfp = ipf->ipf_ptphn; 7533 7534 /* We need to supply these to caller */ 7535 if ((sum_flags = ipf->ipf_checksum_flags) != 0) 7536 sum_val = ipf->ipf_checksum; 7537 else 7538 sum_val = 0; 7539 7540 mp1 = ipf->ipf_mp; 7541 count = ipf->ipf_count; 7542 ipf = ipf->ipf_hash_next; 7543 if (ipf != NULL) 7544 ipf->ipf_ptphn = ipfp; 7545 ipfp[0] = ipf; 7546 atomic_add_32(&ill->ill_frag_count, -count); 7547 ASSERT(ipfb->ipfb_count >= count); 7548 ipfb->ipfb_count -= count; 7549 ipfb->ipfb_frag_pkts--; 7550 mutex_exit(&ipfb->ipfb_lock); 7551 /* Ditch the frag header. */ 7552 mp = mp1->b_cont; 7553 7554 freeb(mp1); 7555 7556 /* Restore original IP length in header. */ 7557 packet_size = (uint32_t)msgdsize(mp); 7558 if (packet_size > IP_MAXPACKET) { 7559 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors); 7560 ip_drop_input("Reassembled packet too large", mp, ill); 7561 freemsg(mp); 7562 return (NULL); 7563 } 7564 7565 if (DB_REF(mp) > 1) { 7566 mblk_t *mp2 = copymsg(mp); 7567 7568 if (mp2 == NULL) { 7569 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards); 7570 ip_drop_input("ipIfStatsInDiscards", mp, ill); 7571 freemsg(mp); 7572 return (NULL); 7573 } 7574 freemsg(mp); 7575 mp = mp2; 7576 } 7577 ipha = (ipha_t *)mp->b_rptr; 7578 7579 ipha->ipha_length = htons((uint16_t)packet_size); 7580 /* We're now complete, zip the frag state */ 7581 ipha->ipha_fragment_offset_and_flags = 0; 7582 /* Record the ECN info. 
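 * by rewriting the two low-order (ECN) bits of the TOS byte with the value merged across all fragments.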
*/ 7583 ipha->ipha_type_of_service &= 0xFC; 7584 ipha->ipha_type_of_service |= ecn_info; 7585 7586 /* Update the receive attributes */ 7587 ira->ira_pktlen = packet_size; 7588 ira->ira_ip_hdr_length = IPH_HDR_LENGTH(ipha); 7589 7590 /* Reassembly is successful; set checksum information in packet */ 7591 DB_CKSUM16(mp) = (uint16_t)sum_val; 7592 DB_CKSUMFLAGS(mp) = sum_flags; 7593 DB_CKSUMSTART(mp) = ira->ira_ip_hdr_length; 7594 7595 return (mp); 7596 } 7597 7598 /* 7599 * Pullup function that should be used for IP input in order to 7600 * ensure we do not lose the L2 source address; we need the L2 source 7601 * address for IP_RECVSLLA and for ndp_input. 7602 * 7603 * We return either NULL or b_rptr. 7604 */ 7605 void * 7606 ip_pullup(mblk_t *mp, ssize_t len, ip_recv_attr_t *ira) 7607 { 7608 ill_t *ill = ira->ira_ill; 7609 7610 if (ip_rput_pullups++ == 0) { 7611 (void) mi_strlog(ill->ill_rq, 1, SL_ERROR|SL_TRACE, 7612 "ip_pullup: %s forced us to " 7613 " pullup pkt, hdr len %ld, hdr addr %p", 7614 ill->ill_name, len, (void *)mp->b_rptr); 7615 } 7616 if (!(ira->ira_flags & IRAF_L2SRC_SET)) 7617 ip_setl2src(mp, ira, ira->ira_rill); 7618 ASSERT(ira->ira_flags & IRAF_L2SRC_SET); 7619 if (!pullupmsg(mp, len)) 7620 return (NULL); 7621 else 7622 return (mp->b_rptr); 7623 } 7624 7625 /* 7626 * Make sure ira_l2src has an address. If we don't have one, fill with zeros. 7627 * When called from the ULP, ira_rill will be NULL; hence the caller has to 7628 * pass in the ill. 7629 */ 7630 /* ARGSUSED */ 7631 void 7632 ip_setl2src(mblk_t *mp, ip_recv_attr_t *ira, ill_t *ill) 7633 { 7634 const uchar_t *addr; 7635 int alen; 7636 7637 if (ira->ira_flags & IRAF_L2SRC_SET) 7638 return; 7639 7640 ASSERT(ill != NULL); 7641 alen = ill->ill_phys_addr_length; 7642 ASSERT(alen <= sizeof (ira->ira_l2src)); 7643 if (ira->ira_mhip != NULL && 7644 (addr = ira->ira_mhip->mhi_saddr) != NULL) { 7645 bcopy(addr, ira->ira_l2src, alen); 7646 } else if ((ira->ira_flags & IRAF_L2SRC_LOOPBACK) && 7647 (addr = ill->ill_phys_addr) != NULL) { 7648 bcopy(addr, ira->ira_l2src, alen); 7649 } else { 7650 bzero(ira->ira_l2src, alen); 7651 } 7652 ira->ira_flags |= IRAF_L2SRC_SET; 7653 } 7654 7655 /* 7656 * Check IP header length and align it. 7657 */ 7658 mblk_t * 7659 ip_check_and_align_header(mblk_t *mp, uint_t min_size, ip_recv_attr_t *ira) 7660 { 7661 ill_t *ill = ira->ira_ill; 7662 ssize_t len; 7663 7664 len = MBLKL(mp); 7665 7666 if (!OK_32PTR(mp->b_rptr)) 7667 IP_STAT(ill->ill_ipst, ip_notaligned); 7668 else 7669 IP_STAT(ill->ill_ipst, ip_recv_pullup); 7670 7671 /* Guard against bogus device drivers */ 7672 if (len < 0) { 7673 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors); 7674 ip_drop_input("ipIfStatsInHdrErrors", mp, ill); 7675 freemsg(mp); 7676 return (NULL); 7677 } 7678 7679 if (len == 0) { 7680 /* GLD sometimes sends up mblk with b_rptr == b_wptr!
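 * That is, a zero-length leading mblk: capture the L2 source while it is still available, free the empty mblk, and retry with the next one in the chain.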
*/ 7681 mblk_t *mp1 = mp->b_cont; 7682 7683 if (!(ira->ira_flags & IRAF_L2SRC_SET)) 7684 ip_setl2src(mp, ira, ira->ira_rill); 7685 ASSERT(ira->ira_flags & IRAF_L2SRC_SET); 7686 7687 freeb(mp); 7688 mp = mp1; 7689 if (mp == NULL) 7690 return (NULL); 7691 7692 if (OK_32PTR(mp->b_rptr) && MBLKL(mp) >= min_size) 7693 return (mp); 7694 } 7695 if (ip_pullup(mp, min_size, ira) == NULL) { 7696 if (msgdsize(mp) < min_size) { 7697 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors); 7698 ip_drop_input("ipIfStatsInHdrErrors", mp, ill); 7699 } else { 7700 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards); 7701 ip_drop_input("ipIfStatsInDiscards", mp, ill); 7702 } 7703 freemsg(mp); 7704 return (NULL); 7705 } 7706 return (mp); 7707 } 7708 7709 /* 7710 * Common code for IPv4 and IPv6 to check and pullup multi-mblks 7711 */ 7712 mblk_t * 7713 ip_check_length(mblk_t *mp, uchar_t *rptr, ssize_t len, uint_t pkt_len, 7714 uint_t min_size, ip_recv_attr_t *ira) 7715 { 7716 ill_t *ill = ira->ira_ill; 7717 7718 /* 7719 * Make sure we have data length consistent 7720 * with the IP header. 7721 */ 7722 if (mp->b_cont == NULL) { 7723 /* pkt_len is based on ipha_len, not the mblk length */ 7724 if (pkt_len < min_size) { 7725 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors); 7726 ip_drop_input("ipIfStatsInHdrErrors", mp, ill); 7727 freemsg(mp); 7728 return (NULL); 7729 } 7730 if (len < 0) { 7731 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts); 7732 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill); 7733 freemsg(mp); 7734 return (NULL); 7735 } 7736 /* Drop any pad */ 7737 mp->b_wptr = rptr + pkt_len; 7738 } else if ((len += msgdsize(mp->b_cont)) != 0) { 7739 ASSERT(pkt_len >= min_size); 7740 if (pkt_len < min_size) { 7741 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors); 7742 ip_drop_input("ipIfStatsInHdrErrors", mp, ill); 7743 freemsg(mp); 7744 return (NULL); 7745 } 7746 if (len < 0) { 7747 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInTruncatedPkts); 7748 ip_drop_input("ipIfStatsInTruncatedPkts", mp, ill); 7749 freemsg(mp); 7750 return (NULL); 7751 } 7752 /* Drop any pad */ 7753 (void) adjmsg(mp, -len); 7754 /* 7755 * adjmsg may have freed an mblk from the chain, hence 7756 * invalidate any hw checksum here. This will force IP to 7757 * calculate the checksum in sw, but only for this packet. 7758 */ 7759 DB_CKSUMFLAGS(mp) = 0; 7760 IP_STAT(ill->ill_ipst, ip_multimblk); 7761 } 7762 return (mp); 7763 } 7764 7765 /* 7766 * Check that the IPv4 opt_len is consistent with the packet and pullup 7767 * the options. 7768 */ 7769 mblk_t * 7770 ip_check_optlen(mblk_t *mp, ipha_t *ipha, uint_t opt_len, uint_t pkt_len, 7771 ip_recv_attr_t *ira) 7772 { 7773 ill_t *ill = ira->ira_ill; 7774 ssize_t len; 7775 7776 /* Assume no IPv6 packets arrive over the IPv4 queue */ 7777 if (IPH_HDR_VERSION(ipha) != IPV4_VERSION) { 7778 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors); 7779 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInWrongIPVersion); 7780 ip_drop_input("IPvN packet on IPv4 ill", mp, ill); 7781 freemsg(mp); 7782 return (NULL); 7783 } 7784 7785 if (opt_len > (15 - IP_SIMPLE_HDR_LENGTH_IN_WORDS)) { 7786 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors); 7787 ip_drop_input("ipIfStatsInHdrErrors", mp, ill); 7788 freemsg(mp); 7789 return (NULL); 7790 } 7791 /* 7792 * Recompute complete header length and make sure we 7793 * have access to all of it. 
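 * The length works out to (opt_len + 5 header words) * 4 bytes; e.g. the maximal opt_len of 10 words gives the 60-byte IPv4 header limit.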
7794 */ 7795 len = ((size_t)opt_len + IP_SIMPLE_HDR_LENGTH_IN_WORDS) << 2; 7796 if (len > (mp->b_wptr - mp->b_rptr)) { 7797 if (len > pkt_len) { 7798 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInHdrErrors); 7799 ip_drop_input("ipIfStatsInHdrErrors", mp, ill); 7800 freemsg(mp); 7801 return (NULL); 7802 } 7803 if (ip_pullup(mp, len, ira) == NULL) { 7804 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards); 7805 ip_drop_input("ipIfStatsInDiscards", mp, ill); 7806 freemsg(mp); 7807 return (NULL); 7808 } 7809 } 7810 return (mp); 7811 } 7812 7813 /* 7814 * Returns a new ire, or the same ire, or NULL. 7815 * If a different IRE is returned, then it is held; the caller 7816 * needs to release it. 7817 * In no case is there any hold/release on the ire argument. 7818 */ 7819 ire_t * 7820 ip_check_multihome(void *addr, ire_t *ire, ill_t *ill) 7821 { 7822 ire_t *new_ire; 7823 ill_t *ire_ill; 7824 uint_t ifindex; 7825 ip_stack_t *ipst = ill->ill_ipst; 7826 boolean_t strict_check = B_FALSE; 7827 7828 /* 7829 * IPMP common case: if IRE and ILL are in the same group, there's no 7830 * issue (e.g. packet received on an underlying interface matched an 7831 * IRE_LOCAL on its associated group interface). 7832 */ 7833 ASSERT(ire->ire_ill != NULL); 7834 if (IS_IN_SAME_ILLGRP(ill, ire->ire_ill)) 7835 return (ire); 7836 7837 /* 7838 * Do another ire lookup here, using the ingress ill, to see if the 7839 * interface is in a usesrc group. 7840 * As long as the ills belong to the same group, we don't consider 7841 * them to be arriving on the wrong interface. Thus, if the switch 7842 * is doing inbound load spreading, we won't drop packets when the 7843 * ip*_strict_dst_multihoming switch is on. 7844 * We also need to check for IPIF_UNNUMBERED point2point interfaces 7845 * where the local address may not be unique. In this case we were 7846 * at the mercy of the initial ire lookup and the IRE_LOCAL it 7847 * actually returned. The new lookup, which is more specific, should 7848 * only find the IRE_LOCAL associated with the ingress ill if one 7849 * exists. 7850 */ 7851 if (ire->ire_ipversion == IPV4_VERSION) { 7852 if (ipst->ips_ip_strict_dst_multihoming) 7853 strict_check = B_TRUE; 7854 new_ire = ire_ftable_lookup_v4(*((ipaddr_t *)addr), 0, 0, 7855 IRE_LOCAL, ill, ALL_ZONES, NULL, 7856 (MATCH_IRE_TYPE|MATCH_IRE_ILL), 0, ipst, NULL); 7857 } else { 7858 ASSERT(!IN6_IS_ADDR_MULTICAST((in6_addr_t *)addr)); 7859 if (ipst->ips_ipv6_strict_dst_multihoming) 7860 strict_check = B_TRUE; 7861 new_ire = ire_ftable_lookup_v6((in6_addr_t *)addr, NULL, NULL, 7862 IRE_LOCAL, ill, ALL_ZONES, NULL, 7863 (MATCH_IRE_TYPE|MATCH_IRE_ILL), 0, ipst, NULL); 7864 } 7865 /* 7866 * If the same ire that was returned in ip_input() is found then this 7867 * is an indication that usesrc groups are in use. The packet 7868 * arrived on a different ill in the group than the one associated with 7869 * the destination address. If a different ire was found then the same 7870 * IP address must be hosted on multiple ills. This is possible with 7871 * unnumbered point2point interfaces. We switch to use this new ire in 7872 * order to have accurate interface statistics. 7873 */ 7874 if (new_ire != NULL) { 7875 /* Note: held in one case but not the other? Caller handles */ 7876 if (new_ire != ire) 7877 return (new_ire); 7878 /* Unchanged */ 7879 ire_refrele(new_ire); 7880 return (ire); 7881 } 7882 7883 /* 7884 * Chase pointers once and store locally. 
*/ 7886 ASSERT(ire->ire_ill != NULL); 7887 ire_ill = ire->ire_ill; 7888 ifindex = ill->ill_usesrc_ifindex; 7889 7890 /* 7891 * Check if it's a legal address on the 'usesrc' interface. 7892 * For IPMP data addresses the IRE_LOCAL is the upper, hence we 7893 * can just check phyint_ifindex. 7894 */ 7895 if (ifindex != 0 && ifindex == ire_ill->ill_phyint->phyint_ifindex) { 7896 return (ire); 7897 } 7898 7899 /* 7900 * If the ip*_strict_dst_multihoming switch is on then we can 7901 * only accept this packet if the interface is marked as routing. 7902 */ 7903 if (!(strict_check)) 7904 return (ire); 7905 7906 if ((ill->ill_flags & ire->ire_ill->ill_flags & ILLF_ROUTER) != 0) { 7907 return (ire); 7908 } 7909 return (NULL); 7910 } 7911 7912 /* 7913 * This function is used to construct a mac_header_info_s from a 7914 * DL_UNITDATA_IND message. 7915 * The address fields in the mhi structure point into the message, 7916 * thus the caller can't use those fields after freeing the message. 7917 * 7918 * We determine whether the packet received is a non-unicast packet 7919 * and in doing so, determine whether or not it is broadcast vs multicast. 7920 * For it to be a broadcast packet, we must have the appropriate mblk_t 7921 * hanging off the ill_t. If this is either not present or doesn't match 7922 * the destination mac address in the DL_UNITDATA_IND, the packet is deemed 7923 * to be multicast. Thus NICs that have no broadcast address (or no 7924 * capability for one, such as point to point links) cannot have 7925 * the packet reported as broadcast. 7926 */ 7927 void 7928 ip_dlur_to_mhi(ill_t *ill, mblk_t *mb, struct mac_header_info_s *mhip) 7929 { 7930 dl_unitdata_ind_t *ind = (dl_unitdata_ind_t *)mb->b_rptr; 7931 mblk_t *bmp; 7932 uint_t extra_offset; 7933 7934 bzero(mhip, sizeof (struct mac_header_info_s)); 7935 7936 mhip->mhi_dsttype = MAC_ADDRTYPE_UNICAST; 7937 7938 if (ill->ill_sap_length < 0) 7939 extra_offset = 0; 7940 else 7941 extra_offset = ill->ill_sap_length; 7942 7943 mhip->mhi_daddr = (uchar_t *)ind + ind->dl_dest_addr_offset + 7944 extra_offset; 7945 mhip->mhi_saddr = (uchar_t *)ind + ind->dl_src_addr_offset + 7946 extra_offset; 7947 7948 if (!ind->dl_group_address) 7949 return; 7950 7951 /* Multicast or broadcast */ 7952 mhip->mhi_dsttype = MAC_ADDRTYPE_MULTICAST; 7953 7954 if (ind->dl_dest_addr_offset > sizeof (*ind) && 7955 ind->dl_dest_addr_offset + ind->dl_dest_addr_length < MBLKL(mb) && 7956 (bmp = ill->ill_bcast_mp) != NULL) { 7957 dl_unitdata_req_t *dlur; 7958 uint8_t *bphys_addr; 7959 7960 dlur = (dl_unitdata_req_t *)bmp->b_rptr; 7961 bphys_addr = (uchar_t *)dlur + dlur->dl_dest_addr_offset + 7962 extra_offset; 7963 7964 if (bcmp(mhip->mhi_daddr, bphys_addr, 7965 ind->dl_dest_addr_length) == 0) 7966 mhip->mhi_dsttype = MAC_ADDRTYPE_BROADCAST; 7967 } 7968 } 7969 7970 /* 7971 * This function is used to construct a mac_header_info_s from an 7972 * M_DATA fastpath message from a DLPI driver. 7973 * The address fields in the mhi structure point into the message, 7974 * thus the caller can't use those fields after freeing the message. 7975 * 7976 * We determine whether the packet received is a non-unicast packet 7977 * and in doing so, determine whether or not it is broadcast vs multicast. 7978 * For it to be a broadcast packet, we must have the appropriate mblk_t 7979 * hanging off the ill_t. If this is either not present or doesn't match 7980 * the destination mac address in the DL_UNITDATA_IND, the packet is deemed 7981 * to be multicast.
Thus NICs that have no broadcast address (or no 7982 * capability for one, such as point to point links) cannot have 7983 * the packet reported as broadcast. 7984 */ 7985 void 7986 ip_mdata_to_mhi(ill_t *ill, mblk_t *mp, struct mac_header_info_s *mhip) 7987 { 7988 mblk_t *bmp; 7989 struct ether_header *pether; 7990 7991 bzero(mhip, sizeof (struct mac_header_info_s)); 7992 7993 mhip->mhi_dsttype = MAC_ADDRTYPE_UNICAST; 7994 7995 pether = (struct ether_header *)((char *)mp->b_rptr 7996 - sizeof (struct ether_header)); 7997 7998 /* 7999 * Make sure the interface is an ethernet type, since we don't 8000 * know the header format for anything but Ethernet. Also make 8001 * sure we are pointing correctly above db_base. 8002 */ 8003 if (ill->ill_type != IFT_ETHER) 8004 return; 8005 8006 retry: 8007 if ((uchar_t *)pether < mp->b_datap->db_base) 8008 return; 8009 8010 /* Is there a VLAN tag? */ 8011 if (ill->ill_isv6) { 8012 if (pether->ether_type != htons(ETHERTYPE_IPV6)) { 8013 pether = (struct ether_header *)((char *)pether - 4); 8014 goto retry; 8015 } 8016 } else { 8017 if (pether->ether_type != htons(ETHERTYPE_IP)) { 8018 pether = (struct ether_header *)((char *)pether - 4); 8019 goto retry; 8020 } 8021 } 8022 mhip->mhi_daddr = (uchar_t *)&pether->ether_dhost; 8023 mhip->mhi_saddr = (uchar_t *)&pether->ether_shost; 8024 8025 if (!(mhip->mhi_daddr[0] & 0x01)) 8026 return; 8027 8028 /* Multicast or broadcast */ 8029 mhip->mhi_dsttype = MAC_ADDRTYPE_MULTICAST; 8030 8031 if ((bmp = ill->ill_bcast_mp) != NULL) { 8032 dl_unitdata_req_t *dlur; 8033 uint8_t *bphys_addr; 8034 uint_t addrlen; 8035 8036 dlur = (dl_unitdata_req_t *)bmp->b_rptr; 8037 addrlen = dlur->dl_dest_addr_length; 8038 if (ill->ill_sap_length < 0) { 8039 bphys_addr = (uchar_t *)dlur + 8040 dlur->dl_dest_addr_offset; 8041 addrlen += ill->ill_sap_length; 8042 } else { 8043 bphys_addr = (uchar_t *)dlur + 8044 dlur->dl_dest_addr_offset + 8045 ill->ill_sap_length; 8046 addrlen -= ill->ill_sap_length; 8047 } 8048 if (bcmp(mhip->mhi_daddr, bphys_addr, addrlen) == 0) 8049 mhip->mhi_dsttype = MAC_ADDRTYPE_BROADCAST; 8050 } 8051 } 8052 8053 /* 8054 * Handle anything but M_DATA messages. 8055 * We see the DL_UNITDATA_IND messages, which are part 8056 * of the data path, and also the other messages from the driver. 8057 */ 8058 void 8059 ip_rput_notdata(ill_t *ill, mblk_t *mp) 8060 { 8061 mblk_t *first_mp; 8062 struct iocblk *iocp; 8063 struct mac_header_info_s mhi; 8064 8065 switch (DB_TYPE(mp)) { 8066 case M_PROTO: 8067 case M_PCPROTO: { 8068 if (((dl_unitdata_ind_t *)mp->b_rptr)->dl_primitive != 8069 DL_UNITDATA_IND) { 8070 /* Go handle anything other than data elsewhere. */ 8071 ip_rput_dlpi(ill, mp); 8072 return; 8073 } 8074 8075 first_mp = mp; 8076 mp = first_mp->b_cont; 8077 first_mp->b_cont = NULL; 8078 8079 if (mp == NULL) { 8080 freeb(first_mp); 8081 return; 8082 } 8083 ip_dlur_to_mhi(ill, first_mp, &mhi); 8084 if (ill->ill_isv6) 8085 ip_input_v6(ill, NULL, mp, &mhi); 8086 else 8087 ip_input(ill, NULL, mp, &mhi); 8088 8089 /* Ditch the DLPI header.
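 * ip_input{,_v6} already took the payload; only the DL_UNITDATA_IND mblk itself remains to be freed.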
*/ 8090 freeb(first_mp); 8091 return; 8092 } 8093 case M_IOCACK: 8094 iocp = (struct iocblk *)mp->b_rptr; 8095 switch (iocp->ioc_cmd) { 8096 case DL_IOC_HDR_INFO: 8097 ill_fastpath_ack(ill, mp); 8098 return; 8099 default: 8100 putnext(ill->ill_rq, mp); 8101 return; 8102 } 8103 /* FALLTHRU */ 8104 case M_ERROR: 8105 case M_HANGUP: 8106 mutex_enter(&ill->ill_lock); 8107 if (ill->ill_state_flags & ILL_CONDEMNED) { 8108 mutex_exit(&ill->ill_lock); 8109 freemsg(mp); 8110 return; 8111 } 8112 ill_refhold_locked(ill); 8113 mutex_exit(&ill->ill_lock); 8114 qwriter_ip(ill, ill->ill_rq, mp, ip_rput_other, CUR_OP, 8115 B_FALSE); 8116 return; 8117 case M_CTL: 8118 putnext(ill->ill_rq, mp); 8119 return; 8120 case M_IOCNAK: 8121 ip1dbg(("got iocnak ")); 8122 iocp = (struct iocblk *)mp->b_rptr; 8123 switch (iocp->ioc_cmd) { 8124 case DL_IOC_HDR_INFO: 8125 ip_rput_other(NULL, ill->ill_rq, mp, NULL); 8126 return; 8127 default: 8128 break; 8129 } 8130 /* FALLTHRU */ 8131 default: 8132 putnext(ill->ill_rq, mp); 8133 return; 8134 } 8135 } 8136 8137 /* Read side put procedure. Packets coming from the wire arrive here. */ 8138 void 8139 ip_rput(queue_t *q, mblk_t *mp) 8140 { 8141 ill_t *ill; 8142 union DL_primitives *dl; 8143 8144 ill = (ill_t *)q->q_ptr; 8145 8146 if (ill->ill_state_flags & (ILL_CONDEMNED | ILL_LL_SUBNET_PENDING)) { 8147 /* 8148 * If things are opening or closing, only accept high-priority 8149 * DLPI messages. (On open ill->ill_ipif has not yet been 8150 * created; on close, things hanging off the ill may have been 8151 * freed already.) 8152 */ 8153 dl = (union DL_primitives *)mp->b_rptr; 8154 if (DB_TYPE(mp) != M_PCPROTO || 8155 dl->dl_primitive == DL_UNITDATA_IND) { 8156 inet_freemsg(mp); 8157 return; 8158 } 8159 } 8160 if (DB_TYPE(mp) == M_DATA) { 8161 struct mac_header_info_s mhi; 8162 8163 ip_mdata_to_mhi(ill, mp, &mhi); 8164 ip_input(ill, NULL, mp, &mhi); 8165 } else { 8166 ip_rput_notdata(ill, mp); 8167 } 8168 } 8169 8170 /* 8171 * Move the information to a copy. 
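 * Used on receive when the data block is shared (db_ref > 1) so that IP can modify the packet; the L2 source and any hardware checksum state are carried over to the copy.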
8172 */ 8173 mblk_t * 8174 ip_fix_dbref(mblk_t *mp, ip_recv_attr_t *ira) 8175 { 8176 mblk_t *mp1; 8177 ill_t *ill = ira->ira_ill; 8178 ip_stack_t *ipst = ill->ill_ipst; 8179 8180 IP_STAT(ipst, ip_db_ref); 8181 8182 /* Make sure we have ira_l2src before we lose the original mblk */ 8183 if (!(ira->ira_flags & IRAF_L2SRC_SET)) 8184 ip_setl2src(mp, ira, ira->ira_rill); 8185 8186 mp1 = copymsg(mp); 8187 if (mp1 == NULL) { 8188 BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards); 8189 ip_drop_input("ipIfStatsInDiscards", mp, ill); 8190 freemsg(mp); 8191 return (NULL); 8192 } 8193 /* preserve the hardware checksum flags and data, if present */ 8194 if (DB_CKSUMFLAGS(mp) != 0) { 8195 DB_CKSUMFLAGS(mp1) = DB_CKSUMFLAGS(mp); 8196 DB_CKSUMSTART(mp1) = DB_CKSUMSTART(mp); 8197 DB_CKSUMSTUFF(mp1) = DB_CKSUMSTUFF(mp); 8198 DB_CKSUMEND(mp1) = DB_CKSUMEND(mp); 8199 DB_CKSUM16(mp1) = DB_CKSUM16(mp); 8200 } 8201 freemsg(mp); 8202 return (mp1); 8203 } 8204 8205 static void 8206 ip_dlpi_error(ill_t *ill, t_uscalar_t prim, t_uscalar_t dl_err, 8207 t_uscalar_t err) 8208 { 8209 if (dl_err == DL_SYSERR) { 8210 (void) mi_strlog(ill->ill_rq, 1, SL_CONSOLE|SL_ERROR|SL_TRACE, 8211 "%s: %s failed: DL_SYSERR (errno %u)\n", 8212 ill->ill_name, dl_primstr(prim), err); 8213 return; 8214 } 8215 8216 (void) mi_strlog(ill->ill_rq, 1, SL_CONSOLE|SL_ERROR|SL_TRACE, 8217 "%s: %s failed: %s\n", ill->ill_name, dl_primstr(prim), 8218 dl_errstr(dl_err)); 8219 } 8220 8221 /* 8222 * ip_rput_dlpi is called by ip_rput to handle all DLPI messages other 8223 * than DL_UNITDATA_IND messages. If we need to process this message 8224 * exclusively, we call qwriter_ip, in which case we also need to call 8225 * ill_refhold before that, since qwriter_ip does an ill_refrele. 8226 */ 8227 void 8228 ip_rput_dlpi(ill_t *ill, mblk_t *mp) 8229 { 8230 dl_ok_ack_t *dloa = (dl_ok_ack_t *)mp->b_rptr; 8231 dl_error_ack_t *dlea = (dl_error_ack_t *)dloa; 8232 queue_t *q = ill->ill_rq; 8233 t_uscalar_t prim = dloa->dl_primitive; 8234 t_uscalar_t reqprim = DL_PRIM_INVAL; 8235 8236 DTRACE_PROBE3(ill__dlpi, char *, "ip_rput_dlpi", 8237 char *, dl_primstr(prim), ill_t *, ill); 8238 ip1dbg(("ip_rput_dlpi")); 8239 8240 /* 8241 * If we received an ACK but didn't send a request for it, then it 8242 * can't be part of any pending operation; discard up-front. 8243 */ 8244 switch (prim) { 8245 case DL_ERROR_ACK: 8246 reqprim = dlea->dl_error_primitive; 8247 ip2dbg(("ip_rput_dlpi(%s): DL_ERROR_ACK for %s (0x%x): %s " 8248 "(0x%x), unix %u\n", ill->ill_name, dl_primstr(reqprim), 8249 reqprim, dl_errstr(dlea->dl_errno), dlea->dl_errno, 8250 dlea->dl_unix_errno)); 8251 break; 8252 case DL_OK_ACK: 8253 reqprim = dloa->dl_correct_primitive; 8254 break; 8255 case DL_INFO_ACK: 8256 reqprim = DL_INFO_REQ; 8257 break; 8258 case DL_BIND_ACK: 8259 reqprim = DL_BIND_REQ; 8260 break; 8261 case DL_PHYS_ADDR_ACK: 8262 reqprim = DL_PHYS_ADDR_REQ; 8263 break; 8264 case DL_NOTIFY_ACK: 8265 reqprim = DL_NOTIFY_REQ; 8266 break; 8267 case DL_CAPABILITY_ACK: 8268 reqprim = DL_CAPABILITY_REQ; 8269 break; 8270 } 8271 8272 if (prim != DL_NOTIFY_IND) { 8273 if (reqprim == DL_PRIM_INVAL || 8274 !ill_dlpi_pending(ill, reqprim)) { 8275 /* Not a DLPI message we support or expected */ 8276 freemsg(mp); 8277 return; 8278 } 8279 ip1dbg(("ip_rput: received %s for %s\n", dl_primstr(prim), 8280 dl_primstr(reqprim))); 8281 } 8282 8283 switch (reqprim) { 8284 case DL_UNBIND_REQ: 8285 /* 8286 * NOTE: we mark the unbind as complete even if we got a 8287 * DL_ERROR_ACK, since there's not much else we can do.
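 * Either way, clear ILL_DL_UNBIND_IN_PROGRESS and signal ill_cv so that a thread waiting for the unbind to complete can proceed.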
8288 */ 8289 mutex_enter(&ill->ill_lock); 8290 ill->ill_state_flags &= ~ILL_DL_UNBIND_IN_PROGRESS; 8291 cv_signal(&ill->ill_cv); 8292 mutex_exit(&ill->ill_lock); 8293 break; 8294 8295 case DL_ENABMULTI_REQ: 8296 if (prim == DL_OK_ACK) { 8297 if (ill->ill_dlpi_multicast_state == IDS_INPROGRESS) 8298 ill->ill_dlpi_multicast_state = IDS_OK; 8299 } 8300 break; 8301 } 8302 8303 /* 8304 * The message is one we're waiting for (or DL_NOTIFY_IND), but we 8305 * need to become writer to continue to process it. Because an 8306 * exclusive operation doesn't complete until replies to all queued 8307 * DLPI messages have been received, we know we're in the middle of an 8308 * exclusive operation and pass CUR_OP (except for DL_NOTIFY_IND). 8309 * 8310 * As required by qwriter_ip(), we refhold the ill; it will refrele. 8311 * Since this is on the ill stream we unconditionally bump up the 8312 * refcount without doing ILL_CAN_LOOKUP(). 8313 */ 8314 ill_refhold(ill); 8315 if (prim == DL_NOTIFY_IND) 8316 qwriter_ip(ill, q, mp, ip_rput_dlpi_writer, NEW_OP, B_FALSE); 8317 else 8318 qwriter_ip(ill, q, mp, ip_rput_dlpi_writer, CUR_OP, B_FALSE); 8319 } 8320 8321 /* 8322 * Handling of DLPI messages that require exclusive access to the ipsq. 8323 * 8324 * Need to do ipsq_pending_mp_get on ioctl completion, which could 8325 * happen here. (along with mi_copy_done) 8326 */ 8327 /* ARGSUSED */ 8328 static void 8329 ip_rput_dlpi_writer(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *dummy_arg) 8330 { 8331 dl_ok_ack_t *dloa = (dl_ok_ack_t *)mp->b_rptr; 8332 dl_error_ack_t *dlea = (dl_error_ack_t *)dloa; 8333 int err = 0; 8334 ill_t *ill = (ill_t *)q->q_ptr; 8335 ipif_t *ipif = NULL; 8336 mblk_t *mp1 = NULL; 8337 conn_t *connp = NULL; 8338 t_uscalar_t paddrreq; 8339 mblk_t *mp_hw; 8340 boolean_t success; 8341 boolean_t ioctl_aborted = B_FALSE; 8342 boolean_t log = B_TRUE; 8343 8344 DTRACE_PROBE3(ill__dlpi, char *, "ip_rput_dlpi_writer", 8345 char *, dl_primstr(dloa->dl_primitive), ill_t *, ill); 8346 8347 ip1dbg(("ip_rput_dlpi_writer ..")); 8348 ASSERT(ipsq->ipsq_xop == ill->ill_phyint->phyint_ipsq->ipsq_xop); 8349 ASSERT(IAM_WRITER_ILL(ill)); 8350 8351 ipif = ipsq->ipsq_xop->ipx_pending_ipif; 8352 /* 8353 * The current ioctl could have been aborted by the user and a new 8354 * ioctl to bring up another ill could have started. We could still 8355 * get a response from the driver later. 8356 */ 8357 if (ipif != NULL && ipif->ipif_ill != ill) 8358 ioctl_aborted = B_TRUE; 8359 8360 switch (dloa->dl_primitive) { 8361 case DL_ERROR_ACK: 8362 ip1dbg(("ip_rput_dlpi_writer: got DL_ERROR_ACK for %s\n", 8363 dl_primstr(dlea->dl_error_primitive))); 8364 8365 DTRACE_PROBE3(ill__dlpi, char *, "ip_rput_dlpi_writer error", 8366 char *, dl_primstr(dlea->dl_error_primitive), 8367 ill_t *, ill); 8368 8369 switch (dlea->dl_error_primitive) { 8370 case DL_DISABMULTI_REQ: 8371 ill_dlpi_done(ill, dlea->dl_error_primitive); 8372 break; 8373 case DL_PROMISCON_REQ: 8374 case DL_PROMISCOFF_REQ: 8375 case DL_UNBIND_REQ: 8376 case DL_ATTACH_REQ: 8377 case DL_INFO_REQ: 8378 ill_dlpi_done(ill, dlea->dl_error_primitive); 8379 break; 8380 case DL_NOTIFY_REQ: 8381 ill_dlpi_done(ill, DL_NOTIFY_REQ); 8382 log = B_FALSE; 8383 break; 8384 case DL_PHYS_ADDR_REQ: 8385 /* 8386 * For IPv6 only, there are two additional 8387 * phys_addr_req's sent to the driver to get the 8388 * IPv6 token and lla. This allows IP to acquire 8389 * the hardware address format for a given interface 8390 * without having built in knowledge of the hardware 8391 * address. 
ill_phys_addr_pend keeps track of the last 8392 * DL_PAR sent so we know which response we are 8393 * dealing with. ill_dlpi_done will update 8394 * ill_phys_addr_pend when it sends the next req. 8395 * We don't complete the IOCTL until all three DL_PARs 8396 * have been attempted, so set *_len to 0 and break. 8397 */ 8398 paddrreq = ill->ill_phys_addr_pend; 8399 ill_dlpi_done(ill, DL_PHYS_ADDR_REQ); 8400 if (paddrreq == DL_IPV6_TOKEN) { 8401 ill->ill_token_length = 0; 8402 log = B_FALSE; 8403 break; 8404 } else if (paddrreq == DL_IPV6_LINK_LAYER_ADDR) { 8405 ill->ill_nd_lla_len = 0; 8406 log = B_FALSE; 8407 break; 8408 } 8409 /* 8410 * Something went wrong with the DL_PHYS_ADDR_REQ. 8411 * We presumably have an IOCTL hanging out waiting 8412 * for completion. Find it and complete the IOCTL 8413 * with the error noted. 8414 * However, ill_dl_phys was called on an ill queue 8415 * (from SIOCSLIFNAME), thus conn_pending_ill is not 8416 * set. But the ioctl is known to be pending on ill_wq. 8417 */ 8418 if (!ill->ill_ifname_pending) 8419 break; 8420 ill->ill_ifname_pending = 0; 8421 if (!ioctl_aborted) 8422 mp1 = ipsq_pending_mp_get(ipsq, &connp); 8423 if (mp1 != NULL) { 8424 /* 8425 * This operation (SIOCSLIFNAME) must have 8426 * happened on the ill. Assert there is no conn. 8427 */ 8428 ASSERT(connp == NULL); 8429 q = ill->ill_wq; 8430 } 8431 break; 8432 case DL_BIND_REQ: 8433 ill_dlpi_done(ill, DL_BIND_REQ); 8434 if (ill->ill_ifname_pending) 8435 break; 8436 /* 8437 * Something went wrong with the bind. We presumably 8438 * have an IOCTL hanging out waiting for completion. 8439 * Find it, take down the interface that was coming 8440 * up, and complete the IOCTL with the error noted. 8441 */ 8442 if (!ioctl_aborted) 8443 mp1 = ipsq_pending_mp_get(ipsq, &connp); 8444 if (mp1 != NULL) { 8445 /* 8446 * This might be a result of a DL_NOTE_REPLUMB 8447 * notification. In that case, connp is NULL. 8448 */ 8449 if (connp != NULL) 8450 q = CONNP_TO_WQ(connp); 8451 8452 (void) ipif_down(ipif, NULL, NULL); 8453 /* error is set below the switch */ 8454 } 8455 break; 8456 case DL_ENABMULTI_REQ: 8457 ill_dlpi_done(ill, DL_ENABMULTI_REQ); 8458 8459 if (ill->ill_dlpi_multicast_state == IDS_INPROGRESS) 8460 ill->ill_dlpi_multicast_state = IDS_FAILED; 8461 if (ill->ill_dlpi_multicast_state == IDS_FAILED) { 8462 8463 printf("ip: joining multicasts failed (%d)" 8464 " on %s - will use link layer " 8465 "broadcasts for multicast\n", 8466 dlea->dl_errno, ill->ill_name); 8467 8468 /* 8469 * Set up for multi_bcast; we are the 8470 * writer, so ok to access ill->ill_ipif 8471 * without any lock. 8472 */ 8473 mutex_enter(&ill->ill_phyint->phyint_lock); 8474 ill->ill_phyint->phyint_flags |= 8475 PHYI_MULTI_BCAST; 8476 mutex_exit(&ill->ill_phyint->phyint_lock); 8477 8478 } 8479 freemsg(mp); /* Don't want to pass this up */ 8480 return; 8481 case DL_CAPABILITY_REQ: 8482 ip1dbg(("ip_rput_dlpi_writer: got DL_ERROR_ACK for " 8483 "DL_CAPABILITY REQ\n")); 8484 if (ill->ill_dlpi_capab_state == IDCS_PROBE_SENT) 8485 ill->ill_dlpi_capab_state = IDCS_FAILED; 8486 ill_capability_done(ill); 8487 freemsg(mp); 8488 return; 8489 } 8490 /* 8491 * Note the error for IOCTL completion (mp1 is set when 8492 * ready to complete ioctl). If ill_ifname_pending_err is 8493 * set, an error occurred during plumbing (ill_ifname_pending), 8494 * so we want to report that error.
8495 * 8496 * NOTE: there are two additional DL_PHYS_ADDR_REQ's 8497 * (DL_IPV6_TOKEN and DL_IPV6_LINK_LAYER_ADDR) that are 8498 * expected to get errack'd if the driver doesn't support 8499 * these flags (e.g. ethernet). log will be set to B_FALSE 8500 * if these error conditions are encountered. 8501 */ 8502 if (mp1 != NULL) { 8503 if (ill->ill_ifname_pending_err != 0) { 8504 err = ill->ill_ifname_pending_err; 8505 ill->ill_ifname_pending_err = 0; 8506 } else { 8507 err = dlea->dl_unix_errno ? 8508 dlea->dl_unix_errno : ENXIO; 8509 } 8510 /* 8511 * If we're plumbing an interface and an error hasn't already 8512 * been saved, set ill_ifname_pending_err to the error passed 8513 * up. Ignore the error if log is B_FALSE (see comment above). 8514 */ 8515 } else if (log && ill->ill_ifname_pending && 8516 ill->ill_ifname_pending_err == 0) { 8517 ill->ill_ifname_pending_err = dlea->dl_unix_errno ? 8518 dlea->dl_unix_errno : ENXIO; 8519 } 8520 8521 if (log) 8522 ip_dlpi_error(ill, dlea->dl_error_primitive, 8523 dlea->dl_errno, dlea->dl_unix_errno); 8524 break; 8525 case DL_CAPABILITY_ACK: 8526 ill_capability_ack(ill, mp); 8527 /* 8528 * The message has been handed off to ill_capability_ack 8529 * and must not be freed below. 8530 */ 8531 mp = NULL; 8532 break; 8533 8534 case DL_INFO_ACK: 8535 /* Call a routine to handle this one. */ 8536 ill_dlpi_done(ill, DL_INFO_REQ); 8537 ip_ll_subnet_defaults(ill, mp); 8538 ASSERT(!MUTEX_HELD(&ill->ill_phyint->phyint_ipsq->ipsq_lock)); 8539 return; 8540 case DL_BIND_ACK: 8541 /* 8542 * We should have an IOCTL waiting on this unless 8543 * sent by ill_dl_phys, in which case just return. 8544 */ 8545 ill_dlpi_done(ill, DL_BIND_REQ); 8546 if (ill->ill_ifname_pending) { 8547 DTRACE_PROBE2(ip__rput__dlpi__ifname__pending, 8548 ill_t *, ill, mblk_t *, mp); 8549 break; 8550 } 8551 if (!ioctl_aborted) 8552 mp1 = ipsq_pending_mp_get(ipsq, &connp); 8553 if (mp1 == NULL) { 8554 DTRACE_PROBE1(ip__rput__dlpi__no__mblk, ill_t *, ill); 8555 break; 8556 } 8557 /* 8558 * mp1 was added by ill_dl_up(). If that is a result of 8559 * a DL_NOTE_REPLUMB notification, connp could be NULL. 8560 */ 8561 if (connp != NULL) 8562 q = CONNP_TO_WQ(connp); 8563 /* 8564 * We are exclusive. So nothing can change even after 8565 * we get the pending mp. 8566 */ 8567 ip1dbg(("ip_rput_dlpi: bind_ack %s\n", ill->ill_name)); 8568 DTRACE_PROBE1(ip__rput__dlpi__bind__ack, ill_t *, ill); 8569 8570 mutex_enter(&ill->ill_lock); 8571 ill->ill_dl_up = 1; 8572 ill->ill_state_flags &= ~ILL_DOWN_IN_PROGRESS; 8573 ill_nic_event_dispatch(ill, 0, NE_UP, NULL, 0); 8574 mutex_exit(&ill->ill_lock); 8575 8576 /* 8577 * Now bring up the resolver; when that is complete, we'll 8578 * create IREs. Note that we intentionally mirror what 8579 * ipif_up() would have done, because we got here by way of 8580 * ill_dl_up(), which stopped ipif_up()'s processing. 8581 */ 8582 if (ill->ill_isv6) { 8583 /* 8584 * v6 interfaces. 8585 * Unlike ARP which has to do another bind 8586 * and attach, once we get here we are 8587 * done with NDP. 8588 */ 8589 (void) ipif_resolver_up(ipif, Res_act_initial); 8590 if ((err = ipif_ndp_up(ipif, B_TRUE)) == 0) 8591 err = ipif_up_done_v6(ipif); 8592 } else if (ill->ill_net_type == IRE_IF_RESOLVER) { 8593 /* 8594 * ARP and other v4 external resolvers. 8595 * Leave the pending mblk intact so that 8596 * the ioctl completes in ip_rput().
8597 */ 8598 if (connp != NULL) 8599 mutex_enter(&connp->conn_lock); 8600 mutex_enter(&ill->ill_lock); 8601 success = ipsq_pending_mp_add(connp, ipif, q, mp1, 0); 8602 mutex_exit(&ill->ill_lock); 8603 if (connp != NULL) 8604 mutex_exit(&connp->conn_lock); 8605 if (success) { 8606 err = ipif_resolver_up(ipif, Res_act_initial); 8607 if (err == EINPROGRESS) { 8608 freemsg(mp); 8609 return; 8610 } 8611 ASSERT(arp_no_defense || err != 0); 8612 mp1 = ipsq_pending_mp_get(ipsq, &connp); 8613 } else { 8614 /* The conn has started closing */ 8615 err = EINTR; 8616 } 8617 } else { 8618 /* 8619 * This one is complete. Reply to pending ioctl. 8620 */ 8621 (void) ipif_resolver_up(ipif, Res_act_initial); 8622 err = ipif_up_done(ipif); 8623 } 8624 8625 if ((err == 0) && (ill->ill_up_ipifs)) { 8626 err = ill_up_ipifs(ill, q, mp1); 8627 if (err == EINPROGRESS) { 8628 freemsg(mp); 8629 return; 8630 } 8631 } 8632 8633 /* 8634 * If we have a moved ipif to bring up, and everything has 8635 * succeeded to this point, bring it up on the IPMP ill. 8636 * Otherwise, leave it down -- the admin can try to bring it 8637 * up by hand if need be. 8638 */ 8639 if (ill->ill_move_ipif != NULL) { 8640 if (err != 0) { 8641 ill->ill_move_ipif = NULL; 8642 } else { 8643 ipif = ill->ill_move_ipif; 8644 ill->ill_move_ipif = NULL; 8645 err = ipif_up(ipif, q, mp1); 8646 if (err == EINPROGRESS) { 8647 freemsg(mp); 8648 return; 8649 } 8650 } 8651 } 8652 break; 8653 8654 case DL_NOTIFY_IND: { 8655 dl_notify_ind_t *notify = (dl_notify_ind_t *)mp->b_rptr; 8656 uint_t orig_mtu; 8657 8658 switch (notify->dl_notification) { 8659 case DL_NOTE_PHYS_ADDR: 8660 err = ill_set_phys_addr(ill, mp); 8661 break; 8662 8663 case DL_NOTE_REPLUMB: 8664 /* 8665 * Directly return after calling ill_replumb(). 8666 * Note that we should not free mp as it is reused 8667 * in the ill_replumb() function. 8668 */ 8669 err = ill_replumb(ill, mp); 8670 return; 8671 8672 case DL_NOTE_FASTPATH_FLUSH: 8673 nce_flush(ill, B_FALSE); 8674 break; 8675 8676 case DL_NOTE_SDU_SIZE: 8677 /* 8678 * The dce and fragmentation code can cope with 8679 * this changing while packets are being sent. 8680 * When packets are sent ip_output will discover 8681 * a change. 8682 * 8683 * Change the MTU size of the interface. 8684 */ 8685 mutex_enter(&ill->ill_lock); 8686 ill->ill_current_frag = (uint_t)notify->dl_data; 8687 if (ill->ill_current_frag > ill->ill_max_frag) 8688 ill->ill_max_frag = ill->ill_current_frag; 8689 8690 orig_mtu = ill->ill_mtu; 8691 if (!(ill->ill_flags & ILLF_FIXEDMTU)) { 8692 ill->ill_mtu = ill->ill_current_frag; 8693 8694 /* 8695 * If ill_user_mtu was set (via 8696 * SIOCSLIFLNKINFO), clamp ill_mtu at it. 8697 */ 8698 if (ill->ill_user_mtu != 0 && 8699 ill->ill_user_mtu < ill->ill_mtu) 8700 ill->ill_mtu = ill->ill_user_mtu; 8701 8702 if (ill->ill_isv6) { 8703 if (ill->ill_mtu < IPV6_MIN_MTU) 8704 ill->ill_mtu = IPV6_MIN_MTU; 8705 } else { 8706 if (ill->ill_mtu < IP_MIN_MTU) 8707 ill->ill_mtu = IP_MIN_MTU; 8708 } 8709 } 8710 mutex_exit(&ill->ill_lock); 8711 /* 8712 * Make sure all dce_generation checks find out 8713 * that ill_mtu has changed. 8714 */ 8715 if (orig_mtu != ill->ill_mtu) { 8716 dce_increment_all_generations(ill->ill_isv6, 8717 ill->ill_ipst); 8718 } 8719 8720 /* 8721 * Refresh IPMP meta-interface MTU if necessary. 8722 */ 8723 if (IS_UNDER_IPMP(ill)) 8724 ipmp_illgrp_refresh_mtu(ill->ill_grp); 8725 break; 8726 8727 case DL_NOTE_LINK_UP: 8728 case DL_NOTE_LINK_DOWN: { 8729 /* 8730 * We are writer. ill / phyint / ipsq assocs stable. 
8731 * The RUNNING flag reflects the state of the link. 8732 */ 8733 phyint_t *phyint = ill->ill_phyint; 8734 uint64_t new_phyint_flags; 8735 boolean_t changed = B_FALSE; 8736 boolean_t went_up; 8737 8738 went_up = notify->dl_notification == DL_NOTE_LINK_UP; 8739 mutex_enter(&phyint->phyint_lock); 8740 8741 new_phyint_flags = went_up ? 8742 phyint->phyint_flags | PHYI_RUNNING : 8743 phyint->phyint_flags & ~PHYI_RUNNING; 8744 8745 if (IS_IPMP(ill)) { 8746 new_phyint_flags = went_up ? 8747 new_phyint_flags & ~PHYI_FAILED : 8748 new_phyint_flags | PHYI_FAILED; 8749 } 8750 8751 if (new_phyint_flags != phyint->phyint_flags) { 8752 phyint->phyint_flags = new_phyint_flags; 8753 changed = B_TRUE; 8754 } 8755 mutex_exit(&phyint->phyint_lock); 8756 /* 8757 * ill_restart_dad handles the DAD restart and routing 8758 * socket notification logic. 8759 */ 8760 if (changed) { 8761 ill_restart_dad(phyint->phyint_illv4, went_up); 8762 ill_restart_dad(phyint->phyint_illv6, went_up); 8763 } 8764 break; 8765 } 8766 case DL_NOTE_PROMISC_ON_PHYS: { 8767 phyint_t *phyint = ill->ill_phyint; 8768 8769 mutex_enter(&phyint->phyint_lock); 8770 phyint->phyint_flags |= PHYI_PROMISC; 8771 mutex_exit(&phyint->phyint_lock); 8772 break; 8773 } 8774 case DL_NOTE_PROMISC_OFF_PHYS: { 8775 phyint_t *phyint = ill->ill_phyint; 8776 8777 mutex_enter(&phyint->phyint_lock); 8778 phyint->phyint_flags &= ~PHYI_PROMISC; 8779 mutex_exit(&phyint->phyint_lock); 8780 break; 8781 } 8782 case DL_NOTE_CAPAB_RENEG: 8783 /* 8784 * Something changed on the driver side. 8785 * It wants us to renegotiate the capabilities 8786 * on this ill. One possible cause is the aggregation 8787 * interface under us where a port got added or 8788 * went away. 8789 * 8790 * If the capability negotiation is already done 8791 * or is in progress, reset the capabilities and 8792 * mark the ill's ill_capab_reneg to be B_TRUE, 8793 * so that when the ack comes back, we can start 8794 * the renegotiation process. 8795 * 8796 * Note that if ill_capab_reneg is already B_TRUE 8797 * (ill_dlpi_capab_state is IDS_UNKNOWN in this case), 8798 * the capability resetting request has been sent 8799 * and the renegotiation has not been started yet; 8800 * nothing needs to be done in this case. 8801 */ 8802 ipsq_current_start(ipsq, ill->ill_ipif, 0); 8803 ill_capability_reset(ill, B_TRUE); 8804 ipsq_current_finish(ipsq); 8805 break; 8806 default: 8807 ip0dbg(("ip_rput_dlpi_writer: unknown notification " 8808 "type 0x%x for DL_NOTIFY_IND\n", 8809 notify->dl_notification)); 8810 break; 8811 } 8812 8813 /* 8814 * As this is an asynchronous operation, we 8815 * should not call ill_dlpi_done 8816 */ 8817 break; 8818 } 8819 case DL_NOTIFY_ACK: { 8820 dl_notify_ack_t *noteack = (dl_notify_ack_t *)mp->b_rptr; 8821 8822 if (noteack->dl_notifications & DL_NOTE_LINK_UP) 8823 ill->ill_note_link = 1; 8824 ill_dlpi_done(ill, DL_NOTIFY_REQ); 8825 break; 8826 } 8827 case DL_PHYS_ADDR_ACK: { 8828 /* 8829 * As part of plumbing the interface via SIOCSLIFNAME, 8830 * ill_dl_phys() will queue a series of DL_PHYS_ADDR_REQs, 8831 * whose answers we receive here. As each answer is received, 8832 * we call ill_dlpi_done() to dispatch the next request as 8833 * we're processing the current one. Once all answers have 8834 * been received, we use ipsq_pending_mp_get() to dequeue the 8835 * outstanding IOCTL and reply to it. (Because ill_dl_phys() 8836 * is invoked from an ill queue, conn_oper_pending_ill is not 8837 * available, but we know the ioctl is pending on ill_wq.) 
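 * The answers handled below are DL_CURR_PHYS_ADDR, DL_IPV6_TOKEN, DL_IPV6_LINK_LAYER_ADDR and DL_CURR_DEST_ADDR; ill_phys_addr_pend records which request the current ack answers.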
8838 */ 8839 uint_t paddrlen, paddroff; 8840 uint8_t *addr; 8841 8842 paddrreq = ill->ill_phys_addr_pend; 8843 paddrlen = ((dl_phys_addr_ack_t *)mp->b_rptr)->dl_addr_length; 8844 paddroff = ((dl_phys_addr_ack_t *)mp->b_rptr)->dl_addr_offset; 8845 addr = mp->b_rptr + paddroff; 8846 8847 ill_dlpi_done(ill, DL_PHYS_ADDR_REQ); 8848 if (paddrreq == DL_IPV6_TOKEN) { 8849 /* 8850 * bcopy to low-order bits of ill_token 8851 * 8852 * XXX Temporary hack - currently, all known tokens 8853 * are 64 bits, so I'll cheat for the moment. 8854 */ 8855 bcopy(addr, &ill->ill_token.s6_addr32[2], paddrlen); 8856 ill->ill_token_length = paddrlen; 8857 break; 8858 } else if (paddrreq == DL_IPV6_LINK_LAYER_ADDR) { 8859 ASSERT(ill->ill_nd_lla_mp == NULL); 8860 ill_set_ndmp(ill, mp, paddroff, paddrlen); 8861 mp = NULL; 8862 break; 8863 } else if (paddrreq == DL_CURR_DEST_ADDR) { 8864 ASSERT(ill->ill_dest_addr_mp == NULL); 8865 ill->ill_dest_addr_mp = mp; 8866 ill->ill_dest_addr = addr; 8867 mp = NULL; 8868 if (ill->ill_isv6) { 8869 ill_setdesttoken(ill); 8870 ipif_setdestlinklocal(ill->ill_ipif); 8871 } 8872 break; 8873 } 8874 8875 ASSERT(paddrreq == DL_CURR_PHYS_ADDR); 8876 ASSERT(ill->ill_phys_addr_mp == NULL); 8877 if (!ill->ill_ifname_pending) 8878 break; 8879 ill->ill_ifname_pending = 0; 8880 if (!ioctl_aborted) 8881 mp1 = ipsq_pending_mp_get(ipsq, &connp); 8882 if (mp1 != NULL) { 8883 ASSERT(connp == NULL); 8884 q = ill->ill_wq; 8885 } 8886 /* 8887 * If any error acks were received during the plumbing sequence, 8888 * ill_ifname_pending_err will be set. Break out and send up 8889 * the error to the pending ioctl. 8890 */ 8891 if (ill->ill_ifname_pending_err != 0) { 8892 err = ill->ill_ifname_pending_err; 8893 ill->ill_ifname_pending_err = 0; 8894 break; 8895 } 8896 8897 ill->ill_phys_addr_mp = mp; 8898 ill->ill_phys_addr = (paddrlen == 0 ? NULL : addr); 8899 mp = NULL; 8900 8901 /* 8902 * If paddrlen or ill_phys_addr_length is zero, the DLPI 8903 * provider doesn't support physical addresses. We check both 8904 * paddrlen and ill_phys_addr_length because sppp (PPP) does 8905 * not have physical addresses, but historically advertises a 8906 * physical address length of 0 in its DL_INFO_ACK, but 6 in 8907 * its DL_PHYS_ADDR_ACK.
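 * A mismatch is therefore tolerated only when one of the two lengths is zero; any other disagreement is rejected with EINVAL below.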
8908 */ 8909 if (paddrlen == 0 || ill->ill_phys_addr_length == 0) { 8910 ill->ill_phys_addr = NULL; 8911 } else if (paddrlen != ill->ill_phys_addr_length) { 8912 ip0dbg(("DL_PHYS_ADDR_ACK: got addrlen %d, expected %d", 8913 paddrlen, ill->ill_phys_addr_length)); 8914 err = EINVAL; 8915 break; 8916 } 8917 8918 if (ill->ill_nd_lla_mp == NULL) { 8919 if ((mp_hw = copyb(ill->ill_phys_addr_mp)) == NULL) { 8920 err = ENOMEM; 8921 break; 8922 } 8923 ill_set_ndmp(ill, mp_hw, paddroff, paddrlen); 8924 } 8925 8926 if (ill->ill_isv6) { 8927 ill_setdefaulttoken(ill); 8928 ipif_setlinklocal(ill->ill_ipif); 8929 } 8930 break; 8931 } 8932 case DL_OK_ACK: 8933 ip2dbg(("DL_OK_ACK %s (0x%x)\n", 8934 dl_primstr((int)dloa->dl_correct_primitive), 8935 dloa->dl_correct_primitive)); 8936 DTRACE_PROBE3(ill__dlpi, char *, "ip_rput_dlpi_writer ok", 8937 char *, dl_primstr(dloa->dl_correct_primitive), 8938 ill_t *, ill); 8939 8940 switch (dloa->dl_correct_primitive) { 8941 case DL_ENABMULTI_REQ: 8942 case DL_DISABMULTI_REQ: 8943 ill_dlpi_done(ill, dloa->dl_correct_primitive); 8944 break; 8945 case DL_PROMISCON_REQ: 8946 case DL_PROMISCOFF_REQ: 8947 case DL_UNBIND_REQ: 8948 case DL_ATTACH_REQ: 8949 ill_dlpi_done(ill, dloa->dl_correct_primitive); 8950 break; 8951 } 8952 break; 8953 default: 8954 break; 8955 } 8956 8957 freemsg(mp); 8958 if (mp1 == NULL) 8959 return; 8960 8961 /* 8962 * The operation must complete without EINPROGRESS since 8963 * ipsq_pending_mp_get() has removed the mblk (mp1). Otherwise, 8964 * the operation will be stuck forever inside the IPSQ. 8965 */ 8966 ASSERT(err != EINPROGRESS); 8967 8968 DTRACE_PROBE4(ipif__ioctl, char *, "ip_rput_dlpi_writer finish", 8969 int, ipsq->ipsq_xop->ipx_current_ioctl, ill_t *, ill, 8970 ipif_t *, NULL); 8971 8972 switch (ipsq->ipsq_xop->ipx_current_ioctl) { 8973 case 0: 8974 ipsq_current_finish(ipsq); 8975 break; 8976 8977 case SIOCSLIFNAME: 8978 case IF_UNITSEL: { 8979 ill_t *ill_other = ILL_OTHER(ill); 8980 8981 /* 8982 * If SIOCSLIFNAME or IF_UNITSEL is about to succeed, and the 8983 * ill has a peer which is in an IPMP group, then place ill 8984 * into the same group. One catch: although ifconfig plumbs 8985 * the appropriate IPMP meta-interface prior to plumbing this 8986 * ill, it is possible for multiple ifconfig applications to 8987 * race (or for another application to adjust plumbing), in 8988 * which case the IPMP meta-interface we need will be missing. 8989 * If so, kick the phyint out of the group. 8990 */ 8991 if (err == 0 && ill_other != NULL && IS_UNDER_IPMP(ill_other)) { 8992 ipmp_grp_t *grp = ill->ill_phyint->phyint_grp; 8993 ipmp_illgrp_t *illg; 8994 8995 illg = ill->ill_isv6 ? grp->gr_v6 : grp->gr_v4; 8996 if (illg == NULL) 8997 ipmp_phyint_leave_grp(ill->ill_phyint); 8998 else 8999 ipmp_ill_join_illgrp(ill, illg); 9000 } 9001 9002 if (ipsq->ipsq_xop->ipx_current_ioctl == IF_UNITSEL) 9003 ip_ioctl_finish(q, mp1, err, NO_COPYOUT, ipsq); 9004 else 9005 ip_ioctl_finish(q, mp1, err, COPYOUT, ipsq); 9006 break; 9007 } 9008 case SIOCLIFADDIF: 9009 ip_ioctl_finish(q, mp1, err, COPYOUT, ipsq); 9010 break; 9011 9012 default: 9013 ip_ioctl_finish(q, mp1, err, NO_COPYOUT, ipsq); 9014 break; 9015 } 9016 } 9017 9018 /* 9019 * ip_rput_other is called by ip_rput to handle messages modifying the global 9020 * state in IP. If 'ipsq' is non-NULL, caller is writer on it. 
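 *
 * Two classes of message arrive here: M_ERROR/M_HANGUP from the device,
 * which force the ill down (it can be brought back up with SIOCSIFFLAGS),
 * and M_IOCNAK for the DL_IOC_HDR_INFO fastpath probe, which turns off
 * further fastpath probing for this ill.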
9021	 */
9022	/* ARGSUSED */
9023	void
9024	ip_rput_other(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *dummy_arg)
9025	{
9026		ill_t		*ill = q->q_ptr;
9027		struct iocblk	*iocp;
9028
9029		ip1dbg(("ip_rput_other "));
9030		if (ipsq != NULL) {
9031			ASSERT(IAM_WRITER_IPSQ(ipsq));
9032			ASSERT(ipsq->ipsq_xop ==
9033			    ill->ill_phyint->phyint_ipsq->ipsq_xop);
9034	}
9035
9036		switch (mp->b_datap->db_type) {
9037		case M_ERROR:
9038		case M_HANGUP:
9039			/*
9040			 * The device has a problem.  We force the ILL down.  It can
9041			 * be brought up again manually using SIOCSIFFLAGS (via
9042			 * ifconfig or equivalent).
9043			 */
9044			ASSERT(ipsq != NULL);
9045			if (mp->b_rptr < mp->b_wptr)
9046				ill->ill_error = (int)(*mp->b_rptr & 0xFF);
9047			if (ill->ill_error == 0)
9048				ill->ill_error = ENXIO;
9049			if (!ill_down_start(q, mp))
9050				return;
9051			ipif_all_down_tail(ipsq, q, mp, NULL);
9052			break;
9053		case M_IOCNAK: {
9054			iocp = (struct iocblk *)mp->b_rptr;
9055
9056			ASSERT(iocp->ioc_cmd == DL_IOC_HDR_INFO);
9057			/*
9058			 * If this was the first attempt, turn off the fastpath
9059			 * probing.
9060			 */
9061			mutex_enter(&ill->ill_lock);
9062			if (ill->ill_dlpi_fastpath_state == IDS_INPROGRESS) {
9063				ill->ill_dlpi_fastpath_state = IDS_FAILED;
9064				mutex_exit(&ill->ill_lock);
9065				/*
9066				 * don't flush the nce_t entries: we use them
9067				 * as an index to the ncec itself.
9068				 */
9069				ip1dbg(("ip_rput: DLPI fastpath off on interface %s\n",
9070				    ill->ill_name));
9071			} else {
9072				mutex_exit(&ill->ill_lock);
9073			}
9074			freemsg(mp);
9075			break;
9076		}
9077		default:
9078			ASSERT(0);
9079			break;
9080		}
9081	}
9082
9083	/*
9084	 * Update any source route, record route or timestamp options
9085	 * When it fails it has consumed the message and BUMPed the MIB.
9086	 */
9087	boolean_t
9088	ip_forward_options(mblk_t *mp, ipha_t *ipha, ill_t *dst_ill,
9089	    ip_recv_attr_t *ira)
9090	{
9091		ipoptp_t	opts;
9092		uchar_t		*opt;
9093		uint8_t		optval;
9094		uint8_t		optlen;
9095		ipaddr_t	dst;
9096		ipaddr_t	ifaddr;
9097		uint32_t	ts;
9098		timestruc_t	now;
9099		ip_stack_t	*ipst = ira->ira_ill->ill_ipst;
9100
9101		ip2dbg(("ip_forward_options\n"));
9102		dst = ipha->ipha_dst;
9103		for (optval = ipoptp_first(&opts, ipha);
9104		    optval != IPOPT_EOL;
9105		    optval = ipoptp_next(&opts)) {
9106			ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
9107			opt = opts.ipoptp_cur;
9108			optlen = opts.ipoptp_len;
9109			ip2dbg(("ip_forward_options: opt %d, len %d\n",
9110			    optval, opts.ipoptp_len));
9111			switch (optval) {
9112			uint32_t off;
9113			case IPOPT_SSRR:
9114			case IPOPT_LSRR:
9115				/* Check if administratively disabled */
9116				if (!ipst->ips_ip_forward_src_routed) {
9117					BUMP_MIB(dst_ill->ill_ip_mib,
9118					    ipIfStatsForwProhibits);
9119					ip_drop_input("ICMP_SOURCE_ROUTE_FAILED",
9120					    mp, dst_ill);
9121					icmp_unreachable(mp, ICMP_SOURCE_ROUTE_FAILED,
9122					    ira);
9123					return (B_FALSE);
9124				}
9125				if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
9126					/*
9127					 * Must be partial since ip_input_options
9128					 * checked for strict.
9129					 */
9130					break;
9131				}
9132				off = opt[IPOPT_OFFSET];
9133				off--;
9134			redo_srr:
9135				if (optlen < IP_ADDR_LEN ||
9136				    off > optlen - IP_ADDR_LEN) {
9137					/* End of source route */
9138					ip1dbg((
9139					    "ip_forward_options: end of SR\n"));
9140					break;
9141				}
9142				/* Pick a reasonable address on the outbound if */
9143				ASSERT(dst_ill != NULL);
9144				if (ip_select_source_v4(dst_ill, INADDR_ANY, dst,
9145				    INADDR_ANY, ALL_ZONES, ipst, &ifaddr, NULL,
9146				    NULL) != 0) {
9147					/* No source!
Shouldn't happen */ 9148 ifaddr = INADDR_ANY; 9149 } 9150 bcopy((char *)opt + off, &dst, IP_ADDR_LEN); 9151 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN); 9152 ip1dbg(("ip_forward_options: next hop 0x%x\n", 9153 ntohl(dst))); 9154 9155 /* 9156 * Check if our address is present more than 9157 * once as consecutive hops in source route. 9158 */ 9159 if (ip_type_v4(dst, ipst) == IRE_LOCAL) { 9160 off += IP_ADDR_LEN; 9161 opt[IPOPT_OFFSET] += IP_ADDR_LEN; 9162 goto redo_srr; 9163 } 9164 ipha->ipha_dst = dst; 9165 opt[IPOPT_OFFSET] += IP_ADDR_LEN; 9166 break; 9167 case IPOPT_RR: 9168 off = opt[IPOPT_OFFSET]; 9169 off--; 9170 if (optlen < IP_ADDR_LEN || 9171 off > optlen - IP_ADDR_LEN) { 9172 /* No more room - ignore */ 9173 ip1dbg(( 9174 "ip_forward_options: end of RR\n")); 9175 break; 9176 } 9177 /* Pick a reasonable address on the outbound if */ 9178 ASSERT(dst_ill != NULL); 9179 if (ip_select_source_v4(dst_ill, INADDR_ANY, dst, 9180 INADDR_ANY, ALL_ZONES, ipst, &ifaddr, NULL, 9181 NULL) != 0) { 9182 /* No source! Shouldn't happen */ 9183 ifaddr = INADDR_ANY; 9184 } 9185 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN); 9186 opt[IPOPT_OFFSET] += IP_ADDR_LEN; 9187 break; 9188 case IPOPT_TS: 9189 /* Insert timestamp if there is room */ 9190 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) { 9191 case IPOPT_TS_TSONLY: 9192 off = IPOPT_TS_TIMELEN; 9193 break; 9194 case IPOPT_TS_PRESPEC: 9195 case IPOPT_TS_PRESPEC_RFC791: 9196 /* Verify that the address matched */ 9197 off = opt[IPOPT_OFFSET] - 1; 9198 bcopy((char *)opt + off, &dst, IP_ADDR_LEN); 9199 if (ip_type_v4(dst, ipst) != IRE_LOCAL) { 9200 /* Not for us */ 9201 break; 9202 } 9203 /* FALLTHRU */ 9204 case IPOPT_TS_TSANDADDR: 9205 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN; 9206 break; 9207 default: 9208 /* 9209 * ip_*put_options should have already 9210 * dropped this packet. 9211 */ 9212 cmn_err(CE_PANIC, "ip_forward_options: " 9213 "unknown IT - bug in ip_input_options?\n"); 9214 return (B_TRUE); /* Keep "lint" happy */ 9215 } 9216 if (opt[IPOPT_OFFSET] - 1 + off > optlen) { 9217 /* Increase overflow counter */ 9218 off = (opt[IPOPT_POS_OV_FLG] >> 4) + 1; 9219 opt[IPOPT_POS_OV_FLG] = 9220 (uint8_t)((opt[IPOPT_POS_OV_FLG] & 0x0F) | 9221 (off << 4)); 9222 break; 9223 } 9224 off = opt[IPOPT_OFFSET] - 1; 9225 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) { 9226 case IPOPT_TS_PRESPEC: 9227 case IPOPT_TS_PRESPEC_RFC791: 9228 case IPOPT_TS_TSANDADDR: 9229 /* Pick a reasonable addr on the outbound if */ 9230 ASSERT(dst_ill != NULL); 9231 if (ip_select_source_v4(dst_ill, INADDR_ANY, 9232 dst, INADDR_ANY, ALL_ZONES, ipst, &ifaddr, 9233 NULL, NULL) != 0) { 9234 /* No source! Shouldn't happen */ 9235 ifaddr = INADDR_ANY; 9236 } 9237 bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN); 9238 opt[IPOPT_OFFSET] += IP_ADDR_LEN; 9239 /* FALLTHRU */ 9240 case IPOPT_TS_TSONLY: 9241 off = opt[IPOPT_OFFSET] - 1; 9242 /* Compute # of milliseconds since midnight */ 9243 gethrestime(&now); 9244 ts = (now.tv_sec % (24 * 60 * 60)) * 1000 + 9245 now.tv_nsec / (NANOSEC / MILLISEC); 9246 bcopy(&ts, (char *)opt + off, IPOPT_TS_TIMELEN); 9247 opt[IPOPT_OFFSET] += IPOPT_TS_TIMELEN; 9248 break; 9249 } 9250 break; 9251 } 9252 } 9253 return (B_TRUE); 9254 } 9255 9256 /* 9257 * Call ill_frag_timeout to do garbage collection. ill_frag_timeout 9258 * returns 'true' if there are still fragments left on the queue, in 9259 * which case we restart the timer. 
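 *
 * The restart handshake with ill_frag_timer_start() works roughly as
 * follows (an informal sketch, not an exhaustive state table):
 *
 *	state when a start is requested		resulting action
 *	---------------------------------	--------------------------
 *	timeout already posted (id != 0)	none; it will fire anyway
 *	handler running (..._executing set)	set ..._needrestart; the
 *						handler reposts on return
 *	neither					post timeout(ill_frag_timer)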
9260	 */
9261	void
9262	ill_frag_timer(void *arg)
9263	{
9264		ill_t		*ill = (ill_t *)arg;
9265		boolean_t	frag_pending;
9266		ip_stack_t	*ipst = ill->ill_ipst;
9267		time_t		timeout;
9268
9269		mutex_enter(&ill->ill_lock);
9270		ASSERT(!ill->ill_fragtimer_executing);
9271		if (ill->ill_state_flags & ILL_CONDEMNED) {
9272			ill->ill_frag_timer_id = 0;
9273			mutex_exit(&ill->ill_lock);
9274			return;
9275		}
9276		ill->ill_fragtimer_executing = 1;
9277		mutex_exit(&ill->ill_lock);
9278
9279		if (ill->ill_isv6)
9280			timeout = ipst->ips_ipv6_frag_timeout;
9281		else
9282			timeout = ipst->ips_ip_g_frag_timeout;
9283
9284		frag_pending = ill_frag_timeout(ill, timeout);
9285
9286		/*
9287		 * Restart the timer, if we have fragments pending or if someone
9288		 * wanted us to be scheduled again.
9289		 */
9290		mutex_enter(&ill->ill_lock);
9291		ill->ill_fragtimer_executing = 0;
9292		ill->ill_frag_timer_id = 0;
9293		if (frag_pending || ill->ill_fragtimer_needrestart)
9294			ill_frag_timer_start(ill);
9295		mutex_exit(&ill->ill_lock);
9296	}
9297
9298	void
9299	ill_frag_timer_start(ill_t *ill)
9300	{
9301		ip_stack_t	*ipst = ill->ill_ipst;
9302		clock_t		timeo_ms;
9303
9304		ASSERT(MUTEX_HELD(&ill->ill_lock));
9305
9306		/* If the ill is closing or opening don't proceed */
9307		if (ill->ill_state_flags & ILL_CONDEMNED)
9308			return;
9309
9310		if (ill->ill_fragtimer_executing) {
9311			/*
9312			 * ill_frag_timer is currently executing. Just record the
9313			 * fact that we want the timer to be restarted.
9314			 * ill_frag_timer will post a timeout before it returns,
9315			 * ensuring it will be called again.
9316			 */
9317			ill->ill_fragtimer_needrestart = 1;
9318			return;
9319		}
9320
9321		if (ill->ill_frag_timer_id == 0) {
9322			if (ill->ill_isv6)
9323				timeo_ms = ipst->ips_ipv6_frag_timo_ms;
9324			else
9325				timeo_ms = ipst->ips_ip_g_frag_timo_ms;
9326			/*
9327			 * The timer is neither running nor is the timeout handler
9328			 * executing. Post a timeout so that ill_frag_timer will be
9329			 * called.
9330			 */
9331			ill->ill_frag_timer_id = timeout(ill_frag_timer, ill,
9332			    MSEC_TO_TICK(timeo_ms >> 1));
9333			ill->ill_fragtimer_needrestart = 0;
9334		}
9335	}
9336
9337	/*
9338	 * Update any source route, record route or timestamp options.
9339	 * Check that we are at end of strict source route.
9340	 * The options have already been checked for sanity in ip_input_options().
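 *
 * Note that a packet whose loose source route is not yet exhausted when
 * it arrives here is tolerated: the unused remainder of the option is
 * overwritten with IPOPT_NOP below rather than dropping the packet. The
 * same situation with a strict source route is a routing error and
 * results in ICMP_SOURCE_ROUTE_FAILED.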
9341	 */
9342	boolean_t
9343	ip_input_local_options(mblk_t *mp, ipha_t *ipha, ip_recv_attr_t *ira)
9344	{
9345		ipoptp_t	opts;
9346		uchar_t		*opt;
9347		uint8_t		optval;
9348		uint8_t		optlen;
9349		ipaddr_t	dst;
9350		ipaddr_t	ifaddr;
9351		uint32_t	ts;
9352		timestruc_t	now;
9353		ill_t		*ill = ira->ira_ill;
9354		ip_stack_t	*ipst = ill->ill_ipst;
9355
9356		ip2dbg(("ip_input_local_options\n"));
9357
9358		for (optval = ipoptp_first(&opts, ipha);
9359		    optval != IPOPT_EOL;
9360		    optval = ipoptp_next(&opts)) {
9361			ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0);
9362			opt = opts.ipoptp_cur;
9363			optlen = opts.ipoptp_len;
9364			ip2dbg(("ip_input_local_options: opt %d, len %d\n",
9365			    optval, optlen));
9366			switch (optval) {
9367			uint32_t off;
9368			case IPOPT_SSRR:
9369			case IPOPT_LSRR:
9370				off = opt[IPOPT_OFFSET];
9371				off--;
9372				if (optlen < IP_ADDR_LEN ||
9373				    off > optlen - IP_ADDR_LEN) {
9374					/* End of source route */
9375					ip1dbg(("ip_input_local_options: end of SR\n"));
9376					break;
9377				}
9378				/*
9379				 * This will only happen if two consecutive entries
9380				 * in the source route contain our address or if
9381				 * it is a packet with a loose source route which
9382				 * reaches us before consuming the whole source route
9383				 */
9384				ip1dbg(("ip_input_local_options: not end of SR\n"));
9385				if (optval == IPOPT_SSRR) {
9386					goto bad_src_route;
9387				}
9388				/*
9389				 * Hack: instead of dropping the packet truncate the
9390				 * source route to what has been used by filling the
9391				 * rest with IPOPT_NOP.
9392				 */
9393				opt[IPOPT_OLEN] = (uint8_t)off;
9394				while (off < optlen) {
9395					opt[off++] = IPOPT_NOP;
9396				}
9397				break;
9398			case IPOPT_RR:
9399				off = opt[IPOPT_OFFSET];
9400				off--;
9401				if (optlen < IP_ADDR_LEN ||
9402				    off > optlen - IP_ADDR_LEN) {
9403					/* No more room - ignore */
9404					ip1dbg((
9405					    "ip_input_local_options: end of RR\n"));
9406					break;
9407				}
9408				/* Pick a reasonable address on the outbound if */
9409				if (ip_select_source_v4(ill, INADDR_ANY, ipha->ipha_dst,
9410				    INADDR_ANY, ALL_ZONES, ipst, &ifaddr, NULL,
9411				    NULL) != 0) {
9412					/* No source! Shouldn't happen */
9413					ifaddr = INADDR_ANY;
9414				}
9415				bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9416				opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9417				break;
9418			case IPOPT_TS:
9419				/* Insert timestamp if there is room */
9420				switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9421				case IPOPT_TS_TSONLY:
9422					off = IPOPT_TS_TIMELEN;
9423					break;
9424				case IPOPT_TS_PRESPEC:
9425				case IPOPT_TS_PRESPEC_RFC791:
9426					/* Verify that the address matched */
9427					off = opt[IPOPT_OFFSET] - 1;
9428					bcopy((char *)opt + off, &dst, IP_ADDR_LEN);
9429					if (ip_type_v4(dst, ipst) != IRE_LOCAL) {
9430						/* Not for us */
9431						break;
9432					}
9433					/* FALLTHRU */
9434				case IPOPT_TS_TSANDADDR:
9435					off = IP_ADDR_LEN + IPOPT_TS_TIMELEN;
9436					break;
9437				default:
9438					/*
9439					 * ip_*put_options should have already
9440					 * dropped this packet.
9441					 */
9442					cmn_err(CE_PANIC, "ip_input_local_options: "
9443					    "unknown IT - bug in ip_input_options?\n");
9444					return (B_TRUE);	/* Keep "lint" happy */
9445				}
9446				if (opt[IPOPT_OFFSET] - 1 + off > optlen) {
9447					/* Increase overflow counter */
9448					off = (opt[IPOPT_POS_OV_FLG] >> 4) + 1;
9449					opt[IPOPT_POS_OV_FLG] =
9450					    (uint8_t)((opt[IPOPT_POS_OV_FLG] & 0x0F) |
9451					    (off << 4));
9452					break;
9453				}
9454				off = opt[IPOPT_OFFSET] - 1;
9455				switch (opt[IPOPT_POS_OV_FLG] & 0x0F) {
9456				case IPOPT_TS_PRESPEC:
9457				case IPOPT_TS_PRESPEC_RFC791:
9458				case IPOPT_TS_TSANDADDR:
9459					/* Pick a reasonable addr on the outbound if */
9460					if (ip_select_source_v4(ill, INADDR_ANY,
9461					    ipha->ipha_dst, INADDR_ANY, ALL_ZONES, ipst,
9462					    &ifaddr, NULL, NULL) != 0) {
9463						/* No source! Shouldn't happen */
9464						ifaddr = INADDR_ANY;
9465					}
9466					bcopy(&ifaddr, (char *)opt + off, IP_ADDR_LEN);
9467					opt[IPOPT_OFFSET] += IP_ADDR_LEN;
9468					/* FALLTHRU */
9469				case IPOPT_TS_TSONLY:
9470					off = opt[IPOPT_OFFSET] - 1;
9471					/* Compute # of milliseconds since midnight */
9472					gethrestime(&now);
9473					ts = (now.tv_sec % (24 * 60 * 60)) * 1000 +
9474					    now.tv_nsec / (NANOSEC / MILLISEC);
9475					bcopy(&ts, (char *)opt + off, IPOPT_TS_TIMELEN);
9476					opt[IPOPT_OFFSET] += IPOPT_TS_TIMELEN;
9477					break;
9478				}
9479				break;
9480			}
9481		}
9482		return (B_TRUE);
9483
9484	bad_src_route:
9485		/* make sure we clear any indication of a hardware checksum */
9486		DB_CKSUMFLAGS(mp) = 0;
9487		ip_drop_input("ICMP_SOURCE_ROUTE_FAILED", mp, ill);
9488		icmp_unreachable(mp, ICMP_SOURCE_ROUTE_FAILED, ira);
9489		return (B_FALSE);
9490
9491	}
9492
9493	/*
9494	 * Process IP options in an inbound packet. Always returns the nexthop.
9495	 * Normally this is the passed in nexthop, but if there is an option
9496	 * that affects the nexthop (such as a source route) that will be returned.
9497	 * Sets *errorp if there is an error, in which case an ICMP error has been sent
9498	 * and mp freed.
9499	 */
9500	ipaddr_t
9501	ip_input_options(ipha_t *ipha, ipaddr_t dst, mblk_t *mp,
9502	    ip_recv_attr_t *ira, int *errorp)
9503	{
9504		ip_stack_t	*ipst = ira->ira_ill->ill_ipst;
9505		ipoptp_t	opts;
9506		uchar_t		*opt;
9507		uint8_t		optval;
9508		uint8_t		optlen;
9509		intptr_t	code = 0;
9510		ire_t		*ire;
9511
9512		ip2dbg(("ip_input_options\n"));
9513		*errorp = 0;
9514		for (optval = ipoptp_first(&opts, ipha);
9515		    optval != IPOPT_EOL;
9516		    optval = ipoptp_next(&opts)) {
9517			opt = opts.ipoptp_cur;
9518			optlen = opts.ipoptp_len;
9519			ip2dbg(("ip_input_options: opt %d, len %d\n",
9520			    optval, optlen));
9521			/*
9522			 * Note: we need to verify the checksum before we
9523			 * modify anything thus this routine only extracts the next
9524			 * hop dst from any source route.
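		 *
		 * E.g. (a sketch of the option layout): with p =
		 * opt[IPOPT_OFFSET] and l = optlen, the candidate next hop
		 * for LSRR/SSRR is the IP_ADDR_LEN bytes at opt[p - 1]; it
		 * is only valid while p - 1 <= l - IP_ADDR_LEN.
		 * opt[IPOPT_OFFSET] itself is advanced later, in
		 * ip_forward_options().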
9525 */ 9526 switch (optval) { 9527 uint32_t off; 9528 case IPOPT_SSRR: 9529 case IPOPT_LSRR: 9530 if (ip_type_v4(dst, ipst) != IRE_LOCAL) { 9531 if (optval == IPOPT_SSRR) { 9532 ip1dbg(("ip_input_options: not next" 9533 " strict source route 0x%x\n", 9534 ntohl(dst))); 9535 code = (char *)&ipha->ipha_dst - 9536 (char *)ipha; 9537 goto param_prob; /* RouterReq's */ 9538 } 9539 ip2dbg(("ip_input_options: " 9540 "not next source route 0x%x\n", 9541 ntohl(dst))); 9542 break; 9543 } 9544 9545 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) { 9546 ip1dbg(( 9547 "ip_input_options: bad option offset\n")); 9548 code = (char *)&opt[IPOPT_OLEN] - 9549 (char *)ipha; 9550 goto param_prob; 9551 } 9552 off = opt[IPOPT_OFFSET]; 9553 off--; 9554 redo_srr: 9555 if (optlen < IP_ADDR_LEN || 9556 off > optlen - IP_ADDR_LEN) { 9557 /* End of source route */ 9558 ip1dbg(("ip_input_options: end of SR\n")); 9559 break; 9560 } 9561 bcopy((char *)opt + off, &dst, IP_ADDR_LEN); 9562 ip1dbg(("ip_input_options: next hop 0x%x\n", 9563 ntohl(dst))); 9564 9565 /* 9566 * Check if our address is present more than 9567 * once as consecutive hops in source route. 9568 * XXX verify per-interface ip_forwarding 9569 * for source route? 9570 */ 9571 if (ip_type_v4(dst, ipst) == IRE_LOCAL) { 9572 off += IP_ADDR_LEN; 9573 goto redo_srr; 9574 } 9575 9576 if (dst == htonl(INADDR_LOOPBACK)) { 9577 ip1dbg(("ip_input_options: loopback addr in " 9578 "source route!\n")); 9579 goto bad_src_route; 9580 } 9581 /* 9582 * For strict: verify that dst is directly 9583 * reachable. 9584 */ 9585 if (optval == IPOPT_SSRR) { 9586 ire = ire_ftable_lookup_v4(dst, 0, 0, 9587 IRE_IF_ALL, NULL, ALL_ZONES, 9588 ira->ira_tsl, 9589 MATCH_IRE_TYPE | MATCH_IRE_SECATTR, 0, ipst, 9590 NULL); 9591 if (ire == NULL) { 9592 ip1dbg(("ip_input_options: SSRR not " 9593 "directly reachable: 0x%x\n", 9594 ntohl(dst))); 9595 goto bad_src_route; 9596 } 9597 ire_refrele(ire); 9598 } 9599 /* 9600 * Defer update of the offset and the record route 9601 * until the packet is forwarded. 9602 */ 9603 break; 9604 case IPOPT_RR: 9605 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) { 9606 ip1dbg(( 9607 "ip_input_options: bad option offset\n")); 9608 code = (char *)&opt[IPOPT_OLEN] - 9609 (char *)ipha; 9610 goto param_prob; 9611 } 9612 break; 9613 case IPOPT_TS: 9614 /* 9615 * Verify that length >= 5 and that there is either 9616 * room for another timestamp or that the overflow 9617 * counter is not maxed out. 9618 */ 9619 code = (char *)&opt[IPOPT_OLEN] - (char *)ipha; 9620 if (optlen < IPOPT_MINLEN_IT) { 9621 goto param_prob; 9622 } 9623 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) { 9624 ip1dbg(( 9625 "ip_input_options: bad option offset\n")); 9626 code = (char *)&opt[IPOPT_OFFSET] - 9627 (char *)ipha; 9628 goto param_prob; 9629 } 9630 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) { 9631 case IPOPT_TS_TSONLY: 9632 off = IPOPT_TS_TIMELEN; 9633 break; 9634 case IPOPT_TS_TSANDADDR: 9635 case IPOPT_TS_PRESPEC: 9636 case IPOPT_TS_PRESPEC_RFC791: 9637 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN; 9638 break; 9639 default: 9640 code = (char *)&opt[IPOPT_POS_OV_FLG] - 9641 (char *)ipha; 9642 goto param_prob; 9643 } 9644 if (opt[IPOPT_OFFSET] - 1 + off > optlen && 9645 (opt[IPOPT_POS_OV_FLG] & 0xF0) == 0xF0) { 9646 /* 9647 * No room and the overflow counter is 15 9648 * already. 
9649			 */
9650				goto param_prob;
9651			}
9652			break;
9653		}
9654	}
9655
9656	if ((opts.ipoptp_flags & IPOPTP_ERROR) == 0) {
9657		return (dst);
9658	}
9659
9660	ip1dbg(("ip_input_options: error processing IP options."));
9661	code = (char *)&opt[IPOPT_OFFSET] - (char *)ipha;
9662
9663	param_prob:
9664		/* make sure we clear any indication of a hardware checksum */
9665		DB_CKSUMFLAGS(mp) = 0;
9666		ip_drop_input("ICMP_PARAM_PROBLEM", mp, ira->ira_ill);
9667		icmp_param_problem(mp, (uint8_t)code, ira);
9668		*errorp = -1;
9669		return (dst);
9670
9671	bad_src_route:
9672		/* make sure we clear any indication of a hardware checksum */
9673		DB_CKSUMFLAGS(mp) = 0;
9674		ip_drop_input("ICMP_SOURCE_ROUTE_FAILED", mp, ira->ira_ill);
9675		icmp_unreachable(mp, ICMP_SOURCE_ROUTE_FAILED, ira);
9676		*errorp = -1;
9677		return (dst);
9678	}
9679
9680	/*
9681	 * IP & ICMP info in >=14 msg's ...
9682	 *  - ip fixed part (mib2_ip_t)
9683	 *  - icmp fixed part (mib2_icmp_t)
9684	 *  - ipAddrEntryTable (ip 20)		all IPv4 ipifs
9685	 *  - ipRouteEntryTable (ip 21)		all IPv4 IREs
9686	 *  - ipNetToMediaEntryTable (ip 22)	all IPv4 Neighbor Cache entries
9687	 *  - ipRouteAttributeTable (ip 102)	labeled routes
9688	 *  - ip multicast membership (ip_member_t)
9689	 *  - ip multicast source filtering (ip_grpsrc_t)
9690	 *  - igmp fixed part (struct igmpstat)
9691	 *  - multicast routing stats (struct mrtstat)
9692	 *  - multicast routing vifs (array of struct vifctl)
9693	 *  - multicast routing routes (array of struct mfcctl)
9694	 *  - ip6 fixed part (mib2_ipv6IfStatsEntry_t)
9695	 *					One per ill plus one generic
9696	 *  - icmp6 fixed part (mib2_ipv6IfIcmpEntry_t)
9697	 *					One per ill plus one generic
9698	 *  - ipv6RouteEntry			all IPv6 IREs
9699	 *  - ipv6RouteAttributeTable (ip6 102)	labeled routes
9700	 *  - ipv6NetToMediaEntry		all IPv6 Neighbor Cache entries
9701	 *  - ipv6AddrEntry			all IPv6 ipifs
9702	 *  - ipv6 multicast membership (ipv6_member_t)
9703	 *  - ipv6 multicast source filtering (ipv6_grpsrc_t)
9704	 *
9705	 * NOTE: original mpctl is copied for msg's 2..N, since its ctl part is
9706	 * already filled in by the caller.
9707	 * Return value of 0 indicates that no messages were sent and caller
9708	 * should free mpctl.
9709	 */
9710	int
9711	ip_snmp_get(queue_t *q, mblk_t *mpctl, int level)
9712	{
9713		ip_stack_t	*ipst;
9714		sctp_stack_t	*sctps;
9715
9716		if (q->q_next != NULL) {
9717			ipst = ILLQ_TO_IPST(q);
9718		} else {
9719			ipst = CONNQ_TO_IPST(q);
9720		}
9721		ASSERT(ipst != NULL);
9722		sctps = ipst->ips_netstack->netstack_sctp;
9723
9724		if (mpctl == NULL || mpctl->b_cont == NULL) {
9725			return (0);
9726		}
9727
9728		/*
9729		 * To accommodate the (broken) packet shell's use of the level
9730		 * argument, we let MIB2_TCP and MIB2_UDP make TCP and UDP
9731		 * appear first in the list of MIB items.
9732		 * TBD: We could expand this and use it in netstat so that
9733		 * the kernel doesn't have to produce large tables (connections,
9734		 * routes, etc) when netstat only wants the statistics or a
9735		 * particular table.
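	 *
	 * For example, a MIB2_UDP request skips icmp_snmp_get() and
	 * tcp_snmp_get() below but still returns the UDP data first,
	 * followed by all of the IP/IPv6 tables; any level other than
	 * MIB2_TCP/MIB2_UDP yields the full set.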
9736 */ 9737 if (!(level == MIB2_TCP || level == MIB2_UDP)) { 9738 if ((mpctl = icmp_snmp_get(q, mpctl)) == NULL) { 9739 return (1); 9740 } 9741 } 9742 9743 if (level != MIB2_TCP) { 9744 if ((mpctl = udp_snmp_get(q, mpctl)) == NULL) { 9745 return (1); 9746 } 9747 } 9748 9749 if (level != MIB2_UDP) { 9750 if ((mpctl = tcp_snmp_get(q, mpctl)) == NULL) { 9751 return (1); 9752 } 9753 } 9754 9755 if ((mpctl = ip_snmp_get_mib2_ip_traffic_stats(q, mpctl, 9756 ipst)) == NULL) { 9757 return (1); 9758 } 9759 9760 if ((mpctl = ip_snmp_get_mib2_ip6(q, mpctl, ipst)) == NULL) { 9761 return (1); 9762 } 9763 9764 if ((mpctl = ip_snmp_get_mib2_icmp(q, mpctl, ipst)) == NULL) { 9765 return (1); 9766 } 9767 9768 if ((mpctl = ip_snmp_get_mib2_icmp6(q, mpctl, ipst)) == NULL) { 9769 return (1); 9770 } 9771 9772 if ((mpctl = ip_snmp_get_mib2_igmp(q, mpctl, ipst)) == NULL) { 9773 return (1); 9774 } 9775 9776 if ((mpctl = ip_snmp_get_mib2_multi(q, mpctl, ipst)) == NULL) { 9777 return (1); 9778 } 9779 9780 if ((mpctl = ip_snmp_get_mib2_ip_addr(q, mpctl, ipst)) == NULL) { 9781 return (1); 9782 } 9783 9784 if ((mpctl = ip_snmp_get_mib2_ip6_addr(q, mpctl, ipst)) == NULL) { 9785 return (1); 9786 } 9787 9788 if ((mpctl = ip_snmp_get_mib2_ip_group_mem(q, mpctl, ipst)) == NULL) { 9789 return (1); 9790 } 9791 9792 if ((mpctl = ip_snmp_get_mib2_ip6_group_mem(q, mpctl, ipst)) == NULL) { 9793 return (1); 9794 } 9795 9796 if ((mpctl = ip_snmp_get_mib2_ip_group_src(q, mpctl, ipst)) == NULL) { 9797 return (1); 9798 } 9799 9800 if ((mpctl = ip_snmp_get_mib2_ip6_group_src(q, mpctl, ipst)) == NULL) { 9801 return (1); 9802 } 9803 9804 if ((mpctl = ip_snmp_get_mib2_virt_multi(q, mpctl, ipst)) == NULL) { 9805 return (1); 9806 } 9807 9808 if ((mpctl = ip_snmp_get_mib2_multi_rtable(q, mpctl, ipst)) == NULL) { 9809 return (1); 9810 } 9811 9812 mpctl = ip_snmp_get_mib2_ip_route_media(q, mpctl, level, ipst); 9813 if (mpctl == NULL) 9814 return (1); 9815 9816 mpctl = ip_snmp_get_mib2_ip6_route_media(q, mpctl, level, ipst); 9817 if (mpctl == NULL) 9818 return (1); 9819 9820 if ((mpctl = sctp_snmp_get_mib2(q, mpctl, sctps)) == NULL) { 9821 return (1); 9822 } 9823 if ((mpctl = ip_snmp_get_mib2_ip_dce(q, mpctl, ipst)) == NULL) { 9824 return (1); 9825 } 9826 freemsg(mpctl); 9827 return (1); 9828 } 9829 9830 /* Get global (legacy) IPv4 statistics */ 9831 static mblk_t * 9832 ip_snmp_get_mib2_ip(queue_t *q, mblk_t *mpctl, mib2_ipIfStatsEntry_t *ipmib, 9833 ip_stack_t *ipst) 9834 { 9835 mib2_ip_t old_ip_mib; 9836 struct opthdr *optp; 9837 mblk_t *mp2ctl; 9838 9839 /* 9840 * make a copy of the original message 9841 */ 9842 mp2ctl = copymsg(mpctl); 9843 9844 /* fixed length IP structure... */ 9845 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 9846 optp->level = MIB2_IP; 9847 optp->name = 0; 9848 SET_MIB(old_ip_mib.ipForwarding, 9849 (WE_ARE_FORWARDING(ipst) ? 
1 : 2)); 9850 SET_MIB(old_ip_mib.ipDefaultTTL, 9851 (uint32_t)ipst->ips_ip_def_ttl); 9852 SET_MIB(old_ip_mib.ipReasmTimeout, 9853 ipst->ips_ip_g_frag_timeout); 9854 SET_MIB(old_ip_mib.ipAddrEntrySize, 9855 sizeof (mib2_ipAddrEntry_t)); 9856 SET_MIB(old_ip_mib.ipRouteEntrySize, 9857 sizeof (mib2_ipRouteEntry_t)); 9858 SET_MIB(old_ip_mib.ipNetToMediaEntrySize, 9859 sizeof (mib2_ipNetToMediaEntry_t)); 9860 SET_MIB(old_ip_mib.ipMemberEntrySize, sizeof (ip_member_t)); 9861 SET_MIB(old_ip_mib.ipGroupSourceEntrySize, sizeof (ip_grpsrc_t)); 9862 SET_MIB(old_ip_mib.ipRouteAttributeSize, 9863 sizeof (mib2_ipAttributeEntry_t)); 9864 SET_MIB(old_ip_mib.transportMLPSize, sizeof (mib2_transportMLPEntry_t)); 9865 SET_MIB(old_ip_mib.ipDestEntrySize, sizeof (dest_cache_entry_t)); 9866 9867 /* 9868 * Grab the statistics from the new IP MIB 9869 */ 9870 SET_MIB(old_ip_mib.ipInReceives, 9871 (uint32_t)ipmib->ipIfStatsHCInReceives); 9872 SET_MIB(old_ip_mib.ipInHdrErrors, ipmib->ipIfStatsInHdrErrors); 9873 SET_MIB(old_ip_mib.ipInAddrErrors, ipmib->ipIfStatsInAddrErrors); 9874 SET_MIB(old_ip_mib.ipForwDatagrams, 9875 (uint32_t)ipmib->ipIfStatsHCOutForwDatagrams); 9876 SET_MIB(old_ip_mib.ipInUnknownProtos, 9877 ipmib->ipIfStatsInUnknownProtos); 9878 SET_MIB(old_ip_mib.ipInDiscards, ipmib->ipIfStatsInDiscards); 9879 SET_MIB(old_ip_mib.ipInDelivers, 9880 (uint32_t)ipmib->ipIfStatsHCInDelivers); 9881 SET_MIB(old_ip_mib.ipOutRequests, 9882 (uint32_t)ipmib->ipIfStatsHCOutRequests); 9883 SET_MIB(old_ip_mib.ipOutDiscards, ipmib->ipIfStatsOutDiscards); 9884 SET_MIB(old_ip_mib.ipOutNoRoutes, ipmib->ipIfStatsOutNoRoutes); 9885 SET_MIB(old_ip_mib.ipReasmReqds, ipmib->ipIfStatsReasmReqds); 9886 SET_MIB(old_ip_mib.ipReasmOKs, ipmib->ipIfStatsReasmOKs); 9887 SET_MIB(old_ip_mib.ipReasmFails, ipmib->ipIfStatsReasmFails); 9888 SET_MIB(old_ip_mib.ipFragOKs, ipmib->ipIfStatsOutFragOKs); 9889 SET_MIB(old_ip_mib.ipFragFails, ipmib->ipIfStatsOutFragFails); 9890 SET_MIB(old_ip_mib.ipFragCreates, ipmib->ipIfStatsOutFragCreates); 9891 9892 /* ipRoutingDiscards is not being used */ 9893 SET_MIB(old_ip_mib.ipRoutingDiscards, 0); 9894 SET_MIB(old_ip_mib.tcpInErrs, ipmib->tcpIfStatsInErrs); 9895 SET_MIB(old_ip_mib.udpNoPorts, ipmib->udpIfStatsNoPorts); 9896 SET_MIB(old_ip_mib.ipInCksumErrs, ipmib->ipIfStatsInCksumErrs); 9897 SET_MIB(old_ip_mib.ipReasmDuplicates, 9898 ipmib->ipIfStatsReasmDuplicates); 9899 SET_MIB(old_ip_mib.ipReasmPartDups, ipmib->ipIfStatsReasmPartDups); 9900 SET_MIB(old_ip_mib.ipForwProhibits, ipmib->ipIfStatsForwProhibits); 9901 SET_MIB(old_ip_mib.udpInCksumErrs, ipmib->udpIfStatsInCksumErrs); 9902 SET_MIB(old_ip_mib.udpInOverflows, ipmib->udpIfStatsInOverflows); 9903 SET_MIB(old_ip_mib.rawipInOverflows, 9904 ipmib->rawipIfStatsInOverflows); 9905 9906 SET_MIB(old_ip_mib.ipsecInSucceeded, ipmib->ipsecIfStatsInSucceeded); 9907 SET_MIB(old_ip_mib.ipsecInFailed, ipmib->ipsecIfStatsInFailed); 9908 SET_MIB(old_ip_mib.ipInIPv6, ipmib->ipIfStatsInWrongIPVersion); 9909 SET_MIB(old_ip_mib.ipOutIPv6, ipmib->ipIfStatsOutWrongIPVersion); 9910 SET_MIB(old_ip_mib.ipOutSwitchIPv6, 9911 ipmib->ipIfStatsOutSwitchIPVersion); 9912 9913 if (!snmp_append_data(mpctl->b_cont, (char *)&old_ip_mib, 9914 (int)sizeof (old_ip_mib))) { 9915 ip1dbg(("ip_snmp_get_mib2_ip: failed to allocate %u bytes\n", 9916 (uint_t)sizeof (old_ip_mib))); 9917 } 9918 9919 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont); 9920 ip3dbg(("ip_snmp_get_mib2_ip: level %d, name %d, len %d\n", 9921 (int)optp->level, (int)optp->name, (int)optp->len)); 9922 qreply(q, mpctl); 9923 
return (mp2ctl); 9924 } 9925 9926 /* Per interface IPv4 statistics */ 9927 static mblk_t * 9928 ip_snmp_get_mib2_ip_traffic_stats(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst) 9929 { 9930 struct opthdr *optp; 9931 mblk_t *mp2ctl; 9932 ill_t *ill; 9933 ill_walk_context_t ctx; 9934 mblk_t *mp_tail = NULL; 9935 mib2_ipIfStatsEntry_t global_ip_mib; 9936 9937 /* 9938 * Make a copy of the original message 9939 */ 9940 mp2ctl = copymsg(mpctl); 9941 9942 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 9943 optp->level = MIB2_IP; 9944 optp->name = MIB2_IP_TRAFFIC_STATS; 9945 /* Include "unknown interface" ip_mib */ 9946 ipst->ips_ip_mib.ipIfStatsIPVersion = MIB2_INETADDRESSTYPE_ipv4; 9947 ipst->ips_ip_mib.ipIfStatsIfIndex = 9948 MIB2_UNKNOWN_INTERFACE; /* Flag to netstat */ 9949 SET_MIB(ipst->ips_ip_mib.ipIfStatsForwarding, 9950 (ipst->ips_ip_g_forward ? 1 : 2)); 9951 SET_MIB(ipst->ips_ip_mib.ipIfStatsDefaultTTL, 9952 (uint32_t)ipst->ips_ip_def_ttl); 9953 SET_MIB(ipst->ips_ip_mib.ipIfStatsEntrySize, 9954 sizeof (mib2_ipIfStatsEntry_t)); 9955 SET_MIB(ipst->ips_ip_mib.ipIfStatsAddrEntrySize, 9956 sizeof (mib2_ipAddrEntry_t)); 9957 SET_MIB(ipst->ips_ip_mib.ipIfStatsRouteEntrySize, 9958 sizeof (mib2_ipRouteEntry_t)); 9959 SET_MIB(ipst->ips_ip_mib.ipIfStatsNetToMediaEntrySize, 9960 sizeof (mib2_ipNetToMediaEntry_t)); 9961 SET_MIB(ipst->ips_ip_mib.ipIfStatsMemberEntrySize, 9962 sizeof (ip_member_t)); 9963 SET_MIB(ipst->ips_ip_mib.ipIfStatsGroupSourceEntrySize, 9964 sizeof (ip_grpsrc_t)); 9965 9966 if (!snmp_append_data2(mpctl->b_cont, &mp_tail, 9967 (char *)&ipst->ips_ip_mib, (int)sizeof (ipst->ips_ip_mib))) { 9968 ip1dbg(("ip_snmp_get_mib2_ip_traffic_stats: " 9969 "failed to allocate %u bytes\n", 9970 (uint_t)sizeof (ipst->ips_ip_mib))); 9971 } 9972 9973 bcopy(&ipst->ips_ip_mib, &global_ip_mib, sizeof (global_ip_mib)); 9974 9975 rw_enter(&ipst->ips_ill_g_lock, RW_READER); 9976 ill = ILL_START_WALK_V4(&ctx, ipst); 9977 for (; ill != NULL; ill = ill_next(&ctx, ill)) { 9978 ill->ill_ip_mib->ipIfStatsIfIndex = 9979 ill->ill_phyint->phyint_ifindex; 9980 SET_MIB(ill->ill_ip_mib->ipIfStatsForwarding, 9981 (ipst->ips_ip_g_forward ? 
1 : 2)); 9982 SET_MIB(ill->ill_ip_mib->ipIfStatsDefaultTTL, 9983 (uint32_t)ipst->ips_ip_def_ttl); 9984 9985 ip_mib2_add_ip_stats(&global_ip_mib, ill->ill_ip_mib); 9986 if (!snmp_append_data2(mpctl->b_cont, &mp_tail, 9987 (char *)ill->ill_ip_mib, 9988 (int)sizeof (*ill->ill_ip_mib))) { 9989 ip1dbg(("ip_snmp_get_mib2_ip_traffic_stats: " 9990 "failed to allocate %u bytes\n", 9991 (uint_t)sizeof (*ill->ill_ip_mib))); 9992 } 9993 } 9994 rw_exit(&ipst->ips_ill_g_lock); 9995 9996 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont); 9997 ip3dbg(("ip_snmp_get_mib2_ip_traffic_stats: " 9998 "level %d, name %d, len %d\n", 9999 (int)optp->level, (int)optp->name, (int)optp->len)); 10000 qreply(q, mpctl); 10001 10002 if (mp2ctl == NULL) 10003 return (NULL); 10004 10005 return (ip_snmp_get_mib2_ip(q, mp2ctl, &global_ip_mib, ipst)); 10006 } 10007 10008 /* Global IPv4 ICMP statistics */ 10009 static mblk_t * 10010 ip_snmp_get_mib2_icmp(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst) 10011 { 10012 struct opthdr *optp; 10013 mblk_t *mp2ctl; 10014 10015 /* 10016 * Make a copy of the original message 10017 */ 10018 mp2ctl = copymsg(mpctl); 10019 10020 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 10021 optp->level = MIB2_ICMP; 10022 optp->name = 0; 10023 if (!snmp_append_data(mpctl->b_cont, (char *)&ipst->ips_icmp_mib, 10024 (int)sizeof (ipst->ips_icmp_mib))) { 10025 ip1dbg(("ip_snmp_get_mib2_icmp: failed to allocate %u bytes\n", 10026 (uint_t)sizeof (ipst->ips_icmp_mib))); 10027 } 10028 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont); 10029 ip3dbg(("ip_snmp_get_mib2_icmp: level %d, name %d, len %d\n", 10030 (int)optp->level, (int)optp->name, (int)optp->len)); 10031 qreply(q, mpctl); 10032 return (mp2ctl); 10033 } 10034 10035 /* Global IPv4 IGMP statistics */ 10036 static mblk_t * 10037 ip_snmp_get_mib2_igmp(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst) 10038 { 10039 struct opthdr *optp; 10040 mblk_t *mp2ctl; 10041 10042 /* 10043 * make a copy of the original message 10044 */ 10045 mp2ctl = copymsg(mpctl); 10046 10047 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 10048 optp->level = EXPER_IGMP; 10049 optp->name = 0; 10050 if (!snmp_append_data(mpctl->b_cont, (char *)&ipst->ips_igmpstat, 10051 (int)sizeof (ipst->ips_igmpstat))) { 10052 ip1dbg(("ip_snmp_get_mib2_igmp: failed to allocate %u bytes\n", 10053 (uint_t)sizeof (ipst->ips_igmpstat))); 10054 } 10055 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont); 10056 ip3dbg(("ip_snmp_get_mib2_igmp: level %d, name %d, len %d\n", 10057 (int)optp->level, (int)optp->name, (int)optp->len)); 10058 qreply(q, mpctl); 10059 return (mp2ctl); 10060 } 10061 10062 /* Global IPv4 Multicast Routing statistics */ 10063 static mblk_t * 10064 ip_snmp_get_mib2_multi(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst) 10065 { 10066 struct opthdr *optp; 10067 mblk_t *mp2ctl; 10068 10069 /* 10070 * make a copy of the original message 10071 */ 10072 mp2ctl = copymsg(mpctl); 10073 10074 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 10075 optp->level = EXPER_DVMRP; 10076 optp->name = 0; 10077 if (!ip_mroute_stats(mpctl->b_cont, ipst)) { 10078 ip0dbg(("ip_mroute_stats: failed\n")); 10079 } 10080 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont); 10081 ip3dbg(("ip_snmp_get_mib2_multi: level %d, name %d, len %d\n", 10082 (int)optp->level, (int)optp->name, (int)optp->len)); 10083 qreply(q, mpctl); 10084 return (mp2ctl); 10085 } 10086 10087 /* IPv4 address information */ 10088 static mblk_t * 10089 ip_snmp_get_mib2_ip_addr(queue_t *q, 
mblk_t *mpctl, ip_stack_t *ipst) 10090 { 10091 struct opthdr *optp; 10092 mblk_t *mp2ctl; 10093 mblk_t *mp_tail = NULL; 10094 ill_t *ill; 10095 ipif_t *ipif; 10096 uint_t bitval; 10097 mib2_ipAddrEntry_t mae; 10098 zoneid_t zoneid; 10099 ill_walk_context_t ctx; 10100 10101 /* 10102 * make a copy of the original message 10103 */ 10104 mp2ctl = copymsg(mpctl); 10105 10106 /* ipAddrEntryTable */ 10107 10108 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 10109 optp->level = MIB2_IP; 10110 optp->name = MIB2_IP_ADDR; 10111 zoneid = Q_TO_CONN(q)->conn_zoneid; 10112 10113 rw_enter(&ipst->ips_ill_g_lock, RW_READER); 10114 ill = ILL_START_WALK_V4(&ctx, ipst); 10115 for (; ill != NULL; ill = ill_next(&ctx, ill)) { 10116 for (ipif = ill->ill_ipif; ipif != NULL; 10117 ipif = ipif->ipif_next) { 10118 if (ipif->ipif_zoneid != zoneid && 10119 ipif->ipif_zoneid != ALL_ZONES) 10120 continue; 10121 /* Sum of count from dead IRE_LO* and our current */ 10122 mae.ipAdEntInfo.ae_ibcnt = ipif->ipif_ib_pkt_count; 10123 if (ipif->ipif_ire_local != NULL) { 10124 mae.ipAdEntInfo.ae_ibcnt += 10125 ipif->ipif_ire_local->ire_ib_pkt_count; 10126 } 10127 mae.ipAdEntInfo.ae_obcnt = 0; 10128 mae.ipAdEntInfo.ae_focnt = 0; 10129 10130 ipif_get_name(ipif, mae.ipAdEntIfIndex.o_bytes, 10131 OCTET_LENGTH); 10132 mae.ipAdEntIfIndex.o_length = 10133 mi_strlen(mae.ipAdEntIfIndex.o_bytes); 10134 mae.ipAdEntAddr = ipif->ipif_lcl_addr; 10135 mae.ipAdEntNetMask = ipif->ipif_net_mask; 10136 mae.ipAdEntInfo.ae_subnet = ipif->ipif_subnet; 10137 mae.ipAdEntInfo.ae_subnet_len = 10138 ip_mask_to_plen(ipif->ipif_net_mask); 10139 mae.ipAdEntInfo.ae_src_addr = ipif->ipif_lcl_addr; 10140 for (bitval = 1; 10141 bitval && 10142 !(bitval & ipif->ipif_brd_addr); 10143 bitval <<= 1) 10144 noop; 10145 mae.ipAdEntBcastAddr = bitval; 10146 mae.ipAdEntReasmMaxSize = IP_MAXPACKET; 10147 mae.ipAdEntInfo.ae_mtu = ipif->ipif_ill->ill_mtu; 10148 mae.ipAdEntInfo.ae_metric = ipif->ipif_metric; 10149 mae.ipAdEntInfo.ae_broadcast_addr = 10150 ipif->ipif_brd_addr; 10151 mae.ipAdEntInfo.ae_pp_dst_addr = 10152 ipif->ipif_pp_dst_addr; 10153 mae.ipAdEntInfo.ae_flags = ipif->ipif_flags | 10154 ill->ill_flags | ill->ill_phyint->phyint_flags; 10155 mae.ipAdEntRetransmitTime = 10156 ill->ill_reachable_retrans_time; 10157 10158 if (!snmp_append_data2(mpctl->b_cont, &mp_tail, 10159 (char *)&mae, (int)sizeof (mib2_ipAddrEntry_t))) { 10160 ip1dbg(("ip_snmp_get_mib2_ip_addr: failed to " 10161 "allocate %u bytes\n", 10162 (uint_t)sizeof (mib2_ipAddrEntry_t))); 10163 } 10164 } 10165 } 10166 rw_exit(&ipst->ips_ill_g_lock); 10167 10168 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont); 10169 ip3dbg(("ip_snmp_get_mib2_ip_addr: level %d, name %d, len %d\n", 10170 (int)optp->level, (int)optp->name, (int)optp->len)); 10171 qreply(q, mpctl); 10172 return (mp2ctl); 10173 } 10174 10175 /* IPv6 address information */ 10176 static mblk_t * 10177 ip_snmp_get_mib2_ip6_addr(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst) 10178 { 10179 struct opthdr *optp; 10180 mblk_t *mp2ctl; 10181 mblk_t *mp_tail = NULL; 10182 ill_t *ill; 10183 ipif_t *ipif; 10184 mib2_ipv6AddrEntry_t mae6; 10185 zoneid_t zoneid; 10186 ill_walk_context_t ctx; 10187 10188 /* 10189 * make a copy of the original message 10190 */ 10191 mp2ctl = copymsg(mpctl); 10192 10193 /* ipv6AddrEntryTable */ 10194 10195 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 10196 optp->level = MIB2_IP6; 10197 optp->name = MIB2_IP6_ADDR; 10198 zoneid = Q_TO_CONN(q)->conn_zoneid; 10199 10200 
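	/*
	 * Walk every IPv6 ill and its ipifs, emitting one
	 * mib2_ipv6AddrEntry_t per address visible to this zone;
	 * ips_ill_g_lock is held as reader for the whole walk.
	 */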
rw_enter(&ipst->ips_ill_g_lock, RW_READER); 10201 ill = ILL_START_WALK_V6(&ctx, ipst); 10202 for (; ill != NULL; ill = ill_next(&ctx, ill)) { 10203 for (ipif = ill->ill_ipif; ipif != NULL; 10204 ipif = ipif->ipif_next) { 10205 if (ipif->ipif_zoneid != zoneid && 10206 ipif->ipif_zoneid != ALL_ZONES) 10207 continue; 10208 /* Sum of count from dead IRE_LO* and our current */ 10209 mae6.ipv6AddrInfo.ae_ibcnt = ipif->ipif_ib_pkt_count; 10210 if (ipif->ipif_ire_local != NULL) { 10211 mae6.ipv6AddrInfo.ae_ibcnt += 10212 ipif->ipif_ire_local->ire_ib_pkt_count; 10213 } 10214 mae6.ipv6AddrInfo.ae_obcnt = 0; 10215 mae6.ipv6AddrInfo.ae_focnt = 0; 10216 10217 ipif_get_name(ipif, mae6.ipv6AddrIfIndex.o_bytes, 10218 OCTET_LENGTH); 10219 mae6.ipv6AddrIfIndex.o_length = 10220 mi_strlen(mae6.ipv6AddrIfIndex.o_bytes); 10221 mae6.ipv6AddrAddress = ipif->ipif_v6lcl_addr; 10222 mae6.ipv6AddrPfxLength = 10223 ip_mask_to_plen_v6(&ipif->ipif_v6net_mask); 10224 mae6.ipv6AddrInfo.ae_subnet = ipif->ipif_v6subnet; 10225 mae6.ipv6AddrInfo.ae_subnet_len = 10226 mae6.ipv6AddrPfxLength; 10227 mae6.ipv6AddrInfo.ae_src_addr = ipif->ipif_v6lcl_addr; 10228 10229 /* Type: stateless(1), stateful(2), unknown(3) */ 10230 if (ipif->ipif_flags & IPIF_ADDRCONF) 10231 mae6.ipv6AddrType = 1; 10232 else 10233 mae6.ipv6AddrType = 2; 10234 /* Anycast: true(1), false(2) */ 10235 if (ipif->ipif_flags & IPIF_ANYCAST) 10236 mae6.ipv6AddrAnycastFlag = 1; 10237 else 10238 mae6.ipv6AddrAnycastFlag = 2; 10239 10240 /* 10241 * Address status: preferred(1), deprecated(2), 10242 * invalid(3), inaccessible(4), unknown(5) 10243 */ 10244 if (ipif->ipif_flags & IPIF_NOLOCAL) 10245 mae6.ipv6AddrStatus = 3; 10246 else if (ipif->ipif_flags & IPIF_DEPRECATED) 10247 mae6.ipv6AddrStatus = 2; 10248 else 10249 mae6.ipv6AddrStatus = 1; 10250 mae6.ipv6AddrInfo.ae_mtu = ipif->ipif_ill->ill_mtu; 10251 mae6.ipv6AddrInfo.ae_metric = ipif->ipif_metric; 10252 mae6.ipv6AddrInfo.ae_pp_dst_addr = 10253 ipif->ipif_v6pp_dst_addr; 10254 mae6.ipv6AddrInfo.ae_flags = ipif->ipif_flags | 10255 ill->ill_flags | ill->ill_phyint->phyint_flags; 10256 mae6.ipv6AddrReasmMaxSize = IP_MAXPACKET; 10257 mae6.ipv6AddrIdentifier = ill->ill_token; 10258 mae6.ipv6AddrIdentifierLen = ill->ill_token_length; 10259 mae6.ipv6AddrReachableTime = ill->ill_reachable_time; 10260 mae6.ipv6AddrRetransmitTime = 10261 ill->ill_reachable_retrans_time; 10262 if (!snmp_append_data2(mpctl->b_cont, &mp_tail, 10263 (char *)&mae6, 10264 (int)sizeof (mib2_ipv6AddrEntry_t))) { 10265 ip1dbg(("ip_snmp_get_mib2_ip6_addr: failed to " 10266 "allocate %u bytes\n", 10267 (uint_t)sizeof (mib2_ipv6AddrEntry_t))); 10268 } 10269 } 10270 } 10271 rw_exit(&ipst->ips_ill_g_lock); 10272 10273 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont); 10274 ip3dbg(("ip_snmp_get_mib2_ip6_addr: level %d, name %d, len %d\n", 10275 (int)optp->level, (int)optp->name, (int)optp->len)); 10276 qreply(q, mpctl); 10277 return (mp2ctl); 10278 } 10279 10280 /* IPv4 multicast group membership. 
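 * Reports one ip_member_t per ilm visible to the caller's zone, mapping
 * ilm_ifaddr back to an ipif name where one matches.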
*/ 10281 static mblk_t * 10282 ip_snmp_get_mib2_ip_group_mem(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst) 10283 { 10284 struct opthdr *optp; 10285 mblk_t *mp2ctl; 10286 ill_t *ill; 10287 ipif_t *ipif; 10288 ilm_t *ilm; 10289 ip_member_t ipm; 10290 mblk_t *mp_tail = NULL; 10291 ill_walk_context_t ctx; 10292 zoneid_t zoneid; 10293 10294 /* 10295 * make a copy of the original message 10296 */ 10297 mp2ctl = copymsg(mpctl); 10298 zoneid = Q_TO_CONN(q)->conn_zoneid; 10299 10300 /* ipGroupMember table */ 10301 optp = (struct opthdr *)&mpctl->b_rptr[ 10302 sizeof (struct T_optmgmt_ack)]; 10303 optp->level = MIB2_IP; 10304 optp->name = EXPER_IP_GROUP_MEMBERSHIP; 10305 10306 rw_enter(&ipst->ips_ill_g_lock, RW_READER); 10307 ill = ILL_START_WALK_V4(&ctx, ipst); 10308 for (; ill != NULL; ill = ill_next(&ctx, ill)) { 10309 /* Make sure the ill isn't going away. */ 10310 if (!ill_check_and_refhold(ill)) 10311 continue; 10312 rw_exit(&ipst->ips_ill_g_lock); 10313 rw_enter(&ill->ill_mcast_lock, RW_READER); 10314 for (ilm = ill->ill_ilm; ilm; ilm = ilm->ilm_next) { 10315 if (ilm->ilm_zoneid != zoneid && 10316 ilm->ilm_zoneid != ALL_ZONES) 10317 continue; 10318 10319 /* Is there an ipif for ilm_ifaddr? */ 10320 for (ipif = ill->ill_ipif; ipif != NULL; 10321 ipif = ipif->ipif_next) { 10322 if (!IPIF_IS_CONDEMNED(ipif) && 10323 ipif->ipif_lcl_addr == ilm->ilm_ifaddr && 10324 ilm->ilm_ifaddr != INADDR_ANY) 10325 break; 10326 } 10327 if (ipif != NULL) { 10328 ipif_get_name(ipif, 10329 ipm.ipGroupMemberIfIndex.o_bytes, 10330 OCTET_LENGTH); 10331 } else { 10332 ill_get_name(ill, 10333 ipm.ipGroupMemberIfIndex.o_bytes, 10334 OCTET_LENGTH); 10335 } 10336 ipm.ipGroupMemberIfIndex.o_length = 10337 mi_strlen(ipm.ipGroupMemberIfIndex.o_bytes); 10338 10339 ipm.ipGroupMemberAddress = ilm->ilm_addr; 10340 ipm.ipGroupMemberRefCnt = ilm->ilm_refcnt; 10341 ipm.ipGroupMemberFilterMode = ilm->ilm_fmode; 10342 if (!snmp_append_data2(mpctl->b_cont, &mp_tail, 10343 (char *)&ipm, (int)sizeof (ipm))) { 10344 ip1dbg(("ip_snmp_get_mib2_ip_group: " 10345 "failed to allocate %u bytes\n", 10346 (uint_t)sizeof (ipm))); 10347 } 10348 } 10349 rw_exit(&ill->ill_mcast_lock); 10350 ill_refrele(ill); 10351 rw_enter(&ipst->ips_ill_g_lock, RW_READER); 10352 } 10353 rw_exit(&ipst->ips_ill_g_lock); 10354 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont); 10355 ip3dbg(("ip_snmp_get: level %d, name %d, len %d\n", 10356 (int)optp->level, (int)optp->name, (int)optp->len)); 10357 qreply(q, mpctl); 10358 return (mp2ctl); 10359 } 10360 10361 /* IPv6 multicast group membership. */ 10362 static mblk_t * 10363 ip_snmp_get_mib2_ip6_group_mem(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst) 10364 { 10365 struct opthdr *optp; 10366 mblk_t *mp2ctl; 10367 ill_t *ill; 10368 ilm_t *ilm; 10369 ipv6_member_t ipm6; 10370 mblk_t *mp_tail = NULL; 10371 ill_walk_context_t ctx; 10372 zoneid_t zoneid; 10373 10374 /* 10375 * make a copy of the original message 10376 */ 10377 mp2ctl = copymsg(mpctl); 10378 zoneid = Q_TO_CONN(q)->conn_zoneid; 10379 10380 /* ip6GroupMember table */ 10381 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 10382 optp->level = MIB2_IP6; 10383 optp->name = EXPER_IP6_GROUP_MEMBERSHIP; 10384 10385 rw_enter(&ipst->ips_ill_g_lock, RW_READER); 10386 ill = ILL_START_WALK_V6(&ctx, ipst); 10387 for (; ill != NULL; ill = ill_next(&ctx, ill)) { 10388 /* Make sure the ill isn't going away. 
*/ 10389 if (!ill_check_and_refhold(ill)) 10390 continue; 10391 rw_exit(&ipst->ips_ill_g_lock); 10392 /* 10393 * Normally we don't have any members on under IPMP interfaces. 10394 * We report them as a debugging aid. 10395 */ 10396 rw_enter(&ill->ill_mcast_lock, RW_READER); 10397 ipm6.ipv6GroupMemberIfIndex = ill->ill_phyint->phyint_ifindex; 10398 for (ilm = ill->ill_ilm; ilm; ilm = ilm->ilm_next) { 10399 if (ilm->ilm_zoneid != zoneid && 10400 ilm->ilm_zoneid != ALL_ZONES) 10401 continue; /* not this zone */ 10402 ipm6.ipv6GroupMemberAddress = ilm->ilm_v6addr; 10403 ipm6.ipv6GroupMemberRefCnt = ilm->ilm_refcnt; 10404 ipm6.ipv6GroupMemberFilterMode = ilm->ilm_fmode; 10405 if (!snmp_append_data2(mpctl->b_cont, 10406 &mp_tail, 10407 (char *)&ipm6, (int)sizeof (ipm6))) { 10408 ip1dbg(("ip_snmp_get_mib2_ip6_group: " 10409 "failed to allocate %u bytes\n", 10410 (uint_t)sizeof (ipm6))); 10411 } 10412 } 10413 rw_exit(&ill->ill_mcast_lock); 10414 ill_refrele(ill); 10415 rw_enter(&ipst->ips_ill_g_lock, RW_READER); 10416 } 10417 rw_exit(&ipst->ips_ill_g_lock); 10418 10419 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont); 10420 ip3dbg(("ip_snmp_get: level %d, name %d, len %d\n", 10421 (int)optp->level, (int)optp->name, (int)optp->len)); 10422 qreply(q, mpctl); 10423 return (mp2ctl); 10424 } 10425 10426 /* IP multicast filtered sources */ 10427 static mblk_t * 10428 ip_snmp_get_mib2_ip_group_src(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst) 10429 { 10430 struct opthdr *optp; 10431 mblk_t *mp2ctl; 10432 ill_t *ill; 10433 ipif_t *ipif; 10434 ilm_t *ilm; 10435 ip_grpsrc_t ips; 10436 mblk_t *mp_tail = NULL; 10437 ill_walk_context_t ctx; 10438 zoneid_t zoneid; 10439 int i; 10440 slist_t *sl; 10441 10442 /* 10443 * make a copy of the original message 10444 */ 10445 mp2ctl = copymsg(mpctl); 10446 zoneid = Q_TO_CONN(q)->conn_zoneid; 10447 10448 /* ipGroupSource table */ 10449 optp = (struct opthdr *)&mpctl->b_rptr[ 10450 sizeof (struct T_optmgmt_ack)]; 10451 optp->level = MIB2_IP; 10452 optp->name = EXPER_IP_GROUP_SOURCES; 10453 10454 rw_enter(&ipst->ips_ill_g_lock, RW_READER); 10455 ill = ILL_START_WALK_V4(&ctx, ipst); 10456 for (; ill != NULL; ill = ill_next(&ctx, ill)) { 10457 /* Make sure the ill isn't going away. */ 10458 if (!ill_check_and_refhold(ill)) 10459 continue; 10460 rw_exit(&ipst->ips_ill_g_lock); 10461 rw_enter(&ill->ill_mcast_lock, RW_READER); 10462 for (ilm = ill->ill_ilm; ilm; ilm = ilm->ilm_next) { 10463 sl = ilm->ilm_filter; 10464 if (ilm->ilm_zoneid != zoneid && 10465 ilm->ilm_zoneid != ALL_ZONES) 10466 continue; 10467 if (SLIST_IS_EMPTY(sl)) 10468 continue; 10469 10470 /* Is there an ipif for ilm_ifaddr? 
*/ 10471 for (ipif = ill->ill_ipif; ipif != NULL; 10472 ipif = ipif->ipif_next) { 10473 if (!IPIF_IS_CONDEMNED(ipif) && 10474 ipif->ipif_lcl_addr == ilm->ilm_ifaddr && 10475 ilm->ilm_ifaddr != INADDR_ANY) 10476 break; 10477 } 10478 if (ipif != NULL) { 10479 ipif_get_name(ipif, 10480 ips.ipGroupSourceIfIndex.o_bytes, 10481 OCTET_LENGTH); 10482 } else { 10483 ill_get_name(ill, 10484 ips.ipGroupSourceIfIndex.o_bytes, 10485 OCTET_LENGTH); 10486 } 10487 ips.ipGroupSourceIfIndex.o_length = 10488 mi_strlen(ips.ipGroupSourceIfIndex.o_bytes); 10489 10490 ips.ipGroupSourceGroup = ilm->ilm_addr; 10491 for (i = 0; i < sl->sl_numsrc; i++) { 10492 if (!IN6_IS_ADDR_V4MAPPED(&sl->sl_addr[i])) 10493 continue; 10494 IN6_V4MAPPED_TO_IPADDR(&sl->sl_addr[i], 10495 ips.ipGroupSourceAddress); 10496 if (snmp_append_data2(mpctl->b_cont, &mp_tail, 10497 (char *)&ips, (int)sizeof (ips)) == 0) { 10498 ip1dbg(("ip_snmp_get_mib2_ip_group_src:" 10499 " failed to allocate %u bytes\n", 10500 (uint_t)sizeof (ips))); 10501 } 10502 } 10503 } 10504 rw_exit(&ill->ill_mcast_lock); 10505 ill_refrele(ill); 10506 rw_enter(&ipst->ips_ill_g_lock, RW_READER); 10507 } 10508 rw_exit(&ipst->ips_ill_g_lock); 10509 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont); 10510 ip3dbg(("ip_snmp_get: level %d, name %d, len %d\n", 10511 (int)optp->level, (int)optp->name, (int)optp->len)); 10512 qreply(q, mpctl); 10513 return (mp2ctl); 10514 } 10515 10516 /* IPv6 multicast filtered sources. */ 10517 static mblk_t * 10518 ip_snmp_get_mib2_ip6_group_src(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst) 10519 { 10520 struct opthdr *optp; 10521 mblk_t *mp2ctl; 10522 ill_t *ill; 10523 ilm_t *ilm; 10524 ipv6_grpsrc_t ips6; 10525 mblk_t *mp_tail = NULL; 10526 ill_walk_context_t ctx; 10527 zoneid_t zoneid; 10528 int i; 10529 slist_t *sl; 10530 10531 /* 10532 * make a copy of the original message 10533 */ 10534 mp2ctl = copymsg(mpctl); 10535 zoneid = Q_TO_CONN(q)->conn_zoneid; 10536 10537 /* ip6GroupMember table */ 10538 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 10539 optp->level = MIB2_IP6; 10540 optp->name = EXPER_IP6_GROUP_SOURCES; 10541 10542 rw_enter(&ipst->ips_ill_g_lock, RW_READER); 10543 ill = ILL_START_WALK_V6(&ctx, ipst); 10544 for (; ill != NULL; ill = ill_next(&ctx, ill)) { 10545 /* Make sure the ill isn't going away. */ 10546 if (!ill_check_and_refhold(ill)) 10547 continue; 10548 rw_exit(&ipst->ips_ill_g_lock); 10549 /* 10550 * Normally we don't have any members on under IPMP interfaces. 10551 * We report them as a debugging aid. 
10552 */ 10553 rw_enter(&ill->ill_mcast_lock, RW_READER); 10554 ips6.ipv6GroupSourceIfIndex = ill->ill_phyint->phyint_ifindex; 10555 for (ilm = ill->ill_ilm; ilm; ilm = ilm->ilm_next) { 10556 sl = ilm->ilm_filter; 10557 if (ilm->ilm_zoneid != zoneid && 10558 ilm->ilm_zoneid != ALL_ZONES) 10559 continue; 10560 if (SLIST_IS_EMPTY(sl)) 10561 continue; 10562 ips6.ipv6GroupSourceGroup = ilm->ilm_v6addr; 10563 for (i = 0; i < sl->sl_numsrc; i++) { 10564 ips6.ipv6GroupSourceAddress = sl->sl_addr[i]; 10565 if (!snmp_append_data2(mpctl->b_cont, &mp_tail, 10566 (char *)&ips6, (int)sizeof (ips6))) { 10567 ip1dbg(("ip_snmp_get_mib2_ip6_" 10568 "group_src: failed to allocate " 10569 "%u bytes\n", 10570 (uint_t)sizeof (ips6))); 10571 } 10572 } 10573 } 10574 rw_exit(&ill->ill_mcast_lock); 10575 ill_refrele(ill); 10576 rw_enter(&ipst->ips_ill_g_lock, RW_READER); 10577 } 10578 rw_exit(&ipst->ips_ill_g_lock); 10579 10580 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont); 10581 ip3dbg(("ip_snmp_get: level %d, name %d, len %d\n", 10582 (int)optp->level, (int)optp->name, (int)optp->len)); 10583 qreply(q, mpctl); 10584 return (mp2ctl); 10585 } 10586 10587 /* Multicast routing virtual interface table. */ 10588 static mblk_t * 10589 ip_snmp_get_mib2_virt_multi(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst) 10590 { 10591 struct opthdr *optp; 10592 mblk_t *mp2ctl; 10593 10594 /* 10595 * make a copy of the original message 10596 */ 10597 mp2ctl = copymsg(mpctl); 10598 10599 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 10600 optp->level = EXPER_DVMRP; 10601 optp->name = EXPER_DVMRP_VIF; 10602 if (!ip_mroute_vif(mpctl->b_cont, ipst)) { 10603 ip0dbg(("ip_mroute_vif: failed\n")); 10604 } 10605 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont); 10606 ip3dbg(("ip_snmp_get_mib2_virt_multi: level %d, name %d, len %d\n", 10607 (int)optp->level, (int)optp->name, (int)optp->len)); 10608 qreply(q, mpctl); 10609 return (mp2ctl); 10610 } 10611 10612 /* Multicast routing table. */ 10613 static mblk_t * 10614 ip_snmp_get_mib2_multi_rtable(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst) 10615 { 10616 struct opthdr *optp; 10617 mblk_t *mp2ctl; 10618 10619 /* 10620 * make a copy of the original message 10621 */ 10622 mp2ctl = copymsg(mpctl); 10623 10624 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 10625 optp->level = EXPER_DVMRP; 10626 optp->name = EXPER_DVMRP_MRT; 10627 if (!ip_mroute_mrt(mpctl->b_cont, ipst)) { 10628 ip0dbg(("ip_mroute_mrt: failed\n")); 10629 } 10630 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont); 10631 ip3dbg(("ip_snmp_get_mib2_multi_rtable: level %d, name %d, len %d\n", 10632 (int)optp->level, (int)optp->name, (int)optp->len)); 10633 qreply(q, mpctl); 10634 return (mp2ctl); 10635 } 10636 10637 /* 10638 * Return ipRouteEntryTable, ipNetToMediaEntryTable, and ipRouteAttributeTable 10639 * in one IRE walk. 
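 *
 * The single ire_walk_v4() fans out into three control messages: mpctl
 * carries MIB2_IP_ROUTE, mp3ctl carries MIB2_IP_MEDIA (filled in by the
 * ncec_walk() that follows), and mp4ctl carries EXPER_IP_RTATTR, which
 * is only sent upstream when non-empty.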
10640	 */
10641	static mblk_t *
10642	ip_snmp_get_mib2_ip_route_media(queue_t *q, mblk_t *mpctl, int level,
10643	    ip_stack_t *ipst)
10644	{
10645		struct opthdr	*optp;
10646		mblk_t		*mp2ctl;	/* Returned */
10647		mblk_t		*mp3ctl;	/* nettomedia */
10648		mblk_t		*mp4ctl;	/* routeattrs */
10649		iproutedata_t	ird;
10650		zoneid_t	zoneid;
10651
10652		/*
10653		 * make copies of the original message
10654		 *	- mp2ctl is returned unchanged to the caller for his use
10655		 *	- mpctl is sent upstream as ipRouteEntryTable
10656		 *	- mp3ctl is sent upstream as ipNetToMediaEntryTable
10657		 *	- mp4ctl is sent upstream as ipRouteAttributeTable
10658		 */
10659		mp2ctl = copymsg(mpctl);
10660		mp3ctl = copymsg(mpctl);
10661		mp4ctl = copymsg(mpctl);
10662		if (mp3ctl == NULL || mp4ctl == NULL) {
10663			freemsg(mp4ctl);
10664			freemsg(mp3ctl);
10665			freemsg(mp2ctl);
10666			freemsg(mpctl);
10667			return (NULL);
10668		}
10669
10670		bzero(&ird, sizeof (ird));
10671
10672		ird.ird_route.lp_head = mpctl->b_cont;
10673		ird.ird_netmedia.lp_head = mp3ctl->b_cont;
10674		ird.ird_attrs.lp_head = mp4ctl->b_cont;
10675		/*
10676		 * If the level has been set to the special EXPER_IP_AND_ALL_IRES
10677		 * value, then also include ire_testhidden IREs and IRE_IF_CLONE.
10678		 * This is intended as a temporary solution until a proper MIB API
10679		 * is provided that supports complete filtering/caller-opt-in.
10680		 */
10681		if (level == EXPER_IP_AND_ALL_IRES)
10682			ird.ird_flags |= IRD_REPORT_ALL;
10683
10684		zoneid = Q_TO_CONN(q)->conn_zoneid;
10685		ire_walk_v4(ip_snmp_get2_v4, &ird, zoneid, ipst);
10686
10687		/* ipRouteEntryTable in mpctl */
10688		optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10689		optp->level = MIB2_IP;
10690		optp->name = MIB2_IP_ROUTE;
10691		optp->len = msgdsize(ird.ird_route.lp_head);
10692		ip3dbg(("ip_snmp_get_mib2_ip_route_media: level %d, name %d, len %d\n",
10693		    (int)optp->level, (int)optp->name, (int)optp->len));
10694		qreply(q, mpctl);
10695
10696		/* ipNetToMediaEntryTable in mp3ctl */
10697		ncec_walk(NULL, ip_snmp_get2_v4_media, &ird, ipst);
10698
10699		optp = (struct opthdr *)&mp3ctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10700		optp->level = MIB2_IP;
10701		optp->name = MIB2_IP_MEDIA;
10702		optp->len = msgdsize(ird.ird_netmedia.lp_head);
10703		ip3dbg(("ip_snmp_get_mib2_ip_route_media: level %d, name %d, len %d\n",
10704		    (int)optp->level, (int)optp->name, (int)optp->len));
10705		qreply(q, mp3ctl);
10706
10707		/* ipRouteAttributeTable in mp4ctl */
10708		optp = (struct opthdr *)&mp4ctl->b_rptr[sizeof (struct T_optmgmt_ack)];
10709		optp->level = MIB2_IP;
10710		optp->name = EXPER_IP_RTATTR;
10711		optp->len = msgdsize(ird.ird_attrs.lp_head);
10712		ip3dbg(("ip_snmp_get_mib2_ip_route_media: level %d, name %d, len %d\n",
10713		    (int)optp->level, (int)optp->name, (int)optp->len));
10714		if (optp->len == 0)
10715			freemsg(mp4ctl);
10716		else
10717			qreply(q, mp4ctl);
10718
10719		return (mp2ctl);
10720	}
10721
10722	/*
10723	 * Return ipv6RouteEntryTable and ipv6RouteAttributeTable in one IRE walk, and
10724	 * ipv6NetToMediaEntryTable in an NDP walk.
10725 */ 10726 static mblk_t * 10727 ip_snmp_get_mib2_ip6_route_media(queue_t *q, mblk_t *mpctl, int level, 10728 ip_stack_t *ipst) 10729 { 10730 struct opthdr *optp; 10731 mblk_t *mp2ctl; /* Returned */ 10732 mblk_t *mp3ctl; /* nettomedia */ 10733 mblk_t *mp4ctl; /* routeattrs */ 10734 iproutedata_t ird; 10735 zoneid_t zoneid; 10736 10737 /* 10738 * make copies of the original message 10739 * - mp2ctl is returned unchanged to the caller for its own use 10740 * - mpctl is sent upstream as ipv6RouteEntryTable 10741 * - mp3ctl is sent upstream as ipv6NetToMediaEntryTable 10742 * - mp4ctl is sent upstream as ipv6RouteAttributeTable 10743 */ 10744 mp2ctl = copymsg(mpctl); 10745 mp3ctl = copymsg(mpctl); 10746 mp4ctl = copymsg(mpctl); 10747 if (mp3ctl == NULL || mp4ctl == NULL) { 10748 freemsg(mp4ctl); 10749 freemsg(mp3ctl); 10750 freemsg(mp2ctl); 10751 freemsg(mpctl); 10752 return (NULL); 10753 } 10754 10755 bzero(&ird, sizeof (ird)); 10756 10757 ird.ird_route.lp_head = mpctl->b_cont; 10758 ird.ird_netmedia.lp_head = mp3ctl->b_cont; 10759 ird.ird_attrs.lp_head = mp4ctl->b_cont; 10760 /* 10761 * If the level has been set to the special EXPER_IP_AND_ALL_IRES value, 10762 * then also include ire_testhidden IREs and IRE_IF_CLONE. This is 10763 * intended as a temporary solution until a proper MIB API is available 10764 * that provides complete filtering/caller-opt-in. 10765 */ 10766 if (level == EXPER_IP_AND_ALL_IRES) 10767 ird.ird_flags |= IRD_REPORT_ALL; 10768 10769 zoneid = Q_TO_CONN(q)->conn_zoneid; 10770 ire_walk_v6(ip_snmp_get2_v6_route, &ird, zoneid, ipst); 10771 10772 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 10773 optp->level = MIB2_IP6; 10774 optp->name = MIB2_IP6_ROUTE; 10775 optp->len = msgdsize(ird.ird_route.lp_head); 10776 ip3dbg(("ip_snmp_get_mib2_ip6_route_media: level %d, name %d, len %d\n", 10777 (int)optp->level, (int)optp->name, (int)optp->len)); 10778 qreply(q, mpctl); 10779 10780 /* ipv6NetToMediaEntryTable in mp3ctl */ 10781 ncec_walk(NULL, ip_snmp_get2_v6_media, &ird, ipst); 10782 10783 optp = (struct opthdr *)&mp3ctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 10784 optp->level = MIB2_IP6; 10785 optp->name = MIB2_IP6_MEDIA; 10786 optp->len = msgdsize(ird.ird_netmedia.lp_head); 10787 ip3dbg(("ip_snmp_get_mib2_ip6_route_media: level %d, name %d, len %d\n", 10788 (int)optp->level, (int)optp->name, (int)optp->len)); 10789 qreply(q, mp3ctl); 10790 10791 /* ipv6RouteAttributeTable in mp4ctl */ 10792 optp = (struct opthdr *)&mp4ctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 10793 optp->level = MIB2_IP6; 10794 optp->name = EXPER_IP_RTATTR; 10795 optp->len = msgdsize(ird.ird_attrs.lp_head); 10796 ip3dbg(("ip_snmp_get_mib2_ip6_route_media: level %d, name %d, len %d\n", 10797 (int)optp->level, (int)optp->name, (int)optp->len)); 10798 if (optp->len == 0) 10799 freemsg(mp4ctl); 10800 else 10801 qreply(q, mp4ctl); 10802 10803 return (mp2ctl); 10804 } 10805 10806 /* 10807 * IPv6 mib: One per ill 10808 */ 10809 static mblk_t * 10810 ip_snmp_get_mib2_ip6(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst) 10811 { 10812 struct opthdr *optp; 10813 mblk_t *mp2ctl; 10814 ill_t *ill; 10815 ill_walk_context_t ctx; 10816 mblk_t *mp_tail = NULL; 10817 10818 /* 10819 * Make a copy of the original message 10820 */ 10821 mp2ctl = copymsg(mpctl); 10822 10823 /* fixed length IPv6 structure ...
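 * (Added note: the per-stack ip6_mib goes out first as the "unknown
 * interface" entry, followed by one mib2_ipIfStatsEntry_t per ill; the
 * *EntrySize fields set below let consumers such as netstat step
 * through the returned octets.)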
*/ 10824 10825 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 10826 optp->level = MIB2_IP6; 10827 optp->name = 0; 10828 /* Include "unknown interface" ip6_mib */ 10829 ipst->ips_ip6_mib.ipIfStatsIPVersion = MIB2_INETADDRESSTYPE_ipv6; 10830 ipst->ips_ip6_mib.ipIfStatsIfIndex = 10831 MIB2_UNKNOWN_INTERFACE; /* Flag to netstat */ 10832 SET_MIB(ipst->ips_ip6_mib.ipIfStatsForwarding, 10833 ipst->ips_ipv6_forward ? 1 : 2); 10834 SET_MIB(ipst->ips_ip6_mib.ipIfStatsDefaultHopLimit, 10835 ipst->ips_ipv6_def_hops); 10836 SET_MIB(ipst->ips_ip6_mib.ipIfStatsEntrySize, 10837 sizeof (mib2_ipIfStatsEntry_t)); 10838 SET_MIB(ipst->ips_ip6_mib.ipIfStatsAddrEntrySize, 10839 sizeof (mib2_ipv6AddrEntry_t)); 10840 SET_MIB(ipst->ips_ip6_mib.ipIfStatsRouteEntrySize, 10841 sizeof (mib2_ipv6RouteEntry_t)); 10842 SET_MIB(ipst->ips_ip6_mib.ipIfStatsNetToMediaEntrySize, 10843 sizeof (mib2_ipv6NetToMediaEntry_t)); 10844 SET_MIB(ipst->ips_ip6_mib.ipIfStatsMemberEntrySize, 10845 sizeof (ipv6_member_t)); 10846 SET_MIB(ipst->ips_ip6_mib.ipIfStatsGroupSourceEntrySize, 10847 sizeof (ipv6_grpsrc_t)); 10848 10849 /* 10850 * Synchronize 64- and 32-bit counters 10851 */ 10852 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsInReceives, 10853 ipIfStatsHCInReceives); 10854 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsInDelivers, 10855 ipIfStatsHCInDelivers); 10856 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsOutRequests, 10857 ipIfStatsHCOutRequests); 10858 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsOutForwDatagrams, 10859 ipIfStatsHCOutForwDatagrams); 10860 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsOutMcastPkts, 10861 ipIfStatsHCOutMcastPkts); 10862 SYNC32_MIB(&ipst->ips_ip6_mib, ipIfStatsInMcastPkts, 10863 ipIfStatsHCInMcastPkts); 10864 10865 if (!snmp_append_data2(mpctl->b_cont, &mp_tail, 10866 (char *)&ipst->ips_ip6_mib, (int)sizeof (ipst->ips_ip6_mib))) { 10867 ip1dbg(("ip_snmp_get_mib2_ip6: failed to allocate %u bytes\n", 10868 (uint_t)sizeof (ipst->ips_ip6_mib))); 10869 } 10870 10871 rw_enter(&ipst->ips_ill_g_lock, RW_READER); 10872 ill = ILL_START_WALK_V6(&ctx, ipst); 10873 for (; ill != NULL; ill = ill_next(&ctx, ill)) { 10874 ill->ill_ip_mib->ipIfStatsIfIndex = 10875 ill->ill_phyint->phyint_ifindex; 10876 SET_MIB(ill->ill_ip_mib->ipIfStatsForwarding, 10877 ipst->ips_ipv6_forward ? 
1 : 2); 10878 SET_MIB(ill->ill_ip_mib->ipIfStatsDefaultHopLimit, 10879 ill->ill_max_hops); 10880 10881 /* 10882 * Synchronize 64- and 32-bit counters 10883 */ 10884 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsInReceives, 10885 ipIfStatsHCInReceives); 10886 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsInDelivers, 10887 ipIfStatsHCInDelivers); 10888 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsOutRequests, 10889 ipIfStatsHCOutRequests); 10890 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsOutForwDatagrams, 10891 ipIfStatsHCOutForwDatagrams); 10892 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsOutMcastPkts, 10893 ipIfStatsHCOutMcastPkts); 10894 SYNC32_MIB(ill->ill_ip_mib, ipIfStatsInMcastPkts, 10895 ipIfStatsHCInMcastPkts); 10896 10897 if (!snmp_append_data2(mpctl->b_cont, &mp_tail, 10898 (char *)ill->ill_ip_mib, 10899 (int)sizeof (*ill->ill_ip_mib))) { 10900 ip1dbg(("ip_snmp_get_mib2_ip6: failed to allocate " 10901 "%u bytes\n", (uint_t)sizeof (*ill->ill_ip_mib))); 10902 } 10903 } 10904 rw_exit(&ipst->ips_ill_g_lock); 10905 10906 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont); 10907 ip3dbg(("ip_snmp_get_mib2_ip6: level %d, name %d, len %d\n", 10908 (int)optp->level, (int)optp->name, (int)optp->len)); 10909 qreply(q, mpctl); 10910 return (mp2ctl); 10911 } 10912 10913 /* 10914 * ICMPv6 mib: One per ill 10915 */ 10916 static mblk_t * 10917 ip_snmp_get_mib2_icmp6(queue_t *q, mblk_t *mpctl, ip_stack_t *ipst) 10918 { 10919 struct opthdr *optp; 10920 mblk_t *mp2ctl; 10921 ill_t *ill; 10922 ill_walk_context_t ctx; 10923 mblk_t *mp_tail = NULL; 10924 /* 10925 * Make a copy of the original message 10926 */ 10927 mp2ctl = copymsg(mpctl); 10928 10929 /* fixed length ICMPv6 structure ... */ 10930 10931 optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)]; 10932 optp->level = MIB2_ICMP6; 10933 optp->name = 0; 10934 /* Include "unknown interface" icmp6_mib */ 10935 ipst->ips_icmp6_mib.ipv6IfIcmpIfIndex = 10936 MIB2_UNKNOWN_INTERFACE; /* netstat flag */ 10937 ipst->ips_icmp6_mib.ipv6IfIcmpEntrySize = 10938 sizeof (mib2_ipv6IfIcmpEntry_t); 10939 if (!snmp_append_data2(mpctl->b_cont, &mp_tail, 10940 (char *)&ipst->ips_icmp6_mib, 10941 (int)sizeof (ipst->ips_icmp6_mib))) { 10942 ip1dbg(("ip_snmp_get_mib2_icmp6: failed to allocate %u bytes\n", 10943 (uint_t)sizeof (ipst->ips_icmp6_mib))); 10944 } 10945 10946 rw_enter(&ipst->ips_ill_g_lock, RW_READER); 10947 ill = ILL_START_WALK_V6(&ctx, ipst); 10948 for (; ill != NULL; ill = ill_next(&ctx, ill)) { 10949 ill->ill_icmp6_mib->ipv6IfIcmpIfIndex = 10950 ill->ill_phyint->phyint_ifindex; 10951 if (!snmp_append_data2(mpctl->b_cont, &mp_tail, 10952 (char *)ill->ill_icmp6_mib, 10953 (int)sizeof (*ill->ill_icmp6_mib))) { 10954 ip1dbg(("ip_snmp_get_mib2_icmp6: failed to allocate " 10955 "%u bytes\n", 10956 (uint_t)sizeof (*ill->ill_icmp6_mib))); 10957 } 10958 } 10959 rw_exit(&ipst->ips_ill_g_lock); 10960 10961 optp->len = (t_uscalar_t)msgdsize(mpctl->b_cont); 10962 ip3dbg(("ip_snmp_get_mib2_icmp6: level %d, name %d, len %d\n", 10963 (int)optp->level, (int)optp->name, (int)optp->len)); 10964 qreply(q, mpctl); 10965 return (mp2ctl); 10966 } 10967 10968 /* 10969 * ire_walk routine to create both ipRouteEntryTable and 10970 * ipRouteAttributeTable in one IRE walk 10971 */ 10972 static void 10973 ip_snmp_get2_v4(ire_t *ire, iproutedata_t *ird) 10974 { 10975 ill_t *ill; 10976 mib2_ipRouteEntry_t *re; 10977 mib2_ipAttributeEntry_t iaes; 10978 tsol_ire_gw_secattr_t *attrp; 10979 tsol_gc_t *gc = NULL; 10980 tsol_gcgrp_t *gcgrp = NULL; 10981 ip_stack_t *ipst = ire->ire_ipst; 10982 10983 
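	/*
	 * Added note on the per-IRE contract: each call appends at most one
	 * mib2_ipRouteEntry_t, plus one mib2_ipAttributeEntry_t when the
	 * gateway carries Trusted Extensions security attributes. The entry
	 * is allocated KM_NOSLEEP and silently skipped on failure, so the
	 * exported table can be incomplete under memory pressure.
	 */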
ASSERT(ire->ire_ipversion == IPV4_VERSION); 10984 10985 if (!(ird->ird_flags & IRD_REPORT_ALL)) { 10986 if (ire->ire_testhidden) 10987 return; 10988 if (ire->ire_type & IRE_IF_CLONE) 10989 return; 10990 } 10991 10992 if ((re = kmem_zalloc(sizeof (*re), KM_NOSLEEP)) == NULL) 10993 return; 10994 10995 if ((attrp = ire->ire_gw_secattr) != NULL) { 10996 mutex_enter(&attrp->igsa_lock); 10997 if ((gc = attrp->igsa_gc) != NULL) { 10998 gcgrp = gc->gc_grp; 10999 ASSERT(gcgrp != NULL); 11000 rw_enter(&gcgrp->gcgrp_rwlock, RW_READER); 11001 } 11002 mutex_exit(&attrp->igsa_lock); 11003 } 11004 /* 11005 * Return all IRE types for route table... let caller pick and choose 11006 */ 11007 re->ipRouteDest = ire->ire_addr; 11008 ill = ire->ire_ill; 11009 re->ipRouteIfIndex.o_length = 0; 11010 if (ill != NULL) { 11011 ill_get_name(ill, re->ipRouteIfIndex.o_bytes, OCTET_LENGTH); 11012 re->ipRouteIfIndex.o_length = 11013 mi_strlen(re->ipRouteIfIndex.o_bytes); 11014 } 11015 re->ipRouteMetric1 = -1; 11016 re->ipRouteMetric2 = -1; 11017 re->ipRouteMetric3 = -1; 11018 re->ipRouteMetric4 = -1; 11019 11020 re->ipRouteNextHop = ire->ire_gateway_addr; 11021 /* indirect(4), direct(3), or invalid(2) */ 11022 if (ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE)) 11023 re->ipRouteType = 2; 11024 else if (ire->ire_type & IRE_ONLINK) 11025 re->ipRouteType = 3; 11026 else 11027 re->ipRouteType = 4; 11028 11029 re->ipRouteProto = -1; 11030 re->ipRouteAge = gethrestime_sec() - ire->ire_create_time; 11031 re->ipRouteMask = ire->ire_mask; 11032 re->ipRouteMetric5 = -1; 11033 re->ipRouteInfo.re_max_frag = ire->ire_metrics.iulp_mtu; 11034 if (ire->ire_ill != NULL && re->ipRouteInfo.re_max_frag == 0) 11035 re->ipRouteInfo.re_max_frag = ire->ire_ill->ill_mtu; 11036 11037 re->ipRouteInfo.re_frag_flag = 0; 11038 re->ipRouteInfo.re_rtt = 0; 11039 re->ipRouteInfo.re_src_addr = 0; 11040 re->ipRouteInfo.re_ref = ire->ire_refcnt; 11041 re->ipRouteInfo.re_obpkt = ire->ire_ob_pkt_count; 11042 re->ipRouteInfo.re_ibpkt = ire->ire_ib_pkt_count; 11043 re->ipRouteInfo.re_flags = ire->ire_flags; 11044 11045 /* Add the IRE_IF_CLONE's counters to their parent IRE_INTERFACE */ 11046 if (ire->ire_type & IRE_INTERFACE) { 11047 ire_t *child; 11048 11049 rw_enter(&ipst->ips_ire_dep_lock, RW_READER); 11050 child = ire->ire_dep_children; 11051 while (child != NULL) { 11052 re->ipRouteInfo.re_obpkt += child->ire_ob_pkt_count; 11053 re->ipRouteInfo.re_ibpkt += child->ire_ib_pkt_count; 11054 child = child->ire_dep_sib_next; 11055 } 11056 rw_exit(&ipst->ips_ire_dep_lock); 11057 } 11058 11059 if (ire->ire_flags & RTF_DYNAMIC) { 11060 re->ipRouteInfo.re_ire_type = IRE_HOST_REDIRECT; 11061 } else { 11062 re->ipRouteInfo.re_ire_type = ire->ire_type; 11063 } 11064 11065 if (!snmp_append_data2(ird->ird_route.lp_head, &ird->ird_route.lp_tail, 11066 (char *)re, (int)sizeof (*re))) { 11067 ip1dbg(("ip_snmp_get2_v4: failed to allocate %u bytes\n", 11068 (uint_t)sizeof (*re))); 11069 } 11070 11071 if (gc != NULL) { 11072 iaes.iae_routeidx = ird->ird_idx; 11073 iaes.iae_doi = gc->gc_db->gcdb_doi; 11074 iaes.iae_slrange = gc->gc_db->gcdb_slrange; 11075 11076 if (!snmp_append_data2(ird->ird_attrs.lp_head, 11077 &ird->ird_attrs.lp_tail, (char *)&iaes, sizeof (iaes))) { 11078 ip1dbg(("ip_snmp_get2_v4: failed to allocate %u " 11079 "bytes\n", (uint_t)sizeof (iaes))); 11080 } 11081 } 11082 11083 /* bump route index for next pass */ 11084 ird->ird_idx++; 11085 11086 kmem_free(re, sizeof (*re)); 11087 if (gcgrp != NULL) 11088 rw_exit(&gcgrp->gcgrp_rwlock); 11089 } 11090 11091 /* 
11092 * ire_walk routine to create ipv6RouteEntryTable and ipv6RouteAttributeTable. 11093 */ 11094 static void 11095 ip_snmp_get2_v6_route(ire_t *ire, iproutedata_t *ird) 11096 { 11097 ill_t *ill; 11098 mib2_ipv6RouteEntry_t *re; 11099 mib2_ipAttributeEntry_t iaes; 11100 tsol_ire_gw_secattr_t *attrp; 11101 tsol_gc_t *gc = NULL; 11102 tsol_gcgrp_t *gcgrp = NULL; 11103 ip_stack_t *ipst = ire->ire_ipst; 11104 11105 ASSERT(ire->ire_ipversion == IPV6_VERSION); 11106 11107 if (!(ird->ird_flags & IRD_REPORT_ALL)) { 11108 if (ire->ire_testhidden) 11109 return; 11110 if (ire->ire_type & IRE_IF_CLONE) 11111 return; 11112 } 11113 11114 if ((re = kmem_zalloc(sizeof (*re), KM_NOSLEEP)) == NULL) 11115 return; 11116 11117 if ((attrp = ire->ire_gw_secattr) != NULL) { 11118 mutex_enter(&attrp->igsa_lock); 11119 if ((gc = attrp->igsa_gc) != NULL) { 11120 gcgrp = gc->gc_grp; 11121 ASSERT(gcgrp != NULL); 11122 rw_enter(&gcgrp->gcgrp_rwlock, RW_READER); 11123 } 11124 mutex_exit(&attrp->igsa_lock); 11125 } 11126 /* 11127 * Return all IRE types for route table... let caller pick and choose 11128 */ 11129 re->ipv6RouteDest = ire->ire_addr_v6; 11130 re->ipv6RoutePfxLength = ip_mask_to_plen_v6(&ire->ire_mask_v6); 11131 re->ipv6RouteIndex = 0; /* Unique when multiple with same dest/plen */ 11132 re->ipv6RouteIfIndex.o_length = 0; 11133 ill = ire->ire_ill; 11134 if (ill != NULL) { 11135 ill_get_name(ill, re->ipv6RouteIfIndex.o_bytes, OCTET_LENGTH); 11136 re->ipv6RouteIfIndex.o_length = 11137 mi_strlen(re->ipv6RouteIfIndex.o_bytes); 11138 } 11139 11140 ASSERT(!(ire->ire_type & IRE_BROADCAST)); 11141 11142 mutex_enter(&ire->ire_lock); 11143 re->ipv6RouteNextHop = ire->ire_gateway_addr_v6; 11144 mutex_exit(&ire->ire_lock); 11145 11146 /* remote(4), local(3), or discard(2) */ 11147 if (ire->ire_flags & (RTF_REJECT | RTF_BLACKHOLE)) 11148 re->ipv6RouteType = 2; 11149 else if (ire->ire_type & IRE_ONLINK) 11150 re->ipv6RouteType = 3; 11151 else 11152 re->ipv6RouteType = 4; 11153 11154 re->ipv6RouteProtocol = -1; 11155 re->ipv6RoutePolicy = 0; 11156 re->ipv6RouteAge = gethrestime_sec() - ire->ire_create_time; 11157 re->ipv6RouteNextHopRDI = 0; 11158 re->ipv6RouteWeight = 0; 11159 re->ipv6RouteMetric = 0; 11160 re->ipv6RouteInfo.re_max_frag = ire->ire_metrics.iulp_mtu; 11161 if (ire->ire_ill != NULL && re->ipv6RouteInfo.re_max_frag == 0) 11162 re->ipv6RouteInfo.re_max_frag = ire->ire_ill->ill_mtu; 11163 11164 re->ipv6RouteInfo.re_frag_flag = 0; 11165 re->ipv6RouteInfo.re_rtt = 0; 11166 re->ipv6RouteInfo.re_src_addr = ipv6_all_zeros; 11167 re->ipv6RouteInfo.re_obpkt = ire->ire_ob_pkt_count; 11168 re->ipv6RouteInfo.re_ibpkt = ire->ire_ib_pkt_count; 11169 re->ipv6RouteInfo.re_ref = ire->ire_refcnt; 11170 re->ipv6RouteInfo.re_flags = ire->ire_flags; 11171 11172 /* Add the IRE_IF_CLONE's counters to their parent IRE_INTERFACE */ 11173 if (ire->ire_type & IRE_INTERFACE) { 11174 ire_t *child; 11175 11176 rw_enter(&ipst->ips_ire_dep_lock, RW_READER); 11177 child = ire->ire_dep_children; 11178 while (child != NULL) { 11179 re->ipv6RouteInfo.re_obpkt += child->ire_ob_pkt_count; 11180 re->ipv6RouteInfo.re_ibpkt += child->ire_ib_pkt_count; 11181 child = child->ire_dep_sib_next; 11182 } 11183 rw_exit(&ipst->ips_ire_dep_lock); 11184 } 11185 if (ire->ire_flags & RTF_DYNAMIC) { 11186 re->ipv6RouteInfo.re_ire_type = IRE_HOST_REDIRECT; 11187 } else { 11188 re->ipv6RouteInfo.re_ire_type = ire->ire_type; 11189 } 11190 11191 if (!snmp_append_data2(ird->ird_route.lp_head, &ird->ird_route.lp_tail, 11192 (char *)re, (int)sizeof (*re))) { 11193
ip1dbg(("ip_snmp_get2_v6: failed to allocate %u bytes\n", 11194 (uint_t)sizeof (*re))); 11195 } 11196 11197 if (gc != NULL) { 11198 iaes.iae_routeidx = ird->ird_idx; 11199 iaes.iae_doi = gc->gc_db->gcdb_doi; 11200 iaes.iae_slrange = gc->gc_db->gcdb_slrange; 11201 11202 if (!snmp_append_data2(ird->ird_attrs.lp_head, 11203 &ird->ird_attrs.lp_tail, (char *)&iaes, sizeof (iaes))) { 11204 ip1dbg(("ip_snmp_get2_v6: failed to allocate %u " 11205 "bytes\n", (uint_t)sizeof (iaes))); 11206 } 11207 } 11208 11209 /* bump route index for next pass */ 11210 ird->ird_idx++; 11211 11212 kmem_free(re, sizeof (*re)); 11213 if (gcgrp != NULL) 11214 rw_exit(&gcgrp->gcgrp_rwlock); 11215 } 11216 11217 /* 11218 * ncec_walk routine to create ipv6NetToMediaEntryTable 11219 */ 11220 static int 11221 ip_snmp_get2_v6_media(ncec_t *ncec, iproutedata_t *ird) 11222 { 11223 ill_t *ill; 11224 mib2_ipv6NetToMediaEntry_t ntme; 11225 11226 ill = ncec->ncec_ill; 11227 /* skip arpce entries, and loopback ncec entries */ 11228 if (ill->ill_isv6 == B_FALSE || ill->ill_net_type == IRE_LOOPBACK) 11229 return (0); 11230 /* 11231 * Neighbor cache entry attached to IRE with on-link 11232 * destination. 11233 * We report all IPMP groups on ncec_ill which is normally the upper. 11234 */ 11235 ntme.ipv6NetToMediaIfIndex = ill->ill_phyint->phyint_ifindex; 11236 ntme.ipv6NetToMediaNetAddress = ncec->ncec_addr; 11237 ntme.ipv6NetToMediaPhysAddress.o_length = ill->ill_phys_addr_length; 11238 if (ncec->ncec_lladdr != NULL) { 11239 bcopy(ncec->ncec_lladdr, ntme.ipv6NetToMediaPhysAddress.o_bytes, 11240 ntme.ipv6NetToMediaPhysAddress.o_length); 11241 } 11242 /* 11243 * Note: Returns ND_* states. Should be: 11244 * reachable(1), stale(2), delay(3), probe(4), 11245 * invalid(5), unknown(6) 11246 */ 11247 ntme.ipv6NetToMediaState = ncec->ncec_state; 11248 ntme.ipv6NetToMediaLastUpdated = 0; 11249 11250 /* other(1), dynamic(2), static(3), local(4) */ 11251 if (NCE_MYADDR(ncec)) { 11252 ntme.ipv6NetToMediaType = 4; 11253 } else if (ncec->ncec_flags & NCE_F_PUBLISH) { 11254 ntme.ipv6NetToMediaType = 1; /* proxy */ 11255 } else if (ncec->ncec_flags & NCE_F_STATIC) { 11256 ntme.ipv6NetToMediaType = 3; 11257 } else if (ncec->ncec_flags & (NCE_F_MCAST|NCE_F_BCAST)) { 11258 ntme.ipv6NetToMediaType = 1; 11259 } else { 11260 ntme.ipv6NetToMediaType = 2; 11261 } 11262 11263 if (!snmp_append_data2(ird->ird_netmedia.lp_head, 11264 &ird->ird_netmedia.lp_tail, (char *)&ntme, sizeof (ntme))) { 11265 ip1dbg(("ip_snmp_get2_v6_media: failed to allocate %u bytes\n", 11266 (uint_t)sizeof (ntme))); 11267 } 11268 return (0); 11269 } 11270 11271 int 11272 nce2ace(ncec_t *ncec) 11273 { 11274 int flags = 0; 11275 11276 if (NCE_ISREACHABLE(ncec)) 11277 flags |= ACE_F_RESOLVED; 11278 if (ncec->ncec_flags & NCE_F_AUTHORITY) 11279 flags |= ACE_F_AUTHORITY; 11280 if (ncec->ncec_flags & NCE_F_PUBLISH) 11281 flags |= ACE_F_PUBLISH; 11282 if ((ncec->ncec_flags & NCE_F_NONUD) != 0) 11283 flags |= ACE_F_PERMANENT; 11284 if (NCE_MYADDR(ncec)) 11285 flags |= (ACE_F_MYADDR | ACE_F_AUTHORITY); 11286 if (ncec->ncec_flags & NCE_F_UNVERIFIED) 11287 flags |= ACE_F_UNVERIFIED; 11290 if (ncec->ncec_flags & NCE_F_DELAYED) 11291 flags |= ACE_F_DELAYED; 11292 return (flags); 11293 } 11294 11295 /* 11296 * ncec_walk routine to create ipNetToMediaEntryTable 11297 */ 11298 static int 11299 ip_snmp_get2_v4_media(ncec_t *ncec, iproutedata_t *ird) 11300 { 11301 ill_t *ill; 11302 mib2_ipNetToMediaEntry_t ntme; 11303 const
char *name = "unknown"; 11304 ipaddr_t ncec_addr; 11305 11306 ill = ncec->ncec_ill; 11307 if (ill->ill_isv6 || (ncec->ncec_flags & NCE_F_BCAST) || 11308 ill->ill_net_type == IRE_LOOPBACK) 11309 return (0); 11310 11311 /* We report all IPMP groups on ncec_ill which is normally the upper. */ 11312 name = ill->ill_name; 11313 /* Based on RFC 4293: other(1), inval(2), dyn(3), stat(4) */ 11314 if (NCE_MYADDR(ncec)) { 11315 ntme.ipNetToMediaType = 4; 11316 } else if (ncec->ncec_flags & (NCE_F_MCAST|NCE_F_BCAST|NCE_F_PUBLISH)) { 11317 ntme.ipNetToMediaType = 1; 11318 } else { 11319 ntme.ipNetToMediaType = 3; 11320 } 11321 ntme.ipNetToMediaIfIndex.o_length = MIN(OCTET_LENGTH, strlen(name)); 11322 bcopy(name, ntme.ipNetToMediaIfIndex.o_bytes, 11323 ntme.ipNetToMediaIfIndex.o_length); 11324 11325 IN6_V4MAPPED_TO_IPADDR(&ncec->ncec_addr, ncec_addr); 11326 bcopy(&ncec_addr, &ntme.ipNetToMediaNetAddress, sizeof (ncec_addr)); 11327 11328 ntme.ipNetToMediaInfo.ntm_mask.o_length = sizeof (ipaddr_t); 11329 ncec_addr = INADDR_BROADCAST; 11330 bcopy(&ncec_addr, ntme.ipNetToMediaInfo.ntm_mask.o_bytes, 11331 sizeof (ncec_addr)); 11332 /* 11333 * Map all the flags to their ACE counterparts. 11334 */ 11335 ntme.ipNetToMediaInfo.ntm_flags = nce2ace(ncec); 11336 11337 ntme.ipNetToMediaPhysAddress.o_length = 11338 MIN(OCTET_LENGTH, ill->ill_phys_addr_length); 11339 11340 if (!NCE_ISREACHABLE(ncec)) 11341 ntme.ipNetToMediaPhysAddress.o_length = 0; 11342 else { 11343 if (ncec->ncec_lladdr != NULL) { 11344 bcopy(ncec->ncec_lladdr, 11345 ntme.ipNetToMediaPhysAddress.o_bytes, 11346 ntme.ipNetToMediaPhysAddress.o_length); 11347 } 11348 } 11349 11350 if (!snmp_append_data2(ird->ird_netmedia.lp_head, 11351 &ird->ird_netmedia.lp_tail, (char *)&ntme, sizeof (ntme))) { 11352 ip1dbg(("ip_snmp_get2_v4_media: failed to allocate %u bytes\n", 11353 (uint_t)sizeof (ntme))); 11354 } 11355 return (0); 11356 } 11357 11358 /* 11359 * Return 0 for an invalid set request, 1 otherwise (including non-TCP requests). 11360 */ 11361 /* ARGSUSED */ 11362 int 11363 ip_snmp_set(queue_t *q, int level, int name, uchar_t *ptr, int len) 11364 { 11365 switch (level) { 11366 case MIB2_IP: 11367 case MIB2_ICMP: 11368 switch (name) { 11369 default: 11370 break; 11371 } 11372 return (1); 11373 default: 11374 return (1); 11375 } 11376 } 11377 11378 /* 11379 * When there exists both a 64- and 32-bit counter of a particular type 11380 * (e.g., InReceives), only the 64-bit counters are added.
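 *
 * (Added rationale, inferred from the SYNC32_MIB usage elsewhere in this
 * file: the 32-bit legacy counters are regenerated from their 64-bit HC
 * counterparts just before export, so adding both the 32- and 64-bit
 * values here would double-count.)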
11381 */ 11382 void 11383 ip_mib2_add_ip_stats(mib2_ipIfStatsEntry_t *o1, mib2_ipIfStatsEntry_t *o2) 11384 { 11385 UPDATE_MIB(o1, ipIfStatsInHdrErrors, o2->ipIfStatsInHdrErrors); 11386 UPDATE_MIB(o1, ipIfStatsInTooBigErrors, o2->ipIfStatsInTooBigErrors); 11387 UPDATE_MIB(o1, ipIfStatsInNoRoutes, o2->ipIfStatsInNoRoutes); 11388 UPDATE_MIB(o1, ipIfStatsInAddrErrors, o2->ipIfStatsInAddrErrors); 11389 UPDATE_MIB(o1, ipIfStatsInUnknownProtos, o2->ipIfStatsInUnknownProtos); 11390 UPDATE_MIB(o1, ipIfStatsInTruncatedPkts, o2->ipIfStatsInTruncatedPkts); 11391 UPDATE_MIB(o1, ipIfStatsInDiscards, o2->ipIfStatsInDiscards); 11392 UPDATE_MIB(o1, ipIfStatsOutDiscards, o2->ipIfStatsOutDiscards); 11393 UPDATE_MIB(o1, ipIfStatsOutFragOKs, o2->ipIfStatsOutFragOKs); 11394 UPDATE_MIB(o1, ipIfStatsOutFragFails, o2->ipIfStatsOutFragFails); 11395 UPDATE_MIB(o1, ipIfStatsOutFragCreates, o2->ipIfStatsOutFragCreates); 11396 UPDATE_MIB(o1, ipIfStatsReasmReqds, o2->ipIfStatsReasmReqds); 11397 UPDATE_MIB(o1, ipIfStatsReasmOKs, o2->ipIfStatsReasmOKs); 11398 UPDATE_MIB(o1, ipIfStatsReasmFails, o2->ipIfStatsReasmFails); 11399 UPDATE_MIB(o1, ipIfStatsOutNoRoutes, o2->ipIfStatsOutNoRoutes); 11400 UPDATE_MIB(o1, ipIfStatsReasmDuplicates, o2->ipIfStatsReasmDuplicates); 11401 UPDATE_MIB(o1, ipIfStatsReasmPartDups, o2->ipIfStatsReasmPartDups); 11402 UPDATE_MIB(o1, ipIfStatsForwProhibits, o2->ipIfStatsForwProhibits); 11403 UPDATE_MIB(o1, udpInCksumErrs, o2->udpInCksumErrs); 11404 UPDATE_MIB(o1, udpInOverflows, o2->udpInOverflows); 11405 UPDATE_MIB(o1, rawipInOverflows, o2->rawipInOverflows); 11406 UPDATE_MIB(o1, ipIfStatsInWrongIPVersion, 11407 o2->ipIfStatsInWrongIPVersion); 11408 UPDATE_MIB(o1, ipIfStatsOutWrongIPVersion, 11409 o2->ipIfStatsOutWrongIPVersion); 11410 UPDATE_MIB(o1, ipIfStatsOutSwitchIPVersion, 11411 o2->ipIfStatsOutSwitchIPVersion); 11412 UPDATE_MIB(o1, ipIfStatsHCInReceives, o2->ipIfStatsHCInReceives); 11413 UPDATE_MIB(o1, ipIfStatsHCInOctets, o2->ipIfStatsHCInOctets); 11414 UPDATE_MIB(o1, ipIfStatsHCInForwDatagrams, 11415 o2->ipIfStatsHCInForwDatagrams); 11416 UPDATE_MIB(o1, ipIfStatsHCInDelivers, o2->ipIfStatsHCInDelivers); 11417 UPDATE_MIB(o1, ipIfStatsHCOutRequests, o2->ipIfStatsHCOutRequests); 11418 UPDATE_MIB(o1, ipIfStatsHCOutForwDatagrams, 11419 o2->ipIfStatsHCOutForwDatagrams); 11420 UPDATE_MIB(o1, ipIfStatsOutFragReqds, o2->ipIfStatsOutFragReqds); 11421 UPDATE_MIB(o1, ipIfStatsHCOutTransmits, o2->ipIfStatsHCOutTransmits); 11422 UPDATE_MIB(o1, ipIfStatsHCOutOctets, o2->ipIfStatsHCOutOctets); 11423 UPDATE_MIB(o1, ipIfStatsHCInMcastPkts, o2->ipIfStatsHCInMcastPkts); 11424 UPDATE_MIB(o1, ipIfStatsHCInMcastOctets, o2->ipIfStatsHCInMcastOctets); 11425 UPDATE_MIB(o1, ipIfStatsHCOutMcastPkts, o2->ipIfStatsHCOutMcastPkts); 11426 UPDATE_MIB(o1, ipIfStatsHCOutMcastOctets, 11427 o2->ipIfStatsHCOutMcastOctets); 11428 UPDATE_MIB(o1, ipIfStatsHCInBcastPkts, o2->ipIfStatsHCInBcastPkts); 11429 UPDATE_MIB(o1, ipIfStatsHCOutBcastPkts, o2->ipIfStatsHCOutBcastPkts); 11430 UPDATE_MIB(o1, ipsecInSucceeded, o2->ipsecInSucceeded); 11431 UPDATE_MIB(o1, ipsecInFailed, o2->ipsecInFailed); 11432 UPDATE_MIB(o1, ipInCksumErrs, o2->ipInCksumErrs); 11433 UPDATE_MIB(o1, tcpInErrs, o2->tcpInErrs); 11434 UPDATE_MIB(o1, udpNoPorts, o2->udpNoPorts); 11435 } 11436 11437 void 11438 ip_mib2_add_icmp6_stats(mib2_ipv6IfIcmpEntry_t *o1, mib2_ipv6IfIcmpEntry_t *o2) 11439 { 11440 UPDATE_MIB(o1, ipv6IfIcmpInMsgs, o2->ipv6IfIcmpInMsgs); 11441 UPDATE_MIB(o1, ipv6IfIcmpInErrors, o2->ipv6IfIcmpInErrors); 11442 UPDATE_MIB(o1, ipv6IfIcmpInDestUnreachs,
o2->ipv6IfIcmpInDestUnreachs); 11443 UPDATE_MIB(o1, ipv6IfIcmpInAdminProhibs, o2->ipv6IfIcmpInAdminProhibs); 11444 UPDATE_MIB(o1, ipv6IfIcmpInTimeExcds, o2->ipv6IfIcmpInTimeExcds); 11445 UPDATE_MIB(o1, ipv6IfIcmpInParmProblems, o2->ipv6IfIcmpInParmProblems); 11446 UPDATE_MIB(o1, ipv6IfIcmpInPktTooBigs, o2->ipv6IfIcmpInPktTooBigs); 11447 UPDATE_MIB(o1, ipv6IfIcmpInEchos, o2->ipv6IfIcmpInEchos); 11448 UPDATE_MIB(o1, ipv6IfIcmpInEchoReplies, o2->ipv6IfIcmpInEchoReplies); 11449 UPDATE_MIB(o1, ipv6IfIcmpInRouterSolicits, 11450 o2->ipv6IfIcmpInRouterSolicits); 11451 UPDATE_MIB(o1, ipv6IfIcmpInRouterAdvertisements, 11452 o2->ipv6IfIcmpInRouterAdvertisements); 11453 UPDATE_MIB(o1, ipv6IfIcmpInNeighborSolicits, 11454 o2->ipv6IfIcmpInNeighborSolicits); 11455 UPDATE_MIB(o1, ipv6IfIcmpInNeighborAdvertisements, 11456 o2->ipv6IfIcmpInNeighborAdvertisements); 11457 UPDATE_MIB(o1, ipv6IfIcmpInRedirects, o2->ipv6IfIcmpInRedirects); 11458 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembQueries, 11459 o2->ipv6IfIcmpInGroupMembQueries); 11460 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembResponses, 11461 o2->ipv6IfIcmpInGroupMembResponses); 11462 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembReductions, 11463 o2->ipv6IfIcmpInGroupMembReductions); 11464 UPDATE_MIB(o1, ipv6IfIcmpOutMsgs, o2->ipv6IfIcmpOutMsgs); 11465 UPDATE_MIB(o1, ipv6IfIcmpOutErrors, o2->ipv6IfIcmpOutErrors); 11466 UPDATE_MIB(o1, ipv6IfIcmpOutDestUnreachs, 11467 o2->ipv6IfIcmpOutDestUnreachs); 11468 UPDATE_MIB(o1, ipv6IfIcmpOutAdminProhibs, 11469 o2->ipv6IfIcmpOutAdminProhibs); 11470 UPDATE_MIB(o1, ipv6IfIcmpOutTimeExcds, o2->ipv6IfIcmpOutTimeExcds); 11471 UPDATE_MIB(o1, ipv6IfIcmpOutParmProblems, 11472 o2->ipv6IfIcmpOutParmProblems); 11473 UPDATE_MIB(o1, ipv6IfIcmpOutPktTooBigs, o2->ipv6IfIcmpOutPktTooBigs); 11474 UPDATE_MIB(o1, ipv6IfIcmpOutEchos, o2->ipv6IfIcmpOutEchos); 11475 UPDATE_MIB(o1, ipv6IfIcmpOutEchoReplies, o2->ipv6IfIcmpOutEchoReplies); 11476 UPDATE_MIB(o1, ipv6IfIcmpOutRouterSolicits, 11477 o2->ipv6IfIcmpOutRouterSolicits); 11478 UPDATE_MIB(o1, ipv6IfIcmpOutRouterAdvertisements, 11479 o2->ipv6IfIcmpOutRouterAdvertisements); 11480 UPDATE_MIB(o1, ipv6IfIcmpOutNeighborSolicits, 11481 o2->ipv6IfIcmpOutNeighborSolicits); 11482 UPDATE_MIB(o1, ipv6IfIcmpOutNeighborAdvertisements, 11483 o2->ipv6IfIcmpOutNeighborAdvertisements); 11484 UPDATE_MIB(o1, ipv6IfIcmpOutRedirects, o2->ipv6IfIcmpOutRedirects); 11485 UPDATE_MIB(o1, ipv6IfIcmpOutGroupMembQueries, 11486 o2->ipv6IfIcmpOutGroupMembQueries); 11487 UPDATE_MIB(o1, ipv6IfIcmpOutGroupMembResponses, 11488 o2->ipv6IfIcmpOutGroupMembResponses); 11489 UPDATE_MIB(o1, ipv6IfIcmpOutGroupMembReductions, 11490 o2->ipv6IfIcmpOutGroupMembReductions); 11491 UPDATE_MIB(o1, ipv6IfIcmpInOverflows, o2->ipv6IfIcmpInOverflows); 11492 UPDATE_MIB(o1, ipv6IfIcmpBadHoplimit, o2->ipv6IfIcmpBadHoplimit); 11493 UPDATE_MIB(o1, ipv6IfIcmpInBadNeighborAdvertisements, 11494 o2->ipv6IfIcmpInBadNeighborAdvertisements); 11495 UPDATE_MIB(o1, ipv6IfIcmpInBadNeighborSolicitations, 11496 o2->ipv6IfIcmpInBadNeighborSolicitations); 11497 UPDATE_MIB(o1, ipv6IfIcmpInBadRedirects, o2->ipv6IfIcmpInBadRedirects); 11498 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembTotal, 11499 o2->ipv6IfIcmpInGroupMembTotal); 11500 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembBadQueries, 11501 o2->ipv6IfIcmpInGroupMembBadQueries); 11502 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembBadReports, 11503 o2->ipv6IfIcmpInGroupMembBadReports); 11504 UPDATE_MIB(o1, ipv6IfIcmpInGroupMembOurReports, 11505 o2->ipv6IfIcmpInGroupMembOurReports); 11506 } 11507 11508 /* 11509 * Called before the options are updated to 
check if this packet will 11510 * be source routed from here. 11511 * This routine assumes that the options are well formed i.e. that they 11512 * have already been checked. 11513 */ 11514 boolean_t 11515 ip_source_routed(ipha_t *ipha, ip_stack_t *ipst) 11516 { 11517 ipoptp_t opts; 11518 uchar_t *opt; 11519 uint8_t optval; 11520 uint8_t optlen; 11521 ipaddr_t dst; 11522 11523 if (IS_SIMPLE_IPH(ipha)) { 11524 ip2dbg(("not source routed\n")); 11525 return (B_FALSE); 11526 } 11527 dst = ipha->ipha_dst; 11528 for (optval = ipoptp_first(&opts, ipha); 11529 optval != IPOPT_EOL; 11530 optval = ipoptp_next(&opts)) { 11531 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0); 11532 opt = opts.ipoptp_cur; 11533 optlen = opts.ipoptp_len; 11534 ip2dbg(("ip_source_routed: opt %d, len %d\n", 11535 optval, optlen)); 11536 switch (optval) { 11537 uint32_t off; 11538 case IPOPT_SSRR: 11539 case IPOPT_LSRR: 11540 /* 11541 * If dst is one of our addresses and there are some 11542 * entries left in the source route return (true). 11543 */ 11544 if (ip_type_v4(dst, ipst) != IRE_LOCAL) { 11545 ip2dbg(("ip_source_routed: not next" 11546 " source route 0x%x\n", 11547 ntohl(dst))); 11548 return (B_FALSE); 11549 } 11550 off = opt[IPOPT_OFFSET]; 11551 off--; 11552 if (optlen < IP_ADDR_LEN || 11553 off > optlen - IP_ADDR_LEN) { 11554 /* End of source route */ 11555 ip1dbg(("ip_source_routed: end of SR\n")); 11556 return (B_FALSE); 11557 } 11558 return (B_TRUE); 11559 } 11560 } 11561 ip2dbg(("not source routed\n")); 11562 return (B_FALSE); 11563 } 11564 11565 /* 11566 * ip_unbind is called by the transports to remove a conn from 11567 * the fanout table. 11568 */ 11569 void 11570 ip_unbind(conn_t *connp) 11571 { 11572 11573 ASSERT(!MUTEX_HELD(&connp->conn_lock)); 11574 11575 if (is_system_labeled() && connp->conn_anon_port) { 11576 (void) tsol_mlp_anon(crgetzone(connp->conn_cred), 11577 connp->conn_mlp_type, connp->conn_proto, 11578 ntohs(connp->conn_lport), B_FALSE); 11579 connp->conn_anon_port = 0; 11580 } 11581 connp->conn_mlp_type = mlptSingle; 11582 11583 ipcl_hash_remove(connp); 11584 } 11585 11586 /* 11587 * Used for deciding the MSS size for the upper layer. Thus 11588 * we need to check the outbound policy values in the conn. 11589 */ 11590 int 11591 conn_ipsec_length(conn_t *connp) 11592 { 11593 ipsec_latch_t *ipl; 11594 11595 ipl = connp->conn_latch; 11596 if (ipl == NULL) 11597 return (0); 11598 11599 if (connp->conn_ixa->ixa_ipsec_policy == NULL) 11600 return (0); 11601 11602 return (connp->conn_ixa->ixa_ipsec_policy->ipsp_act->ipa_ovhd); 11603 } 11604 11605 /* 11606 * Returns an estimate of the IPsec headers size. This is used if 11607 * we don't want to call into IPsec to get the exact size. 11608 */ 11609 int 11610 ipsec_out_extra_length(ip_xmit_attr_t *ixa) 11611 { 11612 ipsec_action_t *a; 11613 11614 if (!(ixa->ixa_flags & IXAF_IPSEC_SECURE)) 11615 return (0); 11616 11617 a = ixa->ixa_ipsec_action; 11618 if (a == NULL) { 11619 ASSERT(ixa->ixa_ipsec_policy != NULL); 11620 a = ixa->ixa_ipsec_policy->ipsp_act; 11621 } 11622 ASSERT(a != NULL); 11623 11624 return (a->ipa_ovhd); 11625 } 11626 11627 /* 11628 * If there are any source route options, return the true final 11629 * destination. Otherwise, return the destination. 
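 *
 * Illustrative example (not from the original comment): with an LSRR
 * whose remaining entries are { G2, D }, ipha_dst holds the next
 * source-route hop while the last address slot in the option holds the
 * true final destination D, which is what we return. A simple header
 * returns ipha_dst unchanged.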
11630 */ 11631 ipaddr_t 11632 ip_get_dst(ipha_t *ipha) 11633 { 11634 ipoptp_t opts; 11635 uchar_t *opt; 11636 uint8_t optval; 11637 uint8_t optlen; 11638 ipaddr_t dst; 11639 uint32_t off; 11640 11641 dst = ipha->ipha_dst; 11642 11643 if (IS_SIMPLE_IPH(ipha)) 11644 return (dst); 11645 11646 for (optval = ipoptp_first(&opts, ipha); 11647 optval != IPOPT_EOL; 11648 optval = ipoptp_next(&opts)) { 11649 opt = opts.ipoptp_cur; 11650 optlen = opts.ipoptp_len; 11651 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0); 11652 switch (optval) { 11653 case IPOPT_SSRR: 11654 case IPOPT_LSRR: 11655 off = opt[IPOPT_OFFSET]; 11656 /* 11657 * If one of the conditions is true, it means 11658 * end of options and dst already has the right 11659 * value. 11660 */ 11661 if (!(optlen < IP_ADDR_LEN || off > optlen - 3)) { 11662 off = optlen - IP_ADDR_LEN; 11663 bcopy(&opt[off], &dst, IP_ADDR_LEN); 11664 } 11665 return (dst); 11666 default: 11667 break; 11668 } 11669 } 11670 11671 return (dst); 11672 } 11673 11674 /* 11675 * Outbound IP fragmentation routine. 11676 * Assumes the caller has checked whether or not fragmentation should 11677 * be allowed. Here we copy the DF bit from the header to all the generated 11678 * fragments. 11679 */ 11680 int 11681 ip_fragment_v4(mblk_t *mp_orig, nce_t *nce, iaflags_t ixaflags, 11682 uint_t pkt_len, uint32_t max_frag, uint32_t xmit_hint, zoneid_t szone, 11683 zoneid_t nolzid, pfirepostfrag_t postfragfn, uintptr_t *ixa_cookie) 11684 { 11685 int i1; 11686 int hdr_len; 11687 mblk_t *hdr_mp; 11688 ipha_t *ipha; 11689 int ip_data_end; 11690 int len; 11691 mblk_t *mp = mp_orig; 11692 int offset; 11693 ill_t *ill = nce->nce_ill; 11694 ip_stack_t *ipst = ill->ill_ipst; 11695 mblk_t *carve_mp; 11696 uint32_t frag_flag; 11697 uint_t priority = mp->b_band; 11698 int error = 0; 11699 11700 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragReqds); 11701 11702 if (pkt_len != msgdsize(mp)) { 11703 ip0dbg(("Packet length mismatch: %d, %ld\n", 11704 pkt_len, msgdsize(mp))); 11705 freemsg(mp); 11706 return (EINVAL); 11707 } 11708 11709 if (max_frag == 0) { 11710 ip1dbg(("ip_fragment_v4: max_frag is zero. Dropping packet\n")); 11711 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails); 11712 ip_drop_output("FragFails: zero max_frag", mp, ill); 11713 freemsg(mp); 11714 return (EINVAL); 11715 } 11716 11717 ASSERT(MBLKL(mp) >= sizeof (ipha_t)); 11718 ipha = (ipha_t *)mp->b_rptr; 11719 ASSERT(ntohs(ipha->ipha_length) == pkt_len); 11720 frag_flag = ntohs(ipha->ipha_fragment_offset_and_flags) & IPH_DF; 11721 11722 /* 11723 * Establish the starting offset. May not be zero if we are fragging 11724 * a fragment that is being forwarded. 11725 */ 11726 offset = ntohs(ipha->ipha_fragment_offset_and_flags) & IPH_OFFSET; 11727 11728 /* TODO why is this test needed? */ 11729 if (((max_frag - ntohs(ipha->ipha_length)) & ~7) < 8) { 11730 /* TODO: notify ulp somehow */ 11731 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails); 11732 ip_drop_output("FragFails: bad starting offset", mp, ill); 11733 freemsg(mp); 11734 return (EINVAL); 11735 } 11736 11737 hdr_len = IPH_HDR_LENGTH(ipha); 11738 ipha->ipha_hdr_checksum = 0; 11739 11740 /* 11741 * Establish the number of bytes maximum per frag, after putting 11742 * in the header. 
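 *
 * Worked example (added): with max_frag = 1500 and a 20-byte simple
 * header, len = (1500 - 20) & ~7 = 1480 data bytes per fragment. The
 * & ~7 keeps every non-terminal fragment a multiple of 8 bytes, since
 * the IP fragment offset field counts in 8-byte units.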
11743 */ 11744 len = (max_frag - hdr_len) & ~7; 11745 11746 /* Get a copy of the header for the trailing frags */ 11747 hdr_mp = ip_fragment_copyhdr((uchar_t *)ipha, hdr_len, offset, ipst, 11748 mp); 11749 if (hdr_mp == NULL) { 11750 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails); 11751 ip_drop_output("FragFails: no hdr_mp", mp, ill); 11752 freemsg(mp); 11753 return (ENOBUFS); 11754 } 11755 11756 /* Store the starting offset, with the MoreFrags flag. */ 11757 i1 = offset | IPH_MF | frag_flag; 11758 ipha->ipha_fragment_offset_and_flags = htons((uint16_t)i1); 11759 11760 /* Establish the ending byte offset, based on the starting offset. */ 11761 offset <<= 3; 11762 ip_data_end = offset + ntohs(ipha->ipha_length) - hdr_len; 11763 11764 /* Store the length of the first fragment in the IP header. */ 11765 i1 = len + hdr_len; 11766 ASSERT(i1 <= IP_MAXPACKET); 11767 ipha->ipha_length = htons((uint16_t)i1); 11768 11769 /* 11770 * Compute the IP header checksum for the first frag. We have to 11771 * watch out that we stop at the end of the header. 11772 */ 11773 ipha->ipha_hdr_checksum = ip_csum_hdr(ipha); 11774 11775 /* 11776 * Now carve off the first frag. Note that this will include the 11777 * original IP header. 11778 */ 11779 if (!(mp = ip_carve_mp(&mp_orig, i1))) { 11780 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails); 11781 ip_drop_output("FragFails: could not carve mp", mp_orig, ill); 11782 freeb(hdr_mp); 11783 freemsg(mp_orig); 11784 return (ENOBUFS); 11785 } 11786 11787 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragCreates); 11788 11789 error = postfragfn(mp, nce, ixaflags, i1, xmit_hint, szone, nolzid, 11790 ixa_cookie); 11791 if (error != 0 && error != EWOULDBLOCK) { 11792 /* No point in sending the other fragments */ 11793 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails); 11794 ip_drop_output("FragFails: postfragfn failed", mp_orig, ill); 11795 freeb(hdr_mp); 11796 freemsg(mp_orig); 11797 return (error); 11798 } 11799 11800 /* No need to redo state machine in loop */ 11801 ixaflags &= ~IXAF_REACH_CONF; 11802 11803 /* Advance the offset to the second frag starting point. */ 11804 offset += len; 11805 /* 11806 * Update hdr_len from the copied header - there might be less options 11807 * in the later fragments. 11808 */ 11809 hdr_len = IPH_HDR_LENGTH(hdr_mp->b_rptr); 11810 /* Loop until done. */ 11811 for (;;) { 11812 uint16_t offset_and_flags; 11813 uint16_t ip_len; 11814 11815 if (ip_data_end - offset > len) { 11816 /* 11817 * Carve off the appropriate amount from the original 11818 * datagram. 11819 */ 11820 if (!(carve_mp = ip_carve_mp(&mp_orig, len))) { 11821 mp = NULL; 11822 break; 11823 } 11824 /* 11825 * More frags after this one. Get another copy 11826 * of the header. 11827 */ 11828 if (carve_mp->b_datap->db_ref == 1 && 11829 hdr_mp->b_wptr - hdr_mp->b_rptr < 11830 carve_mp->b_rptr - carve_mp->b_datap->db_base) { 11831 /* Inline IP header */ 11832 carve_mp->b_rptr -= hdr_mp->b_wptr - 11833 hdr_mp->b_rptr; 11834 bcopy(hdr_mp->b_rptr, carve_mp->b_rptr, 11835 hdr_mp->b_wptr - hdr_mp->b_rptr); 11836 mp = carve_mp; 11837 } else { 11838 if (!(mp = copyb(hdr_mp))) { 11839 freemsg(carve_mp); 11840 break; 11841 } 11842 /* Get priority marking, if any. */ 11843 mp->b_band = priority; 11844 mp->b_cont = carve_mp; 11845 } 11846 ipha = (ipha_t *)mp->b_rptr; 11847 offset_and_flags = IPH_MF; 11848 } else { 11849 /* 11850 * Last frag. Consume the header. Set len to 11851 * the length of this last piece. 
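 *
 * (Added note: "consume" means hdr_mp itself becomes the last fragment,
 * either directly or by being replaced via the inline-header path below,
 * so a successful exit from the loop never leaks hdr_mp.)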
11852 */ 11853 len = ip_data_end - offset; 11854 11855 /* 11856 * Carve off the appropriate amount from the original 11857 * datagram. 11858 */ 11859 if (!(carve_mp = ip_carve_mp(&mp_orig, len))) { 11860 mp = NULL; 11861 break; 11862 } 11863 if (carve_mp->b_datap->db_ref == 1 && 11864 hdr_mp->b_wptr - hdr_mp->b_rptr < 11865 carve_mp->b_rptr - carve_mp->b_datap->db_base) { 11866 /* Inline IP header */ 11867 carve_mp->b_rptr -= hdr_mp->b_wptr - 11868 hdr_mp->b_rptr; 11869 bcopy(hdr_mp->b_rptr, carve_mp->b_rptr, 11870 hdr_mp->b_wptr - hdr_mp->b_rptr); 11871 mp = carve_mp; 11872 freeb(hdr_mp); 11873 hdr_mp = mp; 11874 } else { 11875 mp = hdr_mp; 11876 /* Get priority marking, if any. */ 11877 mp->b_band = priority; 11878 mp->b_cont = carve_mp; 11879 } 11880 ipha = (ipha_t *)mp->b_rptr; 11881 /* A frag of a frag might have IPH_MF non-zero */ 11882 offset_and_flags = 11883 ntohs(ipha->ipha_fragment_offset_and_flags) & 11884 IPH_MF; 11885 } 11886 offset_and_flags |= (uint16_t)(offset >> 3); 11887 offset_and_flags |= (uint16_t)frag_flag; 11888 /* Store the offset and flags in the IP header. */ 11889 ipha->ipha_fragment_offset_and_flags = htons(offset_and_flags); 11890 11891 /* Store the length in the IP header. */ 11892 ip_len = (uint16_t)(len + hdr_len); 11893 ipha->ipha_length = htons(ip_len); 11894 11895 /* 11896 * Set the IP header checksum. Note that mp is just 11897 * the header, so this is easy to pass to ip_csum. 11898 */ 11899 ipha->ipha_hdr_checksum = ip_csum_hdr(ipha); 11900 11901 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragCreates); 11902 11903 error = postfragfn(mp, nce, ixaflags, ip_len, xmit_hint, szone, 11904 nolzid, ixa_cookie); 11905 /* All done if we just consumed the hdr_mp. */ 11906 if (mp == hdr_mp) { 11907 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragOKs); 11908 return (error); 11909 } 11910 if (error != 0 && error != EWOULDBLOCK) { 11911 DTRACE_PROBE2(ip__xmit__frag__fail, ill_t *, ill, 11912 mblk_t *, hdr_mp); 11913 /* No point in sending the other fragments */ 11914 break; 11915 } 11916 11917 /* Otherwise, advance and loop. */ 11918 offset += len; 11919 } 11920 /* Clean up following allocation failure. 
*/ 11921 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutFragFails); 11922 ip_drop_output("FragFails: loop ended", NULL, ill); 11923 if (mp != hdr_mp) 11924 freeb(hdr_mp); 11925 if (mp != mp_orig) 11926 freemsg(mp_orig); 11927 return (error); 11928 } 11929 11930 /* 11931 * Copy the header plus those options which have the copy bit set 11932 */ 11933 static mblk_t * 11934 ip_fragment_copyhdr(uchar_t *rptr, int hdr_len, int offset, ip_stack_t *ipst, 11935 mblk_t *src) 11936 { 11937 mblk_t *mp; 11938 uchar_t *up; 11939 11940 /* 11941 * Quick check if we need to look for options without the copy bit 11942 * set 11943 */ 11944 mp = allocb_tmpl(ipst->ips_ip_wroff_extra + hdr_len, src); 11945 if (!mp) 11946 return (mp); 11947 mp->b_rptr += ipst->ips_ip_wroff_extra; 11948 if (hdr_len == IP_SIMPLE_HDR_LENGTH || offset != 0) { 11949 bcopy(rptr, mp->b_rptr, hdr_len); 11950 mp->b_wptr += hdr_len + ipst->ips_ip_wroff_extra; 11951 return (mp); 11952 } 11953 up = mp->b_rptr; 11954 bcopy(rptr, up, IP_SIMPLE_HDR_LENGTH); 11955 up += IP_SIMPLE_HDR_LENGTH; 11956 rptr += IP_SIMPLE_HDR_LENGTH; 11957 hdr_len -= IP_SIMPLE_HDR_LENGTH; 11958 while (hdr_len > 0) { 11959 uint32_t optval; 11960 uint32_t optlen; 11961 11962 optval = *rptr; 11963 if (optval == IPOPT_EOL) 11964 break; 11965 if (optval == IPOPT_NOP) 11966 optlen = 1; 11967 else 11968 optlen = rptr[1]; 11969 if (optval & IPOPT_COPY) { 11970 bcopy(rptr, up, optlen); 11971 up += optlen; 11972 } 11973 rptr += optlen; 11974 hdr_len -= optlen; 11975 } 11976 /* 11977 * Make sure that we drop an even number of words by filling 11978 * with EOL to the next word boundary. 11979 */ 11980 for (hdr_len = up - (mp->b_rptr + IP_SIMPLE_HDR_LENGTH); 11981 hdr_len & 0x3; hdr_len++) 11982 *up++ = IPOPT_EOL; 11983 mp->b_wptr = up; 11984 /* Update header length */ 11985 mp->b_rptr[0] = (uint8_t)((IP_VERSION << 4) | ((up - mp->b_rptr) >> 2)); 11986 return (mp); 11987 } 11988 11989 /* 11990 * Update any source route, record route, or timestamp options when 11991 * sending a packet back to ourselves. 11992 * Check that we are at end of strict source route. 11993 * The options have been sanity checked by ip_output_options(). 11994 */ 11995 void 11996 ip_output_local_options(ipha_t *ipha, ip_stack_t *ipst) 11997 { 11998 ipoptp_t opts; 11999 uchar_t *opt; 12000 uint8_t optval; 12001 uint8_t optlen; 12002 ipaddr_t dst; 12003 uint32_t ts; 12004 timestruc_t now; 12005 12006 for (optval = ipoptp_first(&opts, ipha); 12007 optval != IPOPT_EOL; 12008 optval = ipoptp_next(&opts)) { 12009 opt = opts.ipoptp_cur; 12010 optlen = opts.ipoptp_len; 12011 ASSERT((opts.ipoptp_flags & IPOPTP_ERROR) == 0); 12012 switch (optval) { 12013 uint32_t off; 12014 case IPOPT_SSRR: 12015 case IPOPT_LSRR: 12016 off = opt[IPOPT_OFFSET]; 12017 off--; 12018 if (optlen < IP_ADDR_LEN || 12019 off > optlen - IP_ADDR_LEN) { 12020 /* End of source route */ 12021 break; 12022 } 12023 /* 12024 * This will only happen if two consecutive entries 12025 * in the source route contain our address or if 12026 * it is a packet with a loose source route which 12027 * reaches us before consuming the whole source route. 12028 */ 12029 12030 if (optval == IPOPT_SSRR) { 12031 return; 12032 } 12033 /* 12034 * Hack: instead of dropping the packet truncate the 12035 * source route to what has been used by filling the 12036 * rest with IPOPT_NOP.
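 *
 * Illustrative before/after (assumed three-slot LSRR with one hop
 * consumed and two unused):
 *
 *	before:	LSRR len=15 ptr=8  [hop1][hop2][hop3]
 *	after:	LSRR len=7  ptr=8  [hop1] followed by 8 IPOPT_NOP bytes
 *
 * i.e. the option length is clipped to what was consumed and the spare
 * bytes become harmless no-ops.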
12037 */ 12038 opt[IPOPT_OLEN] = (uint8_t)off; 12039 while (off < optlen) { 12040 opt[off++] = IPOPT_NOP; 12041 } 12042 break; 12043 case IPOPT_RR: 12044 off = opt[IPOPT_OFFSET]; 12045 off--; 12046 if (optlen < IP_ADDR_LEN || 12047 off > optlen - IP_ADDR_LEN) { 12048 /* No more room - ignore */ 12049 ip1dbg(( 12050 "ip_output_local_options: end of RR\n")); 12051 break; 12052 } 12053 dst = htonl(INADDR_LOOPBACK); 12054 bcopy(&dst, (char *)opt + off, IP_ADDR_LEN); 12055 opt[IPOPT_OFFSET] += IP_ADDR_LEN; 12056 break; 12057 case IPOPT_TS: 12058 /* Insert timestamp if there is room */ 12059 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) { 12060 case IPOPT_TS_TSONLY: 12061 off = IPOPT_TS_TIMELEN; 12062 break; 12063 case IPOPT_TS_PRESPEC: 12064 case IPOPT_TS_PRESPEC_RFC791: 12065 /* Verify that the address matches */ 12066 off = opt[IPOPT_OFFSET] - 1; 12067 bcopy((char *)opt + off, &dst, IP_ADDR_LEN); 12068 if (ip_type_v4(dst, ipst) != IRE_LOCAL) { 12069 /* Not for us */ 12070 break; 12071 } 12072 /* FALLTHRU */ 12073 case IPOPT_TS_TSANDADDR: 12074 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN; 12075 break; 12076 default: 12077 /* 12078 * ip_*put_options should have already 12079 * dropped this packet. 12080 */ 12081 cmn_err(CE_PANIC, "ip_output_local_options: " 12082 "unknown IT - bug in ip_output_options?\n"); 12083 return; /* Keep "lint" happy */ 12084 } 12085 if (opt[IPOPT_OFFSET] - 1 + off > optlen) { 12086 /* Increase overflow counter */ 12087 off = (opt[IPOPT_POS_OV_FLG] >> 4) + 1; 12088 opt[IPOPT_POS_OV_FLG] = (uint8_t) 12089 (opt[IPOPT_POS_OV_FLG] & 0x0F) | 12090 (off << 4); 12091 break; 12092 } 12093 off = opt[IPOPT_OFFSET] - 1; 12094 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) { 12095 case IPOPT_TS_PRESPEC: 12096 case IPOPT_TS_PRESPEC_RFC791: 12097 case IPOPT_TS_TSANDADDR: 12098 dst = htonl(INADDR_LOOPBACK); 12099 bcopy(&dst, (char *)opt + off, IP_ADDR_LEN); 12100 opt[IPOPT_OFFSET] += IP_ADDR_LEN; 12101 /* FALLTHRU */ 12102 case IPOPT_TS_TSONLY: 12103 off = opt[IPOPT_OFFSET] - 1; 12104 /* Compute # of milliseconds since midnight */ 12105 gethrestime(&now); 12106 ts = (now.tv_sec % (24 * 60 * 60)) * 1000 + 12107 now.tv_nsec / (NANOSEC / MILLISEC); 12108 bcopy(&ts, (char *)opt + off, IPOPT_TS_TIMELEN); 12109 opt[IPOPT_OFFSET] += IPOPT_TS_TIMELEN; 12110 break; 12111 } 12112 break; 12113 } 12114 } 12115 } 12116 12117 /* 12118 * Prepend an M_DATA fastpath header, and if none present prepend a 12119 * DL_UNITDATA_REQ. Frees the mblk on failure. 12120 * 12121 * nce_dlur_mp and nce_fp_mp cannot disappear once they have been set. 12122 * If there is a change to them, the nce will be deleted (condemned) and 12123 * a new nce_t will be created when packets are sent. Thus we need no locks 12124 * to access those fields. 12125 * 12126 * We preserve b_band to support IPQoS. If a DL_UNITDATA_REQ is prepended 12127 * we place b_band in dl_priority.dl_max.
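 *
 * Sketch of the three outcomes handled below (added summary):
 *	1. nce_fp_mp present and the dblk has hlen bytes of headroom:
 *	   the fastpath header is copied in front of the payload in place.
 *	2. nce_fp_mp present but insufficient headroom: a copyb() of the
 *	   fastpath header is prepended and the checksum/LSO attributes
 *	   are carried over.
 *	3. no fastpath header: a copyb() of nce_dlur_mp (a DL_UNITDATA_REQ)
 *	   is prepended instead.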
12128 */ 12129 static mblk_t * 12130 ip_xmit_attach_llhdr(mblk_t *mp, nce_t *nce) 12131 { 12132 uint_t hlen; 12133 mblk_t *mp1; 12134 uint_t priority; 12135 uchar_t *rptr; 12136 12137 rptr = mp->b_rptr; 12138 12139 ASSERT(DB_TYPE(mp) == M_DATA); 12140 priority = mp->b_band; 12141 12142 ASSERT(nce != NULL); 12143 if ((mp1 = nce->nce_fp_mp) != NULL) { 12144 hlen = MBLKL(mp1); 12145 /* 12146 * Check if we have enough room to prepend fastpath 12147 * header 12148 */ 12149 if (hlen != 0 && (rptr - mp->b_datap->db_base) >= hlen) { 12150 rptr -= hlen; 12151 bcopy(mp1->b_rptr, rptr, hlen); 12152 /* 12153 * Set the b_rptr to the start of the link layer 12154 * header 12155 */ 12156 mp->b_rptr = rptr; 12157 return (mp); 12158 } 12159 mp1 = copyb(mp1); 12160 if (mp1 == NULL) { 12161 ill_t *ill = nce->nce_ill; 12162 12163 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards); 12164 ip_drop_output("ipIfStatsOutDiscards", mp, ill); 12165 freemsg(mp); 12166 return (NULL); 12167 } 12168 mp1->b_band = priority; 12169 mp1->b_cont = mp; 12170 DB_CKSUMSTART(mp1) = DB_CKSUMSTART(mp); 12171 DB_CKSUMSTUFF(mp1) = DB_CKSUMSTUFF(mp); 12172 DB_CKSUMEND(mp1) = DB_CKSUMEND(mp); 12173 DB_CKSUMFLAGS(mp1) = DB_CKSUMFLAGS(mp); 12174 DB_LSOMSS(mp1) = DB_LSOMSS(mp); 12175 DTRACE_PROBE1(ip__xmit__copyb, (mblk_t *), mp1); 12176 /* 12177 * XXX disable ICK_VALID and compute checksum 12178 * here; can happen if nce_fp_mp changes and 12179 * it can't be copied now due to insufficient 12180 * space. (unlikely, fp mp can change, but it 12181 * does not increase in length) 12182 */ 12183 return (mp1); 12184 } 12185 mp1 = copyb(nce->nce_dlur_mp); 12186 12187 if (mp1 == NULL) { 12188 ill_t *ill = nce->nce_ill; 12189 12190 BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards); 12191 ip_drop_output("ipIfStatsOutDiscards", mp, ill); 12192 freemsg(mp); 12193 return (NULL); 12194 } 12195 mp1->b_cont = mp; 12196 if (priority != 0) { 12197 mp1->b_band = priority; 12198 ((dl_unitdata_req_t *)(mp1->b_rptr))->dl_priority.dl_max = 12199 priority; 12200 } 12201 return (mp1); 12202 #undef rptr 12203 } 12204 12205 /* 12206 * Finish the outbound IPsec processing. This function is called from 12207 * ipsec_out_process() if the IPsec packet was processed 12208 * synchronously, or from {ah,esp}_kcf_callback_outbound() if it was processed 12209 * asynchronously. 12210 * 12211 * This is common to IPv4 and IPv6. 12212 */ 12213 int 12214 ip_output_post_ipsec(mblk_t *mp, ip_xmit_attr_t *ixa) 12215 { 12216 iaflags_t ixaflags = ixa->ixa_flags; 12217 uint_t pktlen; 12218 12219 12220 /* AH/ESP don't update ixa_pktlen when they modify the packet */ 12221 if (ixaflags & IXAF_IS_IPV4) { 12222 ipha_t *ipha = (ipha_t *)mp->b_rptr; 12223 12224 ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION); 12225 pktlen = ntohs(ipha->ipha_length); 12226 } else { 12227 ip6_t *ip6h = (ip6_t *)mp->b_rptr; 12228 12229 ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV6_VERSION); 12230 pktlen = ntohs(ip6h->ip6_plen) + IPV6_HDR_LEN; 12231 } 12232 12233 /* 12234 * We release any hard reference on the SAs here to make 12235 * sure the SAs can be garbage collected. ipsr_sa has a soft reference 12236 * on the SAs. 12237 * If in the future we want the hard latching of the SAs in the 12238 * ip_xmit_attr_t then we should remove this. 
12239 */ 12240 if (ixa->ixa_ipsec_esp_sa != NULL) { 12241 IPSA_REFRELE(ixa->ixa_ipsec_esp_sa); 12242 ixa->ixa_ipsec_esp_sa = NULL; 12243 } 12244 if (ixa->ixa_ipsec_ah_sa != NULL) { 12245 IPSA_REFRELE(ixa->ixa_ipsec_ah_sa); 12246 ixa->ixa_ipsec_ah_sa = NULL; 12247 } 12248 12249 /* Do we need to fragment? */ 12250 if ((ixa->ixa_flags & IXAF_IPV6_ADD_FRAGHDR) || 12251 pktlen > ixa->ixa_fragsize) { 12252 if (ixaflags & IXAF_IS_IPV4) { 12253 ASSERT(!(ixa->ixa_flags & IXAF_IPV6_ADD_FRAGHDR)); 12254 /* 12255 * We check for the DF case in ipsec_out_process 12256 * hence this only handles the non-DF case. 12257 */ 12258 return (ip_fragment_v4(mp, ixa->ixa_nce, ixa->ixa_flags, 12259 pktlen, ixa->ixa_fragsize, 12260 ixa->ixa_xmit_hint, ixa->ixa_zoneid, 12261 ixa->ixa_no_loop_zoneid, ixa->ixa_postfragfn, 12262 &ixa->ixa_cookie)); 12263 } else { 12264 mp = ip_fraghdr_add_v6(mp, ixa->ixa_ident, ixa); 12265 if (mp == NULL) { 12266 /* MIB and ip_drop_output already done */ 12267 return (ENOMEM); 12268 } 12269 pktlen += sizeof (ip6_frag_t); 12270 if (pktlen > ixa->ixa_fragsize) { 12271 return (ip_fragment_v6(mp, ixa->ixa_nce, 12272 ixa->ixa_flags, pktlen, 12273 ixa->ixa_fragsize, ixa->ixa_xmit_hint, 12274 ixa->ixa_zoneid, ixa->ixa_no_loop_zoneid, 12275 ixa->ixa_postfragfn, &ixa->ixa_cookie)); 12276 } 12277 } 12278 } 12279 return ((ixa->ixa_postfragfn)(mp, ixa->ixa_nce, ixa->ixa_flags, 12280 pktlen, ixa->ixa_xmit_hint, ixa->ixa_zoneid, 12281 ixa->ixa_no_loop_zoneid, NULL)); 12282 } 12283 12284 /* 12285 * Finish the inbound IPsec processing. This function is called once 12286 * AH and/or ESP input processing has completed for the packet, whether 12287 * that processing ran synchronously or (via kEF callbacks) 12288 * asynchronously. 12289 * 12290 * This is common to IPv4 and IPv6. 12291 */ 12292 void 12293 ip_input_post_ipsec(mblk_t *mp, ip_recv_attr_t *ira) 12294 { 12295 iaflags_t iraflags = ira->ira_flags; 12296 12297 /* Length might have changed */ 12298 if (iraflags & IRAF_IS_IPV4) { 12299 ipha_t *ipha = (ipha_t *)mp->b_rptr; 12300 12301 ASSERT(IPH_HDR_VERSION(ipha) == IPV4_VERSION); 12302 ira->ira_pktlen = ntohs(ipha->ipha_length); 12303 ira->ira_ip_hdr_length = IPH_HDR_LENGTH(ipha); 12304 ira->ira_protocol = ipha->ipha_protocol; 12305 12306 ip_fanout_v4(mp, ipha, ira); 12307 } else { 12308 ip6_t *ip6h = (ip6_t *)mp->b_rptr; 12309 uint8_t *nexthdrp; 12310 12311 ASSERT(IPH_HDR_VERSION(mp->b_rptr) == IPV6_VERSION); 12312 ira->ira_pktlen = ntohs(ip6h->ip6_plen) + IPV6_HDR_LEN; 12313 if (!ip_hdr_length_nexthdr_v6(mp, ip6h, &ira->ira_ip_hdr_length, 12314 &nexthdrp)) { 12315 /* Malformed packet */ 12316 BUMP_MIB(ira->ira_ill->ill_ip_mib, ipIfStatsInDiscards); 12317 ip_drop_input("ipIfStatsInDiscards", mp, ira->ira_ill); 12318 freemsg(mp); 12319 return; 12320 } 12321 ira->ira_protocol = *nexthdrp; 12322 ip_fanout_v6(mp, ip6h, ira); 12323 } 12324 } 12325 12326 /* 12327 * Select which AH & ESP SAs to use (if any) for the outbound packet. 12328 * 12329 * If this function returns B_TRUE, the requested SAs have been filled 12330 * into the ixa_ipsec_*_sa pointers. 12331 * 12332 * If the function returns B_FALSE, the packet has been "consumed", most 12333 * likely by an ACQUIRE sent up via PF_KEY to a key management daemon. 12334 * 12335 * The SA references created by the protocol-specific "select" 12336 * function will be released in ip_output_post_ipsec.
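 *
 * (Added usage note: on a B_FALSE return the mblk has been handed to
 * sadb_acquire() and any partially-selected SA references have already
 * been released, so the caller must not touch mp and simply stops
 * processing the packet.)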
12337 */ 12338 static boolean_t 12339 ipsec_out_select_sa(mblk_t *mp, ip_xmit_attr_t *ixa) 12340 { 12341 boolean_t need_ah_acquire = B_FALSE, need_esp_acquire = B_FALSE; 12342 ipsec_policy_t *pp; 12343 ipsec_action_t *ap; 12344 12345 ASSERT(ixa->ixa_flags & IXAF_IPSEC_SECURE); 12346 ASSERT((ixa->ixa_ipsec_policy != NULL) || 12347 (ixa->ixa_ipsec_action != NULL)); 12348 12349 ap = ixa->ixa_ipsec_action; 12350 if (ap == NULL) { 12351 pp = ixa->ixa_ipsec_policy; 12352 ASSERT(pp != NULL); 12353 ap = pp->ipsp_act; 12354 ASSERT(ap != NULL); 12355 } 12356 12357 /* 12358 * We have an action. Now, let's select SAs. 12359 * A side effect of setting ixa_ipsec_*_sa is that it will 12360 * be cached in the conn_t. 12361 */ 12362 if (ap->ipa_want_esp) { 12363 if (ixa->ixa_ipsec_esp_sa == NULL) { 12364 need_esp_acquire = !ipsec_outbound_sa(mp, ixa, 12365 IPPROTO_ESP); 12366 } 12367 ASSERT(need_esp_acquire || ixa->ixa_ipsec_esp_sa != NULL); 12368 } 12369 12370 if (ap->ipa_want_ah) { 12371 if (ixa->ixa_ipsec_ah_sa == NULL) { 12372 need_ah_acquire = !ipsec_outbound_sa(mp, ixa, 12373 IPPROTO_AH); 12374 } 12375 ASSERT(need_ah_acquire || ixa->ixa_ipsec_ah_sa != NULL); 12376 /* 12377 * The ESP and AH processing order needs to be preserved 12378 * when both protocols are required (ESP should be applied 12379 * before AH for an outbound packet). Force an ESP ACQUIRE 12380 * when both ESP and AH are required, and an AH ACQUIRE 12381 * is needed. 12382 */ 12383 if (ap->ipa_want_esp && need_ah_acquire) 12384 need_esp_acquire = B_TRUE; 12385 } 12386 12387 /* 12388 * Send an ACQUIRE (extended, regular, or both) if we need one. 12389 * Release SAs that got referenced, but will not be used until we 12390 * acquire _all_ of the SAs we need. 12391 */ 12392 if (need_ah_acquire || need_esp_acquire) { 12393 if (ixa->ixa_ipsec_ah_sa != NULL) { 12394 IPSA_REFRELE(ixa->ixa_ipsec_ah_sa); 12395 ixa->ixa_ipsec_ah_sa = NULL; 12396 } 12397 if (ixa->ixa_ipsec_esp_sa != NULL) { 12398 IPSA_REFRELE(ixa->ixa_ipsec_esp_sa); 12399 ixa->ixa_ipsec_esp_sa = NULL; 12400 } 12401 12402 sadb_acquire(mp, ixa, need_ah_acquire, need_esp_acquire); 12403 return (B_FALSE); 12404 } 12405 12406 return (B_TRUE); 12407 } 12408 12409 /* 12410 * Handle IPsec output processing. 12411 * This function is only entered once for a given packet. 12412 * We try to do things synchronously, but if we need to have user-level 12413 * set up SAs, or ESP or AH uses asynchronous kEF, then the operation 12414 * will be completed 12415 * - when the SAs are added in esp_add_sa_finish/ah_add_sa_finish 12416 * - when asynchronous ESP is done it will do AH 12417 * 12418 * In all cases we come back in ip_output_post_ipsec() to fragment and 12419 * send out the packet.
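 *
 * Illustrative ordering for the IPv4 self-encapsulation case (added,
 * assuming ipa_want_se with both ESP and AH): starting from [IP|data]
 * the packet first becomes [IP'|IP|data] with the outer header marked
 * IPPROTO_ENCAP, then ESP is applied, and finally AH.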
12420  */
12421 int
12422 ipsec_out_process(mblk_t *mp, ip_xmit_attr_t *ixa)
12423 {
12424 	ill_t *ill = ixa->ixa_nce->nce_ill;
12425 	ip_stack_t *ipst = ixa->ixa_ipst;
12426 	ipsec_stack_t *ipss;
12427 	ipsec_policy_t *pp;
12428 	ipsec_action_t *ap;
12429 
12430 	ASSERT(ixa->ixa_flags & IXAF_IPSEC_SECURE);
12431 
12432 	ASSERT((ixa->ixa_ipsec_policy != NULL) ||
12433 	    (ixa->ixa_ipsec_action != NULL));
12434 
12435 	ipss = ipst->ips_netstack->netstack_ipsec;
12436 	if (!ipsec_loaded(ipss)) {
12437 		BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
12438 		ip_drop_packet(mp, B_TRUE, ill,
12439 		    DROPPER(ipss, ipds_ip_ipsec_not_loaded),
12440 		    &ipss->ipsec_dropper);
12441 		return (ENOTSUP);
12442 	}
12443 
12444 	ap = ixa->ixa_ipsec_action;
12445 	if (ap == NULL) {
12446 		pp = ixa->ixa_ipsec_policy;
12447 		ASSERT(pp != NULL);
12448 		ap = pp->ipsp_act;
12449 		ASSERT(ap != NULL);
12450 	}
12451 
12452 	/* Handle explicit drop action and bypass. */
12453 	switch (ap->ipa_act.ipa_type) {
12454 	case IPSEC_ACT_DISCARD:
12455 	case IPSEC_ACT_REJECT:
12456 		ip_drop_packet(mp, B_FALSE, ill,
12457 		    DROPPER(ipss, ipds_spd_explicit), &ipss->ipsec_spd_dropper);
12458 		return (EHOSTUNREACH);	/* IPsec policy failure */
12459 	case IPSEC_ACT_BYPASS:
12460 		return (ip_output_post_ipsec(mp, ixa));
12461 	}
12462 
12463 	/*
12464 	 * The order of processing is to first insert an IP header if needed.
12465 	 * Then insert the ESP header and then the AH header.
12466 	 */
12467 	if ((ixa->ixa_flags & IXAF_IS_IPV4) && ap->ipa_want_se) {
12468 		/*
12469 		 * First get the outer IP header before sending
12470 		 * it to ESP.
12471 		 */
12472 		ipha_t *oipha, *iipha;
12473 		mblk_t *outer_mp, *inner_mp;
12474 
12475 		if ((outer_mp = allocb(sizeof (ipha_t), BPRI_HI)) == NULL) {
12476 			(void) mi_strlog(ill->ill_rq, 0,
12477 			    SL_ERROR|SL_TRACE|SL_CONSOLE,
12478 			    "ipsec_out_process: "
12479 			    "Self-Encapsulation failed: Out of memory\n");
12480 			BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
12481 			ip_drop_output("ipIfStatsOutDiscards", mp, ill);
12482 			freemsg(mp);
12483 			return (ENOBUFS);
12484 		}
12485 		inner_mp = mp;
12486 		ASSERT(inner_mp->b_datap->db_type == M_DATA);
12487 		oipha = (ipha_t *)outer_mp->b_rptr;
12488 		iipha = (ipha_t *)inner_mp->b_rptr;
12489 		*oipha = *iipha;
12490 		outer_mp->b_wptr += sizeof (ipha_t);
12491 		oipha->ipha_length = htons(ntohs(iipha->ipha_length) +
12492 		    sizeof (ipha_t));
12493 		oipha->ipha_protocol = IPPROTO_ENCAP;
12494 		oipha->ipha_version_and_hdr_length =
12495 		    IP_SIMPLE_HDR_VERSION;
12496 		oipha->ipha_hdr_checksum = 0;
12497 		oipha->ipha_hdr_checksum = ip_csum_hdr(oipha);
12498 		outer_mp->b_cont = inner_mp;
12499 		mp = outer_mp;
12500 
12501 		ixa->ixa_flags |= IXAF_IPSEC_TUNNEL;
12502 	}
12503 
12504 	/* If we need to wait for an SA then we can't return any errno */
12505 	if (((ap->ipa_want_ah && (ixa->ixa_ipsec_ah_sa == NULL)) ||
12506 	    (ap->ipa_want_esp && (ixa->ixa_ipsec_esp_sa == NULL))) &&
12507 	    !ipsec_out_select_sa(mp, ixa))
12508 		return (0);
12509 
12510 	/*
12511 	 * By now, we know what SA's to use. Toss over to ESP & AH
12512 	 * to do the heavy lifting.
12513 	 */
12514 	if (ap->ipa_want_esp) {
12515 		ASSERT(ixa->ixa_ipsec_esp_sa != NULL);
12516 
12517 		mp = ixa->ixa_ipsec_esp_sa->ipsa_output_func(mp, ixa);
12518 		if (mp == NULL) {
12519 			/*
12520 			 * Either it failed or is pending. In the former case
12521 			 * ipIfStatsOutDiscards was increased.
12522 			 */
12523 			return (0);
12524 		}
12525 	}
12526 
12527 	if (ap->ipa_want_ah) {
12528 		ASSERT(ixa->ixa_ipsec_ah_sa != NULL);
12529 
12530 		mp = ixa->ixa_ipsec_ah_sa->ipsa_output_func(mp, ixa);
12531 		if (mp == NULL) {
12532 			/*
12533 			 * Either it failed or is pending. In the former case
12534 			 * ipIfStatsOutDiscards was increased.
12535 			 */
12536 			return (0);
12537 		}
12538 	}
12539 	/*
12540 	 * We are done with IPsec processing. Send it over
12541 	 * the wire.
12542 	 */
12543 	return (ip_output_post_ipsec(mp, ixa));
12544 }
12545 
12546 /*
12547  * ioctls that go through a down/up sequence may need to wait for the down
12548  * to complete. This involves waiting for the ire and ipif refcnts to go down
12549  * to zero. Subsequently the ioctl is restarted from ipif_ill_refrele_tail.
12550  */
12551 /* ARGSUSED */
12552 void
12553 ip_reprocess_ioctl(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *dummy_arg)
12554 {
12555 	struct iocblk *iocp;
12556 	mblk_t *mp1;
12557 	ip_ioctl_cmd_t *ipip;
12558 	int err;
12559 	sin_t *sin;
12560 	struct lifreq *lifr;
12561 	struct ifreq *ifr;
12562 
12563 	iocp = (struct iocblk *)mp->b_rptr;
12564 	ASSERT(ipsq != NULL);
12565 	/* Existence of mp1 verified in ip_wput_nondata */
12566 	mp1 = mp->b_cont->b_cont;
12567 	ipip = ip_sioctl_lookup(iocp->ioc_cmd);
12568 	if (ipip->ipi_cmd == SIOCSLIFNAME || ipip->ipi_cmd == IF_UNITSEL) {
12569 		/*
12570 		 * Special case where ipx_current_ipif is not set:
12571 		 * ill_phyint_reinit merged the v4 and v6 into a single ipsq.
12572 		 * We are here as we were not able to complete the operation in
12573 		 * ipif_set_values because we could not become exclusive on
12574 		 * the new ipsq.
12575 		 */
12576 		ill_t *ill = q->q_ptr;
12577 		ipsq_current_start(ipsq, ill->ill_ipif, ipip->ipi_cmd);
12578 	}
12579 	ASSERT(ipsq->ipsq_xop->ipx_current_ipif != NULL);
12580 
12581 	if (ipip->ipi_cmd_type == IF_CMD) {
12582 		/* This is an old style SIOC[GS]IF* command */
12583 		ifr = (struct ifreq *)mp1->b_rptr;
12584 		sin = (sin_t *)&ifr->ifr_addr;
12585 	} else if (ipip->ipi_cmd_type == LIF_CMD) {
12586 		/* This is a new style SIOC[GS]LIF* command */
12587 		lifr = (struct lifreq *)mp1->b_rptr;
12588 		sin = (sin_t *)&lifr->lifr_addr;
12589 	} else {
12590 		sin = NULL;
12591 	}
12592 
12593 	err = (*ipip->ipi_func_restart)(ipsq->ipsq_xop->ipx_current_ipif, sin,
12594 	    q, mp, ipip, mp1->b_rptr);
12595 
12596 	DTRACE_PROBE4(ipif__ioctl, char *, "ip_reprocess_ioctl finish",
12597 	    int, ipip->ipi_cmd,
12598 	    ill_t *, ipsq->ipsq_xop->ipx_current_ipif->ipif_ill,
12599 	    ipif_t *, ipsq->ipsq_xop->ipx_current_ipif);
12600 
12601 	ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), ipsq);
12602 }
12603 
12604 /*
12605  * ioctl processing
12606  *
12607  * ioctl processing starts with ip_sioctl_copyin_setup(), which looks up
12608  * the ioctl command in the ioctl tables, determines the copyin data size
12609  * from the ipi_copyin_size field, and does an mi_copyin() of that size.
12610  *
12611  * ioctl processing then continues when the M_IOCDATA makes its way down to
12612  * ip_wput_nondata(). The ioctl is looked up again in the ioctl table, its
12613  * associated 'conn' is refheld till the end of the ioctl and the general
12614  * ioctl processing function ip_process_ioctl() is called to extract the
12615  * arguments and process the ioctl. To simplify extraction, ioctl commands
12616  * are "typed" based on the arguments they take (e.g., LIF_CMD which takes a
12617  * `struct lifreq'), and a common extract function (e.g., ip_extract_lifreq())
12618  * is used to extract the ioctl's arguments.
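 *
 * For reference, a userland consumer of one of these LIF_CMD ioctls
 * typically looks like the following (hypothetical sketch; "net0" and
 * the socket descriptor `s' are made-up):
 *
 *	struct lifreq lifr;
 *
 *	bzero(&lifr, sizeof (lifr));
 *	(void) strlcpy(lifr.lifr_name, "net0", sizeof (lifr.lifr_name));
 *	if (ioctl(s, SIOCGLIFADDR, (caddr_t)&lifr) < 0)
 *		perror("SIOCGLIFADDR");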
12619 * 12620 * ip_process_ioctl determines if the ioctl needs to be serialized, and if 12621 * so goes thru the serialization primitive ipsq_try_enter. Then the 12622 * appropriate function to handle the ioctl is called based on the entry in 12623 * the ioctl table. ioctl completion is encapsulated in ip_ioctl_finish 12624 * which also refreleases the 'conn' that was refheld at the start of the 12625 * ioctl. Finally ipsq_exit is called if needed to exit the ipsq. 12626 * 12627 * Many exclusive ioctls go thru an internal down up sequence as part of 12628 * the operation. For example an attempt to change the IP address of an 12629 * ipif entails ipif_down, set address, ipif_up. Bringing down the interface 12630 * does all the cleanup such as deleting all ires that use this address. 12631 * Then we need to wait till all references to the interface go away. 12632 */ 12633 void 12634 ip_process_ioctl(ipsq_t *ipsq, queue_t *q, mblk_t *mp, void *arg) 12635 { 12636 struct iocblk *iocp = (struct iocblk *)mp->b_rptr; 12637 ip_ioctl_cmd_t *ipip = arg; 12638 ip_extract_func_t *extract_funcp; 12639 cmd_info_t ci; 12640 int err; 12641 boolean_t entered_ipsq = B_FALSE; 12642 12643 ip3dbg(("ip_process_ioctl: ioctl %X\n", iocp->ioc_cmd)); 12644 12645 if (ipip == NULL) 12646 ipip = ip_sioctl_lookup(iocp->ioc_cmd); 12647 12648 /* 12649 * SIOCLIFADDIF needs to go thru a special path since the 12650 * ill may not exist yet. This happens in the case of lo0 12651 * which is created using this ioctl. 12652 */ 12653 if (ipip->ipi_cmd == SIOCLIFADDIF) { 12654 err = ip_sioctl_addif(NULL, NULL, q, mp, NULL, NULL); 12655 DTRACE_PROBE4(ipif__ioctl, char *, "ip_process_ioctl finish", 12656 int, ipip->ipi_cmd, ill_t *, NULL, ipif_t *, NULL); 12657 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), NULL); 12658 return; 12659 } 12660 12661 ci.ci_ipif = NULL; 12662 switch (ipip->ipi_cmd_type) { 12663 case MISC_CMD: 12664 case MSFILT_CMD: 12665 /* 12666 * All MISC_CMD ioctls come in here -- e.g. SIOCGLIFCONF. 12667 */ 12668 if (ipip->ipi_cmd == IF_UNITSEL) { 12669 /* ioctl comes down the ill */ 12670 ci.ci_ipif = ((ill_t *)q->q_ptr)->ill_ipif; 12671 ipif_refhold(ci.ci_ipif); 12672 } 12673 err = 0; 12674 ci.ci_sin = NULL; 12675 ci.ci_sin6 = NULL; 12676 ci.ci_lifr = NULL; 12677 extract_funcp = NULL; 12678 break; 12679 12680 case IF_CMD: 12681 case LIF_CMD: 12682 extract_funcp = ip_extract_lifreq; 12683 break; 12684 12685 case ARP_CMD: 12686 case XARP_CMD: 12687 extract_funcp = ip_extract_arpreq; 12688 break; 12689 12690 default: 12691 ASSERT(0); 12692 } 12693 12694 if (extract_funcp != NULL) { 12695 err = (*extract_funcp)(q, mp, ipip, &ci); 12696 if (err != 0) { 12697 DTRACE_PROBE4(ipif__ioctl, 12698 char *, "ip_process_ioctl finish err", 12699 int, ipip->ipi_cmd, ill_t *, NULL, ipif_t *, NULL); 12700 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), NULL); 12701 return; 12702 } 12703 12704 /* 12705 * All of the extraction functions return a refheld ipif. 12706 */ 12707 ASSERT(ci.ci_ipif != NULL); 12708 } 12709 12710 if (!(ipip->ipi_flags & IPI_WR)) { 12711 /* 12712 * A return value of EINPROGRESS means the ioctl is 12713 * either queued and waiting for some reason or has 12714 * already completed. 
12715 */ 12716 err = (*ipip->ipi_func)(ci.ci_ipif, ci.ci_sin, q, mp, ipip, 12717 ci.ci_lifr); 12718 if (ci.ci_ipif != NULL) { 12719 DTRACE_PROBE4(ipif__ioctl, 12720 char *, "ip_process_ioctl finish RD", 12721 int, ipip->ipi_cmd, ill_t *, ci.ci_ipif->ipif_ill, 12722 ipif_t *, ci.ci_ipif); 12723 ipif_refrele(ci.ci_ipif); 12724 } else { 12725 DTRACE_PROBE4(ipif__ioctl, 12726 char *, "ip_process_ioctl finish RD", 12727 int, ipip->ipi_cmd, ill_t *, NULL, ipif_t *, NULL); 12728 } 12729 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), NULL); 12730 return; 12731 } 12732 12733 ASSERT(ci.ci_ipif != NULL); 12734 12735 /* 12736 * If ipsq is non-NULL, we are already being called exclusively 12737 */ 12738 ASSERT(ipsq == NULL || IAM_WRITER_IPSQ(ipsq)); 12739 if (ipsq == NULL) { 12740 ipsq = ipsq_try_enter(ci.ci_ipif, NULL, q, mp, ip_process_ioctl, 12741 NEW_OP, B_TRUE); 12742 if (ipsq == NULL) { 12743 ipif_refrele(ci.ci_ipif); 12744 return; 12745 } 12746 entered_ipsq = B_TRUE; 12747 } 12748 /* 12749 * Release the ipif so that ipif_down and friends that wait for 12750 * references to go away are not misled about the current ipif_refcnt 12751 * values. We are writer so we can access the ipif even after releasing 12752 * the ipif. 12753 */ 12754 ipif_refrele(ci.ci_ipif); 12755 12756 ipsq_current_start(ipsq, ci.ci_ipif, ipip->ipi_cmd); 12757 12758 /* 12759 * A return value of EINPROGRESS means the ioctl is 12760 * either queued and waiting for some reason or has 12761 * already completed. 12762 */ 12763 err = (*ipip->ipi_func)(ci.ci_ipif, ci.ci_sin, q, mp, ipip, ci.ci_lifr); 12764 12765 DTRACE_PROBE4(ipif__ioctl, char *, "ip_process_ioctl finish WR", 12766 int, ipip->ipi_cmd, 12767 ill_t *, ci.ci_ipif == NULL ? NULL : ci.ci_ipif->ipif_ill, 12768 ipif_t *, ci.ci_ipif); 12769 ip_ioctl_finish(q, mp, err, IPI2MODE(ipip), ipsq); 12770 12771 if (entered_ipsq) 12772 ipsq_exit(ipsq); 12773 } 12774 12775 /* 12776 * Complete the ioctl. Typically ioctls use the mi package and need to 12777 * do mi_copyout/mi_copy_done. 12778 */ 12779 void 12780 ip_ioctl_finish(queue_t *q, mblk_t *mp, int err, int mode, ipsq_t *ipsq) 12781 { 12782 conn_t *connp = NULL; 12783 12784 if (err == EINPROGRESS) 12785 return; 12786 12787 if (CONN_Q(q)) { 12788 connp = Q_TO_CONN(q); 12789 ASSERT(connp->conn_ref >= 2); 12790 } 12791 12792 switch (mode) { 12793 case COPYOUT: 12794 if (err == 0) 12795 mi_copyout(q, mp); 12796 else 12797 mi_copy_done(q, mp, err); 12798 break; 12799 12800 case NO_COPYOUT: 12801 mi_copy_done(q, mp, err); 12802 break; 12803 12804 default: 12805 ASSERT(mode == CONN_CLOSE); /* aborted through CONN_CLOSE */ 12806 break; 12807 } 12808 12809 /* 12810 * The refhold placed at the start of the ioctl is released here. 12811 */ 12812 if (connp != NULL) 12813 CONN_OPER_PENDING_DONE(connp); 12814 12815 if (ipsq != NULL) 12816 ipsq_current_finish(ipsq); 12817 } 12818 12819 /* Handles all non data messages */ 12820 void 12821 ip_wput_nondata(queue_t *q, mblk_t *mp) 12822 { 12823 mblk_t *mp1; 12824 struct iocblk *iocp; 12825 ip_ioctl_cmd_t *ipip; 12826 conn_t *connp; 12827 cred_t *cr; 12828 char *proto_str; 12829 12830 if (CONN_Q(q)) 12831 connp = Q_TO_CONN(q); 12832 else 12833 connp = NULL; 12834 12835 switch (DB_TYPE(mp)) { 12836 case M_IOCTL: 12837 /* 12838 * IOCTL processing begins in ip_sioctl_copyin_setup which 12839 * will arrange to copy in associated control structures. 
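	 * The mi_copy* machinery then drives the transfer: each M_IOCDATA
	 * round trip below advances MI_COPY_DIRECTION()/MI_COPY_COUNT()
	 * until the copyin (and any subsequent copyout) completes.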
12840 */ 12841 ip_sioctl_copyin_setup(q, mp); 12842 return; 12843 case M_IOCDATA: 12844 /* 12845 * Ensure that this is associated with one of our trans- 12846 * parent ioctls. If it's not ours, discard it if we're 12847 * running as a driver, or pass it on if we're a module. 12848 */ 12849 iocp = (struct iocblk *)mp->b_rptr; 12850 ipip = ip_sioctl_lookup(iocp->ioc_cmd); 12851 if (ipip == NULL) { 12852 if (q->q_next == NULL) { 12853 goto nak; 12854 } else { 12855 putnext(q, mp); 12856 } 12857 return; 12858 } 12859 if ((q->q_next != NULL) && !(ipip->ipi_flags & IPI_MODOK)) { 12860 /* 12861 * The ioctl is one we recognise, but is not consumed 12862 * by IP as a module and we are a module, so we drop 12863 */ 12864 goto nak; 12865 } 12866 12867 /* IOCTL continuation following copyin or copyout. */ 12868 if (mi_copy_state(q, mp, NULL) == -1) { 12869 /* 12870 * The copy operation failed. mi_copy_state already 12871 * cleaned up, so we're out of here. 12872 */ 12873 return; 12874 } 12875 /* 12876 * If we just completed a copy in, we become writer and 12877 * continue processing in ip_sioctl_copyin_done. If it 12878 * was a copy out, we call mi_copyout again. If there is 12879 * nothing more to copy out, it will complete the IOCTL. 12880 */ 12881 if (MI_COPY_DIRECTION(mp) == MI_COPY_IN) { 12882 if (!(mp1 = mp->b_cont) || !(mp1 = mp1->b_cont)) { 12883 mi_copy_done(q, mp, EPROTO); 12884 return; 12885 } 12886 /* 12887 * Check for cases that need more copying. A return 12888 * value of 0 means a second copyin has been started, 12889 * so we return; a return value of 1 means no more 12890 * copying is needed, so we continue. 12891 */ 12892 if (ipip->ipi_cmd_type == MSFILT_CMD && 12893 MI_COPY_COUNT(mp) == 1) { 12894 if (ip_copyin_msfilter(q, mp) == 0) 12895 return; 12896 } 12897 /* 12898 * Refhold the conn, till the ioctl completes. This is 12899 * needed in case the ioctl ends up in the pending mp 12900 * list. Every mp in the ipx_pending_mp list 12901 * must have a refhold on the conn 12902 * to resume processing. The refhold is released when 12903 * the ioctl completes. (normally or abnormally) 12904 * In all cases ip_ioctl_finish is called to finish 12905 * the ioctl. 12906 */ 12907 if (connp != NULL) { 12908 /* This is not a reentry */ 12909 CONN_INC_REF(connp); 12910 } else { 12911 if (!(ipip->ipi_flags & IPI_MODOK)) { 12912 mi_copy_done(q, mp, EINVAL); 12913 return; 12914 } 12915 } 12916 12917 ip_process_ioctl(NULL, q, mp, ipip); 12918 12919 } else { 12920 mi_copyout(q, mp); 12921 } 12922 return; 12923 12924 case M_IOCNAK: 12925 /* 12926 * The only way we could get here is if a resolver didn't like 12927 * an IOCTL we sent it. This shouldn't happen. 12928 */ 12929 (void) mi_strlog(q, 1, SL_ERROR|SL_TRACE, 12930 "ip_wput_nondata: unexpected M_IOCNAK, ioc_cmd 0x%x", 12931 ((struct iocblk *)mp->b_rptr)->ioc_cmd); 12932 freemsg(mp); 12933 return; 12934 case M_IOCACK: 12935 /* /dev/ip shouldn't see this */ 12936 goto nak; 12937 case M_FLUSH: 12938 if (*mp->b_rptr & FLUSHW) 12939 flushq(q, FLUSHALL); 12940 if (q->q_next) { 12941 putnext(q, mp); 12942 return; 12943 } 12944 if (*mp->b_rptr & FLUSHR) { 12945 *mp->b_rptr &= ~FLUSHW; 12946 qreply(q, mp); 12947 return; 12948 } 12949 freemsg(mp); 12950 return; 12951 case M_CTL: 12952 break; 12953 case M_PROTO: 12954 case M_PCPROTO: 12955 /* 12956 * The only PROTO messages we expect are SNMP-related. 
12957 */ 12958 switch (((union T_primitives *)mp->b_rptr)->type) { 12959 case T_SVR4_OPTMGMT_REQ: 12960 ip2dbg(("ip_wput_nondata: T_SVR4_OPTMGMT_REQ " 12961 "flags %x\n", 12962 ((struct T_optmgmt_req *)mp->b_rptr)->MGMT_flags)); 12963 12964 if (connp == NULL) { 12965 proto_str = "T_SVR4_OPTMGMT_REQ"; 12966 goto protonak; 12967 } 12968 12969 /* 12970 * All Solaris components should pass a db_credp 12971 * for this TPI message, hence we ASSERT. 12972 * But in case there is some other M_PROTO that looks 12973 * like a TPI message sent by some other kernel 12974 * component, we check and return an error. 12975 */ 12976 cr = msg_getcred(mp, NULL); 12977 ASSERT(cr != NULL); 12978 if (cr == NULL) { 12979 mp = mi_tpi_err_ack_alloc(mp, TSYSERR, EINVAL); 12980 if (mp != NULL) 12981 qreply(q, mp); 12982 return; 12983 } 12984 12985 if (!snmpcom_req(q, mp, ip_snmp_set, ip_snmp_get, cr)) { 12986 proto_str = "Bad SNMPCOM request?"; 12987 goto protonak; 12988 } 12989 return; 12990 default: 12991 ip1dbg(("ip_wput_nondata: dropping M_PROTO prim %u\n", 12992 (int)*(uint_t *)mp->b_rptr)); 12993 freemsg(mp); 12994 return; 12995 } 12996 default: 12997 break; 12998 } 12999 if (q->q_next) { 13000 putnext(q, mp); 13001 } else 13002 freemsg(mp); 13003 return; 13004 13005 nak: 13006 iocp->ioc_error = EINVAL; 13007 mp->b_datap->db_type = M_IOCNAK; 13008 iocp->ioc_count = 0; 13009 qreply(q, mp); 13010 return; 13011 13012 protonak: 13013 cmn_err(CE_NOTE, "IP doesn't process %s as a module", proto_str); 13014 if ((mp = mi_tpi_err_ack_alloc(mp, TPROTO, EINVAL)) != NULL) 13015 qreply(q, mp); 13016 } 13017 13018 /* 13019 * Process IP options in an outbound packet. Verify that the nexthop in a 13020 * strict source route is onlink. 13021 * Returns non-zero if something fails in which case an ICMP error has been 13022 * sent and mp freed. 13023 * 13024 * Assumes the ULP has called ip_massage_options to move nexthop into ipha_dst. 13025 */ 13026 int 13027 ip_output_options(mblk_t *mp, ipha_t *ipha, ip_xmit_attr_t *ixa, ill_t *ill) 13028 { 13029 ipoptp_t opts; 13030 uchar_t *opt; 13031 uint8_t optval; 13032 uint8_t optlen; 13033 ipaddr_t dst; 13034 intptr_t code = 0; 13035 ire_t *ire; 13036 ip_stack_t *ipst = ixa->ixa_ipst; 13037 ip_recv_attr_t iras; 13038 13039 ip2dbg(("ip_output_options\n")); 13040 13041 dst = ipha->ipha_dst; 13042 for (optval = ipoptp_first(&opts, ipha); 13043 optval != IPOPT_EOL; 13044 optval = ipoptp_next(&opts)) { 13045 opt = opts.ipoptp_cur; 13046 optlen = opts.ipoptp_len; 13047 ip2dbg(("ip_output_options: opt %d, len %d\n", 13048 optval, optlen)); 13049 switch (optval) { 13050 uint32_t off; 13051 case IPOPT_SSRR: 13052 case IPOPT_LSRR: 13053 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) { 13054 ip1dbg(( 13055 "ip_output_options: bad option offset\n")); 13056 code = (char *)&opt[IPOPT_OLEN] - 13057 (char *)ipha; 13058 goto param_prob; 13059 } 13060 off = opt[IPOPT_OFFSET]; 13061 ip1dbg(("ip_output_options: next hop 0x%x\n", 13062 ntohl(dst))); 13063 /* 13064 * For strict: verify that dst is directly 13065 * reachable. 
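			 *
			 * For reference, a source route option is laid out
			 * as follows (RFC 791):
			 *
			 *	opt[IPOPT_OPTVAL]  type: 0x83 LSRR, 0x89 SSRR
			 *	opt[IPOPT_OLEN]	   total option length
			 *	opt[IPOPT_OFFSET]  1-based pointer, at least
			 *			   4 (IPOPT_MINOFF)
			 * followed by the route: a list of IPv4 addresses.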
13066 */ 13067 if (optval == IPOPT_SSRR) { 13068 ire = ire_ftable_lookup_v4(dst, 0, 0, 13069 IRE_IF_ALL, NULL, ALL_ZONES, ixa->ixa_tsl, 13070 MATCH_IRE_TYPE | MATCH_IRE_SECATTR, 0, ipst, 13071 NULL); 13072 if (ire == NULL) { 13073 ip1dbg(("ip_output_options: SSRR not" 13074 " directly reachable: 0x%x\n", 13075 ntohl(dst))); 13076 goto bad_src_route; 13077 } 13078 ire_refrele(ire); 13079 } 13080 break; 13081 case IPOPT_RR: 13082 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) { 13083 ip1dbg(( 13084 "ip_output_options: bad option offset\n")); 13085 code = (char *)&opt[IPOPT_OLEN] - 13086 (char *)ipha; 13087 goto param_prob; 13088 } 13089 break; 13090 case IPOPT_TS: 13091 /* 13092 * Verify that length >=5 and that there is either 13093 * room for another timestamp or that the overflow 13094 * counter is not maxed out. 13095 */ 13096 code = (char *)&opt[IPOPT_OLEN] - (char *)ipha; 13097 if (optlen < IPOPT_MINLEN_IT) { 13098 goto param_prob; 13099 } 13100 if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) { 13101 ip1dbg(( 13102 "ip_output_options: bad option offset\n")); 13103 code = (char *)&opt[IPOPT_OFFSET] - 13104 (char *)ipha; 13105 goto param_prob; 13106 } 13107 switch (opt[IPOPT_POS_OV_FLG] & 0x0F) { 13108 case IPOPT_TS_TSONLY: 13109 off = IPOPT_TS_TIMELEN; 13110 break; 13111 case IPOPT_TS_TSANDADDR: 13112 case IPOPT_TS_PRESPEC: 13113 case IPOPT_TS_PRESPEC_RFC791: 13114 off = IP_ADDR_LEN + IPOPT_TS_TIMELEN; 13115 break; 13116 default: 13117 code = (char *)&opt[IPOPT_POS_OV_FLG] - 13118 (char *)ipha; 13119 goto param_prob; 13120 } 13121 if (opt[IPOPT_OFFSET] - 1 + off > optlen && 13122 (opt[IPOPT_POS_OV_FLG] & 0xF0) == 0xF0) { 13123 /* 13124 * No room and the overflow counter is 15 13125 * already. 13126 */ 13127 goto param_prob; 13128 } 13129 break; 13130 } 13131 } 13132 13133 if ((opts.ipoptp_flags & IPOPTP_ERROR) == 0) 13134 return (0); 13135 13136 ip1dbg(("ip_output_options: error processing IP options.")); 13137 code = (char *)&opt[IPOPT_OFFSET] - (char *)ipha; 13138 13139 param_prob: 13140 bzero(&iras, sizeof (iras)); 13141 iras.ira_ill = iras.ira_rill = ill; 13142 iras.ira_ruifindex = ill->ill_phyint->phyint_ifindex; 13143 iras.ira_rifindex = iras.ira_ruifindex; 13144 iras.ira_flags = IRAF_IS_IPV4; 13145 13146 ip_drop_output("ip_output_options", mp, ill); 13147 icmp_param_problem(mp, (uint8_t)code, &iras); 13148 ASSERT(!(iras.ira_flags & IRAF_IPSEC_SECURE)); 13149 return (-1); 13150 13151 bad_src_route: 13152 bzero(&iras, sizeof (iras)); 13153 iras.ira_ill = iras.ira_rill = ill; 13154 iras.ira_ruifindex = ill->ill_phyint->phyint_ifindex; 13155 iras.ira_rifindex = iras.ira_ruifindex; 13156 iras.ira_flags = IRAF_IS_IPV4; 13157 13158 ip_drop_input("ICMP_SOURCE_ROUTE_FAILED", mp, ill); 13159 icmp_unreachable(mp, ICMP_SOURCE_ROUTE_FAILED, &iras); 13160 ASSERT(!(iras.ira_flags & IRAF_IPSEC_SECURE)); 13161 return (-1); 13162 } 13163 13164 /* 13165 * The maximum value of conn_drain_list_cnt is CONN_MAXDRAINCNT. 13166 * conn_drain_list_cnt can be changed by setting conn_drain_nthreads 13167 * thru /etc/system. 13168 */ 13169 #define CONN_MAXDRAINCNT 64 13170 13171 static void 13172 conn_drain_init(ip_stack_t *ipst) 13173 { 13174 int i, j; 13175 idl_tx_list_t *itl_tx; 13176 13177 ipst->ips_conn_drain_list_cnt = conn_drain_nthreads; 13178 13179 if ((ipst->ips_conn_drain_list_cnt == 0) || 13180 (ipst->ips_conn_drain_list_cnt > CONN_MAXDRAINCNT)) { 13181 /* 13182 * Default value of the number of drainers is the 13183 * number of cpus, subject to maximum of 8 drainers. 
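		 * For example, boot_max_ncpus == 32 yields 8 drain lists,
		 * while a 4-CPU system gets 4; a conn_drain_nthreads setting
		 * from /etc/system overrides this whenever it lies in
		 * (0, CONN_MAXDRAINCNT].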
13184 	 */
13185 		if (boot_max_ncpus != -1)
13186 			ipst->ips_conn_drain_list_cnt = MIN(boot_max_ncpus, 8);
13187 		else
13188 			ipst->ips_conn_drain_list_cnt = MIN(max_ncpus, 8);
13189 	}
13190 
13191 	ipst->ips_idl_tx_list =
13192 	    kmem_zalloc(TX_FANOUT_SIZE * sizeof (idl_tx_list_t), KM_SLEEP);
13193 	for (i = 0; i < TX_FANOUT_SIZE; i++) {
13194 		itl_tx = &ipst->ips_idl_tx_list[i];
13195 		itl_tx->txl_drain_list =
13196 		    kmem_zalloc(ipst->ips_conn_drain_list_cnt *
13197 		    sizeof (idl_t), KM_SLEEP);
13198 		mutex_init(&itl_tx->txl_lock, NULL, MUTEX_DEFAULT, NULL);
13199 		for (j = 0; j < ipst->ips_conn_drain_list_cnt; j++) {
13200 			mutex_init(&itl_tx->txl_drain_list[j].idl_lock, NULL,
13201 			    MUTEX_DEFAULT, NULL);
13202 			itl_tx->txl_drain_list[j].idl_itl = itl_tx;
13203 		}
13204 	}
13205 }
13206 
13207 static void
13208 conn_drain_fini(ip_stack_t *ipst)
13209 {
13210 	int i;
13211 	idl_tx_list_t *itl_tx;
13212 
13213 	for (i = 0; i < TX_FANOUT_SIZE; i++) {
13214 		itl_tx = &ipst->ips_idl_tx_list[i];
13215 		kmem_free(itl_tx->txl_drain_list,
13216 		    ipst->ips_conn_drain_list_cnt * sizeof (idl_t));
13217 	}
13218 	kmem_free(ipst->ips_idl_tx_list,
13219 	    TX_FANOUT_SIZE * sizeof (idl_tx_list_t));
13220 	ipst->ips_idl_tx_list = NULL;
13221 }
13222 
13223 /*
13224  * Note: For an overview of how flowcontrol is handled in IP please see the
13225  * IP Flowcontrol notes at the top of this file.
13226  *
13227  * Flow control has blocked us from proceeding. Insert the given conn in one
13228  * of the conn drain lists. These conn wq's will be qenabled later on when
13229  * STREAMS flow control does a backenable. conn_walk_drain will enable
13230  * the first conn in each of these drain lists. Each of these qenabled conns
13231  * in turn enables the next in the list, after it runs, or when it closes,
13232  * thus sustaining the drain process.
13233  */
13234 void
13235 conn_drain_insert(conn_t *connp, idl_tx_list_t *tx_list)
13236 {
13237 	idl_t *idl = tx_list->txl_drain_list;
13238 	uint_t index;
13239 	ip_stack_t *ipst = connp->conn_netstack->netstack_ip;
13240 
13241 	mutex_enter(&connp->conn_lock);
13242 	if (connp->conn_state_flags & CONN_CLOSING) {
13243 		/*
13244 		 * The conn is closing as a result of which CONN_CLOSING
13245 		 * is set. Return.
13246 		 */
13247 		mutex_exit(&connp->conn_lock);
13248 		return;
13249 	} else if (connp->conn_idl == NULL) {
13250 		/*
13251 		 * Assign the next drain list round robin. We don't use
13252 		 * a lock, and thus it may not be strictly round robin.
13253 		 * Atomicity of load/stores is enough to make sure that
13254 		 * conn_drain_list_index is always within bounds.
13255 		 */
13256 		index = tx_list->txl_drain_index;
13257 		ASSERT(index < ipst->ips_conn_drain_list_cnt);
13258 		connp->conn_idl = &tx_list->txl_drain_list[index];
13259 		index++;
13260 		if (index == ipst->ips_conn_drain_list_cnt)
13261 			index = 0;
13262 		tx_list->txl_drain_index = index;
13263 	}
13264 	mutex_exit(&connp->conn_lock);
13265 
13266 	mutex_enter(CONN_DRAIN_LIST_LOCK(connp));
13267 	if ((connp->conn_drain_prev != NULL) ||
13268 	    (connp->conn_state_flags & CONN_CLOSING)) {
13269 		/*
13270 		 * The conn is already in the drain list, OR
13271 		 * the conn is closing. We need to check for the
13272 		 * closing case again since close can happen
13273 		 * after we drop the conn_lock, and before we
13274 		 * acquire the CONN_DRAIN_LIST_LOCK.
13275 		 */
13276 		mutex_exit(CONN_DRAIN_LIST_LOCK(connp));
13277 		return;
13278 	} else {
13279 		idl = connp->conn_idl;
13280 	}
13281 
13282 	/*
13283 	 * The conn is not in the drain list. Insert it at the
13284 	 * tail of the drain list.
	 * The drain list is circular
13285 	 * and doubly linked. idl_conn points to the 1st element
13286 	 * in the list.
13287 	 */
13288 	if (idl->idl_conn == NULL) {
13289 		idl->idl_conn = connp;
13290 		connp->conn_drain_next = connp;
13291 		connp->conn_drain_prev = connp;
13292 	} else {
13293 		conn_t *head = idl->idl_conn;
13294 
13295 		connp->conn_drain_next = head;
13296 		connp->conn_drain_prev = head->conn_drain_prev;
13297 		head->conn_drain_prev->conn_drain_next = connp;
13298 		head->conn_drain_prev = connp;
13299 	}
13300 	/*
13301 	 * For non streams based sockets assert flow control.
13302 	 */
13303 	conn_setqfull(connp, NULL);
13304 	mutex_exit(CONN_DRAIN_LIST_LOCK(connp));
13305 }
13306 
13307 static void
13308 conn_idl_remove(conn_t *connp)
13309 {
13310 	idl_t *idl = connp->conn_idl;
13311 
13312 	if (idl != NULL) {
13313 		/*
13314 		 * Remove ourselves from the drain list, if we did not do
13315 		 * a putq, or if the conn is closing.
13316 		 * Note: It is possible that q->q_first is non-null. It means
13317 		 * that these messages landed after we did an enableok() in
13318 		 * ip_wsrv. Thus STREAMS will call ip_wsrv once again to
13319 		 * service them.
13320 		 */
13321 		if (connp->conn_drain_next == connp) {
13322 			/* Singleton in the list */
13323 			ASSERT(connp->conn_drain_prev == connp);
13324 			idl->idl_conn = NULL;
13325 		} else {
13326 			connp->conn_drain_prev->conn_drain_next =
13327 			    connp->conn_drain_next;
13328 			connp->conn_drain_next->conn_drain_prev =
13329 			    connp->conn_drain_prev;
13330 			if (idl->idl_conn == connp)
13331 				idl->idl_conn = connp->conn_drain_next;
13332 		}
13333 	}
13334 	connp->conn_drain_next = NULL;
13335 	connp->conn_drain_prev = NULL;
13336 
13337 	conn_clrqfull(connp, NULL);
13338 	/*
13339 	 * For streams based sockets open up flow control.
13340 	 */
13341 	if (!IPCL_IS_NONSTR(connp))
13342 		enableok(connp->conn_wq);
13343 }
13344 
13345 /*
13346  * This conn is closing, and we are called from ip_close. OR
13347  * this conn is draining because flow-control on the ill has been relieved.
13348  *
13349  * We must also remove conns on this idl from the list, and inform the
13350  * sockfs upcalls about the change in flow-control.
13351  */
13352 static void
13353 conn_drain_tail(conn_t *connp, boolean_t closing)
13354 {
13355 	idl_t *idl;
13356 	conn_t *next_connp;
13357 
13358 	/*
13359 	 * connp->conn_idl is stable at this point, and no lock is needed
13360 	 * to check it. If we are called from ip_close, close has already
13361 	 * set CONN_CLOSING, thus freezing the value of conn_idl, and
13362 	 * called us only because conn_idl is non-null. If we are called thru
13363 	 * service, conn_idl could be null, but it cannot change because
13364 	 * service is single-threaded per queue, and there cannot be another
13365 	 * instance of service trying to call conn_drain_insert on this conn
13366 	 * now.
13367 	 */
13368 	ASSERT(!closing || connp == NULL || connp->conn_idl != NULL);
13369 
13370 	/*
13371 	 * If connp->conn_idl is null, the conn has not been inserted into any
13372 	 * drain list even once since creation of the conn. Just return.
13373 	 */
13374 	if (connp == NULL || connp->conn_idl == NULL)
13375 		return;
13376 
13377 	if (connp->conn_drain_prev == NULL) {
13378 		/* This conn is currently not in the drain list. */
13379 		return;
13380 	}
13381 	idl = connp->conn_idl;
13382 	if (!closing) {
13383 		/*
13384 		 * This conn is the current drainer. If this is the last conn
13385 		 * in the drain list, we need to do more checks, in the 'if'
13386 		 * below. Otherwise we need to just qenable the next conn
13387 		 * to sustain the draining; that case is handled in the 'else'
13388 		 * below.
13389 		 */
13390 		next_connp = connp->conn_drain_next;
13391 		while (next_connp != connp) {
13392 			conn_t *delconnp = next_connp;
13393 
13394 			next_connp = next_connp->conn_drain_next;
13395 			conn_idl_remove(delconnp);
13396 		}
13397 		ASSERT(connp->conn_drain_next == idl->idl_conn);
13398 	}
13399 	conn_idl_remove(connp);
13400 
13401 }
13402 
13403 /*
13404  * Write service routine. Shared perimeter entry point.
13405  * The device queue's messages have fallen below the low water mark and
13406  * STREAMS has backenabled the ill_wq. Send sockfs notification about
13407  * flow-control on each waiting conn.
13408  */
13409 void
13410 ip_wsrv(queue_t *q)
13411 {
13412 	ill_t *ill;
13413 
13414 	ill = (ill_t *)q->q_ptr;
13415 	if (ill->ill_state_flags == 0) {
13416 		ip_stack_t *ipst = ill->ill_ipst;
13417 
13418 		/*
13419 		 * The device flow control has opened up.
13420 		 * Walk through conn drain lists and qenable the
13421 		 * first conn in each list. This makes sense only
13422 		 * if the stream is fully plumbed and setup.
13423 		 * Hence the ill_state_flags check above.
13424 		 */
13425 		ip1dbg(("ip_wsrv: walking\n"));
13426 		conn_walk_drain(ipst, &ipst->ips_idl_tx_list[0]);
13427 		enableok(ill->ill_wq);
13428 	}
13429 }
13430 
13431 /*
13432  * Callback to disable flow control in IP.
13433  *
13434  * This is a mac client callback added when the DLD_CAPAB_DIRECT capability
13435  * is enabled.
13436  *
13437  * When MAC_TX() is not able to send any more packets, dld sets its queue
13438  * to QFULL and enables STREAMS flow control. Later, when the underlying
13439  * driver is able to continue to send packets, it calls the
13440  * mac_tx_(ring_)update() function and wakes up the corresponding mac worker
13441  * threads, which in turn call this callback function and disable flow control.
13442  */
13443 void
13444 ill_flow_enable(void *arg, ip_mac_tx_cookie_t cookie)
13445 {
13446 	ill_t *ill = (ill_t *)arg;
13447 	ip_stack_t *ipst = ill->ill_ipst;
13448 	idl_tx_list_t *idl_txl;
13449 
13450 	idl_txl = &ipst->ips_idl_tx_list[IDLHASHINDEX(cookie)];
13451 	mutex_enter(&idl_txl->txl_lock);
13452 	/* add code to set a flag to indicate idl_txl is enabled */
13453 	conn_walk_drain(ipst, idl_txl);
13454 	mutex_exit(&idl_txl->txl_lock);
13455 }
13456 
13457 /*
13458  * Flow control has been relieved, and STREAMS has backenabled us. For each
13459  * list of conns that need to be drained, check if drain is already in
13460  * progress. If so set the idl_repeat bit, indicating that the last conn in
13461  * the list needs to reinitiate the drain once again, for the list. If drain
13462  * is not in progress for the list, initiate the draining, by qenabling the
13463  * 1st conn in the list. The drain is self-sustaining, each qenabled conn
13464  * will in turn qenable the next conn, when it is done/blocked/closing.
13465  */
13466 static void
13467 conn_walk_drain(ip_stack_t *ipst, idl_tx_list_t *tx_list)
13468 {
13469 	int i;
13470 	idl_t *idl;
13471 
13472 	IP_STAT(ipst, ip_conn_walk_drain);
13473 
13474 	for (i = 0; i < ipst->ips_conn_drain_list_cnt; i++) {
13475 		idl = &tx_list->txl_drain_list[i];
13476 		mutex_enter(&idl->idl_lock);
13477 		conn_drain_tail(idl->idl_conn, B_FALSE);
13478 		mutex_exit(&idl->idl_lock);
13479 	}
13480 }
13481 
13482 /*
13483  * Determine if the ill and multicast aspects of the packet
13484  * "match" the conn.
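 *
 * For example (hypothetical setup): a conn bound with IP_BOUND_IF to an
 * IPMP group interface still accepts packets that arrive on one of the
 * underlying interfaces, because for an under-ill the check below falls
 * back to comparing against ipmp_ill_get_ipmp_ifindex(ill).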
13485 */ 13486 boolean_t 13487 conn_wantpacket(conn_t *connp, ip_recv_attr_t *ira, ipha_t *ipha) 13488 { 13489 ill_t *ill = ira->ira_rill; 13490 zoneid_t zoneid = ira->ira_zoneid; 13491 uint_t in_ifindex; 13492 ipaddr_t dst, src; 13493 13494 dst = ipha->ipha_dst; 13495 src = ipha->ipha_src; 13496 13497 /* 13498 * conn_incoming_ifindex is set by IP_BOUND_IF which limits 13499 * unicast, broadcast and multicast reception to 13500 * conn_incoming_ifindex. 13501 * conn_wantpacket is called for unicast, broadcast and 13502 * multicast packets. 13503 */ 13504 in_ifindex = connp->conn_incoming_ifindex; 13505 13506 /* mpathd can bind to the under IPMP interface, which we allow */ 13507 if (in_ifindex != 0 && in_ifindex != ill->ill_phyint->phyint_ifindex) { 13508 if (!IS_UNDER_IPMP(ill)) 13509 return (B_FALSE); 13510 13511 if (in_ifindex != ipmp_ill_get_ipmp_ifindex(ill)) 13512 return (B_FALSE); 13513 } 13514 13515 if (!IPCL_ZONE_MATCH(connp, zoneid)) 13516 return (B_FALSE); 13517 13518 if (!(ira->ira_flags & IRAF_MULTICAST)) 13519 return (B_TRUE); 13520 13521 if (connp->conn_multi_router) { 13522 /* multicast packet and multicast router socket: send up */ 13523 return (B_TRUE); 13524 } 13525 13526 if (ipha->ipha_protocol == IPPROTO_PIM || 13527 ipha->ipha_protocol == IPPROTO_RSVP) 13528 return (B_TRUE); 13529 13530 return (conn_hasmembers_ill_withsrc_v4(connp, dst, src, ira->ira_ill)); 13531 } 13532 13533 void 13534 conn_setqfull(conn_t *connp, boolean_t *flow_stopped) 13535 { 13536 if (IPCL_IS_NONSTR(connp)) { 13537 (*connp->conn_upcalls->su_txq_full) 13538 (connp->conn_upper_handle, B_TRUE); 13539 if (flow_stopped != NULL) 13540 *flow_stopped = B_TRUE; 13541 } else { 13542 queue_t *q = connp->conn_wq; 13543 13544 ASSERT(q != NULL); 13545 if (!(q->q_flag & QFULL)) { 13546 mutex_enter(QLOCK(q)); 13547 if (!(q->q_flag & QFULL)) { 13548 /* still need to set QFULL */ 13549 q->q_flag |= QFULL; 13550 /* set flow_stopped to true under QLOCK */ 13551 if (flow_stopped != NULL) 13552 *flow_stopped = B_TRUE; 13553 mutex_exit(QLOCK(q)); 13554 } else { 13555 /* flow_stopped is left unchanged */ 13556 mutex_exit(QLOCK(q)); 13557 } 13558 } 13559 } 13560 } 13561 13562 void 13563 conn_clrqfull(conn_t *connp, boolean_t *flow_stopped) 13564 { 13565 if (IPCL_IS_NONSTR(connp)) { 13566 (*connp->conn_upcalls->su_txq_full) 13567 (connp->conn_upper_handle, B_FALSE); 13568 if (flow_stopped != NULL) 13569 *flow_stopped = B_FALSE; 13570 } else { 13571 queue_t *q = connp->conn_wq; 13572 13573 ASSERT(q != NULL); 13574 if (q->q_flag & QFULL) { 13575 mutex_enter(QLOCK(q)); 13576 if (q->q_flag & QFULL) { 13577 q->q_flag &= ~QFULL; 13578 /* set flow_stopped to false under QLOCK */ 13579 if (flow_stopped != NULL) 13580 *flow_stopped = B_FALSE; 13581 mutex_exit(QLOCK(q)); 13582 if (q->q_flag & QWANTW) 13583 qbackenable(q, 0); 13584 } else { 13585 /* flow_stopped is left unchanged */ 13586 mutex_exit(QLOCK(q)); 13587 } 13588 } 13589 } 13590 connp->conn_direct_blocked = B_FALSE; 13591 } 13592 13593 /* 13594 * Return the length in bytes of the IPv4 headers (base header, label, and 13595 * other IP options) that will be needed based on the 13596 * ip_pkt_t structure passed by the caller. 13597 * 13598 * The returned length does not include the length of the upper level 13599 * protocol (ULP) header. 13600 * The caller needs to check that the length doesn't exceed the max for IPv4. 
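 *
 * For example (hypothetical values): a 9-byte label rounds up to 12
 * bytes, so together with 8 bytes of other IPv4 options this returns
 *	IP_SIMPLE_HDR_LENGTH (20) + 12 + 8 = 40
 * which is still within the IPv4 maximum header length of 60 bytes.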
13601  */
13602 int
13603 ip_total_hdrs_len_v4(const ip_pkt_t *ipp)
13604 {
13605 	int len;
13606 
13607 	len = IP_SIMPLE_HDR_LENGTH;
13608 	if (ipp->ipp_fields & IPPF_LABEL_V4) {
13609 		ASSERT(ipp->ipp_label_len_v4 != 0);
13610 		/* We need to round up here */
13611 		len += (ipp->ipp_label_len_v4 + 3) & ~3;
13612 	}
13613 
13614 	if (ipp->ipp_fields & IPPF_IPV4_OPTIONS) {
13615 		ASSERT(ipp->ipp_ipv4_options_len != 0);
13616 		ASSERT((ipp->ipp_ipv4_options_len & 3) == 0);
13617 		len += ipp->ipp_ipv4_options_len;
13618 	}
13619 	return (len);
13620 }
13621 
13622 /*
13623  * All-purpose routine to build an IPv4 header with options based
13624  * on the abstract ip_pkt_t.
13625  *
13626  * The caller has to set the source and destination address as well as
13627  * ipha_length. The caller has to massage any source route and compensate
13628  * for the ULP pseudo-header checksum due to the source route.
13629  */
13630 void
13631 ip_build_hdrs_v4(uchar_t *buf, uint_t buf_len, const ip_pkt_t *ipp,
13632     uint8_t protocol)
13633 {
13634 	ipha_t *ipha = (ipha_t *)buf;
13635 	uint8_t *cp;
13636 
13637 	/* Initialize IPv4 header */
13638 	ipha->ipha_type_of_service = ipp->ipp_type_of_service;
13639 	ipha->ipha_length = 0;	/* Caller will set later */
13640 	ipha->ipha_ident = 0;
13641 	ipha->ipha_fragment_offset_and_flags = 0;
13642 	ipha->ipha_ttl = ipp->ipp_unicast_hops;
13643 	ipha->ipha_protocol = protocol;
13644 	ipha->ipha_hdr_checksum = 0;
13645 
13646 	if ((ipp->ipp_fields & IPPF_ADDR) &&
13647 	    IN6_IS_ADDR_V4MAPPED(&ipp->ipp_addr))
13648 		ipha->ipha_src = ipp->ipp_addr_v4;
13649 
13650 	cp = (uint8_t *)&ipha[1];
13651 	if (ipp->ipp_fields & IPPF_LABEL_V4) {
13652 		ASSERT(ipp->ipp_label_len_v4 != 0);
13653 		bcopy(ipp->ipp_label_v4, cp, ipp->ipp_label_len_v4);
13654 		cp += ipp->ipp_label_len_v4;
13655 		/* We need to round up here */
13656 		while ((uintptr_t)cp & 0x3) {
13657 			*cp++ = IPOPT_NOP;
13658 		}
13659 	}
13660 
13661 	if (ipp->ipp_fields & IPPF_IPV4_OPTIONS) {
13662 		ASSERT(ipp->ipp_ipv4_options_len != 0);
13663 		ASSERT((ipp->ipp_ipv4_options_len & 3) == 0);
13664 		bcopy(ipp->ipp_ipv4_options, cp, ipp->ipp_ipv4_options_len);
13665 		cp += ipp->ipp_ipv4_options_len;
13666 	}
13667 	ipha->ipha_version_and_hdr_length =
13668 	    (uint8_t)((IP_VERSION << 4) + buf_len / 4);
13669 
13670 	ASSERT((int)(cp - buf) == buf_len);
13671 }
13672 
13673 /* Allocate the private structure */
13674 static int
13675 ip_priv_alloc(void **bufp)
13676 {
13677 	void *buf;
13678 
13679 	if ((buf = kmem_alloc(sizeof (ip_priv_t), KM_NOSLEEP)) == NULL)
13680 		return (ENOMEM);
13681 
13682 	*bufp = buf;
13683 	return (0);
13684 }
13685 
13686 /* Function to delete the private structure */
13687 void
13688 ip_priv_free(void *buf)
13689 {
13690 	ASSERT(buf != NULL);
13691 	kmem_free(buf, sizeof (ip_priv_t));
13692 }
13693 
13694 /*
13695  * The entry point for IPPF processing.
13696  * If the classifier (IPGPC_CLASSIFY) is not loaded and configured, the
13697  * routine just returns.
13698  *
13699  * When called, ip_process generates an ipp_packet_t structure
13700  * which holds the state information for this packet and invokes the
13701  * classifier (via ipp_packet_process). The classification, depending on
13702  * configured filters, results in a list of actions for this packet. Invoking
13703  * an action may cause the packet to be dropped, in which case we return NULL.
13704  * proc indicates the callout position for
13705  * this packet and ill is the interface this packet arrived on or will leave
13706  * on (inbound and outbound resp.).
13707  *
13708  * We do the processing on the rill (mapped to the upper if ipmp), but bump
13709  * the MIB counters on the ill corresponding to the destination IP address.
13710  */
13711 mblk_t *
13712 ip_process(ip_proc_t proc, mblk_t *mp, ill_t *rill, ill_t *ill)
13713 {
13714 	ip_priv_t *priv;
13715 	ipp_action_id_t aid;
13716 	int rc = 0;
13717 	ipp_packet_t *pp;
13718 
13719 	/* If the classifier is not loaded, return */
13720 	if ((aid = ipp_action_lookup(IPGPC_CLASSIFY)) == IPP_ACTION_INVAL) {
13721 		return (mp);
13722 	}
13723 
13724 	ASSERT(mp != NULL);
13725 
13726 	/* Allocate the packet structure */
13727 	rc = ipp_packet_alloc(&pp, "ip", aid);
13728 	if (rc != 0)
13729 		goto drop;
13730 
13731 	/* Allocate the private structure */
13732 	rc = ip_priv_alloc((void **)&priv);
13733 	if (rc != 0) {
13734 		ipp_packet_free(pp);
13735 		goto drop;
13736 	}
13737 	priv->proc = proc;
13738 	priv->ill_index = ill_get_upper_ifindex(rill);
13739 
13740 	ipp_packet_set_private(pp, priv, ip_priv_free);
13741 	ipp_packet_set_data(pp, mp);
13742 
13743 	/* Invoke the classifier */
13744 	rc = ipp_packet_process(&pp);
13745 	if (pp != NULL) {
13746 		mp = ipp_packet_get_data(pp);
13747 		ipp_packet_free(pp);
13748 		if (rc != 0)
13749 			goto drop;
13750 		return (mp);
13751 	} else {
13752 		/* No mp to trace in ip_drop_input/ip_drop_output */
13753 		mp = NULL;
13754 	}
13755 drop:
13756 	if (proc == IPP_LOCAL_IN || proc == IPP_FWD_IN) {
13757 		BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
13758 		ip_drop_input("ip_process", mp, ill);
13759 	} else {
13760 		BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
13761 		ip_drop_output("ip_process", mp, ill);
13762 	}
13763 	freemsg(mp);
13764 	return (NULL);
13765 }
13766 
13767 /*
13768  * Propagate a multicast group membership operation (add/drop) on
13769  * all the interfaces crossed by the related multirt routes.
13770  * The call is considered successful if the operation succeeds
13771  * on at least one interface.
13772  *
13773  * This assumes that a set of IRE_HOST/RTF_MULTIRT has been created for the
13774  * multicast addresses with the ire argument being the first one.
13775  * We walk the bucket to find all of those.
13776  *
13777  * Common to IPv4 and IPv6.
13778 */ 13779 static int 13780 ip_multirt_apply_membership(int (*fn)(conn_t *, boolean_t, 13781 const in6_addr_t *, ipaddr_t, uint_t, mcast_record_t, const in6_addr_t *), 13782 ire_t *ire, conn_t *connp, boolean_t checkonly, const in6_addr_t *v6group, 13783 mcast_record_t fmode, const in6_addr_t *v6src) 13784 { 13785 ire_t *ire_gw; 13786 irb_t *irb; 13787 int ifindex; 13788 int error = 0; 13789 int result; 13790 ip_stack_t *ipst = ire->ire_ipst; 13791 ipaddr_t group; 13792 boolean_t isv6; 13793 int match_flags; 13794 13795 if (IN6_IS_ADDR_V4MAPPED(v6group)) { 13796 IN6_V4MAPPED_TO_IPADDR(v6group, group); 13797 isv6 = B_FALSE; 13798 } else { 13799 isv6 = B_TRUE; 13800 } 13801 13802 irb = ire->ire_bucket; 13803 ASSERT(irb != NULL); 13804 13805 result = 0; 13806 irb_refhold(irb); 13807 for (; ire != NULL; ire = ire->ire_next) { 13808 if ((ire->ire_flags & RTF_MULTIRT) == 0) 13809 continue; 13810 13811 /* We handle -ifp routes by matching on the ill if set */ 13812 match_flags = MATCH_IRE_TYPE; 13813 if (ire->ire_ill != NULL) 13814 match_flags |= MATCH_IRE_ILL; 13815 13816 if (isv6) { 13817 if (!IN6_ARE_ADDR_EQUAL(&ire->ire_addr_v6, v6group)) 13818 continue; 13819 13820 ire_gw = ire_ftable_lookup_v6(&ire->ire_gateway_addr_v6, 13821 0, 0, IRE_INTERFACE, ire->ire_ill, ALL_ZONES, NULL, 13822 match_flags, 0, ipst, NULL); 13823 } else { 13824 if (ire->ire_addr != group) 13825 continue; 13826 13827 ire_gw = ire_ftable_lookup_v4(ire->ire_gateway_addr, 13828 0, 0, IRE_INTERFACE, ire->ire_ill, ALL_ZONES, NULL, 13829 match_flags, 0, ipst, NULL); 13830 } 13831 /* No interface route exists for the gateway; skip this ire. */ 13832 if (ire_gw == NULL) 13833 continue; 13834 if (ire_gw->ire_flags & (RTF_REJECT|RTF_BLACKHOLE)) { 13835 ire_refrele(ire_gw); 13836 continue; 13837 } 13838 ASSERT(ire_gw->ire_ill != NULL); /* IRE_INTERFACE */ 13839 ifindex = ire_gw->ire_ill->ill_phyint->phyint_ifindex; 13840 13841 /* 13842 * The operation is considered a success if 13843 * it succeeds at least once on any one interface. 13844 */ 13845 error = fn(connp, checkonly, v6group, INADDR_ANY, ifindex, 13846 fmode, v6src); 13847 if (error == 0) 13848 result = CGTP_MCAST_SUCCESS; 13849 13850 ire_refrele(ire_gw); 13851 } 13852 irb_refrele(irb); 13853 /* 13854 * Consider the call as successful if we succeeded on at least 13855 * one interface. Otherwise, return the last encountered error. 13856 */ 13857 return (result == CGTP_MCAST_SUCCESS ? 0 : error); 13858 } 13859 13860 /* 13861 * Get the CGTP (multirouting) filtering status. 13862 * If 0, the CGTP hooks are transparent. 13863 */ 13864 /* ARGSUSED */ 13865 static int 13866 ip_cgtp_filter_get(queue_t *q, mblk_t *mp, caddr_t cp, cred_t *ioc_cr) 13867 { 13868 boolean_t *ip_cgtp_filter_value = (boolean_t *)cp; 13869 13870 (void) mi_mpprintf(mp, "%d", (int)*ip_cgtp_filter_value); 13871 return (0); 13872 } 13873 13874 /* 13875 * Set the CGTP (multirouting) filtering status. 13876 * If the status is changed from active to transparent 13877 * or from transparent to active, forward the new status 13878 * to the filtering module (if loaded). 
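 *
 * The status is normally toggled from userland with ndd(1M), e.g.
 * (assuming the conventional name of this tunable):
 *
 *	ndd -set /dev/ip ip_cgtp_filter 1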
13879 */ 13880 /* ARGSUSED */ 13881 static int 13882 ip_cgtp_filter_set(queue_t *q, mblk_t *mp, char *value, caddr_t cp, 13883 cred_t *ioc_cr) 13884 { 13885 long new_value; 13886 boolean_t *ip_cgtp_filter_value = (boolean_t *)cp; 13887 ip_stack_t *ipst = CONNQ_TO_IPST(q); 13888 13889 if (secpolicy_ip_config(ioc_cr, B_FALSE) != 0) 13890 return (EPERM); 13891 13892 if (ddi_strtol(value, NULL, 10, &new_value) != 0 || 13893 new_value < 0 || new_value > 1) { 13894 return (EINVAL); 13895 } 13896 13897 if ((!*ip_cgtp_filter_value) && new_value) { 13898 cmn_err(CE_NOTE, "IP: enabling CGTP filtering%s", 13899 ipst->ips_ip_cgtp_filter_ops == NULL ? 13900 " (module not loaded)" : ""); 13901 } 13902 if (*ip_cgtp_filter_value && (!new_value)) { 13903 cmn_err(CE_NOTE, "IP: disabling CGTP filtering%s", 13904 ipst->ips_ip_cgtp_filter_ops == NULL ? 13905 " (module not loaded)" : ""); 13906 } 13907 13908 if (ipst->ips_ip_cgtp_filter_ops != NULL) { 13909 int res; 13910 netstackid_t stackid; 13911 13912 stackid = ipst->ips_netstack->netstack_stackid; 13913 res = ipst->ips_ip_cgtp_filter_ops->cfo_change_state(stackid, 13914 new_value); 13915 if (res) 13916 return (res); 13917 } 13918 13919 *ip_cgtp_filter_value = (boolean_t)new_value; 13920 13921 ill_set_inputfn_all(ipst); 13922 return (0); 13923 } 13924 13925 /* 13926 * Return the expected CGTP hooks version number. 13927 */ 13928 int 13929 ip_cgtp_filter_supported(void) 13930 { 13931 return (ip_cgtp_filter_rev); 13932 } 13933 13934 /* 13935 * CGTP hooks can be registered by invoking this function. 13936 * Checks that the version number matches. 13937 */ 13938 int 13939 ip_cgtp_filter_register(netstackid_t stackid, cgtp_filter_ops_t *ops) 13940 { 13941 netstack_t *ns; 13942 ip_stack_t *ipst; 13943 13944 if (ops->cfo_filter_rev != CGTP_FILTER_REV) 13945 return (ENOTSUP); 13946 13947 ns = netstack_find_by_stackid(stackid); 13948 if (ns == NULL) 13949 return (EINVAL); 13950 ipst = ns->netstack_ip; 13951 ASSERT(ipst != NULL); 13952 13953 if (ipst->ips_ip_cgtp_filter_ops != NULL) { 13954 netstack_rele(ns); 13955 return (EALREADY); 13956 } 13957 13958 ipst->ips_ip_cgtp_filter_ops = ops; 13959 13960 ill_set_inputfn_all(ipst); 13961 13962 netstack_rele(ns); 13963 return (0); 13964 } 13965 13966 /* 13967 * CGTP hooks can be unregistered by invoking this function. 13968 * Returns ENXIO if there was no registration. 13969 * Returns EBUSY if the ndd variable has not been turned off. 13970 */ 13971 int 13972 ip_cgtp_filter_unregister(netstackid_t stackid) 13973 { 13974 netstack_t *ns; 13975 ip_stack_t *ipst; 13976 13977 ns = netstack_find_by_stackid(stackid); 13978 if (ns == NULL) 13979 return (EINVAL); 13980 ipst = ns->netstack_ip; 13981 ASSERT(ipst != NULL); 13982 13983 if (ipst->ips_ip_cgtp_filter) { 13984 netstack_rele(ns); 13985 return (EBUSY); 13986 } 13987 13988 if (ipst->ips_ip_cgtp_filter_ops == NULL) { 13989 netstack_rele(ns); 13990 return (ENXIO); 13991 } 13992 ipst->ips_ip_cgtp_filter_ops = NULL; 13993 13994 ill_set_inputfn_all(ipst); 13995 13996 netstack_rele(ns); 13997 return (0); 13998 } 13999 14000 /* 14001 * Check whether there is a CGTP filter registration. 14002 * Returns non-zero if there is a registration, otherwise returns zero. 14003 * Note: returns zero if bad stackid. 
14004 */ 14005 int 14006 ip_cgtp_filter_is_registered(netstackid_t stackid) 14007 { 14008 netstack_t *ns; 14009 ip_stack_t *ipst; 14010 int ret; 14011 14012 ns = netstack_find_by_stackid(stackid); 14013 if (ns == NULL) 14014 return (0); 14015 ipst = ns->netstack_ip; 14016 ASSERT(ipst != NULL); 14017 14018 if (ipst->ips_ip_cgtp_filter_ops != NULL) 14019 ret = 1; 14020 else 14021 ret = 0; 14022 14023 netstack_rele(ns); 14024 return (ret); 14025 } 14026 14027 static int 14028 ip_squeue_switch(int val) 14029 { 14030 int rval; 14031 14032 switch (val) { 14033 case IP_SQUEUE_ENTER_NODRAIN: 14034 rval = SQ_NODRAIN; 14035 break; 14036 case IP_SQUEUE_ENTER: 14037 rval = SQ_PROCESS; 14038 break; 14039 case IP_SQUEUE_FILL: 14040 default: 14041 rval = SQ_FILL; 14042 break; 14043 } 14044 return (rval); 14045 } 14046 14047 /* ARGSUSED */ 14048 static int 14049 ip_input_proc_set(queue_t *q, mblk_t *mp, char *value, 14050 caddr_t addr, cred_t *cr) 14051 { 14052 int *v = (int *)addr; 14053 long new_value; 14054 14055 if (secpolicy_net_config(cr, B_FALSE) != 0) 14056 return (EPERM); 14057 14058 if (ddi_strtol(value, NULL, 10, &new_value) != 0) 14059 return (EINVAL); 14060 14061 ip_squeue_flag = ip_squeue_switch(new_value); 14062 *v = new_value; 14063 return (0); 14064 } 14065 14066 /* 14067 * Handle ndd set of variables which require PRIV_SYS_NET_CONFIG such as 14068 * ip_debug. 14069 */ 14070 /* ARGSUSED */ 14071 static int 14072 ip_int_set(queue_t *q, mblk_t *mp, char *value, 14073 caddr_t addr, cred_t *cr) 14074 { 14075 int *v = (int *)addr; 14076 long new_value; 14077 14078 if (secpolicy_net_config(cr, B_FALSE) != 0) 14079 return (EPERM); 14080 14081 if (ddi_strtol(value, NULL, 10, &new_value) != 0) 14082 return (EINVAL); 14083 14084 *v = new_value; 14085 return (0); 14086 } 14087 14088 static void * 14089 ip_kstat2_init(netstackid_t stackid, ip_stat_t *ip_statisticsp) 14090 { 14091 kstat_t *ksp; 14092 14093 ip_stat_t template = { 14094 { "ip_udp_fannorm", KSTAT_DATA_UINT64 }, 14095 { "ip_udp_fanmb", KSTAT_DATA_UINT64 }, 14096 { "ip_recv_pullup", KSTAT_DATA_UINT64 }, 14097 { "ip_db_ref", KSTAT_DATA_UINT64 }, 14098 { "ip_notaligned", KSTAT_DATA_UINT64 }, 14099 { "ip_multimblk", KSTAT_DATA_UINT64 }, 14100 { "ip_opt", KSTAT_DATA_UINT64 }, 14101 { "ipsec_proto_ahesp", KSTAT_DATA_UINT64 }, 14102 { "ip_conn_flputbq", KSTAT_DATA_UINT64 }, 14103 { "ip_conn_walk_drain", KSTAT_DATA_UINT64 }, 14104 { "ip_out_sw_cksum", KSTAT_DATA_UINT64 }, 14105 { "ip_out_sw_cksum_bytes", KSTAT_DATA_UINT64 }, 14106 { "ip_in_sw_cksum", KSTAT_DATA_UINT64 }, 14107 { "ip_ire_reclaim_calls", KSTAT_DATA_UINT64 }, 14108 { "ip_ire_reclaim_deleted", KSTAT_DATA_UINT64 }, 14109 { "ip_nce_reclaim_calls", KSTAT_DATA_UINT64 }, 14110 { "ip_nce_reclaim_deleted", KSTAT_DATA_UINT64 }, 14111 { "ip_dce_reclaim_calls", KSTAT_DATA_UINT64 }, 14112 { "ip_dce_reclaim_deleted", KSTAT_DATA_UINT64 }, 14113 { "ip_tcp_in_full_hw_cksum_err", KSTAT_DATA_UINT64 }, 14114 { "ip_tcp_in_part_hw_cksum_err", KSTAT_DATA_UINT64 }, 14115 { "ip_tcp_in_sw_cksum_err", KSTAT_DATA_UINT64 }, 14116 { "ip_udp_in_full_hw_cksum_err", KSTAT_DATA_UINT64 }, 14117 { "ip_udp_in_part_hw_cksum_err", KSTAT_DATA_UINT64 }, 14118 { "ip_udp_in_sw_cksum_err", KSTAT_DATA_UINT64 }, 14119 { "conn_in_recvdstaddr", KSTAT_DATA_UINT64 }, 14120 { "conn_in_recvopts", KSTAT_DATA_UINT64 }, 14121 { "conn_in_recvif", KSTAT_DATA_UINT64 }, 14122 { "conn_in_recvslla", KSTAT_DATA_UINT64 }, 14123 { "conn_in_recvucred", KSTAT_DATA_UINT64 }, 14124 { "conn_in_recvttl", KSTAT_DATA_UINT64 }, 14125 { 
"conn_in_recvhopopts", KSTAT_DATA_UINT64 }, 14126 { "conn_in_recvhoplimit", KSTAT_DATA_UINT64 }, 14127 { "conn_in_recvdstopts", KSTAT_DATA_UINT64 }, 14128 { "conn_in_recvrthdrdstopts", KSTAT_DATA_UINT64 }, 14129 { "conn_in_recvrthdr", KSTAT_DATA_UINT64 }, 14130 { "conn_in_recvpktinfo", KSTAT_DATA_UINT64 }, 14131 { "conn_in_recvtclass", KSTAT_DATA_UINT64 }, 14132 { "conn_in_timestamp", KSTAT_DATA_UINT64 }, 14133 }; 14134 14135 ksp = kstat_create_netstack("ip", 0, "ipstat", "net", 14136 KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t), 14137 KSTAT_FLAG_VIRTUAL, stackid); 14138 14139 if (ksp == NULL) 14140 return (NULL); 14141 14142 bcopy(&template, ip_statisticsp, sizeof (template)); 14143 ksp->ks_data = (void *)ip_statisticsp; 14144 ksp->ks_private = (void *)(uintptr_t)stackid; 14145 14146 kstat_install(ksp); 14147 return (ksp); 14148 } 14149 14150 static void 14151 ip_kstat2_fini(netstackid_t stackid, kstat_t *ksp) 14152 { 14153 if (ksp != NULL) { 14154 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private); 14155 kstat_delete_netstack(ksp, stackid); 14156 } 14157 } 14158 14159 static void * 14160 ip_kstat_init(netstackid_t stackid, ip_stack_t *ipst) 14161 { 14162 kstat_t *ksp; 14163 14164 ip_named_kstat_t template = { 14165 { "forwarding", KSTAT_DATA_UINT32, 0 }, 14166 { "defaultTTL", KSTAT_DATA_UINT32, 0 }, 14167 { "inReceives", KSTAT_DATA_UINT64, 0 }, 14168 { "inHdrErrors", KSTAT_DATA_UINT32, 0 }, 14169 { "inAddrErrors", KSTAT_DATA_UINT32, 0 }, 14170 { "forwDatagrams", KSTAT_DATA_UINT64, 0 }, 14171 { "inUnknownProtos", KSTAT_DATA_UINT32, 0 }, 14172 { "inDiscards", KSTAT_DATA_UINT32, 0 }, 14173 { "inDelivers", KSTAT_DATA_UINT64, 0 }, 14174 { "outRequests", KSTAT_DATA_UINT64, 0 }, 14175 { "outDiscards", KSTAT_DATA_UINT32, 0 }, 14176 { "outNoRoutes", KSTAT_DATA_UINT32, 0 }, 14177 { "reasmTimeout", KSTAT_DATA_UINT32, 0 }, 14178 { "reasmReqds", KSTAT_DATA_UINT32, 0 }, 14179 { "reasmOKs", KSTAT_DATA_UINT32, 0 }, 14180 { "reasmFails", KSTAT_DATA_UINT32, 0 }, 14181 { "fragOKs", KSTAT_DATA_UINT32, 0 }, 14182 { "fragFails", KSTAT_DATA_UINT32, 0 }, 14183 { "fragCreates", KSTAT_DATA_UINT32, 0 }, 14184 { "addrEntrySize", KSTAT_DATA_INT32, 0 }, 14185 { "routeEntrySize", KSTAT_DATA_INT32, 0 }, 14186 { "netToMediaEntrySize", KSTAT_DATA_INT32, 0 }, 14187 { "routingDiscards", KSTAT_DATA_UINT32, 0 }, 14188 { "inErrs", KSTAT_DATA_UINT32, 0 }, 14189 { "noPorts", KSTAT_DATA_UINT32, 0 }, 14190 { "inCksumErrs", KSTAT_DATA_UINT32, 0 }, 14191 { "reasmDuplicates", KSTAT_DATA_UINT32, 0 }, 14192 { "reasmPartDups", KSTAT_DATA_UINT32, 0 }, 14193 { "forwProhibits", KSTAT_DATA_UINT32, 0 }, 14194 { "udpInCksumErrs", KSTAT_DATA_UINT32, 0 }, 14195 { "udpInOverflows", KSTAT_DATA_UINT32, 0 }, 14196 { "rawipInOverflows", KSTAT_DATA_UINT32, 0 }, 14197 { "ipsecInSucceeded", KSTAT_DATA_UINT32, 0 }, 14198 { "ipsecInFailed", KSTAT_DATA_INT32, 0 }, 14199 { "memberEntrySize", KSTAT_DATA_INT32, 0 }, 14200 { "inIPv6", KSTAT_DATA_UINT32, 0 }, 14201 { "outIPv6", KSTAT_DATA_UINT32, 0 }, 14202 { "outSwitchIPv6", KSTAT_DATA_UINT32, 0 }, 14203 }; 14204 14205 ksp = kstat_create_netstack("ip", 0, "ip", "mib2", KSTAT_TYPE_NAMED, 14206 NUM_OF_FIELDS(ip_named_kstat_t), 0, stackid); 14207 if (ksp == NULL || ksp->ks_data == NULL) 14208 return (NULL); 14209 14210 template.forwarding.value.ui32 = WE_ARE_FORWARDING(ipst) ? 
1:2; 14211 template.defaultTTL.value.ui32 = (uint32_t)ipst->ips_ip_def_ttl; 14212 template.reasmTimeout.value.ui32 = ipst->ips_ip_g_frag_timeout; 14213 template.addrEntrySize.value.i32 = sizeof (mib2_ipAddrEntry_t); 14214 template.routeEntrySize.value.i32 = sizeof (mib2_ipRouteEntry_t); 14215 14216 template.netToMediaEntrySize.value.i32 = 14217 sizeof (mib2_ipNetToMediaEntry_t); 14218 14219 template.memberEntrySize.value.i32 = sizeof (ipv6_member_t); 14220 14221 bcopy(&template, ksp->ks_data, sizeof (template)); 14222 ksp->ks_update = ip_kstat_update; 14223 ksp->ks_private = (void *)(uintptr_t)stackid; 14224 14225 kstat_install(ksp); 14226 return (ksp); 14227 } 14228 14229 static void 14230 ip_kstat_fini(netstackid_t stackid, kstat_t *ksp) 14231 { 14232 if (ksp != NULL) { 14233 ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private); 14234 kstat_delete_netstack(ksp, stackid); 14235 } 14236 } 14237 14238 static int 14239 ip_kstat_update(kstat_t *kp, int rw) 14240 { 14241 ip_named_kstat_t *ipkp; 14242 mib2_ipIfStatsEntry_t ipmib; 14243 ill_walk_context_t ctx; 14244 ill_t *ill; 14245 netstackid_t stackid = (zoneid_t)(uintptr_t)kp->ks_private; 14246 netstack_t *ns; 14247 ip_stack_t *ipst; 14248 14249 if (kp == NULL || kp->ks_data == NULL) 14250 return (EIO); 14251 14252 if (rw == KSTAT_WRITE) 14253 return (EACCES); 14254 14255 ns = netstack_find_by_stackid(stackid); 14256 if (ns == NULL) 14257 return (-1); 14258 ipst = ns->netstack_ip; 14259 if (ipst == NULL) { 14260 netstack_rele(ns); 14261 return (-1); 14262 } 14263 ipkp = (ip_named_kstat_t *)kp->ks_data; 14264 14265 bcopy(&ipst->ips_ip_mib, &ipmib, sizeof (ipmib)); 14266 rw_enter(&ipst->ips_ill_g_lock, RW_READER); 14267 ill = ILL_START_WALK_V4(&ctx, ipst); 14268 for (; ill != NULL; ill = ill_next(&ctx, ill)) 14269 ip_mib2_add_ip_stats(&ipmib, ill->ill_ip_mib); 14270 rw_exit(&ipst->ips_ill_g_lock); 14271 14272 ipkp->forwarding.value.ui32 = ipmib.ipIfStatsForwarding; 14273 ipkp->defaultTTL.value.ui32 = ipmib.ipIfStatsDefaultTTL; 14274 ipkp->inReceives.value.ui64 = ipmib.ipIfStatsHCInReceives; 14275 ipkp->inHdrErrors.value.ui32 = ipmib.ipIfStatsInHdrErrors; 14276 ipkp->inAddrErrors.value.ui32 = ipmib.ipIfStatsInAddrErrors; 14277 ipkp->forwDatagrams.value.ui64 = ipmib.ipIfStatsHCOutForwDatagrams; 14278 ipkp->inUnknownProtos.value.ui32 = ipmib.ipIfStatsInUnknownProtos; 14279 ipkp->inDiscards.value.ui32 = ipmib.ipIfStatsInDiscards; 14280 ipkp->inDelivers.value.ui64 = ipmib.ipIfStatsHCInDelivers; 14281 ipkp->outRequests.value.ui64 = ipmib.ipIfStatsHCOutRequests; 14282 ipkp->outDiscards.value.ui32 = ipmib.ipIfStatsOutDiscards; 14283 ipkp->outNoRoutes.value.ui32 = ipmib.ipIfStatsOutNoRoutes; 14284 ipkp->reasmTimeout.value.ui32 = ipst->ips_ip_g_frag_timeout; 14285 ipkp->reasmReqds.value.ui32 = ipmib.ipIfStatsReasmReqds; 14286 ipkp->reasmOKs.value.ui32 = ipmib.ipIfStatsReasmOKs; 14287 ipkp->reasmFails.value.ui32 = ipmib.ipIfStatsReasmFails; 14288 ipkp->fragOKs.value.ui32 = ipmib.ipIfStatsOutFragOKs; 14289 ipkp->fragFails.value.ui32 = ipmib.ipIfStatsOutFragFails; 14290 ipkp->fragCreates.value.ui32 = ipmib.ipIfStatsOutFragCreates; 14291 14292 ipkp->routingDiscards.value.ui32 = 0; 14293 ipkp->inErrs.value.ui32 = ipmib.tcpIfStatsInErrs; 14294 ipkp->noPorts.value.ui32 = ipmib.udpIfStatsNoPorts; 14295 ipkp->inCksumErrs.value.ui32 = ipmib.ipIfStatsInCksumErrs; 14296 ipkp->reasmDuplicates.value.ui32 = ipmib.ipIfStatsReasmDuplicates; 14297 ipkp->reasmPartDups.value.ui32 = ipmib.ipIfStatsReasmPartDups; 14298 ipkp->forwProhibits.value.ui32 = 
static int
ip_kstat_update(kstat_t *kp, int rw)
{
    ip_named_kstat_t *ipkp;
    mib2_ipIfStatsEntry_t ipmib;
    ill_walk_context_t ctx;
    ill_t *ill;
    netstackid_t stackid = (netstackid_t)(uintptr_t)kp->ks_private;
    netstack_t *ns;
    ip_stack_t *ipst;

    if (kp == NULL || kp->ks_data == NULL)
        return (EIO);

    if (rw == KSTAT_WRITE)
        return (EACCES);

    ns = netstack_find_by_stackid(stackid);
    if (ns == NULL)
        return (-1);
    ipst = ns->netstack_ip;
    if (ipst == NULL) {
        netstack_rele(ns);
        return (-1);
    }
    ipkp = (ip_named_kstat_t *)kp->ks_data;

    bcopy(&ipst->ips_ip_mib, &ipmib, sizeof (ipmib));
    rw_enter(&ipst->ips_ill_g_lock, RW_READER);
    ill = ILL_START_WALK_V4(&ctx, ipst);
    for (; ill != NULL; ill = ill_next(&ctx, ill))
        ip_mib2_add_ip_stats(&ipmib, ill->ill_ip_mib);
    rw_exit(&ipst->ips_ill_g_lock);

    ipkp->forwarding.value.ui32 = ipmib.ipIfStatsForwarding;
    ipkp->defaultTTL.value.ui32 = ipmib.ipIfStatsDefaultTTL;
    ipkp->inReceives.value.ui64 = ipmib.ipIfStatsHCInReceives;
    ipkp->inHdrErrors.value.ui32 = ipmib.ipIfStatsInHdrErrors;
    ipkp->inAddrErrors.value.ui32 = ipmib.ipIfStatsInAddrErrors;
    ipkp->forwDatagrams.value.ui64 = ipmib.ipIfStatsHCOutForwDatagrams;
    ipkp->inUnknownProtos.value.ui32 = ipmib.ipIfStatsInUnknownProtos;
    ipkp->inDiscards.value.ui32 = ipmib.ipIfStatsInDiscards;
    ipkp->inDelivers.value.ui64 = ipmib.ipIfStatsHCInDelivers;
    ipkp->outRequests.value.ui64 = ipmib.ipIfStatsHCOutRequests;
    ipkp->outDiscards.value.ui32 = ipmib.ipIfStatsOutDiscards;
    ipkp->outNoRoutes.value.ui32 = ipmib.ipIfStatsOutNoRoutes;
    ipkp->reasmTimeout.value.ui32 = ipst->ips_ip_g_frag_timeout;
    ipkp->reasmReqds.value.ui32 = ipmib.ipIfStatsReasmReqds;
    ipkp->reasmOKs.value.ui32 = ipmib.ipIfStatsReasmOKs;
    ipkp->reasmFails.value.ui32 = ipmib.ipIfStatsReasmFails;
    ipkp->fragOKs.value.ui32 = ipmib.ipIfStatsOutFragOKs;
    ipkp->fragFails.value.ui32 = ipmib.ipIfStatsOutFragFails;
    ipkp->fragCreates.value.ui32 = ipmib.ipIfStatsOutFragCreates;

    ipkp->routingDiscards.value.ui32 = 0;
    ipkp->inErrs.value.ui32 = ipmib.tcpIfStatsInErrs;
    ipkp->noPorts.value.ui32 = ipmib.udpIfStatsNoPorts;
    ipkp->inCksumErrs.value.ui32 = ipmib.ipIfStatsInCksumErrs;
    ipkp->reasmDuplicates.value.ui32 = ipmib.ipIfStatsReasmDuplicates;
    ipkp->reasmPartDups.value.ui32 = ipmib.ipIfStatsReasmPartDups;
    ipkp->forwProhibits.value.ui32 = ipmib.ipIfStatsForwProhibits;
    ipkp->udpInCksumErrs.value.ui32 = ipmib.udpIfStatsInCksumErrs;
    ipkp->udpInOverflows.value.ui32 = ipmib.udpIfStatsInOverflows;
    ipkp->rawipInOverflows.value.ui32 = ipmib.rawipIfStatsInOverflows;
    ipkp->ipsecInSucceeded.value.ui32 = ipmib.ipsecIfStatsInSucceeded;
    ipkp->ipsecInFailed.value.i32 = ipmib.ipsecIfStatsInFailed;

    ipkp->inIPv6.value.ui32 = ipmib.ipIfStatsInWrongIPVersion;
    ipkp->outIPv6.value.ui32 = ipmib.ipIfStatsOutWrongIPVersion;
    ipkp->outSwitchIPv6.value.ui32 = ipmib.ipIfStatsOutSwitchIPVersion;

    netstack_rele(ns);

    return (0);
}

static void *
icmp_kstat_init(netstackid_t stackid)
{
    kstat_t *ksp;

    icmp_named_kstat_t template = {
        { "inMsgs",		KSTAT_DATA_UINT32 },
        { "inErrors",		KSTAT_DATA_UINT32 },
        { "inDestUnreachs",	KSTAT_DATA_UINT32 },
        { "inTimeExcds",	KSTAT_DATA_UINT32 },
        { "inParmProbs",	KSTAT_DATA_UINT32 },
        { "inSrcQuenchs",	KSTAT_DATA_UINT32 },
        { "inRedirects",	KSTAT_DATA_UINT32 },
        { "inEchos",		KSTAT_DATA_UINT32 },
        { "inEchoReps",		KSTAT_DATA_UINT32 },
        { "inTimestamps",	KSTAT_DATA_UINT32 },
        { "inTimestampReps",	KSTAT_DATA_UINT32 },
        { "inAddrMasks",	KSTAT_DATA_UINT32 },
        { "inAddrMaskReps",	KSTAT_DATA_UINT32 },
        { "outMsgs",		KSTAT_DATA_UINT32 },
        { "outErrors",		KSTAT_DATA_UINT32 },
        { "outDestUnreachs",	KSTAT_DATA_UINT32 },
        { "outTimeExcds",	KSTAT_DATA_UINT32 },
        { "outParmProbs",	KSTAT_DATA_UINT32 },
        { "outSrcQuenchs",	KSTAT_DATA_UINT32 },
        { "outRedirects",	KSTAT_DATA_UINT32 },
        { "outEchos",		KSTAT_DATA_UINT32 },
        { "outEchoReps",	KSTAT_DATA_UINT32 },
        { "outTimestamps",	KSTAT_DATA_UINT32 },
        { "outTimestampReps",	KSTAT_DATA_UINT32 },
        { "outAddrMasks",	KSTAT_DATA_UINT32 },
        { "outAddrMaskReps",	KSTAT_DATA_UINT32 },
        { "inCksumErrs",	KSTAT_DATA_UINT32 },
        { "inUnknowns",		KSTAT_DATA_UINT32 },
        { "inFragNeeded",	KSTAT_DATA_UINT32 },
        { "outFragNeeded",	KSTAT_DATA_UINT32 },
        { "outDrops",		KSTAT_DATA_UINT32 },
        { "inOverflows",	KSTAT_DATA_UINT32 },
        { "inBadRedirects",	KSTAT_DATA_UINT32 },
    };

    ksp = kstat_create_netstack("ip", 0, "icmp", "mib2", KSTAT_TYPE_NAMED,
        NUM_OF_FIELDS(icmp_named_kstat_t), 0, stackid);
    if (ksp == NULL || ksp->ks_data == NULL)
        return (NULL);

    bcopy(&template, ksp->ks_data, sizeof (template));

    ksp->ks_update = icmp_kstat_update;
    ksp->ks_private = (void *)(uintptr_t)stackid;

    kstat_install(ksp);
    return (ksp);
}

static void
icmp_kstat_fini(netstackid_t stackid, kstat_t *ksp)
{
    if (ksp != NULL) {
        ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
        kstat_delete_netstack(ksp, stackid);
    }
}

static int
icmp_kstat_update(kstat_t *kp, int rw)
{
    icmp_named_kstat_t *icmpkp;
    netstackid_t stackid = (netstackid_t)(uintptr_t)kp->ks_private;
    netstack_t *ns;
    ip_stack_t *ipst;

    if ((kp == NULL) || (kp->ks_data == NULL))
        return (EIO);

    if (rw == KSTAT_WRITE)
        return (EACCES);

    ns = netstack_find_by_stackid(stackid);
    if (ns == NULL)
        return (-1);
    ipst = ns->netstack_ip;
    if (ipst == NULL) {
        netstack_rele(ns);
        return (-1);
    }
    icmpkp = (icmp_named_kstat_t *)kp->ks_data;
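    /*
     * Snapshot every ICMP MIB counter into the corresponding named
     * kstat entry; the names match the template in icmp_kstat_init().
     */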
    icmpkp->inMsgs.value.ui32 = ipst->ips_icmp_mib.icmpInMsgs;
    icmpkp->inErrors.value.ui32 = ipst->ips_icmp_mib.icmpInErrors;
    icmpkp->inDestUnreachs.value.ui32 =
        ipst->ips_icmp_mib.icmpInDestUnreachs;
    icmpkp->inTimeExcds.value.ui32 = ipst->ips_icmp_mib.icmpInTimeExcds;
    icmpkp->inParmProbs.value.ui32 = ipst->ips_icmp_mib.icmpInParmProbs;
    icmpkp->inSrcQuenchs.value.ui32 = ipst->ips_icmp_mib.icmpInSrcQuenchs;
    icmpkp->inRedirects.value.ui32 = ipst->ips_icmp_mib.icmpInRedirects;
    icmpkp->inEchos.value.ui32 = ipst->ips_icmp_mib.icmpInEchos;
    icmpkp->inEchoReps.value.ui32 = ipst->ips_icmp_mib.icmpInEchoReps;
    icmpkp->inTimestamps.value.ui32 = ipst->ips_icmp_mib.icmpInTimestamps;
    icmpkp->inTimestampReps.value.ui32 =
        ipst->ips_icmp_mib.icmpInTimestampReps;
    icmpkp->inAddrMasks.value.ui32 = ipst->ips_icmp_mib.icmpInAddrMasks;
    icmpkp->inAddrMaskReps.value.ui32 =
        ipst->ips_icmp_mib.icmpInAddrMaskReps;
    icmpkp->outMsgs.value.ui32 = ipst->ips_icmp_mib.icmpOutMsgs;
    icmpkp->outErrors.value.ui32 = ipst->ips_icmp_mib.icmpOutErrors;
    icmpkp->outDestUnreachs.value.ui32 =
        ipst->ips_icmp_mib.icmpOutDestUnreachs;
    icmpkp->outTimeExcds.value.ui32 = ipst->ips_icmp_mib.icmpOutTimeExcds;
    icmpkp->outParmProbs.value.ui32 = ipst->ips_icmp_mib.icmpOutParmProbs;
    icmpkp->outSrcQuenchs.value.ui32 =
        ipst->ips_icmp_mib.icmpOutSrcQuenchs;
    icmpkp->outRedirects.value.ui32 = ipst->ips_icmp_mib.icmpOutRedirects;
    icmpkp->outEchos.value.ui32 = ipst->ips_icmp_mib.icmpOutEchos;
    icmpkp->outEchoReps.value.ui32 = ipst->ips_icmp_mib.icmpOutEchoReps;
    icmpkp->outTimestamps.value.ui32 =
        ipst->ips_icmp_mib.icmpOutTimestamps;
    icmpkp->outTimestampReps.value.ui32 =
        ipst->ips_icmp_mib.icmpOutTimestampReps;
    icmpkp->outAddrMasks.value.ui32 =
        ipst->ips_icmp_mib.icmpOutAddrMasks;
    icmpkp->outAddrMaskReps.value.ui32 =
        ipst->ips_icmp_mib.icmpOutAddrMaskReps;
    icmpkp->inCksumErrs.value.ui32 = ipst->ips_icmp_mib.icmpInCksumErrs;
    icmpkp->inUnknowns.value.ui32 = ipst->ips_icmp_mib.icmpInUnknowns;
    icmpkp->inFragNeeded.value.ui32 = ipst->ips_icmp_mib.icmpInFragNeeded;
    icmpkp->outFragNeeded.value.ui32 =
        ipst->ips_icmp_mib.icmpOutFragNeeded;
    icmpkp->outDrops.value.ui32 = ipst->ips_icmp_mib.icmpOutDrops;
    icmpkp->inOverflows.value.ui32 = ipst->ips_icmp_mib.icmpInOverflows;
    icmpkp->inBadRedirects.value.ui32 =
        ipst->ips_icmp_mib.icmpInBadRedirects;

    netstack_rele(ns);
    return (0);
}
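/*
 * The MIB kstats installed above are visible from userland; for
 * example (illustrative invocation), the ICMP counters filled in by
 * icmp_kstat_update() can be listed with kstat(1M):
 *
 *	$ kstat -m ip -n icmp
 */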
/*
 * This is the fanout function for raw sockets opened for SCTP.  Note
 * that it is called after SCTP checks that there is no socket which
 * wants the packet.  Then, before SCTP handles this out-of-the-blue
 * packet, this function is called to see if there is any raw socket
 * for SCTP.  If there is and it is bound to the correct address, the
 * packet will be sent to that socket.  Note that only one raw socket
 * can be bound to a port; this is assured in ipcl_sctp_hash_insert().
 */
void
ip_fanout_sctp_raw(mblk_t *mp, ipha_t *ipha, ip6_t *ip6h, uint32_t ports,
    ip_recv_attr_t *ira)
{
    conn_t *connp;
    queue_t *rq;
    boolean_t secure;
    ill_t *ill = ira->ira_ill;
    ip_stack_t *ipst = ill->ill_ipst;
    ipsec_stack_t *ipss = ipst->ips_netstack->netstack_ipsec;
    sctp_stack_t *sctps = ipst->ips_netstack->netstack_sctp;
    iaflags_t iraflags = ira->ira_flags;
    ill_t *rill = ira->ira_rill;

    secure = iraflags & IRAF_IPSEC_SECURE;

    connp = ipcl_classify_raw(mp, IPPROTO_SCTP, ports, ipha, ip6h,
        ira, ipst);
    if (connp == NULL) {
        /*
         * Although raw SCTP is not summed, OOB chunks must be.
         * Drop the packet here if the SCTP checksum failed.
         */
        if (iraflags & IRAF_SCTP_CSUM_ERR) {
            BUMP_MIB(&sctps->sctps_mib, sctpChecksumError);
            freemsg(mp);
            return;
        }
        ira->ira_ill = ira->ira_rill = NULL;
        sctp_ootb_input(mp, ira, ipst);
        ira->ira_ill = ill;
        ira->ira_rill = rill;
        return;
    }
    rq = connp->conn_rq;
    if (IPCL_IS_NONSTR(connp) ? connp->conn_flow_cntrld : !canputnext(rq)) {
        CONN_DEC_REF(connp);
        BUMP_MIB(ill->ill_ip_mib, rawipIfStatsInOverflows);
        freemsg(mp);
        return;
    }
    if (((iraflags & IRAF_IS_IPV4) ?
        CONN_INBOUND_POLICY_PRESENT(connp, ipss) :
        CONN_INBOUND_POLICY_PRESENT_V6(connp, ipss)) ||
        secure) {
        mp = ipsec_check_inbound_policy(mp, connp, ipha,
            ip6h, ira);
        if (mp == NULL) {
            BUMP_MIB(ill->ill_ip_mib, ipIfStatsInDiscards);
            /* Note that mp is NULL */
            ip_drop_input("ipIfStatsInDiscards", mp, ill);
            CONN_DEC_REF(connp);
            return;
        }
    }

    if (iraflags & IRAF_ICMP_ERROR) {
        (connp->conn_recvicmp)(connp, mp, NULL, ira);
    } else {
        ill_t *rill = ira->ira_rill;

        BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCInDelivers);
        /* This is the SOCK_RAW, IPPROTO_SCTP case. */
        ira->ira_ill = ira->ira_rill = NULL;
        (connp->conn_recv)(connp, mp, NULL, ira);
        ira->ira_ill = ill;
        ira->ira_rill = rill;
    }
    CONN_DEC_REF(connp);
}
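/*
 * For reference, a consumer that ip_fanout_sctp_raw() can deliver to is
 * an ordinary raw socket bound to a port.  A minimal userland sketch
 * (illustrative only; the port number is arbitrary and opening a raw
 * socket requires sufficient privilege):
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <strings.h>
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_SCTP);
 *	struct sockaddr_in sin;
 *
 *	bzero(&sin, sizeof (sin));
 *	sin.sin_family = AF_INET;
 *	sin.sin_port = htons(5000);
 *	(void) bind(fd, (struct sockaddr *)&sin, sizeof (sin));
 *
 * Out-of-the-blue SCTP packets for that port are then handed to this
 * socket by the classification above.
 */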
/*
 * Free a packet that has the link-layer dl_unitdata_req_t or fast-path
 * header before the ip payload.
 */
static void
ip_xmit_flowctl_drop(ill_t *ill, mblk_t *mp, boolean_t is_fp_mp, int fp_mp_len)
{
    int len = (mp->b_wptr - mp->b_rptr);
    mblk_t *ip_mp;

    BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
    if (is_fp_mp || len != fp_mp_len) {
        if (len > fp_mp_len) {
            /*
             * fastpath header and ip header in the first mblk
             */
            mp->b_rptr += fp_mp_len;
        } else {
            /*
             * ip_xmit_attach_llhdr had to prepend an mblk to
             * attach the fastpath header before the ip header.
             */
            ip_mp = mp->b_cont;
            freeb(mp);
            mp = ip_mp;
            mp->b_rptr += (fp_mp_len - len);
        }
    } else {
        ip_mp = mp->b_cont;
        freeb(mp);
        mp = ip_mp;
    }
    ip_drop_output("ipIfStatsOutDiscards - flow ctl", mp, ill);
    freemsg(mp);
}

/*
 * Normal post fragmentation function.
 *
 * Send a packet using the passed in nce. This handles both IPv4 and IPv6
 * using the same state machine.
 *
 * We return an error on failure. In particular we return EWOULDBLOCK
 * when the driver flow controls. In that case this ensures that ip_wsrv runs
 * (currently by canputnext failure resulting in backenabling from GLD.)
 * This allows the callers of conn_ip_output() to use EWOULDBLOCK as an
 * indication that they can flow control until ip_wsrv() tells them to
 * restart.
 *
 * If the nce passed by the caller is incomplete, this function queues the
 * packet and, if necessary, sends an ARP request and bails.
 * If the Neighbor Cache entry passed in is fully resolved, we simply prepend
 * the link-layer header to the packet, do any IPsec hardware acceleration
 * work if necessary, and send the packet out on the wire.
 */
/* ARGSUSED6 */
int
ip_xmit(mblk_t *mp, nce_t *nce, iaflags_t ixaflags, uint_t pkt_len,
    uint32_t xmit_hint, zoneid_t szone, zoneid_t nolzid, uintptr_t *ixacookie)
{
    queue_t *wq;
    ill_t *ill = nce->nce_ill;
    ip_stack_t *ipst = ill->ill_ipst;
    uint64_t delta;
    boolean_t isv6 = ill->ill_isv6;
    boolean_t fp_mp;
    ncec_t *ncec = nce->nce_common;
    int64_t now = LBOLT_FASTPATH64;

    DTRACE_PROBE1(ip__xmit, nce_t *, nce);

    ASSERT(mp != NULL);
    ASSERT(mp->b_datap->db_type == M_DATA);
    ASSERT(pkt_len == msgdsize(mp));

    /*
     * If we have already been here and are coming back after ARP/ND,
     * the IXAF_NO_TRACE flag is set. We skip FW_HOOKS, DTRACE and ipobs
     * in that case since they have seen the packet when it came here
     * the first time.
     */
    if (ixaflags & IXAF_NO_TRACE)
        goto sendit;

    if (ixaflags & IXAF_IS_IPV4) {
        ipha_t *ipha = (ipha_t *)mp->b_rptr;

        ASSERT(!isv6);
        ASSERT(pkt_len == ntohs(((ipha_t *)mp->b_rptr)->ipha_length));
        if (HOOKS4_INTERESTED_PHYSICAL_OUT(ipst) &&
            !(ixaflags & IXAF_NO_PFHOOK)) {
            int error;

            FW_HOOKS(ipst->ips_ip4_physical_out_event,
                ipst->ips_ipv4firewall_physical_out,
                NULL, ill, ipha, mp, mp, 0, ipst, error);
            DTRACE_PROBE1(ip4__physical__out__end,
                mblk_t *, mp);
            if (mp == NULL)
                return (error);

            /* The length could have changed */
            pkt_len = msgdsize(mp);
        }
        if (ipst->ips_ip4_observe.he_interested) {
            /*
             * Note that for TX the zoneid is the sending
             * zone, whether or not MLP is in play.
             * Since the szone argument is the IP zoneid (i.e.,
             * zero for exclusive-IP zones) and ipobs wants
             * the system zoneid, we map it here.
             */
            szone = IP_REAL_ZONEID(szone, ipst);

            /*
             * On the outbound path the destination zone will be
             * unknown as we're sending this packet out on the
             * wire.
             */
            ipobs_hook(mp, IPOBS_HOOK_OUTBOUND, szone, ALL_ZONES,
                ill, ipst);
        }
        DTRACE_IP7(send, mblk_t *, mp, conn_t *, NULL,
            void_ip_t *, ipha, __dtrace_ipsr_ill_t *, ill,
            ipha_t *, ipha, ip6_t *, NULL, int, 0);
    } else {
        ip6_t *ip6h = (ip6_t *)mp->b_rptr;

        ASSERT(isv6);
        ASSERT(pkt_len ==
            ntohs(((ip6_t *)mp->b_rptr)->ip6_plen) + IPV6_HDR_LEN);
        if (HOOKS6_INTERESTED_PHYSICAL_OUT(ipst) &&
            !(ixaflags & IXAF_NO_PFHOOK)) {
            int error;

            FW_HOOKS6(ipst->ips_ip6_physical_out_event,
                ipst->ips_ipv6firewall_physical_out,
                NULL, ill, ip6h, mp, mp, 0, ipst, error);
            DTRACE_PROBE1(ip6__physical__out__end,
                mblk_t *, mp);
            if (mp == NULL)
                return (error);

            /* The length could have changed */
            pkt_len = msgdsize(mp);
        }
        if (ipst->ips_ip6_observe.he_interested) {
            /* See above */
            szone = IP_REAL_ZONEID(szone, ipst);

            ipobs_hook(mp, IPOBS_HOOK_OUTBOUND, szone, ALL_ZONES,
                ill, ipst);
        }
        DTRACE_IP7(send, mblk_t *, mp, conn_t *, NULL,
            void_ip_t *, ip6h, __dtrace_ipsr_ill_t *, ill,
            ipha_t *, NULL, ip6_t *, ip6h, int, 0);
    }

sendit:
    /*
     * We check the state without a lock because the state can never
     * move "backwards" to initial or incomplete.
     */
    switch (ncec->ncec_state) {
    case ND_REACHABLE:
    case ND_STALE:
    case ND_DELAY:
    case ND_PROBE:
        mp = ip_xmit_attach_llhdr(mp, nce);
        if (mp == NULL) {
            /*
             * ip_xmit_attach_llhdr has increased
             * ipIfStatsOutDiscards and called ip_drop_output()
             */
            return (ENOBUFS);
        }
        /*
         * Check if nce_fastpath completed and we tagged on a
         * copy of nce_fp_mp in ip_xmit_attach_llhdr().
         */
        fp_mp = (mp->b_datap->db_type == M_DATA);

        if (fp_mp &&
            (ill->ill_capabilities & ILL_CAPAB_DLD_DIRECT)) {
            ill_dld_direct_t *idd;

            idd = &ill->ill_dld_capab->idc_direct;
            /*
             * Send the packet directly to DLD, where it
             * may be queued depending on the availability
             * of transmit resources at the media layer.
             * The return value must be taken into account
             * so that the transport can be flow controlled.
             */
            BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits);
            UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets,
                pkt_len);

            if (ixaflags & IXAF_NO_DEV_FLOW_CTL) {
                (void) idd->idd_tx_df(idd->idd_tx_dh, mp,
                    (uintptr_t)xmit_hint, IP_DROP_ON_NO_DESC);
            } else {
                uintptr_t cookie;

                if ((cookie = idd->idd_tx_df(idd->idd_tx_dh,
                    mp, (uintptr_t)xmit_hint, 0)) != 0) {
                    if (ixacookie != NULL)
                        *ixacookie = cookie;
                    return (EWOULDBLOCK);
                }
            }
        } else {
            wq = ill->ill_wq;

            if (!(ixaflags & IXAF_NO_DEV_FLOW_CTL) &&
                !canputnext(wq)) {
                if (ixacookie != NULL)
                    *ixacookie = 0;
                ip_xmit_flowctl_drop(ill, mp, fp_mp,
                    nce->nce_fp_mp != NULL ?
                    MBLKL(nce->nce_fp_mp) : 0);
                return (EWOULDBLOCK);
            }
            BUMP_MIB(ill->ill_ip_mib, ipIfStatsHCOutTransmits);
            UPDATE_MIB(ill->ill_ip_mib, ipIfStatsHCOutOctets,
                pkt_len);
            putnext(wq, mp);
        }

        /*
         * The rest of this function implements Neighbor Unreachability
         * detection. Determine if the ncec is eligible for NUD.
         */
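        /*
         * In outline (cf. RFC 4861 section 7.3): a reachability
         * confirmation keeps the entry REACHABLE; once
         * ill_reachable_time elapses without one, the entry is
         * treated as STALE, and the next transmit moves it to
         * DELAY, after which nce_timer() probes the neighbor
         * (PROBE) and can eventually declare it UNREACHABLE.
         */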
        if (ncec->ncec_flags & NCE_F_NONUD)
            return (0);

        ASSERT(ncec->ncec_state != ND_INCOMPLETE);

        /*
         * Check for upper layer advice
         */
        if (ixaflags & IXAF_REACH_CONF) {
            timeout_id_t tid;

            /*
             * It should be o.k. to check the state without
             * a lock here; at most we lose an advice.
             */
            ncec->ncec_last = TICK_TO_MSEC(now);
            if (ncec->ncec_state != ND_REACHABLE) {
                mutex_enter(&ncec->ncec_lock);
                ncec->ncec_state = ND_REACHABLE;
                tid = ncec->ncec_timeout_id;
                ncec->ncec_timeout_id = 0;
                mutex_exit(&ncec->ncec_lock);
                (void) untimeout(tid);
                if (ip_debug > 2) {
                    /* ip1dbg */
                    pr_addr_dbg("ip_xmit: state"
                        " for %s changed to"
                        " REACHABLE\n", AF_INET6,
                        &ncec->ncec_addr);
                }
            }
            return (0);
        }

        delta = TICK_TO_MSEC(now) - ncec->ncec_last;
        ip1dbg(("ip_xmit: delta = %" PRId64
            " ill_reachable_time = %d \n", delta,
            ill->ill_reachable_time));
        if (delta > (uint64_t)ill->ill_reachable_time) {
            mutex_enter(&ncec->ncec_lock);
            switch (ncec->ncec_state) {
            case ND_REACHABLE:
                ASSERT((ncec->ncec_flags & NCE_F_NONUD) == 0);
                /* FALLTHROUGH */
            case ND_STALE:
                /*
                 * ND_REACHABLE is identical to
                 * ND_STALE in this specific case. If
                 * reachable time has expired for this
                 * neighbor (delta is greater than
                 * reachable time), conceptually, the
                 * neighbor cache is no longer in
                 * REACHABLE state, but already in
                 * STALE state. So the correct
                 * transition here is to ND_DELAY.
                 */
                ncec->ncec_state = ND_DELAY;
                mutex_exit(&ncec->ncec_lock);
                nce_restart_timer(ncec,
                    ipst->ips_delay_first_probe_time);
                if (ip_debug > 3) {
                    /* ip2dbg */
                    pr_addr_dbg("ip_xmit: state"
                        " for %s changed to"
                        " DELAY\n", AF_INET6,
                        &ncec->ncec_addr);
                }
                break;
            case ND_DELAY:
            case ND_PROBE:
                mutex_exit(&ncec->ncec_lock);
                /* Timers have already started */
                break;
            case ND_UNREACHABLE:
                /*
                 * nce_timer has detected that this ncec
                 * is unreachable and initiated deleting
                 * this ncec.
                 * This is a harmless race where we found the
                 * ncec before it was deleted and have
                 * just sent out a packet using this
                 * unreachable ncec.
                 */
                mutex_exit(&ncec->ncec_lock);
                break;
            default:
                ASSERT(0);
                mutex_exit(&ncec->ncec_lock);
            }
        }
        return (0);

    case ND_INCOMPLETE:
        /*
         * The state could have changed since we didn't hold the lock.
         * Re-verify state under lock.
         */
        mutex_enter(&ncec->ncec_lock);
        if (NCE_ISREACHABLE(ncec)) {
            mutex_exit(&ncec->ncec_lock);
            goto sendit;
        }
        /* queue the packet */
        nce_queue_mp(ncec, mp, ipmp_packet_is_probe(mp, nce->nce_ill));
        mutex_exit(&ncec->ncec_lock);
        DTRACE_PROBE2(ip__xmit__incomplete,
            (ncec_t *), ncec, (mblk_t *), mp);
        return (0);

    case ND_INITIAL:
        /*
         * State could have changed since we didn't hold the lock, so
         * re-verify state.
         */
        mutex_enter(&ncec->ncec_lock);
        if (NCE_ISREACHABLE(ncec)) {
            mutex_exit(&ncec->ncec_lock);
            goto sendit;
        }
        nce_queue_mp(ncec, mp, ipmp_packet_is_probe(mp, nce->nce_ill));
        if (ncec->ncec_state == ND_INITIAL) {
            ncec->ncec_state = ND_INCOMPLETE;
            mutex_exit(&ncec->ncec_lock);
            /*
             * Figure out the source we want to use
             * and resolve it.
             */
            ip_ndp_resolve(ncec);
        } else {
            mutex_exit(&ncec->ncec_lock);
        }
        return (0);

    case ND_UNREACHABLE:
        BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
        ip_drop_output("ipIfStatsOutDiscards - ND_UNREACHABLE",
            mp, ill);
        freemsg(mp);
        return (0);

    default:
        ASSERT(0);
        BUMP_MIB(ill->ill_ip_mib, ipIfStatsOutDiscards);
        ip_drop_output("ipIfStatsOutDiscards - ND_other",
            mp, ill);
        freemsg(mp);
        return (ENETUNREACH);
    }
}

/*
 * Return B_TRUE if the buffers differ in length or content.
 * This is used for comparing extension header buffers.
 * Note that an extension header would be declared different even if all
 * that changed was the next header value in that header, i.e. what really
 * changed is the next extension header.
 */
boolean_t
ip_cmpbuf(const void *abuf, uint_t alen, boolean_t b_valid, const void *bbuf,
    uint_t blen)
{
    if (!b_valid)
        blen = 0;

    if (alen != blen)
        return (B_TRUE);
    if (alen == 0)
        return (B_FALSE);	/* Both zero length */
    return (bcmp(abuf, bbuf, alen) != 0);
}

/*
 * Preallocate memory for ip_savebuf(). Returns B_TRUE if ok.
 * Returns B_FALSE if memory allocation fails - don't change any state!
 */
boolean_t
ip_allocbuf(void **dstp, uint_t *dstlenp, boolean_t src_valid,
    const void *src, uint_t srclen)
{
    void *dst;

    if (!src_valid)
        srclen = 0;

    ASSERT(*dstlenp == 0);
    if (src != NULL && srclen != 0) {
        dst = mi_alloc(srclen, BPRI_MED);
        if (dst == NULL)
            return (B_FALSE);
    } else {
        dst = NULL;
    }
    if (*dstp != NULL)
        mi_free(*dstp);
    *dstp = dst;
    *dstlenp = dst == NULL ? 0 : srclen;
    return (B_TRUE);
}

/*
 * Replace what is in *dstp, *dstlenp with the source.
 * Assumes ip_allocbuf has already been called.
 */
void
ip_savebuf(void **dstp, uint_t *dstlenp, boolean_t src_valid,
    const void *src, uint_t srclen)
{
    if (!src_valid)
        srclen = 0;

    ASSERT(*dstlenp == srclen);
    if (src != NULL && srclen != 0)
        bcopy(src, *dstp, srclen);
}
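/*
 * The three routines above are used together.  A minimal sketch of the
 * compare/allocate/commit pattern (the local names are made up for
 * illustration; `cur'/`curlen' hold the currently saved buffer):
 *
 *	if (ip_cmpbuf(cur, curlen, new_valid, new, newlen)) {
 *		curlen = 0;
 *		if (!ip_allocbuf((void **)&cur, &curlen,
 *		    new_valid, new, newlen))
 *			return (ENOMEM);
 *		ip_savebuf((void **)&cur, &curlen, new_valid, new, newlen);
 *	}
 */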
/*
 * Free the storage pointed to by the members of an ip_pkt_t.
 */
void
ip_pkt_free(ip_pkt_t *ipp)
{
    uint_t fields = ipp->ipp_fields;

    if (fields & IPPF_HOPOPTS) {
        kmem_free(ipp->ipp_hopopts, ipp->ipp_hopoptslen);
        ipp->ipp_hopopts = NULL;
        ipp->ipp_hopoptslen = 0;
    }
    if (fields & IPPF_RTHDRDSTOPTS) {
        kmem_free(ipp->ipp_rthdrdstopts, ipp->ipp_rthdrdstoptslen);
        ipp->ipp_rthdrdstopts = NULL;
        ipp->ipp_rthdrdstoptslen = 0;
    }
    if (fields & IPPF_DSTOPTS) {
        kmem_free(ipp->ipp_dstopts, ipp->ipp_dstoptslen);
        ipp->ipp_dstopts = NULL;
        ipp->ipp_dstoptslen = 0;
    }
    if (fields & IPPF_RTHDR) {
        kmem_free(ipp->ipp_rthdr, ipp->ipp_rthdrlen);
        ipp->ipp_rthdr = NULL;
        ipp->ipp_rthdrlen = 0;
    }
    if (fields & IPPF_IPV4_OPTIONS) {
        kmem_free(ipp->ipp_ipv4_options, ipp->ipp_ipv4_options_len);
        ipp->ipp_ipv4_options = NULL;
        ipp->ipp_ipv4_options_len = 0;
    }
    if (fields & IPPF_LABEL_V4) {
        kmem_free(ipp->ipp_label_v4, ipp->ipp_label_len_v4);
        ipp->ipp_label_v4 = NULL;
        ipp->ipp_label_len_v4 = 0;
    }
    if (fields & IPPF_LABEL_V6) {
        kmem_free(ipp->ipp_label_v6, ipp->ipp_label_len_v6);
        ipp->ipp_label_v6 = NULL;
        ipp->ipp_label_len_v6 = 0;
    }
    ipp->ipp_fields &= ~(IPPF_HOPOPTS | IPPF_RTHDRDSTOPTS | IPPF_DSTOPTS |
        IPPF_RTHDR | IPPF_IPV4_OPTIONS | IPPF_LABEL_V4 | IPPF_LABEL_V6);
}

/*
 * Copy from src to dst and allocate as needed.
 * Returns zero or ENOMEM.
 *
 * The caller must initialize dst to zero.
 */
int
ip_pkt_copy(ip_pkt_t *src, ip_pkt_t *dst, int kmflag)
{
    uint_t fields = src->ipp_fields;

    /* Start with fields that don't require memory allocation */
    dst->ipp_fields = fields &
        ~(IPPF_HOPOPTS | IPPF_RTHDRDSTOPTS | IPPF_DSTOPTS |
        IPPF_RTHDR | IPPF_IPV4_OPTIONS | IPPF_LABEL_V4 | IPPF_LABEL_V6);

    dst->ipp_addr = src->ipp_addr;
    dst->ipp_unicast_hops = src->ipp_unicast_hops;
    dst->ipp_hoplimit = src->ipp_hoplimit;
    dst->ipp_tclass = src->ipp_tclass;
    dst->ipp_type_of_service = src->ipp_type_of_service;

    if (!(fields & (IPPF_HOPOPTS | IPPF_RTHDRDSTOPTS | IPPF_DSTOPTS |
        IPPF_RTHDR | IPPF_IPV4_OPTIONS | IPPF_LABEL_V4 | IPPF_LABEL_V6)))
        return (0);
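    /*
     * Each variable-length field below follows the same pattern:
     * allocate a private copy, mark the IPPF_* bit in dst only once
     * the allocation has succeeded, then copy the data and record
     * its length.  On any allocation failure, ip_pkt_free(dst)
     * releases everything copied so far before ENOMEM is returned.
     * (Note that the early return above does not test IPPF_FRAGHDR
     * even though a fragment header is copied below.)
     */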
    if (fields & IPPF_HOPOPTS) {
        dst->ipp_hopopts = kmem_alloc(src->ipp_hopoptslen, kmflag);
        if (dst->ipp_hopopts == NULL) {
            ip_pkt_free(dst);
            return (ENOMEM);
        }
        dst->ipp_fields |= IPPF_HOPOPTS;
        bcopy(src->ipp_hopopts, dst->ipp_hopopts,
            src->ipp_hopoptslen);
        dst->ipp_hopoptslen = src->ipp_hopoptslen;
    }
    if (fields & IPPF_RTHDRDSTOPTS) {
        dst->ipp_rthdrdstopts = kmem_alloc(src->ipp_rthdrdstoptslen,
            kmflag);
        if (dst->ipp_rthdrdstopts == NULL) {
            ip_pkt_free(dst);
            return (ENOMEM);
        }
        dst->ipp_fields |= IPPF_RTHDRDSTOPTS;
        bcopy(src->ipp_rthdrdstopts, dst->ipp_rthdrdstopts,
            src->ipp_rthdrdstoptslen);
        dst->ipp_rthdrdstoptslen = src->ipp_rthdrdstoptslen;
    }
    if (fields & IPPF_DSTOPTS) {
        dst->ipp_dstopts = kmem_alloc(src->ipp_dstoptslen, kmflag);
        if (dst->ipp_dstopts == NULL) {
            ip_pkt_free(dst);
            return (ENOMEM);
        }
        dst->ipp_fields |= IPPF_DSTOPTS;
        bcopy(src->ipp_dstopts, dst->ipp_dstopts,
            src->ipp_dstoptslen);
        dst->ipp_dstoptslen = src->ipp_dstoptslen;
    }
    if (fields & IPPF_RTHDR) {
        dst->ipp_rthdr = kmem_alloc(src->ipp_rthdrlen, kmflag);
        if (dst->ipp_rthdr == NULL) {
            ip_pkt_free(dst);
            return (ENOMEM);
        }
        dst->ipp_fields |= IPPF_RTHDR;
        bcopy(src->ipp_rthdr, dst->ipp_rthdr,
            src->ipp_rthdrlen);
        dst->ipp_rthdrlen = src->ipp_rthdrlen;
    }
    if (fields & IPPF_IPV4_OPTIONS) {
        dst->ipp_ipv4_options = kmem_alloc(src->ipp_ipv4_options_len,
            kmflag);
        if (dst->ipp_ipv4_options == NULL) {
            ip_pkt_free(dst);
            return (ENOMEM);
        }
        dst->ipp_fields |= IPPF_IPV4_OPTIONS;
        bcopy(src->ipp_ipv4_options, dst->ipp_ipv4_options,
            src->ipp_ipv4_options_len);
        dst->ipp_ipv4_options_len = src->ipp_ipv4_options_len;
    }
    if (fields & IPPF_LABEL_V4) {
        dst->ipp_label_v4 = kmem_alloc(src->ipp_label_len_v4, kmflag);
        if (dst->ipp_label_v4 == NULL) {
            ip_pkt_free(dst);
            return (ENOMEM);
        }
        dst->ipp_fields |= IPPF_LABEL_V4;
        bcopy(src->ipp_label_v4, dst->ipp_label_v4,
            src->ipp_label_len_v4);
        dst->ipp_label_len_v4 = src->ipp_label_len_v4;
    }
    if (fields & IPPF_LABEL_V6) {
        dst->ipp_label_v6 = kmem_alloc(src->ipp_label_len_v6, kmflag);
        if (dst->ipp_label_v6 == NULL) {
            ip_pkt_free(dst);
            return (ENOMEM);
        }
        dst->ipp_fields |= IPPF_LABEL_V6;
        bcopy(src->ipp_label_v6, dst->ipp_label_v6,
            src->ipp_label_len_v6);
        dst->ipp_label_len_v6 = src->ipp_label_len_v6;
    }
    if (fields & IPPF_FRAGHDR) {
        dst->ipp_fraghdr = kmem_alloc(src->ipp_fraghdrlen, kmflag);
        if (dst->ipp_fraghdr == NULL) {
            ip_pkt_free(dst);
            return (ENOMEM);
        }
        dst->ipp_fields |= IPPF_FRAGHDR;
        bcopy(src->ipp_fraghdr, dst->ipp_fraghdr,
            src->ipp_fraghdrlen);
        dst->ipp_fraghdrlen = src->ipp_fraghdrlen;
    }
    return (0);
}

/*
 * Returns INADDR_ANY if no source route
 */
ipaddr_t
ip_pkt_source_route_v4(const ip_pkt_t *ipp)
{
    ipaddr_t nexthop = INADDR_ANY;
    ipoptp_t opts;
    uchar_t *opt;
    uint8_t optval;
    uint8_t optlen;
    uint32_t totallen;

    if (!(ipp->ipp_fields & IPPF_IPV4_OPTIONS))
        return (INADDR_ANY);

    totallen = ipp->ipp_ipv4_options_len;
    if (totallen & 0x3)
        return (INADDR_ANY);

    for (optval = ipoptp_first2(&opts, totallen, ipp->ipp_ipv4_options);
        optval != IPOPT_EOL;
        optval = ipoptp_next(&opts)) {
        uint8_t off;

        opt = opts.ipoptp_cur;
        switch (optval) {
        case IPOPT_SSRR:
        case IPOPT_LSRR:
            if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
                break;
            }
            optlen = opts.ipoptp_len;
            off = opt[IPOPT_OFFSET];
            off--;
            if (optlen < IP_ADDR_LEN ||
                off > optlen - IP_ADDR_LEN) {
                /* End of source route */
                break;
            }
            bcopy((char *)opt + off, &nexthop, IP_ADDR_LEN);
            if (nexthop == htonl(INADDR_LOOPBACK)) {
                /* Ignore */
                nexthop = INADDR_ANY;
                break;
            }
            break;
        }
    }
    return (nexthop);
}
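/*
 * For reference, the LSRR/SSRR option walked above is laid out as
 * specified by RFC 791: opt[0] holds the option type, opt[IPOPT_OLEN]
 * the total option length, opt[IPOPT_OFFSET] a 1-based pointer into
 * the option, and the rest is a packed array of 4-byte IPv4 addresses:
 *
 *	type (1) | len (1) | ptr (1) | addr1 (4) | addr2 (4) | ...
 *
 * This is why an offset beyond optlen - IP_ADDR_LEN means the source
 * route has been fully consumed.
 */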
/*
 * Reverse a source route.
 */
void
ip_pkt_source_route_reverse_v4(ip_pkt_t *ipp)
{
    ipaddr_t tmp;
    ipoptp_t opts;
    uchar_t *opt;
    uint8_t optval;
    uint32_t totallen;

    if (!(ipp->ipp_fields & IPPF_IPV4_OPTIONS))
        return;

    totallen = ipp->ipp_ipv4_options_len;
    if (totallen & 0x3)
        return;

    for (optval = ipoptp_first2(&opts, totallen, ipp->ipp_ipv4_options);
        optval != IPOPT_EOL;
        optval = ipoptp_next(&opts)) {
        uint8_t off1, off2;

        opt = opts.ipoptp_cur;
        switch (optval) {
        case IPOPT_SSRR:
        case IPOPT_LSRR:
            if ((opts.ipoptp_flags & IPOPTP_ERROR) != 0) {
                break;
            }
            off1 = IPOPT_MINOFF_SR - 1;
            off2 = opt[IPOPT_OFFSET] - IP_ADDR_LEN - 1;
            while (off2 > off1) {
                /* Swap the addresses at off1 and off2 in place */
                bcopy(opt + off2, &tmp, IP_ADDR_LEN);
                bcopy(opt + off1, opt + off2, IP_ADDR_LEN);
                bcopy(&tmp, opt + off1, IP_ADDR_LEN);
                off2 -= IP_ADDR_LEN;
                off1 += IP_ADDR_LEN;
            }
            opt[IPOPT_OFFSET] = IPOPT_MINOFF_SR;
            break;
        }
    }
}

/*
 * Returns NULL if no routing header
 */
in6_addr_t *
ip_pkt_source_route_v6(const ip_pkt_t *ipp)
{
    in6_addr_t *nexthop = NULL;
    ip6_rthdr0_t *rthdr;

    if (!(ipp->ipp_fields & IPPF_RTHDR))
        return (NULL);

    rthdr = (ip6_rthdr0_t *)ipp->ipp_rthdr;
    if (rthdr->ip6r0_segleft == 0)
        return (NULL);

    nexthop = (in6_addr_t *)((char *)rthdr + sizeof (*rthdr));
    return (nexthop);
}
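/*
 * Note that ip6_rthdr0_t above is the type 0 routing header; RFC 5095
 * has deprecated RH0 processing, and this helper merely reports the
 * next hop recorded in the header (the first address slot) when
 * segments remain.
 */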
zoneid_t
ip_get_zoneid_v4(ipaddr_t addr, mblk_t *mp, ip_recv_attr_t *ira,
    zoneid_t lookup_zoneid)
{
    ip_stack_t *ipst = ira->ira_ill->ill_ipst;
    ire_t *ire;
    int ire_flags = MATCH_IRE_TYPE;
    zoneid_t zoneid = ALL_ZONES;

    if (is_system_labeled() && !tsol_can_accept_raw(mp, ira, B_FALSE))
        return (ALL_ZONES);

    if (lookup_zoneid != ALL_ZONES)
        ire_flags |= MATCH_IRE_ZONEONLY;
    ire = ire_ftable_lookup_v4(addr, NULL, NULL, IRE_LOCAL | IRE_LOOPBACK,
        NULL, lookup_zoneid, NULL, ire_flags, 0, ipst, NULL);
    if (ire != NULL) {
        zoneid = IP_REAL_ZONEID(ire->ire_zoneid, ipst);
        ire_refrele(ire);
    }
    return (zoneid);
}

zoneid_t
ip_get_zoneid_v6(in6_addr_t *addr, mblk_t *mp, const ill_t *ill,
    ip_recv_attr_t *ira, zoneid_t lookup_zoneid)
{
    ip_stack_t *ipst = ira->ira_ill->ill_ipst;
    ire_t *ire;
    int ire_flags = MATCH_IRE_TYPE;
    zoneid_t zoneid = ALL_ZONES;

    if (is_system_labeled() && !tsol_can_accept_raw(mp, ira, B_FALSE))
        return (ALL_ZONES);

    if (IN6_IS_ADDR_LINKLOCAL(addr))
        ire_flags |= MATCH_IRE_ILL;

    if (lookup_zoneid != ALL_ZONES)
        ire_flags |= MATCH_IRE_ZONEONLY;
    ire = ire_ftable_lookup_v6(addr, NULL, NULL, IRE_LOCAL | IRE_LOOPBACK,
        ill, lookup_zoneid, NULL, ire_flags, 0, ipst, NULL);
    if (ire != NULL) {
        zoneid = IP_REAL_ZONEID(ire->ire_zoneid, ipst);
        ire_refrele(ire);
    }
    return (zoneid);
}

/*
 * IP observability hook support functions.
 */
static void
ipobs_init(ip_stack_t *ipst)
{
    netid_t id;

    id = net_getnetidbynetstackid(ipst->ips_netstack->netstack_stackid);

    ipst->ips_ip4_observe_pr = net_protocol_lookup(id, NHF_INET);
    VERIFY(ipst->ips_ip4_observe_pr != NULL);

    ipst->ips_ip6_observe_pr = net_protocol_lookup(id, NHF_INET6);
    VERIFY(ipst->ips_ip6_observe_pr != NULL);
}

static void
ipobs_fini(ip_stack_t *ipst)
{
    VERIFY(net_protocol_release(ipst->ips_ip4_observe_pr) == 0);
    VERIFY(net_protocol_release(ipst->ips_ip6_observe_pr) == 0);
}

/*
 * hook_pkt_observe_t is composed in network byte order so that the
 * entire mblk_t chain handed into hook_run can be used as-is.
 * The caveat is that use of the fields, such as the zone fields,
 * requires conversion into host byte order first.
 */
void
ipobs_hook(mblk_t *mp, int htype, zoneid_t zsrc, zoneid_t zdst,
    const ill_t *ill, ip_stack_t *ipst)
{
    hook_pkt_observe_t *hdr;
    uint64_t grifindex;
    mblk_t *imp;

    imp = allocb(sizeof (*hdr), BPRI_HI);
    if (imp == NULL)
        return;

    hdr = (hook_pkt_observe_t *)imp->b_rptr;
    /*
     * b_wptr is set so that the apparent size of the data in the
     * mblk_t excludes the pointers at the end of hook_pkt_observe_t.
     */
    imp->b_wptr = imp->b_rptr + sizeof (dl_ipnetinfo_t);
    imp->b_cont = mp;

    ASSERT(DB_TYPE(mp) == M_DATA);

    if (IS_UNDER_IPMP(ill))
        grifindex = ipmp_ill_get_ipmp_ifindex(ill);
    else
        grifindex = 0;

    hdr->hpo_version = 1;
    hdr->hpo_htype = htype;
    hdr->hpo_pktlen = htonl((ulong_t)msgdsize(mp));
    hdr->hpo_ifindex = htonl(ill->ill_phyint->phyint_ifindex);
    hdr->hpo_grifindex = htonl(grifindex);
    hdr->hpo_zsrc = htonl(zsrc);
    hdr->hpo_zdst = htonl(zdst);
    hdr->hpo_pkt = imp;
    hdr->hpo_ctx = ipst->ips_netstack;

    if (ill->ill_isv6) {
        hdr->hpo_family = AF_INET6;
        (void) hook_run(ipst->ips_ipv6_net_data->netd_hooks,
            ipst->ips_ipv6observing, (hook_data_t)hdr);
    } else {
        hdr->hpo_family = AF_INET;
        (void) hook_run(ipst->ips_ipv4_net_data->netd_hooks,
            ipst->ips_ipv4observing, (hook_data_t)hdr);
    }

    imp->b_cont = NULL;
    freemsg(imp);
}

/*
 * Utility routine that checks if `v4srcp' is a valid address on underlying
 * interface `ill'.  If `ipifp' is non-NULL, it's set to a held ipif
 * associated with `v4srcp' on success.  NOTE: if this is not called from
 * inside the IPSQ (ill_g_lock is not held), `ill' may be removed from the
 * group during or after this lookup.
 */
boolean_t
ipif_lookup_testaddr_v4(ill_t *ill, const in_addr_t *v4srcp, ipif_t **ipifp)
{
    ipif_t *ipif;

    ipif = ipif_lookup_addr_exact(*v4srcp, ill, ill->ill_ipst);
    if (ipif != NULL) {
        if (ipifp != NULL)
            *ipifp = ipif;
        else
            ipif_refrele(ipif);
        return (B_TRUE);
    }

    ip1dbg(("ipif_lookup_testaddr_v4: cannot find ipif for src %x\n",
        *v4srcp));
    return (B_FALSE);
}
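/*
 * A caller that only needs the boolean answer can pass a NULL `ipifp'
 * and let the routine drop the ipif hold itself, e.g. (illustrative;
 * `src' stands for whatever in_addr_t the caller is validating):
 *
 *	if (ipif_lookup_testaddr_v4(ill, &src, NULL)) {
 *		... `src' is configured on `ill' ...
 *	}
 */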