/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */

/*
 * gld - Generic LAN Driver Version 2, PSARC/1997/382
 *
 * This is a utility module that provides generic facilities for
 * LAN drivers. The DLPI protocol and most STREAMS interfaces
 * are handled here.
 *
 * It no longer provides compatibility with drivers
 * implemented according to the GLD v0 documentation published
 * in 1993. (See PSARC 2003/728)
 */


#include <sys/types.h>
#include <sys/errno.h>
#include <sys/stropts.h>
#include <sys/stream.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/kstat.h>
#include <sys/debug.h>
#include <sys/note.h>
#include <sys/sysmacros.h>

#include <sys/byteorder.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/dlpi.h>
#include <sys/pattr.h>
#include <sys/ethernet.h>
#include <sys/ib/clients/ibd/ibd.h>
#include <sys/policy.h>
#include <sys/atomic.h>

#include <sys/multidata.h>
#include <sys/gld.h>
#include <sys/gldpriv.h>

#include <sys/ddi.h>
#include <sys/sunddi.h>

/*
 * Macros to increment statistics.
 */

/*
 * Increase kstats. Note this operation is not atomic. It can be used when
 * GLDM_LOCK_HELD_WRITE(macinfo).
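 * For example, with the per-MAC write lock already held, a caller could
 * update the interrupt counter non-atomically (illustrative use only;
 * glds_intr is one of the struct gld_stats fields maintained below):
 *     BUMP(stats, vstats, glds_intr, 1);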
 */
#define BUMP(stats, vstats, stat, delta) do { \
    ((stats)->stat) += (delta); \
    _NOTE(CONSTANTCONDITION) \
    if ((vstats) != NULL) \
        ((struct gld_stats *)(vstats))->stat += (delta); \
    _NOTE(CONSTANTCONDITION) \
} while (0)

#define ATOMIC_BUMP_STAT(stat, delta) do { \
    _NOTE(CONSTANTCONDITION) \
    if (sizeof ((stat)) == sizeof (uint32_t)) { \
        atomic_add_32((uint32_t *)&(stat), (delta)); \
    _NOTE(CONSTANTCONDITION) \
    } else if (sizeof ((stat)) == sizeof (uint64_t)) { \
        atomic_add_64((uint64_t *)&(stat), (delta)); \
    } \
    _NOTE(CONSTANTCONDITION) \
} while (0)

#define ATOMIC_BUMP(stats, vstats, stat, delta) do { \
    ATOMIC_BUMP_STAT((stats)->stat, (delta)); \
    _NOTE(CONSTANTCONDITION) \
    if ((vstats) != NULL) { \
        ATOMIC_BUMP_STAT(((struct gld_stats *)(vstats))->stat, \
            (delta)); \
    } \
    _NOTE(CONSTANTCONDITION) \
} while (0)

#define UPDATE_STATS(stats, vstats, pktinfo, delta) { \
    if ((pktinfo).isBroadcast) { \
        ATOMIC_BUMP((stats), (vstats), \
            glds_brdcstxmt, (delta)); \
    } else if ((pktinfo).isMulticast) { \
        ATOMIC_BUMP((stats), (vstats), glds_multixmt, (delta)); \
    } \
    ATOMIC_BUMP((stats), (vstats), glds_bytexmt64, \
        ((pktinfo).pktLen)); \
    ATOMIC_BUMP((stats), (vstats), glds_pktxmt64, (delta)); \
}

#ifdef GLD_DEBUG
int gld_debug = GLDERRS;
#endif

/* called from gld_register */
static int gld_initstats(gld_mac_info_t *);

/* called from kstat mechanism, and from wsrv's get_statistics */
static int gld_update_kstat(kstat_t *, int);

/* statistics for additional vlans */
static int gld_init_vlan_stats(gld_vlan_t *);
static int gld_update_vlan_kstat(kstat_t *, int);

/* called from gld_getinfo */
static dev_info_t *gld_finddevinfo(dev_t);

/* called from wput, wsrv, unidata, and v0_sched to send a packet */
/* also from the source routing stuff for sending RDE protocol packets */
static int gld_start(queue_t *, mblk_t *, int, uint32_t);
static int gld_start_mdt(queue_t *, mblk_t *, int);

/* called from gld_start[_mdt] to loopback packet(s) in promiscuous mode */
static void gld_precv(gld_mac_info_t *, mblk_t *, uint32_t, struct gld_stats *);
static void gld_precv_mdt(gld_mac_info_t *, gld_vlan_t *, mblk_t *,
    pdesc_t *, pktinfo_t *);

/* receive group: called from gld_recv and gld_precv* with maclock held */
static void gld_sendup(gld_mac_info_t *, pktinfo_t *, mblk_t *,
    int (*)());
static int gld_accept(gld_t *, pktinfo_t *);
static int gld_mcmatch(gld_t *, pktinfo_t *);
static int gld_multicast(unsigned char *, gld_t *);
static int gld_paccept(gld_t *, pktinfo_t *);
static void gld_passon(gld_t *, mblk_t *, pktinfo_t *,
    void (*)(queue_t *, mblk_t *));
static mblk_t *gld_addudind(gld_t *, mblk_t *, pktinfo_t *, boolean_t);

/* wsrv group: called from wsrv, single threaded per queue */
static int gld_ioctl(queue_t *, mblk_t *);
static void gld_fastpath(gld_t *, queue_t *, mblk_t *);
static int gld_cmds(queue_t *, mblk_t *);
static mblk_t *gld_bindack(queue_t *, mblk_t *);
static int gld_notify_req(queue_t *, mblk_t *);
static int gld_udqos(queue_t *, mblk_t *);
static int gld_bind(queue_t *, mblk_t *);
static int gld_unbind(queue_t *, mblk_t *);
static int gld_inforeq(queue_t *, mblk_t *);
static int gld_unitdata(queue_t *, mblk_t *);
static int gldattach(queue_t *, mblk_t *);
static int gldunattach(queue_t *, mblk_t *);
static int gld_enable_multi(queue_t *, mblk_t *);
static int gld_disable_multi(queue_t *, mblk_t *);
static void gld_send_disable_multi(gld_mac_info_t *, gld_mcast_t *);
static int gld_promisc(queue_t *, mblk_t *, t_uscalar_t, boolean_t);
static int gld_physaddr(queue_t *, mblk_t *);
static int gld_setaddr(queue_t *, mblk_t *);
static int gld_get_statistics(queue_t *, mblk_t *);
static int gld_cap(queue_t *, mblk_t *);
static int gld_cap_ack(queue_t *, mblk_t *);
static int gld_cap_enable(queue_t *, mblk_t *);

/* misc utilities, some requiring various mutexes held */
static int gld_start_mac(gld_mac_info_t *);
static void gld_stop_mac(gld_mac_info_t *);
static void gld_set_ipq(gld_t *);
static void gld_flushqueue(queue_t *);
static glddev_t *gld_devlookup(int);
static int gld_findminor(glddev_t *);
static void gldinsque(void *, void *);
static void gldremque(void *);
void gld_bitrevcopy(caddr_t, caddr_t, size_t);
void gld_bitreverse(uchar_t *, size_t);
char *gld_macaddr_sprintf(char *, unsigned char *, int);
static gld_vlan_t *gld_add_vlan(gld_mac_info_t *, uint32_t vid);
static void gld_rem_vlan(gld_vlan_t *);
gld_vlan_t *gld_find_vlan(gld_mac_info_t *, uint32_t);
gld_vlan_t *gld_get_vlan(gld_mac_info_t *, uint32_t);

#ifdef GLD_DEBUG
static void gld_check_assertions(void);
extern void gld_sr_dump(gld_mac_info_t *);
#endif

/*
 * Allocate and zero-out "number" structures each of type "structure" in
 * kernel memory.
 */
#define GLD_GETSTRUCT(structure, number) \
    (kmem_zalloc((uint_t)(sizeof (structure) * (number)), KM_NOSLEEP))

#define abs(a) ((a) < 0 ? -(a) : a)

uint32_t gld_global_options = GLD_OPT_NO_ETHRXSNAP;

/*
 * The device is of DL_ETHER type and is able to support VLAN by itself.
 */
#define VLAN_CAPABLE(macinfo) \
    ((macinfo)->gldm_type == DL_ETHER && \
    (macinfo)->gldm_send_tagged != NULL)

/*
 * The set of notifications generatable by GLD itself, the additional
 * set that can be generated if the MAC driver provides the link-state
 * tracking callback capability, and the set supported by the GLD
 * notification code below.
 *
 * PLEASE keep these in sync with what the code actually does!
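 *
 * The effective set advertised for a MAC is computed in gld_register()
 * below as gld_internal_notes, plus gld_linkstate_notes when the driver
 * declares GLD_CAP_LINKSTATE, intersected with gld_supported_notes.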
 */
static const uint32_t gld_internal_notes = DL_NOTE_PROMISC_ON_PHYS |
    DL_NOTE_PROMISC_OFF_PHYS |
    DL_NOTE_PHYS_ADDR;
static const uint32_t gld_linkstate_notes = DL_NOTE_LINK_DOWN |
    DL_NOTE_LINK_UP |
    DL_NOTE_SPEED;
static const uint32_t gld_supported_notes = DL_NOTE_PROMISC_ON_PHYS |
    DL_NOTE_PROMISC_OFF_PHYS |
    DL_NOTE_PHYS_ADDR |
    DL_NOTE_LINK_DOWN |
    DL_NOTE_LINK_UP |
    DL_NOTE_SPEED;

/* Media must correspond to #defines in gld.h */
static char *gld_media[] = {
    "unknown",      /* GLDM_UNKNOWN - driver cannot determine media */
    "aui",          /* GLDM_AUI */
    "bnc",          /* GLDM_BNC */
    "twpair",       /* GLDM_TP */
    "fiber",        /* GLDM_FIBER */
    "100baseT",     /* GLDM_100BT */
    "100vgAnyLan",  /* GLDM_VGANYLAN */
    "10baseT",      /* GLDM_10BT */
    "ring4",        /* GLDM_RING4 */
    "ring16",       /* GLDM_RING16 */
    "PHY/MII",      /* GLDM_PHYMII */
    "100baseTX",    /* GLDM_100BTX */
    "100baseT4",    /* GLDM_100BT4 */
    "unknown",      /* skip */
    "ipib",         /* GLDM_IB */
};

/* Must correspond to #defines in gld.h */
static char *gld_duplex[] = {
    "unknown",      /* GLD_DUPLEX_UNKNOWN - not known or not applicable */
    "half",         /* GLD_DUPLEX_HALF */
    "full"          /* GLD_DUPLEX_FULL */
};

/*
 * Interface types currently supported by GLD.
 * If you add new types, you must check all "XXX" strings in the GLD source
 * for implementation issues that may affect the support of your new type.
 * In particular, any type with gldm_addrlen > 6, or gldm_saplen != -2, will
 * require generalizing this GLD source to handle the new cases. In other
 * words there are assumptions built into the code in a few places that must
 * be fixed. Be sure to turn on DEBUG/ASSERT code when testing a new type.
 */
static gld_interface_t interfaces[] = {

    /* Ethernet Bus */
    {
        DL_ETHER,
        (uint_t)-1,
        sizeof (struct ether_header),
        gld_interpret_ether,
        NULL,
        gld_fastpath_ether,
        gld_unitdata_ether,
        gld_init_ether,
        gld_uninit_ether,
        "ether"
    },

    /* Fiber Distributed data interface */
    {
        DL_FDDI,
        4352,
        sizeof (struct fddi_mac_frm),
        gld_interpret_fddi,
        NULL,
        gld_fastpath_fddi,
        gld_unitdata_fddi,
        gld_init_fddi,
        gld_uninit_fddi,
        "fddi"
    },

    /* Token Ring interface */
    {
        DL_TPR,
        17914,
        -1,             /* variable header size */
        gld_interpret_tr,
        NULL,
        gld_fastpath_tr,
        gld_unitdata_tr,
        gld_init_tr,
        gld_uninit_tr,
        "tpr"
    },

    /* Infiniband */
    {
        DL_IB,
        4092,
        sizeof (struct ipoib_header),
        gld_interpret_ib,
        gld_interpret_mdt_ib,
        gld_fastpath_ib,
        gld_unitdata_ib,
        gld_init_ib,
        gld_uninit_ib,
        "ipib"
    },
};

/*
 * bit reversal lookup table.
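 * bit_rev[b] is byte b with its bit order mirrored (MSB <-> LSB); it is
 * used by gld_bitreverse()/gld_bitrevcopy() when converting addresses
 * between canonical and wire (bit-reversed) order.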
 */
static uchar_t bit_rev[] = {
    0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0,
    0x30, 0xb0, 0x70, 0xf0, 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
    0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 0x04, 0x84, 0x44, 0xc4,
    0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
    0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc,
    0x3c, 0xbc, 0x7c, 0xfc, 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
    0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, 0x0a, 0x8a, 0x4a, 0xca,
    0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
    0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6,
    0x36, 0xb6, 0x76, 0xf6, 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
    0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 0x01, 0x81, 0x41, 0xc1,
    0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
    0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9,
    0x39, 0xb9, 0x79, 0xf9, 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
    0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, 0x0d, 0x8d, 0x4d, 0xcd,
    0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
    0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3,
    0x33, 0xb3, 0x73, 0xf3, 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
    0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 0x07, 0x87, 0x47, 0xc7,
    0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
    0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf,
    0x3f, 0xbf, 0x7f, 0xff,
};

/*
 * User priorities, mapped from b_band.
 */
static uint32_t user_priority[] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
};

#define UPRI(gld, band) ((band != 0) ? user_priority[(band)] : (gld)->gld_upri)

static struct glddevice gld_device_list;  /* Per-system root of GLD tables */

/*
 * Module linkage information for the kernel.
 */

static struct modlmisc modlmisc = {
    &mod_miscops,           /* Type of module - a utility provider */
    "Generic LAN Driver (" GLD_VERSION_STRING ")"
#ifdef GLD_DEBUG
    " DEBUG"
#endif
};

static struct modlinkage modlinkage = {
    MODREV_1, &modlmisc, NULL
};

int
_init(void)
{
    int e;

    /* initialize gld_device_list mutex */
    mutex_init(&gld_device_list.gld_devlock, NULL, MUTEX_DRIVER, NULL);

    /* initialize device driver (per-major) list */
    gld_device_list.gld_next =
        gld_device_list.gld_prev = &gld_device_list;

    if ((e = mod_install(&modlinkage)) != 0)
        mutex_destroy(&gld_device_list.gld_devlock);

    return (e);
}

int
_fini(void)
{
    int e;

    if ((e = mod_remove(&modlinkage)) != 0)
        return (e);

    ASSERT(gld_device_list.gld_next ==
        (glddev_t *)&gld_device_list.gld_next);
    ASSERT(gld_device_list.gld_prev ==
        (glddev_t *)&gld_device_list.gld_next);
    mutex_destroy(&gld_device_list.gld_devlock);

    return (e);
}

int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}

/*
 * GLD service routines
 */

/* So this gld binary maybe can be forward compatible with future v2 drivers */
#define GLD_MAC_RESERVED (16 * sizeof (caddr_t))

/*ARGSUSED*/
gld_mac_info_t *
gld_mac_alloc(dev_info_t *devinfo)
{
    gld_mac_info_t *macinfo;

    macinfo = kmem_zalloc(sizeof (gld_mac_info_t) + GLD_MAC_RESERVED,
        KM_SLEEP);

    /*
     * The setting of gldm_driver_version will not be documented or allowed
     * until a future release.
     */
    macinfo->gldm_driver_version = GLD_VERSION_200;

    /*
     * GLD's version. This also is undocumented for now, but will be
     * available if needed in the future.
     */
    macinfo->gldm_GLD_version = GLD_VERSION;

    return (macinfo);
}

/*
 * gld_mac_free must be called after the driver has removed interrupts
 * and completely stopped calling gld_recv() and gld_sched(). At that
 * point the interrupt routine is guaranteed by the system to have been
 * exited and the maclock is no longer needed. Of course, it is
 * expected (required) that (assuming gld_register() succeeded),
 * gld_unregister() was called before gld_mac_free().
 */
void
gld_mac_free(gld_mac_info_t *macinfo)
{
    ASSERT(macinfo);
    ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);

    /*
     * Assert that if we made it through gld_register, then we must
     * have unregistered.
     */
    ASSERT(!GLDM_LOCK_INITED(macinfo) ||
        (macinfo->gldm_GLD_flags & GLD_UNREGISTERED));

    GLDM_LOCK_DESTROY(macinfo);

    kmem_free(macinfo, sizeof (gld_mac_info_t) + GLD_MAC_RESERVED);
}

/*
 * gld_register -- called once per device instance (PPA)
 *
 * During its attach routine, a real device driver will register with GLD
 * so that later opens and dl_attach_reqs will work. The arguments are the
 * devinfo pointer, the device name, and a macinfo structure describing the
 * physical device instance.
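 *
 * A typical attach(9E) sequence is, as a sketch (the driver name "xx"
 * below is only a placeholder):
 *
 *     macinfo = gld_mac_alloc(devinfo);
 *     ... fill in gldm_ entry points, addresses and parameters ...
 *     if (gld_register(devinfo, "xx", macinfo) != DDI_SUCCESS)
 *         gld_mac_free(macinfo);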
 */
int
gld_register(dev_info_t *devinfo, char *devname, gld_mac_info_t *macinfo)
{
    int mediatype;
    int major = ddi_name_to_major(devname), i;
    glddev_t *glddev;
    gld_mac_pvt_t *mac_pvt;
    char minordev[32];
    char pbuf[3*GLD_MAX_ADDRLEN];
    gld_interface_t *ifp = NULL;

    ASSERT(devinfo != NULL);
    ASSERT(macinfo != NULL);

    if (macinfo->gldm_driver_version != GLD_VERSION)
        return (DDI_FAILURE);

    mediatype = macinfo->gldm_type;

    /*
     * Entry points should be ready for us.
     * ioctl is optional.
     * set_multicast and get_stats are optional in v0.
     * intr is only required if you add an interrupt.
     */
    ASSERT(macinfo->gldm_reset != NULL);
    ASSERT(macinfo->gldm_start != NULL);
    ASSERT(macinfo->gldm_stop != NULL);
    ASSERT(macinfo->gldm_set_mac_addr != NULL);
    ASSERT(macinfo->gldm_set_promiscuous != NULL);
    ASSERT(macinfo->gldm_send != NULL);

    ASSERT(macinfo->gldm_maxpkt >= macinfo->gldm_minpkt);
    ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
    ASSERT(macinfo->gldm_broadcast_addr != NULL);
    ASSERT(macinfo->gldm_vendor_addr != NULL);
    ASSERT(macinfo->gldm_ident != NULL);

    if (macinfo->gldm_addrlen > GLD_MAX_ADDRLEN) {
        cmn_err(CE_WARN, "GLD: %s driver gldm_addrlen %d > %d not sup"
            "ported", devname, macinfo->gldm_addrlen, GLD_MAX_ADDRLEN);
        return (DDI_FAILURE);
    }

    /*
     * GLD only functions properly with saplen == -2
     */
    if (macinfo->gldm_saplen != -2) {
        cmn_err(CE_WARN, "GLD: %s driver gldm_saplen %d != -2 "
            "not supported", devname, macinfo->gldm_saplen);
        return (DDI_FAILURE);
    }

    /* see gld_rsrv() */
    if (ddi_getprop(DDI_DEV_T_NONE, devinfo, 0, "fast_recv", 0))
        macinfo->gldm_options |= GLDOPT_FAST_RECV;

    mutex_enter(&gld_device_list.gld_devlock);
    glddev = gld_devlookup(major);

    /*
     * Allocate per-driver (major) data structure if necessary
     */
    if (glddev == NULL) {
        /* first occurrence of this device name (major number) */
        glddev = GLD_GETSTRUCT(glddev_t, 1);
        if (glddev == NULL) {
            mutex_exit(&gld_device_list.gld_devlock);
            return (DDI_FAILURE);
        }
        (void) strncpy(glddev->gld_name, devname,
            sizeof (glddev->gld_name) - 1);
        glddev->gld_major = major;
        glddev->gld_nextminor = GLD_MIN_CLONE_MINOR;
        glddev->gld_mac_next = glddev->gld_mac_prev =
            (gld_mac_info_t *)&glddev->gld_mac_next;
        glddev->gld_str_next = glddev->gld_str_prev =
            (gld_t *)&glddev->gld_str_next;
        mutex_init(&glddev->gld_devlock, NULL, MUTEX_DRIVER, NULL);

        /* allow increase of number of supported multicast addrs */
        glddev->gld_multisize = ddi_getprop(DDI_DEV_T_NONE,
            devinfo, 0, "multisize", GLD_MAX_MULTICAST);

        /*
         * Optionally restrict DLPI provider style
         *
         * -1 - don't create style 1 nodes
         * -2 - don't create style 2 nodes
         */
        glddev->gld_styles = ddi_getprop(DDI_DEV_T_NONE, devinfo, 0,
            "gld-provider-styles", 0);

        /* Stuff that's needed before any PPA gets attached */
        glddev->gld_type = macinfo->gldm_type;
        glddev->gld_minsdu = macinfo->gldm_minpkt;
        glddev->gld_saplen = macinfo->gldm_saplen;
        glddev->gld_addrlen = macinfo->gldm_addrlen;
        glddev->gld_broadcast = kmem_zalloc(macinfo->gldm_addrlen,
            KM_SLEEP);
        bcopy(macinfo->gldm_broadcast_addr,
            glddev->gld_broadcast, macinfo->gldm_addrlen);
        glddev->gld_maxsdu = macinfo->gldm_maxpkt;
        gldinsque(glddev, gld_device_list.gld_prev);
    }
    glddev->gld_ndevice++;
    /* Now glddev can't go away until we unregister this mac (or fail) */
    mutex_exit(&gld_device_list.gld_devlock);

    /*
     * Per-instance initialization
     */

    /*
     * Initialize per-mac structure that is private to GLD.
     * Set up interface pointer. These are device class specific pointers
     * used to handle FDDI/TR/ETHER/IPoIB specific packets.
     */
    for (i = 0; i < sizeof (interfaces)/sizeof (*interfaces); i++) {
        if (mediatype != interfaces[i].mac_type)
            continue;

        macinfo->gldm_mac_pvt = kmem_zalloc(sizeof (gld_mac_pvt_t),
            KM_SLEEP);
        ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep = ifp =
            &interfaces[i];
        break;
    }

    if (ifp == NULL) {
        cmn_err(CE_WARN, "GLD: this version does not support %s driver "
            "of type %d", devname, mediatype);
        goto failure;
    }

    /*
     * Driver can only register MTU within legal media range.
     */
    if (macinfo->gldm_maxpkt > ifp->mtu_size) {
        cmn_err(CE_WARN, "GLD: oversize MTU is specified by driver %s",
            devname);
        goto failure;
    }

    /*
     * Correct margin size if it is not set.
     */
    if (VLAN_CAPABLE(macinfo) && (macinfo->gldm_margin == 0))
        macinfo->gldm_margin = VTAG_SIZE;

    /*
     * For now, only Infiniband drivers can use MDT. Do not add
     * support for Ethernet, FDDI or TR.
     */
    if (macinfo->gldm_mdt_pre != NULL) {
        if (mediatype != DL_IB) {
            cmn_err(CE_WARN, "GLD: MDT not supported for %s "
                "driver of type %d", devname, mediatype);
            goto failure;
        }

        /*
         * Validate entry points.
         */
        if ((macinfo->gldm_mdt_send == NULL) ||
            (macinfo->gldm_mdt_post == NULL)) {
            cmn_err(CE_WARN, "GLD: invalid MDT entry points for "
                "%s driver of type %d", devname, mediatype);
            goto failure;
        }
        macinfo->gldm_options |= GLDOPT_MDT;
    }

    mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
    mac_pvt->major_dev = glddev;

    mac_pvt->curr_macaddr = kmem_zalloc(macinfo->gldm_addrlen, KM_SLEEP);
    /*
     * XXX Do bit-reversed devices store gldm_vendor in canonical
     * format or in wire format?  Also gldm_broadcast.  For now
     * we are assuming canonical, but I'm not sure that makes the
     * most sense for ease of driver implementation.
     */
    bcopy(macinfo->gldm_vendor_addr, mac_pvt->curr_macaddr,
        macinfo->gldm_addrlen);
    mac_pvt->statistics = kmem_zalloc(sizeof (struct gld_stats), KM_SLEEP);

    /*
     * The available set of notifications is those generatable by GLD
     * itself, plus those corresponding to the capabilities of the MAC
     * driver, intersected with those supported by gld_notify_ind() above.
     */
    mac_pvt->notifications = gld_internal_notes;
    if (macinfo->gldm_capabilities & GLD_CAP_LINKSTATE)
        mac_pvt->notifications |= gld_linkstate_notes;
    mac_pvt->notifications &= gld_supported_notes;

    GLDM_LOCK_INIT(macinfo);

    ddi_set_driver_private(devinfo, macinfo);

    /*
     * Now atomically get a PPA and put ourselves on the mac list.
     */
    mutex_enter(&glddev->gld_devlock);

#ifdef DEBUG
    if (macinfo->gldm_ppa != ddi_get_instance(devinfo))
        cmn_err(CE_WARN, "%s%d instance != ppa %d",
            ddi_driver_name(devinfo), ddi_get_instance(devinfo),
            macinfo->gldm_ppa);
#endif

    /*
     * Create style 2 node (gated by gld-provider-styles property).
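     * The style 2 node is named after the driver itself (gld_name); the
     * style 1 node created below is named "<name><ppa>", so the first
     * instance of a driver "xx" would appear as "xx0".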
     *
     * NOTE: When the CLONE_DEV flag is specified to
     * ddi_create_minor_node() the minor number argument is
     * immaterial. Opens of that node will go via the clone
     * driver and gld_open() will always be passed a dev_t with
     * minor of zero.
     */
    if (glddev->gld_styles != -2) {
        if (ddi_create_minor_node(devinfo, glddev->gld_name, S_IFCHR,
            0, DDI_NT_NET, CLONE_DEV) == DDI_FAILURE) {
            mutex_exit(&glddev->gld_devlock);
            goto late_failure;
        }
    }

    /*
     * Create style 1 node (gated by gld-provider-styles property)
     */
    if (glddev->gld_styles != -1) {
        (void) sprintf(minordev, "%s%d", glddev->gld_name,
            macinfo->gldm_ppa);
        if (ddi_create_minor_node(devinfo, minordev, S_IFCHR,
            GLD_STYLE1_PPA_TO_MINOR(macinfo->gldm_ppa), DDI_NT_NET,
            0) != DDI_SUCCESS) {
            mutex_exit(&glddev->gld_devlock);
            goto late_failure;
        }
    }

    /* add ourselves to this major device's linked list of instances */
    gldinsque(macinfo, glddev->gld_mac_prev);

    mutex_exit(&glddev->gld_devlock);

    /*
     * Unfortunately we need the ppa before we call gld_initstats();
     * otherwise we would like to do this just above the mutex_enter
     * above. In which case we could have set MAC_READY inside the
     * mutex and we wouldn't have needed to check it in open and
     * DL_ATTACH. We wouldn't like to do the initstats/kstat_create
     * inside the mutex because it might get taken in our kstat_update
     * routine and cause a deadlock with kstat_chain_lock.
     */

    /* gld_initstats() calls (*ifp->init)() */
    if (gld_initstats(macinfo) != GLD_SUCCESS) {
        mutex_enter(&glddev->gld_devlock);
        gldremque(macinfo);
        mutex_exit(&glddev->gld_devlock);
        goto late_failure;
    }

    /*
     * Need to indicate we are NOW ready to process interrupts;
     * any interrupt before this is set is for someone else.
     * This flag is also now used to tell open, et. al. that this
     * mac is now fully ready and available for use.
     */
    GLDM_LOCK(macinfo, RW_WRITER);
    macinfo->gldm_GLD_flags |= GLD_MAC_READY;
    GLDM_UNLOCK(macinfo);

    /* log local ethernet address -- XXX not DDI compliant */
    if (macinfo->gldm_addrlen == sizeof (struct ether_addr))
        (void) localetheraddr(
            (struct ether_addr *)macinfo->gldm_vendor_addr, NULL);

    /* now put announcement into the message buffer */
    cmn_err(CE_CONT, "!%s%d: %s: type \"%s\" mac address %s\n",
        glddev->gld_name,
        macinfo->gldm_ppa, macinfo->gldm_ident,
        mac_pvt->interfacep->mac_string,
        gld_macaddr_sprintf(pbuf, macinfo->gldm_vendor_addr,
        macinfo->gldm_addrlen));

    ddi_report_dev(devinfo);
    return (DDI_SUCCESS);

late_failure:
    ddi_remove_minor_node(devinfo, NULL);
    GLDM_LOCK_DESTROY(macinfo);
    if (mac_pvt->curr_macaddr != NULL)
        kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
    if (mac_pvt->statistics != NULL)
        kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
    kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
    macinfo->gldm_mac_pvt = NULL;

failure:
    mutex_enter(&gld_device_list.gld_devlock);
    glddev->gld_ndevice--;
    /*
     * Note that just because this goes to zero here does not necessarily
     * mean that we were the one who added the glddev above. It's
     * possible that the first mac unattached while we were in here
     * failing to attach the second mac. But we're now the last.
     */
    if (glddev->gld_ndevice == 0) {
        /* There should be no macinfos left */
        ASSERT(glddev->gld_mac_next ==
            (gld_mac_info_t *)&glddev->gld_mac_next);
        ASSERT(glddev->gld_mac_prev ==
            (gld_mac_info_t *)&glddev->gld_mac_next);

        /*
         * There should be no DL_UNATTACHED streams: the system
         * should not have detached the "first" devinfo which has
         * all the open style 2 streams.
         *
         * XXX This is not clear. See gld_getinfo and Bug 1165519
         */
        ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
        ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);

        gldremque(glddev);
        mutex_destroy(&glddev->gld_devlock);
        if (glddev->gld_broadcast != NULL)
            kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
        kmem_free(glddev, sizeof (glddev_t));
    }
    mutex_exit(&gld_device_list.gld_devlock);

    return (DDI_FAILURE);
}

/*
 * gld_unregister (macinfo)
 * remove the macinfo structure from local structures
 * this is cleanup for a driver to be unloaded
 */
int
gld_unregister(gld_mac_info_t *macinfo)
{
    gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
    glddev_t *glddev = mac_pvt->major_dev;
    gld_interface_t *ifp;
    int multisize = sizeof (gld_mcast_t) * glddev->gld_multisize;

    mutex_enter(&glddev->gld_devlock);
    GLDM_LOCK(macinfo, RW_WRITER);

    if (mac_pvt->nvlan > 0) {
        GLDM_UNLOCK(macinfo);
        mutex_exit(&glddev->gld_devlock);
        return (DDI_FAILURE);
    }

#ifdef GLD_DEBUG
    {
        int i;

        for (i = 0; i < VLAN_HASHSZ; i++) {
            if ((mac_pvt->vlan_hash[i] != NULL))
                cmn_err(CE_PANIC,
                    "%s, line %d: "
                    "mac_pvt->vlan_hash[%d] != NULL",
                    __FILE__, __LINE__, i);
        }
    }
#endif

    /* Delete this mac */
    gldremque(macinfo);

    /* Disallow further entries to gld_recv() and gld_sched() */
    macinfo->gldm_GLD_flags |= GLD_UNREGISTERED;

    GLDM_UNLOCK(macinfo);
    mutex_exit(&glddev->gld_devlock);

    ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
    (*ifp->uninit)(macinfo);

    ASSERT(mac_pvt->kstatp);
    kstat_delete(mac_pvt->kstatp);

    ASSERT(GLDM_LOCK_INITED(macinfo));
    kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
    kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));

    if (mac_pvt->mcast_table != NULL)
        kmem_free(mac_pvt->mcast_table, multisize);
    kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
    macinfo->gldm_mac_pvt = (caddr_t)NULL;

    /* We now have one fewer instance for this major device */
    mutex_enter(&gld_device_list.gld_devlock);
    glddev->gld_ndevice--;
    if (glddev->gld_ndevice == 0) {
        /* There should be no macinfos left */
        ASSERT(glddev->gld_mac_next ==
            (gld_mac_info_t *)&glddev->gld_mac_next);
        ASSERT(glddev->gld_mac_prev ==
            (gld_mac_info_t *)&glddev->gld_mac_next);

        /*
         * There should be no DL_UNATTACHED streams: the system
         * should not have detached the "first" devinfo which has
         * all the open style 2 streams.
         *
         * XXX This is not clear.
         * See gld_getinfo and Bug 1165519
         */
        ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
        ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);

        ddi_remove_minor_node(macinfo->gldm_devinfo, NULL);
        gldremque(glddev);
        mutex_destroy(&glddev->gld_devlock);
        if (glddev->gld_broadcast != NULL)
            kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
        kmem_free(glddev, sizeof (glddev_t));
    }
    mutex_exit(&gld_device_list.gld_devlock);

    return (DDI_SUCCESS);
}

/*
 * gld_initstats
 * called from gld_register
 */
static int
gld_initstats(gld_mac_info_t *macinfo)
{
    gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
    struct gldkstats *sp;
    glddev_t *glddev;
    kstat_t *ksp;
    gld_interface_t *ifp;

    glddev = mac_pvt->major_dev;

    if ((ksp = kstat_create(glddev->gld_name, macinfo->gldm_ppa,
        NULL, "net", KSTAT_TYPE_NAMED,
        sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
        cmn_err(CE_WARN,
            "GLD: failed to create kstat structure for %s%d",
            glddev->gld_name, macinfo->gldm_ppa);
        return (GLD_FAILURE);
    }
    mac_pvt->kstatp = ksp;

    ksp->ks_update = gld_update_kstat;
    ksp->ks_private = (void *)macinfo;

    sp = ksp->ks_data;
    kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
    kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
    kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
    kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
    kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
    kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
    kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
    kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
    kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
    kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
    kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);

    kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);

    kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
        KSTAT_DATA_UINT32);
    kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
        KSTAT_DATA_UINT32);

    ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;

    (*ifp->init)(macinfo);

    kstat_install(ksp);

    return (GLD_SUCCESS);
}

/*
 * called from kstat mechanism,
 * and from wsrv's get_statistics_req
 */
static int
gld_update_kstat(kstat_t *ksp, int rw)
{
    gld_mac_info_t *macinfo;
    gld_mac_pvt_t *mac_pvt;
    struct gldkstats *gsp;
    struct gld_stats *stats;

    if (rw == KSTAT_WRITE)
        return (EACCES);

    macinfo = (gld_mac_info_t *)ksp->ks_private;
    ASSERT(macinfo != NULL);

    GLDM_LOCK(macinfo, RW_WRITER);

    if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
        GLDM_UNLOCK(macinfo);
        return (EIO);           /* this one's not ready yet */
    }

    if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
        GLDM_UNLOCK(macinfo);
        return (EIO);           /* this one's not ready any more */
    }

    mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
    gsp = mac_pvt->kstatp->ks_data;
    ASSERT(gsp);
    stats = mac_pvt->statistics;

    if (macinfo->gldm_get_stats)
        (void) (*macinfo->gldm_get_stats)(macinfo, stats);

    gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
    gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
    gsp->glds_multixmt.value.ul = stats->glds_multixmt;
    gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
    gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;   /* 0 for now */
    gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;

    gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
    gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
    gsp->glds_xmtbadinterp.value.ui32 = stats->glds_xmtbadinterp;

    gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
    gsp->glds_errxmt.value.ul = stats->glds_errxmt;
    gsp->glds_errrcv.value.ul = stats->glds_errrcv;
    gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
    gsp->glds_multircv.value.ul = stats->glds_multircv;
    gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
    gsp->glds_blocked.value.ul = stats->glds_blocked;
    gsp->glds_overflow.value.ul = stats->glds_overflow;
    gsp->glds_underflow.value.ul = stats->glds_underflow;
    gsp->glds_missed.value.ul = stats->glds_missed;
    gsp->glds_norcvbuf.value.ul = stats->glds_norcvbuf +
        stats->glds_gldnorcvbuf;
    gsp->glds_intr.value.ul = stats->glds_intr;

    gsp->glds_speed.value.ui64 = stats->glds_speed;
    gsp->glds_unknowns.value.ul = stats->glds_unknowns;
    gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
    gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
    gsp->glds_rcvbadinterp.value.ui32 = stats->glds_rcvbadinterp;

    if (mac_pvt->nprom)
        (void) strcpy(gsp->glds_prom.value.c, "phys");
    else if (mac_pvt->nprom_multi)
        (void) strcpy(gsp->glds_prom.value.c, "multi");
    else
        (void) strcpy(gsp->glds_prom.value.c, "off");

    (void) strcpy(gsp->glds_media.value.c, gld_media[
        stats->glds_media < sizeof (gld_media) / sizeof (gld_media[0]) ?
        stats->glds_media : 0]);

    switch (macinfo->gldm_type) {
    case DL_ETHER:
        gsp->glds_frame.value.ul = stats->glds_frame;
        gsp->glds_crc.value.ul = stats->glds_crc;
        gsp->glds_collisions.value.ul = stats->glds_collisions;
        gsp->glds_excoll.value.ul = stats->glds_excoll;
        gsp->glds_defer.value.ul = stats->glds_defer;
        gsp->glds_short.value.ul = stats->glds_short;
        gsp->glds_xmtlatecoll.value.ul = stats->glds_xmtlatecoll;
        gsp->glds_nocarrier.value.ul = stats->glds_nocarrier;
        gsp->glds_dot3_first_coll.value.ui32 =
            stats->glds_dot3_first_coll;
        gsp->glds_dot3_multi_coll.value.ui32 =
            stats->glds_dot3_multi_coll;
        gsp->glds_dot3_sqe_error.value.ui32 =
            stats->glds_dot3_sqe_error;
        gsp->glds_dot3_mac_xmt_error.value.ui32 =
            stats->glds_dot3_mac_xmt_error;
        gsp->glds_dot3_mac_rcv_error.value.ui32 =
            stats->glds_dot3_mac_rcv_error;
        gsp->glds_dot3_frame_too_long.value.ui32 =
            stats->glds_dot3_frame_too_long;
        (void) strcpy(gsp->glds_duplex.value.c, gld_duplex[
            stats->glds_duplex <
            sizeof (gld_duplex) / sizeof (gld_duplex[0]) ?
            stats->glds_duplex : 0]);
        break;
    case DL_TPR:
        gsp->glds_dot5_line_error.value.ui32 =
            stats->glds_dot5_line_error;
        gsp->glds_dot5_burst_error.value.ui32 =
            stats->glds_dot5_burst_error;
        gsp->glds_dot5_signal_loss.value.ui32 =
            stats->glds_dot5_signal_loss;
        gsp->glds_dot5_ace_error.value.ui32 =
            stats->glds_dot5_ace_error;
        gsp->glds_dot5_internal_error.value.ui32 =
            stats->glds_dot5_internal_error;
        gsp->glds_dot5_lost_frame_error.value.ui32 =
            stats->glds_dot5_lost_frame_error;
        gsp->glds_dot5_frame_copied_error.value.ui32 =
            stats->glds_dot5_frame_copied_error;
        gsp->glds_dot5_token_error.value.ui32 =
            stats->glds_dot5_token_error;
        gsp->glds_dot5_freq_error.value.ui32 =
            stats->glds_dot5_freq_error;
        break;
    case DL_FDDI:
        gsp->glds_fddi_mac_error.value.ui32 =
            stats->glds_fddi_mac_error;
        gsp->glds_fddi_mac_lost.value.ui32 =
            stats->glds_fddi_mac_lost;
        gsp->glds_fddi_mac_token.value.ui32 =
            stats->glds_fddi_mac_token;
        gsp->glds_fddi_mac_tvx_expired.value.ui32 =
            stats->glds_fddi_mac_tvx_expired;
        gsp->glds_fddi_mac_late.value.ui32 =
            stats->glds_fddi_mac_late;
        gsp->glds_fddi_mac_ring_op.value.ui32 =
            stats->glds_fddi_mac_ring_op;
        break;
    case DL_IB:
        break;
    default:
        break;
    }

    GLDM_UNLOCK(macinfo);

#ifdef GLD_DEBUG
    gld_check_assertions();
    if (gld_debug & GLDRDE)
        gld_sr_dump(macinfo);
#endif

    return (0);
}

static int
gld_init_vlan_stats(gld_vlan_t *vlan)
{
    gld_mac_info_t *mac = vlan->gldv_mac;
    gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
    struct gldkstats *sp;
    glddev_t *glddev;
    kstat_t *ksp;
    char *name;
    int instance;

    glddev = mac_pvt->major_dev;
    name = glddev->gld_name;
    instance = (vlan->gldv_id * GLD_VLAN_SCALE) + mac->gldm_ppa;

    if ((ksp = kstat_create(name, instance,
        NULL, "net", KSTAT_TYPE_NAMED,
        sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
        cmn_err(CE_WARN,
            "GLD: failed to create kstat structure for %s%d",
            name, instance);
        return (GLD_FAILURE);
    }

    vlan->gldv_kstatp = ksp;

    ksp->ks_update = gld_update_vlan_kstat;
    ksp->ks_private = (void *)vlan;

    sp =
        ksp->ks_data;
    kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
    kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
    kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
    kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
    kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
    kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
    kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
    kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
    kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
    kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
    kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);

    kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
    kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);

    kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
        KSTAT_DATA_UINT32);
    kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
        KSTAT_DATA_UINT32);

    kstat_install(ksp);
    return (GLD_SUCCESS);
}

static int
gld_update_vlan_kstat(kstat_t *ksp, int rw)
{
    gld_vlan_t *vlan;
    gld_mac_info_t *macinfo;
    struct gldkstats *gsp;
    struct gld_stats *stats;
    gld_mac_pvt_t *mac_pvt;
    uint32_t media;

    if (rw == KSTAT_WRITE)
        return (EACCES);

    vlan = (gld_vlan_t *)ksp->ks_private;
    ASSERT(vlan != NULL);

    macinfo = vlan->gldv_mac;
    GLDM_LOCK(macinfo, RW_WRITER);

    mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;

    gsp = vlan->gldv_kstatp->ks_data;
    ASSERT(gsp);
    stats = vlan->gldv_stats;

    gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
    gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
    gsp->glds_errxmt.value.ul = stats->glds_errxmt;
    gsp->glds_multixmt.value.ul = stats->glds_multixmt;
    gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
    gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;
    gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
    gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
    gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;

    gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
    gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
    gsp->glds_errrcv.value.ul = stats->glds_errrcv;
    gsp->glds_multircv.value.ul = stats->glds_multircv;
    gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
    gsp->glds_blocked.value.ul = stats->glds_blocked;
    gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
    gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
    gsp->glds_unknowns.value.ul = stats->glds_unknowns;
    gsp->glds_xmtbadinterp.value.ui32 = stats->glds_xmtbadinterp;
    gsp->glds_rcvbadinterp.value.ui32 = stats->glds_rcvbadinterp;

    gsp->glds_speed.value.ui64 = mac_pvt->statistics->glds_speed;
    media = mac_pvt->statistics->glds_media;
    (void) strcpy(gsp->glds_media.value.c,
        gld_media[media < sizeof (gld_media) / sizeof (gld_media[0]) ?
        media : 0]);

    GLDM_UNLOCK(macinfo);
    return (0);
}

/*
 * The device dependent driver specifies gld_getinfo as its getinfo routine.
 */
/*ARGSUSED*/
int
gld_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
{
    dev_info_t *devinfo;
    minor_t minor = getminor((dev_t)arg);
    int rc = DDI_FAILURE;

    switch (cmd) {
    case DDI_INFO_DEVT2DEVINFO:
        if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
            *(dev_info_t **)resultp = devinfo;
            rc = DDI_SUCCESS;
        }
        break;
    case DDI_INFO_DEVT2INSTANCE:
        /* Need static mapping for deferred attach */
        if (minor == GLD_USE_STYLE2) {
            /*
             * Style 2: this minor number does not correspond to
             * any particular instance number.
             */
            rc = DDI_FAILURE;
        } else if (minor <= GLD_MAX_STYLE1_MINOR) {
            /* Style 1: calculate the PPA from the minor */
            *resultp = (void *)(uintptr_t)
                GLD_STYLE1_MINOR_TO_PPA(minor);
            rc = DDI_SUCCESS;
        } else {
            /* Clone: look for it. Not a static mapping */
            if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
                *resultp = (void *)(uintptr_t)
                    ddi_get_instance(devinfo);
                rc = DDI_SUCCESS;
            }
        }
        break;
    }

    return (rc);
}

/* called from gld_getinfo */
dev_info_t *
gld_finddevinfo(dev_t dev)
{
    minor_t minor = getminor(dev);
    glddev_t *device;
    gld_mac_info_t *mac;
    gld_vlan_t *vlan;
    gld_t *str;
    dev_info_t *devinfo = NULL;
    int i;

    if (minor == GLD_USE_STYLE2) {
        /*
         * Style 2: this minor number does not correspond to
         * any particular instance number.
         *
         * XXX We don't know what to say. See Bug 1165519.
         */
        return (NULL);
    }

    mutex_enter(&gld_device_list.gld_devlock);  /* hold the device */

    device = gld_devlookup(getmajor(dev));
    if (device == NULL) {
        /* There are no attached instances of this device */
        mutex_exit(&gld_device_list.gld_devlock);
        return (NULL);
    }

    /*
     * Search all attached macs and streams.
     *
     * XXX We don't bother checking the DL_UNATTACHED streams since
     * we don't know what devinfo we should report back even if we
     * found the minor. Maybe we should associate streams that are
     * not currently attached to a PPA with the "first" devinfo node
     * of the major device to attach -- the one that created the
     * minor node for the generic device.
1378 */ 1379 mutex_enter(&device->gld_devlock); 1380 1381 for (mac = device->gld_mac_next; 1382 mac != (gld_mac_info_t *)&device->gld_mac_next; 1383 mac = mac->gldm_next) { 1384 gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt; 1385 1386 if (!(mac->gldm_GLD_flags & GLD_MAC_READY)) 1387 continue; /* this one's not ready yet */ 1388 if (minor <= GLD_MAX_STYLE1_MINOR) { 1389 /* Style 1 -- look for the corresponding PPA */ 1390 if (minor == GLD_STYLE1_PPA_TO_MINOR(mac->gldm_ppa)) { 1391 devinfo = mac->gldm_devinfo; 1392 goto out; /* found it! */ 1393 } else 1394 continue; /* not this PPA */ 1395 } 1396 1397 /* We are looking for a clone */ 1398 for (i = 0; i < VLAN_HASHSZ; i++) { 1399 for (vlan = pvt->vlan_hash[i]; 1400 vlan != NULL; vlan = vlan->gldv_next) { 1401 for (str = vlan->gldv_str_next; 1402 str != (gld_t *)&vlan->gldv_str_next; 1403 str = str->gld_next) { 1404 ASSERT(str->gld_mac_info == mac); 1405 if (minor == str->gld_minor) { 1406 devinfo = mac->gldm_devinfo; 1407 goto out; 1408 } 1409 } 1410 } 1411 } 1412 } 1413 out: 1414 mutex_exit(&device->gld_devlock); 1415 mutex_exit(&gld_device_list.gld_devlock); 1416 return (devinfo); 1417 } 1418 1419 /* 1420 * STREAMS open routine. The device dependent driver specifies this as its 1421 * open entry point. 1422 */ 1423 /*ARGSUSED2*/ 1424 int 1425 gld_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *cred) 1426 { 1427 gld_mac_pvt_t *mac_pvt; 1428 gld_t *gld; 1429 glddev_t *glddev; 1430 gld_mac_info_t *macinfo; 1431 minor_t minor = getminor(*dev); 1432 gld_vlan_t *vlan; 1433 t_uscalar_t ppa; 1434 1435 ASSERT(q != NULL); 1436 1437 if (minor > GLD_MAX_STYLE1_MINOR) 1438 return (ENXIO); 1439 1440 ASSERT(q->q_ptr == NULL); /* Clone device gives us a fresh Q */ 1441 1442 /* Find our per-major glddev_t structure */ 1443 mutex_enter(&gld_device_list.gld_devlock); 1444 glddev = gld_devlookup(getmajor(*dev)); 1445 1446 /* 1447 * This glddev will hang around since detach (and therefore 1448 * gld_unregister) can't run while we're here in the open routine. 1449 */ 1450 mutex_exit(&gld_device_list.gld_devlock); 1451 1452 if (glddev == NULL) 1453 return (ENXIO); 1454 1455 #ifdef GLD_DEBUG 1456 if (gld_debug & GLDPROT) { 1457 if (minor == GLD_USE_STYLE2) 1458 cmn_err(CE_NOTE, "gld_open(%p, Style 2)", (void *)q); 1459 else 1460 cmn_err(CE_NOTE, "gld_open(%p, Style 1, minor = %d)", 1461 (void *)q, minor); 1462 } 1463 #endif 1464 1465 /* 1466 * get a per-stream structure and link things together so we 1467 * can easily find them later. 1468 */ 1469 gld = kmem_zalloc(sizeof (gld_t), KM_SLEEP); 1470 1471 /* 1472 * fill in the structure and state info 1473 */ 1474 gld->gld_qptr = q; 1475 gld->gld_device = glddev; 1476 gld->gld_state = DL_UNATTACHED; 1477 1478 /* 1479 * we must atomically find a free minor number and add the stream 1480 * to a list, because gld_findminor has to traverse the lists to 1481 * determine which minor numbers are free. 
     */
    mutex_enter(&glddev->gld_devlock);

    /* find a free minor device number for the clone */
    gld->gld_minor = gld_findminor(glddev);
    if (gld->gld_minor == 0) {
        mutex_exit(&glddev->gld_devlock);
        kmem_free(gld, sizeof (gld_t));
        return (ENOSR);
    }

#ifdef GLD_VERBOSE_DEBUG
    if (gld_debug & GLDPROT)
        cmn_err(CE_NOTE, "gld_open() gld ptr: %p minor: %d",
            (void *)gld, gld->gld_minor);
#endif

    if (minor == GLD_USE_STYLE2) {
        gld->gld_style = DL_STYLE2;
        *dev = makedevice(getmajor(*dev), gld->gld_minor);
        WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
        gldinsque(gld, glddev->gld_str_prev);
#ifdef GLD_VERBOSE_DEBUG
        if (gld_debug & GLDPROT)
            cmn_err(CE_NOTE, "GLDstruct added to device list");
#endif
        (void) qassociate(q, -1);
        goto done;
    }

    gld->gld_style = DL_STYLE1;

    /* the PPA is actually 1 less than the minordev */
    ppa = GLD_STYLE1_MINOR_TO_PPA(minor);

    for (macinfo = glddev->gld_mac_next;
        macinfo != (gld_mac_info_t *)(&glddev->gld_mac_next);
        macinfo = macinfo->gldm_next) {
        ASSERT(macinfo != NULL);
        if (macinfo->gldm_ppa != ppa)
            continue;

        if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
            continue;           /* this one's not ready yet */

        /*
         * we found the correct PPA
         */
        GLDM_LOCK(macinfo, RW_WRITER);

        gld->gld_mac_info = macinfo;

        if (macinfo->gldm_send_tagged != NULL)
            gld->gld_send = macinfo->gldm_send_tagged;
        else
            gld->gld_send = macinfo->gldm_send;

        /* now ready for action */
        gld->gld_state = DL_UNBOUND;

        if ((vlan = gld_get_vlan(macinfo, VLAN_VID_NONE)) == NULL) {
            GLDM_UNLOCK(macinfo);
            mutex_exit(&glddev->gld_devlock);
            kmem_free(gld, sizeof (gld_t));
            return (EIO);
        }

        mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
        if (!mac_pvt->started) {
            if (gld_start_mac(macinfo) != GLD_SUCCESS) {
                gld_rem_vlan(vlan);
                GLDM_UNLOCK(macinfo);
                mutex_exit(&glddev->gld_devlock);
                kmem_free(gld, sizeof (gld_t));
                return (EIO);
            }
        }

        gld->gld_vlan = vlan;
        vlan->gldv_nstreams++;
        gldinsque(gld, vlan->gldv_str_prev);
        *dev = makedevice(getmajor(*dev), gld->gld_minor);
        WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;

        GLDM_UNLOCK(macinfo);
#ifdef GLD_VERBOSE_DEBUG
        if (gld_debug & GLDPROT)
            cmn_err(CE_NOTE,
                "GLDstruct added to instance list");
#endif
        break;
    }

    if (gld->gld_state == DL_UNATTACHED) {
        mutex_exit(&glddev->gld_devlock);
        kmem_free(gld, sizeof (gld_t));
        return (ENXIO);
    }

done:
    mutex_exit(&glddev->gld_devlock);
    noenable(WR(q));    /* We'll do the qenables manually */
    qprocson(q);        /* start the queues running */
    qenable(WR(q));
    return (0);
}

/*
 * normal stream close call checks current status and cleans up
 * data structures that were dynamically allocated
 */
/*ARGSUSED1*/
int
gld_close(queue_t *q, int flag, cred_t *cred)
{
    gld_t *gld = (gld_t *)q->q_ptr;
    glddev_t *glddev = gld->gld_device;

    ASSERT(q);
    ASSERT(gld);

#ifdef GLD_DEBUG
    if (gld_debug & GLDPROT) {
        cmn_err(CE_NOTE, "gld_close(%p, Style %d)",
            (void *)q, (gld->gld_style & 0x1) + 1);
    }
#endif

    /* Hold all device streams lists still while we check for a macinfo */
    mutex_enter(&glddev->gld_devlock);

    if (gld->gld_mac_info != NULL) {
        /* If there's a macinfo, block recv while we change state */
        GLDM_LOCK(gld->gld_mac_info, RW_WRITER);
        gld->gld_flags |= GLD_STR_CLOSING;  /* no more rcv putnexts */
        GLDM_UNLOCK(gld->gld_mac_info);
    } else {
        /* no mac DL_ATTACHED right now */
        gld->gld_flags |= GLD_STR_CLOSING;
    }

    mutex_exit(&glddev->gld_devlock);

    /*
     * qprocsoff before we call gld_unbind/gldunattach, so that
     * we know wsrv isn't in there trying to undo what we're doing.
     */
    qprocsoff(q);

    ASSERT(gld->gld_wput_count == 0);
    gld->gld_wput_count = 0;    /* just in case */

    if (gld->gld_state == DL_IDLE) {
        /* Need to unbind */
        ASSERT(gld->gld_mac_info != NULL);
        (void) gld_unbind(WR(q), NULL);
    }

    if (gld->gld_state == DL_UNBOUND) {
        /*
         * Need to unattach
         * For style 2 stream, gldunattach also
         * associate queue with NULL dip
         */
        ASSERT(gld->gld_mac_info != NULL);
        (void) gldunattach(WR(q), NULL);
    }

    /* disassociate the stream from the device */
    q->q_ptr = WR(q)->q_ptr = NULL;

    /*
     * Since we unattached above (if necessary), we know that we're
     * on the per-major list of unattached streams, rather than a
     * per-PPA list. So we know we should hold the devlock.
     */
    mutex_enter(&glddev->gld_devlock);
    gldremque(gld);             /* remove from Style 2 list */
    mutex_exit(&glddev->gld_devlock);

    kmem_free(gld, sizeof (gld_t));

    return (0);
}

/*
 * gld_rsrv (q)
 *  simple read service procedure
 *  purpose is to avoid the time it takes for packets
 *  to move through IP so we can get them off the board
 *  as fast as possible due to limited PC resources.
 *
 *  This is not normally used in the current implementation. It
 *  can be selected with the undocumented property "fast_recv".
 *  If that property is set, gld_recv will send the packet
 *  upstream with a putq() rather than a putnext(), thus causing
 *  this routine to be scheduled.
 */
int
gld_rsrv(queue_t *q)
{
    mblk_t *mp;

    while ((mp = getq(q)) != NULL) {
        if (canputnext(q)) {
            putnext(q, mp);
        } else {
            freemsg(mp);
        }
    }
    return (0);
}

/*
 * gld_wput (q, mp)
 * general gld stream write put routine. Receives fastpath data from upper
 * modules and processes it immediately. ioctl and M_PROTO/M_PCPROTO are
 * queued for later processing by the service procedure.
 */

int
gld_wput(queue_t *q, mblk_t *mp)
{
    gld_t *gld = (gld_t *)(q->q_ptr);
    int rc;
    boolean_t multidata = B_TRUE;
    uint32_t upri;

#ifdef GLD_DEBUG
    if (gld_debug & GLDTRACE)
        cmn_err(CE_NOTE, "gld_wput(%p %p): type %x",
            (void *)q, (void *)mp, DB_TYPE(mp));
#endif
    switch (DB_TYPE(mp)) {

    case M_DATA:
        /* fast data / raw support */
        /* we must be DL_ATTACHED and DL_BOUND to do this */
        /* Tricky to access memory without taking the mutex */
        if ((gld->gld_flags & (GLD_RAW | GLD_FAST)) == 0 ||
            gld->gld_state != DL_IDLE) {
            merror(q, mp, EPROTO);
            break;
        }
        /*
         * Cleanup MBLK_VTAG in case it is set by other
         * modules. MBLK_VTAG is used to save the vtag information.
1729 */ 1730 GLD_CLEAR_MBLK_VTAG(mp); 1731 multidata = B_FALSE; 1732 /* LINTED: E_CASE_FALLTHRU */ 1733 case M_MULTIDATA: 1734 /* Only call gld_start() directly if nothing queued ahead */ 1735 /* No guarantees about ordering with different threads */ 1736 if (q->q_first) 1737 goto use_wsrv; 1738 1739 /* 1740 * This can happen if wsrv has taken off the last mblk but 1741 * is still processing it. 1742 */ 1743 membar_consumer(); 1744 if (gld->gld_in_wsrv) 1745 goto use_wsrv; 1746 1747 /* 1748 * Keep a count of current wput calls to start. 1749 * Nonzero count delays any attempted DL_UNBIND. 1750 * See comments above gld_start(). 1751 */ 1752 atomic_inc_32((uint32_t *)&gld->gld_wput_count); 1753 membar_enter(); 1754 1755 /* Recheck state now wput_count is set to prevent DL_UNBIND */ 1756 /* If this Q is in process of DL_UNBIND, don't call start */ 1757 if (gld->gld_state != DL_IDLE || gld->gld_in_unbind) { 1758 /* Extremely unlikely */ 1759 atomic_dec_32((uint32_t *)&gld->gld_wput_count); 1760 goto use_wsrv; 1761 } 1762 1763 /* 1764 * Get the priority value. Note that in raw mode, the 1765 * per-packet priority value kept in b_band is ignored. 1766 */ 1767 upri = (gld->gld_flags & GLD_RAW) ? gld->gld_upri : 1768 UPRI(gld, mp->b_band); 1769 1770 rc = (multidata) ? gld_start_mdt(q, mp, GLD_WPUT) : 1771 gld_start(q, mp, GLD_WPUT, upri); 1772 1773 /* Allow DL_UNBIND again */ 1774 membar_exit(); 1775 atomic_dec_32((uint32_t *)&gld->gld_wput_count); 1776 1777 if (rc == GLD_NORESOURCES) 1778 qenable(q); 1779 break; /* Done with this packet */ 1780 1781 use_wsrv: 1782 /* Q not empty, in DL_DETACH, or start gave NORESOURCES */ 1783 (void) putq(q, mp); 1784 qenable(q); 1785 break; 1786 1787 case M_IOCTL: 1788 /* ioctl relies on wsrv single threading per queue */ 1789 (void) putq(q, mp); 1790 qenable(q); 1791 break; 1792 1793 case M_CTL: 1794 (void) putq(q, mp); 1795 qenable(q); 1796 break; 1797 1798 case M_FLUSH: /* canonical flush handling */ 1799 /* XXX Should these be FLUSHALL? */ 1800 if (*mp->b_rptr & FLUSHW) 1801 flushq(q, 0); 1802 if (*mp->b_rptr & FLUSHR) { 1803 flushq(RD(q), 0); 1804 *mp->b_rptr &= ~FLUSHW; 1805 qreply(q, mp); 1806 } else 1807 freemsg(mp); 1808 break; 1809 1810 case M_PROTO: 1811 case M_PCPROTO: 1812 /* these rely on wsrv single threading per queue */ 1813 (void) putq(q, mp); 1814 qenable(q); 1815 break; 1816 1817 default: 1818 #ifdef GLD_DEBUG 1819 if (gld_debug & GLDETRACE) 1820 cmn_err(CE_WARN, 1821 "gld: Unexpected packet type from queue: 0x%x", 1822 DB_TYPE(mp)); 1823 #endif 1824 freemsg(mp); 1825 } 1826 return (0); 1827 } 1828 1829 /* 1830 * gld_wsrv - Incoming messages are processed according to the DLPI protocol 1831 * specification. 1832 * 1833 * wsrv is single-threaded per Q. We make use of this to avoid taking the 1834 * lock for reading data items that are only ever written by us. 
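 * As an illustrative summary of the handshake with gld_wput() (this
 * restates the code below, it adds no extra locking): wsrv flags
 * itself busy before draining the queue and clears the flag when done,
 *
 *	gld->gld_in_wsrv = B_TRUE;
 *	membar_enter();
 *	while ((mp = getq(q)) != NULL)
 *		... gld_start()/gld_cmds() ...
 *	membar_exit();
 *	gld->gld_in_wsrv = B_FALSE;
 *
 * while wput checks q->q_first and then gld_in_wsrv (with a
 * membar_consumer() in between) before calling gld_start() directly,
 * so a message that wsrv has already dequeued but not yet handed to
 * the driver cannot be overtaken by a new M_DATA arriving via wput.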
1835 */ 1836 1837 int 1838 gld_wsrv(queue_t *q) 1839 { 1840 mblk_t *mp; 1841 gld_t *gld = (gld_t *)q->q_ptr; 1842 gld_mac_info_t *macinfo; 1843 union DL_primitives *prim; 1844 int err; 1845 boolean_t multidata; 1846 uint32_t upri; 1847 1848 #ifdef GLD_DEBUG 1849 if (gld_debug & GLDTRACE) 1850 cmn_err(CE_NOTE, "gld_wsrv(%p)", (void *)q); 1851 #endif 1852 1853 ASSERT(!gld->gld_in_wsrv); 1854 1855 gld->gld_xwait = B_FALSE; /* We are now going to process this Q */ 1856 1857 if (q->q_first == NULL) 1858 return (0); 1859 1860 macinfo = gld->gld_mac_info; 1861 1862 /* 1863 * Help wput avoid a call to gld_start if there might be a message 1864 * previously queued by that thread being processed here. 1865 */ 1866 gld->gld_in_wsrv = B_TRUE; 1867 membar_enter(); 1868 1869 while ((mp = getq(q)) != NULL) { 1870 switch (DB_TYPE(mp)) { 1871 case M_DATA: 1872 case M_MULTIDATA: 1873 multidata = (DB_TYPE(mp) == M_MULTIDATA); 1874 1875 /* 1876 * retry of a previously processed UNITDATA_REQ 1877 * or is a RAW or FAST message from above. 1878 */ 1879 if (macinfo == NULL) { 1880 /* No longer attached to a PPA, drop packet */ 1881 freemsg(mp); 1882 break; 1883 } 1884 1885 gld->gld_sched_ran = B_FALSE; 1886 membar_enter(); 1887 1888 /* 1889 * Get the priority value. Note that in raw mode, the 1890 * per-packet priority value kept in b_band is ignored. 1891 */ 1892 upri = (gld->gld_flags & GLD_RAW) ? gld->gld_upri : 1893 UPRI(gld, mp->b_band); 1894 1895 err = (multidata) ? gld_start_mdt(q, mp, GLD_WSRV) : 1896 gld_start(q, mp, GLD_WSRV, upri); 1897 if (err == GLD_NORESOURCES) { 1898 /* gld_sched will qenable us later */ 1899 gld->gld_xwait = B_TRUE; /* want qenable */ 1900 membar_enter(); 1901 /* 1902 * v2: we're not holding the lock; it's 1903 * possible that the driver could have already 1904 * called gld_sched (following up on its 1905 * return of GLD_NORESOURCES), before we got a 1906 * chance to do the putbq() and set gld_xwait. 1907 * So if we saw a call to gld_sched that 1908 * examined this queue, since our call to 1909 * gld_start() above, then it's possible we've 1910 * already seen the only call to gld_sched() 1911 * we're ever going to see. So we better retry 1912 * transmitting this packet right now. 
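 * Spelled out as a timeline (illustrative ordering only):
 *
 *	wsrv:	gld_sched_ran = B_FALSE; gld_start() -> GLD_NORESOURCES
 *	driver:	resources free up; gld_sched() sets gld_sched_ran to
 *		B_TRUE, but sees gld_xwait still B_FALSE, so it does
 *		not qenable() this queue
 *	wsrv:	gld_xwait = B_TRUE; then notices gld_sched_ran is set,
 *		so it breaks out and retries immediately rather than
 *		waiting for a qenable() that would never come.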
1913 */ 1914 if (gld->gld_sched_ran) { 1915 #ifdef GLD_DEBUG 1916 if (gld_debug & GLDTRACE) 1917 cmn_err(CE_NOTE, "gld_wsrv: " 1918 "sched was called"); 1919 #endif 1920 break; /* try again right now */ 1921 } 1922 gld->gld_in_wsrv = B_FALSE; 1923 return (0); 1924 } 1925 break; 1926 1927 case M_IOCTL: 1928 (void) gld_ioctl(q, mp); 1929 break; 1930 1931 case M_CTL: 1932 if (macinfo == NULL) { 1933 freemsg(mp); 1934 break; 1935 } 1936 1937 if (macinfo->gldm_mctl != NULL) { 1938 GLDM_LOCK(macinfo, RW_WRITER); 1939 (void) (*macinfo->gldm_mctl) (macinfo, q, mp); 1940 GLDM_UNLOCK(macinfo); 1941 } else { 1942 /* This driver doesn't recognize, just drop */ 1943 freemsg(mp); 1944 } 1945 break; 1946 1947 case M_PROTO: /* Will be an DLPI message of some type */ 1948 case M_PCPROTO: 1949 if ((err = gld_cmds(q, mp)) != GLDE_OK) { 1950 if (err == GLDE_RETRY) { 1951 gld->gld_in_wsrv = B_FALSE; 1952 return (0); /* quit while we're ahead */ 1953 } 1954 prim = (union DL_primitives *)mp->b_rptr; 1955 dlerrorack(q, mp, prim->dl_primitive, err, 0); 1956 } 1957 break; 1958 1959 default: 1960 /* This should never happen */ 1961 #ifdef GLD_DEBUG 1962 if (gld_debug & GLDERRS) 1963 cmn_err(CE_WARN, 1964 "gld_wsrv: db_type(%x) not supported", 1965 mp->b_datap->db_type); 1966 #endif 1967 freemsg(mp); /* unknown types are discarded */ 1968 break; 1969 } 1970 } 1971 1972 membar_exit(); 1973 gld->gld_in_wsrv = B_FALSE; 1974 return (0); 1975 } 1976 1977 /* 1978 * gld_start() can get called from gld_wput(), gld_wsrv(), or gld_unitdata(). 1979 * 1980 * We only come directly from wput() in the GLD_FAST (fastpath) or RAW case. 1981 * 1982 * In particular, we must avoid calling gld_precv*() if we came from wput(). 1983 * gld_precv*() is where we, on the transmit side, loop back our outgoing 1984 * packets to the receive side if we are in physical promiscuous mode. 1985 * Since the receive side holds a lock across its call to the upstream 1986 * putnext, and that upstream module could well have looped back to our 1987 * wput() routine on the same thread, we cannot call gld_precv* from here 1988 * for fear of causing a recursive lock entry in our receive code. 1989 * 1990 * There is a problem here when coming from gld_wput(). While wput 1991 * only comes here if the queue is attached to a PPA and bound to a SAP 1992 * and there are no messages on the queue ahead of the M_DATA that could 1993 * change that, it is theoretically possible that another thread could 1994 * now wput a DL_UNBIND and a DL_DETACH message, and the wsrv() routine 1995 * could wake up and process them, before we finish processing this 1996 * send of the M_DATA. This can only possibly happen on a Style 2 RAW or 1997 * FAST (fastpath) stream: non RAW/FAST streams always go through wsrv(), 1998 * and Style 1 streams only DL_DETACH in the close routine, where 1999 * qprocsoff() protects us. If this happens we could end up calling 2000 * gldm_send() after we have detached the stream and possibly called 2001 * gldm_stop(). Worse, once the number of attached streams goes to zero, 2002 * detach/unregister could be called, and the macinfo could go away entirely. 2003 * 2004 * No one has ever seen this happen. 2005 * 2006 * It is some trouble to fix this, and we would rather not add any mutex 2007 * logic into the wput() routine, which is supposed to be a "fast" 2008 * path. 2009 * 2010 * What I've done is use an atomic counter to keep a count of the number 2011 * of threads currently calling gld_start() from wput() on this stream. 
2012 * If DL_DETACH sees this as nonzero, it putbqs the request back onto 2013 * the queue and qenables, hoping to have better luck next time. Since 2014 * people shouldn't be trying to send after they've asked to DL_DETACH, 2015 * hopefully very soon all the wput=>start threads should have returned 2016 * and the DL_DETACH will succeed. It's hard to test this since the odds 2017 * of the failure even trying to happen are so small. I probably could 2018 * have ignored the whole issue and never been the worse for it. 2019 * 2020 * Because some GLDv2 Ethernet drivers do not allow the size of a transmitted 2021 * packet to be greater than ETHERMAX, we must first strip the VLAN tag 2022 * from a tagged packet before passing it to the driver's gld_send() entry 2023 * point function, and pass the VLAN tag as a separate argument. The 2024 * gld_send() function may fail. In that case, the packet will need to be 2025 * queued in order to be processed again in GLD's service routine. As the 2026 * VTAG has already been stripped at that time, we save the VTAG information 2027 * in (the unused fields of) dblk using GLD_SAVE_MBLK_VTAG(), so that the 2028 * VTAG can also be queued and then retrieved when gld_start() is called 2029 * again from gld_wsrv(). 2030 * 2031 * Rules for using the GLD_{CLEAR|SAVE}_MBLK_VTAG macros: 2032 * 2033 * - GLD_SAVE_MBLK_VTAG() must be called to save the VTAG information each time 2034 * the message is queued by putbq(). 2035 * 2036 * - GLD_CLEAR_MBLK_VTAG() must be called to clear the bogus VTAG information 2037 * (if any) in dblk before the message is passed to the gld_start() function. 2038 */ 2039 static int 2040 gld_start(queue_t *q, mblk_t *mp, int caller, uint32_t upri) 2041 { 2042 mblk_t *nmp; 2043 gld_t *gld = (gld_t *)q->q_ptr; 2044 gld_mac_info_t *macinfo; 2045 gld_mac_pvt_t *mac_pvt; 2046 int rc; 2047 gld_interface_t *ifp; 2048 pktinfo_t pktinfo; 2049 uint32_t vtag, vid; 2050 uint32_t raw_vtag = 0; 2051 gld_vlan_t *vlan; 2052 struct gld_stats *stats0, *stats = NULL; 2053 2054 ASSERT(DB_TYPE(mp) == M_DATA); 2055 macinfo = gld->gld_mac_info; 2056 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 2057 ifp = mac_pvt->interfacep; 2058 vlan = (gld_vlan_t *)gld->gld_vlan; 2059 vid = vlan->gldv_id; 2060 2061 /* 2062 * If this interface is a VLAN, the kstats of the corresponding 2063 * "VLAN 0" should also be updated. Note that the gld_vlan_t 2064 * structure for VLAN 0 might not exist if there are no DLPI 2065 * consumers attaching on VLAN 0. Fortunately we can directly 2066 * access VLAN 0's kstats from macinfo. 2067 * 2068 * Therefore, stats0 (VLAN 0's kstats) must always be 2069 * updated, and stats must also be updated if it is not NULL. 2070 */ 2071 stats0 = mac_pvt->statistics; 2072 if (vid != VLAN_VID_NONE) 2073 stats = vlan->gldv_stats; 2074 2075 if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_TX) != 0) { 2076 #ifdef GLD_DEBUG 2077 if (gld_debug & GLDERRS) 2078 cmn_err(CE_WARN, 2079 "gld_start: failed to interpret outbound packet"); 2080 #endif 2081 goto badarg; 2082 } 2083 2084 vtag = VLAN_VID_NONE; 2085 raw_vtag = GLD_GET_MBLK_VTAG(mp); 2086 if (GLD_VTAG_TCI(raw_vtag) != 0) { 2087 uint16_t raw_pri, raw_vid, evid; 2088 2089 /* 2090 * Tagged packet. 2091 */ 2092 raw_pri = GLD_VTAG_PRI(raw_vtag); 2093 raw_vid = GLD_VTAG_VID(raw_vtag); 2094 GLD_CLEAR_MBLK_VTAG(mp); 2095 2096 if (gld->gld_flags & GLD_RAW) { 2097 /* 2098 * In raw mode, we only expect untagged packets or 2099 * special priority-tagged packets on a VLAN stream.
2100 * Drop the packet if its VID is not zero. 2101 */ 2102 if (vid != VLAN_VID_NONE && raw_vid != VLAN_VID_NONE) 2103 goto badarg; 2104 2105 /* 2106 * If it is raw mode, use the per-stream priority if 2107 * the priority is not specified in the packet. 2108 * Otherwise, ignore the priority bits in the packet. 2109 */ 2110 upri = (raw_pri != 0) ? raw_pri : upri; 2111 } 2112 2113 if (vid == VLAN_VID_NONE && vid != raw_vid) { 2114 gld_vlan_t *tmp_vlan; 2115 2116 /* 2117 * This link is a physical link but the packet is 2118 * a VLAN tagged packet, the kstats of corresponding 2119 * VLAN (if any) should also be updated. 2120 */ 2121 tmp_vlan = gld_find_vlan(macinfo, raw_vid); 2122 if (tmp_vlan != NULL) 2123 stats = tmp_vlan->gldv_stats; 2124 } 2125 2126 evid = (vid == VLAN_VID_NONE) ? raw_vid : vid; 2127 if (evid != VLAN_VID_NONE || upri != 0) 2128 vtag = GLD_MAKE_VTAG(upri, VLAN_CFI_ETHER, evid); 2129 } else { 2130 /* 2131 * Untagged packet: 2132 * Get vtag from the attached PPA of this stream. 2133 */ 2134 if ((vid != VLAN_VID_NONE) || 2135 ((macinfo->gldm_type == DL_ETHER) && (upri != 0))) { 2136 vtag = GLD_MAKE_VTAG(upri, VLAN_CFI_ETHER, vid); 2137 } 2138 } 2139 2140 /* 2141 * We're not holding the lock for this check. If the promiscuous 2142 * state is in flux it doesn't matter much if we get this wrong. 2143 */ 2144 if (mac_pvt->nprom > 0) { 2145 /* 2146 * We want to loopback to the receive side, but to avoid 2147 * recursive lock entry: if we came from wput(), which 2148 * could have looped back via IP from our own receive 2149 * interrupt thread, we decline this request. wput() 2150 * will then queue the packet for wsrv(). This means 2151 * that when snoop is running we don't get the advantage 2152 * of the wput() multithreaded direct entry to the 2153 * driver's send routine. 2154 */ 2155 if (caller == GLD_WPUT) { 2156 GLD_SAVE_MBLK_VTAG(mp, raw_vtag); 2157 (void) putbq(q, mp); 2158 return (GLD_NORESOURCES); 2159 } 2160 if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY) 2161 nmp = dupmsg_noloan(mp); 2162 else 2163 nmp = dupmsg(mp); 2164 } else 2165 nmp = NULL; /* we need no loopback */ 2166 2167 if (ifp->hdr_size > 0 && 2168 pktinfo.pktLen > ifp->hdr_size + (vtag == 0 ? 0 : VTAG_SIZE) + 2169 macinfo->gldm_maxpkt) { 2170 if (nmp) 2171 freemsg(nmp); /* free the duped message */ 2172 #ifdef GLD_DEBUG 2173 if (gld_debug & GLDERRS) 2174 cmn_err(CE_WARN, 2175 "gld_start: oversize outbound packet, size %d," 2176 "max %d", pktinfo.pktLen, 2177 ifp->hdr_size + (vtag == 0 ? 0 : VTAG_SIZE) + 2178 macinfo->gldm_maxpkt); 2179 #endif 2180 goto badarg; 2181 } 2182 2183 rc = (*gld->gld_send)(macinfo, mp, vtag); 2184 2185 if (rc != GLD_SUCCESS) { 2186 if (rc == GLD_NORESOURCES) { 2187 ATOMIC_BUMP(stats0, stats, glds_xmtretry, 1); 2188 GLD_SAVE_MBLK_VTAG(mp, raw_vtag); 2189 (void) putbq(q, mp); 2190 } else { 2191 /* transmit error; drop the packet */ 2192 freemsg(mp); 2193 /* We're supposed to count failed attempts as well */ 2194 UPDATE_STATS(stats0, stats, pktinfo, 1); 2195 #ifdef GLD_DEBUG 2196 if (gld_debug & GLDERRS) 2197 cmn_err(CE_WARN, 2198 "gld_start: gldm_send failed %d", rc); 2199 #endif 2200 } 2201 if (nmp) 2202 freemsg(nmp); /* free the dupped message */ 2203 return (rc); 2204 } 2205 2206 UPDATE_STATS(stats0, stats, pktinfo, 1); 2207 2208 /* 2209 * Loopback case. The message needs to be returned back on 2210 * the read side. This would silently fail if the dupmsg fails 2211 * above. 
This is probably OK, if there is no memory to dup the 2212 * block, then there isn't much we could do anyway. 2213 */ 2214 if (nmp) { 2215 GLDM_LOCK(macinfo, RW_WRITER); 2216 gld_precv(macinfo, nmp, vtag, stats); 2217 GLDM_UNLOCK(macinfo); 2218 } 2219 2220 return (GLD_SUCCESS); 2221 badarg: 2222 freemsg(mp); 2223 2224 ATOMIC_BUMP(stats0, stats, glds_xmtbadinterp, 1); 2225 return (GLD_BADARG); 2226 } 2227 2228 /* 2229 * With MDT V.2 a single message mp can have one header area and multiple 2230 * payload areas. A packet is described by dl_pkt_info, and each packet can 2231 * span multiple payload areas (currently with TCP, each packet will have one 2232 * header and at the most two payload areas). MACs might have a limit on the 2233 * number of payload segments (i.e. per packet scatter-gather limit), and 2234 * MDT V.2 has a way of specifying that with mdt_span_limit; the MAC driver 2235 * might also have a limit on the total number of payloads in a message, and 2236 * that is specified by mdt_max_pld. 2237 */ 2238 static int 2239 gld_start_mdt(queue_t *q, mblk_t *mp, int caller) 2240 { 2241 mblk_t *nextmp; 2242 gld_t *gld = (gld_t *)q->q_ptr; 2243 gld_mac_info_t *macinfo = gld->gld_mac_info; 2244 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 2245 int numpacks, mdtpacks; 2246 gld_interface_t *ifp = mac_pvt->interfacep; 2247 pktinfo_t pktinfo; 2248 gld_vlan_t *vlan = (gld_vlan_t *)gld->gld_vlan; 2249 boolean_t doloop = B_FALSE; 2250 multidata_t *dlmdp; 2251 pdescinfo_t pinfo; 2252 pdesc_t *dl_pkt; 2253 void *cookie; 2254 uint_t totLen = 0; 2255 2256 ASSERT(DB_TYPE(mp) == M_MULTIDATA); 2257 2258 /* 2259 * We're not holding the lock for this check. If the promiscuous 2260 * state is in flux it doesn't matter much if we get this wrong. 2261 */ 2262 if (mac_pvt->nprom > 0) { 2263 /* 2264 * We want to loopback to the receive side, but to avoid 2265 * recursive lock entry: if we came from wput(), which 2266 * could have looped back via IP from our own receive 2267 * interrupt thread, we decline this request. wput() 2268 * will then queue the packet for wsrv(). This means 2269 * that when snoop is running we don't get the advantage 2270 * of the wput() multithreaded direct entry to the 2271 * driver's send routine. 2272 */ 2273 if (caller == GLD_WPUT) { 2274 (void) putbq(q, mp); 2275 return (GLD_NORESOURCES); 2276 } 2277 doloop = B_TRUE; 2278 2279 /* 2280 * unlike the M_DATA case, we don't have to call 2281 * dupmsg_noloan here because mmd_transform 2282 * (called by gld_precv_mdt) will make a copy of 2283 * each dblk. 2284 */ 2285 } 2286 2287 while (mp != NULL) { 2288 /* 2289 * The lower layer driver only gets a single multidata 2290 * message; this also makes it easier to handle noresources. 2291 */ 2292 nextmp = mp->b_cont; 2293 mp->b_cont = NULL; 2294 2295 /* 2296 * Get number of packets in this message; if nothing 2297 * to transmit, go to next message. 2298 */ 2299 dlmdp = mmd_getmultidata(mp); 2300 if ((mdtpacks = (int)mmd_getcnt(dlmdp, NULL, NULL)) == 0) { 2301 freemsg(mp); 2302 mp = nextmp; 2303 continue; 2304 } 2305 2306 /* 2307 * Run interpreter to populate media specific pktinfo fields. 2308 * This collects per MDT message information like sap, 2309 * broad/multicast etc. 2310 */ 2311 (void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, &pktinfo, 2312 GLD_MDT_TX); 2313 2314 numpacks = (*macinfo->gldm_mdt_pre)(macinfo, mp, &cookie); 2315 2316 if (numpacks > 0) { 2317 /* 2318 * Driver indicates it can transmit at least 1, and 2319 * possibly all, packets in MDT message. 
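 * To summarize the gldm_mdt_pre() contract as this loop uses it
 * (an informal restatement of the three cases handled below):
 *
 *	n = (*macinfo->gldm_mdt_pre)(macinfo, mp, &cookie);
 *	n > 0:   send the first n packets with gldm_mdt_send(), then
 *	         call gldm_mdt_post(); if n < mdtpacks, requeue the rest
 *	n == 0:  no resources at all right now; putbq() and retry later
 *	n == -1: transmit error; count the drops and freemsg()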
2320 */ 2321 int count = numpacks; 2322 2323 for (dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo); 2324 (dl_pkt != NULL); 2325 dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo)) { 2326 /* 2327 * Format this packet by adding link header and 2328 * adjusting pdescinfo to include it; get 2329 * packet length. 2330 */ 2331 (void) (*ifp->interpreter_mdt)(macinfo, NULL, 2332 &pinfo, &pktinfo, GLD_MDT_TXPKT); 2333 2334 totLen += pktinfo.pktLen; 2335 2336 /* 2337 * Loop back packet before handing to the 2338 * driver. 2339 */ 2340 if (doloop && 2341 mmd_adjpdesc(dl_pkt, &pinfo) != NULL) { 2342 GLDM_LOCK(macinfo, RW_WRITER); 2343 gld_precv_mdt(macinfo, vlan, mp, 2344 dl_pkt, &pktinfo); 2345 GLDM_UNLOCK(macinfo); 2346 } 2347 2348 /* 2349 * And send off to driver. 2350 */ 2351 (*macinfo->gldm_mdt_send)(macinfo, cookie, 2352 &pinfo); 2353 2354 /* 2355 * Be careful not to invoke getnextpdesc if we 2356 * already sent the last packet, since driver 2357 * might have posted it to hardware causing a 2358 * completion and freemsg() so the MDT data 2359 * structures might not be valid anymore. 2360 */ 2361 if (--count == 0) 2362 break; 2363 } 2364 (*macinfo->gldm_mdt_post)(macinfo, mp, cookie); 2365 pktinfo.pktLen = totLen; 2366 UPDATE_STATS(vlan->gldv_stats, NULL, pktinfo, numpacks); 2367 2368 /* 2369 * In the noresources case (when driver indicates it 2370 * can not transmit all packets in the MDT message), 2371 * adjust to skip the first few packets on retrial. 2372 */ 2373 if (numpacks != mdtpacks) { 2374 /* 2375 * Release already processed packet descriptors. 2376 */ 2377 for (count = 0; count < numpacks; count++) { 2378 dl_pkt = mmd_getfirstpdesc(dlmdp, 2379 &pinfo); 2380 mmd_rempdesc(dl_pkt); 2381 } 2382 ATOMIC_BUMP(vlan->gldv_stats, NULL, 2383 glds_xmtretry, 1); 2384 mp->b_cont = nextmp; 2385 (void) putbq(q, mp); 2386 return (GLD_NORESOURCES); 2387 } 2388 } else if (numpacks == 0) { 2389 /* 2390 * Driver indicates it can not transmit any packets 2391 * currently and will request retrial later. 2392 */ 2393 ATOMIC_BUMP(vlan->gldv_stats, NULL, glds_xmtretry, 1); 2394 mp->b_cont = nextmp; 2395 (void) putbq(q, mp); 2396 return (GLD_NORESOURCES); 2397 } else { 2398 ASSERT(numpacks == -1); 2399 /* 2400 * We're supposed to count failed attempts as well. 2401 */ 2402 dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo); 2403 while (dl_pkt != NULL) { 2404 /* 2405 * Call interpreter to determine total packet 2406 * bytes that are being dropped. 2407 */ 2408 (void) (*ifp->interpreter_mdt)(macinfo, NULL, 2409 &pinfo, &pktinfo, GLD_MDT_TXPKT); 2410 2411 totLen += pktinfo.pktLen; 2412 2413 dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo); 2414 } 2415 pktinfo.pktLen = totLen; 2416 UPDATE_STATS(vlan->gldv_stats, NULL, pktinfo, mdtpacks); 2417 2418 /* 2419 * Transmit error; drop the message, move on 2420 * to the next one. 2421 */ 2422 freemsg(mp); 2423 } 2424 2425 /* 2426 * Process the next multidata block, if there is one. 2427 */ 2428 mp = nextmp; 2429 } 2430 2431 return (GLD_SUCCESS); 2432 } 2433 2434 /* 2435 * gld_intr (macinfo) 2436 */ 2437 uint_t 2438 gld_intr(gld_mac_info_t *macinfo) 2439 { 2440 ASSERT(macinfo != NULL); 2441 2442 if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) 2443 return (DDI_INTR_UNCLAIMED); 2444 2445 return ((*macinfo->gldm_intr)(macinfo)); 2446 } 2447 2448 /* 2449 * gld_sched (macinfo) 2450 * 2451 * This routine scans the streams that refer to a specific macinfo 2452 * structure and causes the STREAMS scheduler to try to run them if 2453 * they are marked as waiting for the transmit buffer. 
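 * A typical driver-side usage sketch (xx_reclaim() and the softc
 * fields are hypothetical driver code, not part of GLD):
 *
 *	static void
 *	xx_reclaim(struct xx_softc *sc)
 *	{
 *		... free completed transmit descriptors ...
 *		if (sc->xx_need_sched) {
 *			sc->xx_need_sched = B_FALSE;
 *			gld_sched(sc->xx_macinfo);
 *		}
 *	}
 *
 * where xx_need_sched was set when gldm_send() had to return
 * GLD_NORESOURCES.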
2454 */ 2455 void 2456 gld_sched(gld_mac_info_t *macinfo) 2457 { 2458 gld_mac_pvt_t *mac_pvt; 2459 gld_t *gld; 2460 gld_vlan_t *vlan; 2461 int i; 2462 2463 ASSERT(macinfo != NULL); 2464 2465 GLDM_LOCK(macinfo, RW_WRITER); 2466 2467 if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) { 2468 /* We're probably being called from a leftover interrupt */ 2469 GLDM_UNLOCK(macinfo); 2470 return; 2471 } 2472 2473 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 2474 2475 for (i = 0; i < VLAN_HASHSZ; i++) { 2476 for (vlan = mac_pvt->vlan_hash[i]; 2477 vlan != NULL; vlan = vlan->gldv_next) { 2478 for (gld = vlan->gldv_str_next; 2479 gld != (gld_t *)&vlan->gldv_str_next; 2480 gld = gld->gld_next) { 2481 ASSERT(gld->gld_mac_info == macinfo); 2482 gld->gld_sched_ran = B_TRUE; 2483 membar_enter(); 2484 if (gld->gld_xwait) { 2485 gld->gld_xwait = B_FALSE; 2486 qenable(WR(gld->gld_qptr)); 2487 } 2488 } 2489 } 2490 } 2491 2492 GLDM_UNLOCK(macinfo); 2493 } 2494 2495 /* 2496 * gld_precv (macinfo, mp, vtag, stats) 2497 * called from gld_start to loopback a packet when in promiscuous mode 2498 * 2499 * VLAN 0's statistics need to be updated. If stats is not NULL, 2500 * it needs to be updated as well. 2501 */ 2502 static void 2503 gld_precv(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t vtag, 2504 struct gld_stats *stats) 2505 { 2506 gld_mac_pvt_t *mac_pvt; 2507 gld_interface_t *ifp; 2508 pktinfo_t pktinfo; 2509 2510 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo)); 2511 2512 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 2513 ifp = mac_pvt->interfacep; 2514 2515 /* 2516 * call the media specific packet interpreter routine 2517 */ 2518 if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RXLOOP) != 0) { 2519 freemsg(mp); 2520 BUMP(mac_pvt->statistics, stats, glds_rcvbadinterp, 1); 2521 #ifdef GLD_DEBUG 2522 if (gld_debug & GLDERRS) 2523 cmn_err(CE_WARN, 2524 "gld_precv: interpreter failed"); 2525 #endif 2526 return; 2527 } 2528 2529 /* 2530 * Update the vtag information. 2531 */ 2532 pktinfo.isTagged = (vtag != VLAN_VID_NONE); 2533 pktinfo.vid = GLD_VTAG_VID(vtag); 2534 pktinfo.cfi = GLD_VTAG_CFI(vtag); 2535 pktinfo.user_pri = GLD_VTAG_PRI(vtag); 2536 2537 gld_sendup(macinfo, &pktinfo, mp, gld_paccept); 2538 } 2539 2540 /* 2541 * Called from gld_start_mdt to loopback packet(s) when in promiscuous mode. 2542 * Note that 'vlan' is always a physical link, because MDT can only be 2543 * enabled on non-VLAN streams. 2544 */ 2545 /*ARGSUSED*/ 2546 static void 2547 gld_precv_mdt(gld_mac_info_t *macinfo, gld_vlan_t *vlan, mblk_t *mp, 2548 pdesc_t *dl_pkt, pktinfo_t *pktinfo) 2549 { 2550 mblk_t *adjmp; 2551 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 2552 gld_interface_t *ifp = mac_pvt->interfacep; 2553 2554 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo)); 2555 2556 /* 2557 * Get source/destination. 2558 */ 2559 (void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, pktinfo, 2560 GLD_MDT_RXLOOP); 2561 if ((adjmp = mmd_transform(dl_pkt)) != NULL) 2562 gld_sendup(macinfo, pktinfo, adjmp, gld_paccept); 2563 } 2564 2565 /* 2566 * gld_recv (macinfo, mp) 2567 * called with an mac-level packet in a mblock; take the maclock, 2568 * try the ip4q and ip6q hack, and otherwise call gld_sendup. 2569 * 2570 * V0 drivers already are holding the mutex when they call us. 
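 * Driver-side usage sketch (the buffer names are hypothetical and
 * error handling is omitted):
 *
 *	mblk_t *mp = allocb(len, BPRI_MED);
 *	if (mp != NULL) {
 *		bcopy(rx_buf, mp->b_wptr, len);	full frame, including
 *		mp->b_wptr += len;		the MAC header
 *		gld_recv(macinfo, mp);
 *	}
 *
 * A driver whose hardware strips the 802.1Q tag hands it back
 * separately with gld_recv_tagged(macinfo, mp, vtag) instead.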
2571 */ 2572 void 2573 gld_recv(gld_mac_info_t *macinfo, mblk_t *mp) 2574 { 2575 gld_recv_tagged(macinfo, mp, VLAN_VTAG_NONE); 2576 } 2577 2578 void 2579 gld_recv_tagged(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t vtag) 2580 { 2581 gld_mac_pvt_t *mac_pvt; 2582 char pbuf[3*GLD_MAX_ADDRLEN]; 2583 pktinfo_t pktinfo; 2584 gld_interface_t *ifp; 2585 queue_t *ipq = NULL; 2586 gld_vlan_t *vlan = NULL, *vlan0 = NULL, *vlann = NULL; 2587 struct gld_stats *stats0, *stats = NULL; 2588 uint32_t vid; 2589 int err; 2590 2591 ASSERT(macinfo != NULL); 2592 ASSERT(mp->b_datap->db_ref); 2593 2594 GLDM_LOCK(macinfo, RW_READER); 2595 2596 if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) { 2597 /* We're probably being called from a leftover interrupt */ 2598 freemsg(mp); 2599 goto done; 2600 } 2601 2602 /* 2603 * If this packet is a VLAN tagged packet, the kstats of corresponding 2604 * "VLAN 0" should also be updated. We can directly access VLAN 0's 2605 * kstats from macinfo. 2606 * 2607 * Further, the packets needs to be passed to VLAN 0 if there is 2608 * any DLPI consumer on VLAN 0 who is interested in tagged packets 2609 * (DL_PROMISC_SAP is on or is bounded to ETHERTYPE_VLAN SAP). 2610 */ 2611 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 2612 stats0 = mac_pvt->statistics; 2613 2614 vid = GLD_VTAG_VID(vtag); 2615 vlan0 = gld_find_vlan(macinfo, VLAN_VID_NONE); 2616 if (vid != VLAN_VID_NONE) { 2617 /* 2618 * If there are no physical DLPI consumers interested in the 2619 * VLAN packet, clear vlan0. 2620 */ 2621 if ((vlan0 != NULL) && (vlan0->gldv_nvlan_sap == 0)) 2622 vlan0 = NULL; 2623 /* 2624 * vlann is the VLAN with the same VID as the VLAN packet. 2625 */ 2626 vlann = gld_find_vlan(macinfo, vid); 2627 if (vlann != NULL) 2628 stats = vlann->gldv_stats; 2629 } 2630 2631 vlan = (vid == VLAN_VID_NONE) ? vlan0 : vlann; 2632 2633 ifp = mac_pvt->interfacep; 2634 err = (*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RXQUICK); 2635 2636 BUMP(stats0, stats, glds_bytercv64, pktinfo.pktLen); 2637 BUMP(stats0, stats, glds_pktrcv64, 1); 2638 2639 if ((vlann == NULL) && (vlan0 == NULL)) { 2640 freemsg(mp); 2641 goto done; 2642 } 2643 2644 /* 2645 * Check whether underlying media code supports the IPQ hack: 2646 * 2647 * - the interpreter could quickly parse the packet 2648 * - the device type supports IPQ (ethernet and IPoIB) 2649 * - there is one, and only one, IP stream bound (to this VLAN) 2650 * - that stream is a "fastpath" stream 2651 * - the packet is of type ETHERTYPE_IP or ETHERTYPE_IPV6 2652 * - there are no streams in promiscuous mode (on this VLAN) 2653 * - if this packet is tagged, there is no need to send this 2654 * packet to physical streams 2655 */ 2656 if ((err != 0) && ((vlan != NULL) && (vlan->gldv_nprom == 0)) && 2657 (vlan == vlan0 || vlan0 == NULL)) { 2658 switch (pktinfo.ethertype) { 2659 case ETHERTYPE_IP: 2660 ipq = vlan->gldv_ipq; 2661 break; 2662 case ETHERTYPE_IPV6: 2663 ipq = vlan->gldv_ipv6q; 2664 break; 2665 } 2666 } 2667 2668 /* 2669 * Special case for IP; we can simply do the putnext here, if: 2670 * o The IPQ hack is possible (ipq != NULL). 2671 * o the packet is specifically for me, and therefore: 2672 * - the packet is not multicast or broadcast (fastpath only 2673 * wants unicast packets). 2674 * 2675 * o the stream is not asserting flow control. 2676 */ 2677 if (ipq != NULL && 2678 pktinfo.isForMe && 2679 canputnext(ipq)) { 2680 /* 2681 * Skip the mac header. 
We know there is no LLC1/SNAP header 2682 * in this packet 2683 */ 2684 mp->b_rptr += pktinfo.macLen; 2685 putnext(ipq, mp); 2686 goto done; 2687 } 2688 2689 /* 2690 * call the media specific packet interpreter routine 2691 */ 2692 if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RX) != 0) { 2693 BUMP(stats0, stats, glds_rcvbadinterp, 1); 2694 #ifdef GLD_DEBUG 2695 if (gld_debug & GLDERRS) 2696 cmn_err(CE_WARN, 2697 "gld_recv_tagged: interpreter failed"); 2698 #endif 2699 freemsg(mp); 2700 goto done; 2701 } 2702 2703 /* 2704 * This is safe even if vtag is VLAN_VTAG_NONE 2705 */ 2706 pktinfo.vid = vid; 2707 pktinfo.cfi = GLD_VTAG_CFI(vtag); 2708 #ifdef GLD_DEBUG 2709 if (pktinfo.cfi != VLAN_CFI_ETHER) 2710 cmn_err(CE_WARN, "gld_recv_tagged: non-ETHER CFI"); 2711 #endif 2712 pktinfo.user_pri = GLD_VTAG_PRI(vtag); 2713 pktinfo.isTagged = (vtag != VLAN_VID_NONE); 2714 2715 #ifdef GLD_DEBUG 2716 if ((gld_debug & GLDRECV) && 2717 (!(gld_debug & GLDNOBR) || 2718 (!pktinfo.isBroadcast && !pktinfo.isMulticast))) { 2719 char pbuf2[3*GLD_MAX_ADDRLEN]; 2720 2721 cmn_err(CE_CONT, "gld_recv_tagged: machdr=<%s -> %s>\n", 2722 gld_macaddr_sprintf(pbuf, pktinfo.shost, 2723 macinfo->gldm_addrlen), gld_macaddr_sprintf(pbuf2, 2724 pktinfo.dhost, macinfo->gldm_addrlen)); 2725 cmn_err(CE_CONT, "gld_recv_tagged: VlanId %d UserPri %d\n", 2726 pktinfo.vid, 2727 pktinfo.user_pri); 2728 cmn_err(CE_CONT, "gld_recv_tagged: ethertype: %4x Len: %4d " 2729 "Hdr: %d,%d isMulticast: %s\n", 2730 pktinfo.ethertype, 2731 pktinfo.pktLen, 2732 pktinfo.macLen, 2733 pktinfo.hdrLen, 2734 pktinfo.isMulticast ? "Y" : "N"); 2735 } 2736 #endif 2737 2738 gld_sendup(macinfo, &pktinfo, mp, gld_accept); 2739 2740 done: 2741 GLDM_UNLOCK(macinfo); 2742 } 2743 2744 /* =================================================================== */ 2745 /* receive group: called from gld_recv and gld_precv* with maclock held */ 2746 /* =================================================================== */ 2747 2748 /* 2749 * Search all the streams attached to the specified VLAN looking for 2750 * those eligible to receive the packet. 2751 * Note that in order to avoid an extra dupmsg(), if this is the first 2752 * eligible stream, remember it (in fgldp) so that we can send up the 2753 * message after this function. 2754 * 2755 * Return errno if fails. Currently the only error is ENOMEM. 2756 */ 2757 static int 2758 gld_sendup_vlan(gld_vlan_t *vlan, pktinfo_t *pktinfo, mblk_t *mp, 2759 int (*acceptfunc)(), void (*send)(), int (*cansend)(), gld_t **fgldp) 2760 { 2761 mblk_t *nmp; 2762 gld_t *gld; 2763 int err = 0; 2764 2765 ASSERT(vlan != NULL); 2766 for (gld = vlan->gldv_str_next; gld != (gld_t *)&vlan->gldv_str_next; 2767 gld = gld->gld_next) { 2768 #ifdef GLD_VERBOSE_DEBUG 2769 cmn_err(CE_NOTE, "gld_sendup_vlan: SAP: %4x QPTR: %p " 2770 "QSTATE: %s", gld->gld_sap, (void *)gld->gld_qptr, 2771 gld->gld_state == DL_IDLE ? "IDLE" : "NOT IDLE"); 2772 #endif 2773 ASSERT(gld->gld_qptr != NULL); 2774 ASSERT(gld->gld_state == DL_IDLE || 2775 gld->gld_state == DL_UNBOUND); 2776 ASSERT(gld->gld_vlan == vlan); 2777 2778 if (gld->gld_state != DL_IDLE) 2779 continue; /* not eligible to receive */ 2780 if (gld->gld_flags & GLD_STR_CLOSING) 2781 continue; /* not eligible to receive */ 2782 2783 #ifdef GLD_DEBUG 2784 if ((gld_debug & GLDRECV) && 2785 (!(gld_debug & GLDNOBR) || 2786 (!pktinfo->isBroadcast && !pktinfo->isMulticast))) 2787 cmn_err(CE_NOTE, 2788 "gld_sendup: queue sap: %4x promis: %s %s %s", 2789 gld->gld_sap, 2790 gld->gld_flags & GLD_PROM_PHYS ? 
"phys " : " ", 2791 gld->gld_flags & GLD_PROM_SAP ? "sap " : " ", 2792 gld->gld_flags & GLD_PROM_MULT ? "multi" : " "); 2793 #endif 2794 2795 /* 2796 * The accept function differs depending on whether this is 2797 * a packet that we received from the wire or a loopback. 2798 */ 2799 if ((*acceptfunc)(gld, pktinfo)) { 2800 /* sap matches */ 2801 pktinfo->wasAccepted = 1; /* known protocol */ 2802 2803 if (!(*cansend)(gld->gld_qptr)) { 2804 /* 2805 * Upper stream is not accepting messages, i.e. 2806 * it is flow controlled, therefore we will 2807 * forgo sending the message up this stream. 2808 */ 2809 #ifdef GLD_DEBUG 2810 if (gld_debug & GLDETRACE) 2811 cmn_err(CE_WARN, 2812 "gld_sendup: canput failed"); 2813 #endif 2814 BUMP(vlan->gldv_stats, NULL, glds_blocked, 1); 2815 qenable(gld->gld_qptr); 2816 continue; 2817 } 2818 2819 /* 2820 * In order to avoid an extra dupmsg(), remember this 2821 * gld if this is the first eligible stream. 2822 */ 2823 if (*fgldp == NULL) { 2824 *fgldp = gld; 2825 continue; 2826 } 2827 2828 /* duplicate the packet for this stream */ 2829 nmp = dupmsg(mp); 2830 if (nmp == NULL) { 2831 BUMP(vlan->gldv_stats, NULL, 2832 glds_gldnorcvbuf, 1); 2833 #ifdef GLD_DEBUG 2834 if (gld_debug & GLDERRS) 2835 cmn_err(CE_WARN, 2836 "gld_sendup: dupmsg failed"); 2837 #endif 2838 /* couldn't get resources; drop it */ 2839 err = ENOMEM; 2840 break; 2841 } 2842 /* pass the message up the stream */ 2843 gld_passon(gld, nmp, pktinfo, send); 2844 } 2845 } 2846 return (err); 2847 } 2848 2849 /* 2850 * gld_sendup (macinfo, pktinfo, mp, acceptfunc) 2851 * called with an ethernet packet in an mblk; must decide whether 2852 * packet is for us and which streams to queue it to. 2853 */ 2854 static void 2855 gld_sendup(gld_mac_info_t *macinfo, pktinfo_t *pktinfo, 2856 mblk_t *mp, int (*acceptfunc)()) 2857 { 2858 gld_t *fgld = NULL; 2859 void (*send)(queue_t *qp, mblk_t *mp); 2860 int (*cansend)(queue_t *qp); 2861 gld_vlan_t *vlan0, *vlann = NULL; 2862 struct gld_stats *stats0, *stats = NULL; 2863 int err = 0; 2864 2865 #ifdef GLD_DEBUG 2866 if (gld_debug & GLDTRACE) 2867 cmn_err(CE_NOTE, "gld_sendup(%p, %p)", (void *)mp, 2868 (void *)macinfo); 2869 #endif 2870 2871 ASSERT(mp != NULL); 2872 ASSERT(macinfo != NULL); 2873 ASSERT(pktinfo != NULL); 2874 ASSERT(GLDM_LOCK_HELD(macinfo)); 2875 2876 /* 2877 * The tagged packets should also be looped back (transmit-side) 2878 * or sent up (receive-side) to VLAN 0 if VLAN 0 is set to 2879 * DL_PROMISC_SAP or there is any DLPI consumer bind to the 2880 * ETHERTYPE_VLAN SAP. The kstats of VLAN 0 needs to be updated 2881 * as well. 2882 */ 2883 stats0 = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->statistics; 2884 vlan0 = gld_find_vlan(macinfo, VLAN_VID_NONE); 2885 if (pktinfo->vid != VLAN_VID_NONE) { 2886 if ((vlan0 != NULL) && (vlan0->gldv_nvlan_sap == 0)) 2887 vlan0 = NULL; 2888 vlann = gld_find_vlan(macinfo, pktinfo->vid); 2889 if (vlann != NULL) 2890 stats = vlann->gldv_stats; 2891 } 2892 2893 ASSERT((vlan0 != NULL) || (vlann != NULL)); 2894 2895 /* 2896 * The "fast" in "GLDOPT_FAST_RECV" refers to the speed at which 2897 * gld_recv returns to the caller's interrupt routine. The total 2898 * network throughput would normally be lower when selecting this 2899 * option, because we putq the messages and process them later, 2900 * instead of sending them with putnext now. Some time critical 2901 * device might need this, so it's here but undocumented. 
2902 */ 2903 if (macinfo->gldm_options & GLDOPT_FAST_RECV) { 2904 send = (void (*)(queue_t *, mblk_t *))putq; 2905 cansend = canput; 2906 } else { 2907 send = (void (*)(queue_t *, mblk_t *))putnext; 2908 cansend = canputnext; 2909 } 2910 2911 /* 2912 * Send the packets for all eligible streams. 2913 */ 2914 if (vlan0 != NULL) { 2915 err = gld_sendup_vlan(vlan0, pktinfo, mp, acceptfunc, send, 2916 cansend, &fgld); 2917 } 2918 if ((err == 0) && (vlann != NULL)) { 2919 err = gld_sendup_vlan(vlann, pktinfo, mp, acceptfunc, send, 2920 cansend, &fgld); 2921 } 2922 2923 ASSERT(mp); 2924 /* send the original dup of the packet up the first stream found */ 2925 if (fgld) 2926 gld_passon(fgld, mp, pktinfo, send); 2927 else 2928 freemsg(mp); /* no streams matched */ 2929 2930 /* We do not count looped back packets */ 2931 if (acceptfunc == gld_paccept) 2932 return; /* transmit loopback case */ 2933 2934 if (pktinfo->isBroadcast) 2935 BUMP(stats0, stats, glds_brdcstrcv, 1); 2936 else if (pktinfo->isMulticast) 2937 BUMP(stats0, stats, glds_multircv, 1); 2938 2939 /* No stream accepted this packet */ 2940 if (!pktinfo->wasAccepted) 2941 BUMP(stats0, stats, glds_unknowns, 1); 2942 } 2943 2944 #define GLD_IS_PHYS(gld) \ 2945 (((gld_vlan_t *)gld->gld_vlan)->gldv_id == VLAN_VID_NONE) 2946 2947 /* 2948 * A packet matches a stream if: 2949 * The stream's VLAN id is the same as the one in the packet. 2950 * and the stream accepts EtherType encoded packets and the type matches 2951 * or the stream accepts LLC packets and the packet is an LLC packet 2952 */ 2953 #define MATCH(stream, pktinfo) \ 2954 ((((gld_vlan_t *)stream->gld_vlan)->gldv_id == pktinfo->vid) && \ 2955 ((stream->gld_ethertype && stream->gld_sap == pktinfo->ethertype) || \ 2956 (!stream->gld_ethertype && pktinfo->isLLC))) 2957 2958 /* 2959 * This function validates a packet for sending up a particular 2960 * stream. The message header has been parsed and its characteristic 2961 * are recorded in the pktinfo data structure. The streams stack info 2962 * are presented in gld data structures. 2963 */ 2964 static int 2965 gld_accept(gld_t *gld, pktinfo_t *pktinfo) 2966 { 2967 /* 2968 * if there is no match do not bother checking further. 2969 * Note that it is okay to examine gld_vlan because 2970 * macinfo->gldm_lock is held. 2971 * 2972 * Because all tagged packets have SAP value ETHERTYPE_VLAN, 2973 * these packets will pass the SAP filter check if the stream 2974 * is a ETHERTYPE_VLAN listener. 2975 */ 2976 if ((!MATCH(gld, pktinfo) && !(gld->gld_flags & GLD_PROM_SAP) && 2977 !(GLD_IS_PHYS(gld) && gld->gld_sap == ETHERTYPE_VLAN && 2978 pktinfo->isTagged))) 2979 return (0); 2980 2981 /* 2982 * We don't accept any packet from the hardware if we originated it. 2983 * (Contrast gld_paccept, the send-loopback accept function.) 2984 */ 2985 if (pktinfo->isLooped) 2986 return (0); 2987 2988 /* 2989 * If the packet is broadcast or sent to us directly we will accept it. 2990 * Also we will accept multicast packets requested by the stream. 2991 */ 2992 if (pktinfo->isForMe || pktinfo->isBroadcast || 2993 gld_mcmatch(gld, pktinfo)) 2994 return (1); 2995 2996 /* 2997 * Finally, accept anything else if we're in promiscuous mode 2998 */ 2999 if (gld->gld_flags & GLD_PROM_PHYS) 3000 return (1); 3001 3002 return (0); 3003 } 3004 3005 /* 3006 * Return TRUE if the given multicast address is one 3007 * of those that this particular Stream is interested in. 
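 * For example (a sketch of the checks below): a stream that has done
 * a DL_ENABMULTI_REQ for 01:00:5e:00:00:01 has that address in its
 * gld_mcast[] table, so gld_mcmatch() returns nonzero for packets
 * addressed to it, while a stream with GLD_PROM_MULT set
 * (DL_PROMISC_MULTI) accepts every multicast without consulting the
 * table at all.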
3008 */ 3009 static int 3010 gld_mcmatch(gld_t *gld, pktinfo_t *pktinfo) 3011 { 3012 /* 3013 * Return FALSE if not a multicast address. 3014 */ 3015 if (!pktinfo->isMulticast) 3016 return (0); 3017 3018 /* 3019 * Check if all multicasts have been enabled for this Stream 3020 */ 3021 if (gld->gld_flags & GLD_PROM_MULT) 3022 return (1); 3023 3024 /* 3025 * Return FALSE if no multicast addresses enabled for this Stream. 3026 */ 3027 if (!gld->gld_mcast) 3028 return (0); 3029 3030 /* 3031 * Otherwise, look for it in the table. 3032 */ 3033 return (gld_multicast(pktinfo->dhost, gld)); 3034 } 3035 3036 /* 3037 * gld_multicast determines if the address is a multicast address for 3038 * this stream. 3039 */ 3040 static int 3041 gld_multicast(unsigned char *macaddr, gld_t *gld) 3042 { 3043 int i; 3044 3045 ASSERT(GLDM_LOCK_HELD(gld->gld_mac_info)); 3046 3047 if (!gld->gld_mcast) 3048 return (0); 3049 3050 for (i = 0; i < gld->gld_multicnt; i++) { 3051 if (gld->gld_mcast[i]) { 3052 ASSERT(gld->gld_mcast[i]->gldm_refcnt); 3053 if (mac_eq(gld->gld_mcast[i]->gldm_addr, macaddr, 3054 gld->gld_mac_info->gldm_addrlen)) 3055 return (1); 3056 } 3057 } 3058 3059 return (0); 3060 } 3061 3062 /* 3063 * accept function for looped back packets 3064 */ 3065 static int 3066 gld_paccept(gld_t *gld, pktinfo_t *pktinfo) 3067 { 3068 /* 3069 * Note that it is okay to examine gld_vlan because macinfo->gldm_lock 3070 * is held. 3071 * 3072 * If a stream is a ETHERTYPE_VLAN listener, it must 3073 * accept all tagged packets as those packets have SAP value 3074 * ETHERTYPE_VLAN. 3075 */ 3076 return (gld->gld_flags & GLD_PROM_PHYS && 3077 (MATCH(gld, pktinfo) || gld->gld_flags & GLD_PROM_SAP || 3078 (GLD_IS_PHYS(gld) && gld->gld_sap == ETHERTYPE_VLAN && 3079 pktinfo->isTagged))); 3080 3081 } 3082 3083 static void 3084 gld_passon(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo, 3085 void (*send)(queue_t *qp, mblk_t *mp)) 3086 { 3087 boolean_t is_phys = GLD_IS_PHYS(gld); 3088 int skiplen; 3089 boolean_t addtag = B_FALSE; 3090 uint32_t vtag = 0; 3091 3092 #ifdef GLD_DEBUG 3093 if (gld_debug & GLDTRACE) 3094 cmn_err(CE_NOTE, "gld_passon(%p, %p, %p)", (void *)gld, 3095 (void *)mp, (void *)pktinfo); 3096 3097 if ((gld_debug & GLDRECV) && (!(gld_debug & GLDNOBR) || 3098 (!pktinfo->isBroadcast && !pktinfo->isMulticast))) 3099 cmn_err(CE_NOTE, "gld_passon: q: %p mblk: %p minor: %d sap: %x", 3100 (void *)gld->gld_qptr->q_next, (void *)mp, gld->gld_minor, 3101 gld->gld_sap); 3102 #endif 3103 /* 3104 * Figure out how much of the packet header to throw away. 3105 * 3106 * Normal DLPI (non RAW/FAST) streams also want the 3107 * DL_UNITDATA_IND M_PROTO message block prepended to the M_DATA. 
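 * Roughly, the three delivery formats produced below are:
 *
 *	RAW	M_DATA carrying the whole MAC frame (skiplen == 0),
 *		re-tagged below when necessary
 *	FAST	unicast: M_DATA with the MAC (and any extra) header
 *		stripped; broadcast/multicast still get the
 *		DL_UNITDATA_IND form
 *	other	DL_UNITDATA_IND M_PROTO built by gld_addudind(),
 *		linked ahead of the stripped M_DATA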
3108 */ 3109 if (gld->gld_flags & GLD_RAW) { 3110 /* 3111 * The packet will be tagged in the following cases: 3112 * - if priority is not 0 3113 * - a tagged packet sent on a physical link 3114 */ 3115 if ((pktinfo->isTagged && is_phys) || (pktinfo->user_pri != 0)) 3116 addtag = B_TRUE; 3117 skiplen = 0; 3118 } else { 3119 /* 3120 * The packet will be tagged if it meets all below conditions: 3121 * - this is a physical stream 3122 * - this packet is tagged packet 3123 * - the stream is either a DL_PROMISC_SAP listener or a 3124 * ETHERTYPE_VLAN listener 3125 */ 3126 if (is_phys && pktinfo->isTagged && 3127 ((gld->gld_sap == ETHERTYPE_VLAN) || 3128 (gld->gld_flags & GLD_PROM_SAP))) { 3129 addtag = B_TRUE; 3130 } 3131 3132 skiplen = pktinfo->macLen; /* skip mac header */ 3133 if (gld->gld_ethertype) 3134 skiplen += pktinfo->hdrLen; /* skip any extra */ 3135 } 3136 if (skiplen >= pktinfo->pktLen) { 3137 /* 3138 * If the interpreter did its job right, then it cannot be 3139 * asking us to skip more bytes than are in the packet! 3140 * However, there could be zero data bytes left after the 3141 * amount to skip. DLPI specifies that passed M_DATA blocks 3142 * should contain at least one byte of data, so if we have 3143 * none we just drop it. 3144 */ 3145 ASSERT(!(skiplen > pktinfo->pktLen)); 3146 freemsg(mp); 3147 return; 3148 } 3149 3150 if (addtag) { 3151 mblk_t *savemp = mp; 3152 3153 vtag = GLD_MAKE_VTAG(pktinfo->user_pri, pktinfo->cfi, 3154 is_phys ? pktinfo->vid : VLAN_VID_NONE); 3155 if ((mp = gld_insert_vtag_ether(mp, vtag)) == NULL) { 3156 freemsg(savemp); 3157 return; 3158 } 3159 } 3160 3161 /* 3162 * Skip over the header(s), taking care to possibly handle message 3163 * fragments shorter than the amount we need to skip. Hopefully 3164 * the driver will put the entire packet, or at least the entire 3165 * header, into a single message block. But we handle it if not. 
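 * A worked example with hypothetical sizes: if skiplen is 18 and the
 * header is split across mblks of 14 and 50 bytes, the loop below
 * frees the 14-byte block (skiplen becomes 4), stops because 4 < 50,
 * and the b_rptr += skiplen that follows skips the remaining 4 header
 * bytes in the second block.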
3166 */ 3167 while (skiplen >= MBLKL(mp)) { 3168 mblk_t *savemp = mp; 3169 skiplen -= MBLKL(mp); 3170 mp = mp->b_cont; 3171 ASSERT(mp != NULL); /* because skiplen < pktinfo->pktLen */ 3172 freeb(savemp); 3173 } 3174 mp->b_rptr += skiplen; 3175 3176 /* Add M_PROTO if necessary, and pass upstream */ 3177 if (((gld->gld_flags & GLD_FAST) && !pktinfo->isMulticast && 3178 !pktinfo->isBroadcast) || (gld->gld_flags & GLD_RAW)) { 3179 /* RAW/FAST: just send up the M_DATA */ 3180 (*send)(gld->gld_qptr, mp); 3181 } else { 3182 /* everybody else wants to see a unitdata_ind structure */ 3183 mp = gld_addudind(gld, mp, pktinfo, addtag); 3184 if (mp) 3185 (*send)(gld->gld_qptr, mp); 3186 /* if it failed, gld_addudind already bumped statistic */ 3187 } 3188 } 3189 3190 /* 3191 * gld_addudind(gld, mp, pktinfo) 3192 * format a DL_UNITDATA_IND message to be sent upstream to the user 3193 */ 3194 static mblk_t * 3195 gld_addudind(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo, boolean_t tagged) 3196 { 3197 gld_mac_info_t *macinfo = gld->gld_mac_info; 3198 gld_vlan_t *vlan = (gld_vlan_t *)gld->gld_vlan; 3199 dl_unitdata_ind_t *dludindp; 3200 mblk_t *nmp; 3201 int size; 3202 int type; 3203 3204 #ifdef GLD_DEBUG 3205 if (gld_debug & GLDTRACE) 3206 cmn_err(CE_NOTE, "gld_addudind(%p, %p, %p)", (void *)gld, 3207 (void *)mp, (void *)pktinfo); 3208 #endif 3209 ASSERT(macinfo != NULL); 3210 3211 /* 3212 * Allocate the DL_UNITDATA_IND M_PROTO header, if allocation fails 3213 * might as well discard since we can't go further 3214 */ 3215 size = sizeof (dl_unitdata_ind_t) + 3216 2 * (macinfo->gldm_addrlen + abs(macinfo->gldm_saplen)); 3217 if ((nmp = allocb(size, BPRI_MED)) == NULL) { 3218 freemsg(mp); 3219 BUMP(vlan->gldv_stats, NULL, glds_gldnorcvbuf, 1); 3220 #ifdef GLD_DEBUG 3221 if (gld_debug & GLDERRS) 3222 cmn_err(CE_WARN, 3223 "gld_addudind: allocb failed"); 3224 #endif 3225 return ((mblk_t *)NULL); 3226 } 3227 DB_TYPE(nmp) = M_PROTO; 3228 nmp->b_rptr = nmp->b_datap->db_lim - size; 3229 3230 if (tagged) 3231 type = ETHERTYPE_VLAN; 3232 else 3233 type = (gld->gld_ethertype) ? pktinfo->ethertype : 0; 3234 3235 3236 /* 3237 * now setup the DL_UNITDATA_IND header 3238 * 3239 * XXX This looks broken if the saps aren't two bytes. 
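 * For reference, the M_PROTO block built below is laid out like this
 * (sketch for the usual gldm_saplen == -2 case):
 *
 *	dl_unitdata_ind_t	fixed header
 *	dest MAC address	gldm_addrlen bytes
 *	dest SAP		2 bytes (ethertype, or ETHERTYPE_VLAN)
 *	src MAC address		gldm_addrlen bytes
 *	src SAP			2 bytes
 *
 * with dl_dest_addr_offset and dl_src_addr_offset pointing at the two
 * address+SAP pairs.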
3240 */ 3241 dludindp = (dl_unitdata_ind_t *)nmp->b_rptr; 3242 dludindp->dl_primitive = DL_UNITDATA_IND; 3243 dludindp->dl_src_addr_length = 3244 dludindp->dl_dest_addr_length = macinfo->gldm_addrlen + 3245 abs(macinfo->gldm_saplen); 3246 dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t); 3247 dludindp->dl_src_addr_offset = dludindp->dl_dest_addr_offset + 3248 dludindp->dl_dest_addr_length; 3249 3250 dludindp->dl_group_address = (pktinfo->isMulticast || 3251 pktinfo->isBroadcast); 3252 3253 nmp->b_wptr = nmp->b_rptr + dludindp->dl_dest_addr_offset; 3254 3255 mac_copy(pktinfo->dhost, nmp->b_wptr, macinfo->gldm_addrlen); 3256 nmp->b_wptr += macinfo->gldm_addrlen; 3257 3258 ASSERT(macinfo->gldm_saplen == -2); /* XXX following code assumes */ 3259 *(ushort_t *)(nmp->b_wptr) = type; 3260 nmp->b_wptr += abs(macinfo->gldm_saplen); 3261 3262 ASSERT(nmp->b_wptr == nmp->b_rptr + dludindp->dl_src_addr_offset); 3263 3264 mac_copy(pktinfo->shost, nmp->b_wptr, macinfo->gldm_addrlen); 3265 nmp->b_wptr += macinfo->gldm_addrlen; 3266 3267 *(ushort_t *)(nmp->b_wptr) = type; 3268 nmp->b_wptr += abs(macinfo->gldm_saplen); 3269 3270 if (pktinfo->nosource) 3271 dludindp->dl_src_addr_offset = dludindp->dl_src_addr_length = 0; 3272 linkb(nmp, mp); 3273 return (nmp); 3274 } 3275 3276 /* ======================================================= */ 3277 /* wsrv group: called from wsrv, single threaded per queue */ 3278 /* ======================================================= */ 3279 3280 /* 3281 * We go to some trouble to avoid taking the same lock during normal 3282 * transmit processing as we do during normal receive processing. 3283 * 3284 * Elements of the per-instance macinfo and per-stream gld_t structures 3285 * are for the most part protected by the GLDM_LOCK rwlock/mutex. 3286 * (Elements of the gld_mac_pvt_t structure are considered part of the 3287 * macinfo structure for purposes of this discussion). 3288 * 3289 * However, it is more complicated than that: 3290 * 3291 * Elements of the macinfo structure that are set before the macinfo 3292 * structure is added to its device list by gld_register(), and never 3293 * thereafter modified, are accessed without requiring taking the lock. 3294 * A similar rule applies to those elements of the gld_t structure that 3295 * are written by gld_open() before the stream is added to any list. 3296 * 3297 * Most other elements of the macinfo structure may only be read or 3298 * written while holding the maclock. 3299 * 3300 * Most writable elements of the gld_t structure are written only 3301 * within the single-threaded domain of wsrv() and subsidiaries. 3302 * (This domain includes open/close while qprocs are not on.) 3303 * The maclock need not be taken while within that domain 3304 * simply to read those elements. Writing to them, even within 3305 * that domain, or reading from it outside that domain, requires 3306 * holding the maclock. Exception: if the stream is not 3307 * presently attached to a PPA, there is no associated macinfo, 3308 * and no maclock need be taken. 3309 * 3310 * The curr_macaddr element of the mac private structure is also 3311 * protected by the GLDM_LOCK rwlock/mutex, like most other members 3312 * of that structure. However, there are a few instances in the 3313 * transmit path where we choose to forgo lock protection when 3314 * reading this variable. This is to avoid lock contention between 3315 * threads executing the DL_UNITDATA_REQ case and receive threads. 
3316 * In doing so we will take a small risk of a few corrupted packets 3317 * during the short and rare times when someone is changing the interface's 3318 * physical address. We consider the small cost in this rare case to be 3319 * worth the benefit of reduced lock contention under normal operating 3320 * conditions. The risk/cost is small because: 3321 * 1. there is no guarantee at this layer of uncorrupted delivery. 3322 * 2. the physaddr doesn't change very often - no performance hit. 3323 * 3. if the physaddr changes, other stuff is going to be screwed 3324 * up for a while anyway, while other sites refigure ARP, etc., 3325 * so losing a couple of packets is the least of our worries. 3326 * 3327 * The list of streams associated with a macinfo is protected by 3328 * two locks: the per-macinfo maclock, and the per-major-device 3329 * gld_devlock. Both must be held to modify the list, but either 3330 * may be held to protect the list during reading/traversing. This 3331 * allows independent locking for multiple instances in the receive 3332 * path (using macinfo), while facilitating routines that must search 3333 * the entire set of streams associated with a major device, such as 3334 * gld_findminor(), gld_finddevinfo(), close(). The "nstreams" 3335 * macinfo element, and the gld_mac_info gld_t element, are similarly 3336 * protected, since they change at exactly the same time the macinfo 3337 * streams list does. 3338 * 3339 * The list of macinfo structures associated with a major device 3340 * structure is protected by the gld_devlock, as is the per-major 3341 * list of Style 2 streams in the DL_UNATTACHED state. 3342 * 3343 * The list of major devices is kept on a module-global list 3344 * gld_device_list, which has its own lock to protect the list. 3345 * 3346 * When it is necessary to hold more than one lock at a time, they 3347 * are acquired in this "outside in" order: 3348 * gld_device_list.gld_devlock 3349 * glddev->gld_devlock 3350 * GLDM_LOCK(macinfo) 3351 * 3352 * Finally, there are some "volatile" elements of the gld_t structure 3353 * used for synchronization between various routines that don't share 3354 * the same mutexes. See the routines for details. These are: 3355 * gld_xwait between gld_wsrv() and gld_sched() 3356 * gld_sched_ran between gld_wsrv() and gld_sched() 3357 * gld_in_unbind between gld_wput() and wsrv's gld_unbind() 3358 * gld_wput_count between gld_wput() and wsrv's gld_unbind() 3359 * gld_in_wsrv between gld_wput() and gld_wsrv() 3360 * (used in conjunction with q->q_first) 3361 */ 3362 3363 /* 3364 * gld_ioctl (q, mp) 3365 * handles all ioctl requests passed downstream. This routine is 3366 * passed a pointer to the message block with the ioctl request in it, and a 3367 * pointer to the queue so it can respond to the ioctl request with an ack. 3368 */ 3369 int 3370 gld_ioctl(queue_t *q, mblk_t *mp) 3371 { 3372 struct iocblk *iocp; 3373 gld_t *gld; 3374 gld_mac_info_t *macinfo; 3375 3376 #ifdef GLD_DEBUG 3377 if (gld_debug & GLDTRACE) 3378 cmn_err(CE_NOTE, "gld_ioctl(%p %p)", (void *)q, (void *)mp); 3379 #endif 3380 gld = (gld_t *)q->q_ptr; 3381 iocp = (struct iocblk *)mp->b_rptr; 3382 switch (iocp->ioc_cmd) { 3383 case DLIOCRAW: /* raw M_DATA mode */ 3384 gld->gld_flags |= GLD_RAW; 3385 DB_TYPE(mp) = M_IOCACK; 3386 qreply(q, mp); 3387 break; 3388 3389 case DL_IOC_HDR_INFO: /* fastpath */ 3390 /* 3391 * DL_IOC_HDR_INFO should only come from IP. The one 3392 * initiated from user-land should not be allowed.
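 * As a sketch, the ioctl payload that gld_fastpath() below expects is
 * simply a DL_UNITDATA_REQ naming the destination (illustrative only):
 *
 *	dl_unitdata_req_t *dlu = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
 *	dlu->dl_primitive = DL_UNITDATA_REQ;
 *	dlu->dl_dest_addr_length = macinfo->gldm_addrlen +
 *	    abs(macinfo->gldm_saplen);
 *	dlu->dl_dest_addr_offset = sizeof (dl_unitdata_req_t);
 *	... followed by the MAC address and SAP ...
 *
 * and the M_IOCACK comes back with the prebuilt MAC header mblk
 * linked after it.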
3393 */ 3394 if ((gld_global_options & GLD_OPT_NO_FASTPATH) || 3395 (iocp->ioc_cr != kcred)) { 3396 miocnak(q, mp, 0, EINVAL); 3397 break; 3398 } 3399 gld_fastpath(gld, q, mp); 3400 break; 3401 3402 case DLIOCMARGININFO: { /* margin size */ 3403 int err; 3404 3405 if ((macinfo = gld->gld_mac_info) == NULL) { 3406 miocnak(q, mp, 0, EINVAL); 3407 break; 3408 } 3409 3410 if ((err = miocpullup(mp, sizeof (uint32_t))) != 0) { 3411 miocnak(q, mp, 0, err); 3412 break; 3413 } 3414 3415 *((uint32_t *)mp->b_cont->b_rptr) = macinfo->gldm_margin; 3416 miocack(q, mp, sizeof (uint32_t), 0); 3417 break; 3418 } 3419 default: 3420 macinfo = gld->gld_mac_info; 3421 if (macinfo == NULL || macinfo->gldm_ioctl == NULL) { 3422 miocnak(q, mp, 0, EINVAL); 3423 break; 3424 } 3425 3426 GLDM_LOCK(macinfo, RW_WRITER); 3427 (void) (*macinfo->gldm_ioctl) (macinfo, q, mp); 3428 GLDM_UNLOCK(macinfo); 3429 break; 3430 } 3431 return (0); 3432 } 3433 3434 /* 3435 * Since the rules for "fastpath" mode don't seem to be documented 3436 * anywhere, I will describe GLD's rules for fastpath users here: 3437 * 3438 * Once in this mode you remain there until close. 3439 * If you unbind/rebind you should get a new header using DL_IOC_HDR_INFO. 3440 * You must be bound (DL_IDLE) to transmit. 3441 * There are other rules not listed above. 3442 */ 3443 static void 3444 gld_fastpath(gld_t *gld, queue_t *q, mblk_t *mp) 3445 { 3446 gld_interface_t *ifp; 3447 gld_mac_info_t *macinfo; 3448 dl_unitdata_req_t *dludp; 3449 mblk_t *nmp; 3450 t_scalar_t off, len; 3451 uint_t maclen; 3452 int error; 3453 3454 if (gld->gld_state != DL_IDLE) { 3455 miocnak(q, mp, 0, EINVAL); 3456 return; 3457 } 3458 3459 macinfo = gld->gld_mac_info; 3460 ASSERT(macinfo != NULL); 3461 maclen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen); 3462 3463 error = miocpullup(mp, sizeof (dl_unitdata_req_t) + maclen); 3464 if (error != 0) { 3465 miocnak(q, mp, 0, error); 3466 return; 3467 } 3468 3469 dludp = (dl_unitdata_req_t *)mp->b_cont->b_rptr; 3470 off = dludp->dl_dest_addr_offset; 3471 len = dludp->dl_dest_addr_length; 3472 if (dludp->dl_primitive != DL_UNITDATA_REQ || 3473 !MBLKIN(mp->b_cont, off, len) || len != maclen) { 3474 miocnak(q, mp, 0, EINVAL); 3475 return; 3476 } 3477 3478 /* 3479 * We take the fastpath request as a declaration that they will accept 3480 * M_DATA messages from us, whether or not we are willing to accept 3481 * M_DATA from them. This allows us to have fastpath in one direction 3482 * (flow upstream) even on media with Source Routing, where we are 3483 * unable to provide a fixed MAC header to be prepended to downstream 3484 * flowing packets. So we set GLD_FAST whether or not we decide to 3485 * allow them to send M_DATA down to us. 3486 */ 3487 GLDM_LOCK(macinfo, RW_WRITER); 3488 gld->gld_flags |= GLD_FAST; 3489 GLDM_UNLOCK(macinfo); 3490 3491 ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep; 3492 3493 /* This will fail for Source Routing media */ 3494 /* Also on Ethernet on 802.2 SAPs */ 3495 if ((nmp = (*ifp->mkfastpath)(gld, mp)) == NULL) { 3496 miocnak(q, mp, 0, ENOMEM); 3497 return; 3498 } 3499 3500 /* 3501 * Link new mblk in after the "request" mblks. 3502 */ 3503 linkb(mp, nmp); 3504 miocack(q, mp, msgdsize(mp->b_cont), 0); 3505 } 3506 3507 /* 3508 * gld_cmds (q, mp) 3509 * process the DL commands as defined in dlpi.h 3510 * note that the primitives return status which is passed back 3511 * to the service procedure. 
If the value is GLDE_RETRY, then 3512 * it is assumed that processing must stop and the primitive has 3513 * been put back onto the queue. If the value is any other error, 3514 * then an error ack is generated by the service procedure. 3515 */ 3516 static int 3517 gld_cmds(queue_t *q, mblk_t *mp) 3518 { 3519 union DL_primitives *dlp = (union DL_primitives *)mp->b_rptr; 3520 gld_t *gld = (gld_t *)(q->q_ptr); 3521 int result = DL_BADPRIM; 3522 int mblkl = MBLKL(mp); 3523 t_uscalar_t dlreq; 3524 3525 /* Make sure we have at least dlp->dl_primitive */ 3526 if (mblkl < sizeof (dlp->dl_primitive)) 3527 return (DL_BADPRIM); 3528 3529 dlreq = dlp->dl_primitive; 3530 #ifdef GLD_DEBUG 3531 if (gld_debug & GLDTRACE) 3532 cmn_err(CE_NOTE, 3533 "gld_cmds(%p, %p):dlp=%p, dlp->dl_primitive=%d", 3534 (void *)q, (void *)mp, (void *)dlp, dlreq); 3535 #endif 3536 3537 switch (dlreq) { 3538 case DL_UDQOS_REQ: 3539 if (mblkl < DL_UDQOS_REQ_SIZE) 3540 break; 3541 result = gld_udqos(q, mp); 3542 break; 3543 3544 case DL_BIND_REQ: 3545 if (mblkl < DL_BIND_REQ_SIZE) 3546 break; 3547 result = gld_bind(q, mp); 3548 break; 3549 3550 case DL_UNBIND_REQ: 3551 if (mblkl < DL_UNBIND_REQ_SIZE) 3552 break; 3553 result = gld_unbind(q, mp); 3554 break; 3555 3556 case DL_UNITDATA_REQ: 3557 if (mblkl < DL_UNITDATA_REQ_SIZE) 3558 break; 3559 result = gld_unitdata(q, mp); 3560 break; 3561 3562 case DL_INFO_REQ: 3563 if (mblkl < DL_INFO_REQ_SIZE) 3564 break; 3565 result = gld_inforeq(q, mp); 3566 break; 3567 3568 case DL_ATTACH_REQ: 3569 if (mblkl < DL_ATTACH_REQ_SIZE) 3570 break; 3571 if (gld->gld_style == DL_STYLE2) 3572 result = gldattach(q, mp); 3573 else 3574 result = DL_NOTSUPPORTED; 3575 break; 3576 3577 case DL_DETACH_REQ: 3578 if (mblkl < DL_DETACH_REQ_SIZE) 3579 break; 3580 if (gld->gld_style == DL_STYLE2) 3581 result = gldunattach(q, mp); 3582 else 3583 result = DL_NOTSUPPORTED; 3584 break; 3585 3586 case DL_ENABMULTI_REQ: 3587 if (mblkl < DL_ENABMULTI_REQ_SIZE) 3588 break; 3589 result = gld_enable_multi(q, mp); 3590 break; 3591 3592 case DL_DISABMULTI_REQ: 3593 if (mblkl < DL_DISABMULTI_REQ_SIZE) 3594 break; 3595 result = gld_disable_multi(q, mp); 3596 break; 3597 3598 case DL_PHYS_ADDR_REQ: 3599 if (mblkl < DL_PHYS_ADDR_REQ_SIZE) 3600 break; 3601 result = gld_physaddr(q, mp); 3602 break; 3603 3604 case DL_SET_PHYS_ADDR_REQ: 3605 if (mblkl < DL_SET_PHYS_ADDR_REQ_SIZE) 3606 break; 3607 result = gld_setaddr(q, mp); 3608 break; 3609 3610 case DL_PROMISCON_REQ: 3611 if (mblkl < DL_PROMISCON_REQ_SIZE) 3612 break; 3613 result = gld_promisc(q, mp, dlreq, B_TRUE); 3614 break; 3615 3616 case DL_PROMISCOFF_REQ: 3617 if (mblkl < DL_PROMISCOFF_REQ_SIZE) 3618 break; 3619 result = gld_promisc(q, mp, dlreq, B_FALSE); 3620 break; 3621 3622 case DL_GET_STATISTICS_REQ: 3623 if (mblkl < DL_GET_STATISTICS_REQ_SIZE) 3624 break; 3625 result = gld_get_statistics(q, mp); 3626 break; 3627 3628 case DL_CAPABILITY_REQ: 3629 if (mblkl < DL_CAPABILITY_REQ_SIZE) 3630 break; 3631 result = gld_cap(q, mp); 3632 break; 3633 3634 case DL_NOTIFY_REQ: 3635 if (mblkl < DL_NOTIFY_REQ_SIZE) 3636 break; 3637 result = gld_notify_req(q, mp); 3638 break; 3639 3640 case DL_XID_REQ: 3641 case DL_XID_RES: 3642 case DL_TEST_REQ: 3643 case DL_TEST_RES: 3644 case DL_CONTROL_REQ: 3645 case DL_PASSIVE_REQ: 3646 result = DL_NOTSUPPORTED; 3647 break; 3648 3649 default: 3650 #ifdef GLD_DEBUG 3651 if (gld_debug & GLDERRS) 3652 cmn_err(CE_WARN, 3653 "gld_cmds: unknown M_PROTO message: %d", 3654 dlreq); 3655 #endif 3656 result = DL_BADPRIM; 3657 } 3658 3659 return (result); 
3660 } 3661 3662 static int 3663 gld_cap(queue_t *q, mblk_t *mp) 3664 { 3665 gld_t *gld = (gld_t *)q->q_ptr; 3666 dl_capability_req_t *dlp = (dl_capability_req_t *)mp->b_rptr; 3667 3668 if (gld->gld_state == DL_UNATTACHED) 3669 return (DL_OUTSTATE); 3670 3671 if (dlp->dl_sub_length == 0) 3672 return (gld_cap_ack(q, mp)); 3673 3674 return (gld_cap_enable(q, mp)); 3675 } 3676 3677 static int 3678 gld_cap_ack(queue_t *q, mblk_t *mp) 3679 { 3680 gld_t *gld = (gld_t *)q->q_ptr; 3681 gld_mac_info_t *macinfo = gld->gld_mac_info; 3682 gld_interface_t *ifp; 3683 dl_capability_ack_t *dlap; 3684 dl_capability_sub_t *dlsp; 3685 size_t size = sizeof (dl_capability_ack_t); 3686 size_t subsize = 0; 3687 3688 ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep; 3689 3690 if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY) 3691 subsize += sizeof (dl_capability_sub_t) + 3692 sizeof (dl_capab_hcksum_t); 3693 if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY) 3694 subsize += sizeof (dl_capability_sub_t) + 3695 sizeof (dl_capab_zerocopy_t); 3696 if (macinfo->gldm_options & GLDOPT_MDT) 3697 subsize += (sizeof (dl_capability_sub_t) + 3698 sizeof (dl_capab_mdt_t)); 3699 3700 if ((mp = mexchange(q, mp, size + subsize, M_PROTO, 3701 DL_CAPABILITY_ACK)) == NULL) 3702 return (GLDE_OK); 3703 3704 dlap = (dl_capability_ack_t *)mp->b_rptr; 3705 dlap->dl_sub_offset = 0; 3706 if ((dlap->dl_sub_length = subsize) != 0) 3707 dlap->dl_sub_offset = sizeof (dl_capability_ack_t); 3708 dlsp = (dl_capability_sub_t *)&dlap[1]; 3709 3710 if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY) { 3711 dl_capab_hcksum_t *dlhp = (dl_capab_hcksum_t *)&dlsp[1]; 3712 3713 dlsp->dl_cap = DL_CAPAB_HCKSUM; 3714 dlsp->dl_length = sizeof (dl_capab_hcksum_t); 3715 3716 dlhp->hcksum_version = HCKSUM_VERSION_1; 3717 3718 dlhp->hcksum_txflags = 0; 3719 if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_PARTIAL) 3720 dlhp->hcksum_txflags |= HCKSUM_INET_PARTIAL; 3721 if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V4) 3722 dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V4; 3723 if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V6) 3724 dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V6; 3725 if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_IPHDR) 3726 dlhp->hcksum_txflags |= HCKSUM_IPHDRCKSUM; 3727 3728 dlcapabsetqid(&(dlhp->hcksum_mid), RD(q)); 3729 dlsp = (dl_capability_sub_t *)&dlhp[1]; 3730 } 3731 3732 if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY) { 3733 dl_capab_zerocopy_t *dlzp = (dl_capab_zerocopy_t *)&dlsp[1]; 3734 3735 dlsp->dl_cap = DL_CAPAB_ZEROCOPY; 3736 dlsp->dl_length = sizeof (dl_capab_zerocopy_t); 3737 dlzp->zerocopy_version = ZEROCOPY_VERSION_1; 3738 dlzp->zerocopy_flags = DL_CAPAB_VMSAFE_MEM; 3739 3740 dlcapabsetqid(&(dlzp->zerocopy_mid), RD(q)); 3741 dlsp = (dl_capability_sub_t *)&dlzp[1]; 3742 } 3743 3744 if (macinfo->gldm_options & GLDOPT_MDT) { 3745 dl_capab_mdt_t *dlmp = (dl_capab_mdt_t *)&dlsp[1]; 3746 3747 dlsp->dl_cap = DL_CAPAB_MDT; 3748 dlsp->dl_length = sizeof (dl_capab_mdt_t); 3749 3750 dlmp->mdt_version = MDT_VERSION_2; 3751 dlmp->mdt_max_pld = macinfo->gldm_mdt_segs; 3752 dlmp->mdt_span_limit = macinfo->gldm_mdt_sgl; 3753 dlcapabsetqid(&dlmp->mdt_mid, OTHERQ(q)); 3754 dlmp->mdt_flags = DL_CAPAB_MDT_ENABLE; 3755 dlmp->mdt_hdr_head = ifp->hdr_size; 3756 dlmp->mdt_hdr_tail = 0; 3757 } 3758 3759 qreply(q, mp); 3760 return (GLDE_OK); 3761 } 3762 3763 static int 3764 gld_cap_enable(queue_t *q, mblk_t *mp) 3765 { 3766 dl_capability_req_t *dlp; 3767 dl_capability_sub_t *dlsp; 3768 dl_capab_hcksum_t *dlhp; 3769 offset_t off; 3770 
size_t len; 3771 size_t size; 3772 offset_t end; 3773 3774 dlp = (dl_capability_req_t *)mp->b_rptr; 3775 dlp->dl_primitive = DL_CAPABILITY_ACK; 3776 3777 off = dlp->dl_sub_offset; 3778 len = dlp->dl_sub_length; 3779 3780 if (!MBLKIN(mp, off, len)) 3781 return (DL_BADPRIM); 3782 3783 end = off + len; 3784 while (off < end) { 3785 dlsp = (dl_capability_sub_t *)(mp->b_rptr + off); 3786 size = sizeof (dl_capability_sub_t) + dlsp->dl_length; 3787 if (off + size > end) 3788 return (DL_BADPRIM); 3789 3790 switch (dlsp->dl_cap) { 3791 case DL_CAPAB_HCKSUM: 3792 dlhp = (dl_capab_hcksum_t *)&dlsp[1]; 3793 /* nothing useful we can do with the contents */ 3794 dlcapabsetqid(&(dlhp->hcksum_mid), RD(q)); 3795 break; 3796 default: 3797 break; 3798 } 3799 3800 off += size; 3801 } 3802 3803 qreply(q, mp); 3804 return (GLDE_OK); 3805 } 3806 3807 /* 3808 * Send a copy of the DL_NOTIFY_IND message <mp> to each stream that has 3809 * requested the specific <notification> that the message carries AND is 3810 * eligible and ready to receive the notification immediately. 3811 * 3812 * This routine ignores flow control. Notifications will be sent regardless. 3813 * 3814 * In all cases, the original message passed in is freed at the end of 3815 * the routine. 3816 */ 3817 static void 3818 gld_notify_qs(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t notification) 3819 { 3820 gld_mac_pvt_t *mac_pvt; 3821 gld_vlan_t *vlan; 3822 gld_t *gld; 3823 mblk_t *nmp; 3824 int i; 3825 3826 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo)); 3827 3828 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 3829 3830 /* 3831 * Search all the streams attached to this macinfo looking 3832 * for those eligible to receive the present notification. 3833 */ 3834 for (i = 0; i < VLAN_HASHSZ; i++) { 3835 for (vlan = mac_pvt->vlan_hash[i]; 3836 vlan != NULL; vlan = vlan->gldv_next) { 3837 for (gld = vlan->gldv_str_next; 3838 gld != (gld_t *)&vlan->gldv_str_next; 3839 gld = gld->gld_next) { 3840 ASSERT(gld->gld_qptr != NULL); 3841 ASSERT(gld->gld_state == DL_IDLE || 3842 gld->gld_state == DL_UNBOUND); 3843 ASSERT(gld->gld_mac_info == macinfo); 3844 3845 if (gld->gld_flags & GLD_STR_CLOSING) 3846 continue; /* not eligible - skip */ 3847 if (!(notification & gld->gld_notifications)) 3848 continue; /* not wanted - skip */ 3849 if ((nmp = dupmsg(mp)) == NULL) 3850 continue; /* can't copy - skip */ 3851 3852 /* 3853 * All OK; send dup'd notification up this 3854 * stream 3855 */ 3856 qreply(WR(gld->gld_qptr), nmp); 3857 } 3858 } 3859 } 3860 3861 /* 3862 * Drop the original message block now 3863 */ 3864 freemsg(mp); 3865 } 3866 3867 /* 3868 * For each (understood) bit in the <notifications> argument, contruct 3869 * a DL_NOTIFY_IND message and send it to the specified <q>, or to all 3870 * eligible queues if <q> is NULL. 3871 */ 3872 static void 3873 gld_notify_ind(gld_mac_info_t *macinfo, uint32_t notifications, queue_t *q) 3874 { 3875 gld_mac_pvt_t *mac_pvt; 3876 dl_notify_ind_t *dlnip; 3877 struct gld_stats *stats; 3878 mblk_t *mp; 3879 size_t size; 3880 uint32_t bit; 3881 3882 GLDM_LOCK(macinfo, RW_WRITER); 3883 3884 /* 3885 * The following cases shouldn't happen, but just in case the 3886 * MAC driver calls gld_linkstate() at an inappropriate time, we 3887 * check anyway ... 
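 * (i.e. before gld_register() has completed and marked the mac
 * GLD_MAC_READY, or after gld_unregister() has begun tearing it down)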
3888 */ 3889 if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) { 3890 GLDM_UNLOCK(macinfo); 3891 return; /* not ready yet */ 3892 } 3893 3894 if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) { 3895 GLDM_UNLOCK(macinfo); 3896 return; /* not ready anymore */ 3897 } 3898 3899 /* 3900 * Make sure the kstats are up to date, 'cos we use some of 3901 * the kstat values below, specifically the link speed ... 3902 */ 3903 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 3904 stats = mac_pvt->statistics; 3905 if (macinfo->gldm_get_stats) 3906 (void) (*macinfo->gldm_get_stats)(macinfo, stats); 3907 3908 for (bit = 1; notifications != 0; bit <<= 1) { 3909 if ((notifications & bit) == 0) 3910 continue; 3911 notifications &= ~bit; 3912 3913 size = DL_NOTIFY_IND_SIZE; 3914 if (bit == DL_NOTE_PHYS_ADDR) 3915 size += macinfo->gldm_addrlen; 3916 if ((mp = allocb(size, BPRI_MED)) == NULL) 3917 continue; 3918 3919 mp->b_datap->db_type = M_PROTO; 3920 mp->b_wptr = mp->b_rptr + size; 3921 dlnip = (dl_notify_ind_t *)mp->b_rptr; 3922 dlnip->dl_primitive = DL_NOTIFY_IND; 3923 dlnip->dl_notification = 0; 3924 dlnip->dl_data = 0; 3925 dlnip->dl_addr_length = 0; 3926 dlnip->dl_addr_offset = 0; 3927 3928 switch (bit) { 3929 case DL_NOTE_PROMISC_ON_PHYS: 3930 case DL_NOTE_PROMISC_OFF_PHYS: 3931 if (mac_pvt->nprom != 0) 3932 dlnip->dl_notification = bit; 3933 break; 3934 3935 case DL_NOTE_LINK_DOWN: 3936 if (macinfo->gldm_linkstate == GLD_LINKSTATE_DOWN) 3937 dlnip->dl_notification = bit; 3938 break; 3939 3940 case DL_NOTE_LINK_UP: 3941 if (macinfo->gldm_linkstate == GLD_LINKSTATE_UP) 3942 dlnip->dl_notification = bit; 3943 break; 3944 3945 case DL_NOTE_SPEED: 3946 /* 3947 * Conversion required here: 3948 * GLD keeps the speed in bit/s in a uint64 3949 * DLPI wants it in kb/s in a uint32 3950 * Fortunately this is still big enough for 10Gb/s! 
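 * For example, 10 Gb/s = 10,000,000,000 bit/s is reported as
 * 10,000,000 kb/s, far below the uint32 limit of 4,294,967,295;
 * the conversion would only overflow above roughly 4.29 Tb/s.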
3951 */ 3952 dlnip->dl_notification = bit; 3953 dlnip->dl_data = stats->glds_speed/1000ULL; 3954 break; 3955 3956 case DL_NOTE_PHYS_ADDR: 3957 dlnip->dl_notification = bit; 3958 dlnip->dl_data = DL_CURR_PHYS_ADDR; 3959 dlnip->dl_addr_offset = sizeof (dl_notify_ind_t); 3960 dlnip->dl_addr_length = macinfo->gldm_addrlen + 3961 abs(macinfo->gldm_saplen); 3962 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 3963 mac_copy(mac_pvt->curr_macaddr, 3964 mp->b_rptr + sizeof (dl_notify_ind_t), 3965 macinfo->gldm_addrlen); 3966 break; 3967 3968 default: 3969 break; 3970 } 3971 3972 if (dlnip->dl_notification == 0) 3973 freemsg(mp); 3974 else if (q != NULL) 3975 qreply(q, mp); 3976 else 3977 gld_notify_qs(macinfo, mp, bit); 3978 } 3979 3980 GLDM_UNLOCK(macinfo); 3981 } 3982 3983 /* 3984 * gld_notify_req - handle a DL_NOTIFY_REQ message 3985 */ 3986 static int 3987 gld_notify_req(queue_t *q, mblk_t *mp) 3988 { 3989 gld_t *gld = (gld_t *)q->q_ptr; 3990 gld_mac_info_t *macinfo; 3991 gld_mac_pvt_t *pvt; 3992 dl_notify_req_t *dlnrp; 3993 dl_notify_ack_t *dlnap; 3994 3995 ASSERT(gld != NULL); 3996 ASSERT(gld->gld_qptr == RD(q)); 3997 3998 dlnrp = (dl_notify_req_t *)mp->b_rptr; 3999 4000 #ifdef GLD_DEBUG 4001 if (gld_debug & GLDTRACE) 4002 cmn_err(CE_NOTE, "gld_notify_req(%p %p)", 4003 (void *)q, (void *)mp); 4004 #endif 4005 4006 if (gld->gld_state == DL_UNATTACHED) { 4007 #ifdef GLD_DEBUG 4008 if (gld_debug & GLDERRS) 4009 cmn_err(CE_NOTE, "gld_notify_req: wrong state (%d)", 4010 gld->gld_state); 4011 #endif 4012 return (DL_OUTSTATE); 4013 } 4014 4015 /* 4016 * Remember what notifications are required by this stream 4017 */ 4018 macinfo = gld->gld_mac_info; 4019 pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 4020 4021 gld->gld_notifications = dlnrp->dl_notifications & pvt->notifications; 4022 4023 /* 4024 * The return DL_NOTIFY_ACK carries the bitset of notifications 4025 * that this driver can provide, independently of which ones have 4026 * previously been or are now being requested. 4027 */ 4028 if ((mp = mexchange(q, mp, sizeof (dl_notify_ack_t), M_PCPROTO, 4029 DL_NOTIFY_ACK)) == NULL) 4030 return (DL_SYSERR); 4031 4032 dlnap = (dl_notify_ack_t *)mp->b_rptr; 4033 dlnap->dl_notifications = pvt->notifications; 4034 qreply(q, mp); 4035 4036 /* 4037 * A side effect of a DL_NOTIFY_REQ is that after the DL_NOTIFY_ACK 4038 * reply, the the requestor gets zero or more DL_NOTIFY_IND messages 4039 * that provide the current status. 4040 */ 4041 gld_notify_ind(macinfo, gld->gld_notifications, q); 4042 4043 return (GLDE_OK); 4044 } 4045 4046 /* 4047 * gld_linkstate() 4048 * Called by driver to tell GLD the state of the physical link. 
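 * (typically from the driver's link-change interrupt or polling
 * code, e.g. gld_linkstate(macinfo, GLD_LINKSTATE_UP) -- illustrative).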
4049 * As a side effect, sends a DL_NOTE_LINK_UP or DL_NOTE_LINK_DOWN 4050 * notification to each client that has previously requested such 4051 * notifications 4052 */ 4053 void 4054 gld_linkstate(gld_mac_info_t *macinfo, int32_t newstate) 4055 { 4056 uint32_t notification; 4057 4058 switch (newstate) { 4059 default: 4060 return; 4061 4062 case GLD_LINKSTATE_DOWN: 4063 notification = DL_NOTE_LINK_DOWN; 4064 break; 4065 4066 case GLD_LINKSTATE_UP: 4067 notification = DL_NOTE_LINK_UP | DL_NOTE_SPEED; 4068 break; 4069 4070 case GLD_LINKSTATE_UNKNOWN: 4071 notification = 0; 4072 break; 4073 } 4074 4075 GLDM_LOCK(macinfo, RW_WRITER); 4076 if (macinfo->gldm_linkstate == newstate) 4077 notification = 0; 4078 else 4079 macinfo->gldm_linkstate = newstate; 4080 GLDM_UNLOCK(macinfo); 4081 4082 if (notification) 4083 gld_notify_ind(macinfo, notification, NULL); 4084 } 4085 4086 /* 4087 * gld_udqos - set the current QoS parameters (priority only at the moment). 4088 */ 4089 static int 4090 gld_udqos(queue_t *q, mblk_t *mp) 4091 { 4092 dl_udqos_req_t *dlp; 4093 gld_t *gld = (gld_t *)q->q_ptr; 4094 int off; 4095 int len; 4096 dl_qos_cl_sel1_t *selp; 4097 4098 ASSERT(gld); 4099 ASSERT(gld->gld_qptr == RD(q)); 4100 4101 #ifdef GLD_DEBUG 4102 if (gld_debug & GLDTRACE) 4103 cmn_err(CE_NOTE, "gld_udqos(%p %p)", (void *)q, (void *)mp); 4104 #endif 4105 4106 if (gld->gld_state != DL_IDLE) { 4107 #ifdef GLD_DEBUG 4108 if (gld_debug & GLDERRS) 4109 cmn_err(CE_NOTE, "gld_udqos: wrong state (%d)", 4110 gld->gld_state); 4111 #endif 4112 return (DL_OUTSTATE); 4113 } 4114 4115 dlp = (dl_udqos_req_t *)mp->b_rptr; 4116 off = dlp->dl_qos_offset; 4117 len = dlp->dl_qos_length; 4118 4119 if (len != sizeof (dl_qos_cl_sel1_t) || !MBLKIN(mp, off, len)) 4120 return (DL_BADQOSTYPE); 4121 4122 selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + off); 4123 if (selp->dl_qos_type != DL_QOS_CL_SEL1) 4124 return (DL_BADQOSTYPE); 4125 4126 if (selp->dl_trans_delay != 0 && 4127 selp->dl_trans_delay != DL_QOS_DONT_CARE) 4128 return (DL_BADQOSPARAM); 4129 if (selp->dl_protection != 0 && 4130 selp->dl_protection != DL_QOS_DONT_CARE) 4131 return (DL_BADQOSPARAM); 4132 if (selp->dl_residual_error != 0 && 4133 selp->dl_residual_error != DL_QOS_DONT_CARE) 4134 return (DL_BADQOSPARAM); 4135 if (selp->dl_priority < 0 || selp->dl_priority > 7) 4136 return (DL_BADQOSPARAM); 4137 4138 gld->gld_upri = selp->dl_priority; 4139 4140 dlokack(q, mp, DL_UDQOS_REQ); 4141 return (GLDE_OK); 4142 } 4143 4144 static mblk_t * 4145 gld_bindack(queue_t *q, mblk_t *mp) 4146 { 4147 gld_t *gld = (gld_t *)q->q_ptr; 4148 gld_mac_info_t *macinfo = gld->gld_mac_info; 4149 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 4150 dl_bind_ack_t *dlp; 4151 size_t size; 4152 t_uscalar_t addrlen; 4153 uchar_t *sapp; 4154 4155 addrlen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen); 4156 size = sizeof (dl_bind_ack_t) + addrlen; 4157 if ((mp = mexchange(q, mp, size, M_PCPROTO, DL_BIND_ACK)) == NULL) 4158 return (NULL); 4159 4160 dlp = (dl_bind_ack_t *)mp->b_rptr; 4161 dlp->dl_sap = gld->gld_sap; 4162 dlp->dl_addr_length = addrlen; 4163 dlp->dl_addr_offset = sizeof (dl_bind_ack_t); 4164 dlp->dl_max_conind = 0; 4165 dlp->dl_xidtest_flg = 0; 4166 4167 mac_copy(mac_pvt->curr_macaddr, (uchar_t *)&dlp[1], 4168 macinfo->gldm_addrlen); 4169 sapp = mp->b_rptr + dlp->dl_addr_offset + macinfo->gldm_addrlen; 4170 *(ushort_t *)sapp = gld->gld_sap; 4171 4172 return (mp); 4173 } 4174 4175 /* 4176 * gld_bind - determine if a SAP is already allocated and whether it is legal 4177 * to do 
the bind at this time 4178 */ 4179 static int 4180 gld_bind(queue_t *q, mblk_t *mp) 4181 { 4182 ulong_t sap; 4183 dl_bind_req_t *dlp; 4184 gld_t *gld = (gld_t *)q->q_ptr; 4185 gld_mac_info_t *macinfo = gld->gld_mac_info; 4186 4187 ASSERT(gld); 4188 ASSERT(gld->gld_qptr == RD(q)); 4189 4190 #ifdef GLD_DEBUG 4191 if (gld_debug & GLDTRACE) 4192 cmn_err(CE_NOTE, "gld_bind(%p %p)", (void *)q, (void *)mp); 4193 #endif 4194 4195 dlp = (dl_bind_req_t *)mp->b_rptr; 4196 sap = dlp->dl_sap; 4197 4198 #ifdef GLD_DEBUG 4199 if (gld_debug & GLDPROT) 4200 cmn_err(CE_NOTE, "gld_bind: lsap=%lx", sap); 4201 #endif 4202 4203 if (gld->gld_state != DL_UNBOUND) { 4204 #ifdef GLD_DEBUG 4205 if (gld_debug & GLDERRS) 4206 cmn_err(CE_NOTE, "gld_bind: bound or not attached (%d)", 4207 gld->gld_state); 4208 #endif 4209 return (DL_OUTSTATE); 4210 } 4211 ASSERT(macinfo); 4212 4213 if (dlp->dl_service_mode != DL_CLDLS) { 4214 return (DL_UNSUPPORTED); 4215 } 4216 if (dlp->dl_xidtest_flg & (DL_AUTO_XID | DL_AUTO_TEST)) { 4217 return (DL_NOAUTO); 4218 } 4219 4220 /* 4221 * Check sap validity and decide whether this stream accepts 4222 * IEEE 802.2 (LLC) packets. 4223 */ 4224 if (sap > ETHERTYPE_MAX) 4225 return (DL_BADSAP); 4226 4227 /* 4228 * Decide whether the SAP value selects EtherType encoding/decoding. 4229 * For compatibility with monolithic ethernet drivers, the range of 4230 * SAP values is different for DL_ETHER media. 4231 */ 4232 switch (macinfo->gldm_type) { 4233 case DL_ETHER: 4234 gld->gld_ethertype = (sap > ETHERMTU); 4235 break; 4236 default: 4237 gld->gld_ethertype = (sap > GLD_MAX_802_SAP); 4238 break; 4239 } 4240 4241 /* if we get to here, then the SAP is legal enough */ 4242 GLDM_LOCK(macinfo, RW_WRITER); 4243 gld->gld_state = DL_IDLE; /* bound and ready */ 4244 gld->gld_sap = sap; 4245 if ((macinfo->gldm_type == DL_ETHER) && (sap == ETHERTYPE_VLAN)) 4246 ((gld_vlan_t *)gld->gld_vlan)->gldv_nvlan_sap++; 4247 gld_set_ipq(gld); 4248 4249 #ifdef GLD_DEBUG 4250 if (gld_debug & GLDPROT) 4251 cmn_err(CE_NOTE, "gld_bind: ok - sap = %d", gld->gld_sap); 4252 #endif 4253 4254 /* ACK the BIND */ 4255 mp = gld_bindack(q, mp); 4256 GLDM_UNLOCK(macinfo); 4257 4258 if (mp != NULL) { 4259 qreply(q, mp); 4260 return (GLDE_OK); 4261 } 4262 4263 return (DL_SYSERR); 4264 } 4265 4266 /* 4267 * gld_unbind - perform an unbind of an LSAP or ether type on the stream. 4268 * The stream is still open and can be re-bound. 4269 */ 4270 static int 4271 gld_unbind(queue_t *q, mblk_t *mp) 4272 { 4273 gld_t *gld = (gld_t *)q->q_ptr; 4274 gld_mac_info_t *macinfo = gld->gld_mac_info; 4275 4276 ASSERT(gld); 4277 4278 #ifdef GLD_DEBUG 4279 if (gld_debug & GLDTRACE) 4280 cmn_err(CE_NOTE, "gld_unbind(%p %p)", (void *)q, (void *)mp); 4281 #endif 4282 4283 if (gld->gld_state != DL_IDLE) { 4284 #ifdef GLD_DEBUG 4285 if (gld_debug & GLDERRS) 4286 cmn_err(CE_NOTE, "gld_unbind: wrong state (%d)", 4287 gld->gld_state); 4288 #endif 4289 return (DL_OUTSTATE); 4290 } 4291 ASSERT(macinfo); 4292 4293 /* 4294 * Avoid unbinding (DL_UNBIND_REQ) while FAST/RAW is inside wput. 4295 * See comments above gld_start(). 
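 * The handshake: gld_wput() bumps gld_wput_count while it may be
 * using this stream; here we set gld_in_unbind, issue a memory
 * barrier, and if gld_wput_count has not drained to zero we put the
 * request back on the queue and return GLDE_RETRY rather than pull
 * state out from under wput.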
4296 */ 4297 gld->gld_in_unbind = B_TRUE; /* disallow wput=>start */ 4298 membar_enter(); 4299 if (gld->gld_wput_count != 0) { 4300 gld->gld_in_unbind = B_FALSE; 4301 ASSERT(mp); /* we didn't come from close */ 4302 #ifdef GLD_DEBUG 4303 if (gld_debug & GLDETRACE) 4304 cmn_err(CE_NOTE, "gld_unbind: defer for wput"); 4305 #endif 4306 (void) putbq(q, mp); 4307 qenable(q); /* try again soon */ 4308 return (GLDE_RETRY); 4309 } 4310 4311 GLDM_LOCK(macinfo, RW_WRITER); 4312 if ((macinfo->gldm_type == DL_ETHER) && 4313 (gld->gld_sap == ETHERTYPE_VLAN)) { 4314 ((gld_vlan_t *)gld->gld_vlan)->gldv_nvlan_sap--; 4315 } 4316 gld->gld_state = DL_UNBOUND; 4317 gld->gld_sap = 0; 4318 gld_set_ipq(gld); 4319 GLDM_UNLOCK(macinfo); 4320 4321 membar_exit(); 4322 gld->gld_in_unbind = B_FALSE; 4323 4324 /* mp is NULL if we came from close */ 4325 if (mp) { 4326 gld_flushqueue(q); /* flush the queues */ 4327 dlokack(q, mp, DL_UNBIND_REQ); 4328 } 4329 return (GLDE_OK); 4330 } 4331 4332 /* 4333 * gld_inforeq - generate the response to an info request 4334 */ 4335 static int 4336 gld_inforeq(queue_t *q, mblk_t *mp) 4337 { 4338 gld_t *gld; 4339 dl_info_ack_t *dlp; 4340 int bufsize; 4341 glddev_t *glddev; 4342 gld_mac_info_t *macinfo; 4343 gld_mac_pvt_t *mac_pvt; 4344 int sel_offset = 0; 4345 int range_offset = 0; 4346 int addr_offset; 4347 int addr_length; 4348 int sap_length; 4349 int brdcst_offset; 4350 int brdcst_length; 4351 uchar_t *sapp; 4352 4353 #ifdef GLD_DEBUG 4354 if (gld_debug & GLDTRACE) 4355 cmn_err(CE_NOTE, "gld_inforeq(%p %p)", (void *)q, (void *)mp); 4356 #endif 4357 gld = (gld_t *)q->q_ptr; 4358 ASSERT(gld); 4359 glddev = gld->gld_device; 4360 ASSERT(glddev); 4361 4362 if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) { 4363 macinfo = gld->gld_mac_info; 4364 ASSERT(macinfo != NULL); 4365 4366 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 4367 4368 addr_length = macinfo->gldm_addrlen; 4369 sap_length = macinfo->gldm_saplen; 4370 brdcst_length = macinfo->gldm_addrlen; 4371 } else { 4372 addr_length = glddev->gld_addrlen; 4373 sap_length = glddev->gld_saplen; 4374 brdcst_length = glddev->gld_addrlen; 4375 } 4376 4377 bufsize = sizeof (dl_info_ack_t); 4378 4379 addr_offset = bufsize; 4380 bufsize += addr_length; 4381 bufsize += abs(sap_length); 4382 4383 brdcst_offset = bufsize; 4384 bufsize += brdcst_length; 4385 4386 if (((gld_vlan_t *)gld->gld_vlan) != NULL) { 4387 sel_offset = P2ROUNDUP(bufsize, sizeof (int64_t)); 4388 bufsize = sel_offset + sizeof (dl_qos_cl_sel1_t); 4389 4390 range_offset = P2ROUNDUP(bufsize, sizeof (int64_t)); 4391 bufsize = range_offset + sizeof (dl_qos_cl_range1_t); 4392 } 4393 4394 if ((mp = mexchange(q, mp, bufsize, M_PCPROTO, DL_INFO_ACK)) == NULL) 4395 return (GLDE_OK); /* nothing more to be done */ 4396 4397 bzero(mp->b_rptr, bufsize); 4398 4399 dlp = (dl_info_ack_t *)mp->b_rptr; 4400 dlp->dl_primitive = DL_INFO_ACK; 4401 dlp->dl_version = DL_VERSION_2; 4402 dlp->dl_service_mode = DL_CLDLS; 4403 dlp->dl_current_state = gld->gld_state; 4404 dlp->dl_provider_style = gld->gld_style; 4405 4406 if (sel_offset != 0) { 4407 dl_qos_cl_sel1_t *selp; 4408 dl_qos_cl_range1_t *rangep; 4409 4410 ASSERT(range_offset != 0); 4411 4412 dlp->dl_qos_offset = sel_offset; 4413 dlp->dl_qos_length = sizeof (dl_qos_cl_sel1_t); 4414 dlp->dl_qos_range_offset = range_offset; 4415 dlp->dl_qos_range_length = sizeof (dl_qos_cl_range1_t); 4416 4417 selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + sel_offset); 4418 selp->dl_qos_type = DL_QOS_CL_SEL1; 4419 selp->dl_priority = gld->gld_upri; 4420 
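		/* Advertise the full priority range (0..7) that gld_udqos() will accept. */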
4421 rangep = (dl_qos_cl_range1_t *)(mp->b_rptr + range_offset); 4422 rangep->dl_qos_type = DL_QOS_CL_RANGE1; 4423 rangep->dl_priority.dl_min = 0; 4424 rangep->dl_priority.dl_max = 7; 4425 } 4426 4427 if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) { 4428 dlp->dl_min_sdu = macinfo->gldm_minpkt; 4429 dlp->dl_max_sdu = macinfo->gldm_maxpkt; 4430 dlp->dl_mac_type = macinfo->gldm_type; 4431 dlp->dl_addr_length = addr_length + abs(sap_length); 4432 dlp->dl_sap_length = sap_length; 4433 4434 if (gld->gld_state == DL_IDLE) { 4435 /* 4436 * If we are bound to a non-LLC SAP on any medium 4437 * other than Ethernet, then we need room for a 4438 * SNAP header. So we have to adjust the MTU size 4439 * accordingly. XXX I suppose this should be done 4440 * in gldutil.c, but it seems likely that this will 4441 * always be true for everything GLD supports but 4442 * Ethernet. Check this if you add another medium. 4443 */ 4444 if ((macinfo->gldm_type == DL_TPR || 4445 macinfo->gldm_type == DL_FDDI) && 4446 gld->gld_ethertype) 4447 dlp->dl_max_sdu -= LLC_SNAP_HDR_LEN; 4448 4449 /* copy macaddr and sap */ 4450 dlp->dl_addr_offset = addr_offset; 4451 4452 mac_copy(mac_pvt->curr_macaddr, mp->b_rptr + 4453 addr_offset, macinfo->gldm_addrlen); 4454 sapp = mp->b_rptr + addr_offset + 4455 macinfo->gldm_addrlen; 4456 *(ushort_t *)sapp = gld->gld_sap; 4457 } else { 4458 dlp->dl_addr_offset = 0; 4459 } 4460 4461 /* copy broadcast addr */ 4462 dlp->dl_brdcst_addr_length = macinfo->gldm_addrlen; 4463 dlp->dl_brdcst_addr_offset = brdcst_offset; 4464 mac_copy((caddr_t)macinfo->gldm_broadcast_addr, 4465 mp->b_rptr + brdcst_offset, brdcst_length); 4466 } else { 4467 /* 4468 * No PPA is attached. 4469 * The best we can do is use the values provided 4470 * by the first mac that called gld_register. 4471 */ 4472 dlp->dl_min_sdu = glddev->gld_minsdu; 4473 dlp->dl_max_sdu = glddev->gld_maxsdu; 4474 dlp->dl_mac_type = glddev->gld_type; 4475 dlp->dl_addr_length = addr_length + abs(sap_length); 4476 dlp->dl_sap_length = sap_length; 4477 dlp->dl_addr_offset = 0; 4478 dlp->dl_brdcst_addr_offset = brdcst_offset; 4479 dlp->dl_brdcst_addr_length = brdcst_length; 4480 mac_copy((caddr_t)glddev->gld_broadcast, 4481 mp->b_rptr + brdcst_offset, brdcst_length); 4482 } 4483 qreply(q, mp); 4484 return (GLDE_OK); 4485 } 4486 4487 /* 4488 * gld_unitdata (q, mp) 4489 * send a datagram. Destination address/lsap is in M_PROTO 4490 * message (first mblock), data is in remainder of message. 
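 * Roughly, the destination address in the M_PROTO block is the
 * link-layer address immediately followed by the SAP, so
 * dl_dest_addr_length must equal gldm_addrlen + |gldm_saplen|
 * (checked below).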
4491 * 4492 */ 4493 static int 4494 gld_unitdata(queue_t *q, mblk_t *mp) 4495 { 4496 gld_t *gld = (gld_t *)q->q_ptr; 4497 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr; 4498 gld_mac_info_t *macinfo = gld->gld_mac_info; 4499 size_t msglen; 4500 mblk_t *nmp; 4501 gld_interface_t *ifp; 4502 uint32_t start; 4503 uint32_t stuff; 4504 uint32_t end; 4505 uint32_t value; 4506 uint32_t flags; 4507 uint32_t upri; 4508 4509 #ifdef GLD_DEBUG 4510 if (gld_debug & GLDTRACE) 4511 cmn_err(CE_NOTE, "gld_unitdata(%p %p)", (void *)q, (void *)mp); 4512 #endif 4513 4514 if (gld->gld_state != DL_IDLE) { 4515 #ifdef GLD_DEBUG 4516 if (gld_debug & GLDERRS) 4517 cmn_err(CE_NOTE, "gld_unitdata: wrong state (%d)", 4518 gld->gld_state); 4519 #endif 4520 dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset, 4521 dlp->dl_dest_addr_length, DL_OUTSTATE, 0); 4522 return (GLDE_OK); 4523 } 4524 ASSERT(macinfo != NULL); 4525 4526 if (!MBLKIN(mp, dlp->dl_dest_addr_offset, dlp->dl_dest_addr_length) || 4527 dlp->dl_dest_addr_length != 4528 macinfo->gldm_addrlen + abs(macinfo->gldm_saplen)) { 4529 dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset, 4530 dlp->dl_dest_addr_length, DL_BADADDR, 0); 4531 return (GLDE_OK); 4532 } 4533 4534 upri = dlp->dl_priority.dl_max; 4535 4536 msglen = msgdsize(mp); 4537 if (msglen == 0 || msglen > macinfo->gldm_maxpkt) { 4538 #ifdef GLD_DEBUG 4539 if (gld_debug & GLDERRS) 4540 cmn_err(CE_NOTE, "gld_unitdata: bad msglen (%d)", 4541 (int)msglen); 4542 #endif 4543 dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset, 4544 dlp->dl_dest_addr_length, DL_BADDATA, 0); 4545 return (GLDE_OK); 4546 } 4547 4548 ASSERT(mp->b_cont != NULL); /* because msgdsize(mp) is nonzero */ 4549 4550 ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep; 4551 4552 /* grab any checksum information that may be present */ 4553 hcksum_retrieve(mp->b_cont, NULL, NULL, &start, &stuff, &end, 4554 &value, &flags); 4555 4556 /* 4557 * Prepend a valid header for transmission 4558 */ 4559 if ((nmp = (*ifp->mkunitdata)(gld, mp)) == NULL) { 4560 #ifdef GLD_DEBUG 4561 if (gld_debug & GLDERRS) 4562 cmn_err(CE_NOTE, "gld_unitdata: mkunitdata failed."); 4563 #endif 4564 dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset, 4565 dlp->dl_dest_addr_length, DL_SYSERR, ENOSR); 4566 return (GLDE_OK); 4567 } 4568 4569 /* apply any checksum information to the first block in the chain */ 4570 (void) hcksum_assoc(nmp, NULL, NULL, start, stuff, end, value, 4571 flags, 0); 4572 4573 GLD_CLEAR_MBLK_VTAG(nmp); 4574 if (gld_start(q, nmp, GLD_WSRV, upri) == GLD_NORESOURCES) { 4575 qenable(q); 4576 return (GLDE_RETRY); 4577 } 4578 4579 return (GLDE_OK); 4580 } 4581 4582 /* 4583 * gldattach(q, mp) 4584 * DLPI DL_ATTACH_REQ 4585 * this attaches the stream to a PPA 4586 */ 4587 static int 4588 gldattach(queue_t *q, mblk_t *mp) 4589 { 4590 dl_attach_req_t *at; 4591 gld_mac_info_t *macinfo; 4592 gld_t *gld = (gld_t *)q->q_ptr; 4593 glddev_t *glddev; 4594 gld_mac_pvt_t *mac_pvt; 4595 uint32_t ppa; 4596 uint32_t vid; 4597 gld_vlan_t *vlan; 4598 4599 at = (dl_attach_req_t *)mp->b_rptr; 4600 4601 if (gld->gld_state != DL_UNATTACHED) 4602 return (DL_OUTSTATE); 4603 4604 ASSERT(!gld->gld_mac_info); 4605 4606 ppa = at->dl_ppa % GLD_VLAN_SCALE; /* 0 .. 999 */ 4607 vid = at->dl_ppa / GLD_VLAN_SCALE; /* 0 .. 
4094 */ 4608 if (vid > VLAN_VID_MAX) 4609 return (DL_BADPPA); 4610 4611 glddev = gld->gld_device; 4612 mutex_enter(&glddev->gld_devlock); 4613 for (macinfo = glddev->gld_mac_next; 4614 macinfo != (gld_mac_info_t *)&glddev->gld_mac_next; 4615 macinfo = macinfo->gldm_next) { 4616 int inst; 4617 4618 ASSERT(macinfo != NULL); 4619 if (macinfo->gldm_ppa != ppa) 4620 continue; 4621 4622 if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) 4623 continue; /* this one's not ready yet */ 4624 4625 /* 4626 * VLAN sanity check 4627 */ 4628 if (vid != VLAN_VID_NONE && !VLAN_CAPABLE(macinfo)) { 4629 mutex_exit(&glddev->gld_devlock); 4630 return (DL_BADPPA); 4631 } 4632 4633 /* 4634 * We found the correct PPA, hold the instance 4635 */ 4636 inst = ddi_get_instance(macinfo->gldm_devinfo); 4637 if (inst == -1 || qassociate(q, inst) != 0) { 4638 mutex_exit(&glddev->gld_devlock); 4639 return (DL_BADPPA); 4640 } 4641 4642 /* Take the stream off the per-driver-class list */ 4643 gldremque(gld); 4644 4645 /* 4646 * We must hold the lock to prevent multiple calls 4647 * to the reset and start routines. 4648 */ 4649 GLDM_LOCK(macinfo, RW_WRITER); 4650 4651 gld->gld_mac_info = macinfo; 4652 4653 if (macinfo->gldm_send_tagged != NULL) 4654 gld->gld_send = macinfo->gldm_send_tagged; 4655 else 4656 gld->gld_send = macinfo->gldm_send; 4657 4658 if ((vlan = gld_get_vlan(macinfo, vid)) == NULL) { 4659 GLDM_UNLOCK(macinfo); 4660 gldinsque(gld, glddev->gld_str_prev); 4661 mutex_exit(&glddev->gld_devlock); 4662 (void) qassociate(q, -1); 4663 return (DL_BADPPA); 4664 } 4665 4666 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 4667 if (!mac_pvt->started) { 4668 if (gld_start_mac(macinfo) != GLD_SUCCESS) { 4669 gld_rem_vlan(vlan); 4670 GLDM_UNLOCK(macinfo); 4671 gldinsque(gld, glddev->gld_str_prev); 4672 mutex_exit(&glddev->gld_devlock); 4673 dlerrorack(q, mp, DL_ATTACH_REQ, DL_SYSERR, 4674 EIO); 4675 (void) qassociate(q, -1); 4676 return (GLDE_OK); 4677 } 4678 } 4679 4680 gld->gld_vlan = vlan; 4681 vlan->gldv_nstreams++; 4682 gldinsque(gld, vlan->gldv_str_prev); 4683 gld->gld_state = DL_UNBOUND; 4684 GLDM_UNLOCK(macinfo); 4685 4686 #ifdef GLD_DEBUG 4687 if (gld_debug & GLDPROT) { 4688 cmn_err(CE_NOTE, "gldattach(%p, %p, PPA = %d)", 4689 (void *)q, (void *)mp, macinfo->gldm_ppa); 4690 } 4691 #endif 4692 mutex_exit(&glddev->gld_devlock); 4693 dlokack(q, mp, DL_ATTACH_REQ); 4694 return (GLDE_OK); 4695 } 4696 mutex_exit(&glddev->gld_devlock); 4697 return (DL_BADPPA); 4698 } 4699 4700 /* 4701 * gldunattach(q, mp) 4702 * DLPI DL_DETACH_REQ 4703 * detaches the mac layer from the stream 4704 */ 4705 int 4706 gldunattach(queue_t *q, mblk_t *mp) 4707 { 4708 gld_t *gld = (gld_t *)q->q_ptr; 4709 glddev_t *glddev = gld->gld_device; 4710 gld_mac_info_t *macinfo = gld->gld_mac_info; 4711 int state = gld->gld_state; 4712 int i; 4713 gld_mac_pvt_t *mac_pvt; 4714 gld_vlan_t *vlan; 4715 boolean_t phys_off; 4716 boolean_t mult_off; 4717 int op = GLD_MAC_PROMISC_NOOP; 4718 4719 if (state != DL_UNBOUND) 4720 return (DL_OUTSTATE); 4721 4722 ASSERT(macinfo != NULL); 4723 ASSERT(gld->gld_sap == 0); 4724 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 4725 4726 #ifdef GLD_DEBUG 4727 if (gld_debug & GLDPROT) { 4728 cmn_err(CE_NOTE, "gldunattach(%p, %p, PPA = %d)", 4729 (void *)q, (void *)mp, macinfo->gldm_ppa); 4730 } 4731 #endif 4732 4733 GLDM_LOCK(macinfo, RW_WRITER); 4734 4735 if (gld->gld_mcast) { 4736 for (i = 0; i < gld->gld_multicnt; i++) { 4737 gld_mcast_t *mcast; 4738 4739 if ((mcast = gld->gld_mcast[i]) != NULL) { 4740 
ASSERT(mcast->gldm_refcnt); 4741 gld_send_disable_multi(macinfo, mcast); 4742 } 4743 } 4744 kmem_free(gld->gld_mcast, 4745 sizeof (gld_mcast_t *) * gld->gld_multicnt); 4746 gld->gld_mcast = NULL; 4747 gld->gld_multicnt = 0; 4748 } 4749 4750 /* decide if we need to turn off any promiscuity */ 4751 phys_off = (gld->gld_flags & GLD_PROM_PHYS && 4752 --mac_pvt->nprom == 0); 4753 mult_off = (gld->gld_flags & GLD_PROM_MULT && 4754 --mac_pvt->nprom_multi == 0); 4755 4756 if (phys_off) { 4757 op = (mac_pvt->nprom_multi == 0) ? GLD_MAC_PROMISC_NONE : 4758 GLD_MAC_PROMISC_MULTI; 4759 } else if (mult_off) { 4760 op = (mac_pvt->nprom == 0) ? GLD_MAC_PROMISC_NONE : 4761 GLD_MAC_PROMISC_NOOP; /* phys overrides multi */ 4762 } 4763 4764 if (op != GLD_MAC_PROMISC_NOOP) 4765 (void) (*macinfo->gldm_set_promiscuous)(macinfo, op); 4766 4767 vlan = (gld_vlan_t *)gld->gld_vlan; 4768 if (gld->gld_flags & GLD_PROM_PHYS) 4769 vlan->gldv_nprom--; 4770 if (gld->gld_flags & GLD_PROM_MULT) 4771 vlan->gldv_nprom--; 4772 if (gld->gld_flags & GLD_PROM_SAP) { 4773 vlan->gldv_nprom--; 4774 vlan->gldv_nvlan_sap--; 4775 } 4776 4777 gld->gld_flags &= ~(GLD_PROM_PHYS | GLD_PROM_SAP | GLD_PROM_MULT); 4778 4779 GLDM_UNLOCK(macinfo); 4780 4781 if (phys_off) 4782 gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL); 4783 4784 /* 4785 * We need to hold both locks when modifying the mac stream list 4786 * to protect findminor as well as everyone else. 4787 */ 4788 mutex_enter(&glddev->gld_devlock); 4789 GLDM_LOCK(macinfo, RW_WRITER); 4790 4791 /* disassociate this stream with its vlan and underlying mac */ 4792 gldremque(gld); 4793 4794 if (--vlan->gldv_nstreams == 0) { 4795 gld_rem_vlan(vlan); 4796 gld->gld_vlan = NULL; 4797 } 4798 4799 gld->gld_mac_info = NULL; 4800 gld->gld_state = DL_UNATTACHED; 4801 4802 /* cleanup mac layer if last vlan */ 4803 if (mac_pvt->nvlan == 0) { 4804 gld_stop_mac(macinfo); 4805 macinfo->gldm_GLD_flags &= ~GLD_INTR_WAIT; 4806 } 4807 4808 /* make sure no references to this gld for gld_v0_sched */ 4809 if (mac_pvt->last_sched == gld) 4810 mac_pvt->last_sched = NULL; 4811 4812 GLDM_UNLOCK(macinfo); 4813 4814 /* put the stream on the unattached Style 2 list */ 4815 gldinsque(gld, glddev->gld_str_prev); 4816 4817 mutex_exit(&glddev->gld_devlock); 4818 4819 /* There will be no mp if we were called from close */ 4820 if (mp) { 4821 dlokack(q, mp, DL_DETACH_REQ); 4822 } 4823 if (gld->gld_style == DL_STYLE2) 4824 (void) qassociate(q, -1); 4825 return (GLDE_OK); 4826 } 4827 4828 /* 4829 * gld_enable_multi (q, mp) 4830 * Enables multicast address on the stream. If the mac layer 4831 * isn't enabled for this address, enable at that level as well. 
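 * Multicast state is kept in two levels: each stream holds an array
 * of pointers into a per-mac refcounted table, and the hardware
 * (gldm_set_multicast) is only asked to enable an address when it
 * gains its first reference.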
4832 */ 4833 static int 4834 gld_enable_multi(queue_t *q, mblk_t *mp) 4835 { 4836 gld_t *gld = (gld_t *)q->q_ptr; 4837 glddev_t *glddev; 4838 gld_mac_info_t *macinfo = gld->gld_mac_info; 4839 unsigned char *maddr; 4840 dl_enabmulti_req_t *multi; 4841 gld_mcast_t *mcast; 4842 int i, rc; 4843 gld_mac_pvt_t *mac_pvt; 4844 4845 #ifdef GLD_DEBUG 4846 if (gld_debug & GLDPROT) { 4847 cmn_err(CE_NOTE, "gld_enable_multi(%p, %p)", (void *)q, 4848 (void *)mp); 4849 } 4850 #endif 4851 4852 if (gld->gld_state == DL_UNATTACHED) 4853 return (DL_OUTSTATE); 4854 4855 ASSERT(macinfo != NULL); 4856 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 4857 4858 if (macinfo->gldm_set_multicast == NULL) { 4859 return (DL_UNSUPPORTED); 4860 } 4861 4862 multi = (dl_enabmulti_req_t *)mp->b_rptr; 4863 4864 if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) || 4865 multi->dl_addr_length != macinfo->gldm_addrlen) 4866 return (DL_BADADDR); 4867 4868 /* request appears to be valid */ 4869 4870 glddev = mac_pvt->major_dev; 4871 ASSERT(glddev == gld->gld_device); 4872 4873 maddr = mp->b_rptr + multi->dl_addr_offset; 4874 4875 /* 4876 * The multicast addresses live in a per-device table, along 4877 * with a reference count. Each stream has a table that 4878 * points to entries in the device table, with the reference 4879 * count reflecting the number of streams pointing at it. If 4880 * this multicast address is already in the per-device table, 4881 * all we have to do is point at it. 4882 */ 4883 GLDM_LOCK(macinfo, RW_WRITER); 4884 4885 /* does this address appear in current table? */ 4886 if (gld->gld_mcast == NULL) { 4887 /* no mcast addresses -- allocate table */ 4888 gld->gld_mcast = GLD_GETSTRUCT(gld_mcast_t *, 4889 glddev->gld_multisize); 4890 if (gld->gld_mcast == NULL) { 4891 GLDM_UNLOCK(macinfo); 4892 dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR); 4893 return (GLDE_OK); 4894 } 4895 gld->gld_multicnt = glddev->gld_multisize; 4896 } else { 4897 for (i = 0; i < gld->gld_multicnt; i++) { 4898 if (gld->gld_mcast[i] && 4899 mac_eq(gld->gld_mcast[i]->gldm_addr, 4900 maddr, macinfo->gldm_addrlen)) { 4901 /* this is a match -- just succeed */ 4902 ASSERT(gld->gld_mcast[i]->gldm_refcnt); 4903 GLDM_UNLOCK(macinfo); 4904 dlokack(q, mp, DL_ENABMULTI_REQ); 4905 return (GLDE_OK); 4906 } 4907 } 4908 } 4909 4910 /* 4911 * it wasn't in the stream so check to see if the mac layer has it 4912 */ 4913 mcast = NULL; 4914 if (mac_pvt->mcast_table == NULL) { 4915 mac_pvt->mcast_table = GLD_GETSTRUCT(gld_mcast_t, 4916 glddev->gld_multisize); 4917 if (mac_pvt->mcast_table == NULL) { 4918 GLDM_UNLOCK(macinfo); 4919 dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR); 4920 return (GLDE_OK); 4921 } 4922 } else { 4923 for (i = 0; i < glddev->gld_multisize; i++) { 4924 if (mac_pvt->mcast_table[i].gldm_refcnt && 4925 mac_eq(mac_pvt->mcast_table[i].gldm_addr, 4926 maddr, macinfo->gldm_addrlen)) { 4927 mcast = &mac_pvt->mcast_table[i]; 4928 break; 4929 } 4930 } 4931 } 4932 if (mcast == NULL) { 4933 /* not in mac layer -- find an empty mac slot to fill in */ 4934 for (i = 0; i < glddev->gld_multisize; i++) { 4935 if (mac_pvt->mcast_table[i].gldm_refcnt == 0) { 4936 mcast = &mac_pvt->mcast_table[i]; 4937 mac_copy(maddr, mcast->gldm_addr, 4938 macinfo->gldm_addrlen); 4939 break; 4940 } 4941 } 4942 } 4943 if (mcast == NULL) { 4944 /* couldn't get a mac layer slot */ 4945 GLDM_UNLOCK(macinfo); 4946 return (DL_TOOMANY); 4947 } 4948 4949 /* now we have a mac layer slot in mcast -- get a stream slot */ 4950 for (i = 0; i < 
gld->gld_multicnt; i++) { 4951 if (gld->gld_mcast[i] != NULL) 4952 continue; 4953 /* found an empty slot */ 4954 if (!mcast->gldm_refcnt) { 4955 /* set mcast in hardware */ 4956 unsigned char cmaddr[GLD_MAX_ADDRLEN]; 4957 4958 ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen); 4959 cmac_copy(maddr, cmaddr, 4960 macinfo->gldm_addrlen, macinfo); 4961 4962 rc = (*macinfo->gldm_set_multicast) 4963 (macinfo, cmaddr, GLD_MULTI_ENABLE); 4964 if (rc == GLD_NOTSUPPORTED) { 4965 GLDM_UNLOCK(macinfo); 4966 return (DL_NOTSUPPORTED); 4967 } else if (rc == GLD_NORESOURCES) { 4968 GLDM_UNLOCK(macinfo); 4969 return (DL_TOOMANY); 4970 } else if (rc == GLD_BADARG) { 4971 GLDM_UNLOCK(macinfo); 4972 return (DL_BADADDR); 4973 } else if (rc == GLD_RETRY) { 4974 /* 4975 * The putbq and gld_xwait must be 4976 * within the lock to prevent races 4977 * with gld_sched. 4978 */ 4979 (void) putbq(q, mp); 4980 gld->gld_xwait = B_TRUE; 4981 GLDM_UNLOCK(macinfo); 4982 return (GLDE_RETRY); 4983 } else if (rc != GLD_SUCCESS) { 4984 GLDM_UNLOCK(macinfo); 4985 dlerrorack(q, mp, DL_ENABMULTI_REQ, 4986 DL_SYSERR, EIO); 4987 return (GLDE_OK); 4988 } 4989 } 4990 gld->gld_mcast[i] = mcast; 4991 mcast->gldm_refcnt++; 4992 GLDM_UNLOCK(macinfo); 4993 dlokack(q, mp, DL_ENABMULTI_REQ); 4994 return (GLDE_OK); 4995 } 4996 4997 /* couldn't get a stream slot */ 4998 GLDM_UNLOCK(macinfo); 4999 return (DL_TOOMANY); 5000 } 5001 5002 5003 /* 5004 * gld_disable_multi (q, mp) 5005 * Disable the multicast address on the stream. If last 5006 * reference for the mac layer, disable there as well. 5007 */ 5008 static int 5009 gld_disable_multi(queue_t *q, mblk_t *mp) 5010 { 5011 gld_t *gld; 5012 gld_mac_info_t *macinfo; 5013 unsigned char *maddr; 5014 dl_disabmulti_req_t *multi; 5015 int i; 5016 gld_mcast_t *mcast; 5017 5018 #ifdef GLD_DEBUG 5019 if (gld_debug & GLDPROT) { 5020 cmn_err(CE_NOTE, "gld_disable_multi(%p, %p)", (void *)q, 5021 (void *)mp); 5022 } 5023 #endif 5024 5025 gld = (gld_t *)q->q_ptr; 5026 if (gld->gld_state == DL_UNATTACHED) 5027 return (DL_OUTSTATE); 5028 5029 macinfo = gld->gld_mac_info; 5030 ASSERT(macinfo != NULL); 5031 if (macinfo->gldm_set_multicast == NULL) { 5032 return (DL_UNSUPPORTED); 5033 } 5034 5035 multi = (dl_disabmulti_req_t *)mp->b_rptr; 5036 5037 if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) || 5038 multi->dl_addr_length != macinfo->gldm_addrlen) 5039 return (DL_BADADDR); 5040 5041 maddr = mp->b_rptr + multi->dl_addr_offset; 5042 5043 /* request appears to be valid */ 5044 /* does this address appear in current table? */ 5045 GLDM_LOCK(macinfo, RW_WRITER); 5046 if (gld->gld_mcast != NULL) { 5047 for (i = 0; i < gld->gld_multicnt; i++) 5048 if (((mcast = gld->gld_mcast[i]) != NULL) && 5049 mac_eq(mcast->gldm_addr, 5050 maddr, macinfo->gldm_addrlen)) { 5051 ASSERT(mcast->gldm_refcnt); 5052 gld_send_disable_multi(macinfo, mcast); 5053 gld->gld_mcast[i] = NULL; 5054 GLDM_UNLOCK(macinfo); 5055 dlokack(q, mp, DL_DISABMULTI_REQ); 5056 return (GLDE_OK); 5057 } 5058 } 5059 GLDM_UNLOCK(macinfo); 5060 return (DL_NOTENAB); /* not an enabled address */ 5061 } 5062 5063 /* 5064 * gld_send_disable_multi(macinfo, mcast) 5065 * this function is used to disable a multicast address if the reference 5066 * count goes to zero. The disable request will then be forwarded to the 5067 * lower stream. 
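 * Must be called with the maclock held as writer; the caller remains
 * responsible for clearing its own table slot afterwards.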
5068 */ 5069 static void 5070 gld_send_disable_multi(gld_mac_info_t *macinfo, gld_mcast_t *mcast) 5071 { 5072 ASSERT(macinfo != NULL); 5073 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo)); 5074 ASSERT(mcast != NULL); 5075 ASSERT(mcast->gldm_refcnt); 5076 5077 if (!mcast->gldm_refcnt) { 5078 return; /* "cannot happen" */ 5079 } 5080 5081 if (--mcast->gldm_refcnt > 0) { 5082 return; 5083 } 5084 5085 /* 5086 * This must be converted from canonical form to device form. 5087 * The refcnt is now zero so we can trash the data. 5088 */ 5089 if (macinfo->gldm_options & GLDOPT_CANONICAL_ADDR) 5090 gld_bitreverse(mcast->gldm_addr, macinfo->gldm_addrlen); 5091 5092 /* XXX Ought to check for GLD_NORESOURCES or GLD_FAILURE */ 5093 (void) (*macinfo->gldm_set_multicast) 5094 (macinfo, mcast->gldm_addr, GLD_MULTI_DISABLE); 5095 } 5096 5097 /* 5098 * gld_promisc (q, mp, req, on) 5099 * enable or disable the use of promiscuous mode with the hardware 5100 */ 5101 static int 5102 gld_promisc(queue_t *q, mblk_t *mp, t_uscalar_t req, boolean_t on) 5103 { 5104 gld_t *gld; 5105 gld_mac_info_t *macinfo; 5106 gld_mac_pvt_t *mac_pvt; 5107 gld_vlan_t *vlan; 5108 union DL_primitives *prim; 5109 int macrc = GLD_SUCCESS; 5110 int dlerr = GLDE_OK; 5111 int op = GLD_MAC_PROMISC_NOOP; 5112 5113 #ifdef GLD_DEBUG 5114 if (gld_debug & GLDTRACE) 5115 cmn_err(CE_NOTE, "gld_promisc(%p, %p, %d, %d)", 5116 (void *)q, (void *)mp, req, on); 5117 #endif 5118 5119 ASSERT(mp != NULL); 5120 prim = (union DL_primitives *)mp->b_rptr; 5121 5122 /* XXX I think spec allows promisc in unattached state */ 5123 gld = (gld_t *)q->q_ptr; 5124 if (gld->gld_state == DL_UNATTACHED) 5125 return (DL_OUTSTATE); 5126 5127 macinfo = gld->gld_mac_info; 5128 ASSERT(macinfo != NULL); 5129 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 5130 5131 vlan = (gld_vlan_t *)gld->gld_vlan; 5132 ASSERT(vlan != NULL); 5133 5134 GLDM_LOCK(macinfo, RW_WRITER); 5135 5136 /* 5137 * Work out what request (if any) has to be made to the MAC layer 5138 */ 5139 if (on) { 5140 switch (prim->promiscon_req.dl_level) { 5141 default: 5142 dlerr = DL_UNSUPPORTED; /* this is an error */ 5143 break; 5144 5145 case DL_PROMISC_PHYS: 5146 if (mac_pvt->nprom == 0) 5147 op = GLD_MAC_PROMISC_PHYS; 5148 break; 5149 5150 case DL_PROMISC_MULTI: 5151 if (mac_pvt->nprom_multi == 0) 5152 if (mac_pvt->nprom == 0) 5153 op = GLD_MAC_PROMISC_MULTI; 5154 break; 5155 5156 case DL_PROMISC_SAP: 5157 /* We can do this without reference to the MAC */ 5158 break; 5159 } 5160 } else { 5161 switch (prim->promiscoff_req.dl_level) { 5162 default: 5163 dlerr = DL_UNSUPPORTED; /* this is an error */ 5164 break; 5165 5166 case DL_PROMISC_PHYS: 5167 if (!(gld->gld_flags & GLD_PROM_PHYS)) 5168 dlerr = DL_NOTENAB; 5169 else if (mac_pvt->nprom == 1) 5170 if (mac_pvt->nprom_multi) 5171 op = GLD_MAC_PROMISC_MULTI; 5172 else 5173 op = GLD_MAC_PROMISC_NONE; 5174 break; 5175 5176 case DL_PROMISC_MULTI: 5177 if (!(gld->gld_flags & GLD_PROM_MULT)) 5178 dlerr = DL_NOTENAB; 5179 else if (mac_pvt->nprom_multi == 1) 5180 if (mac_pvt->nprom == 0) 5181 op = GLD_MAC_PROMISC_NONE; 5182 break; 5183 5184 case DL_PROMISC_SAP: 5185 if (!(gld->gld_flags & GLD_PROM_SAP)) 5186 dlerr = DL_NOTENAB; 5187 5188 /* We can do this without reference to the MAC */ 5189 break; 5190 } 5191 } 5192 5193 /* 5194 * The request was invalid in some way so no need to continue. 
5195 */ 5196 if (dlerr != GLDE_OK) { 5197 GLDM_UNLOCK(macinfo); 5198 return (dlerr); 5199 } 5200 5201 /* 5202 * Issue the request to the MAC layer, if required 5203 */ 5204 if (op != GLD_MAC_PROMISC_NOOP) { 5205 macrc = (*macinfo->gldm_set_promiscuous)(macinfo, op); 5206 } 5207 5208 /* 5209 * On success, update the appropriate flags & refcounts 5210 */ 5211 if (macrc == GLD_SUCCESS) { 5212 if (on) { 5213 switch (prim->promiscon_req.dl_level) { 5214 case DL_PROMISC_PHYS: 5215 mac_pvt->nprom++; 5216 vlan->gldv_nprom++; 5217 gld->gld_flags |= GLD_PROM_PHYS; 5218 break; 5219 5220 case DL_PROMISC_MULTI: 5221 mac_pvt->nprom_multi++; 5222 vlan->gldv_nprom++; 5223 gld->gld_flags |= GLD_PROM_MULT; 5224 break; 5225 5226 case DL_PROMISC_SAP: 5227 gld->gld_flags |= GLD_PROM_SAP; 5228 vlan->gldv_nprom++; 5229 vlan->gldv_nvlan_sap++; 5230 break; 5231 5232 default: 5233 break; 5234 } 5235 } else { 5236 switch (prim->promiscoff_req.dl_level) { 5237 case DL_PROMISC_PHYS: 5238 mac_pvt->nprom--; 5239 vlan->gldv_nprom--; 5240 gld->gld_flags &= ~GLD_PROM_PHYS; 5241 break; 5242 5243 case DL_PROMISC_MULTI: 5244 mac_pvt->nprom_multi--; 5245 vlan->gldv_nprom--; 5246 gld->gld_flags &= ~GLD_PROM_MULT; 5247 break; 5248 5249 case DL_PROMISC_SAP: 5250 gld->gld_flags &= ~GLD_PROM_SAP; 5251 vlan->gldv_nvlan_sap--; 5252 vlan->gldv_nprom--; 5253 break; 5254 5255 default: 5256 break; 5257 } 5258 } 5259 } else if (macrc == GLD_RETRY) { 5260 /* 5261 * The putbq and gld_xwait must be within the lock to 5262 * prevent races with gld_sched. 5263 */ 5264 (void) putbq(q, mp); 5265 gld->gld_xwait = B_TRUE; 5266 } 5267 5268 GLDM_UNLOCK(macinfo); 5269 5270 /* 5271 * Finally, decide how to reply. 5272 * 5273 * If <macrc> is not GLD_SUCCESS, the request was put to the MAC 5274 * layer but failed. In such cases, we can return a DL_* error 5275 * code and let the caller send an error-ack reply upstream, or 5276 * we can send a reply here and then return GLDE_OK so that the 5277 * caller doesn't also respond. 5278 * 5279 * If physical-promiscuous mode was (successfully) switched on or 5280 * off, send a notification (DL_NOTIFY_IND) to anyone interested. 
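 * (In the GLD_RETRY case the request has already been put back on
 * the queue above, so we simply return GLDE_RETRY and send no reply
 * for now.)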
5281 */ 5282 switch (macrc) { 5283 case GLD_NOTSUPPORTED: 5284 return (DL_NOTSUPPORTED); 5285 5286 case GLD_NORESOURCES: 5287 dlerrorack(q, mp, req, DL_SYSERR, ENOSR); 5288 return (GLDE_OK); 5289 5290 case GLD_RETRY: 5291 return (GLDE_RETRY); 5292 5293 default: 5294 dlerrorack(q, mp, req, DL_SYSERR, EIO); 5295 return (GLDE_OK); 5296 5297 case GLD_SUCCESS: 5298 dlokack(q, mp, req); 5299 break; 5300 } 5301 5302 switch (op) { 5303 case GLD_MAC_PROMISC_NOOP: 5304 break; 5305 5306 case GLD_MAC_PROMISC_PHYS: 5307 gld_notify_ind(macinfo, DL_NOTE_PROMISC_ON_PHYS, NULL); 5308 break; 5309 5310 default: 5311 gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL); 5312 break; 5313 } 5314 5315 return (GLDE_OK); 5316 } 5317 5318 /* 5319 * gld_physaddr() 5320 * get the current or factory physical address value 5321 */ 5322 static int 5323 gld_physaddr(queue_t *q, mblk_t *mp) 5324 { 5325 gld_t *gld = (gld_t *)q->q_ptr; 5326 gld_mac_info_t *macinfo; 5327 union DL_primitives *prim = (union DL_primitives *)mp->b_rptr; 5328 unsigned char addr[GLD_MAX_ADDRLEN]; 5329 5330 if (gld->gld_state == DL_UNATTACHED) 5331 return (DL_OUTSTATE); 5332 5333 macinfo = (gld_mac_info_t *)gld->gld_mac_info; 5334 ASSERT(macinfo != NULL); 5335 ASSERT(macinfo->gldm_addrlen <= GLD_MAX_ADDRLEN); 5336 5337 switch (prim->physaddr_req.dl_addr_type) { 5338 case DL_FACT_PHYS_ADDR: 5339 mac_copy((caddr_t)macinfo->gldm_vendor_addr, 5340 (caddr_t)addr, macinfo->gldm_addrlen); 5341 break; 5342 case DL_CURR_PHYS_ADDR: 5343 /* make a copy so we don't hold the lock across qreply */ 5344 GLDM_LOCK(macinfo, RW_WRITER); 5345 mac_copy((caddr_t) 5346 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr, 5347 (caddr_t)addr, macinfo->gldm_addrlen); 5348 GLDM_UNLOCK(macinfo); 5349 break; 5350 default: 5351 return (DL_BADPRIM); 5352 } 5353 dlphysaddrack(q, mp, (caddr_t)addr, macinfo->gldm_addrlen); 5354 return (GLDE_OK); 5355 } 5356 5357 /* 5358 * gld_setaddr() 5359 * change the hardware's physical address to a user specified value 5360 */ 5361 static int 5362 gld_setaddr(queue_t *q, mblk_t *mp) 5363 { 5364 gld_t *gld = (gld_t *)q->q_ptr; 5365 gld_mac_info_t *macinfo; 5366 gld_mac_pvt_t *mac_pvt; 5367 union DL_primitives *prim = (union DL_primitives *)mp->b_rptr; 5368 unsigned char *addr; 5369 unsigned char cmaddr[GLD_MAX_ADDRLEN]; 5370 int rc; 5371 gld_vlan_t *vlan; 5372 5373 if (gld->gld_state == DL_UNATTACHED) 5374 return (DL_OUTSTATE); 5375 5376 vlan = (gld_vlan_t *)gld->gld_vlan; 5377 ASSERT(vlan != NULL); 5378 5379 if (vlan->gldv_id != VLAN_VID_NONE) 5380 return (DL_NOTSUPPORTED); 5381 5382 macinfo = (gld_mac_info_t *)gld->gld_mac_info; 5383 ASSERT(macinfo != NULL); 5384 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 5385 5386 if (!MBLKIN(mp, prim->set_physaddr_req.dl_addr_offset, 5387 prim->set_physaddr_req.dl_addr_length) || 5388 prim->set_physaddr_req.dl_addr_length != macinfo->gldm_addrlen) 5389 return (DL_BADADDR); 5390 5391 GLDM_LOCK(macinfo, RW_WRITER); 5392 5393 /* now do the set at the hardware level */ 5394 addr = mp->b_rptr + prim->set_physaddr_req.dl_addr_offset; 5395 ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen); 5396 cmac_copy(addr, cmaddr, macinfo->gldm_addrlen, macinfo); 5397 5398 rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr); 5399 if (rc == GLD_SUCCESS) 5400 mac_copy(addr, mac_pvt->curr_macaddr, 5401 macinfo->gldm_addrlen); 5402 5403 GLDM_UNLOCK(macinfo); 5404 5405 switch (rc) { 5406 case GLD_SUCCESS: 5407 break; 5408 case GLD_NOTSUPPORTED: 5409 return (DL_NOTSUPPORTED); 5410 case GLD_BADARG: 5411 return 
(DL_BADADDR); 5412 case GLD_NORESOURCES: 5413 dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, ENOSR); 5414 return (GLDE_OK); 5415 default: 5416 dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, EIO); 5417 return (GLDE_OK); 5418 } 5419 5420 gld_notify_ind(macinfo, DL_NOTE_PHYS_ADDR, NULL); 5421 5422 dlokack(q, mp, DL_SET_PHYS_ADDR_REQ); 5423 return (GLDE_OK); 5424 } 5425 5426 int 5427 gld_get_statistics(queue_t *q, mblk_t *mp) 5428 { 5429 dl_get_statistics_ack_t *dlsp; 5430 gld_t *gld = (gld_t *)q->q_ptr; 5431 gld_mac_info_t *macinfo = gld->gld_mac_info; 5432 gld_mac_pvt_t *mac_pvt; 5433 5434 if (gld->gld_state == DL_UNATTACHED) 5435 return (DL_OUTSTATE); 5436 5437 ASSERT(macinfo != NULL); 5438 5439 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 5440 (void) gld_update_kstat(mac_pvt->kstatp, KSTAT_READ); 5441 5442 mp = mexchange(q, mp, DL_GET_STATISTICS_ACK_SIZE + 5443 sizeof (struct gldkstats), M_PCPROTO, DL_GET_STATISTICS_ACK); 5444 5445 if (mp == NULL) 5446 return (GLDE_OK); /* mexchange already sent merror */ 5447 5448 dlsp = (dl_get_statistics_ack_t *)mp->b_rptr; 5449 dlsp->dl_primitive = DL_GET_STATISTICS_ACK; 5450 dlsp->dl_stat_length = sizeof (struct gldkstats); 5451 dlsp->dl_stat_offset = DL_GET_STATISTICS_ACK_SIZE; 5452 5453 GLDM_LOCK(macinfo, RW_WRITER); 5454 bcopy(mac_pvt->kstatp->ks_data, 5455 (mp->b_rptr + DL_GET_STATISTICS_ACK_SIZE), 5456 sizeof (struct gldkstats)); 5457 GLDM_UNLOCK(macinfo); 5458 5459 qreply(q, mp); 5460 return (GLDE_OK); 5461 } 5462 5463 /* =================================================== */ 5464 /* misc utilities, some requiring various mutexes held */ 5465 /* =================================================== */ 5466 5467 /* 5468 * Initialize and start the driver. 5469 */ 5470 static int 5471 gld_start_mac(gld_mac_info_t *macinfo) 5472 { 5473 int rc; 5474 unsigned char cmaddr[GLD_MAX_ADDRLEN]; 5475 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 5476 5477 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo)); 5478 ASSERT(!mac_pvt->started); 5479 5480 rc = (*macinfo->gldm_reset)(macinfo); 5481 if (rc != GLD_SUCCESS) 5482 return (GLD_FAILURE); 5483 5484 /* set the addr after we reset the device */ 5485 ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen); 5486 cmac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt) 5487 ->curr_macaddr, cmaddr, macinfo->gldm_addrlen, macinfo); 5488 5489 rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr); 5490 ASSERT(rc != GLD_BADARG); /* this address was good before */ 5491 if (rc != GLD_SUCCESS && rc != GLD_NOTSUPPORTED) 5492 return (GLD_FAILURE); 5493 5494 rc = (*macinfo->gldm_start)(macinfo); 5495 if (rc != GLD_SUCCESS) 5496 return (GLD_FAILURE); 5497 5498 mac_pvt->started = B_TRUE; 5499 return (GLD_SUCCESS); 5500 } 5501 5502 /* 5503 * Stop the driver. 5504 */ 5505 static void 5506 gld_stop_mac(gld_mac_info_t *macinfo) 5507 { 5508 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 5509 5510 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo)); 5511 ASSERT(mac_pvt->started); 5512 5513 (void) (*macinfo->gldm_stop)(macinfo); 5514 5515 mac_pvt->started = B_FALSE; 5516 } 5517 5518 5519 /* 5520 * gld_set_ipq will set a pointer to the queue which is bound to the 5521 * IP sap if: 5522 * o the device type is ethernet or IPoIB. 5523 * o there is no stream in SAP promiscuous mode. 5524 * o there is exactly one stream bound to the IP sap. 5525 * o the stream is in "fastpath" mode. 
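 * When such a queue exists, gld_recv() can hand IP (and, via
 * gldv_ipv6q, IPv6) packets straight to it instead of searching the
 * stream list.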
5526 */ 5527 static void 5528 gld_set_ipq(gld_t *gld) 5529 { 5530 gld_vlan_t *vlan; 5531 gld_mac_info_t *macinfo = gld->gld_mac_info; 5532 gld_t *ip_gld = NULL; 5533 uint_t ipq_candidates = 0; 5534 gld_t *ipv6_gld = NULL; 5535 uint_t ipv6q_candidates = 0; 5536 5537 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo)); 5538 5539 /* The ipq code in gld_recv() is intimate with ethernet/IPoIB */ 5540 if (((macinfo->gldm_type != DL_ETHER) && 5541 (macinfo->gldm_type != DL_IB)) || 5542 (gld_global_options & GLD_OPT_NO_IPQ)) 5543 return; 5544 5545 vlan = (gld_vlan_t *)gld->gld_vlan; 5546 ASSERT(vlan != NULL); 5547 5548 /* clear down any previously defined ipqs */ 5549 vlan->gldv_ipq = NULL; 5550 vlan->gldv_ipv6q = NULL; 5551 5552 /* Try to find a single stream eligible to receive IP packets */ 5553 for (gld = vlan->gldv_str_next; 5554 gld != (gld_t *)&vlan->gldv_str_next; gld = gld->gld_next) { 5555 if (gld->gld_state != DL_IDLE) 5556 continue; /* not eligible to receive */ 5557 if (gld->gld_flags & GLD_STR_CLOSING) 5558 continue; /* not eligible to receive */ 5559 5560 if (gld->gld_sap == ETHERTYPE_IP) { 5561 ip_gld = gld; 5562 ipq_candidates++; 5563 } 5564 5565 if (gld->gld_sap == ETHERTYPE_IPV6) { 5566 ipv6_gld = gld; 5567 ipv6q_candidates++; 5568 } 5569 } 5570 5571 if (ipq_candidates == 1) { 5572 ASSERT(ip_gld != NULL); 5573 5574 if (ip_gld->gld_flags & GLD_FAST) /* eligible for ipq */ 5575 vlan->gldv_ipq = ip_gld->gld_qptr; 5576 } 5577 5578 if (ipv6q_candidates == 1) { 5579 ASSERT(ipv6_gld != NULL); 5580 5581 if (ipv6_gld->gld_flags & GLD_FAST) /* eligible for ipq */ 5582 vlan->gldv_ipv6q = ipv6_gld->gld_qptr; 5583 } 5584 } 5585 5586 /* 5587 * gld_flushqueue (q) 5588 * used by DLPI primitives that require flushing the queues. 5589 * essentially, this is DL_UNBIND_REQ. 5590 */ 5591 static void 5592 gld_flushqueue(queue_t *q) 5593 { 5594 /* flush all data in both queues */ 5595 /* XXX Should these be FLUSHALL? */ 5596 flushq(q, FLUSHDATA); 5597 flushq(WR(q), FLUSHDATA); 5598 /* flush all the queues upstream */ 5599 (void) putctl1(q, M_FLUSH, FLUSHRW); 5600 } 5601 5602 /* 5603 * gld_devlookup (major) 5604 * search the device table for the device with specified 5605 * major number and return a pointer to it if it exists 5606 */ 5607 static glddev_t * 5608 gld_devlookup(int major) 5609 { 5610 struct glddevice *dev; 5611 5612 ASSERT(mutex_owned(&gld_device_list.gld_devlock)); 5613 5614 for (dev = gld_device_list.gld_next; 5615 dev != &gld_device_list; 5616 dev = dev->gld_next) { 5617 ASSERT(dev); 5618 if (dev->gld_major == major) 5619 return (dev); 5620 } 5621 return (NULL); 5622 } 5623 5624 /* 5625 * gld_findminor(device) 5626 * Returns a minor number currently unused by any stream in the current 5627 * device class (major) list. 
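 * The fast path below simply hands out device->gld_nextminor; the
 * exhaustive scan is only needed once the clone range
 * [GLD_MIN_CLONE_MINOR, GLD_MAX_CLONE_MINOR] has been consumed.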
/*
 * gld_findminor(device)
 * Returns a minor number currently unused by any stream in the current
 * device class (major) list.
 */
static int
gld_findminor(glddev_t *device)
{
	gld_t *next;
	gld_mac_info_t *nextmac;
	gld_vlan_t *nextvlan;
	int minor;
	int i;

	ASSERT(mutex_owned(&device->gld_devlock));

	/* The fast way */
	if (device->gld_nextminor >= GLD_MIN_CLONE_MINOR &&
	    device->gld_nextminor <= GLD_MAX_CLONE_MINOR)
		return (device->gld_nextminor++);

	/* The steady way */
	for (minor = GLD_MIN_CLONE_MINOR; minor <= GLD_MAX_CLONE_MINOR;
	    minor++) {
		/* Search all unattached streams */
		for (next = device->gld_str_next;
		    next != (gld_t *)&device->gld_str_next;
		    next = next->gld_next) {
			if (minor == next->gld_minor)
				goto nextminor;
		}
		/* Search all attached streams; we don't need maclock because */
		/* mac stream list is protected by devlock as well as maclock */
		for (nextmac = device->gld_mac_next;
		    nextmac != (gld_mac_info_t *)&device->gld_mac_next;
		    nextmac = nextmac->gldm_next) {
			gld_mac_pvt_t *pvt =
			    (gld_mac_pvt_t *)nextmac->gldm_mac_pvt;

			if (!(nextmac->gldm_GLD_flags & GLD_MAC_READY))
				continue;	/* this one's not ready yet */

			for (i = 0; i < VLAN_HASHSZ; i++) {
				for (nextvlan = pvt->vlan_hash[i];
				    nextvlan != NULL;
				    nextvlan = nextvlan->gldv_next) {
					for (next = nextvlan->gldv_str_next;
					    next !=
					    (gld_t *)&nextvlan->gldv_str_next;
					    next = next->gld_next) {
						if (minor == next->gld_minor)
							goto nextminor;
					}
				}
			}
		}

		return (minor);
nextminor:
		/* don't need to do anything */
		;
	}
	cmn_err(CE_WARN, "GLD ran out of minor numbers for %s",
	    device->gld_name);
	return (0);
}

/*
 * version of insque/remque for use by this driver
 */
struct qelem {
	struct qelem *q_forw;
	struct qelem *q_back;
	/* rest of structure */
};

static void
gldinsque(void *elem, void *pred)
{
	struct qelem *pelem = elem;
	struct qelem *ppred = pred;
	struct qelem *pnext = ppred->q_forw;

	pelem->q_forw = pnext;
	pelem->q_back = ppred;
	ppred->q_forw = pelem;
	pnext->q_back = pelem;
}

static void
gldremque(void *arg)
{
	struct qelem *pelem = arg;
	struct qelem *elem = arg;

	pelem->q_forw->q_back = pelem->q_back;
	pelem->q_back->q_forw = pelem->q_forw;
	elem->q_back = elem->q_forw = NULL;
}

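/*
 * gldinsque() and gldremque() operate on any structure whose first two
 * members are forward and back pointers, as struct qelem above describes.
 * gldinsque(elem, pred) links elem in immediately after pred;
 * gldremque(elem) links elem's neighbours to each other and clears elem's
 * own pointers.  An illustrative use on a circular list headed by an
 * embedded next/prev pair (the names "list" and "elem" are hypothetical):
 *
 *	gldinsque(elem, list.prev);	(append elem at the tail)
 *	gldremque(elem);		(unlink it again)
 */
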
static gld_vlan_t *
gld_add_vlan(gld_mac_info_t *macinfo, uint32_t vid)
{
	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
	gld_vlan_t **pp;
	gld_vlan_t *p;

	pp = &(mac_pvt->vlan_hash[vid % VLAN_HASHSZ]);
	while ((p = *pp) != NULL) {
		ASSERT(p->gldv_id != vid);
		pp = &(p->gldv_next);
	}

	if ((p = kmem_zalloc(sizeof (gld_vlan_t), KM_NOSLEEP)) == NULL)
		return (NULL);

	p->gldv_mac = macinfo;
	p->gldv_id = vid;

	if (vid == VLAN_VID_NONE) {
		p->gldv_ptag = VLAN_VTAG_NONE;
		p->gldv_stats = mac_pvt->statistics;
		p->gldv_kstatp = NULL;
	} else {
		p->gldv_ptag = GLD_MK_PTAG(VLAN_CFI_ETHER, vid);
		p->gldv_stats = kmem_zalloc(sizeof (struct gld_stats),
		    KM_SLEEP);

		if (gld_init_vlan_stats(p) != GLD_SUCCESS) {
			kmem_free(p->gldv_stats, sizeof (struct gld_stats));
			kmem_free(p, sizeof (gld_vlan_t));
			return (NULL);
		}
	}

	p->gldv_str_next = p->gldv_str_prev = (gld_t *)&p->gldv_str_next;
	mac_pvt->nvlan++;
	*pp = p;

	return (p);
}

static void
gld_rem_vlan(gld_vlan_t *vlan)
{
	gld_mac_info_t *macinfo = vlan->gldv_mac;
	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
	gld_vlan_t **pp;
	gld_vlan_t *p;

	pp = &(mac_pvt->vlan_hash[vlan->gldv_id % VLAN_HASHSZ]);
	while ((p = *pp) != NULL) {
		if (p->gldv_id == vlan->gldv_id)
			break;
		pp = &(p->gldv_next);
	}
	ASSERT(p != NULL);

	*pp = p->gldv_next;
	mac_pvt->nvlan--;
	if (p->gldv_id != VLAN_VID_NONE) {
		ASSERT(p->gldv_kstatp != NULL);
		kstat_delete(p->gldv_kstatp);
		kmem_free(p->gldv_stats, sizeof (struct gld_stats));
	}
	kmem_free(p, sizeof (gld_vlan_t));
}

gld_vlan_t *
gld_find_vlan(gld_mac_info_t *macinfo, uint32_t vid)
{
	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
	gld_vlan_t *p;

	p = mac_pvt->vlan_hash[vid % VLAN_HASHSZ];
	while (p != NULL) {
		if (p->gldv_id == vid)
			return (p);
		p = p->gldv_next;
	}
	return (NULL);
}

gld_vlan_t *
gld_get_vlan(gld_mac_info_t *macinfo, uint32_t vid)
{
	gld_vlan_t *vlan;

	if ((vlan = gld_find_vlan(macinfo, vid)) == NULL)
		vlan = gld_add_vlan(macinfo, vid);

	return (vlan);
}

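/*
 * Usage note (hedged): gld_get_vlan() is a find-or-create wrapper around
 * gld_find_vlan() and gld_add_vlan().  Since gld_add_vlan() allocates the
 * gld_vlan_t with KM_NOSLEEP and its per-vlan kstat setup can fail, the
 * result can be NULL and callers must check for it, for example:
 *
 *	if ((vlan = gld_get_vlan(macinfo, vid)) == NULL)
 *		return (DL_SYSERR);
 *
 * The DL_SYSERR above is purely illustrative; the appropriate error
 * handling depends on the caller.
 */
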
/*
 * gld_bitrevcopy()
 * This is essentially bcopy, with the ability to bit reverse the
 * source bytes. The MAC address bytes as transmitted by FDDI
 * interfaces are bit reversed.
 */
void
gld_bitrevcopy(caddr_t src, caddr_t target, size_t n)
{
	while (n--)
		*target++ = bit_rev[(uchar_t)*src++];
}

/*
 * gld_bitreverse()
 * Convert the bit order by swapping all the bits, using a
 * lookup table.
 */
void
gld_bitreverse(uchar_t *rptr, size_t n)
{
	while (n--) {
		*rptr = bit_rev[*rptr];
		rptr++;
	}
}

char *
gld_macaddr_sprintf(char *etherbuf, unsigned char *ap, int len)
{
	int i;
	char *cp = etherbuf;
	static char digits[] = "0123456789abcdef";

	for (i = 0; i < len; i++) {
		*cp++ = digits[*ap >> 4];
		*cp++ = digits[*ap++ & 0xf];
		*cp++ = ':';
	}
	*--cp = 0;
	return (etherbuf);
}

#ifdef GLD_DEBUG
static void
gld_check_assertions()
{
	glddev_t *dev;
	gld_mac_info_t *mac;
	gld_t *str;
	gld_vlan_t *vlan;
	int i;

	mutex_enter(&gld_device_list.gld_devlock);

	for (dev = gld_device_list.gld_next;
	    dev != (glddev_t *)&gld_device_list.gld_next;
	    dev = dev->gld_next) {
		mutex_enter(&dev->gld_devlock);
		ASSERT(dev->gld_broadcast != NULL);
		for (str = dev->gld_str_next;
		    str != (gld_t *)&dev->gld_str_next;
		    str = str->gld_next) {
			ASSERT(str->gld_device == dev);
			ASSERT(str->gld_mac_info == NULL);
			ASSERT(str->gld_qptr != NULL);
			ASSERT(str->gld_minor >= GLD_MIN_CLONE_MINOR);
			ASSERT(str->gld_multicnt == 0);
			ASSERT(str->gld_mcast == NULL);
			ASSERT(!(str->gld_flags &
			    (GLD_PROM_PHYS|GLD_PROM_MULT|GLD_PROM_SAP)));
			ASSERT(str->gld_sap == 0);
			ASSERT(str->gld_state == DL_UNATTACHED);
		}
		for (mac = dev->gld_mac_next;
		    mac != (gld_mac_info_t *)&dev->gld_mac_next;
		    mac = mac->gldm_next) {
			int nvlan = 0;
			gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;

			if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
				continue;	/* this one's not ready yet */

			GLDM_LOCK(mac, RW_WRITER);
			ASSERT(mac->gldm_devinfo != NULL);
			ASSERT(mac->gldm_mac_pvt != NULL);
			ASSERT(pvt->interfacep != NULL);
			ASSERT(pvt->kstatp != NULL);
			ASSERT(pvt->statistics != NULL);
			ASSERT(pvt->major_dev == dev);

			for (i = 0; i < VLAN_HASHSZ; i++) {
				for (vlan = pvt->vlan_hash[i];
				    vlan != NULL; vlan = vlan->gldv_next) {
					int nstr = 0;

					ASSERT(vlan->gldv_mac == mac);

					for (str = vlan->gldv_str_next;
					    str !=
					    (gld_t *)&vlan->gldv_str_next;
					    str = str->gld_next) {
						ASSERT(str->gld_device == dev);
						ASSERT(str->gld_mac_info ==
						    mac);
						ASSERT(str->gld_qptr != NULL);
						ASSERT(str->gld_minor >=
						    GLD_MIN_CLONE_MINOR);
						ASSERT(
						    str->gld_multicnt == 0 ||
						    str->gld_mcast);
						nstr++;
					}
					ASSERT(vlan->gldv_nstreams == nstr);
					nvlan++;
				}
			}
			ASSERT(pvt->nvlan == nvlan);
			GLDM_UNLOCK(mac);
		}
		mutex_exit(&dev->gld_devlock);
	}
	mutex_exit(&gld_device_list.gld_devlock);
}
#endif
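
/*
 * Usage note (hedged) for gld_macaddr_sprintf(): each address byte is
 * rendered as two lowercase hex digits followed by a colon, and the final
 * colon is overwritten by the NUL terminator, so a caller-supplied buffer
 * of (3 * len) bytes is sufficient for len >= 1.  An illustrative call for
 * a six-byte Ethernet address (buf and macaddr are hypothetical locals):
 *
 *	char buf[3 * ETHERADDRL];
 *
 *	cmn_err(CE_CONT, "macaddr = %s\n",
 *	    gld_macaddr_sprintf(buf, macaddr, ETHERADDRL));
 *
 * which prints something like "00:08:20:ab:cd:ef".
 */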