/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * gld - Generic LAN Driver Version 2, PSARC/1997/382
 *
 * This is a utility module that provides generic facilities for
 * LAN drivers. The DLPI protocol and most STREAMS interfaces
 * are handled here.
 *
 * It no longer provides compatibility with drivers
 * implemented according to the GLD v0 documentation published
 * in 1993. (See PSARC 2003/728)
 */


#include <sys/types.h>
#include <sys/errno.h>
#include <sys/stropts.h>
#include <sys/stream.h>
#include <sys/kmem.h>
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/kstat.h>
#include <sys/debug.h>
#include <sys/note.h>
#include <sys/sysmacros.h>

#include <sys/byteorder.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/dlpi.h>
#include <sys/pattr.h>
#include <sys/ethernet.h>
#include <sys/ib/clients/ibd/ibd.h>
#include <sys/policy.h>
#include <sys/atomic.h>

#include <sys/multidata.h>
#include <sys/gld.h>
#include <sys/gldpriv.h>

#include <sys/ddi.h>
#include <sys/sunddi.h>

/*
 * Macro to atomically increment counters of type uint32_t, uint64_t
 * and ulong_t.
 */
#define	BUMP(stat, delta)	do {					\
	_NOTE(CONSTANTCONDITION)					\
	if (sizeof (stat) == sizeof (uint32_t)) {			\
		atomic_add_32((uint32_t *)&stat, delta);		\
	_NOTE(CONSTANTCONDITION)					\
	} else if (sizeof (stat) == sizeof (uint64_t)) {		\
		atomic_add_64((uint64_t *)&stat, delta);		\
	}								\
	_NOTE(CONSTANTCONDITION)					\
} while (0)
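
/*
 * Usage sketch (illustrative only, not part of the original interface
 * contract): because the dispatch above is on sizeof, a call such as
 *
 *	BUMP(vlan->gldv_stats->glds_errxmt, 1);
 *
 * resolves to atomic_add_32() or atomic_add_64() at compile time,
 * depending on how the counter field is declared.  The
 * _NOTE(CONSTANTCONDITION) annotations suppress lint warnings about
 * the compile-time-constant sizeof comparisons.
 */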

#define	UPDATE_STATS(vlan, pktinfo, number) {				\
	if ((pktinfo).isBroadcast)					\
		(vlan)->gldv_stats->glds_brdcstxmt += (number);		\
	else if ((pktinfo).isMulticast)					\
		(vlan)->gldv_stats->glds_multixmt += (number);		\
	(vlan)->gldv_stats->glds_bytexmt64 += (pktinfo).pktLen;	\
	(vlan)->gldv_stats->glds_pktxmt64 += (number);			\
}

#ifdef GLD_DEBUG
int gld_debug = GLDERRS;
#endif

/* called from gld_register */
static int gld_initstats(gld_mac_info_t *);

/* called from kstat mechanism, and from wsrv's get_statistics */
static int gld_update_kstat(kstat_t *, int);

/* statistics for additional vlans */
static int gld_init_vlan_stats(gld_vlan_t *);
static int gld_update_vlan_kstat(kstat_t *, int);

/* called from gld_getinfo */
static dev_info_t *gld_finddevinfo(dev_t);

/* called from wput, wsrv, unidata, and v0_sched to send a packet */
/* also from the source routing stuff for sending RDE protocol packets */
static int gld_start(queue_t *, mblk_t *, int, uint32_t);
static int gld_start_mdt(queue_t *, mblk_t *, int);

/* called from gld_start[_mdt] to loopback packet(s) in promiscuous mode */
static void gld_precv(gld_mac_info_t *, gld_vlan_t *, mblk_t *);
static void gld_precv_mdt(gld_mac_info_t *, gld_vlan_t *, mblk_t *,
    pdesc_t *, pktinfo_t *);

/* receive group: called from gld_recv and gld_precv* with maclock held */
static void gld_sendup(gld_mac_info_t *, gld_vlan_t *, pktinfo_t *, mblk_t *,
    int (*)());
static int gld_accept(gld_t *, pktinfo_t *);
static int gld_mcmatch(gld_t *, pktinfo_t *);
static int gld_multicast(unsigned char *, gld_t *);
static int gld_paccept(gld_t *, pktinfo_t *);
static void gld_passon(gld_t *, mblk_t *, pktinfo_t *,
    void (*)(queue_t *, mblk_t *));
static mblk_t *gld_addudind(gld_t *, mblk_t *, pktinfo_t *);

/* wsrv group: called from wsrv, single threaded per queue */
static int gld_ioctl(queue_t *, mblk_t *);
static void gld_fastpath(gld_t *, queue_t *, mblk_t *);
static int gld_cmds(queue_t *, mblk_t *);
static mblk_t *gld_bindack(queue_t *, mblk_t *);
static int gld_notify_req(queue_t *, mblk_t *);
static int gld_udqos(queue_t *, mblk_t *);
static int gld_bind(queue_t *, mblk_t *);
static int gld_unbind(queue_t *, mblk_t *);
static int gld_inforeq(queue_t *, mblk_t *);
static int gld_unitdata(queue_t *, mblk_t *);
static int gldattach(queue_t *, mblk_t *);
static int gldunattach(queue_t *, mblk_t *);
static int gld_enable_multi(queue_t *, mblk_t *);
static int gld_disable_multi(queue_t *, mblk_t *);
static void gld_send_disable_multi(gld_mac_info_t *, gld_mcast_t *);
static int gld_promisc(queue_t *, mblk_t *, t_uscalar_t, boolean_t);
static int gld_physaddr(queue_t *, mblk_t *);
static int gld_setaddr(queue_t *, mblk_t *);
static int gld_get_statistics(queue_t *, mblk_t *);
static int gld_cap(queue_t *, mblk_t *);
static int gld_cap_ack(queue_t *, mblk_t *);
static int gld_cap_enable(queue_t *, mblk_t *);

/* misc utilities, some requiring various mutexes held */
static int gld_start_mac(gld_mac_info_t *);
static void gld_stop_mac(gld_mac_info_t *);
static void gld_set_ipq(gld_t *);
static void gld_flushqueue(queue_t *);
static glddev_t *gld_devlookup(int);
static int gld_findminor(glddev_t *);
static void gldinsque(void *, void *);
static void gldremque(void *);
void gld_bitrevcopy(caddr_t, caddr_t, size_t);
void gld_bitreverse(uchar_t *, size_t);
char *gld_macaddr_sprintf(char *, unsigned char *, int);
static gld_vlan_t *gld_add_vlan(gld_mac_info_t *, uint32_t vid);
static void gld_rem_vlan(gld_vlan_t *);
gld_vlan_t *gld_find_vlan(gld_mac_info_t *, uint32_t);
gld_vlan_t *gld_get_vlan(gld_mac_info_t *, uint32_t);

#ifdef GLD_DEBUG
static void gld_check_assertions(void);
extern void gld_sr_dump(gld_mac_info_t *);
#endif

/*
 * Allocate and zero-out "number" structures each of type "structure" in
 * kernel memory.
 */
#define	GETSTRUCT(structure, number)   \
	(kmem_zalloc((uint_t)(sizeof (structure) * (number)), KM_NOSLEEP))

#define	abs(a)	((a) < 0 ? -(a) : a)

uint32_t gld_global_options = GLD_OPT_NO_ETHRXSNAP;

/*
 * VLANs are only supported on ethernet devices that manipulate VLAN headers
 * themselves.
 */
#define	VLAN_CAPABLE(macinfo) \
	((macinfo)->gldm_type == DL_ETHER && \
	(macinfo)->gldm_send_tagged != NULL)

/*
 * The set of notifications generatable by GLD itself, the additional
 * set that can be generated if the MAC driver provides the link-state
 * tracking callback capability, and the set supported by the GLD
 * notification code below.
 *
 * PLEASE keep these in sync with what the code actually does!
 */
static const uint32_t gld_internal_notes =	DL_NOTE_PROMISC_ON_PHYS |
						DL_NOTE_PROMISC_OFF_PHYS |
						DL_NOTE_PHYS_ADDR;
static const uint32_t gld_linkstate_notes =	DL_NOTE_LINK_DOWN |
						DL_NOTE_LINK_UP |
						DL_NOTE_SPEED;
static const uint32_t gld_supported_notes =	DL_NOTE_PROMISC_ON_PHYS |
						DL_NOTE_PROMISC_OFF_PHYS |
						DL_NOTE_PHYS_ADDR |
						DL_NOTE_LINK_DOWN |
						DL_NOTE_LINK_UP |
						DL_NOTE_SPEED;

/* Media must correspond to #defines in gld.h */
static char *gld_media[] = {
	"unknown",	/* GLDM_UNKNOWN - driver cannot determine media */
	"aui",		/* GLDM_AUI */
	"bnc",		/* GLDM_BNC */
	"twpair",	/* GLDM_TP */
	"fiber",	/* GLDM_FIBER */
	"100baseT",	/* GLDM_100BT */
	"100vgAnyLan",	/* GLDM_VGANYLAN */
	"10baseT",	/* GLDM_10BT */
	"ring4",	/* GLDM_RING4 */
	"ring16",	/* GLDM_RING16 */
	"PHY/MII",	/* GLDM_PHYMII */
	"100baseTX",	/* GLDM_100BTX */
	"100baseT4",	/* GLDM_100BT4 */
	"unknown",	/* skip */
	"ipib",		/* GLDM_IB */
};

/* Must correspond to #defines in gld.h */
static char *gld_duplex[] = {
	"unknown",	/* GLD_DUPLEX_UNKNOWN - not known or not applicable */
	"half",		/* GLD_DUPLEX_HALF */
	"full"		/* GLD_DUPLEX_FULL */
};

extern int gld_interpret_ether(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
extern int gld_interpret_fddi(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
extern int gld_interpret_tr(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
extern int gld_interpret_ib(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
extern void gld_interpret_mdt_ib(gld_mac_info_t *, mblk_t *, pdescinfo_t *,
    pktinfo_t *, int);

extern mblk_t *gld_fastpath_ether(gld_t *, mblk_t *);
extern mblk_t *gld_fastpath_fddi(gld_t *, mblk_t *);
extern mblk_t *gld_fastpath_tr(gld_t *, mblk_t *);
extern mblk_t *gld_fastpath_ib(gld_t *, mblk_t *);

extern mblk_t *gld_unitdata_ether(gld_t *, mblk_t *);
extern mblk_t *gld_unitdata_fddi(gld_t *, mblk_t *);
extern mblk_t *gld_unitdata_tr(gld_t *, mblk_t *);
extern mblk_t *gld_unitdata_ib(gld_t *, mblk_t *);

extern void gld_init_ether(gld_mac_info_t *);
extern void gld_init_fddi(gld_mac_info_t *);
extern void gld_init_tr(gld_mac_info_t *);
extern void gld_init_ib(gld_mac_info_t *);

extern void gld_uninit_ether(gld_mac_info_t *);
extern void gld_uninit_fddi(gld_mac_info_t *);
extern void gld_uninit_tr(gld_mac_info_t *);
extern void gld_uninit_ib(gld_mac_info_t *);

/*
 * Interface types currently supported by GLD.
 * If you add new types, you must check all "XXX" strings in the GLD source
 * for implementation issues that may affect the support of your new type.
 * In particular, any type with gldm_addrlen > 6, or gldm_saplen != -2, will
 * require generalizing this GLD source to handle the new cases.  In other
 * words there are assumptions built into the code in a few places that must
 * be fixed.  Be sure to turn on DEBUG/ASSERT code when testing a new type.
 */
static gld_interface_t interfaces[] = {

	/* Ethernet Bus */
	{
		DL_ETHER,
		(uint_t)-1,
		sizeof (struct ether_mac_frm),
		gld_interpret_ether,
		NULL,
		gld_fastpath_ether,
		gld_unitdata_ether,
		gld_init_ether,
		gld_uninit_ether,
		"ether"
	},

	/* Fiber Distributed data interface */
	{
		DL_FDDI,
		4352,
		sizeof (struct fddi_mac_frm),
		gld_interpret_fddi,
		NULL,
		gld_fastpath_fddi,
		gld_unitdata_fddi,
		gld_init_fddi,
		gld_uninit_fddi,
		"fddi"
	},

	/* Token Ring interface */
	{
		DL_TPR,
		17914,
		-1,		/* variable header size */
		gld_interpret_tr,
		NULL,
		gld_fastpath_tr,
		gld_unitdata_tr,
		gld_init_tr,
		gld_uninit_tr,
		"tpr"
	},

	/* Infiniband */
	{
		DL_IB,
		4092,
		sizeof (struct ipoib_header),
		gld_interpret_ib,
		gld_interpret_mdt_ib,
		gld_fastpath_ib,
		gld_unitdata_ib,
		gld_init_ib,
		gld_uninit_ib,
		"ipib"
	},
};

/*
 * bit reversal lookup table.
 */
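/*
 * Each entry below is the bit-reversed value of its index; for example,
 * bit_rev[0x01] == 0x80 and bit_rev[0x03] == 0xc0.
 */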
static uchar_t bit_rev[] = {
	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0,
	0x30, 0xb0, 0x70, 0xf0, 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 0x04, 0x84, 0x44, 0xc4,
	0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc,
	0x3c, 0xbc, 0x7c, 0xfc, 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, 0x0a, 0x8a, 0x4a, 0xca,
	0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6,
	0x36, 0xb6, 0x76, 0xf6, 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 0x01, 0x81, 0x41, 0xc1,
	0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9,
	0x39, 0xb9, 0x79, 0xf9, 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, 0x0d, 0x8d, 0x4d, 0xcd,
	0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3,
	0x33, 0xb3, 0x73, 0xf3, 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 0x07, 0x87, 0x47, 0xc7,
	0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf,
	0x3f, 0xbf, 0x7f, 0xff,
};

/*
 * User priorities, mapped from b_band.
 */
static uint32_t user_priority[] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
};

#define	UPRI(gld, band)	((band != 0) ? user_priority[(band)] : (gld)->gld_upri)
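
/*
 * For example, a message sent with b_band 65 maps to user priority 2
 * (user_priority[65]), while b_band 0 falls back to the stream's
 * default gld_upri.
 */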

static struct glddevice gld_device_list;  /* Per-system root of GLD tables */

/*
 * Module linkage information for the kernel.
 */

static struct modldrv modlmisc = {
	&mod_miscops,		/* Type of module - a utility provider */
	"Generic LAN Driver (" GLD_VERSION_STRING ") %I%"
#ifdef GLD_DEBUG
	" DEBUG"
#endif
};

static struct modlinkage modlinkage = {
	MODREV_1, &modlmisc, NULL
};

int
_init(void)
{
	int e;

	/* initialize gld_device_list mutex */
	mutex_init(&gld_device_list.gld_devlock, NULL, MUTEX_DRIVER, NULL);

	/* initialize device driver (per-major) list */
	gld_device_list.gld_next =
	    gld_device_list.gld_prev = &gld_device_list;

	if ((e = mod_install(&modlinkage)) != 0)
		mutex_destroy(&gld_device_list.gld_devlock);

	return (e);
}

int
_fini(void)
{
	int e;

	if ((e = mod_remove(&modlinkage)) != 0)
		return (e);

	ASSERT(gld_device_list.gld_next ==
	    (glddev_t *)&gld_device_list.gld_next);
	ASSERT(gld_device_list.gld_prev ==
	    (glddev_t *)&gld_device_list.gld_next);
	mutex_destroy(&gld_device_list.gld_devlock);

	return (e);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

/*
 * GLD service routines
 */

/* So that this gld binary may be forward compatible with future v2 drivers */
#define	GLD_MAC_RESERVED	(16 * sizeof (caddr_t))

/*ARGSUSED*/
gld_mac_info_t *
gld_mac_alloc(dev_info_t *devinfo)
{
	gld_mac_info_t *macinfo;

	macinfo = kmem_zalloc(sizeof (gld_mac_info_t) + GLD_MAC_RESERVED,
	    KM_SLEEP);

	/*
	 * The setting of gldm_driver_version will not be documented or allowed
	 * until a future release.
	 */
	macinfo->gldm_driver_version = GLD_VERSION_200;

	/*
	 * GLD's version.  This also is undocumented for now, but will be
	 * available if needed in the future.
	 */
	macinfo->gldm_GLD_version = GLD_VERSION;

	return (macinfo);
}

/*
 * gld_mac_free must be called after the driver has removed interrupts
 * and completely stopped calling gld_recv() and gld_sched().  At that
 * point the interrupt routine is guaranteed by the system to have been
 * exited and the maclock is no longer needed.  Of course, it is
 * expected (required) that (assuming gld_register() succeeded),
 * gld_unregister() was called before gld_mac_free().
 */
void
gld_mac_free(gld_mac_info_t *macinfo)
{
	ASSERT(macinfo);
	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);

	/*
	 * Assert that if we made it through gld_register, then we must
	 * have unregistered.
	 */
	ASSERT(!GLDM_LOCK_INITED(macinfo) ||
	    (macinfo->gldm_GLD_flags & GLD_UNREGISTERED));

	GLDM_LOCK_DESTROY(macinfo);

	kmem_free(macinfo, sizeof (gld_mac_info_t) + GLD_MAC_RESERVED);
}

/*
 * gld_register -- called once per device instance (PPA)
 *
 * During its attach routine, a real device driver will register with GLD
 * so that later opens and dl_attach_reqs will work.  The arguments are the
 * devinfo pointer, the device name, and a macinfo structure describing the
 * physical device instance.
 */
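/*
 * Illustrative sketch of the expected driver-side calling sequence for a
 * hypothetical driver "xx" (names and error handling here are examples
 * only; see the gld(9E)/gld(9F) man pages for the authoritative contract):
 *
 *	xx_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
 *	{
 *		gld_mac_info_t *macinfo = gld_mac_alloc(devinfo);
 *
 *		macinfo->gldm_devinfo = devinfo;
 *		macinfo->gldm_type = DL_ETHER;
 *		macinfo->gldm_addrlen = ETHERADDRL;
 *		macinfo->gldm_saplen = -2;
 *		... fill in gldm_minpkt/gldm_maxpkt, gldm_ident, gldm_ppa,
 *		... gldm_broadcast_addr, gldm_vendor_addr and the gldm_*
 *		... entry points asserted below ...
 *
 *		if (gld_register(devinfo, "xx", macinfo) != DDI_SUCCESS) {
 *			gld_mac_free(macinfo);
 *			return (DDI_FAILURE);
 *		}
 *		return (DDI_SUCCESS);
 *	}
 *
 * The detach path is the mirror image: gld_unregister(), then
 * gld_mac_free() once the hardware can no longer call gld_recv() or
 * gld_sched() (see gld_mac_free() above).
 */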
int
gld_register(dev_info_t *devinfo, char *devname, gld_mac_info_t *macinfo)
{
	int mediatype;
	int major = ddi_name_to_major(devname), i;
	glddev_t *glddev;
	gld_mac_pvt_t *mac_pvt;
	char minordev[32];
	char pbuf[3*GLD_MAX_ADDRLEN];
	gld_interface_t *ifp;

	ASSERT(devinfo != NULL);
	ASSERT(macinfo != NULL);

	if (macinfo->gldm_driver_version != GLD_VERSION)
		return (DDI_FAILURE);

	mediatype = macinfo->gldm_type;

	/*
	 * Entry points should be ready for us.
	 * ioctl is optional.
	 * set_multicast and get_stats are optional in v0.
	 * intr is only required if you add an interrupt.
	 */
	ASSERT(macinfo->gldm_reset != NULL);
	ASSERT(macinfo->gldm_start != NULL);
	ASSERT(macinfo->gldm_stop != NULL);
	ASSERT(macinfo->gldm_set_mac_addr != NULL);
	ASSERT(macinfo->gldm_set_promiscuous != NULL);
	ASSERT(macinfo->gldm_send != NULL);

	ASSERT(macinfo->gldm_maxpkt >= macinfo->gldm_minpkt);
	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
	ASSERT(macinfo->gldm_broadcast_addr != NULL);
	ASSERT(macinfo->gldm_vendor_addr != NULL);
	ASSERT(macinfo->gldm_ident != NULL);

	if (macinfo->gldm_addrlen > GLD_MAX_ADDRLEN) {
		cmn_err(CE_WARN, "GLD: %s driver gldm_addrlen %d > %d not sup"
		    "ported", devname, macinfo->gldm_addrlen, GLD_MAX_ADDRLEN);
		return (DDI_FAILURE);
	}

	/*
	 * GLD only functions properly with saplen == -2
	 */
	if (macinfo->gldm_saplen != -2) {
		cmn_err(CE_WARN, "GLD: %s driver gldm_saplen %d != -2 "
		    "not supported", devname, macinfo->gldm_saplen);
		return (DDI_FAILURE);
	}

	/* see gld_rsrv() */
	if (ddi_getprop(DDI_DEV_T_NONE, devinfo, 0, "fast_recv", 0))
		macinfo->gldm_options |= GLDOPT_FAST_RECV;

	mutex_enter(&gld_device_list.gld_devlock);
	glddev = gld_devlookup(major);

	/*
	 * Allocate per-driver (major) data structure if necessary
	 */
	if (glddev == NULL) {
		/* first occurrence of this device name (major number) */
		glddev = GETSTRUCT(glddev_t, 1);
		if (glddev == NULL) {
			mutex_exit(&gld_device_list.gld_devlock);
			return (DDI_FAILURE);
		}
		(void) strncpy(glddev->gld_name, devname,
		    sizeof (glddev->gld_name) - 1);
		glddev->gld_major = major;
		glddev->gld_nextminor = GLD_MIN_CLONE_MINOR;
		glddev->gld_mac_next = glddev->gld_mac_prev =
		    (gld_mac_info_t *)&glddev->gld_mac_next;
		glddev->gld_str_next = glddev->gld_str_prev =
		    (gld_t *)&glddev->gld_str_next;
		mutex_init(&glddev->gld_devlock, NULL, MUTEX_DRIVER, NULL);

		/* allow increase of number of supported multicast addrs */
		glddev->gld_multisize = ddi_getprop(DDI_DEV_T_NONE,
		    devinfo, 0, "multisize", GLD_MAX_MULTICAST);

		/*
		 * Optionally restrict DLPI provider style
		 *
		 * -1 - don't create style 1 nodes
		 * -2 - don't create style 2 nodes
		 */
		glddev->gld_styles = ddi_getprop(DDI_DEV_T_NONE, devinfo, 0,
		    "gld-provider-styles", 0);

		/* Stuff that's needed before any PPA gets attached */
		glddev->gld_type = macinfo->gldm_type;
		glddev->gld_minsdu = macinfo->gldm_minpkt;
		glddev->gld_saplen = macinfo->gldm_saplen;
		glddev->gld_addrlen = macinfo->gldm_addrlen;
		glddev->gld_broadcast = kmem_zalloc(macinfo->gldm_addrlen,
		    KM_SLEEP);
		bcopy(macinfo->gldm_broadcast_addr,
		    glddev->gld_broadcast, macinfo->gldm_addrlen);
		glddev->gld_maxsdu = macinfo->gldm_maxpkt;
		gldinsque(glddev, gld_device_list.gld_prev);
	}
	glddev->gld_ndevice++;
	/* Now glddev can't go away until we unregister this mac (or fail) */
	mutex_exit(&gld_device_list.gld_devlock);

	/*
	 * Per-instance initialization
	 */

	/*
	 * Initialize per-mac structure that is private to GLD.
	 * Set up interface pointer.  These are device class specific pointers
	 * used to handle FDDI/TR/ETHER/IPoIB specific packets.
	 */
	for (i = 0; i < sizeof (interfaces)/sizeof (*interfaces); i++) {
		if (mediatype != interfaces[i].mac_type)
			continue;

		macinfo->gldm_mac_pvt = kmem_zalloc(sizeof (gld_mac_pvt_t),
		    KM_SLEEP);
		((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep = ifp =
		    &interfaces[i];
		break;
	}

	if (ifp == NULL) {
		cmn_err(CE_WARN, "GLD: this version does not support %s driver "
		    "of type %d", devname, mediatype);
		goto failure;
	}

	/*
	 * Driver can only register MTU within legal media range.
	 */
	if (macinfo->gldm_maxpkt > ifp->mtu_size) {
		cmn_err(CE_WARN, "GLD: oversize MTU is specified by driver %s",
		    devname);
		goto failure;
	}

	/*
	 * For now, only Infiniband drivers can use MDT.  Do not add
	 * support for Ethernet, FDDI or TR.
	 */
	if (macinfo->gldm_mdt_pre != NULL) {
		if (mediatype != DL_IB) {
			cmn_err(CE_WARN, "GLD: MDT not supported for %s "
			    "driver of type %d", devname, mediatype);
			goto failure;
		}

		/*
		 * Validate entry points.
		 */
		if ((macinfo->gldm_mdt_send == NULL) ||
		    (macinfo->gldm_mdt_post == NULL)) {
			cmn_err(CE_WARN, "GLD: invalid MDT entry points for "
			    "%s driver of type %d", devname, mediatype);
			goto failure;
		}
		macinfo->gldm_options |= GLDOPT_MDT;
	}

	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
	mac_pvt->major_dev = glddev;

	mac_pvt->curr_macaddr = kmem_zalloc(macinfo->gldm_addrlen, KM_SLEEP);
	/*
	 * XXX Do bit-reversed devices store gldm_vendor in canonical
	 * format or in wire format?  Also gldm_broadcast.  For now
	 * we are assuming canonical, but I'm not sure that makes the
	 * most sense for ease of driver implementation.
	 */
	bcopy(macinfo->gldm_vendor_addr, mac_pvt->curr_macaddr,
	    macinfo->gldm_addrlen);
	mac_pvt->statistics = kmem_zalloc(sizeof (struct gld_stats), KM_SLEEP);

	/*
	 * The available set of notifications is those generatable by GLD
	 * itself, plus those corresponding to the capabilities of the MAC
	 * driver, intersected with those supported by gld_notify_ind() above.
	 */
	mac_pvt->notifications = gld_internal_notes;
	if (macinfo->gldm_capabilities & GLD_CAP_LINKSTATE)
		mac_pvt->notifications |= gld_linkstate_notes;
	mac_pvt->notifications &= gld_supported_notes;

	GLDM_LOCK_INIT(macinfo);

	ddi_set_driver_private(devinfo, macinfo);

	/*
	 * Now atomically get a PPA and put ourselves on the mac list.
	 */
	mutex_enter(&glddev->gld_devlock);

#ifdef DEBUG
	if (macinfo->gldm_ppa != ddi_get_instance(devinfo))
		cmn_err(CE_WARN, "%s%d instance != ppa %d",
		    ddi_driver_name(devinfo), ddi_get_instance(devinfo),
		    macinfo->gldm_ppa);
#endif

	/*
	 * Create style 2 node (gated by gld-provider-styles property).
	 *
	 * NOTE: When the CLONE_DEV flag is specified to
	 * ddi_create_minor_node() the minor number argument is
	 * immaterial.  Opens of that node will go via the clone
	 * driver and gld_open() will always be passed a dev_t with
	 * minor of zero.
	 */
	if (glddev->gld_styles != -2) {
		if (ddi_create_minor_node(devinfo, glddev->gld_name, S_IFCHR,
		    0, DDI_NT_NET, CLONE_DEV) == DDI_FAILURE) {
			mutex_exit(&glddev->gld_devlock);
			goto late_failure;
		}
	}

	/*
	 * Create style 1 node (gated by gld-provider-styles property)
	 */
	if (glddev->gld_styles != -1) {
		(void) sprintf(minordev, "%s%d", glddev->gld_name,
		    macinfo->gldm_ppa);
		if (ddi_create_minor_node(devinfo, minordev, S_IFCHR,
		    GLD_STYLE1_PPA_TO_MINOR(macinfo->gldm_ppa), DDI_NT_NET,
		    0) != DDI_SUCCESS) {
			mutex_exit(&glddev->gld_devlock);
			goto late_failure;
		}
	}

	/* add ourselves to this major device's linked list of instances */
	gldinsque(macinfo, glddev->gld_mac_prev);

	mutex_exit(&glddev->gld_devlock);

	/*
	 * Unfortunately we need the ppa before we call gld_initstats();
	 * otherwise we would like to do this just above the mutex_enter
	 * above.  In which case we could have set MAC_READY inside the
	 * mutex and we wouldn't have needed to check it in open and
	 * DL_ATTACH.  We wouldn't like to do the initstats/kstat_create
	 * inside the mutex because it might get taken in our kstat_update
	 * routine and cause a deadlock with kstat_chain_lock.
	 */

	/* gld_initstats() calls (*ifp->init)() */
	if (gld_initstats(macinfo) != GLD_SUCCESS) {
		mutex_enter(&glddev->gld_devlock);
		gldremque(macinfo);
		mutex_exit(&glddev->gld_devlock);
		goto late_failure;
	}

	/*
	 * Need to indicate we are NOW ready to process interrupts;
	 * any interrupt before this is set is for someone else.
	 * This flag is also now used to tell open, et al. that this
	 * mac is now fully ready and available for use.
	 */
	GLDM_LOCK(macinfo, RW_WRITER);
	macinfo->gldm_GLD_flags |= GLD_MAC_READY;
	GLDM_UNLOCK(macinfo);

	/* log local ethernet address -- XXX not DDI compliant */
	if (macinfo->gldm_addrlen == sizeof (struct ether_addr))
		(void) localetheraddr(
		    (struct ether_addr *)macinfo->gldm_vendor_addr, NULL);

	/* now put announcement into the message buffer */
	cmn_err(CE_CONT, "!%s%d: %s: type \"%s\" mac address %s\n",
	    glddev->gld_name,
	    macinfo->gldm_ppa, macinfo->gldm_ident,
	    mac_pvt->interfacep->mac_string,
	    gld_macaddr_sprintf(pbuf, macinfo->gldm_vendor_addr,
	    macinfo->gldm_addrlen));

	ddi_report_dev(devinfo);
	return (DDI_SUCCESS);

late_failure:
	ddi_remove_minor_node(devinfo, NULL);
	GLDM_LOCK_DESTROY(macinfo);
	if (mac_pvt->curr_macaddr != NULL)
		kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
	if (mac_pvt->statistics != NULL)
		kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
	macinfo->gldm_mac_pvt = NULL;

failure:
	mutex_enter(&gld_device_list.gld_devlock);
	glddev->gld_ndevice--;
	/*
	 * Note that just because this goes to zero here does not necessarily
	 * mean that we were the one who added the glddev above.  It's
	 * possible that the first mac unattached while we were in here
	 * failing to attach the second mac.  But we're now the last.
	 */
	if (glddev->gld_ndevice == 0) {
		/* There should be no macinfos left */
		ASSERT(glddev->gld_mac_next ==
		    (gld_mac_info_t *)&glddev->gld_mac_next);
		ASSERT(glddev->gld_mac_prev ==
		    (gld_mac_info_t *)&glddev->gld_mac_next);

		/*
		 * There should be no DL_UNATTACHED streams: the system
		 * should not have detached the "first" devinfo which has
		 * all the open style 2 streams.
		 *
		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
		 */
		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);

		gldremque(glddev);
		mutex_destroy(&glddev->gld_devlock);
		if (glddev->gld_broadcast != NULL)
			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
		kmem_free(glddev, sizeof (glddev_t));
	}
	mutex_exit(&gld_device_list.gld_devlock);

	return (DDI_FAILURE);
}

/*
 * gld_unregister (macinfo)
 * remove the macinfo structure from local structures
 * this is cleanup for a driver to be unloaded
 */
int
gld_unregister(gld_mac_info_t *macinfo)
{
	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
	glddev_t *glddev = mac_pvt->major_dev;
	gld_interface_t *ifp;
	int multisize = sizeof (gld_mcast_t) * glddev->gld_multisize;

	mutex_enter(&glddev->gld_devlock);
	GLDM_LOCK(macinfo, RW_WRITER);

	if (mac_pvt->nvlan > 0) {
		GLDM_UNLOCK(macinfo);
		mutex_exit(&glddev->gld_devlock);
		return (DDI_FAILURE);
	}

#ifdef GLD_DEBUG
	{
		int i;

		for (i = 0; i < VLAN_HASHSZ; i++) {
			if ((mac_pvt->vlan_hash[i] != NULL))
				cmn_err(CE_PANIC,
				    "%s, line %d: "
				    "mac_pvt->vlan_hash[%d] != NULL",
				    __FILE__, __LINE__, i);
		}
	}
#endif

	/* Delete this mac */
	gldremque(macinfo);

	/* Disallow further entries to gld_recv() and gld_sched() */
	macinfo->gldm_GLD_flags |= GLD_UNREGISTERED;

	GLDM_UNLOCK(macinfo);
	mutex_exit(&glddev->gld_devlock);

	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
	(*ifp->uninit)(macinfo);

	ASSERT(mac_pvt->kstatp);
	kstat_delete(mac_pvt->kstatp);

	ASSERT(GLDM_LOCK_INITED(macinfo));
	kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
	kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));

	if (mac_pvt->mcast_table != NULL)
		kmem_free(mac_pvt->mcast_table, multisize);
	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
	macinfo->gldm_mac_pvt = (caddr_t)NULL;

	/* We now have one fewer instance for this major device */
	mutex_enter(&gld_device_list.gld_devlock);
	glddev->gld_ndevice--;
	if (glddev->gld_ndevice == 0) {
		/* There should be no macinfos left */
		ASSERT(glddev->gld_mac_next ==
		    (gld_mac_info_t *)&glddev->gld_mac_next);
		ASSERT(glddev->gld_mac_prev ==
		    (gld_mac_info_t *)&glddev->gld_mac_next);

		/*
		 * There should be no DL_UNATTACHED streams: the system
		 * should not have detached the "first" devinfo which has
		 * all the open style 2 streams.
		 *
		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
		 */
		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);

		ddi_remove_minor_node(macinfo->gldm_devinfo, NULL);
		gldremque(glddev);
		mutex_destroy(&glddev->gld_devlock);
		if (glddev->gld_broadcast != NULL)
			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
		kmem_free(glddev, sizeof (glddev_t));
	}
	mutex_exit(&gld_device_list.gld_devlock);

	return (DDI_SUCCESS);
}

/*
 * gld_initstats
 * called from gld_register
 */
static int
gld_initstats(gld_mac_info_t *macinfo)
{
	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
	struct gldkstats *sp;
	glddev_t *glddev;
	kstat_t *ksp;
	gld_interface_t *ifp;

	glddev = mac_pvt->major_dev;

	if ((ksp = kstat_create(glddev->gld_name, macinfo->gldm_ppa,
	    NULL, "net", KSTAT_TYPE_NAMED,
	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
		cmn_err(CE_WARN,
		    "GLD: failed to create kstat structure for %s%d",
		    glddev->gld_name, macinfo->gldm_ppa);
		return (GLD_FAILURE);
	}
	mac_pvt->kstatp = ksp;

	ksp->ks_update = gld_update_kstat;
	ksp->ks_private = (void *)macinfo;

	sp = ksp->ks_data;
	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);

	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);

	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
	    KSTAT_DATA_UINT32);

	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;

	(*ifp->init)(macinfo);

	kstat_install(ksp);

	return (GLD_SUCCESS);
}

/* called from kstat mechanism, and from wsrv's get_statistics_req */
static int
gld_update_kstat(kstat_t *ksp, int rw)
{
	gld_mac_info_t *macinfo;
	gld_mac_pvt_t *mac_pvt;
	struct gldkstats *gsp;
	struct gld_stats *stats;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	macinfo = (gld_mac_info_t *)ksp->ks_private;
	ASSERT(macinfo != NULL);

	GLDM_LOCK(macinfo, RW_WRITER);

	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
		GLDM_UNLOCK(macinfo);
		return (EIO);	/* this one's not ready yet */
	}

	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
		GLDM_UNLOCK(macinfo);
		return (EIO);	/* this one's not ready any more */
	}

	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
	gsp = mac_pvt->kstatp->ks_data;
	ASSERT(gsp);
	stats = mac_pvt->statistics;

	if (macinfo->gldm_get_stats)
		(void) (*macinfo->gldm_get_stats)(macinfo, stats);

	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;	/* 0 for now */
	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;

	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
	gsp->glds_xmtbadinterp.value.ui32 = stats->glds_xmtbadinterp;

	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
	gsp->glds_multircv.value.ul = stats->glds_multircv;
	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
	gsp->glds_blocked.value.ul = stats->glds_blocked;
	gsp->glds_overflow.value.ul = stats->glds_overflow;
	gsp->glds_underflow.value.ul = stats->glds_underflow;
	gsp->glds_missed.value.ul = stats->glds_missed;
	gsp->glds_norcvbuf.value.ul = stats->glds_norcvbuf +
	    stats->glds_gldnorcvbuf;
	gsp->glds_intr.value.ul = stats->glds_intr;

	gsp->glds_speed.value.ui64 = stats->glds_speed;
	gsp->glds_unknowns.value.ul = stats->glds_unknowns;
	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
	gsp->glds_rcvbadinterp.value.ui32 = stats->glds_rcvbadinterp;

	if (mac_pvt->nprom)
		(void) strcpy(gsp->glds_prom.value.c, "phys");
	else if (mac_pvt->nprom_multi)
		(void) strcpy(gsp->glds_prom.value.c, "multi");
	else
		(void) strcpy(gsp->glds_prom.value.c, "off");

	(void) strcpy(gsp->glds_media.value.c, gld_media[
	    stats->glds_media < sizeof (gld_media) / sizeof (gld_media[0])
	    ? stats->glds_media : 0]);

	switch (macinfo->gldm_type) {
	case DL_ETHER:
		gsp->glds_frame.value.ul = stats->glds_frame;
		gsp->glds_crc.value.ul = stats->glds_crc;
		gsp->glds_collisions.value.ul = stats->glds_collisions;
		gsp->glds_excoll.value.ul = stats->glds_excoll;
		gsp->glds_defer.value.ul = stats->glds_defer;
		gsp->glds_short.value.ul = stats->glds_short;
		gsp->glds_xmtlatecoll.value.ul = stats->glds_xmtlatecoll;
		gsp->glds_nocarrier.value.ul = stats->glds_nocarrier;
		gsp->glds_dot3_first_coll.value.ui32 =
		    stats->glds_dot3_first_coll;
		gsp->glds_dot3_multi_coll.value.ui32 =
		    stats->glds_dot3_multi_coll;
		gsp->glds_dot3_sqe_error.value.ui32 =
		    stats->glds_dot3_sqe_error;
		gsp->glds_dot3_mac_xmt_error.value.ui32 =
		    stats->glds_dot3_mac_xmt_error;
		gsp->glds_dot3_mac_rcv_error.value.ui32 =
		    stats->glds_dot3_mac_rcv_error;
		gsp->glds_dot3_frame_too_long.value.ui32 =
		    stats->glds_dot3_frame_too_long;
		(void) strcpy(gsp->glds_duplex.value.c, gld_duplex[
		    stats->glds_duplex <
		    sizeof (gld_duplex) / sizeof (gld_duplex[0]) ?
		    stats->glds_duplex : 0]);
		break;
	case DL_TPR:
		gsp->glds_dot5_line_error.value.ui32 =
		    stats->glds_dot5_line_error;
		gsp->glds_dot5_burst_error.value.ui32 =
		    stats->glds_dot5_burst_error;
		gsp->glds_dot5_signal_loss.value.ui32 =
		    stats->glds_dot5_signal_loss;
		gsp->glds_dot5_ace_error.value.ui32 =
		    stats->glds_dot5_ace_error;
		gsp->glds_dot5_internal_error.value.ui32 =
		    stats->glds_dot5_internal_error;
		gsp->glds_dot5_lost_frame_error.value.ui32 =
		    stats->glds_dot5_lost_frame_error;
		gsp->glds_dot5_frame_copied_error.value.ui32 =
		    stats->glds_dot5_frame_copied_error;
		gsp->glds_dot5_token_error.value.ui32 =
		    stats->glds_dot5_token_error;
		gsp->glds_dot5_freq_error.value.ui32 =
		    stats->glds_dot5_freq_error;
		break;
	case DL_FDDI:
		gsp->glds_fddi_mac_error.value.ui32 =
		    stats->glds_fddi_mac_error;
		gsp->glds_fddi_mac_lost.value.ui32 =
		    stats->glds_fddi_mac_lost;
		gsp->glds_fddi_mac_token.value.ui32 =
		    stats->glds_fddi_mac_token;
		gsp->glds_fddi_mac_tvx_expired.value.ui32 =
		    stats->glds_fddi_mac_tvx_expired;
		gsp->glds_fddi_mac_late.value.ui32 =
		    stats->glds_fddi_mac_late;
		gsp->glds_fddi_mac_ring_op.value.ui32 =
		    stats->glds_fddi_mac_ring_op;
		break;
	case DL_IB:
		break;
	default:
		break;
	}

	GLDM_UNLOCK(macinfo);

#ifdef GLD_DEBUG
	gld_check_assertions();
	if (gld_debug & GLDRDE)
		gld_sr_dump(macinfo);
#endif

	return (0);
}

static int
gld_init_vlan_stats(gld_vlan_t *vlan)
{
	gld_mac_info_t *mac = vlan->gldv_mac;
	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
	struct gldkstats *sp;
	glddev_t *glddev;
	kstat_t *ksp;
	char *name;
	int instance;

	glddev = mac_pvt->major_dev;
	name = glddev->gld_name;
	instance = (vlan->gldv_id * GLD_VLAN_SCALE) + mac->gldm_ppa;

	if ((ksp = kstat_create(name, instance,
	    NULL, "net", KSTAT_TYPE_NAMED,
	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
		cmn_err(CE_WARN,
		    "GLD: failed to create kstat structure for %s%d",
		    name, instance);
		return (GLD_FAILURE);
	}

	vlan->gldv_kstatp = ksp;

	ksp->ks_update = gld_update_vlan_kstat;
	ksp->ks_private = (void *)vlan;

	sp = ksp->ks_data;
	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);

	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);

	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
	    KSTAT_DATA_UINT32);
	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
	    KSTAT_DATA_UINT32);

	kstat_install(ksp);
	return (GLD_SUCCESS);
}

static int
gld_update_vlan_kstat(kstat_t *ksp, int rw)
{
	gld_vlan_t *vlan;
	gld_mac_info_t *macinfo;
	struct gldkstats *gsp;
	struct gld_stats *stats;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	vlan = (gld_vlan_t *)ksp->ks_private;
	ASSERT(vlan != NULL);

	macinfo = vlan->gldv_mac;
	GLDM_LOCK(macinfo, RW_WRITER);

	gsp = vlan->gldv_kstatp->ks_data;
	ASSERT(gsp);
	stats = vlan->gldv_stats;

	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;
	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;

	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
	gsp->glds_multircv.value.ul = stats->glds_multircv;
	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
	gsp->glds_blocked.value.ul = stats->glds_blocked;
	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;

	GLDM_UNLOCK(macinfo);
	return (0);
}

/*
 * The device dependent driver specifies gld_getinfo as its getinfo routine.
 */
/*ARGSUSED*/
int
gld_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
{
	dev_info_t *devinfo;
	minor_t minor = getminor((dev_t)arg);
	int rc = DDI_FAILURE;

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO:
		if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
			*(dev_info_t **)resultp = devinfo;
			rc = DDI_SUCCESS;
		}
		break;
	case DDI_INFO_DEVT2INSTANCE:
		/* Need static mapping for deferred attach */
		if (minor == GLD_USE_STYLE2) {
			/*
			 * Style 2: this minor number does not correspond to
			 * any particular instance number.
			 */
			rc = DDI_FAILURE;
		} else if (minor <= GLD_MAX_STYLE1_MINOR) {
			/* Style 1: calculate the PPA from the minor */
			*(int *)resultp = GLD_STYLE1_MINOR_TO_PPA(minor);
			rc = DDI_SUCCESS;
		} else {
			/* Clone: look for it.  Not a static mapping */
			if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
				*(int *)resultp = ddi_get_instance(devinfo);
				rc = DDI_SUCCESS;
			}
		}
		break;
	}

	return (rc);
}

/* called from gld_getinfo */
dev_info_t *
gld_finddevinfo(dev_t dev)
{
	minor_t minor = getminor(dev);
	glddev_t *device;
	gld_mac_info_t *mac;
	gld_vlan_t *vlan;
	gld_t *str;
	dev_info_t *devinfo = NULL;
	int i;

	if (minor == GLD_USE_STYLE2) {
		/*
		 * Style 2: this minor number does not correspond to
		 * any particular instance number.
		 *
		 * XXX We don't know what to say.  See Bug 1165519.
		 */
		return (NULL);
	}

	mutex_enter(&gld_device_list.gld_devlock);	/* hold the device */

	device = gld_devlookup(getmajor(dev));
	if (device == NULL) {
		/* There are no attached instances of this device */
		mutex_exit(&gld_device_list.gld_devlock);
		return (NULL);
	}

	/*
	 * Search all attached macs and streams.
	 *
	 * XXX We don't bother checking the DL_UNATTACHED streams since
	 * we don't know what devinfo we should report back even if we
	 * found the minor.  Maybe we should associate streams that are
	 * not currently attached to a PPA with the "first" devinfo node
	 * of the major device to attach -- the one that created the
	 * minor node for the generic device.
	 */
	mutex_enter(&device->gld_devlock);

	for (mac = device->gld_mac_next;
	    mac != (gld_mac_info_t *)&device->gld_mac_next;
	    mac = mac->gldm_next) {
		gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;

		if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
			continue;	/* this one's not ready yet */
		if (minor <= GLD_MAX_STYLE1_MINOR) {
			/* Style 1 -- look for the corresponding PPA */
			if (minor == GLD_STYLE1_PPA_TO_MINOR(mac->gldm_ppa)) {
				devinfo = mac->gldm_devinfo;
				goto out;	/* found it! */
			} else
				continue;	/* not this PPA */
		}

		/* We are looking for a clone */
		for (i = 0; i < VLAN_HASHSZ; i++) {
			for (vlan = pvt->vlan_hash[i];
			    vlan != NULL; vlan = vlan->gldv_next) {
				for (str = vlan->gldv_str_next;
				    str != (gld_t *)&vlan->gldv_str_next;
				    str = str->gld_next) {
					ASSERT(str->gld_mac_info == mac);
					if (minor == str->gld_minor) {
						devinfo = mac->gldm_devinfo;
						goto out;
					}
				}
			}
		}
	}
out:
	mutex_exit(&device->gld_devlock);
	mutex_exit(&gld_device_list.gld_devlock);
	return (devinfo);
}

/*
 * STREAMS open routine.  The device dependent driver specifies this as its
 * open entry point.
 */
/*ARGSUSED2*/
int
gld_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *cred)
{
	gld_mac_pvt_t *mac_pvt;
	gld_t *gld;
	glddev_t *glddev;
	gld_mac_info_t *macinfo;
	minor_t minor = getminor(*dev);
	gld_vlan_t *vlan;
	t_uscalar_t ppa;

	ASSERT(q != NULL);

	if (minor > GLD_MAX_STYLE1_MINOR)
		return (ENXIO);

	ASSERT(q->q_ptr == NULL);	/* Clone device gives us a fresh Q */

	/* Find our per-major glddev_t structure */
	mutex_enter(&gld_device_list.gld_devlock);
	glddev = gld_devlookup(getmajor(*dev));

	/*
	 * This glddev will hang around since detach (and therefore
	 * gld_unregister) can't run while we're here in the open routine.
	 */
	mutex_exit(&gld_device_list.gld_devlock);

	if (glddev == NULL)
		return (ENXIO);

#ifdef GLD_DEBUG
	if (gld_debug & GLDPROT) {
		if (minor == GLD_USE_STYLE2)
			cmn_err(CE_NOTE, "gld_open(%p, Style 2)", (void *)q);
		else
			cmn_err(CE_NOTE, "gld_open(%p, Style 1, minor = %d)",
			    (void *)q, minor);
	}
#endif

	/*
	 * get a per-stream structure and link things together so we
	 * can easily find them later.
	 */
	gld = kmem_zalloc(sizeof (gld_t), KM_SLEEP);

	/*
	 * fill in the structure and state info
	 */
	gld->gld_qptr = q;
	gld->gld_device = glddev;
	gld->gld_state = DL_UNATTACHED;

	/*
	 * we must atomically find a free minor number and add the stream
	 * to a list, because gld_findminor has to traverse the lists to
	 * determine which minor numbers are free.
	 */
	mutex_enter(&glddev->gld_devlock);

	/* find a free minor device number for the clone */
	gld->gld_minor = gld_findminor(glddev);
	if (gld->gld_minor == 0) {
		mutex_exit(&glddev->gld_devlock);
		kmem_free(gld, sizeof (gld_t));
		return (ENOSR);
	}

#ifdef GLD_VERBOSE_DEBUG
	if (gld_debug & GLDPROT)
		cmn_err(CE_NOTE, "gld_open() gld ptr: %p minor: %d",
		    (void *)gld, gld->gld_minor);
#endif

	if (minor == GLD_USE_STYLE2) {
		gld->gld_style = DL_STYLE2;
		*dev = makedevice(getmajor(*dev), gld->gld_minor);
		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
		gldinsque(gld, glddev->gld_str_prev);
#ifdef GLD_VERBOSE_DEBUG
		if (gld_debug & GLDPROT)
			cmn_err(CE_NOTE, "GLDstruct added to device list");
#endif
		(void) qassociate(q, -1);
		goto done;
	}

	gld->gld_style = DL_STYLE1;

	/* the PPA is actually 1 less than the minordev */
	ppa = GLD_STYLE1_MINOR_TO_PPA(minor);

	for (macinfo = glddev->gld_mac_next;
	    macinfo != (gld_mac_info_t *)(&glddev->gld_mac_next);
	    macinfo = macinfo->gldm_next) {
		ASSERT(macinfo != NULL);
		if (macinfo->gldm_ppa != ppa)
			continue;

		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
			continue;	/* this one's not ready yet */

		/*
		 * we found the correct PPA
		 */
		GLDM_LOCK(macinfo, RW_WRITER);

		gld->gld_mac_info = macinfo;

		if (macinfo->gldm_send_tagged != NULL)
			gld->gld_send = macinfo->gldm_send_tagged;
		else
			gld->gld_send = macinfo->gldm_send;

		/* now ready for action */
		gld->gld_state = DL_UNBOUND;

		if ((vlan = gld_get_vlan(macinfo, VLAN_VID_NONE)) == NULL) {
			GLDM_UNLOCK(macinfo);
			mutex_exit(&glddev->gld_devlock);
			kmem_free(gld, sizeof (gld_t));
			return (EIO);
		}

		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
		if (!mac_pvt->started) {
			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
				GLDM_UNLOCK(macinfo);
				mutex_exit(&glddev->gld_devlock);
				kmem_free(gld, sizeof (gld_t));
				return (EIO);
			}
		}

		gld->gld_vlan = vlan;
		vlan->gldv_nstreams++;
		gldinsque(gld, vlan->gldv_str_prev);
		*dev = makedevice(getmajor(*dev), gld->gld_minor);
		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;

		GLDM_UNLOCK(macinfo);
#ifdef GLD_VERBOSE_DEBUG
		if (gld_debug & GLDPROT)
			cmn_err(CE_NOTE,
			    "GLDstruct added to instance list");
#endif
		break;
	}

	if (gld->gld_state == DL_UNATTACHED) {
		mutex_exit(&glddev->gld_devlock);
		kmem_free(gld, sizeof (gld_t));
		return (ENXIO);
	}

done:
	mutex_exit(&glddev->gld_devlock);
	noenable(WR(q));	/* We'll do the qenables manually */
	qprocson(q);		/* start the queues running */
	qenable(WR(q));
	return (0);
}

/*
 * normal stream close call checks current status and cleans up
 * data structures that were dynamically allocated
 */
/*ARGSUSED1*/
int
gld_close(queue_t *q, int flag, cred_t *cred)
{
	gld_t *gld = (gld_t *)q->q_ptr;
	glddev_t *glddev = gld->gld_device;

	ASSERT(q);
	ASSERT(gld);

#ifdef GLD_DEBUG
	if (gld_debug & GLDPROT) {
		cmn_err(CE_NOTE, "gld_close(%p, Style %d)",
		    (void *)q, (gld->gld_style & 0x1) + 1);
	}
#endif

	/* Hold all device streams lists still while we check for a macinfo */
	mutex_enter(&glddev->gld_devlock);

	if (gld->gld_mac_info != NULL) {
		/* If there's a macinfo, block recv while we change state */
		GLDM_LOCK(gld->gld_mac_info, RW_WRITER);
		gld->gld_flags |= GLD_STR_CLOSING; /* no more rcv putnexts */
		GLDM_UNLOCK(gld->gld_mac_info);
	} else {
		/* no mac DL_ATTACHED right now */
		gld->gld_flags |= GLD_STR_CLOSING;
	}

	mutex_exit(&glddev->gld_devlock);

	/*
	 * qprocsoff before we call gld_unbind/gldunattach, so that
	 * we know wsrv isn't in there trying to undo what we're doing.
	 */
	qprocsoff(q);

	ASSERT(gld->gld_wput_count == 0);
	gld->gld_wput_count = 0;	/* just in case */

	if (gld->gld_state == DL_IDLE) {
		/* Need to unbind */
		ASSERT(gld->gld_mac_info != NULL);
		(void) gld_unbind(WR(q), NULL);
	}

	if (gld->gld_state == DL_UNBOUND) {
		/*
		 * Need to unattach.
		 * For a style 2 stream, gldunattach also
		 * associates the queue with a NULL dip.
		 */
		ASSERT(gld->gld_mac_info != NULL);
		(void) gldunattach(WR(q), NULL);
	}

	/* disassociate the stream from the device */
	q->q_ptr = WR(q)->q_ptr = NULL;

	/*
	 * Since we unattached above (if necessary), we know that we're
	 * on the per-major list of unattached streams, rather than a
	 * per-PPA list.  So we know we should hold the devlock.
	 */
	mutex_enter(&glddev->gld_devlock);
	gldremque(gld);		/* remove from Style 2 list */
	mutex_exit(&glddev->gld_devlock);

	kmem_free(gld, sizeof (gld_t));

	return (0);
}

/*
 * gld_rsrv (q)
 *	simple read service procedure
 *	purpose is to avoid the time it takes for packets
 *	to move through IP so we can get them off the board
 *	as fast as possible due to limited PC resources.
 *
 *	This is not normally used in the current implementation.  It
 *	can be selected with the undocumented property "fast_recv".
 *	If that property is set, gld_recv will send the packet
 *	upstream with a putq() rather than a putnext(), thus causing
 *	this routine to be scheduled.
 */
int
gld_rsrv(queue_t *q)
{
	mblk_t *mp;

	while ((mp = getq(q)) != NULL) {
		if (canputnext(q)) {
			putnext(q, mp);
		} else {
			freemsg(mp);
		}
	}
	return (0);
}

/*
 * gld_wput (q, mp)
 *	general gld stream write put routine.  Receives fastpath data from
 *	upper modules and processes it immediately.  ioctl and
 *	M_PROTO/M_PCPROTO are queued for later processing by the service
 *	procedure.
 */

int
gld_wput(queue_t *q, mblk_t *mp)
{
	gld_t *gld = (gld_t *)(q->q_ptr);
	int rc;
	boolean_t multidata = B_TRUE;

#ifdef GLD_DEBUG
	if (gld_debug & GLDTRACE)
		cmn_err(CE_NOTE, "gld_wput(%p %p): type %x",
		    (void *)q, (void *)mp, DB_TYPE(mp));
#endif
	switch (DB_TYPE(mp)) {

	case M_DATA:
		/* fast data / raw support */
		/* we must be DL_ATTACHED and DL_BOUND to do this */
		/* Tricky to access memory without taking the mutex */
		if ((gld->gld_flags & (GLD_RAW | GLD_FAST)) == 0 ||
		    gld->gld_state != DL_IDLE) {
			merror(q, mp, EPROTO);
			break;
		}
		multidata = B_FALSE;
		/* LINTED: E_CASE_FALLTHRU */
	case M_MULTIDATA:
		/* Only call gld_start() directly if nothing queued ahead */
		/* No guarantees about ordering with different threads */
		if (q->q_first)
			goto use_wsrv;

		/*
		 * This can happen if wsrv has taken off the last mblk but
		 * is still processing it.
		 */
		membar_consumer();
		if (gld->gld_in_wsrv)
			goto use_wsrv;

		/*
		 * Keep a count of current wput calls to start.
		 * Nonzero count delays any attempted DL_UNBIND.
		 * See comments above gld_start().
		 */
		atomic_add_32((uint32_t *)&gld->gld_wput_count, 1);
		membar_enter();

		/* Recheck state now wput_count is set to prevent DL_UNBIND */
		/* If this Q is in process of DL_UNBIND, don't call start */
		if (gld->gld_state != DL_IDLE || gld->gld_in_unbind) {
			/* Extremely unlikely */
			atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
			goto use_wsrv;
		}

		rc = (multidata) ? gld_start_mdt(q, mp, GLD_WPUT) :
		    gld_start(q, mp, GLD_WPUT, UPRI(gld, mp->b_band));

		/* Allow DL_UNBIND again */
		membar_exit();
		atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);

		if (rc == GLD_NORESOURCES)
			qenable(q);
		break;	/* Done with this packet */

use_wsrv:
		/* Q not empty, in DL_DETACH, or start gave NORESOURCES */
		(void) putq(q, mp);
		qenable(q);
		break;

	case M_IOCTL:
		/* ioctl relies on wsrv single threading per queue */
		(void) putq(q, mp);
		qenable(q);
		break;

	case M_CTL:
		(void) putq(q, mp);
		qenable(q);
		break;

	case M_FLUSH:	/* canonical flush handling */
		/* XXX Should these be FLUSHALL? */
		if (*mp->b_rptr & FLUSHW)
			flushq(q, 0);
		if (*mp->b_rptr & FLUSHR) {
			flushq(RD(q), 0);
			*mp->b_rptr &= ~FLUSHW;
			qreply(q, mp);
		} else
			freemsg(mp);
		break;

	case M_PROTO:
	case M_PCPROTO:
		/* these rely on wsrv single threading per queue */
		(void) putq(q, mp);
		qenable(q);
		break;

	default:
#ifdef GLD_DEBUG
		if (gld_debug & GLDETRACE)
			cmn_err(CE_WARN,
			    "gld: Unexpected packet type from queue: 0x%x",
			    DB_TYPE(mp));
#endif
		freemsg(mp);
	}
	return (0);
}

/*
 * gld_wsrv - Incoming messages are processed according to the DLPI protocol
 * specification.
 *
 * wsrv is single-threaded per Q.  We make use of this to avoid taking the
 * lock for reading data items that are only ever written by us.
 */

int
gld_wsrv(queue_t *q)
{
	mblk_t *mp;
	gld_t *gld = (gld_t *)q->q_ptr;
	gld_mac_info_t *macinfo;
	union DL_primitives *prim;
	int err;
	boolean_t multidata;

#ifdef GLD_DEBUG
	if (gld_debug & GLDTRACE)
		cmn_err(CE_NOTE, "gld_wsrv(%p)", (void *)q);
#endif

	ASSERT(!gld->gld_in_wsrv);

	gld->gld_xwait = B_FALSE; /* We are now going to process this Q */

	if (q->q_first == NULL)
		return (0);

	macinfo = gld->gld_mac_info;

	/*
	 * Help wput avoid a call to gld_start if there might be a message
	 * previously queued by that thread being processed here.
	 */
	gld->gld_in_wsrv = B_TRUE;
	membar_enter();

	while ((mp = getq(q)) != NULL) {
		switch (DB_TYPE(mp)) {
		case M_DATA:
		case M_MULTIDATA:
			multidata = (DB_TYPE(mp) == M_MULTIDATA);

			/*
			 * This is a retry of a previously processed
			 * UNITDATA_REQ, or a RAW or FAST message from above.
			 */
			if (macinfo == NULL) {
				/* No longer attached to a PPA, drop packet */
				freemsg(mp);
				break;
			}

			gld->gld_sched_ran = B_FALSE;
			membar_enter();
			err = (multidata) ? gld_start_mdt(q, mp, GLD_WSRV) :
			    gld_start(q, mp, GLD_WSRV, UPRI(gld, mp->b_band));
			if (err == GLD_NORESOURCES) {
				/* gld_sched will qenable us later */
				gld->gld_xwait = B_TRUE; /* want qenable */
				membar_enter();
				/*
				 * v2: we're not holding the lock; it's
				 * possible that the driver could have already
				 * called gld_sched (following up on its
				 * return of GLD_NORESOURCES), before we got a
				 * chance to do the putbq() and set gld_xwait.
				 * So if we saw a call to gld_sched that
				 * examined this queue, since our call to
				 * gld_start() above, then it's possible we've
				 * already seen the only call to gld_sched()
				 * we're ever going to see.  So we better retry
				 * transmitting this packet right now.
				 */
				if (gld->gld_sched_ran) {
#ifdef GLD_DEBUG
					if (gld_debug & GLDTRACE)
						cmn_err(CE_NOTE, "gld_wsrv: "
						    "sched was called");
#endif
					break;	/* try again right now */
				}
				gld->gld_in_wsrv = B_FALSE;
				return (0);
			}
			break;

		case M_IOCTL:
			(void) gld_ioctl(q, mp);
			break;

		case M_CTL:
			if (macinfo == NULL) {
				freemsg(mp);
				break;
			}

			if (macinfo->gldm_mctl != NULL) {
				GLDM_LOCK(macinfo, RW_WRITER);
				(void) (*macinfo->gldm_mctl) (macinfo, q, mp);
				GLDM_UNLOCK(macinfo);
			} else {
				/* This driver doesn't recognize it, drop it */
				freemsg(mp);
			}
			break;

		case M_PROTO:	/* Will be a DLPI message of some type */
		case M_PCPROTO:
			if ((err = gld_cmds(q, mp)) != GLDE_OK) {
				if (err == GLDE_RETRY) {
					gld->gld_in_wsrv = B_FALSE;
					return (0); /* quit while we're ahead */
				}
				prim = (union DL_primitives *)mp->b_rptr;
				dlerrorack(q, mp, prim->dl_primitive, err, 0);
			}
			break;

		default:
			/* This should never happen */
#ifdef GLD_DEBUG
			if (gld_debug & GLDERRS)
				cmn_err(CE_WARN,
				    "gld_wsrv: db_type(%x) not supported",
				    mp->b_datap->db_type);
#endif
			freemsg(mp);	/* unknown types are discarded */
			break;
		}
	}

	membar_exit();
	gld->gld_in_wsrv = B_FALSE;
	return (0);
}

/*
 * gld_start() can get called from gld_wput(), gld_wsrv(), or gld_unitdata().
 *
 * We only come directly from wput() in the GLD_FAST (fastpath) or RAW case.
 *
 * In particular, we must avoid calling gld_precv*() if we came from wput().
 * gld_precv*() is where we, on the transmit side, loop back our outgoing
 * packets to the receive side if we are in physical promiscuous mode.
 * Since the receive side holds a lock across its call to the upstream
 * putnext, and that upstream module could well have looped back to our
 * wput() routine on the same thread, we cannot call gld_precv* from here
 * for fear of causing a recursive lock entry in our receive code.
 *
 * There is a problem here when coming from gld_wput().  While wput
 * only comes here if the queue is attached to a PPA and bound to a SAP
 * and there are no messages on the queue ahead of the M_DATA that could
 * change that, it is theoretically possible that another thread could
 * now wput a DL_UNBIND and a DL_DETACH message, and the wsrv() routine
 * could wake up and process them, before we finish processing this
 * send of the M_DATA.  This can only possibly happen on a Style 2 RAW or
 * FAST (fastpath) stream:  non RAW/FAST streams always go through wsrv(),
 * and Style 1 streams only DL_DETACH in the close routine, where
 * qprocsoff() protects us.  If this happens we could end up calling
 * gldm_send() after we have detached the stream and possibly called
 * gldm_stop().  Worse, once the number of attached streams goes to zero,
 * detach/unregister could be called, and the macinfo could go away entirely.
 *
 * No one has ever seen this happen.
 *
 * It is some trouble to fix this, and we would rather not add any mutex
 * logic into the wput() routine, which is supposed to be a "fast"
 * path.
 *
 * What I've done is use an atomic counter to keep a count of the number
 * of threads currently calling gld_start() from wput() on this stream.
 * If DL_DETACH sees this as nonzero, it putbqs the request back onto
 * the queue and qenables, hoping to have better luck next time.  Since
 * people shouldn't be trying to send after they've asked to DL_DETACH,
 * hopefully very soon all the wput=>start threads should have returned
 * and the DL_DETACH will succeed.  It's hard to test this since the odds
 * of the failure even trying to happen are so small.  I probably could
 * have ignored the whole issue and never been the worse for it.
 */
static int
gld_start(queue_t *q, mblk_t *mp, int caller, uint32_t upri)
{
	mblk_t *nmp;
	gld_t *gld = (gld_t *)q->q_ptr;
	gld_mac_info_t *macinfo;
	gld_mac_pvt_t *mac_pvt;
	int rc;
	gld_interface_t *ifp;
	pktinfo_t pktinfo;
	uint32_t vtag;
	gld_vlan_t *vlan;

	ASSERT(DB_TYPE(mp) == M_DATA);
	macinfo = gld->gld_mac_info;
	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
	ifp = mac_pvt->interfacep;
	vlan = (gld_vlan_t *)gld->gld_vlan;

	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_TX) != 0) {
		freemsg(mp);
#ifdef GLD_DEBUG
		if (gld_debug & GLDERRS)
			cmn_err(CE_WARN,
			    "gld_start: failed to interpret outbound packet");
#endif
		vlan->gldv_stats->glds_xmtbadinterp++;
		return (GLD_BADARG);
	}

	/*
	 * We're not holding the lock for this check.  If the promiscuous
	 * state is in flux it doesn't matter much if we get this wrong.
2014 */
2015 if (mac_pvt->nprom > 0) {
2016 /*
2017 * We want to loopback to the receive side, but to avoid
2018 * recursive lock entry: if we came from wput(), which
2019 * could have looped back via IP from our own receive
2020 * interrupt thread, we decline this request. wput()
2021 * will then queue the packet for wsrv(). This means
2022 * that when snoop is running we don't get the advantage
2023 * of the wput() multithreaded direct entry to the
2024 * driver's send routine.
2025 */
2026 if (caller == GLD_WPUT) {
2027 (void) putbq(q, mp);
2028 return (GLD_NORESOURCES);
2029 }
2030 if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
2031 nmp = dupmsg_noloan(mp);
2032 else
2033 nmp = dupmsg(mp);
2034 } else
2035 nmp = NULL; /* we need no loopback */
2036 
2037 vtag = GLD_MK_VTAG(vlan->gldv_ptag, upri);
2038 if (ifp->hdr_size > 0 &&
2039 pktinfo.pktLen > ifp->hdr_size + (vtag == 0 ? 0 : VTAG_SIZE) +
2040 macinfo->gldm_maxpkt) {
2041 freemsg(mp); /* discard oversized outbound packet */
2042 if (nmp)
2043 freemsg(nmp); /* free the duped message */
2044 #ifdef GLD_DEBUG
2045 if (gld_debug & GLDERRS)
2046 cmn_err(CE_WARN,
2047 "gld_start: oversize outbound packet, size %d, "
2048 "max %d", pktinfo.pktLen,
2049 ifp->hdr_size + macinfo->gldm_maxpkt);
2050 #endif
2051 vlan->gldv_stats->glds_xmtbadinterp++;
2052 return (GLD_BADARG);
2053 }
2054 
2055 rc = (*gld->gld_send)(macinfo, mp, vtag);
2056 
2057 if (rc != GLD_SUCCESS) {
2058 if (rc == GLD_NORESOURCES) {
2059 vlan->gldv_stats->glds_xmtretry++;
2060 (void) putbq(q, mp);
2061 } else {
2062 /* transmit error; drop the packet */
2063 freemsg(mp);
2064 /* We're supposed to count failed attempts as well */
2065 UPDATE_STATS(vlan, pktinfo, 1);
2066 #ifdef GLD_DEBUG
2067 if (gld_debug & GLDERRS)
2068 cmn_err(CE_WARN,
2069 "gld_start: gldm_send failed %d", rc);
2070 #endif
2071 }
2072 if (nmp)
2073 freemsg(nmp); /* free the duped message */
2074 return (rc);
2075 }
2076 
2077 UPDATE_STATS(vlan, pktinfo, 1);
2078 
2079 /*
2080 * Loopback case. The message needs to be sent back up on
2081 * the read side. This is silently skipped if the dupmsg above
2082 * failed; that's probably OK, since if there was no memory to
2083 * dup the block there isn't much we could do anyway.
2084 */
2085 if (nmp) {
2086 GLDM_LOCK(macinfo, RW_WRITER);
2087 gld_precv(macinfo, vlan, nmp);
2088 GLDM_UNLOCK(macinfo);
2089 }
2090 
2091 return (GLD_SUCCESS);
2092 }
2093 
2094 /*
2095 * With MDT V.2 a single message mp can have one header area and multiple
2096 * payload areas. A packet is described by dl_pkt_info, and each packet can
2097 * span multiple payload areas (currently with TCP, each packet will have one
2098 * header and at most two payload areas). MACs might have a limit on the
2099 * number of payload segments (i.e. per packet scatter-gather limit), and
2100 * MDT V.2 has a way of specifying that with mdt_span_limit; the MAC driver
2101 * might also have a limit on the total number of payloads in a message, and
2102 * that is specified by mdt_max_pld.
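 *
 * For reference, the driver-side call sequence used by gld_start_mdt()
 * below is, in outline:
 *
 *	numpacks = (*macinfo->gldm_mdt_pre)(macinfo, mp, &cookie);
 *	if (numpacks > 0) {
 *		for the first numpacks packet descriptors:
 *			(*macinfo->gldm_mdt_send)(macinfo, cookie, &pinfo);
 *		(*macinfo->gldm_mdt_post)(macinfo, mp, cookie);
 *	}
 *
 * A return of 0 from gldm_mdt_pre() means "no resources, retry the whole
 * message later"; -1 means the message cannot be sent and is dropped
 * (though its packets are still counted in the statistics).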
2103 */ 2104 static int 2105 gld_start_mdt(queue_t *q, mblk_t *mp, int caller) 2106 { 2107 mblk_t *nextmp; 2108 gld_t *gld = (gld_t *)q->q_ptr; 2109 gld_mac_info_t *macinfo = gld->gld_mac_info; 2110 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 2111 int numpacks, mdtpacks; 2112 gld_interface_t *ifp = mac_pvt->interfacep; 2113 pktinfo_t pktinfo; 2114 gld_vlan_t *vlan = (gld_vlan_t *)gld->gld_vlan; 2115 boolean_t doloop = B_FALSE; 2116 multidata_t *dlmdp; 2117 pdescinfo_t pinfo; 2118 pdesc_t *dl_pkt; 2119 void *cookie; 2120 uint_t totLen = 0; 2121 2122 ASSERT(DB_TYPE(mp) == M_MULTIDATA); 2123 2124 /* 2125 * We're not holding the lock for this check. If the promiscuous 2126 * state is in flux it doesn't matter much if we get this wrong. 2127 */ 2128 if (mac_pvt->nprom > 0) { 2129 /* 2130 * We want to loopback to the receive side, but to avoid 2131 * recursive lock entry: if we came from wput(), which 2132 * could have looped back via IP from our own receive 2133 * interrupt thread, we decline this request. wput() 2134 * will then queue the packet for wsrv(). This means 2135 * that when snoop is running we don't get the advantage 2136 * of the wput() multithreaded direct entry to the 2137 * driver's send routine. 2138 */ 2139 if (caller == GLD_WPUT) { 2140 (void) putbq(q, mp); 2141 return (GLD_NORESOURCES); 2142 } 2143 doloop = B_TRUE; 2144 2145 /* 2146 * unlike the M_DATA case, we don't have to call 2147 * dupmsg_noloan here because mmd_transform 2148 * (called by gld_precv_mdt) will make a copy of 2149 * each dblk. 2150 */ 2151 } 2152 2153 while (mp != NULL) { 2154 /* 2155 * The lower layer driver only gets a single multidata 2156 * message; this also makes it easier to handle noresources. 2157 */ 2158 nextmp = mp->b_cont; 2159 mp->b_cont = NULL; 2160 2161 /* 2162 * Get number of packets in this message; if nothing 2163 * to transmit, go to next message. 2164 */ 2165 dlmdp = mmd_getmultidata(mp); 2166 if ((mdtpacks = (int)mmd_getcnt(dlmdp, NULL, NULL)) == 0) { 2167 freemsg(mp); 2168 mp = nextmp; 2169 continue; 2170 } 2171 2172 /* 2173 * Run interpreter to populate media specific pktinfo fields. 2174 * This collects per MDT message information like sap, 2175 * broad/multicast etc. 2176 */ 2177 (void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, &pktinfo, 2178 GLD_MDT_TX); 2179 2180 numpacks = (*macinfo->gldm_mdt_pre)(macinfo, mp, &cookie); 2181 2182 if (numpacks > 0) { 2183 /* 2184 * Driver indicates it can transmit at least 1, and 2185 * possibly all, packets in MDT message. 2186 */ 2187 int count = numpacks; 2188 2189 for (dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo); 2190 (dl_pkt != NULL); 2191 dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo)) { 2192 /* 2193 * Format this packet by adding link header and 2194 * adjusting pdescinfo to include it; get 2195 * packet length. 2196 */ 2197 (void) (*ifp->interpreter_mdt)(macinfo, NULL, 2198 &pinfo, &pktinfo, GLD_MDT_TXPKT); 2199 2200 totLen += pktinfo.pktLen; 2201 2202 /* 2203 * Loop back packet before handing to the 2204 * driver. 2205 */ 2206 if (doloop && 2207 mmd_adjpdesc(dl_pkt, &pinfo) != NULL) { 2208 GLDM_LOCK(macinfo, RW_WRITER); 2209 gld_precv_mdt(macinfo, vlan, mp, 2210 dl_pkt, &pktinfo); 2211 GLDM_UNLOCK(macinfo); 2212 } 2213 2214 /* 2215 * And send off to driver. 
2216 */ 2217 (*macinfo->gldm_mdt_send)(macinfo, cookie, 2218 &pinfo); 2219 2220 /* 2221 * Be careful not to invoke getnextpdesc if we 2222 * already sent the last packet, since driver 2223 * might have posted it to hardware causing a 2224 * completion and freemsg() so the MDT data 2225 * structures might not be valid anymore. 2226 */ 2227 if (--count == 0) 2228 break; 2229 } 2230 (*macinfo->gldm_mdt_post)(macinfo, mp, cookie); 2231 pktinfo.pktLen = totLen; 2232 UPDATE_STATS(vlan, pktinfo, numpacks); 2233 2234 /* 2235 * In the noresources case (when driver indicates it 2236 * can not transmit all packets in the MDT message), 2237 * adjust to skip the first few packets on retrial. 2238 */ 2239 if (numpacks != mdtpacks) { 2240 /* 2241 * Release already processed packet descriptors. 2242 */ 2243 for (count = 0; count < numpacks; count++) { 2244 dl_pkt = mmd_getfirstpdesc(dlmdp, 2245 &pinfo); 2246 mmd_rempdesc(dl_pkt); 2247 } 2248 vlan->gldv_stats->glds_xmtretry++; 2249 mp->b_cont = nextmp; 2250 (void) putbq(q, mp); 2251 return (GLD_NORESOURCES); 2252 } 2253 } else if (numpacks == 0) { 2254 /* 2255 * Driver indicates it can not transmit any packets 2256 * currently and will request retrial later. 2257 */ 2258 vlan->gldv_stats->glds_xmtretry++; 2259 mp->b_cont = nextmp; 2260 (void) putbq(q, mp); 2261 return (GLD_NORESOURCES); 2262 } else { 2263 ASSERT(numpacks == -1); 2264 /* 2265 * We're supposed to count failed attempts as well. 2266 */ 2267 dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo); 2268 while (dl_pkt != NULL) { 2269 /* 2270 * Call interpreter to determine total packet 2271 * bytes that are being dropped. 2272 */ 2273 (void) (*ifp->interpreter_mdt)(macinfo, NULL, 2274 &pinfo, &pktinfo, GLD_MDT_TXPKT); 2275 2276 totLen += pktinfo.pktLen; 2277 2278 dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo); 2279 } 2280 pktinfo.pktLen = totLen; 2281 UPDATE_STATS(vlan, pktinfo, mdtpacks); 2282 2283 /* 2284 * Transmit error; drop the message, move on 2285 * to the next one. 2286 */ 2287 freemsg(mp); 2288 } 2289 2290 /* 2291 * Process the next multidata block, if there is one. 2292 */ 2293 mp = nextmp; 2294 } 2295 2296 return (GLD_SUCCESS); 2297 } 2298 2299 /* 2300 * gld_intr (macinfo) 2301 */ 2302 uint_t 2303 gld_intr(gld_mac_info_t *macinfo) 2304 { 2305 ASSERT(macinfo != NULL); 2306 2307 if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) 2308 return (DDI_INTR_UNCLAIMED); 2309 2310 return ((*macinfo->gldm_intr)(macinfo)); 2311 } 2312 2313 /* 2314 * gld_sched (macinfo) 2315 * 2316 * This routine scans the streams that refer to a specific macinfo 2317 * structure and causes the STREAMS scheduler to try to run them if 2318 * they are marked as waiting for the transmit buffer. 
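 *
 * A driver typically calls this after its gldm_send() entry point has
 * had to return GLD_NORESOURCES and the scarce resource (e.g. transmit
 * descriptors) has since been replenished. Hypothetical sketch, with
 * xx_send() and xx_tx_ring_full() as illustrative names only:
 *
 *	static int
 *	xx_send(gld_mac_info_t *macinfo, mblk_t *mp)
 *	{
 *		if (xx_tx_ring_full(macinfo))
 *			return (GLD_NORESOURCES);
 *		... hand the frame to the hardware ...
 *		return (GLD_SUCCESS);
 *	}
 *
 * and later, from the transmit-completion interrupt, once descriptors
 * have been reclaimed:
 *
 *	gld_sched(macinfo);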
2319 */ 2320 void 2321 gld_sched(gld_mac_info_t *macinfo) 2322 { 2323 gld_mac_pvt_t *mac_pvt; 2324 gld_t *gld; 2325 gld_vlan_t *vlan; 2326 int i; 2327 2328 ASSERT(macinfo != NULL); 2329 2330 GLDM_LOCK(macinfo, RW_WRITER); 2331 2332 if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) { 2333 /* We're probably being called from a leftover interrupt */ 2334 GLDM_UNLOCK(macinfo); 2335 return; 2336 } 2337 2338 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 2339 2340 for (i = 0; i < VLAN_HASHSZ; i++) { 2341 for (vlan = mac_pvt->vlan_hash[i]; 2342 vlan != NULL; vlan = vlan->gldv_next) { 2343 for (gld = vlan->gldv_str_next; 2344 gld != (gld_t *)&vlan->gldv_str_next; 2345 gld = gld->gld_next) { 2346 ASSERT(gld->gld_mac_info == macinfo); 2347 gld->gld_sched_ran = B_TRUE; 2348 membar_enter(); 2349 if (gld->gld_xwait) { 2350 gld->gld_xwait = B_FALSE; 2351 qenable(WR(gld->gld_qptr)); 2352 } 2353 } 2354 } 2355 } 2356 2357 GLDM_UNLOCK(macinfo); 2358 } 2359 2360 /* 2361 * gld_precv (macinfo, mp) 2362 * called from gld_start to loopback a packet when in promiscuous mode 2363 */ 2364 static void 2365 gld_precv(gld_mac_info_t *macinfo, gld_vlan_t *vlan, mblk_t *mp) 2366 { 2367 gld_mac_pvt_t *mac_pvt; 2368 gld_interface_t *ifp; 2369 pktinfo_t pktinfo; 2370 2371 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo)); 2372 2373 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 2374 ifp = mac_pvt->interfacep; 2375 2376 /* 2377 * call the media specific packet interpreter routine 2378 */ 2379 if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RXLOOP) != 0) { 2380 freemsg(mp); 2381 BUMP(vlan->gldv_stats->glds_rcvbadinterp, 1); 2382 #ifdef GLD_DEBUG 2383 if (gld_debug & GLDERRS) 2384 cmn_err(CE_WARN, 2385 "gld_precv: interpreter failed"); 2386 #endif 2387 return; 2388 } 2389 2390 gld_sendup(macinfo, vlan, &pktinfo, mp, gld_paccept); 2391 } 2392 2393 /* 2394 * called from gld_start_mdt to loopback packet(s) when in promiscuous mode 2395 */ 2396 static void 2397 gld_precv_mdt(gld_mac_info_t *macinfo, gld_vlan_t *vlan, mblk_t *mp, 2398 pdesc_t *dl_pkt, pktinfo_t *pktinfo) 2399 { 2400 mblk_t *adjmp; 2401 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 2402 gld_interface_t *ifp = mac_pvt->interfacep; 2403 2404 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo)); 2405 2406 /* 2407 * Get source/destination. 2408 */ 2409 (void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, pktinfo, 2410 GLD_MDT_RXLOOP); 2411 if ((adjmp = mmd_transform(dl_pkt)) != NULL) 2412 gld_sendup(macinfo, vlan, pktinfo, adjmp, gld_paccept); 2413 } 2414 2415 /* 2416 * gld_recv (macinfo, mp) 2417 * called with an mac-level packet in a mblock; take the maclock, 2418 * try the ip4q and ip6q hack, and otherwise call gld_sendup. 2419 * 2420 * V0 drivers already are holding the mutex when they call us. 
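 *
 * Hypothetical driver receive path, for illustration only (rx_buf and
 * len are illustrative names; a real driver may use esballoc'd buffers
 * instead of copying):
 *
 *	mblk_t *mp;
 *
 *	if ((mp = allocb(len, BPRI_MED)) != NULL) {
 *		bcopy(rx_buf, mp->b_wptr, len);
 *		mp->b_wptr += len;
 *		gld_recv(macinfo, mp);
 *	}
 *
 * A driver whose hardware strips VLAN tags would call
 * gld_recv_tagged(macinfo, mp, vtag) instead, passing the recovered tag.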
2421 */ 2422 void 2423 gld_recv(gld_mac_info_t *macinfo, mblk_t *mp) 2424 { 2425 gld_recv_tagged(macinfo, mp, VLAN_VTAG_NONE); 2426 } 2427 2428 void 2429 gld_recv_tagged(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t vtag) 2430 { 2431 gld_mac_pvt_t *mac_pvt; 2432 char pbuf[3*GLD_MAX_ADDRLEN]; 2433 pktinfo_t pktinfo; 2434 gld_interface_t *ifp; 2435 queue_t *ipq = NULL; 2436 gld_vlan_t *vlan; 2437 uint32_t vid; 2438 2439 ASSERT(macinfo != NULL); 2440 ASSERT(mp->b_datap->db_ref); 2441 2442 GLDM_LOCK(macinfo, RW_READER); 2443 2444 if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) { 2445 /* We're probably being called from a leftover interrupt */ 2446 freemsg(mp); 2447 goto done; 2448 } 2449 2450 vid = GLD_VTAG_VID(vtag); 2451 if ((vlan = gld_find_vlan(macinfo, vid)) == NULL) { 2452 freemsg(mp); 2453 goto done; 2454 } 2455 2456 /* 2457 * Check whether underlying media code supports the IPQ hack, 2458 * and if so, whether the interpreter can quickly parse the 2459 * packet to get some relevant parameters. 2460 */ 2461 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 2462 ifp = mac_pvt->interfacep; 2463 if (((*ifp->interpreter)(macinfo, mp, &pktinfo, 2464 GLD_RXQUICK) == 0) && (vlan->gldv_ipq_flags == 0)) { 2465 switch (pktinfo.ethertype) { 2466 case ETHERTYPE_IP: 2467 ipq = vlan->gldv_ipq; 2468 break; 2469 case ETHERTYPE_IPV6: 2470 ipq = vlan->gldv_ipv6q; 2471 break; 2472 } 2473 } 2474 2475 BUMP(vlan->gldv_stats->glds_bytercv64, pktinfo.pktLen); 2476 BUMP(vlan->gldv_stats->glds_pktrcv64, 1); 2477 2478 /* 2479 * Special case for IP; we can simply do the putnext here, if: 2480 * o ipq != NULL, and therefore: 2481 * - the device type supports IPQ (ethernet and IPoIB); 2482 * - the interpreter could quickly parse the packet; 2483 * - there are no PROMISC_SAP streams (on this VLAN); 2484 * - there is one, and only one, IP stream bound (to this VLAN); 2485 * - that stream is a "fastpath" stream; 2486 * - the packet is of type ETHERTYPE_IP or ETHERTYPE_IPV6 2487 * 2488 * o the packet is specifically for me, and therefore: 2489 * - the packet is not multicast or broadcast (fastpath only 2490 * wants unicast packets). 2491 * 2492 * o the stream is not asserting flow control. 2493 */ 2494 if (ipq != NULL && 2495 pktinfo.isForMe && 2496 canputnext(ipq)) { 2497 /* 2498 * Skip the mac header. 
We know there is no LLC1/SNAP header 2499 * in this packet 2500 */ 2501 mp->b_rptr += pktinfo.macLen; 2502 putnext(ipq, mp); 2503 goto done; 2504 } 2505 2506 /* 2507 * call the media specific packet interpreter routine 2508 */ 2509 if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RX) != 0) { 2510 BUMP(vlan->gldv_stats->glds_rcvbadinterp, 1); 2511 #ifdef GLD_DEBUG 2512 if (gld_debug & GLDERRS) 2513 cmn_err(CE_WARN, 2514 "gld_recv_tagged: interpreter failed"); 2515 #endif 2516 freemsg(mp); 2517 goto done; 2518 } 2519 2520 /* 2521 * This is safe even if vtag is VLAN_VTAG_NONE 2522 */ 2523 2524 pktinfo.vid = vid; 2525 pktinfo.cfi = GLD_VTAG_CFI(vtag); 2526 #ifdef GLD_DEBUG 2527 if (pktinfo.cfi != VLAN_CFI_ETHER) 2528 cmn_err(CE_WARN, "gld_recv_tagged: non-ETHER CFI"); 2529 #endif 2530 pktinfo.user_pri = GLD_VTAG_PRI(vtag); 2531 2532 #ifdef GLD_DEBUG 2533 if ((gld_debug & GLDRECV) && 2534 (!(gld_debug & GLDNOBR) || 2535 (!pktinfo.isBroadcast && !pktinfo.isMulticast))) { 2536 char pbuf2[3*GLD_MAX_ADDRLEN]; 2537 2538 cmn_err(CE_CONT, "gld_recv_tagged: machdr=<%s -> %s>\n", 2539 gld_macaddr_sprintf(pbuf, pktinfo.shost, 2540 macinfo->gldm_addrlen), gld_macaddr_sprintf(pbuf2, 2541 pktinfo.dhost, macinfo->gldm_addrlen)); 2542 cmn_err(CE_CONT, "gld_recv_tagged: VlanId %d UserPri %d\n", 2543 pktinfo.vid, 2544 pktinfo.user_pri); 2545 cmn_err(CE_CONT, "gld_recv_tagged: ethertype: %4x Len: %4d " 2546 "Hdr: %d,%d isMulticast: %s\n", 2547 pktinfo.ethertype, 2548 pktinfo.pktLen, 2549 pktinfo.macLen, 2550 pktinfo.hdrLen, 2551 pktinfo.isMulticast ? "Y" : "N"); 2552 } 2553 #endif 2554 2555 gld_sendup(macinfo, vlan, &pktinfo, mp, gld_accept); 2556 2557 done: 2558 GLDM_UNLOCK(macinfo); 2559 } 2560 2561 /* =================================================================== */ 2562 /* receive group: called from gld_recv and gld_precv* with maclock held */ 2563 /* =================================================================== */ 2564 2565 /* 2566 * gld_sendup (macinfo, mp) 2567 * called with an ethernet packet in a mblock; must decide whether 2568 * packet is for us and which streams to queue it to. 2569 */ 2570 static void 2571 gld_sendup(gld_mac_info_t *macinfo, gld_vlan_t *vlan, pktinfo_t *pktinfo, 2572 mblk_t *mp, int (*acceptfunc)()) 2573 { 2574 gld_t *gld; 2575 gld_t *fgld = NULL; 2576 mblk_t *nmp; 2577 void (*send)(queue_t *qp, mblk_t *mp); 2578 int (*cansend)(queue_t *qp); 2579 2580 #ifdef GLD_DEBUG 2581 if (gld_debug & GLDTRACE) 2582 cmn_err(CE_NOTE, "gld_sendup(%p, %p)", (void *)mp, 2583 (void *)macinfo); 2584 #endif 2585 2586 ASSERT(mp != NULL); 2587 ASSERT(macinfo != NULL); 2588 ASSERT(vlan != NULL); 2589 ASSERT(pktinfo != NULL); 2590 ASSERT(GLDM_LOCK_HELD(macinfo)); 2591 2592 /* 2593 * The "fast" in "GLDOPT_FAST_RECV" refers to the speed at which 2594 * gld_recv returns to the caller's interrupt routine. The total 2595 * network throughput would normally be lower when selecting this 2596 * option, because we putq the messages and process them later, 2597 * instead of sending them with putnext now. Some time critical 2598 * device might need this, so it's here but undocumented. 2599 */ 2600 if (macinfo->gldm_options & GLDOPT_FAST_RECV) { 2601 send = (void (*)(queue_t *, mblk_t *))putq; 2602 cansend = canput; 2603 } else { 2604 send = (void (*)(queue_t *, mblk_t *))putnext; 2605 cansend = canputnext; 2606 } 2607 2608 /* 2609 * Search all the streams attached to this macinfo looking for 2610 * those eligible to receive the present packet. 
2611 */ 2612 for (gld = vlan->gldv_str_next; 2613 gld != (gld_t *)&vlan->gldv_str_next; gld = gld->gld_next) { 2614 #ifdef GLD_VERBOSE_DEBUG 2615 cmn_err(CE_NOTE, "gld_sendup: SAP: %4x QPTR: %p QSTATE: %s", 2616 gld->gld_sap, (void *)gld->gld_qptr, 2617 gld->gld_state == DL_IDLE ? "IDLE": "NOT IDLE"); 2618 #endif 2619 ASSERT(gld->gld_qptr != NULL); 2620 ASSERT(gld->gld_state == DL_IDLE || 2621 gld->gld_state == DL_UNBOUND); 2622 ASSERT(gld->gld_mac_info == macinfo); 2623 ASSERT(gld->gld_vlan == vlan); 2624 2625 if (gld->gld_state != DL_IDLE) 2626 continue; /* not eligible to receive */ 2627 if (gld->gld_flags & GLD_STR_CLOSING) 2628 continue; /* not eligible to receive */ 2629 2630 #ifdef GLD_DEBUG 2631 if ((gld_debug & GLDRECV) && 2632 (!(gld_debug & GLDNOBR) || 2633 (!pktinfo->isBroadcast && !pktinfo->isMulticast))) 2634 cmn_err(CE_NOTE, 2635 "gld_sendup: queue sap: %4x promis: %s %s %s", 2636 gld->gld_sap, 2637 gld->gld_flags & GLD_PROM_PHYS ? "phys " : " ", 2638 gld->gld_flags & GLD_PROM_SAP ? "sap " : " ", 2639 gld->gld_flags & GLD_PROM_MULT ? "multi" : " "); 2640 #endif 2641 2642 /* 2643 * The accept function differs depending on whether this is 2644 * a packet that we received from the wire or a loopback. 2645 */ 2646 if ((*acceptfunc)(gld, pktinfo)) { 2647 /* sap matches */ 2648 pktinfo->wasAccepted = 1; /* known protocol */ 2649 2650 if (!(*cansend)(gld->gld_qptr)) { 2651 /* 2652 * Upper stream is not accepting messages, i.e. 2653 * it is flow controlled, therefore we will 2654 * forgo sending the message up this stream. 2655 */ 2656 #ifdef GLD_DEBUG 2657 if (gld_debug & GLDETRACE) 2658 cmn_err(CE_WARN, 2659 "gld_sendup: canput failed"); 2660 #endif 2661 BUMP(vlan->gldv_stats->glds_blocked, 1); 2662 qenable(gld->gld_qptr); 2663 continue; 2664 } 2665 2666 /* 2667 * we are trying to avoid an extra dumpmsg() here. 2668 * If this is the first eligible queue, remember the 2669 * queue and send up the message after the loop. 2670 */ 2671 if (!fgld) { 2672 fgld = gld; 2673 continue; 2674 } 2675 2676 /* duplicate the packet for this stream */ 2677 nmp = dupmsg(mp); 2678 if (nmp == NULL) { 2679 BUMP(vlan->gldv_stats->glds_gldnorcvbuf, 1); 2680 #ifdef GLD_DEBUG 2681 if (gld_debug & GLDERRS) 2682 cmn_err(CE_WARN, 2683 "gld_sendup: dupmsg failed"); 2684 #endif 2685 break; /* couldn't get resources; drop it */ 2686 } 2687 /* pass the message up the stream */ 2688 gld_passon(gld, nmp, pktinfo, send); 2689 } 2690 } 2691 2692 ASSERT(mp); 2693 /* send the original dup of the packet up the first stream found */ 2694 if (fgld) 2695 gld_passon(fgld, mp, pktinfo, send); 2696 else 2697 freemsg(mp); /* no streams matched */ 2698 2699 /* We do not count looped back packets */ 2700 if (acceptfunc == gld_paccept) 2701 return; /* transmit loopback case */ 2702 2703 if (pktinfo->isBroadcast) 2704 BUMP(vlan->gldv_stats->glds_brdcstrcv, 1); 2705 else if (pktinfo->isMulticast) 2706 BUMP(vlan->gldv_stats->glds_multircv, 1); 2707 2708 /* No stream accepted this packet */ 2709 if (!pktinfo->wasAccepted) 2710 BUMP(vlan->gldv_stats->glds_unknowns, 1); 2711 } 2712 2713 /* 2714 * A packet matches a stream if: 2715 * the stream accepts EtherType encoded packets and the type matches 2716 * or the stream accepts LLC packets and the packet is an LLC packet 2717 */ 2718 #define MATCH(stream, pktinfo) \ 2719 ((stream->gld_ethertype && stream->gld_sap == pktinfo->ethertype) || \ 2720 (!stream->gld_ethertype && pktinfo->isLLC)) 2721 2722 /* 2723 * This function validates a packet for sending up a particular 2724 * stream. 
The message header has been parsed and its characteristic 2725 * are recorded in the pktinfo data structure. The streams stack info 2726 * are presented in gld data structures. 2727 */ 2728 static int 2729 gld_accept(gld_t *gld, pktinfo_t *pktinfo) 2730 { 2731 /* 2732 * if there is no match do not bother checking further. 2733 */ 2734 if (!MATCH(gld, pktinfo) && !(gld->gld_flags & GLD_PROM_SAP)) 2735 return (0); 2736 2737 /* 2738 * We don't accept any packet from the hardware if we originated it. 2739 * (Contrast gld_paccept, the send-loopback accept function.) 2740 */ 2741 if (pktinfo->isLooped) 2742 return (0); 2743 2744 /* 2745 * If the packet is broadcast or sent to us directly we will accept it. 2746 * Also we will accept multicast packets requested by the stream. 2747 */ 2748 if (pktinfo->isForMe || pktinfo->isBroadcast || 2749 gld_mcmatch(gld, pktinfo)) 2750 return (1); 2751 2752 /* 2753 * Finally, accept anything else if we're in promiscuous mode 2754 */ 2755 if (gld->gld_flags & GLD_PROM_PHYS) 2756 return (1); 2757 2758 return (0); 2759 } 2760 2761 /* 2762 * Return TRUE if the given multicast address is one 2763 * of those that this particular Stream is interested in. 2764 */ 2765 static int 2766 gld_mcmatch(gld_t *gld, pktinfo_t *pktinfo) 2767 { 2768 /* 2769 * Return FALSE if not a multicast address. 2770 */ 2771 if (!pktinfo->isMulticast) 2772 return (0); 2773 2774 /* 2775 * Check if all multicasts have been enabled for this Stream 2776 */ 2777 if (gld->gld_flags & GLD_PROM_MULT) 2778 return (1); 2779 2780 /* 2781 * Return FALSE if no multicast addresses enabled for this Stream. 2782 */ 2783 if (!gld->gld_mcast) 2784 return (0); 2785 2786 /* 2787 * Otherwise, look for it in the table. 2788 */ 2789 return (gld_multicast(pktinfo->dhost, gld)); 2790 } 2791 2792 /* 2793 * gld_multicast determines if the address is a multicast address for 2794 * this stream. 2795 */ 2796 static int 2797 gld_multicast(unsigned char *macaddr, gld_t *gld) 2798 { 2799 int i; 2800 2801 ASSERT(GLDM_LOCK_HELD(gld->gld_mac_info)); 2802 2803 if (!gld->gld_mcast) 2804 return (0); 2805 2806 for (i = 0; i < gld->gld_multicnt; i++) { 2807 if (gld->gld_mcast[i]) { 2808 ASSERT(gld->gld_mcast[i]->gldm_refcnt); 2809 if (mac_eq(gld->gld_mcast[i]->gldm_addr, macaddr, 2810 gld->gld_mac_info->gldm_addrlen)) 2811 return (1); 2812 } 2813 } 2814 2815 return (0); 2816 } 2817 2818 /* 2819 * accept function for looped back packets 2820 */ 2821 static int 2822 gld_paccept(gld_t *gld, pktinfo_t *pktinfo) 2823 { 2824 return (gld->gld_flags & GLD_PROM_PHYS && 2825 (MATCH(gld, pktinfo) || gld->gld_flags & GLD_PROM_SAP)); 2826 } 2827 2828 static void 2829 gld_passon(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo, 2830 void (*send)(queue_t *qp, mblk_t *mp)) 2831 { 2832 int skiplen; 2833 2834 #ifdef GLD_DEBUG 2835 if (gld_debug & GLDTRACE) 2836 cmn_err(CE_NOTE, "gld_passon(%p, %p, %p)", (void *)gld, 2837 (void *)mp, (void *)pktinfo); 2838 2839 if ((gld_debug & GLDRECV) && (!(gld_debug & GLDNOBR) || 2840 (!pktinfo->isBroadcast && !pktinfo->isMulticast))) 2841 cmn_err(CE_NOTE, "gld_passon: q: %p mblk: %p minor: %d sap: %x", 2842 (void *)gld->gld_qptr->q_next, (void *)mp, gld->gld_minor, 2843 gld->gld_sap); 2844 #endif 2845 2846 /* 2847 * Figure out how much of the packet header to throw away. 2848 * 2849 * RAW streams expect to see the whole packet. 2850 * 2851 * Other streams expect to see the packet with the MAC header 2852 * removed. 
2853 * 2854 * Normal DLPI (non RAW/FAST) streams also want the 2855 * DL_UNITDATA_IND M_PROTO message block prepended to the M_DATA. 2856 */ 2857 if (gld->gld_flags & GLD_RAW) { 2858 skiplen = 0; 2859 } else { 2860 skiplen = pktinfo->macLen; /* skip mac header */ 2861 if (gld->gld_ethertype) 2862 skiplen += pktinfo->hdrLen; /* skip any extra */ 2863 } 2864 2865 if (skiplen >= pktinfo->pktLen) { 2866 /* 2867 * If the interpreter did its job right, then it cannot be 2868 * asking us to skip more bytes than are in the packet! 2869 * However, there could be zero data bytes left after the 2870 * amount to skip. DLPI specifies that passed M_DATA blocks 2871 * should contain at least one byte of data, so if we have 2872 * none we just drop it. 2873 */ 2874 ASSERT(!(skiplen > pktinfo->pktLen)); 2875 freemsg(mp); 2876 return; 2877 } 2878 2879 /* 2880 * Skip over the header(s), taking care to possibly handle message 2881 * fragments shorter than the amount we need to skip. Hopefully 2882 * the driver will put the entire packet, or at least the entire 2883 * header, into a single message block. But we handle it if not. 2884 */ 2885 while (skiplen >= MBLKL(mp)) { 2886 mblk_t *tmp = mp; 2887 skiplen -= MBLKL(mp); 2888 mp = mp->b_cont; 2889 ASSERT(mp != NULL); /* because skiplen < pktinfo->pktLen */ 2890 freeb(tmp); 2891 } 2892 mp->b_rptr += skiplen; 2893 2894 /* Add M_PROTO if necessary, and pass upstream */ 2895 if (((gld->gld_flags & GLD_FAST) && !pktinfo->isMulticast && 2896 !pktinfo->isBroadcast) || (gld->gld_flags & GLD_RAW)) { 2897 /* RAW/FAST: just send up the M_DATA */ 2898 (*send)(gld->gld_qptr, mp); 2899 } else { 2900 /* everybody else wants to see a unitdata_ind structure */ 2901 mp = gld_addudind(gld, mp, pktinfo); 2902 if (mp) 2903 (*send)(gld->gld_qptr, mp); 2904 /* if it failed, gld_addudind already bumped statistic */ 2905 } 2906 } 2907 2908 /* 2909 * gld_addudind(gld, mp, pktinfo) 2910 * format a DL_UNITDATA_IND message to be sent upstream to the user 2911 */ 2912 static mblk_t * 2913 gld_addudind(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo) 2914 { 2915 gld_mac_info_t *macinfo = gld->gld_mac_info; 2916 gld_vlan_t *vlan = (gld_vlan_t *)gld->gld_vlan; 2917 dl_unitdata_ind_t *dludindp; 2918 mblk_t *nmp; 2919 int size; 2920 int type; 2921 2922 #ifdef GLD_DEBUG 2923 if (gld_debug & GLDTRACE) 2924 cmn_err(CE_NOTE, "gld_addudind(%p, %p, %p)", (void *)gld, 2925 (void *)mp, (void *)pktinfo); 2926 #endif 2927 ASSERT(macinfo != NULL); 2928 2929 /* 2930 * Allocate the DL_UNITDATA_IND M_PROTO header, if allocation fails 2931 * might as well discard since we can't go further 2932 */ 2933 size = sizeof (dl_unitdata_ind_t) + 2934 2 * (macinfo->gldm_addrlen + abs(macinfo->gldm_saplen)); 2935 if ((nmp = allocb(size, BPRI_MED)) == NULL) { 2936 freemsg(mp); 2937 BUMP(vlan->gldv_stats->glds_gldnorcvbuf, 1); 2938 #ifdef GLD_DEBUG 2939 if (gld_debug & GLDERRS) 2940 cmn_err(CE_WARN, 2941 "gld_addudind: allocb failed"); 2942 #endif 2943 return ((mblk_t *)NULL); 2944 } 2945 DB_TYPE(nmp) = M_PROTO; 2946 nmp->b_rptr = nmp->b_datap->db_lim - size; 2947 2948 type = (gld->gld_ethertype) ? pktinfo->ethertype : 0; 2949 2950 /* 2951 * now setup the DL_UNITDATA_IND header 2952 * 2953 * XXX This looks broken if the saps aren't two bytes. 
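 *
 * For a typical ethernet-style MAC (gldm_addrlen == 6, gldm_saplen == -2,
 * as asserted below) the M_PROTO block built here is laid out as:
 *
 *	dl_unitdata_ind_t
 *	destination MAC address (6 bytes) + destination SAP (2 bytes)
 *	source MAC address (6 bytes) + source SAP (2 bytes)
 *
 * with the original M_DATA (whose MAC header was already stripped by
 * gld_passon()) linked on behind via linkb().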
2954 */ 2955 dludindp = (dl_unitdata_ind_t *)nmp->b_rptr; 2956 dludindp->dl_primitive = DL_UNITDATA_IND; 2957 dludindp->dl_src_addr_length = 2958 dludindp->dl_dest_addr_length = macinfo->gldm_addrlen + 2959 abs(macinfo->gldm_saplen); 2960 dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t); 2961 dludindp->dl_src_addr_offset = dludindp->dl_dest_addr_offset + 2962 dludindp->dl_dest_addr_length; 2963 2964 dludindp->dl_group_address = (pktinfo->isMulticast || 2965 pktinfo->isBroadcast); 2966 2967 nmp->b_wptr = nmp->b_rptr + dludindp->dl_dest_addr_offset; 2968 2969 mac_copy(pktinfo->dhost, nmp->b_wptr, macinfo->gldm_addrlen); 2970 nmp->b_wptr += macinfo->gldm_addrlen; 2971 2972 ASSERT(macinfo->gldm_saplen == -2); /* XXX following code assumes */ 2973 *(ushort_t *)(nmp->b_wptr) = type; 2974 nmp->b_wptr += abs(macinfo->gldm_saplen); 2975 2976 ASSERT(nmp->b_wptr == nmp->b_rptr + dludindp->dl_src_addr_offset); 2977 2978 mac_copy(pktinfo->shost, nmp->b_wptr, macinfo->gldm_addrlen); 2979 nmp->b_wptr += macinfo->gldm_addrlen; 2980 2981 *(ushort_t *)(nmp->b_wptr) = type; 2982 nmp->b_wptr += abs(macinfo->gldm_saplen); 2983 2984 if (pktinfo->nosource) 2985 dludindp->dl_src_addr_offset = dludindp->dl_src_addr_length = 0; 2986 linkb(nmp, mp); 2987 return (nmp); 2988 } 2989 2990 /* ======================================================= */ 2991 /* wsrv group: called from wsrv, single threaded per queue */ 2992 /* ======================================================= */ 2993 2994 /* 2995 * We go to some trouble to avoid taking the same lock during normal 2996 * transmit processing as we do during normal receive processing. 2997 * 2998 * Elements of the per-instance macinfo and per-stream gld_t structures 2999 * are for the most part protected by the GLDM_LOCK rwlock/mutex. 3000 * (Elements of the gld_mac_pvt_t structure are considered part of the 3001 * macinfo structure for purposes of this discussion). 3002 * 3003 * However, it is more complicated than that: 3004 * 3005 * Elements of the macinfo structure that are set before the macinfo 3006 * structure is added to its device list by gld_register(), and never 3007 * thereafter modified, are accessed without requiring taking the lock. 3008 * A similar rule applies to those elements of the gld_t structure that 3009 * are written by gld_open() before the stream is added to any list. 3010 * 3011 * Most other elements of the macinfo structure may only be read or 3012 * written while holding the maclock. 3013 * 3014 * Most writable elements of the gld_t structure are written only 3015 * within the single-threaded domain of wsrv() and subsidiaries. 3016 * (This domain includes open/close while qprocs are not on.) 3017 * The maclock need not be taken while within that domain 3018 * simply to read those elements. Writing to them, even within 3019 * that domain, or reading from it outside that domain, requires 3020 * holding the maclock. Exception: if the stream is not 3021 * presently attached to a PPA, there is no associated macinfo, 3022 * and no maclock need be taken. 3023 * 3024 * The curr_macaddr element of the mac private structure is also 3025 * protected by the GLDM_LOCK rwlock/mutex, like most other members 3026 * of that structure. However, there are a few instances in the 3027 * transmit path where we choose to forgo lock protection when 3028 * reading this variable. This is to avoid lock contention between 3029 * threads executing the DL_UNITDATA_REQ case and receive threads. 
3030 * In doing so we will take a small risk of a few corrupted packets
3031 * during the short and rare times when someone is changing the interface's
3032 * physical address. We consider the small cost in this rare case to be
3033 * worth the benefit of reduced lock contention under normal operating
3034 * conditions. The risk/cost is small because:
3035 * 1. there is no guarantee at this layer of uncorrupted delivery.
3036 * 2. the physaddr doesn't change very often - no performance hit.
3037 * 3. if the physaddr changes, other stuff is going to be screwed
3038 * up for a while anyway, while other sites refigure ARP, etc.,
3039 * so losing a couple of packets is the least of our worries.
3040 *
3041 * The list of streams associated with a macinfo is protected by
3042 * two locks: the per-macinfo maclock, and the per-major-device
3043 * gld_devlock. Both must be held to modify the list, but either
3044 * may be held to protect the list during reading/traversing. This
3045 * allows independent locking for multiple instances in the receive
3046 * path (using macinfo), while facilitating routines that must search
3047 * the entire set of streams associated with a major device, such as
3048 * gld_findminor(), gld_finddevinfo(), close(). The "nstreams"
3049 * macinfo element, and the gld_mac_info gld_t element, are similarly
3050 * protected, since they change at exactly the same time the macinfo
3051 * streams list does.
3052 *
3053 * The list of macinfo structures associated with a major device
3054 * structure is protected by the gld_devlock, as is the per-major
3055 * list of Style 2 streams in the DL_UNATTACHED state.
3056 *
3057 * The list of major devices is kept on a module-global list
3058 * gld_device_list, which has its own lock to protect the list.
3059 *
3060 * When it is necessary to hold more than one lock at a time, they
3061 * are acquired in this "outside in" order:
3062 * gld_device_list.gld_devlock
3063 * glddev->gld_devlock
3064 * GLDM_LOCK(macinfo)
3065 *
3066 * Finally, there are some "volatile" elements of the gld_t structure
3067 * used for synchronization between various routines that don't share
3068 * the same mutexes. See the routines for details. These are:
3069 * gld_xwait between gld_wsrv() and gld_sched()
3070 * gld_sched_ran between gld_wsrv() and gld_sched()
3071 * gld_in_unbind between gld_wput() and wsrv's gld_unbind()
3072 * gld_wput_count between gld_wput() and wsrv's gld_unbind()
3073 * gld_in_wsrv between gld_wput() and gld_wsrv()
3074 * (used in conjunction with q->q_first)
3075 */
3076 
3077 /*
3078 * gld_ioctl (q, mp)
3079 * handles all ioctl requests passed downstream. This routine is
3080 * passed a pointer to the message block with the ioctl request in it, and a
3081 * pointer to the queue so it can respond to the ioctl request with an ack.
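 *
 * For illustration, a user-level DLPI client would typically switch its
 * stream into raw mode (the DLIOCRAW case below) with an I_STR ioctl;
 * fd here is a hypothetical descriptor open on the device node:
 *
 *	struct strioctl sioc;
 *
 *	sioc.ic_cmd = DLIOCRAW;
 *	sioc.ic_timout = -1;
 *	sioc.ic_len = 0;
 *	sioc.ic_dp = NULL;
 *	if (ioctl(fd, I_STR, &sioc) < 0)
 *		... handle the error ...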
3082 */ 3083 int 3084 gld_ioctl(queue_t *q, mblk_t *mp) 3085 { 3086 struct iocblk *iocp; 3087 gld_t *gld; 3088 gld_mac_info_t *macinfo; 3089 3090 #ifdef GLD_DEBUG 3091 if (gld_debug & GLDTRACE) 3092 cmn_err(CE_NOTE, "gld_ioctl(%p %p)", (void *)q, (void *)mp); 3093 #endif 3094 gld = (gld_t *)q->q_ptr; 3095 iocp = (struct iocblk *)mp->b_rptr; 3096 switch (iocp->ioc_cmd) { 3097 case DLIOCRAW: /* raw M_DATA mode */ 3098 gld->gld_flags |= GLD_RAW; 3099 DB_TYPE(mp) = M_IOCACK; 3100 qreply(q, mp); 3101 break; 3102 3103 case DL_IOC_HDR_INFO: /* fastpath */ 3104 if (gld_global_options & GLD_OPT_NO_FASTPATH) { 3105 miocnak(q, mp, 0, EINVAL); 3106 break; 3107 } 3108 gld_fastpath(gld, q, mp); 3109 break; 3110 3111 default: 3112 macinfo = gld->gld_mac_info; 3113 if (macinfo == NULL || macinfo->gldm_ioctl == NULL) { 3114 miocnak(q, mp, 0, EINVAL); 3115 break; 3116 } 3117 3118 GLDM_LOCK(macinfo, RW_WRITER); 3119 (void) (*macinfo->gldm_ioctl) (macinfo, q, mp); 3120 GLDM_UNLOCK(macinfo); 3121 break; 3122 } 3123 return (0); 3124 } 3125 3126 /* 3127 * Since the rules for "fastpath" mode don't seem to be documented 3128 * anywhere, I will describe GLD's rules for fastpath users here: 3129 * 3130 * Once in this mode you remain there until close. 3131 * If you unbind/rebind you should get a new header using DL_IOC_HDR_INFO. 3132 * You must be bound (DL_IDLE) to transmit. 3133 * There are other rules not listed above. 3134 */ 3135 static void 3136 gld_fastpath(gld_t *gld, queue_t *q, mblk_t *mp) 3137 { 3138 gld_interface_t *ifp; 3139 gld_mac_info_t *macinfo; 3140 dl_unitdata_req_t *dludp; 3141 mblk_t *nmp; 3142 t_scalar_t off, len; 3143 uint_t maclen; 3144 int error; 3145 gld_vlan_t *vlan; 3146 3147 if (gld->gld_state != DL_IDLE) { 3148 miocnak(q, mp, 0, EINVAL); 3149 return; 3150 } 3151 3152 macinfo = gld->gld_mac_info; 3153 ASSERT(macinfo != NULL); 3154 maclen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen); 3155 3156 error = miocpullup(mp, sizeof (dl_unitdata_req_t) + maclen); 3157 if (error != 0) { 3158 miocnak(q, mp, 0, error); 3159 return; 3160 } 3161 3162 dludp = (dl_unitdata_req_t *)mp->b_cont->b_rptr; 3163 off = dludp->dl_dest_addr_offset; 3164 len = dludp->dl_dest_addr_length; 3165 if (dludp->dl_primitive != DL_UNITDATA_REQ || 3166 !MBLKIN(mp->b_cont, off, len) || len != maclen) { 3167 miocnak(q, mp, 0, EINVAL); 3168 return; 3169 } 3170 3171 /* 3172 * We take his fastpath request as a declaration that he will accept 3173 * M_DATA messages from us, whether or not we are willing to accept 3174 * them from him. This allows us to have fastpath in one direction 3175 * (flow upstream) even on media with Source Routing, where we are 3176 * unable to provide a fixed MAC header to be prepended to downstream 3177 * flowing packets. So we set GLD_FAST whether or not we decide to 3178 * allow him to send M_DATA down to us. 3179 */ 3180 GLDM_LOCK(macinfo, RW_WRITER); 3181 gld->gld_flags |= GLD_FAST; 3182 vlan = (gld_vlan_t *)gld->gld_vlan; 3183 vlan->gldv_ipq_flags &= ~IPQ_DISABLED; 3184 GLDM_UNLOCK(macinfo); 3185 3186 ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep; 3187 3188 /* This will fail for Source Routing media */ 3189 /* Also on Ethernet on 802.2 SAPs */ 3190 if ((nmp = (*ifp->mkfastpath)(gld, mp)) == NULL) { 3191 miocnak(q, mp, 0, ENOMEM); 3192 return; 3193 } 3194 3195 /* 3196 * Link new mblk in after the "request" mblks. 
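 *
 * The M_IOCACK reply therefore carries the client's original
 * DL_UNITDATA_REQ followed by an mblk holding a ready-made MAC header;
 * the client is expected to cache that header and prepend a copy of it
 * to each outbound M_DATA (see the sketch above gld_wput()).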
3197 */ 3198 linkb(mp, nmp); 3199 miocack(q, mp, msgdsize(mp->b_cont), 0); 3200 } 3201 3202 /* 3203 * gld_cmds (q, mp) 3204 * process the DL commands as defined in dlpi.h 3205 * note that the primitives return status which is passed back 3206 * to the service procedure. If the value is GLDE_RETRY, then 3207 * it is assumed that processing must stop and the primitive has 3208 * been put back onto the queue. If the value is any other error, 3209 * then an error ack is generated by the service procedure. 3210 */ 3211 static int 3212 gld_cmds(queue_t *q, mblk_t *mp) 3213 { 3214 union DL_primitives *dlp = (union DL_primitives *)mp->b_rptr; 3215 gld_t *gld = (gld_t *)(q->q_ptr); 3216 int result = DL_BADPRIM; 3217 int mblkl = MBLKL(mp); 3218 t_uscalar_t dlreq; 3219 3220 /* Make sure we have at least dlp->dl_primitive */ 3221 if (mblkl < sizeof (dlp->dl_primitive)) 3222 return (DL_BADPRIM); 3223 3224 dlreq = dlp->dl_primitive; 3225 #ifdef GLD_DEBUG 3226 if (gld_debug & GLDTRACE) 3227 cmn_err(CE_NOTE, 3228 "gld_cmds(%p, %p):dlp=%p, dlp->dl_primitive=%d", 3229 (void *)q, (void *)mp, (void *)dlp, dlreq); 3230 #endif 3231 3232 switch (dlreq) { 3233 case DL_UDQOS_REQ: 3234 if (mblkl < DL_UDQOS_REQ_SIZE) 3235 break; 3236 result = gld_udqos(q, mp); 3237 break; 3238 3239 case DL_BIND_REQ: 3240 if (mblkl < DL_BIND_REQ_SIZE) 3241 break; 3242 result = gld_bind(q, mp); 3243 break; 3244 3245 case DL_UNBIND_REQ: 3246 if (mblkl < DL_UNBIND_REQ_SIZE) 3247 break; 3248 result = gld_unbind(q, mp); 3249 break; 3250 3251 case DL_UNITDATA_REQ: 3252 if (mblkl < DL_UNITDATA_REQ_SIZE) 3253 break; 3254 result = gld_unitdata(q, mp); 3255 break; 3256 3257 case DL_INFO_REQ: 3258 if (mblkl < DL_INFO_REQ_SIZE) 3259 break; 3260 result = gld_inforeq(q, mp); 3261 break; 3262 3263 case DL_ATTACH_REQ: 3264 if (mblkl < DL_ATTACH_REQ_SIZE) 3265 break; 3266 if (gld->gld_style == DL_STYLE2) 3267 result = gldattach(q, mp); 3268 else 3269 result = DL_NOTSUPPORTED; 3270 break; 3271 3272 case DL_DETACH_REQ: 3273 if (mblkl < DL_DETACH_REQ_SIZE) 3274 break; 3275 if (gld->gld_style == DL_STYLE2) 3276 result = gldunattach(q, mp); 3277 else 3278 result = DL_NOTSUPPORTED; 3279 break; 3280 3281 case DL_ENABMULTI_REQ: 3282 if (mblkl < DL_ENABMULTI_REQ_SIZE) 3283 break; 3284 result = gld_enable_multi(q, mp); 3285 break; 3286 3287 case DL_DISABMULTI_REQ: 3288 if (mblkl < DL_DISABMULTI_REQ_SIZE) 3289 break; 3290 result = gld_disable_multi(q, mp); 3291 break; 3292 3293 case DL_PHYS_ADDR_REQ: 3294 if (mblkl < DL_PHYS_ADDR_REQ_SIZE) 3295 break; 3296 result = gld_physaddr(q, mp); 3297 break; 3298 3299 case DL_SET_PHYS_ADDR_REQ: 3300 if (mblkl < DL_SET_PHYS_ADDR_REQ_SIZE) 3301 break; 3302 result = gld_setaddr(q, mp); 3303 break; 3304 3305 case DL_PROMISCON_REQ: 3306 if (mblkl < DL_PROMISCON_REQ_SIZE) 3307 break; 3308 result = gld_promisc(q, mp, dlreq, B_TRUE); 3309 break; 3310 3311 case DL_PROMISCOFF_REQ: 3312 if (mblkl < DL_PROMISCOFF_REQ_SIZE) 3313 break; 3314 result = gld_promisc(q, mp, dlreq, B_FALSE); 3315 break; 3316 3317 case DL_GET_STATISTICS_REQ: 3318 if (mblkl < DL_GET_STATISTICS_REQ_SIZE) 3319 break; 3320 result = gld_get_statistics(q, mp); 3321 break; 3322 3323 case DL_CAPABILITY_REQ: 3324 if (mblkl < DL_CAPABILITY_REQ_SIZE) 3325 break; 3326 result = gld_cap(q, mp); 3327 break; 3328 3329 case DL_NOTIFY_REQ: 3330 if (mblkl < DL_NOTIFY_REQ_SIZE) 3331 break; 3332 result = gld_notify_req(q, mp); 3333 break; 3334 3335 case DL_XID_REQ: 3336 case DL_XID_RES: 3337 case DL_TEST_REQ: 3338 case DL_TEST_RES: 3339 case DL_CONTROL_REQ: 3340 result = 
DL_NOTSUPPORTED; 3341 break; 3342 3343 default: 3344 #ifdef GLD_DEBUG 3345 if (gld_debug & GLDERRS) 3346 cmn_err(CE_WARN, 3347 "gld_cmds: unknown M_PROTO message: %d", 3348 dlreq); 3349 #endif 3350 result = DL_BADPRIM; 3351 } 3352 3353 return (result); 3354 } 3355 3356 static int 3357 gld_cap(queue_t *q, mblk_t *mp) 3358 { 3359 gld_t *gld = (gld_t *)q->q_ptr; 3360 dl_capability_req_t *dlp = (dl_capability_req_t *)mp->b_rptr; 3361 3362 if (gld->gld_state == DL_UNATTACHED) 3363 return (DL_OUTSTATE); 3364 3365 if (dlp->dl_sub_length == 0) 3366 return (gld_cap_ack(q, mp)); 3367 3368 return (gld_cap_enable(q, mp)); 3369 } 3370 3371 static int 3372 gld_cap_ack(queue_t *q, mblk_t *mp) 3373 { 3374 gld_t *gld = (gld_t *)q->q_ptr; 3375 gld_mac_info_t *macinfo = gld->gld_mac_info; 3376 gld_interface_t *ifp; 3377 dl_capability_ack_t *dlap; 3378 dl_capability_sub_t *dlsp; 3379 size_t size = sizeof (dl_capability_ack_t); 3380 size_t subsize = 0; 3381 3382 ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep; 3383 3384 if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY) 3385 subsize += sizeof (dl_capability_sub_t) + 3386 sizeof (dl_capab_hcksum_t); 3387 if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY) 3388 subsize += sizeof (dl_capability_sub_t) + 3389 sizeof (dl_capab_zerocopy_t); 3390 if (macinfo->gldm_options & GLDOPT_MDT) 3391 subsize += (sizeof (dl_capability_sub_t) + 3392 sizeof (dl_capab_mdt_t)); 3393 3394 if ((mp = mexchange(q, mp, size + subsize, M_PROTO, 3395 DL_CAPABILITY_ACK)) == NULL) 3396 return (GLDE_OK); 3397 3398 dlap = (dl_capability_ack_t *)mp->b_rptr; 3399 dlap->dl_sub_offset = 0; 3400 if ((dlap->dl_sub_length = subsize) != 0) 3401 dlap->dl_sub_offset = sizeof (dl_capability_ack_t); 3402 dlsp = (dl_capability_sub_t *)&dlap[1]; 3403 3404 if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY) { 3405 dl_capab_hcksum_t *dlhp = (dl_capab_hcksum_t *)&dlsp[1]; 3406 3407 dlsp->dl_cap = DL_CAPAB_HCKSUM; 3408 dlsp->dl_length = sizeof (dl_capab_hcksum_t); 3409 3410 dlhp->hcksum_version = HCKSUM_VERSION_1; 3411 3412 dlhp->hcksum_txflags = 0; 3413 if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_PARTIAL) 3414 dlhp->hcksum_txflags |= HCKSUM_INET_PARTIAL; 3415 if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V4) 3416 dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V4; 3417 if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_IPHDR) 3418 dlhp->hcksum_txflags |= HCKSUM_IPHDRCKSUM; 3419 3420 dlcapabsetqid(&(dlhp->hcksum_mid), RD(q)); 3421 dlsp = (dl_capability_sub_t *)&dlhp[1]; 3422 } 3423 3424 if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY) { 3425 dl_capab_zerocopy_t *dlzp = (dl_capab_zerocopy_t *)&dlsp[1]; 3426 3427 dlsp->dl_cap = DL_CAPAB_ZEROCOPY; 3428 dlsp->dl_length = sizeof (dl_capab_zerocopy_t); 3429 dlzp->zerocopy_version = ZEROCOPY_VERSION_1; 3430 dlzp->zerocopy_flags = DL_CAPAB_VMSAFE_MEM; 3431 3432 dlcapabsetqid(&(dlzp->zerocopy_mid), RD(q)); 3433 dlsp = (dl_capability_sub_t *)&dlzp[1]; 3434 } 3435 3436 if (macinfo->gldm_options & GLDOPT_MDT) { 3437 dl_capab_mdt_t *dlmp = (dl_capab_mdt_t *)&dlsp[1]; 3438 3439 dlsp->dl_cap = DL_CAPAB_MDT; 3440 dlsp->dl_length = sizeof (dl_capab_mdt_t); 3441 3442 dlmp->mdt_version = MDT_VERSION_2; 3443 dlmp->mdt_max_pld = macinfo->gldm_mdt_segs; 3444 dlmp->mdt_span_limit = macinfo->gldm_mdt_sgl; 3445 dlcapabsetqid(&dlmp->mdt_mid, OTHERQ(q)); 3446 dlmp->mdt_flags = DL_CAPAB_MDT_ENABLE; 3447 dlmp->mdt_hdr_head = ifp->hdr_size; 3448 dlmp->mdt_hdr_tail = 0; 3449 } 3450 3451 qreply(q, mp); 3452 return (GLDE_OK); 3453 } 3454 3455 static int 3456 gld_cap_enable(queue_t 
*q, mblk_t *mp) 3457 { 3458 dl_capability_req_t *dlp; 3459 dl_capability_sub_t *dlsp; 3460 dl_capab_hcksum_t *dlhp; 3461 offset_t off; 3462 size_t len; 3463 size_t size; 3464 offset_t end; 3465 3466 dlp = (dl_capability_req_t *)mp->b_rptr; 3467 dlp->dl_primitive = DL_CAPABILITY_ACK; 3468 3469 off = dlp->dl_sub_offset; 3470 len = dlp->dl_sub_length; 3471 3472 if (!MBLKIN(mp, off, len)) 3473 return (DL_BADPRIM); 3474 3475 end = off + len; 3476 while (off < end) { 3477 dlsp = (dl_capability_sub_t *)(mp->b_rptr + off); 3478 size = sizeof (dl_capability_sub_t) + dlsp->dl_length; 3479 if (off + size > end) 3480 return (DL_BADPRIM); 3481 3482 switch (dlsp->dl_cap) { 3483 case DL_CAPAB_HCKSUM: 3484 dlhp = (dl_capab_hcksum_t *)&dlsp[1]; 3485 /* nothing useful we can do with the contents */ 3486 dlcapabsetqid(&(dlhp->hcksum_mid), RD(q)); 3487 break; 3488 default: 3489 break; 3490 } 3491 3492 off += size; 3493 } 3494 3495 qreply(q, mp); 3496 return (GLDE_OK); 3497 } 3498 3499 /* 3500 * Send a copy of the DL_NOTIFY_IND message <mp> to each stream that has 3501 * requested the specific <notification> that the message carries AND is 3502 * eligible and ready to receive the notification immediately. 3503 * 3504 * This routine ignores flow control. Notifications will be sent regardless. 3505 * 3506 * In all cases, the original message passed in is freed at the end of 3507 * the routine. 3508 */ 3509 static void 3510 gld_notify_qs(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t notification) 3511 { 3512 gld_mac_pvt_t *mac_pvt; 3513 gld_vlan_t *vlan; 3514 gld_t *gld; 3515 mblk_t *nmp; 3516 int i; 3517 3518 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo)); 3519 3520 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 3521 3522 /* 3523 * Search all the streams attached to this macinfo looking 3524 * for those eligible to receive the present notification. 3525 */ 3526 for (i = 0; i < VLAN_HASHSZ; i++) { 3527 for (vlan = mac_pvt->vlan_hash[i]; 3528 vlan != NULL; vlan = vlan->gldv_next) { 3529 for (gld = vlan->gldv_str_next; 3530 gld != (gld_t *)&vlan->gldv_str_next; 3531 gld = gld->gld_next) { 3532 ASSERT(gld->gld_qptr != NULL); 3533 ASSERT(gld->gld_state == DL_IDLE || 3534 gld->gld_state == DL_UNBOUND); 3535 ASSERT(gld->gld_mac_info == macinfo); 3536 3537 if (gld->gld_flags & GLD_STR_CLOSING) 3538 continue; /* not eligible - skip */ 3539 if (!(notification & gld->gld_notifications)) 3540 continue; /* not wanted - skip */ 3541 if ((nmp = dupmsg(mp)) == NULL) 3542 continue; /* can't copy - skip */ 3543 3544 /* 3545 * All OK; send dup'd notification up this 3546 * stream 3547 */ 3548 qreply(WR(gld->gld_qptr), nmp); 3549 } 3550 } 3551 } 3552 3553 /* 3554 * Drop the original message block now 3555 */ 3556 freemsg(mp); 3557 } 3558 3559 /* 3560 * For each (understood) bit in the <notifications> argument, contruct 3561 * a DL_NOTIFY_IND message and send it to the specified <q>, or to all 3562 * eligible queues if <q> is NULL. 3563 */ 3564 static void 3565 gld_notify_ind(gld_mac_info_t *macinfo, uint32_t notifications, queue_t *q) 3566 { 3567 gld_mac_pvt_t *mac_pvt; 3568 dl_notify_ind_t *dlnip; 3569 struct gld_stats *stats; 3570 mblk_t *mp; 3571 size_t size; 3572 uint32_t bit; 3573 3574 GLDM_LOCK(macinfo, RW_WRITER); 3575 3576 /* 3577 * The following cases shouldn't happen, but just in case the 3578 * MAC driver calls gld_linkstate() at an inappropriate time, we 3579 * check anyway ... 
3580 */ 3581 if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) { 3582 GLDM_UNLOCK(macinfo); 3583 return; /* not ready yet */ 3584 } 3585 3586 if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) { 3587 GLDM_UNLOCK(macinfo); 3588 return; /* not ready anymore */ 3589 } 3590 3591 /* 3592 * Make sure the kstats are up to date, 'cos we use some of 3593 * the kstat values below, specifically the link speed ... 3594 */ 3595 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 3596 stats = mac_pvt->statistics; 3597 if (macinfo->gldm_get_stats) 3598 (void) (*macinfo->gldm_get_stats)(macinfo, stats); 3599 3600 for (bit = 1; notifications != 0; bit <<= 1) { 3601 if ((notifications & bit) == 0) 3602 continue; 3603 notifications &= ~bit; 3604 3605 size = DL_NOTIFY_IND_SIZE; 3606 if (bit == DL_NOTE_PHYS_ADDR) 3607 size += macinfo->gldm_addrlen; 3608 if ((mp = allocb(size, BPRI_MED)) == NULL) 3609 continue; 3610 3611 mp->b_datap->db_type = M_PROTO; 3612 mp->b_wptr = mp->b_rptr + size; 3613 dlnip = (dl_notify_ind_t *)mp->b_rptr; 3614 dlnip->dl_primitive = DL_NOTIFY_IND; 3615 dlnip->dl_notification = 0; 3616 dlnip->dl_data = 0; 3617 dlnip->dl_addr_length = 0; 3618 dlnip->dl_addr_offset = 0; 3619 3620 switch (bit) { 3621 case DL_NOTE_PROMISC_ON_PHYS: 3622 case DL_NOTE_PROMISC_OFF_PHYS: 3623 if (mac_pvt->nprom != 0) 3624 dlnip->dl_notification = bit; 3625 break; 3626 3627 case DL_NOTE_LINK_DOWN: 3628 if (macinfo->gldm_linkstate == GLD_LINKSTATE_DOWN) 3629 dlnip->dl_notification = bit; 3630 break; 3631 3632 case DL_NOTE_LINK_UP: 3633 if (macinfo->gldm_linkstate == GLD_LINKSTATE_UP) 3634 dlnip->dl_notification = bit; 3635 break; 3636 3637 case DL_NOTE_SPEED: 3638 /* 3639 * Conversion required here: 3640 * GLD keeps the speed in bit/s in a uint64 3641 * DLPI wants it in kb/s in a uint32 3642 * Fortunately this is still big enough for 10Gb/s! 
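 *
 * For example, a 10 Gb/s link has glds_speed = 10,000,000,000 bit/s,
 * which becomes dl_data = 10,000,000 kb/s; a uint32 tops out around
 * 4.29e9 kb/s, i.e. roughly 4 Tb/s.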
3643 */
3644 dlnip->dl_notification = bit;
3645 dlnip->dl_data = stats->glds_speed/1000ULL;
3646 break;
3647
3648 case DL_NOTE_PHYS_ADDR:
3649 dlnip->dl_notification = bit;
3650 dlnip->dl_data = DL_CURR_PHYS_ADDR;
3651 dlnip->dl_addr_offset = sizeof (dl_notify_ind_t);
3652 dlnip->dl_addr_length = macinfo->gldm_addrlen +
3653 abs(macinfo->gldm_saplen);
3654 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3655 mac_copy(mac_pvt->curr_macaddr,
3656 mp->b_rptr + sizeof (dl_notify_ind_t),
3657 macinfo->gldm_addrlen);
3658 break;
3659
3660 default:
3661 break;
3662 }
3663
3664 if (dlnip->dl_notification == 0)
3665 freemsg(mp);
3666 else if (q != NULL)
3667 qreply(q, mp);
3668 else
3669 gld_notify_qs(macinfo, mp, bit);
3670 }
3671
3672 GLDM_UNLOCK(macinfo);
3673 }
3674
3675 /*
3676 * gld_notify_req - handle a DL_NOTIFY_REQ message
3677 */
3678 static int
3679 gld_notify_req(queue_t *q, mblk_t *mp)
3680 {
3681 gld_t *gld = (gld_t *)q->q_ptr;
3682 gld_mac_info_t *macinfo;
3683 gld_mac_pvt_t *pvt;
3684 dl_notify_req_t *dlnrp;
3685 dl_notify_ack_t *dlnap;
3686
3687 ASSERT(gld != NULL);
3688 ASSERT(gld->gld_qptr == RD(q));
3689
3690 dlnrp = (dl_notify_req_t *)mp->b_rptr;
3691
3692 #ifdef GLD_DEBUG
3693 if (gld_debug & GLDTRACE)
3694 cmn_err(CE_NOTE, "gld_notify_req(%p %p)",
3695 (void *)q, (void *)mp);
3696 #endif
3697
3698 if (gld->gld_state == DL_UNATTACHED) {
3699 #ifdef GLD_DEBUG
3700 if (gld_debug & GLDERRS)
3701 cmn_err(CE_NOTE, "gld_notify_req: wrong state (%d)",
3702 gld->gld_state);
3703 #endif
3704 return (DL_OUTSTATE);
3705 }
3706
3707 /*
3708 * Remember what notifications are required by this stream
3709 */
3710 macinfo = gld->gld_mac_info;
3711 pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3712
3713 gld->gld_notifications = dlnrp->dl_notifications & pvt->notifications;
3714
3715 /*
3716 * The return DL_NOTIFY_ACK carries the bitset of notifications
3717 * that this driver can provide, independently of which ones have
3718 * previously been or are now being requested.
3719 */
3720 if ((mp = mexchange(q, mp, sizeof (dl_notify_ack_t), M_PCPROTO,
3721 DL_NOTIFY_ACK)) == NULL)
3722 return (DL_SYSERR);
3723
3724 dlnap = (dl_notify_ack_t *)mp->b_rptr;
3725 dlnap->dl_notifications = pvt->notifications;
3726 qreply(q, mp);
3727
3728 /*
3729 * A side effect of a DL_NOTIFY_REQ is that after the DL_NOTIFY_ACK
3730 * reply, the requestor gets zero or more DL_NOTIFY_IND messages
3731 * that provide the current status.
3732 */
3733 gld_notify_ind(macinfo, gld->gld_notifications, q);
3734
3735 return (GLDE_OK);
3736 }
3737
3738 /*
3739 * gld_linkstate()
3740 * Called by driver to tell GLD the state of the physical link.
3741 * As a side effect, sends a DL_NOTE_LINK_UP or DL_NOTE_LINK_DOWN 3742 * notification to each client that has previously requested such 3743 * notifications 3744 */ 3745 void 3746 gld_linkstate(gld_mac_info_t *macinfo, int32_t newstate) 3747 { 3748 uint32_t notification; 3749 3750 switch (newstate) { 3751 default: 3752 return; 3753 3754 case GLD_LINKSTATE_DOWN: 3755 notification = DL_NOTE_LINK_DOWN; 3756 break; 3757 3758 case GLD_LINKSTATE_UP: 3759 notification = DL_NOTE_LINK_UP | DL_NOTE_SPEED; 3760 break; 3761 3762 case GLD_LINKSTATE_UNKNOWN: 3763 notification = 0; 3764 break; 3765 } 3766 3767 GLDM_LOCK(macinfo, RW_WRITER); 3768 if (macinfo->gldm_linkstate == newstate) 3769 notification = 0; 3770 else 3771 macinfo->gldm_linkstate = newstate; 3772 GLDM_UNLOCK(macinfo); 3773 3774 if (notification) 3775 gld_notify_ind(macinfo, notification, NULL); 3776 } 3777 3778 /* 3779 * gld_udqos - set the current QoS parameters (priority only at the moment). 3780 */ 3781 static int 3782 gld_udqos(queue_t *q, mblk_t *mp) 3783 { 3784 dl_udqos_req_t *dlp; 3785 gld_t *gld = (gld_t *)q->q_ptr; 3786 int off; 3787 int len; 3788 dl_qos_cl_sel1_t *selp; 3789 3790 ASSERT(gld); 3791 ASSERT(gld->gld_qptr == RD(q)); 3792 3793 #ifdef GLD_DEBUG 3794 if (gld_debug & GLDTRACE) 3795 cmn_err(CE_NOTE, "gld_udqos(%p %p)", (void *)q, (void *)mp); 3796 #endif 3797 3798 if (gld->gld_state != DL_IDLE) { 3799 #ifdef GLD_DEBUG 3800 if (gld_debug & GLDERRS) 3801 cmn_err(CE_NOTE, "gld_udqos: wrong state (%d)", 3802 gld->gld_state); 3803 #endif 3804 return (DL_OUTSTATE); 3805 } 3806 3807 dlp = (dl_udqos_req_t *)mp->b_rptr; 3808 off = dlp->dl_qos_offset; 3809 len = dlp->dl_qos_length; 3810 3811 if (len != sizeof (dl_qos_cl_sel1_t) || !MBLKIN(mp, off, len)) 3812 return (DL_BADQOSTYPE); 3813 3814 selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + off); 3815 if (selp->dl_qos_type != DL_QOS_CL_SEL1) 3816 return (DL_BADQOSTYPE); 3817 3818 if (selp->dl_trans_delay != 0 && 3819 selp->dl_trans_delay != DL_QOS_DONT_CARE) 3820 return (DL_BADQOSPARAM); 3821 if (selp->dl_protection != 0 && 3822 selp->dl_protection != DL_QOS_DONT_CARE) 3823 return (DL_BADQOSPARAM); 3824 if (selp->dl_residual_error != 0 && 3825 selp->dl_residual_error != DL_QOS_DONT_CARE) 3826 return (DL_BADQOSPARAM); 3827 if (selp->dl_priority < 0 || selp->dl_priority > 7) 3828 return (DL_BADQOSPARAM); 3829 3830 gld->gld_upri = selp->dl_priority; 3831 3832 dlokack(q, mp, DL_UDQOS_REQ); 3833 return (GLDE_OK); 3834 } 3835 3836 static mblk_t * 3837 gld_bindack(queue_t *q, mblk_t *mp) 3838 { 3839 gld_t *gld = (gld_t *)q->q_ptr; 3840 gld_mac_info_t *macinfo = gld->gld_mac_info; 3841 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 3842 dl_bind_ack_t *dlp; 3843 size_t size; 3844 t_uscalar_t addrlen; 3845 uchar_t *sapp; 3846 3847 addrlen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen); 3848 size = sizeof (dl_bind_ack_t) + addrlen; 3849 if ((mp = mexchange(q, mp, size, M_PCPROTO, DL_BIND_ACK)) == NULL) 3850 return (NULL); 3851 3852 dlp = (dl_bind_ack_t *)mp->b_rptr; 3853 dlp->dl_sap = gld->gld_sap; 3854 dlp->dl_addr_length = addrlen; 3855 dlp->dl_addr_offset = sizeof (dl_bind_ack_t); 3856 dlp->dl_max_conind = 0; 3857 dlp->dl_xidtest_flg = 0; 3858 3859 mac_copy(mac_pvt->curr_macaddr, (uchar_t *)&dlp[1], 3860 macinfo->gldm_addrlen); 3861 sapp = mp->b_rptr + dlp->dl_addr_offset + macinfo->gldm_addrlen; 3862 *(ushort_t *)sapp = gld->gld_sap; 3863 3864 return (mp); 3865 } 3866 3867 /* 3868 * gld_bind - determine if a SAP is already allocated and whether it is legal 3869 * to do 
the bind at this time 3870 */ 3871 static int 3872 gld_bind(queue_t *q, mblk_t *mp) 3873 { 3874 ulong_t sap; 3875 dl_bind_req_t *dlp; 3876 gld_t *gld = (gld_t *)q->q_ptr; 3877 gld_mac_info_t *macinfo = gld->gld_mac_info; 3878 3879 ASSERT(gld); 3880 ASSERT(gld->gld_qptr == RD(q)); 3881 3882 #ifdef GLD_DEBUG 3883 if (gld_debug & GLDTRACE) 3884 cmn_err(CE_NOTE, "gld_bind(%p %p)", (void *)q, (void *)mp); 3885 #endif 3886 3887 dlp = (dl_bind_req_t *)mp->b_rptr; 3888 sap = dlp->dl_sap; 3889 3890 #ifdef GLD_DEBUG 3891 if (gld_debug & GLDPROT) 3892 cmn_err(CE_NOTE, "gld_bind: lsap=%lx", sap); 3893 #endif 3894 3895 if (gld->gld_state != DL_UNBOUND) { 3896 #ifdef GLD_DEBUG 3897 if (gld_debug & GLDERRS) 3898 cmn_err(CE_NOTE, "gld_bind: bound or not attached (%d)", 3899 gld->gld_state); 3900 #endif 3901 return (DL_OUTSTATE); 3902 } 3903 ASSERT(macinfo); 3904 3905 if (dlp->dl_service_mode != DL_CLDLS) { 3906 return (DL_UNSUPPORTED); 3907 } 3908 if (dlp->dl_xidtest_flg & (DL_AUTO_XID | DL_AUTO_TEST)) { 3909 return (DL_NOAUTO); 3910 } 3911 3912 /* 3913 * Check sap validity and decide whether this stream accepts 3914 * IEEE 802.2 (LLC) packets. 3915 */ 3916 if (sap > ETHERTYPE_MAX) 3917 return (DL_BADSAP); 3918 3919 /* 3920 * Decide whether the SAP value selects EtherType encoding/decoding. 3921 * For compatibility with monolithic ethernet drivers, the range of 3922 * SAP values is different for DL_ETHER media. 3923 */ 3924 switch (macinfo->gldm_type) { 3925 case DL_ETHER: 3926 gld->gld_ethertype = (sap > ETHERMTU); 3927 break; 3928 default: 3929 gld->gld_ethertype = (sap > GLD_MAX_802_SAP); 3930 break; 3931 } 3932 3933 /* if we get to here, then the SAP is legal enough */ 3934 GLDM_LOCK(macinfo, RW_WRITER); 3935 gld->gld_state = DL_IDLE; /* bound and ready */ 3936 gld->gld_sap = sap; 3937 gld_set_ipq(gld); 3938 3939 #ifdef GLD_DEBUG 3940 if (gld_debug & GLDPROT) 3941 cmn_err(CE_NOTE, "gld_bind: ok - sap = %d", gld->gld_sap); 3942 #endif 3943 3944 /* ACK the BIND */ 3945 mp = gld_bindack(q, mp); 3946 GLDM_UNLOCK(macinfo); 3947 3948 if (mp != NULL) { 3949 qreply(q, mp); 3950 return (GLDE_OK); 3951 } 3952 3953 return (DL_SYSERR); 3954 } 3955 3956 /* 3957 * gld_unbind - perform an unbind of an LSAP or ether type on the stream. 3958 * The stream is still open and can be re-bound. 3959 */ 3960 static int 3961 gld_unbind(queue_t *q, mblk_t *mp) 3962 { 3963 gld_t *gld = (gld_t *)q->q_ptr; 3964 gld_mac_info_t *macinfo = gld->gld_mac_info; 3965 3966 ASSERT(gld); 3967 3968 #ifdef GLD_DEBUG 3969 if (gld_debug & GLDTRACE) 3970 cmn_err(CE_NOTE, "gld_unbind(%p %p)", (void *)q, (void *)mp); 3971 #endif 3972 3973 if (gld->gld_state != DL_IDLE) { 3974 #ifdef GLD_DEBUG 3975 if (gld_debug & GLDERRS) 3976 cmn_err(CE_NOTE, "gld_unbind: wrong state (%d)", 3977 gld->gld_state); 3978 #endif 3979 return (DL_OUTSTATE); 3980 } 3981 ASSERT(macinfo); 3982 3983 /* 3984 * Avoid unbinding (DL_UNBIND_REQ) while FAST/RAW is inside wput. 3985 * See comments above gld_start(). 
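 *
 * In outline, the handshake below works as follows: gld_in_unbind is
 * set and a memory barrier issued before gld_wput_count is examined;
 * if a concurrent wput is still inside the send path, the request is
 * requeued with putbq() and retried later via qenable(), otherwise
 * the unbind proceeds under GLDM_LOCK.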
3986 */ 3987 gld->gld_in_unbind = B_TRUE; /* disallow wput=>start */ 3988 membar_enter(); 3989 if (gld->gld_wput_count != 0) { 3990 gld->gld_in_unbind = B_FALSE; 3991 ASSERT(mp); /* we didn't come from close */ 3992 #ifdef GLD_DEBUG 3993 if (gld_debug & GLDETRACE) 3994 cmn_err(CE_NOTE, "gld_unbind: defer for wput"); 3995 #endif 3996 (void) putbq(q, mp); 3997 qenable(q); /* try again soon */ 3998 return (GLDE_RETRY); 3999 } 4000 4001 GLDM_LOCK(macinfo, RW_WRITER); 4002 gld->gld_state = DL_UNBOUND; 4003 gld->gld_sap = 0; 4004 gld_set_ipq(gld); 4005 GLDM_UNLOCK(macinfo); 4006 4007 membar_exit(); 4008 gld->gld_in_unbind = B_FALSE; 4009 4010 /* mp is NULL if we came from close */ 4011 if (mp) { 4012 gld_flushqueue(q); /* flush the queues */ 4013 dlokack(q, mp, DL_UNBIND_REQ); 4014 } 4015 return (GLDE_OK); 4016 } 4017 4018 /* 4019 * gld_inforeq - generate the response to an info request 4020 */ 4021 static int 4022 gld_inforeq(queue_t *q, mblk_t *mp) 4023 { 4024 gld_t *gld; 4025 dl_info_ack_t *dlp; 4026 int bufsize; 4027 glddev_t *glddev; 4028 gld_mac_info_t *macinfo; 4029 gld_mac_pvt_t *mac_pvt; 4030 int sel_offset = 0; 4031 int range_offset = 0; 4032 int addr_offset; 4033 int addr_length; 4034 int sap_length; 4035 int brdcst_offset; 4036 int brdcst_length; 4037 gld_vlan_t *vlan; 4038 uchar_t *sapp; 4039 4040 #ifdef GLD_DEBUG 4041 if (gld_debug & GLDTRACE) 4042 cmn_err(CE_NOTE, "gld_inforeq(%p %p)", (void *)q, (void *)mp); 4043 #endif 4044 gld = (gld_t *)q->q_ptr; 4045 ASSERT(gld); 4046 glddev = gld->gld_device; 4047 ASSERT(glddev); 4048 4049 if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) { 4050 macinfo = gld->gld_mac_info; 4051 ASSERT(macinfo != NULL); 4052 4053 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 4054 4055 addr_length = macinfo->gldm_addrlen; 4056 sap_length = macinfo->gldm_saplen; 4057 brdcst_length = macinfo->gldm_addrlen; 4058 } else { 4059 addr_length = glddev->gld_addrlen; 4060 sap_length = glddev->gld_saplen; 4061 brdcst_length = glddev->gld_addrlen; 4062 } 4063 4064 bufsize = sizeof (dl_info_ack_t); 4065 4066 addr_offset = bufsize; 4067 bufsize += addr_length; 4068 bufsize += abs(sap_length); 4069 4070 brdcst_offset = bufsize; 4071 bufsize += brdcst_length; 4072 4073 if ((vlan = (gld_vlan_t *)gld->gld_vlan) != NULL && 4074 vlan->gldv_id != VLAN_VID_NONE) { 4075 sel_offset = P2ROUNDUP(bufsize, sizeof (int64_t)); 4076 bufsize = sel_offset + sizeof (dl_qos_cl_sel1_t); 4077 4078 range_offset = P2ROUNDUP(bufsize, sizeof (int64_t)); 4079 bufsize = range_offset + sizeof (dl_qos_cl_range1_t); 4080 } 4081 4082 if ((mp = mexchange(q, mp, bufsize, M_PCPROTO, DL_INFO_ACK)) == NULL) 4083 return (GLDE_OK); /* nothing more to be done */ 4084 4085 bzero(mp->b_rptr, bufsize); 4086 4087 dlp = (dl_info_ack_t *)mp->b_rptr; 4088 dlp->dl_primitive = DL_INFO_ACK; 4089 dlp->dl_version = DL_VERSION_2; 4090 dlp->dl_service_mode = DL_CLDLS; 4091 dlp->dl_current_state = gld->gld_state; 4092 dlp->dl_provider_style = gld->gld_style; 4093 4094 if (sel_offset != 0) { 4095 dl_qos_cl_sel1_t *selp; 4096 dl_qos_cl_range1_t *rangep; 4097 4098 ASSERT(range_offset != 0); 4099 4100 dlp->dl_qos_offset = sel_offset; 4101 dlp->dl_qos_length = sizeof (dl_qos_cl_sel1_t); 4102 dlp->dl_qos_range_offset = range_offset; 4103 dlp->dl_qos_range_length = sizeof (dl_qos_cl_range1_t); 4104 4105 selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + sel_offset); 4106 selp->dl_qos_type = DL_QOS_CL_SEL1; 4107 selp->dl_priority = gld->gld_upri; 4108 4109 rangep = (dl_qos_cl_range1_t *)(mp->b_rptr + range_offset); 4110 
rangep->dl_qos_type = DL_QOS_CL_RANGE1; 4111 rangep->dl_priority.dl_min = 0; 4112 rangep->dl_priority.dl_max = 7; 4113 } 4114 4115 if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) { 4116 dlp->dl_min_sdu = macinfo->gldm_minpkt; 4117 dlp->dl_max_sdu = macinfo->gldm_maxpkt; 4118 dlp->dl_mac_type = macinfo->gldm_type; 4119 dlp->dl_addr_length = addr_length + abs(sap_length); 4120 dlp->dl_sap_length = sap_length; 4121 4122 if (gld->gld_state == DL_IDLE) { 4123 /* 4124 * If we are bound to a non-LLC SAP on any medium 4125 * other than Ethernet, then we need room for a 4126 * SNAP header. So we have to adjust the MTU size 4127 * accordingly. XXX I suppose this should be done 4128 * in gldutil.c, but it seems likely that this will 4129 * always be true for everything GLD supports but 4130 * Ethernet. Check this if you add another medium. 4131 */ 4132 if ((macinfo->gldm_type == DL_TPR || 4133 macinfo->gldm_type == DL_FDDI) && 4134 gld->gld_ethertype) 4135 dlp->dl_max_sdu -= LLC_SNAP_HDR_LEN; 4136 4137 /* copy macaddr and sap */ 4138 dlp->dl_addr_offset = addr_offset; 4139 4140 mac_copy(mac_pvt->curr_macaddr, mp->b_rptr + 4141 addr_offset, macinfo->gldm_addrlen); 4142 sapp = mp->b_rptr + addr_offset + 4143 macinfo->gldm_addrlen; 4144 *(ushort_t *)sapp = gld->gld_sap; 4145 } else { 4146 dlp->dl_addr_offset = 0; 4147 } 4148 4149 /* copy broadcast addr */ 4150 dlp->dl_brdcst_addr_length = macinfo->gldm_addrlen; 4151 dlp->dl_brdcst_addr_offset = brdcst_offset; 4152 mac_copy((caddr_t)macinfo->gldm_broadcast_addr, 4153 mp->b_rptr + brdcst_offset, brdcst_length); 4154 } else { 4155 /* 4156 * No PPA is attached. 4157 * The best we can do is use the values provided 4158 * by the first mac that called gld_register. 4159 */ 4160 dlp->dl_min_sdu = glddev->gld_minsdu; 4161 dlp->dl_max_sdu = glddev->gld_maxsdu; 4162 dlp->dl_mac_type = glddev->gld_type; 4163 dlp->dl_addr_length = addr_length + abs(sap_length); 4164 dlp->dl_sap_length = sap_length; 4165 dlp->dl_addr_offset = 0; 4166 dlp->dl_brdcst_addr_offset = brdcst_offset; 4167 dlp->dl_brdcst_addr_length = brdcst_length; 4168 mac_copy((caddr_t)glddev->gld_broadcast, 4169 mp->b_rptr + brdcst_offset, brdcst_length); 4170 } 4171 qreply(q, mp); 4172 return (GLDE_OK); 4173 } 4174 4175 /* 4176 * gld_unitdata (q, mp) 4177 * send a datagram. Destination address/lsap is in M_PROTO 4178 * message (first mblock), data is in remainder of message. 
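 *
 * Illustrative message layout (a sketch, not normative DLPI text):
 *	M_PROTO:  dl_unitdata_req_t, with the destination DLSAP address
 *		  (MAC address followed by the SAP) at dl_dest_addr_offset,
 *		  dl_dest_addr_length bytes in all
 *	M_DATA:   one or more b_cont blocks carrying the payload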
4179 * 4180 */ 4181 static int 4182 gld_unitdata(queue_t *q, mblk_t *mp) 4183 { 4184 gld_t *gld = (gld_t *)q->q_ptr; 4185 dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr; 4186 gld_mac_info_t *macinfo = gld->gld_mac_info; 4187 size_t msglen; 4188 mblk_t *nmp; 4189 gld_interface_t *ifp; 4190 uint32_t start; 4191 uint32_t stuff; 4192 uint32_t end; 4193 uint32_t value; 4194 uint32_t flags; 4195 uint32_t upri; 4196 4197 #ifdef GLD_DEBUG 4198 if (gld_debug & GLDTRACE) 4199 cmn_err(CE_NOTE, "gld_unitdata(%p %p)", (void *)q, (void *)mp); 4200 #endif 4201 4202 if (gld->gld_state != DL_IDLE) { 4203 #ifdef GLD_DEBUG 4204 if (gld_debug & GLDERRS) 4205 cmn_err(CE_NOTE, "gld_unitdata: wrong state (%d)", 4206 gld->gld_state); 4207 #endif 4208 dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset, 4209 dlp->dl_dest_addr_length, DL_OUTSTATE, 0); 4210 return (GLDE_OK); 4211 } 4212 ASSERT(macinfo != NULL); 4213 4214 if (!MBLKIN(mp, dlp->dl_dest_addr_offset, dlp->dl_dest_addr_length) || 4215 dlp->dl_dest_addr_length != 4216 macinfo->gldm_addrlen + abs(macinfo->gldm_saplen)) { 4217 dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset, 4218 dlp->dl_dest_addr_length, DL_BADADDR, 0); 4219 return (GLDE_OK); 4220 } 4221 4222 upri = dlp->dl_priority.dl_max; 4223 4224 msglen = msgdsize(mp); 4225 if (msglen == 0 || msglen > macinfo->gldm_maxpkt) { 4226 #ifdef GLD_DEBUG 4227 if (gld_debug & GLDERRS) 4228 cmn_err(CE_NOTE, "gld_unitdata: bad msglen (%d)", 4229 (int)msglen); 4230 #endif 4231 dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset, 4232 dlp->dl_dest_addr_length, DL_BADDATA, 0); 4233 return (GLDE_OK); 4234 } 4235 4236 ASSERT(mp->b_cont != NULL); /* because msgdsize(mp) is nonzero */ 4237 4238 ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep; 4239 4240 /* grab any checksum information that may be present */ 4241 hcksum_retrieve(mp->b_cont, NULL, NULL, &start, &stuff, &end, 4242 &value, &flags); 4243 4244 /* 4245 * Prepend a valid header for transmission 4246 */ 4247 if ((nmp = (*ifp->mkunitdata)(gld, mp)) == NULL) { 4248 #ifdef GLD_DEBUG 4249 if (gld_debug & GLDERRS) 4250 cmn_err(CE_NOTE, "gld_unitdata: mkunitdata failed."); 4251 #endif 4252 dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset, 4253 dlp->dl_dest_addr_length, DL_SYSERR, ENOSR); 4254 return (GLDE_OK); 4255 } 4256 4257 /* apply any checksum information to the first block in the chain */ 4258 (void) hcksum_assoc(nmp, NULL, NULL, start, stuff, end, value, 4259 flags, 0); 4260 4261 if (gld_start(q, nmp, GLD_WSRV, upri) == GLD_NORESOURCES) { 4262 qenable(q); 4263 return (GLDE_RETRY); 4264 } 4265 4266 return (GLDE_OK); 4267 } 4268 4269 /* 4270 * gldattach(q, mp) 4271 * DLPI DL_ATTACH_REQ 4272 * this attaches the stream to a PPA 4273 */ 4274 static int 4275 gldattach(queue_t *q, mblk_t *mp) 4276 { 4277 dl_attach_req_t *at; 4278 gld_mac_info_t *macinfo; 4279 gld_t *gld = (gld_t *)q->q_ptr; 4280 glddev_t *glddev; 4281 gld_mac_pvt_t *mac_pvt; 4282 uint32_t ppa; 4283 uint32_t vid; 4284 gld_vlan_t *vlan; 4285 4286 at = (dl_attach_req_t *)mp->b_rptr; 4287 4288 if (gld->gld_state != DL_UNATTACHED) 4289 return (DL_OUTSTATE); 4290 4291 ASSERT(!gld->gld_mac_info); 4292 4293 ppa = at->dl_ppa % GLD_VLAN_SCALE; /* 0 .. 999 */ 4294 vid = at->dl_ppa / GLD_VLAN_SCALE; /* 0 .. 
4094 */ 4295 if (vid > VLAN_VID_MAX) 4296 return (DL_BADPPA); 4297 4298 glddev = gld->gld_device; 4299 mutex_enter(&glddev->gld_devlock); 4300 for (macinfo = glddev->gld_mac_next; 4301 macinfo != (gld_mac_info_t *)&glddev->gld_mac_next; 4302 macinfo = macinfo->gldm_next) { 4303 int inst; 4304 4305 ASSERT(macinfo != NULL); 4306 if (macinfo->gldm_ppa != ppa) 4307 continue; 4308 4309 if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) 4310 continue; /* this one's not ready yet */ 4311 4312 /* 4313 * VLAN sanity check 4314 */ 4315 if (vid != VLAN_VID_NONE && !VLAN_CAPABLE(macinfo)) { 4316 mutex_exit(&glddev->gld_devlock); 4317 return (DL_BADPPA); 4318 } 4319 4320 /* 4321 * We found the correct PPA, hold the instance 4322 */ 4323 inst = ddi_get_instance(macinfo->gldm_devinfo); 4324 if (inst == -1 || qassociate(q, inst) != 0) { 4325 mutex_exit(&glddev->gld_devlock); 4326 return (DL_BADPPA); 4327 } 4328 4329 /* Take the stream off the per-driver-class list */ 4330 gldremque(gld); 4331 4332 /* 4333 * We must hold the lock to prevent multiple calls 4334 * to the reset and start routines. 4335 */ 4336 GLDM_LOCK(macinfo, RW_WRITER); 4337 4338 gld->gld_mac_info = macinfo; 4339 4340 if (macinfo->gldm_send_tagged != NULL) 4341 gld->gld_send = macinfo->gldm_send_tagged; 4342 else 4343 gld->gld_send = macinfo->gldm_send; 4344 4345 if ((vlan = gld_get_vlan(macinfo, vid)) == NULL) { 4346 GLDM_UNLOCK(macinfo); 4347 gldinsque(gld, glddev->gld_str_prev); 4348 mutex_exit(&glddev->gld_devlock); 4349 (void) qassociate(q, -1); 4350 return (DL_BADPPA); 4351 } 4352 4353 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 4354 if (!mac_pvt->started) { 4355 if (gld_start_mac(macinfo) != GLD_SUCCESS) { 4356 gld_rem_vlan(vlan); 4357 GLDM_UNLOCK(macinfo); 4358 gldinsque(gld, glddev->gld_str_prev); 4359 mutex_exit(&glddev->gld_devlock); 4360 dlerrorack(q, mp, DL_ATTACH_REQ, DL_SYSERR, 4361 EIO); 4362 (void) qassociate(q, -1); 4363 return (GLDE_OK); 4364 } 4365 } 4366 4367 gld->gld_vlan = vlan; 4368 vlan->gldv_nstreams++; 4369 gldinsque(gld, vlan->gldv_str_prev); 4370 gld->gld_state = DL_UNBOUND; 4371 GLDM_UNLOCK(macinfo); 4372 4373 #ifdef GLD_DEBUG 4374 if (gld_debug & GLDPROT) { 4375 cmn_err(CE_NOTE, "gldattach(%p, %p, PPA = %d)", 4376 (void *)q, (void *)mp, macinfo->gldm_ppa); 4377 } 4378 #endif 4379 mutex_exit(&glddev->gld_devlock); 4380 dlokack(q, mp, DL_ATTACH_REQ); 4381 return (GLDE_OK); 4382 } 4383 mutex_exit(&glddev->gld_devlock); 4384 return (DL_BADPPA); 4385 } 4386 4387 /* 4388 * gldunattach(q, mp) 4389 * DLPI DL_DETACH_REQ 4390 * detaches the mac layer from the stream 4391 */ 4392 int 4393 gldunattach(queue_t *q, mblk_t *mp) 4394 { 4395 gld_t *gld = (gld_t *)q->q_ptr; 4396 glddev_t *glddev = gld->gld_device; 4397 gld_mac_info_t *macinfo = gld->gld_mac_info; 4398 int state = gld->gld_state; 4399 int i; 4400 gld_mac_pvt_t *mac_pvt; 4401 gld_vlan_t *vlan; 4402 boolean_t phys_off; 4403 boolean_t mult_off; 4404 int op = GLD_MAC_PROMISC_NOOP; 4405 4406 if (state != DL_UNBOUND) 4407 return (DL_OUTSTATE); 4408 4409 ASSERT(macinfo != NULL); 4410 ASSERT(gld->gld_sap == 0); 4411 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 4412 4413 #ifdef GLD_DEBUG 4414 if (gld_debug & GLDPROT) { 4415 cmn_err(CE_NOTE, "gldunattach(%p, %p, PPA = %d)", 4416 (void *)q, (void *)mp, macinfo->gldm_ppa); 4417 } 4418 #endif 4419 4420 GLDM_LOCK(macinfo, RW_WRITER); 4421 4422 if (gld->gld_mcast) { 4423 for (i = 0; i < gld->gld_multicnt; i++) { 4424 gld_mcast_t *mcast; 4425 4426 if ((mcast = gld->gld_mcast[i]) != NULL) { 4427 
ASSERT(mcast->gldm_refcnt); 4428 gld_send_disable_multi(macinfo, mcast); 4429 } 4430 } 4431 kmem_free(gld->gld_mcast, 4432 sizeof (gld_mcast_t *) * gld->gld_multicnt); 4433 gld->gld_mcast = NULL; 4434 gld->gld_multicnt = 0; 4435 } 4436 4437 /* decide if we need to turn off any promiscuity */ 4438 phys_off = (gld->gld_flags & GLD_PROM_PHYS && 4439 --mac_pvt->nprom == 0); 4440 mult_off = (gld->gld_flags & GLD_PROM_MULT && 4441 --mac_pvt->nprom_multi == 0); 4442 4443 gld->gld_flags &= ~(GLD_PROM_PHYS | GLD_PROM_SAP | GLD_PROM_MULT); 4444 4445 if (phys_off) { 4446 op = (mac_pvt->nprom_multi == 0) ? GLD_MAC_PROMISC_NONE : 4447 GLD_MAC_PROMISC_MULTI; 4448 } else if (mult_off) { 4449 op = (mac_pvt->nprom == 0) ? GLD_MAC_PROMISC_NONE : 4450 GLD_MAC_PROMISC_NOOP; /* phys overrides multi */ 4451 } 4452 4453 if (op != GLD_MAC_PROMISC_NOOP) 4454 (void) (*macinfo->gldm_set_promiscuous)(macinfo, op); 4455 4456 GLDM_UNLOCK(macinfo); 4457 4458 if (phys_off) 4459 gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL); 4460 4461 /* 4462 * We need to hold both locks when modifying the mac stream list 4463 * to protect findminor as well as everyone else. 4464 */ 4465 mutex_enter(&glddev->gld_devlock); 4466 GLDM_LOCK(macinfo, RW_WRITER); 4467 4468 /* disassociate this stream with its vlan and underlying mac */ 4469 gldremque(gld); 4470 4471 vlan = (gld_vlan_t *)gld->gld_vlan; 4472 if (--vlan->gldv_nstreams == 0) { 4473 gld_rem_vlan(vlan); 4474 gld->gld_vlan = NULL; 4475 } 4476 4477 gld->gld_mac_info = NULL; 4478 gld->gld_state = DL_UNATTACHED; 4479 4480 /* cleanup mac layer if last vlan */ 4481 if (mac_pvt->nvlan == 0) { 4482 gld_stop_mac(macinfo); 4483 macinfo->gldm_GLD_flags &= ~GLD_INTR_WAIT; 4484 } 4485 4486 /* make sure no references to this gld for gld_v0_sched */ 4487 if (mac_pvt->last_sched == gld) 4488 mac_pvt->last_sched = NULL; 4489 4490 GLDM_UNLOCK(macinfo); 4491 4492 /* put the stream on the unattached Style 2 list */ 4493 gldinsque(gld, glddev->gld_str_prev); 4494 4495 mutex_exit(&glddev->gld_devlock); 4496 4497 /* There will be no mp if we were called from close */ 4498 if (mp) { 4499 dlokack(q, mp, DL_DETACH_REQ); 4500 } 4501 if (gld->gld_style == DL_STYLE2) 4502 (void) qassociate(q, -1); 4503 return (GLDE_OK); 4504 } 4505 4506 /* 4507 * gld_enable_multi (q, mp) 4508 * Enables multicast address on the stream. If the mac layer 4509 * isn't enabled for this address, enable at that level as well. 
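 *
 * A sketch of the bookkeeping: each stream keeps a gld_mcast[] array of
 * pointers into the mac's mcast_table[]; the table entry's gldm_refcnt
 * counts how many streams reference that address, and the hardware is
 * only programmed when the first reference is taken here (or the last
 * one is dropped in gld_send_disable_multi).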
4510 */ 4511 static int 4512 gld_enable_multi(queue_t *q, mblk_t *mp) 4513 { 4514 gld_t *gld = (gld_t *)q->q_ptr; 4515 glddev_t *glddev; 4516 gld_mac_info_t *macinfo = gld->gld_mac_info; 4517 unsigned char *maddr; 4518 dl_enabmulti_req_t *multi; 4519 gld_mcast_t *mcast; 4520 int i, rc; 4521 gld_mac_pvt_t *mac_pvt; 4522 4523 #ifdef GLD_DEBUG 4524 if (gld_debug & GLDPROT) { 4525 cmn_err(CE_NOTE, "gld_enable_multi(%p, %p)", (void *)q, 4526 (void *)mp); 4527 } 4528 #endif 4529 4530 if (gld->gld_state == DL_UNATTACHED) 4531 return (DL_OUTSTATE); 4532 4533 ASSERT(macinfo != NULL); 4534 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 4535 4536 if (macinfo->gldm_set_multicast == NULL) { 4537 return (DL_UNSUPPORTED); 4538 } 4539 4540 multi = (dl_enabmulti_req_t *)mp->b_rptr; 4541 4542 if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) || 4543 multi->dl_addr_length != macinfo->gldm_addrlen) 4544 return (DL_BADADDR); 4545 4546 /* request appears to be valid */ 4547 4548 glddev = mac_pvt->major_dev; 4549 ASSERT(glddev == gld->gld_device); 4550 4551 maddr = mp->b_rptr + multi->dl_addr_offset; 4552 4553 /* 4554 * The multicast addresses live in a per-device table, along 4555 * with a reference count. Each stream has a table that 4556 * points to entries in the device table, with the reference 4557 * count reflecting the number of streams pointing at it. If 4558 * this multicast address is already in the per-device table, 4559 * all we have to do is point at it. 4560 */ 4561 GLDM_LOCK(macinfo, RW_WRITER); 4562 4563 /* does this address appear in current table? */ 4564 if (gld->gld_mcast == NULL) { 4565 /* no mcast addresses -- allocate table */ 4566 gld->gld_mcast = GETSTRUCT(gld_mcast_t *, 4567 glddev->gld_multisize); 4568 if (gld->gld_mcast == NULL) { 4569 GLDM_UNLOCK(macinfo); 4570 dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR); 4571 return (GLDE_OK); 4572 } 4573 gld->gld_multicnt = glddev->gld_multisize; 4574 } else { 4575 for (i = 0; i < gld->gld_multicnt; i++) { 4576 if (gld->gld_mcast[i] && 4577 mac_eq(gld->gld_mcast[i]->gldm_addr, 4578 maddr, macinfo->gldm_addrlen)) { 4579 /* this is a match -- just succeed */ 4580 ASSERT(gld->gld_mcast[i]->gldm_refcnt); 4581 GLDM_UNLOCK(macinfo); 4582 dlokack(q, mp, DL_ENABMULTI_REQ); 4583 return (GLDE_OK); 4584 } 4585 } 4586 } 4587 4588 /* 4589 * it wasn't in the stream so check to see if the mac layer has it 4590 */ 4591 mcast = NULL; 4592 if (mac_pvt->mcast_table == NULL) { 4593 mac_pvt->mcast_table = GETSTRUCT(gld_mcast_t, 4594 glddev->gld_multisize); 4595 if (mac_pvt->mcast_table == NULL) { 4596 GLDM_UNLOCK(macinfo); 4597 dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR); 4598 return (GLDE_OK); 4599 } 4600 } else { 4601 for (i = 0; i < glddev->gld_multisize; i++) { 4602 if (mac_pvt->mcast_table[i].gldm_refcnt && 4603 mac_eq(mac_pvt->mcast_table[i].gldm_addr, 4604 maddr, macinfo->gldm_addrlen)) { 4605 mcast = &mac_pvt->mcast_table[i]; 4606 break; 4607 } 4608 } 4609 } 4610 if (mcast == NULL) { 4611 /* not in mac layer -- find an empty mac slot to fill in */ 4612 for (i = 0; i < glddev->gld_multisize; i++) { 4613 if (mac_pvt->mcast_table[i].gldm_refcnt == 0) { 4614 mcast = &mac_pvt->mcast_table[i]; 4615 mac_copy(maddr, mcast->gldm_addr, 4616 macinfo->gldm_addrlen); 4617 break; 4618 } 4619 } 4620 } 4621 if (mcast == NULL) { 4622 /* couldn't get a mac layer slot */ 4623 GLDM_UNLOCK(macinfo); 4624 return (DL_TOOMANY); 4625 } 4626 4627 /* now we have a mac layer slot in mcast -- get a stream slot */ 4628 for (i = 0; i < gld->gld_multicnt; 
i++) { 4629 if (gld->gld_mcast[i] != NULL) 4630 continue; 4631 /* found an empty slot */ 4632 if (!mcast->gldm_refcnt) { 4633 /* set mcast in hardware */ 4634 unsigned char cmaddr[GLD_MAX_ADDRLEN]; 4635 4636 ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen); 4637 cmac_copy(maddr, cmaddr, 4638 macinfo->gldm_addrlen, macinfo); 4639 4640 rc = (*macinfo->gldm_set_multicast) 4641 (macinfo, cmaddr, GLD_MULTI_ENABLE); 4642 if (rc == GLD_NOTSUPPORTED) { 4643 GLDM_UNLOCK(macinfo); 4644 return (DL_NOTSUPPORTED); 4645 } else if (rc == GLD_NORESOURCES) { 4646 GLDM_UNLOCK(macinfo); 4647 return (DL_TOOMANY); 4648 } else if (rc == GLD_BADARG) { 4649 GLDM_UNLOCK(macinfo); 4650 return (DL_BADADDR); 4651 } else if (rc == GLD_RETRY) { 4652 /* 4653 * The putbq and gld_xwait must be 4654 * within the lock to prevent races 4655 * with gld_sched. 4656 */ 4657 (void) putbq(q, mp); 4658 gld->gld_xwait = B_TRUE; 4659 GLDM_UNLOCK(macinfo); 4660 return (GLDE_RETRY); 4661 } else if (rc != GLD_SUCCESS) { 4662 GLDM_UNLOCK(macinfo); 4663 dlerrorack(q, mp, DL_ENABMULTI_REQ, 4664 DL_SYSERR, EIO); 4665 return (GLDE_OK); 4666 } 4667 } 4668 gld->gld_mcast[i] = mcast; 4669 mcast->gldm_refcnt++; 4670 GLDM_UNLOCK(macinfo); 4671 dlokack(q, mp, DL_ENABMULTI_REQ); 4672 return (GLDE_OK); 4673 } 4674 4675 /* couldn't get a stream slot */ 4676 GLDM_UNLOCK(macinfo); 4677 return (DL_TOOMANY); 4678 } 4679 4680 4681 /* 4682 * gld_disable_multi (q, mp) 4683 * Disable the multicast address on the stream. If last 4684 * reference for the mac layer, disable there as well. 4685 */ 4686 static int 4687 gld_disable_multi(queue_t *q, mblk_t *mp) 4688 { 4689 gld_t *gld; 4690 gld_mac_info_t *macinfo; 4691 unsigned char *maddr; 4692 dl_disabmulti_req_t *multi; 4693 int i; 4694 gld_mcast_t *mcast; 4695 4696 #ifdef GLD_DEBUG 4697 if (gld_debug & GLDPROT) { 4698 cmn_err(CE_NOTE, "gld_disable_multi(%p, %p)", (void *)q, 4699 (void *)mp); 4700 } 4701 #endif 4702 4703 gld = (gld_t *)q->q_ptr; 4704 if (gld->gld_state == DL_UNATTACHED) 4705 return (DL_OUTSTATE); 4706 4707 macinfo = gld->gld_mac_info; 4708 ASSERT(macinfo != NULL); 4709 if (macinfo->gldm_set_multicast == NULL) { 4710 return (DL_UNSUPPORTED); 4711 } 4712 4713 multi = (dl_disabmulti_req_t *)mp->b_rptr; 4714 4715 if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) || 4716 multi->dl_addr_length != macinfo->gldm_addrlen) 4717 return (DL_BADADDR); 4718 4719 maddr = mp->b_rptr + multi->dl_addr_offset; 4720 4721 /* request appears to be valid */ 4722 /* does this address appear in current table? */ 4723 GLDM_LOCK(macinfo, RW_WRITER); 4724 if (gld->gld_mcast != NULL) { 4725 for (i = 0; i < gld->gld_multicnt; i++) 4726 if (((mcast = gld->gld_mcast[i]) != NULL) && 4727 mac_eq(mcast->gldm_addr, 4728 maddr, macinfo->gldm_addrlen)) { 4729 ASSERT(mcast->gldm_refcnt); 4730 gld_send_disable_multi(macinfo, mcast); 4731 gld->gld_mcast[i] = NULL; 4732 GLDM_UNLOCK(macinfo); 4733 dlokack(q, mp, DL_DISABMULTI_REQ); 4734 return (GLDE_OK); 4735 } 4736 } 4737 GLDM_UNLOCK(macinfo); 4738 return (DL_NOTENAB); /* not an enabled address */ 4739 } 4740 4741 /* 4742 * gld_send_disable_multi(macinfo, mcast) 4743 * this function is used to disable a multicast address if the reference 4744 * count goes to zero. The disable request will then be forwarded to the 4745 * lower stream. 
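 *
 * For example, if two streams have enabled the same group address the
 * entry's gldm_refcnt is 2; the first disable only decrements the count,
 * and the gldm_set_multicast(GLD_MULTI_DISABLE) call is made only when
 * the count reaches zero.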
4746 */ 4747 static void 4748 gld_send_disable_multi(gld_mac_info_t *macinfo, gld_mcast_t *mcast) 4749 { 4750 ASSERT(macinfo != NULL); 4751 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo)); 4752 ASSERT(mcast != NULL); 4753 ASSERT(mcast->gldm_refcnt); 4754 4755 if (!mcast->gldm_refcnt) { 4756 return; /* "cannot happen" */ 4757 } 4758 4759 if (--mcast->gldm_refcnt > 0) { 4760 return; 4761 } 4762 4763 /* 4764 * This must be converted from canonical form to device form. 4765 * The refcnt is now zero so we can trash the data. 4766 */ 4767 if (macinfo->gldm_options & GLDOPT_CANONICAL_ADDR) 4768 gld_bitreverse(mcast->gldm_addr, macinfo->gldm_addrlen); 4769 4770 /* XXX Ought to check for GLD_NORESOURCES or GLD_FAILURE */ 4771 (void) (*macinfo->gldm_set_multicast) 4772 (macinfo, mcast->gldm_addr, GLD_MULTI_DISABLE); 4773 } 4774 4775 /* 4776 * gld_promisc (q, mp, req, on) 4777 * enable or disable the use of promiscuous mode with the hardware 4778 */ 4779 static int 4780 gld_promisc(queue_t *q, mblk_t *mp, t_uscalar_t req, boolean_t on) 4781 { 4782 gld_t *gld; 4783 gld_mac_info_t *macinfo; 4784 gld_mac_pvt_t *mac_pvt; 4785 gld_vlan_t *vlan; 4786 union DL_primitives *prim; 4787 int macrc = GLD_SUCCESS; 4788 int dlerr = GLDE_OK; 4789 int op = GLD_MAC_PROMISC_NOOP; 4790 4791 #ifdef GLD_DEBUG 4792 if (gld_debug & GLDTRACE) 4793 cmn_err(CE_NOTE, "gld_promisc(%p, %p, %d, %d)", 4794 (void *)q, (void *)mp, req, on); 4795 #endif 4796 4797 ASSERT(mp != NULL); 4798 prim = (union DL_primitives *)mp->b_rptr; 4799 4800 /* XXX I think spec allows promisc in unattached state */ 4801 gld = (gld_t *)q->q_ptr; 4802 if (gld->gld_state == DL_UNATTACHED) 4803 return (DL_OUTSTATE); 4804 4805 macinfo = gld->gld_mac_info; 4806 ASSERT(macinfo != NULL); 4807 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 4808 4809 vlan = (gld_vlan_t *)gld->gld_vlan; 4810 ASSERT(vlan != NULL); 4811 4812 GLDM_LOCK(macinfo, RW_WRITER); 4813 4814 /* 4815 * Work out what request (if any) has to be made to the MAC layer 4816 */ 4817 if (on) { 4818 switch (prim->promiscon_req.dl_level) { 4819 default: 4820 dlerr = DL_UNSUPPORTED; /* this is an error */ 4821 break; 4822 4823 case DL_PROMISC_PHYS: 4824 if (mac_pvt->nprom == 0) 4825 op = GLD_MAC_PROMISC_PHYS; 4826 break; 4827 4828 case DL_PROMISC_MULTI: 4829 if (mac_pvt->nprom_multi == 0) 4830 if (mac_pvt->nprom == 0) 4831 op = GLD_MAC_PROMISC_MULTI; 4832 break; 4833 4834 case DL_PROMISC_SAP: 4835 /* We can do this without reference to the MAC */ 4836 break; 4837 } 4838 } else { 4839 switch (prim->promiscoff_req.dl_level) { 4840 default: 4841 dlerr = DL_UNSUPPORTED; /* this is an error */ 4842 break; 4843 4844 case DL_PROMISC_PHYS: 4845 if (!(gld->gld_flags & GLD_PROM_PHYS)) 4846 dlerr = DL_NOTENAB; 4847 else if (mac_pvt->nprom == 1) 4848 if (mac_pvt->nprom_multi) 4849 op = GLD_MAC_PROMISC_MULTI; 4850 else 4851 op = GLD_MAC_PROMISC_NONE; 4852 break; 4853 4854 case DL_PROMISC_MULTI: 4855 if (!(gld->gld_flags & GLD_PROM_MULT)) 4856 dlerr = DL_NOTENAB; 4857 else if (mac_pvt->nprom_multi == 1) 4858 if (mac_pvt->nprom == 0) 4859 op = GLD_MAC_PROMISC_NONE; 4860 break; 4861 4862 case DL_PROMISC_SAP: 4863 if (!(gld->gld_flags & GLD_PROM_SAP)) 4864 dlerr = DL_NOTENAB; 4865 4866 /* We can do this without reference to the MAC */ 4867 break; 4868 } 4869 } 4870 4871 /* 4872 * The request was invalid in some way so no need to continue. 
4873 */ 4874 if (dlerr != GLDE_OK) { 4875 GLDM_UNLOCK(macinfo); 4876 return (dlerr); 4877 } 4878 4879 /* 4880 * Issue the request to the MAC layer, if required 4881 */ 4882 if (op != GLD_MAC_PROMISC_NOOP) { 4883 macrc = (*macinfo->gldm_set_promiscuous)(macinfo, op); 4884 } 4885 4886 /* 4887 * On success, update the appropriate flags & refcounts 4888 */ 4889 if (macrc == GLD_SUCCESS) { 4890 if (on) { 4891 switch (prim->promiscon_req.dl_level) { 4892 case DL_PROMISC_PHYS: 4893 mac_pvt->nprom++; 4894 gld->gld_flags |= GLD_PROM_PHYS; 4895 break; 4896 4897 case DL_PROMISC_MULTI: 4898 mac_pvt->nprom_multi++; 4899 gld->gld_flags |= GLD_PROM_MULT; 4900 break; 4901 4902 case DL_PROMISC_SAP: 4903 gld->gld_flags |= GLD_PROM_SAP; 4904 break; 4905 4906 default: 4907 break; 4908 } 4909 } else { 4910 switch (prim->promiscoff_req.dl_level) { 4911 case DL_PROMISC_PHYS: 4912 mac_pvt->nprom--; 4913 gld->gld_flags &= ~GLD_PROM_PHYS; 4914 break; 4915 4916 case DL_PROMISC_MULTI: 4917 mac_pvt->nprom_multi--; 4918 gld->gld_flags &= ~GLD_PROM_MULT; 4919 break; 4920 4921 case DL_PROMISC_SAP: 4922 gld->gld_flags &= ~GLD_PROM_SAP; 4923 break; 4924 4925 default: 4926 break; 4927 } 4928 } 4929 } else if (macrc == GLD_RETRY) { 4930 /* 4931 * The putbq and gld_xwait must be within the lock to 4932 * prevent races with gld_sched. 4933 */ 4934 (void) putbq(q, mp); 4935 gld->gld_xwait = B_TRUE; 4936 } 4937 4938 /* 4939 * Update VLAN IPQ status -- it may have changed 4940 */ 4941 if (gld->gld_flags & (GLD_PROM_SAP | GLD_PROM_MULT | GLD_PROM_PHYS)) 4942 vlan->gldv_ipq_flags |= IPQ_FORBIDDEN; 4943 else 4944 vlan->gldv_ipq_flags &= ~IPQ_FORBIDDEN; 4945 4946 GLDM_UNLOCK(macinfo); 4947 4948 /* 4949 * Finally, decide how to reply. 4950 * 4951 * If <macrc> is not GLD_SUCCESS, the request was put to the MAC 4952 * layer but failed. In such cases, we can return a DL_* error 4953 * code and let the caller send an error-ack reply upstream, or 4954 * we can send a reply here and then return GLDE_OK so that the 4955 * caller doesn't also respond. 4956 * 4957 * If physical-promiscuous mode was (successfully) switched on or 4958 * off, send a notification (DL_NOTIFY_IND) to anyone interested. 
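 *
 * In outline (summary of the switch below): GLD_NOTSUPPORTED and
 * GLD_RETRY are returned to the caller as DL_NOTSUPPORTED and
 * GLDE_RETRY respectively; GLD_NORESOURCES and other failures are
 * answered here with dlerrorack(); GLD_SUCCESS is acknowledged with
 * dlokack() and may be followed by a promiscuity notification.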
4959 */ 4960 switch (macrc) { 4961 case GLD_NOTSUPPORTED: 4962 return (DL_NOTSUPPORTED); 4963 4964 case GLD_NORESOURCES: 4965 dlerrorack(q, mp, req, DL_SYSERR, ENOSR); 4966 return (GLDE_OK); 4967 4968 case GLD_RETRY: 4969 return (GLDE_RETRY); 4970 4971 default: 4972 dlerrorack(q, mp, req, DL_SYSERR, EIO); 4973 return (GLDE_OK); 4974 4975 case GLD_SUCCESS: 4976 dlokack(q, mp, req); 4977 break; 4978 } 4979 4980 switch (op) { 4981 case GLD_MAC_PROMISC_NOOP: 4982 break; 4983 4984 case GLD_MAC_PROMISC_PHYS: 4985 gld_notify_ind(macinfo, DL_NOTE_PROMISC_ON_PHYS, NULL); 4986 break; 4987 4988 default: 4989 gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL); 4990 break; 4991 } 4992 4993 return (GLDE_OK); 4994 } 4995 4996 /* 4997 * gld_physaddr() 4998 * get the current or factory physical address value 4999 */ 5000 static int 5001 gld_physaddr(queue_t *q, mblk_t *mp) 5002 { 5003 gld_t *gld = (gld_t *)q->q_ptr; 5004 gld_mac_info_t *macinfo; 5005 union DL_primitives *prim = (union DL_primitives *)mp->b_rptr; 5006 unsigned char addr[GLD_MAX_ADDRLEN]; 5007 5008 if (gld->gld_state == DL_UNATTACHED) 5009 return (DL_OUTSTATE); 5010 5011 macinfo = (gld_mac_info_t *)gld->gld_mac_info; 5012 ASSERT(macinfo != NULL); 5013 ASSERT(macinfo->gldm_addrlen <= GLD_MAX_ADDRLEN); 5014 5015 switch (prim->physaddr_req.dl_addr_type) { 5016 case DL_FACT_PHYS_ADDR: 5017 mac_copy((caddr_t)macinfo->gldm_vendor_addr, 5018 (caddr_t)addr, macinfo->gldm_addrlen); 5019 break; 5020 case DL_CURR_PHYS_ADDR: 5021 /* make a copy so we don't hold the lock across qreply */ 5022 GLDM_LOCK(macinfo, RW_WRITER); 5023 mac_copy((caddr_t) 5024 ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr, 5025 (caddr_t)addr, macinfo->gldm_addrlen); 5026 GLDM_UNLOCK(macinfo); 5027 break; 5028 default: 5029 return (DL_BADPRIM); 5030 } 5031 dlphysaddrack(q, mp, (caddr_t)addr, macinfo->gldm_addrlen); 5032 return (GLDE_OK); 5033 } 5034 5035 /* 5036 * gld_setaddr() 5037 * change the hardware's physical address to a user specified value 5038 */ 5039 static int 5040 gld_setaddr(queue_t *q, mblk_t *mp) 5041 { 5042 gld_t *gld = (gld_t *)q->q_ptr; 5043 gld_mac_info_t *macinfo; 5044 gld_mac_pvt_t *mac_pvt; 5045 union DL_primitives *prim = (union DL_primitives *)mp->b_rptr; 5046 unsigned char *addr; 5047 unsigned char cmaddr[GLD_MAX_ADDRLEN]; 5048 int rc; 5049 gld_vlan_t *vlan; 5050 5051 if (gld->gld_state == DL_UNATTACHED) 5052 return (DL_OUTSTATE); 5053 5054 vlan = (gld_vlan_t *)gld->gld_vlan; 5055 ASSERT(vlan != NULL); 5056 5057 if (vlan->gldv_id != VLAN_VID_NONE) 5058 return (DL_NOTSUPPORTED); 5059 5060 macinfo = (gld_mac_info_t *)gld->gld_mac_info; 5061 ASSERT(macinfo != NULL); 5062 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 5063 5064 if (!MBLKIN(mp, prim->set_physaddr_req.dl_addr_offset, 5065 prim->set_physaddr_req.dl_addr_length) || 5066 prim->set_physaddr_req.dl_addr_length != macinfo->gldm_addrlen) 5067 return (DL_BADADDR); 5068 5069 GLDM_LOCK(macinfo, RW_WRITER); 5070 5071 /* now do the set at the hardware level */ 5072 addr = mp->b_rptr + prim->set_physaddr_req.dl_addr_offset; 5073 ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen); 5074 cmac_copy(addr, cmaddr, macinfo->gldm_addrlen, macinfo); 5075 5076 rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr); 5077 if (rc == GLD_SUCCESS) 5078 mac_copy(addr, mac_pvt->curr_macaddr, 5079 macinfo->gldm_addrlen); 5080 5081 GLDM_UNLOCK(macinfo); 5082 5083 switch (rc) { 5084 case GLD_SUCCESS: 5085 break; 5086 case GLD_NOTSUPPORTED: 5087 return (DL_NOTSUPPORTED); 5088 case GLD_BADARG: 5089 return 
(DL_BADADDR); 5090 case GLD_NORESOURCES: 5091 dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, ENOSR); 5092 return (GLDE_OK); 5093 default: 5094 dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, EIO); 5095 return (GLDE_OK); 5096 } 5097 5098 gld_notify_ind(macinfo, DL_NOTE_PHYS_ADDR, NULL); 5099 5100 dlokack(q, mp, DL_SET_PHYS_ADDR_REQ); 5101 return (GLDE_OK); 5102 } 5103 5104 int 5105 gld_get_statistics(queue_t *q, mblk_t *mp) 5106 { 5107 dl_get_statistics_ack_t *dlsp; 5108 gld_t *gld = (gld_t *)q->q_ptr; 5109 gld_mac_info_t *macinfo = gld->gld_mac_info; 5110 gld_mac_pvt_t *mac_pvt; 5111 5112 if (gld->gld_state == DL_UNATTACHED) 5113 return (DL_OUTSTATE); 5114 5115 ASSERT(macinfo != NULL); 5116 5117 mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 5118 (void) gld_update_kstat(mac_pvt->kstatp, KSTAT_READ); 5119 5120 mp = mexchange(q, mp, DL_GET_STATISTICS_ACK_SIZE + 5121 sizeof (struct gldkstats), M_PCPROTO, DL_GET_STATISTICS_ACK); 5122 5123 if (mp == NULL) 5124 return (GLDE_OK); /* mexchange already sent merror */ 5125 5126 dlsp = (dl_get_statistics_ack_t *)mp->b_rptr; 5127 dlsp->dl_primitive = DL_GET_STATISTICS_ACK; 5128 dlsp->dl_stat_length = sizeof (struct gldkstats); 5129 dlsp->dl_stat_offset = DL_GET_STATISTICS_ACK_SIZE; 5130 5131 GLDM_LOCK(macinfo, RW_WRITER); 5132 bcopy(mac_pvt->kstatp->ks_data, 5133 (mp->b_rptr + DL_GET_STATISTICS_ACK_SIZE), 5134 sizeof (struct gldkstats)); 5135 GLDM_UNLOCK(macinfo); 5136 5137 qreply(q, mp); 5138 return (GLDE_OK); 5139 } 5140 5141 /* =================================================== */ 5142 /* misc utilities, some requiring various mutexes held */ 5143 /* =================================================== */ 5144 5145 /* 5146 * Initialize and start the driver. 5147 */ 5148 static int 5149 gld_start_mac(gld_mac_info_t *macinfo) 5150 { 5151 int rc; 5152 unsigned char cmaddr[GLD_MAX_ADDRLEN]; 5153 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 5154 5155 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo)); 5156 ASSERT(!mac_pvt->started); 5157 5158 rc = (*macinfo->gldm_reset)(macinfo); 5159 if (rc != GLD_SUCCESS) 5160 return (GLD_FAILURE); 5161 5162 /* set the addr after we reset the device */ 5163 ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen); 5164 cmac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt) 5165 ->curr_macaddr, cmaddr, macinfo->gldm_addrlen, macinfo); 5166 5167 rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr); 5168 ASSERT(rc != GLD_BADARG); /* this address was good before */ 5169 if (rc != GLD_SUCCESS && rc != GLD_NOTSUPPORTED) 5170 return (GLD_FAILURE); 5171 5172 rc = (*macinfo->gldm_start)(macinfo); 5173 if (rc != GLD_SUCCESS) 5174 return (GLD_FAILURE); 5175 5176 mac_pvt->started = B_TRUE; 5177 return (GLD_SUCCESS); 5178 } 5179 5180 /* 5181 * Stop the driver. 5182 */ 5183 static void 5184 gld_stop_mac(gld_mac_info_t *macinfo) 5185 { 5186 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 5187 5188 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo)); 5189 ASSERT(mac_pvt->started); 5190 5191 (void) (*macinfo->gldm_stop)(macinfo); 5192 5193 mac_pvt->started = B_FALSE; 5194 } 5195 5196 5197 /* 5198 * gld_set_ipq will set a pointer to the queue which is bound to the 5199 * IP sap if: 5200 * o the device type is ethernet or IPoIB. 5201 * o there is no stream in SAP promiscuous mode. 5202 * o there is exactly one stream bound to the IP sap. 5203 * o the stream is in "fastpath" mode. 
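 *
 * For example, a single DL_IDLE stream bound to ETHERTYPE_IP in
 * "fastpath" (GLD_FAST) mode becomes the vlan's gldv_ipq; if a second
 * IP stream binds, or that stream becomes ineligible, gldv_ipq stays
 * NULL and inbound IP traffic is delivered through the normal receive
 * path instead.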
5204 */ 5205 static void 5206 gld_set_ipq(gld_t *gld) 5207 { 5208 gld_vlan_t *vlan; 5209 gld_mac_info_t *macinfo = gld->gld_mac_info; 5210 gld_t *ip_gld = NULL; 5211 uint_t ipq_candidates = 0; 5212 gld_t *ipv6_gld = NULL; 5213 uint_t ipv6q_candidates = 0; 5214 5215 ASSERT(GLDM_LOCK_HELD_WRITE(macinfo)); 5216 5217 /* The ipq code in gld_recv() is intimate with ethernet/IPoIB */ 5218 if (((macinfo->gldm_type != DL_ETHER) && 5219 (macinfo->gldm_type != DL_IB)) || 5220 (gld_global_options & GLD_OPT_NO_IPQ)) 5221 return; 5222 5223 vlan = (gld_vlan_t *)gld->gld_vlan; 5224 ASSERT(vlan != NULL); 5225 5226 /* clear down any previously defined ipqs */ 5227 vlan->gldv_ipq = NULL; 5228 vlan->gldv_ipv6q = NULL; 5229 5230 /* Try to find a single stream eligible to receive IP packets */ 5231 for (gld = vlan->gldv_str_next; 5232 gld != (gld_t *)&vlan->gldv_str_next; gld = gld->gld_next) { 5233 if (gld->gld_state != DL_IDLE) 5234 continue; /* not eligible to receive */ 5235 if (gld->gld_flags & GLD_STR_CLOSING) 5236 continue; /* not eligible to receive */ 5237 5238 if (gld->gld_sap == ETHERTYPE_IP) { 5239 ip_gld = gld; 5240 ipq_candidates++; 5241 } 5242 5243 if (gld->gld_sap == ETHERTYPE_IPV6) { 5244 ipv6_gld = gld; 5245 ipv6q_candidates++; 5246 } 5247 } 5248 5249 if (ipq_candidates == 1) { 5250 ASSERT(ip_gld != NULL); 5251 5252 if (ip_gld->gld_flags & GLD_FAST) /* eligible for ipq */ 5253 vlan->gldv_ipq = ip_gld->gld_qptr; 5254 } 5255 5256 if (ipv6q_candidates == 1) { 5257 ASSERT(ipv6_gld != NULL); 5258 5259 if (ipv6_gld->gld_flags & GLD_FAST) /* eligible for ipq */ 5260 vlan->gldv_ipv6q = ipv6_gld->gld_qptr; 5261 } 5262 } 5263 5264 /* 5265 * gld_flushqueue (q) 5266 * used by DLPI primitives that require flushing the queues. 5267 * essentially, this is DL_UNBIND_REQ. 5268 */ 5269 static void 5270 gld_flushqueue(queue_t *q) 5271 { 5272 /* flush all data in both queues */ 5273 /* XXX Should these be FLUSHALL? */ 5274 flushq(q, FLUSHDATA); 5275 flushq(WR(q), FLUSHDATA); 5276 /* flush all the queues upstream */ 5277 (void) putctl1(q, M_FLUSH, FLUSHRW); 5278 } 5279 5280 /* 5281 * gld_devlookup (major) 5282 * search the device table for the device with specified 5283 * major number and return a pointer to it if it exists 5284 */ 5285 static glddev_t * 5286 gld_devlookup(int major) 5287 { 5288 struct glddevice *dev; 5289 5290 ASSERT(mutex_owned(&gld_device_list.gld_devlock)); 5291 5292 for (dev = gld_device_list.gld_next; 5293 dev != &gld_device_list; 5294 dev = dev->gld_next) { 5295 ASSERT(dev); 5296 if (dev->gld_major == major) 5297 return (dev); 5298 } 5299 return (NULL); 5300 } 5301 5302 /* 5303 * gld_findminor(device) 5304 * Returns a minor number currently unused by any stream in the current 5305 * device class (major) list. 
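 *
 * In outline: a fast path simply hands out the next unused clone minor
 * while the GLD_MIN_CLONE_MINOR..GLD_MAX_CLONE_MINOR range has never
 * been exhausted; otherwise every unattached and attached stream on
 * this major is scanned for a minor that nothing is using, and 0 is
 * returned if the whole range is in use.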
5306 */ 5307 static int 5308 gld_findminor(glddev_t *device) 5309 { 5310 gld_t *next; 5311 gld_mac_info_t *nextmac; 5312 gld_vlan_t *nextvlan; 5313 int minor; 5314 int i; 5315 5316 ASSERT(mutex_owned(&device->gld_devlock)); 5317 5318 /* The fast way */ 5319 if (device->gld_nextminor >= GLD_MIN_CLONE_MINOR && 5320 device->gld_nextminor <= GLD_MAX_CLONE_MINOR) 5321 return (device->gld_nextminor++); 5322 5323 /* The steady way */ 5324 for (minor = GLD_MIN_CLONE_MINOR; minor <= GLD_MAX_CLONE_MINOR; 5325 minor++) { 5326 /* Search all unattached streams */ 5327 for (next = device->gld_str_next; 5328 next != (gld_t *)&device->gld_str_next; 5329 next = next->gld_next) { 5330 if (minor == next->gld_minor) 5331 goto nextminor; 5332 } 5333 /* Search all attached streams; we don't need maclock because */ 5334 /* mac stream list is protected by devlock as well as maclock */ 5335 for (nextmac = device->gld_mac_next; 5336 nextmac != (gld_mac_info_t *)&device->gld_mac_next; 5337 nextmac = nextmac->gldm_next) { 5338 gld_mac_pvt_t *pvt = 5339 (gld_mac_pvt_t *)nextmac->gldm_mac_pvt; 5340 5341 if (!(nextmac->gldm_GLD_flags & GLD_MAC_READY)) 5342 continue; /* this one's not ready yet */ 5343 5344 for (i = 0; i < VLAN_HASHSZ; i++) { 5345 for (nextvlan = pvt->vlan_hash[i]; 5346 nextvlan != NULL; 5347 nextvlan = nextvlan->gldv_next) { 5348 for (next = nextvlan->gldv_str_next; 5349 next != 5350 (gld_t *)&nextvlan->gldv_str_next; 5351 next = next->gld_next) { 5352 if (minor == next->gld_minor) 5353 goto nextminor; 5354 } 5355 } 5356 } 5357 } 5358 5359 return (minor); 5360 nextminor: 5361 /* don't need to do anything */ 5362 ; 5363 } 5364 cmn_err(CE_WARN, "GLD ran out of minor numbers for %s", 5365 device->gld_name); 5366 return (0); 5367 } 5368 5369 /* 5370 * version of insque/remque for use by this driver 5371 */ 5372 struct qelem { 5373 struct qelem *q_forw; 5374 struct qelem *q_back; 5375 /* rest of structure */ 5376 }; 5377 5378 static void 5379 gldinsque(void *elem, void *pred) 5380 { 5381 struct qelem *pelem = elem; 5382 struct qelem *ppred = pred; 5383 struct qelem *pnext = ppred->q_forw; 5384 5385 pelem->q_forw = pnext; 5386 pelem->q_back = ppred; 5387 ppred->q_forw = pelem; 5388 pnext->q_back = pelem; 5389 } 5390 5391 static void 5392 gldremque(void *arg) 5393 { 5394 struct qelem *pelem = arg; 5395 struct qelem *elem = arg; 5396 5397 pelem->q_forw->q_back = pelem->q_back; 5398 pelem->q_back->q_forw = pelem->q_forw; 5399 elem->q_back = elem->q_forw = NULL; 5400 } 5401 5402 static gld_vlan_t * 5403 gld_add_vlan(gld_mac_info_t *macinfo, uint32_t vid) 5404 { 5405 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt; 5406 gld_vlan_t **pp; 5407 gld_vlan_t *p; 5408 5409 pp = &(mac_pvt->vlan_hash[vid % VLAN_HASHSZ]); 5410 while ((p = *pp) != NULL) { 5411 ASSERT(p->gldv_id != vid); 5412 pp = &(p->gldv_next); 5413 } 5414 5415 if ((p = kmem_zalloc(sizeof (gld_vlan_t), KM_NOSLEEP)) == NULL) 5416 return (NULL); 5417 5418 p->gldv_mac = macinfo; 5419 p->gldv_id = vid; 5420 5421 if (vid == VLAN_VID_NONE) { 5422 p->gldv_ptag = VLAN_VTAG_NONE; 5423 p->gldv_stats = mac_pvt->statistics; 5424 p->gldv_kstatp = NULL; 5425 } else { 5426 p->gldv_ptag = GLD_MK_PTAG(VLAN_CFI_ETHER, vid); 5427 p->gldv_stats = kmem_zalloc(sizeof (struct gld_stats), 5428 KM_SLEEP); 5429 5430 if (gld_init_vlan_stats(p) != GLD_SUCCESS) { 5431 kmem_free(p->gldv_stats, sizeof (struct gld_stats)); 5432 kmem_free(p, sizeof (gld_vlan_t)); 5433 return (NULL); 5434 } 5435 } 5436 5437 p->gldv_str_next = p->gldv_str_prev = (gld_t *)&p->gldv_str_next; 
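/* the list head above points at itself: the per-vlan stream list starts out empty */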
5438 mac_pvt->nvlan++;
5439 *pp = p;
5440
5441 return (p);
5442 }
5443
5444 static void
5445 gld_rem_vlan(gld_vlan_t *vlan)
5446 {
5447 gld_mac_info_t *macinfo = vlan->gldv_mac;
5448 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5449 gld_vlan_t **pp;
5450 gld_vlan_t *p;
5451
5452 pp = &(mac_pvt->vlan_hash[vlan->gldv_id % VLAN_HASHSZ]);
5453 while ((p = *pp) != NULL) {
5454 if (p->gldv_id == vlan->gldv_id)
5455 break;
5456 pp = &(p->gldv_next);
5457 }
5458 ASSERT(p != NULL);
5459
5460 *pp = p->gldv_next;
5461 mac_pvt->nvlan--;
5462 if (p->gldv_id != VLAN_VID_NONE) {
5463 ASSERT(p->gldv_kstatp != NULL);
5464 kstat_delete(p->gldv_kstatp);
5465 kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5466 }
5467 kmem_free(p, sizeof (gld_vlan_t));
5468 }
5469
5470 gld_vlan_t *
5471 gld_find_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5472 {
5473 gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5474 gld_vlan_t *p;
5475
5476 p = mac_pvt->vlan_hash[vid % VLAN_HASHSZ];
5477 while (p != NULL) {
5478 if (p->gldv_id == vid)
5479 return (p);
5480 p = p->gldv_next;
5481 }
5482 return (NULL);
5483 }
5484
5485 gld_vlan_t *
5486 gld_get_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5487 {
5488 gld_vlan_t *vlan;
5489
5490 if ((vlan = gld_find_vlan(macinfo, vid)) == NULL)
5491 vlan = gld_add_vlan(macinfo, vid);
5492
5493 return (vlan);
5494 }
5495
5496 /*
5497 * gld_bitrevcopy()
5498 * This is essentially bcopy, with the ability to bit reverse
5499 * the source bytes. The MAC address bytes as transmitted by FDDI
5500 * interfaces are bit reversed.
5501 */
5502 void
5503 gld_bitrevcopy(caddr_t src, caddr_t target, size_t n)
5504 {
5505 while (n--)
5506 *target++ = bit_rev[(uchar_t)*src++];
5507 }
5508
5509 /*
5510 * gld_bitreverse()
5511 * Convert the bit order by swapping all the bits, using a
5512 * lookup table.
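 *
 * For example, bit_rev maps 0x01 to 0x80 and 0x35 (binary 00110101)
 * to 0xac (10101100); FDDI and Token Ring interfaces present addresses
 * in this bit-swapped order relative to Ethernet's canonical form.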
5513 */ 5514 void 5515 gld_bitreverse(uchar_t *rptr, size_t n) 5516 { 5517 while (n--) { 5518 *rptr = bit_rev[*rptr]; 5519 rptr++; 5520 } 5521 } 5522 5523 char * 5524 gld_macaddr_sprintf(char *etherbuf, unsigned char *ap, int len) 5525 { 5526 int i; 5527 char *cp = etherbuf; 5528 static char digits[] = "0123456789abcdef"; 5529 5530 for (i = 0; i < len; i++) { 5531 *cp++ = digits[*ap >> 4]; 5532 *cp++ = digits[*ap++ & 0xf]; 5533 *cp++ = ':'; 5534 } 5535 *--cp = 0; 5536 return (etherbuf); 5537 } 5538 5539 #ifdef GLD_DEBUG 5540 static void 5541 gld_check_assertions() 5542 { 5543 glddev_t *dev; 5544 gld_mac_info_t *mac; 5545 gld_t *str; 5546 gld_vlan_t *vlan; 5547 int i; 5548 5549 mutex_enter(&gld_device_list.gld_devlock); 5550 5551 for (dev = gld_device_list.gld_next; 5552 dev != (glddev_t *)&gld_device_list.gld_next; 5553 dev = dev->gld_next) { 5554 mutex_enter(&dev->gld_devlock); 5555 ASSERT(dev->gld_broadcast != NULL); 5556 for (str = dev->gld_str_next; 5557 str != (gld_t *)&dev->gld_str_next; 5558 str = str->gld_next) { 5559 ASSERT(str->gld_device == dev); 5560 ASSERT(str->gld_mac_info == NULL); 5561 ASSERT(str->gld_qptr != NULL); 5562 ASSERT(str->gld_minor >= GLD_MIN_CLONE_MINOR); 5563 ASSERT(str->gld_multicnt == 0); 5564 ASSERT(str->gld_mcast == NULL); 5565 ASSERT(!(str->gld_flags & 5566 (GLD_PROM_PHYS|GLD_PROM_MULT|GLD_PROM_SAP))); 5567 ASSERT(str->gld_sap == 0); 5568 ASSERT(str->gld_state == DL_UNATTACHED); 5569 } 5570 for (mac = dev->gld_mac_next; 5571 mac != (gld_mac_info_t *)&dev->gld_mac_next; 5572 mac = mac->gldm_next) { 5573 int nvlan = 0; 5574 gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt; 5575 5576 if (!(mac->gldm_GLD_flags & GLD_MAC_READY)) 5577 continue; /* this one's not ready yet */ 5578 5579 GLDM_LOCK(mac, RW_WRITER); 5580 ASSERT(mac->gldm_devinfo != NULL); 5581 ASSERT(mac->gldm_mac_pvt != NULL); 5582 ASSERT(pvt->interfacep != NULL); 5583 ASSERT(pvt->kstatp != NULL); 5584 ASSERT(pvt->statistics != NULL); 5585 ASSERT(pvt->major_dev == dev); 5586 5587 for (i = 0; i < VLAN_HASHSZ; i++) { 5588 for (vlan = pvt->vlan_hash[i]; 5589 vlan != NULL; vlan = vlan->gldv_next) { 5590 int nstr = 0; 5591 5592 ASSERT(vlan->gldv_mac == mac); 5593 5594 for (str = vlan->gldv_str_next; 5595 str != 5596 (gld_t *)&vlan->gldv_str_next; 5597 str = str->gld_next) { 5598 ASSERT(str->gld_device == dev); 5599 ASSERT(str->gld_mac_info == 5600 mac); 5601 ASSERT(str->gld_qptr != NULL); 5602 ASSERT(str->gld_minor >= 5603 GLD_MIN_CLONE_MINOR); 5604 ASSERT( 5605 str->gld_multicnt == 0 || 5606 str->gld_mcast); 5607 nstr++; 5608 } 5609 ASSERT(vlan->gldv_nstreams == nstr); 5610 nvlan++; 5611 } 5612 } 5613 ASSERT(pvt->nvlan == nvlan); 5614 GLDM_UNLOCK(mac); 5615 } 5616 mutex_exit(&dev->gld_devlock); 5617 } 5618 mutex_exit(&gld_device_list.gld_devlock); 5619 } 5620 #endif 5621