xref: /titanic_51/usr/src/uts/common/io/gld.c (revision 1a7c1b724419d3cb5fa6eea75123c6b2060ba31b)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * gld - Generic LAN Driver Version 2, PSARC/1997/382
31  *
32  * This is a utility module that provides generic facilities for
33  * LAN	drivers.  The DLPI protocol and most STREAMS interfaces
34  * are handled here.
35  *
36  * It no longer provides compatibility with drivers
37  * implemented according to the GLD v0 documentation published
38  * in 1993. (See PSARC 2003/728)
39  */
40 
41 
42 #include <sys/types.h>
43 #include <sys/errno.h>
44 #include <sys/stropts.h>
45 #include <sys/stream.h>
46 #include <sys/kmem.h>
47 #include <sys/stat.h>
48 #include <sys/modctl.h>
49 #include <sys/kstat.h>
50 #include <sys/debug.h>
51 #include <sys/note.h>
52 #include <sys/sysmacros.h>
53 
54 #include <sys/byteorder.h>
55 #include <sys/strsun.h>
56 #include <sys/strsubr.h>
57 #include <sys/dlpi.h>
58 #include <sys/pattr.h>
59 #include <sys/ethernet.h>
60 #include <sys/ib/clients/ibd/ibd.h>
61 #include <sys/policy.h>
62 #include <sys/atomic.h>
63 
64 #include <sys/multidata.h>
65 #include <sys/gld.h>
66 #include <sys/gldpriv.h>
67 
68 #include <sys/ddi.h>
69 #include <sys/sunddi.h>
70 
71 /*
72  * Macro to atomically increment counters of type uint32_t, uint64_t
73  * and ulong_t.
74  */
75 #define	BUMP(stat, delta)	do {				\
76 	_NOTE(CONSTANTCONDITION)				\
77 	if (sizeof (stat) == sizeof (uint32_t))	{		\
78 		atomic_add_32((uint32_t *)&stat, delta);	\
79 	_NOTE(CONSTANTCONDITION)				\
80 	} else if (sizeof (stat) == sizeof (uint64_t)) {	\
81 		atomic_add_64((uint64_t *)&stat, delta);	\
82 	}							\
83 	_NOTE(CONSTANTCONDITION)				\
84 } while (0)
85 
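/*
 * Update a VLAN's transmit statistics for "number" packets described by
 * "pktinfo": bump the broadcast or multicast counter as appropriate and
 * add to the 64-bit byte and packet transmit totals.
 */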
86 #define	UPDATE_STATS(vlan, pktinfo, number)	{		\
87 	if ((pktinfo).isBroadcast)				\
88 		(vlan)->gldv_stats->glds_brdcstxmt += (number);	\
89 	else if ((pktinfo).isMulticast)				\
90 		(vlan)->gldv_stats->glds_multixmt += (number);	\
91 	(vlan)->gldv_stats->glds_bytexmt64 += (pktinfo).pktLen;	\
92 	(vlan)->gldv_stats->glds_pktxmt64 += (number);		\
93 }
94 
95 #ifdef GLD_DEBUG
96 int gld_debug = GLDERRS;
97 #endif
98 
99 /* called from gld_register */
100 static int gld_initstats(gld_mac_info_t *);
101 
102 /* called from kstat mechanism, and from wsrv's get_statistics */
103 static int gld_update_kstat(kstat_t *, int);
104 
105 /* statistics for additional vlans */
106 static int gld_init_vlan_stats(gld_vlan_t *);
107 static int gld_update_vlan_kstat(kstat_t *, int);
108 
109 /* called from gld_getinfo */
110 static dev_info_t *gld_finddevinfo(dev_t);
111 
112 /* called from wput, wsrv, unidata, and v0_sched to send a packet */
113 /* also from the source routing stuff for sending RDE protocol packets */
114 static int gld_start(queue_t *, mblk_t *, int, uint32_t);
115 static int gld_start_mdt(queue_t *, mblk_t *, int);
116 
117 /* called from gld_start[_mdt] to loopback packet(s) in promiscuous mode */
118 static void gld_precv(gld_mac_info_t *, gld_vlan_t *, mblk_t *);
119 static void gld_precv_mdt(gld_mac_info_t *, gld_vlan_t *, mblk_t *,
120     pdesc_t *, pktinfo_t *);
121 
122 /* receive group: called from gld_recv and gld_precv* with maclock held */
123 static void gld_sendup(gld_mac_info_t *, gld_vlan_t *, pktinfo_t *, mblk_t *,
124     int (*)());
125 static int gld_accept(gld_t *, pktinfo_t *);
126 static int gld_mcmatch(gld_t *, pktinfo_t *);
127 static int gld_multicast(unsigned char *, gld_t *);
128 static int gld_paccept(gld_t *, pktinfo_t *);
129 static void gld_passon(gld_t *, mblk_t *, pktinfo_t *,
130     void (*)(queue_t *, mblk_t *));
131 static mblk_t *gld_addudind(gld_t *, mblk_t *, pktinfo_t *);
132 
133 /* wsrv group: called from wsrv, single threaded per queue */
134 static int gld_ioctl(queue_t *, mblk_t *);
135 static void gld_fastpath(gld_t *, queue_t *, mblk_t *);
136 static int gld_cmds(queue_t *, mblk_t *);
137 static mblk_t *gld_bindack(queue_t *, mblk_t *);
138 static int gld_notify_req(queue_t *, mblk_t *);
139 static int gld_udqos(queue_t *, mblk_t *);
140 static int gld_bind(queue_t *, mblk_t *);
141 static int gld_unbind(queue_t *, mblk_t *);
142 static int gld_inforeq(queue_t *, mblk_t *);
143 static int gld_unitdata(queue_t *, mblk_t *);
144 static int gldattach(queue_t *, mblk_t *);
145 static int gldunattach(queue_t *, mblk_t *);
146 static int gld_enable_multi(queue_t *, mblk_t *);
147 static int gld_disable_multi(queue_t *, mblk_t *);
148 static void gld_send_disable_multi(gld_mac_info_t *, gld_mcast_t *);
149 static int gld_promisc(queue_t *, mblk_t *, t_uscalar_t, boolean_t);
150 static int gld_physaddr(queue_t *, mblk_t *);
151 static int gld_setaddr(queue_t *, mblk_t *);
152 static int gld_get_statistics(queue_t *, mblk_t *);
153 static int gld_cap(queue_t *, mblk_t *);
154 static int gld_cap_ack(queue_t *, mblk_t *);
155 static int gld_cap_enable(queue_t *, mblk_t *);
156 
157 /* misc utilities, some requiring various mutexes held */
158 static int gld_start_mac(gld_mac_info_t *);
159 static void gld_stop_mac(gld_mac_info_t *);
160 static void gld_set_ipq(gld_t *);
161 static void gld_flushqueue(queue_t *);
162 static glddev_t *gld_devlookup(int);
163 static int gld_findminor(glddev_t *);
164 static void gldinsque(void *, void *);
165 static void gldremque(void *);
166 void gld_bitrevcopy(caddr_t, caddr_t, size_t);
167 void gld_bitreverse(uchar_t *, size_t);
168 char *gld_macaddr_sprintf(char *, unsigned char *, int);
169 static gld_vlan_t *gld_add_vlan(gld_mac_info_t *, uint32_t vid);
170 static void gld_rem_vlan(gld_vlan_t *);
171 gld_vlan_t *gld_find_vlan(gld_mac_info_t *, uint32_t);
172 gld_vlan_t *gld_get_vlan(gld_mac_info_t *, uint32_t);
173 
174 #ifdef GLD_DEBUG
175 static void gld_check_assertions(void);
176 extern void gld_sr_dump(gld_mac_info_t *);
177 #endif
178 
179 /*
180  * Allocate and zero-out "number" structures each of type "structure" in
181  * kernel memory.
182  */
183 #define	GETSTRUCT(structure, number)   \
184 	(kmem_zalloc((uint_t)(sizeof (structure) * (number)), KM_NOSLEEP))
185 
186 #define	abs(a) ((a) < 0 ? -(a) : a)
187 
188 uint32_t gld_global_options = GLD_OPT_NO_ETHRXSNAP;
189 
190 /*
191  * VLANs are only supported on ethernet devices that manipulate VLAN headers
192  * themselves.
193  */
194 #define	VLAN_CAPABLE(macinfo) \
195 	((macinfo)->gldm_type == DL_ETHER && \
196 	(macinfo)->gldm_send_tagged != NULL)
197 
198 /*
199  * The set of notifications generatable by GLD itself, the additional
200  * set that can be generated if the MAC driver provides the link-state
201  * tracking callback capability, and the set supported by the GLD
202  * notification code below.
203  *
204  * PLEASE keep these in sync with what the code actually does!
205  */
206 static const uint32_t gld_internal_notes =	DL_NOTE_PROMISC_ON_PHYS |
207 						DL_NOTE_PROMISC_OFF_PHYS |
208 						DL_NOTE_PHYS_ADDR;
209 static const uint32_t gld_linkstate_notes =	DL_NOTE_LINK_DOWN |
210 						DL_NOTE_LINK_UP |
211 						DL_NOTE_SPEED;
212 static const uint32_t gld_supported_notes =	DL_NOTE_PROMISC_ON_PHYS |
213 						DL_NOTE_PROMISC_OFF_PHYS |
214 						DL_NOTE_PHYS_ADDR |
215 						DL_NOTE_LINK_DOWN |
216 						DL_NOTE_LINK_UP |
217 						DL_NOTE_SPEED;
218 
219 /* Media must correspond to #defines in gld.h */
220 static char *gld_media[] = {
221 	"unknown",	/* GLDM_UNKNOWN - driver cannot determine media */
222 	"aui",		/* GLDM_AUI */
223 	"bnc",		/* GLDM_BNC */
224 	"twpair",	/* GLDM_TP */
225 	"fiber",	/* GLDM_FIBER */
226 	"100baseT",	/* GLDM_100BT */
227 	"100vgAnyLan",	/* GLDM_VGANYLAN */
228 	"10baseT",	/* GLDM_10BT */
229 	"ring4",	/* GLDM_RING4 */
230 	"ring16",	/* GLDM_RING16 */
231 	"PHY/MII",	/* GLDM_PHYMII */
232 	"100baseTX",	/* GLDM_100BTX */
233 	"100baseT4",	/* GLDM_100BT4 */
234 	"unknown",	/* skip */
235 	"ipib",		/* GLDM_IB */
236 };
237 
238 /* Must correspond to #defines in gld.h */
239 static char *gld_duplex[] = {
240 	"unknown",	/* GLD_DUPLEX_UNKNOWN - not known or not applicable */
241 	"half",		/* GLD_DUPLEX_HALF */
242 	"full"		/* GLD_DUPLEX_FULL */
243 };
244 
245 extern int gld_interpret_ether(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
246 extern int gld_interpret_fddi(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
247 extern int gld_interpret_tr(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
248 extern int gld_interpret_ib(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
249 extern void gld_interpret_mdt_ib(gld_mac_info_t *, mblk_t *, pdescinfo_t *,
250     pktinfo_t *, int);
251 
252 extern mblk_t *gld_fastpath_ether(gld_t *, mblk_t *);
253 extern mblk_t *gld_fastpath_fddi(gld_t *, mblk_t *);
254 extern mblk_t *gld_fastpath_tr(gld_t *, mblk_t *);
255 extern mblk_t *gld_fastpath_ib(gld_t *, mblk_t *);
256 
257 extern mblk_t *gld_unitdata_ether(gld_t *, mblk_t *);
258 extern mblk_t *gld_unitdata_fddi(gld_t *, mblk_t *);
259 extern mblk_t *gld_unitdata_tr(gld_t *, mblk_t *);
260 extern mblk_t *gld_unitdata_ib(gld_t *, mblk_t *);
261 
262 extern void gld_init_ether(gld_mac_info_t *);
263 extern void gld_init_fddi(gld_mac_info_t *);
264 extern void gld_init_tr(gld_mac_info_t *);
265 extern void gld_init_ib(gld_mac_info_t *);
266 
267 extern void gld_uninit_ether(gld_mac_info_t *);
268 extern void gld_uninit_fddi(gld_mac_info_t *);
269 extern void gld_uninit_tr(gld_mac_info_t *);
270 extern void gld_uninit_ib(gld_mac_info_t *);
271 
272 /*
273  * Interface types currently supported by GLD.
274  * If you add new types, you must check all "XXX" strings in the GLD source
275  * for implementation issues that may affect the support of your new type.
276  * In particular, any type with gldm_addrlen > 6, or gldm_saplen != -2, will
277  * require generalizing this GLD source to handle the new cases.  In other
278  * words there are assumptions built into the code in a few places that must
279  * be fixed.  Be sure to turn on DEBUG/ASSERT code when testing a new type.
280  */
281 static gld_interface_t interfaces[] = {
282 
283 	/* Ethernet Bus */
284 	{
285 		DL_ETHER,
286 		(uint_t)-1,
287 		sizeof (struct ether_mac_frm),
288 		gld_interpret_ether,
289 		NULL,
290 		gld_fastpath_ether,
291 		gld_unitdata_ether,
292 		gld_init_ether,
293 		gld_uninit_ether,
294 		"ether"
295 	},
296 
297 	/* Fiber Distributed Data Interface */
298 	{
299 		DL_FDDI,
300 		4352,
301 		sizeof (struct fddi_mac_frm),
302 		gld_interpret_fddi,
303 		NULL,
304 		gld_fastpath_fddi,
305 		gld_unitdata_fddi,
306 		gld_init_fddi,
307 		gld_uninit_fddi,
308 		"fddi"
309 	},
310 
311 	/* Token Ring interface */
312 	{
313 		DL_TPR,
314 		17914,
315 		-1,			/* variable header size */
316 		gld_interpret_tr,
317 		NULL,
318 		gld_fastpath_tr,
319 		gld_unitdata_tr,
320 		gld_init_tr,
321 		gld_uninit_tr,
322 		"tpr"
323 	},
324 
325 	/* Infiniband */
326 	{
327 		DL_IB,
328 		4092,
329 		sizeof (struct ipoib_header),
330 		gld_interpret_ib,
331 		gld_interpret_mdt_ib,
332 		gld_fastpath_ib,
333 		gld_unitdata_ib,
334 		gld_init_ib,
335 		gld_uninit_ib,
336 		"ipib"
337 	},
338 };
339 
340 /*
341  * bit reversal lookup table.
342  */
343 static	uchar_t bit_rev[] = {
344 	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0,
345 	0x30, 0xb0, 0x70, 0xf0, 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
346 	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 0x04, 0x84, 0x44, 0xc4,
347 	0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
348 	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc,
349 	0x3c, 0xbc, 0x7c, 0xfc, 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
350 	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, 0x0a, 0x8a, 0x4a, 0xca,
351 	0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
352 	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6,
353 	0x36, 0xb6, 0x76, 0xf6, 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
354 	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 0x01, 0x81, 0x41, 0xc1,
355 	0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
356 	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9,
357 	0x39, 0xb9, 0x79, 0xf9, 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
358 	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, 0x0d, 0x8d, 0x4d, 0xcd,
359 	0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
360 	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3,
361 	0x33, 0xb3, 0x73, 0xf3, 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
362 	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 0x07, 0x87, 0x47, 0xc7,
363 	0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
364 	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf,
365 	0x3f, 0xbf, 0x7f, 0xff,
366 };
367 
368 /*
369  * User priorities, mapped from b_band.
370  */
371 static uint32_t user_priority[] = {
372 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
373 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
374 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
375 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
376 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
377 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
378 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
379 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
380 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
381 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
382 	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
383 	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
384 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
385 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
386 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
387 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
388 };
389 
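/*
 * Map a message's b_band to an 802.1p user priority; a zero band falls
 * back to the stream's default priority.
 */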
390 #define	UPRI(gld, band)	((band != 0) ? user_priority[(band)] : (gld)->gld_upri)
391 
392 static struct glddevice gld_device_list;  /* Per-system root of GLD tables */
393 
394 /*
395  * Module linkage information for the kernel.
396  */
397 
398 static struct modlmisc modlmisc = {
399 	&mod_miscops,		/* Type of module - a utility provider */
400 	"Generic LAN Driver (" GLD_VERSION_STRING ") %I%"
401 #ifdef GLD_DEBUG
402 	" DEBUG"
403 #endif
404 };
405 
406 static struct modlinkage modlinkage = {
407 	MODREV_1, &modlmisc, NULL
408 };
409 
410 int
411 _init(void)
412 {
413 	int e;
414 
415 	/* initialize gld_device_list mutex */
416 	mutex_init(&gld_device_list.gld_devlock, NULL, MUTEX_DRIVER, NULL);
417 
418 	/* initialize device driver (per-major) list */
419 	gld_device_list.gld_next =
420 	    gld_device_list.gld_prev = &gld_device_list;
421 
422 	if ((e = mod_install(&modlinkage)) != 0)
423 		mutex_destroy(&gld_device_list.gld_devlock);
424 
425 	return (e);
426 }
427 
428 int
429 _fini(void)
430 {
431 	int e;
432 
433 	if ((e = mod_remove(&modlinkage)) != 0)
434 		return (e);
435 
436 	ASSERT(gld_device_list.gld_next ==
437 	    (glddev_t *)&gld_device_list.gld_next);
438 	ASSERT(gld_device_list.gld_prev ==
439 	    (glddev_t *)&gld_device_list.gld_next);
440 	mutex_destroy(&gld_device_list.gld_devlock);
441 
442 	return (e);
443 }
444 
445 int
446 _info(struct modinfo *modinfop)
447 {
448 	return (mod_info(&modlinkage, modinfop));
449 }
450 
451 /*
452  * GLD service routines
453  */
454 
455 /* So this gld binary may be forward compatible with future v2 drivers */
456 #define	GLD_MAC_RESERVED (16 * sizeof (caddr_t))
457 
458 /*ARGSUSED*/
459 gld_mac_info_t *
460 gld_mac_alloc(dev_info_t *devinfo)
461 {
462 	gld_mac_info_t *macinfo;
463 
464 	macinfo = kmem_zalloc(sizeof (gld_mac_info_t) + GLD_MAC_RESERVED,
465 	    KM_SLEEP);
466 
467 	/*
468 	 * The setting of gldm_driver_version will not be documented or allowed
469 	 * until a future release.
470 	 */
471 	macinfo->gldm_driver_version = GLD_VERSION_200;
472 
473 	/*
474 	 * GLD's version.  This also is undocumented for now, but will be
475 	 * available if needed in the future.
476 	 */
477 	macinfo->gldm_GLD_version = GLD_VERSION;
478 
479 	return (macinfo);
480 }
481 
482 /*
483  * gld_mac_free must be called after the driver has removed interrupts
484  * and completely stopped calling gld_recv() and gld_sched().  At that
485  * point the interrupt routine is guaranteed by the system to have been
486  * exited and the maclock is no longer needed.  Of course, it is
487  * expected (required) that, assuming gld_register() succeeded,
488  * gld_unregister() was called before gld_mac_free().
489  */
490 void
491 gld_mac_free(gld_mac_info_t *macinfo)
492 {
493 	ASSERT(macinfo);
494 	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
495 
496 	/*
497 	 * Assert that if we made it through gld_register, then we must
498 	 * have unregistered.
499 	 */
500 	ASSERT(!GLDM_LOCK_INITED(macinfo) ||
501 	    (macinfo->gldm_GLD_flags & GLD_UNREGISTERED));
502 
503 	GLDM_LOCK_DESTROY(macinfo);
504 
505 	kmem_free(macinfo, sizeof (gld_mac_info_t) + GLD_MAC_RESERVED);
506 }
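
/*
 * Illustrative teardown order (a sketch, not part of this module): a
 * driver's detach(9E) routine would typically do
 *
 *	if (gld_unregister(macinfo) != DDI_SUCCESS)
 *		return (DDI_FAILURE);	(streams still in use)
 *	remove interrupts; ensure no more calls to gld_recv()/gld_sched()
 *	gld_mac_free(macinfo);
 */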
507 
508 /*
509  * gld_register -- called once per device instance (PPA)
510  *
511  * During its attach routine, a real device driver will register with GLD
512  * so that later opens and dl_attach_reqs will work.  The arguments are the
513  * devinfo pointer, the device name, and a macinfo structure describing the
514  * physical device instance.
515  */
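/*
 * Purely illustrative attach-time usage (a sketch, not from this module;
 * "xx" is a hypothetical driver name):
 *
 *	gld_mac_info_t *macinfo = gld_mac_alloc(devinfo);
 *
 *	macinfo->gldm_type = DL_ETHER;
 *	macinfo->gldm_addrlen = ETHERADDRL;
 *	macinfo->gldm_saplen = -2;
 *	macinfo->gldm_broadcast_addr = xx_broadcastaddr;
 *	macinfo->gldm_vendor_addr = xx_factoryaddr;
 *	macinfo->gldm_ppa = ddi_get_instance(devinfo);
 *	(fill in gldm_reset/start/stop/send and the other entry points)
 *
 *	if (gld_register(devinfo, "xx", macinfo) != DDI_SUCCESS) {
 *		gld_mac_free(macinfo);
 *		return (DDI_FAILURE);
 *	}
 */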
516 int
517 gld_register(dev_info_t *devinfo, char *devname, gld_mac_info_t *macinfo)
518 {
519 	int mediatype;
520 	int major = ddi_name_to_major(devname), i;
521 	glddev_t *glddev;
522 	gld_mac_pvt_t *mac_pvt;
523 	char minordev[32];
524 	char pbuf[3*GLD_MAX_ADDRLEN];
525 	gld_interface_t *ifp = NULL;
526 
527 	ASSERT(devinfo != NULL);
528 	ASSERT(macinfo != NULL);
529 
530 	if (macinfo->gldm_driver_version != GLD_VERSION)
531 		return (DDI_FAILURE);
532 
533 	mediatype = macinfo->gldm_type;
534 
535 	/*
536 	 * Entry points should be ready for us.
537 	 * ioctl is optional.
538 	 * set_multicast and get_stats are optional in v0.
539 	 * intr is only required if you add an interrupt.
540 	 */
541 	ASSERT(macinfo->gldm_reset != NULL);
542 	ASSERT(macinfo->gldm_start != NULL);
543 	ASSERT(macinfo->gldm_stop != NULL);
544 	ASSERT(macinfo->gldm_set_mac_addr != NULL);
545 	ASSERT(macinfo->gldm_set_promiscuous != NULL);
546 	ASSERT(macinfo->gldm_send != NULL);
547 
548 	ASSERT(macinfo->gldm_maxpkt >= macinfo->gldm_minpkt);
549 	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
550 	ASSERT(macinfo->gldm_broadcast_addr != NULL);
551 	ASSERT(macinfo->gldm_vendor_addr != NULL);
552 	ASSERT(macinfo->gldm_ident != NULL);
553 
554 	if (macinfo->gldm_addrlen > GLD_MAX_ADDRLEN) {
555 		cmn_err(CE_WARN, "GLD: %s driver gldm_addrlen %d > %d not sup"
556 		    "ported", devname, macinfo->gldm_addrlen, GLD_MAX_ADDRLEN);
557 		return (DDI_FAILURE);
558 	}
559 
560 	/*
561 	 * GLD only functions properly with saplen == -2
562 	 */
563 	if (macinfo->gldm_saplen != -2) {
564 		cmn_err(CE_WARN, "GLD: %s driver gldm_saplen %d != -2 "
565 		    "not supported", devname, macinfo->gldm_saplen);
566 		return (DDI_FAILURE);
567 	}
568 
569 	/* see gld_rsrv() */
570 	if (ddi_getprop(DDI_DEV_T_NONE, devinfo, 0, "fast_recv", 0))
571 		macinfo->gldm_options |= GLDOPT_FAST_RECV;
572 
573 	mutex_enter(&gld_device_list.gld_devlock);
574 	glddev = gld_devlookup(major);
575 
576 	/*
577 	 *  Allocate per-driver (major) data structure if necessary
578 	 */
579 	if (glddev == NULL) {
580 		/* first occurrence of this device name (major number) */
581 		glddev = GETSTRUCT(glddev_t, 1);
582 		if (glddev == NULL) {
583 			mutex_exit(&gld_device_list.gld_devlock);
584 			return (DDI_FAILURE);
585 		}
586 		(void) strncpy(glddev->gld_name, devname,
587 		    sizeof (glddev->gld_name) - 1);
588 		glddev->gld_major = major;
589 		glddev->gld_nextminor = GLD_MIN_CLONE_MINOR;
590 		glddev->gld_mac_next = glddev->gld_mac_prev =
591 			(gld_mac_info_t *)&glddev->gld_mac_next;
592 		glddev->gld_str_next = glddev->gld_str_prev =
593 			(gld_t *)&glddev->gld_str_next;
594 		mutex_init(&glddev->gld_devlock, NULL, MUTEX_DRIVER, NULL);
595 
596 		/* allow increase of number of supported multicast addrs */
597 		glddev->gld_multisize = ddi_getprop(DDI_DEV_T_NONE,
598 		    devinfo, 0, "multisize", GLD_MAX_MULTICAST);
599 
600 		/*
601 		 * Optionally restrict DLPI provider style
602 		 *
603 		 * -1 - don't create style 1 nodes
604 		 * -2 - don't create style 2 nodes
605 		 */
606 		glddev->gld_styles = ddi_getprop(DDI_DEV_T_NONE, devinfo, 0,
607 		    "gld-provider-styles", 0);
608 
609 		/* Stuff that's needed before any PPA gets attached */
610 		glddev->gld_type = macinfo->gldm_type;
611 		glddev->gld_minsdu = macinfo->gldm_minpkt;
612 		glddev->gld_saplen = macinfo->gldm_saplen;
613 		glddev->gld_addrlen = macinfo->gldm_addrlen;
614 		glddev->gld_broadcast = kmem_zalloc(macinfo->gldm_addrlen,
615 		    KM_SLEEP);
616 		bcopy(macinfo->gldm_broadcast_addr,
617 		    glddev->gld_broadcast, macinfo->gldm_addrlen);
618 		glddev->gld_maxsdu = macinfo->gldm_maxpkt;
619 		gldinsque(glddev, gld_device_list.gld_prev);
620 	}
621 	glddev->gld_ndevice++;
622 	/* Now glddev can't go away until we unregister this mac (or fail) */
623 	mutex_exit(&gld_device_list.gld_devlock);
624 
625 	/*
626 	 *  Per-instance initialization
627 	 */
628 
629 	/*
630 	 * Initialize per-mac structure that is private to GLD.
631 	 * Set up interface pointer. These are device class specific pointers
632 	 * used to handle FDDI/TR/ETHER/IPoIB specific packets.
633 	 */
634 	for (i = 0; i < sizeof (interfaces)/sizeof (*interfaces); i++) {
635 		if (mediatype != interfaces[i].mac_type)
636 			continue;
637 
638 		macinfo->gldm_mac_pvt = kmem_zalloc(sizeof (gld_mac_pvt_t),
639 		    KM_SLEEP);
640 		((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep = ifp =
641 		    &interfaces[i];
642 		break;
643 	}
644 
645 	if (ifp == NULL) {
646 		cmn_err(CE_WARN, "GLD: this version does not support %s driver "
647 		    "of type %d", devname, mediatype);
648 		goto failure;
649 	}
650 
651 	/*
652 	 * Driver can only register MTU within legal media range.
653 	 */
654 	if (macinfo->gldm_maxpkt > ifp->mtu_size) {
655 		cmn_err(CE_WARN, "GLD: oversize MTU is specified by driver %s",
656 		    devname);
657 		goto failure;
658 	}
659 
660 	/*
661 	 * For now, only Infiniband drivers can use MDT. Do not add
662 	 * support for Ethernet, FDDI or TR.
663 	 */
664 	if (macinfo->gldm_mdt_pre != NULL) {
665 		if (mediatype != DL_IB) {
666 			cmn_err(CE_WARN, "GLD: MDT not supported for %s "
667 			    "driver of type %d", devname, mediatype);
668 			goto failure;
669 		}
670 
671 		/*
672 		 * Validate entry points.
673 		 */
674 		if ((macinfo->gldm_mdt_send == NULL) ||
675 		    (macinfo->gldm_mdt_post == NULL)) {
676 			cmn_err(CE_WARN, "GLD: invalid MDT entry points for "
677 			    "%s driver of type %d", devname, mediatype);
678 			goto failure;
679 		}
680 		macinfo->gldm_options |= GLDOPT_MDT;
681 	}
682 
683 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
684 	mac_pvt->major_dev = glddev;
685 
686 	mac_pvt->curr_macaddr = kmem_zalloc(macinfo->gldm_addrlen, KM_SLEEP);
687 	/*
688 	 * XXX Do bit-reversed devices store gldm_vendor in canonical
689 	 * format or in wire format?  Also gldm_broadcast.  For now
690 	 * we are assuming canonical, but I'm not sure that makes the
691 	 * most sense for ease of driver implementation.
692 	 */
693 	bcopy(macinfo->gldm_vendor_addr, mac_pvt->curr_macaddr,
694 	    macinfo->gldm_addrlen);
695 	mac_pvt->statistics = kmem_zalloc(sizeof (struct gld_stats), KM_SLEEP);
696 
697 	/*
698 	 * The available set of notifications is those generatable by GLD
699 	 * itself, plus those corresponding to the capabilities of the MAC
700 	 * driver, intersected with those supported by gld_notify_ind() above.
701 	 */
702 	mac_pvt->notifications = gld_internal_notes;
703 	if (macinfo->gldm_capabilities & GLD_CAP_LINKSTATE)
704 		mac_pvt->notifications |= gld_linkstate_notes;
705 	mac_pvt->notifications &= gld_supported_notes;
706 
707 	GLDM_LOCK_INIT(macinfo);
708 
709 	ddi_set_driver_private(devinfo, macinfo);
710 
711 	/*
712 	 * Now atomically get a PPA and put ourselves on the mac list.
713 	 */
714 	mutex_enter(&glddev->gld_devlock);
715 
716 #ifdef DEBUG
717 	if (macinfo->gldm_ppa != ddi_get_instance(devinfo))
718 		cmn_err(CE_WARN, "%s%d instance != ppa %d",
719 		    ddi_driver_name(devinfo), ddi_get_instance(devinfo),
720 		    macinfo->gldm_ppa);
721 #endif
722 
723 	/*
724 	 * Create style 2 node (gated by gld-provider-styles property).
725 	 *
726 	 * NOTE: When the CLONE_DEV flag is specified to
727 	 *	 ddi_create_minor_node() the minor number argument is
728 	 *	 immaterial. Opens of that node will go via the clone
729 	 *	 driver and gld_open() will always be passed a dev_t with
730 	 *	 minor of zero.
731 	 */
732 	if (glddev->gld_styles != -2) {
733 		if (ddi_create_minor_node(devinfo, glddev->gld_name, S_IFCHR,
734 		    0, DDI_NT_NET, CLONE_DEV) == DDI_FAILURE) {
735 			mutex_exit(&glddev->gld_devlock);
736 			goto late_failure;
737 		}
738 	}
739 
740 	/*
741 	 * Create style 1 node (gated by gld-provider-styles property)
742 	 */
743 	if (glddev->gld_styles != -1) {
744 		(void) sprintf(minordev, "%s%d", glddev->gld_name,
745 		    macinfo->gldm_ppa);
746 		if (ddi_create_minor_node(devinfo, minordev, S_IFCHR,
747 		    GLD_STYLE1_PPA_TO_MINOR(macinfo->gldm_ppa), DDI_NT_NET,
748 		    0) != DDI_SUCCESS) {
749 			mutex_exit(&glddev->gld_devlock);
750 			goto late_failure;
751 		}
752 	}
753 
754 	/* add ourselves to this major device's linked list of instances */
755 	gldinsque(macinfo, glddev->gld_mac_prev);
756 
757 	mutex_exit(&glddev->gld_devlock);
758 
759 	/*
760 	 * Unfortunately we need the ppa before we call gld_initstats();
761 	 * otherwise we would do this just above the mutex_enter above,
762 	 * in which case we could have set MAC_READY inside the mutex and
763 	 * wouldn't have needed to check it in open and DL_ATTACH.  We
764 	 * can't do the initstats/kstat_create inside the mutex because
765 	 * the mutex might be taken in our kstat_update routine, causing
766 	 * a deadlock with kstat_chain_lock.
767 	 */
768 
769 	/* gld_initstats() calls (*ifp->init)() */
770 	if (gld_initstats(macinfo) != GLD_SUCCESS) {
771 		mutex_enter(&glddev->gld_devlock);
772 		gldremque(macinfo);
773 		mutex_exit(&glddev->gld_devlock);
774 		goto late_failure;
775 	}
776 
777 	/*
778 	 * Need to indicate we are NOW ready to process interrupts;
779 	 * any interrupt before this is set is for someone else.
780 	 * This flag is also now used to tell open, et al., that this
781 	 * mac is now fully ready and available for use.
782 	 */
783 	GLDM_LOCK(macinfo, RW_WRITER);
784 	macinfo->gldm_GLD_flags |= GLD_MAC_READY;
785 	GLDM_UNLOCK(macinfo);
786 
787 	/* log local ethernet address -- XXX not DDI compliant */
788 	if (macinfo->gldm_addrlen == sizeof (struct ether_addr))
789 		(void) localetheraddr(
790 		    (struct ether_addr *)macinfo->gldm_vendor_addr, NULL);
791 
792 	/* now put announcement into the message buffer */
793 	cmn_err(CE_CONT, "!%s%d: %s: type \"%s\" mac address %s\n",
794 	    glddev->gld_name,
795 	    macinfo->gldm_ppa, macinfo->gldm_ident,
796 	    mac_pvt->interfacep->mac_string,
797 	    gld_macaddr_sprintf(pbuf, macinfo->gldm_vendor_addr,
798 	    macinfo->gldm_addrlen));
799 
800 	ddi_report_dev(devinfo);
801 	return (DDI_SUCCESS);
802 
803 late_failure:
804 	ddi_remove_minor_node(devinfo, NULL);
805 	GLDM_LOCK_DESTROY(macinfo);
806 	if (mac_pvt->curr_macaddr != NULL)
807 	    kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
808 	if (mac_pvt->statistics != NULL)
809 	    kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
810 	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
811 	macinfo->gldm_mac_pvt = NULL;
812 
813 failure:
814 	mutex_enter(&gld_device_list.gld_devlock);
815 	glddev->gld_ndevice--;
816 	/*
817 	 * Note that just because this goes to zero here does not necessarily
818 	 * mean that we were the one who added the glddev above.  It's
819 	 * possible that the first mac unattached while we were in here
820 	 * failing to attach the second mac.  But we're now the last.
821 	 */
822 	if (glddev->gld_ndevice == 0) {
823 		/* There should be no macinfos left */
824 		ASSERT(glddev->gld_mac_next ==
825 		    (gld_mac_info_t *)&glddev->gld_mac_next);
826 		ASSERT(glddev->gld_mac_prev ==
827 		    (gld_mac_info_t *)&glddev->gld_mac_next);
828 
829 		/*
830 		 * There should be no DL_UNATTACHED streams: the system
831 		 * should not have detached the "first" devinfo which has
832 		 * all the open style 2 streams.
833 		 *
834 		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
835 		 */
836 		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
837 		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);
838 
839 		gldremque(glddev);
840 		mutex_destroy(&glddev->gld_devlock);
841 		if (glddev->gld_broadcast != NULL)
842 			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
843 		kmem_free(glddev, sizeof (glddev_t));
844 	}
845 	mutex_exit(&gld_device_list.gld_devlock);
846 
847 	return (DDI_FAILURE);
848 }
849 
850 /*
851  * gld_unregister (macinfo)
852  * remove the macinfo structure from local structures
853  * this is cleanup for a driver to be unloaded
854  */
855 int
856 gld_unregister(gld_mac_info_t *macinfo)
857 {
858 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
859 	glddev_t *glddev = mac_pvt->major_dev;
860 	gld_interface_t *ifp;
861 	int multisize = sizeof (gld_mcast_t) * glddev->gld_multisize;
862 
863 	mutex_enter(&glddev->gld_devlock);
864 	GLDM_LOCK(macinfo, RW_WRITER);
865 
866 	if (mac_pvt->nvlan > 0) {
867 		GLDM_UNLOCK(macinfo);
868 		mutex_exit(&glddev->gld_devlock);
869 		return (DDI_FAILURE);
870 	}
871 
872 #ifdef	GLD_DEBUG
873 	{
874 		int i;
875 
876 		for (i = 0; i < VLAN_HASHSZ; i++) {
877 			if ((mac_pvt->vlan_hash[i] != NULL))
878 				cmn_err(CE_PANIC,
879 				    "%s, line %d: "
880 				    "mac_pvt->vlan_hash[%d] != NULL",
881 				    __FILE__, __LINE__, i);
882 		}
883 	}
884 #endif
885 
886 	/* Delete this mac */
887 	gldremque(macinfo);
888 
889 	/* Disallow further entries to gld_recv() and gld_sched() */
890 	macinfo->gldm_GLD_flags |= GLD_UNREGISTERED;
891 
892 	GLDM_UNLOCK(macinfo);
893 	mutex_exit(&glddev->gld_devlock);
894 
895 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
896 	(*ifp->uninit)(macinfo);
897 
898 	ASSERT(mac_pvt->kstatp);
899 	kstat_delete(mac_pvt->kstatp);
900 
901 	ASSERT(GLDM_LOCK_INITED(macinfo));
902 	kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
903 	kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
904 
905 	if (mac_pvt->mcast_table != NULL)
906 		kmem_free(mac_pvt->mcast_table, multisize);
907 	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
908 	macinfo->gldm_mac_pvt = (caddr_t)NULL;
909 
910 	/* We now have one fewer instance for this major device */
911 	mutex_enter(&gld_device_list.gld_devlock);
912 	glddev->gld_ndevice--;
913 	if (glddev->gld_ndevice == 0) {
914 		/* There should be no macinfos left */
915 		ASSERT(glddev->gld_mac_next ==
916 		    (gld_mac_info_t *)&glddev->gld_mac_next);
917 		ASSERT(glddev->gld_mac_prev ==
918 		    (gld_mac_info_t *)&glddev->gld_mac_next);
919 
920 		/*
921 		 * There should be no DL_UNATTACHED streams: the system
922 		 * should not have detached the "first" devinfo which has
923 		 * all the open style 2 streams.
924 		 *
925 		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
926 		 */
927 		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
928 		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);
929 
930 		ddi_remove_minor_node(macinfo->gldm_devinfo, NULL);
931 		gldremque(glddev);
932 		mutex_destroy(&glddev->gld_devlock);
933 		if (glddev->gld_broadcast != NULL)
934 			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
935 		kmem_free(glddev, sizeof (glddev_t));
936 	}
937 	mutex_exit(&gld_device_list.gld_devlock);
938 
939 	return (DDI_SUCCESS);
940 }
941 
942 /*
943  * gld_initstats
944  * called from gld_register
945  */
946 static int
947 gld_initstats(gld_mac_info_t *macinfo)
948 {
949 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
950 	struct gldkstats *sp;
951 	glddev_t *glddev;
952 	kstat_t *ksp;
953 	gld_interface_t *ifp;
954 
955 	glddev = mac_pvt->major_dev;
956 
957 	if ((ksp = kstat_create(glddev->gld_name, macinfo->gldm_ppa,
958 	    NULL, "net", KSTAT_TYPE_NAMED,
959 	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
960 		cmn_err(CE_WARN,
961 		    "GLD: failed to create kstat structure for %s%d",
962 		    glddev->gld_name, macinfo->gldm_ppa);
963 		return (GLD_FAILURE);
964 	}
965 	mac_pvt->kstatp = ksp;
966 
967 	ksp->ks_update = gld_update_kstat;
968 	ksp->ks_private = (void *)macinfo;
969 
970 	sp = ksp->ks_data;
971 	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
972 	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
973 	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
974 	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
975 	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
976 	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
977 	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
978 	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
979 	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
980 	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
981 	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
982 	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
983 	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
984 	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
985 	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
986 	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
987 	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
988 	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
989 	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
990 	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
991 	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
992 	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
993 	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);
994 
995 	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
996 	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
997 	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);
998 
999 	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
1000 	    KSTAT_DATA_UINT32);
1001 	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
1002 	    KSTAT_DATA_UINT32);
1003 
1004 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
1005 
1006 	(*ifp->init)(macinfo);
1007 
1008 	kstat_install(ksp);
1009 
1010 	return (GLD_SUCCESS);
1011 }
1012 
1013 /* called from kstat mechanism, and from wsrv's get_statistics_req */
1014 static int
1015 gld_update_kstat(kstat_t *ksp, int rw)
1016 {
1017 	gld_mac_info_t	*macinfo;
1018 	gld_mac_pvt_t	*mac_pvt;
1019 	struct gldkstats *gsp;
1020 	struct gld_stats *stats;
1021 
1022 	if (rw == KSTAT_WRITE)
1023 		return (EACCES);
1024 
1025 	macinfo = (gld_mac_info_t *)ksp->ks_private;
1026 	ASSERT(macinfo != NULL);
1027 
1028 	GLDM_LOCK(macinfo, RW_WRITER);
1029 
1030 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
1031 		GLDM_UNLOCK(macinfo);
1032 		return (EIO);	/* this one's not ready yet */
1033 	}
1034 
1035 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
1036 		GLDM_UNLOCK(macinfo);
1037 		return (EIO);	/* this one's not ready any more */
1038 	}
1039 
1040 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1041 	gsp = mac_pvt->kstatp->ks_data;
1042 	ASSERT(gsp);
1043 	stats = mac_pvt->statistics;
1044 
1045 	if (macinfo->gldm_get_stats)
1046 		(void) (*macinfo->gldm_get_stats)(macinfo, stats);
1047 
1048 	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
1049 	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
1050 	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
1051 	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
1052 	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;	/* 0 for now */
1053 	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
1054 
1055 	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
1056 	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
1057 	gsp->glds_xmtbadinterp.value.ui32 = stats->glds_xmtbadinterp;
1058 
1059 	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
1060 	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
1061 	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
1062 	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
1063 	gsp->glds_multircv.value.ul = stats->glds_multircv;
1064 	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
1065 	gsp->glds_blocked.value.ul = stats->glds_blocked;
1066 	gsp->glds_overflow.value.ul = stats->glds_overflow;
1067 	gsp->glds_underflow.value.ul = stats->glds_underflow;
1068 	gsp->glds_missed.value.ul = stats->glds_missed;
1069 	gsp->glds_norcvbuf.value.ul = stats->glds_norcvbuf +
1070 	    stats->glds_gldnorcvbuf;
1071 	gsp->glds_intr.value.ul = stats->glds_intr;
1072 
1073 	gsp->glds_speed.value.ui64 = stats->glds_speed;
1074 	gsp->glds_unknowns.value.ul = stats->glds_unknowns;
1075 	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
1076 	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
1077 	gsp->glds_rcvbadinterp.value.ui32 = stats->glds_rcvbadinterp;
1078 
1079 	if (mac_pvt->nprom)
1080 		(void) strcpy(gsp->glds_prom.value.c, "phys");
1081 	else if (mac_pvt->nprom_multi)
1082 		(void) strcpy(gsp->glds_prom.value.c, "multi");
1083 	else
1084 		(void) strcpy(gsp->glds_prom.value.c, "off");
1085 
1086 	(void) strcpy(gsp->glds_media.value.c, gld_media[
1087 	    stats->glds_media < sizeof (gld_media) / sizeof (gld_media[0])
1088 	    ? stats->glds_media : 0]);
1089 
1090 	switch (macinfo->gldm_type) {
1091 	case DL_ETHER:
1092 		gsp->glds_frame.value.ul = stats->glds_frame;
1093 		gsp->glds_crc.value.ul = stats->glds_crc;
1094 		gsp->glds_collisions.value.ul = stats->glds_collisions;
1095 		gsp->glds_excoll.value.ul = stats->glds_excoll;
1096 		gsp->glds_defer.value.ul = stats->glds_defer;
1097 		gsp->glds_short.value.ul = stats->glds_short;
1098 		gsp->glds_xmtlatecoll.value.ul = stats->glds_xmtlatecoll;
1099 		gsp->glds_nocarrier.value.ul = stats->glds_nocarrier;
1100 		gsp->glds_dot3_first_coll.value.ui32 =
1101 		    stats->glds_dot3_first_coll;
1102 		gsp->glds_dot3_multi_coll.value.ui32 =
1103 		    stats->glds_dot3_multi_coll;
1104 		gsp->glds_dot3_sqe_error.value.ui32 =
1105 		    stats->glds_dot3_sqe_error;
1106 		gsp->glds_dot3_mac_xmt_error.value.ui32 =
1107 		    stats->glds_dot3_mac_xmt_error;
1108 		gsp->glds_dot3_mac_rcv_error.value.ui32 =
1109 		    stats->glds_dot3_mac_rcv_error;
1110 		gsp->glds_dot3_frame_too_long.value.ui32 =
1111 		    stats->glds_dot3_frame_too_long;
1112 		(void) strcpy(gsp->glds_duplex.value.c, gld_duplex[
1113 		    stats->glds_duplex <
1114 		    sizeof (gld_duplex) / sizeof (gld_duplex[0]) ?
1115 		    stats->glds_duplex : 0]);
1116 		break;
1117 	case DL_TPR:
1118 		gsp->glds_dot5_line_error.value.ui32 =
1119 		    stats->glds_dot5_line_error;
1120 		gsp->glds_dot5_burst_error.value.ui32 =
1121 		    stats->glds_dot5_burst_error;
1122 		gsp->glds_dot5_signal_loss.value.ui32 =
1123 		    stats->glds_dot5_signal_loss;
1124 		gsp->glds_dot5_ace_error.value.ui32 =
1125 		    stats->glds_dot5_ace_error;
1126 		gsp->glds_dot5_internal_error.value.ui32 =
1127 		    stats->glds_dot5_internal_error;
1128 		gsp->glds_dot5_lost_frame_error.value.ui32 =
1129 		    stats->glds_dot5_lost_frame_error;
1130 		gsp->glds_dot5_frame_copied_error.value.ui32 =
1131 		    stats->glds_dot5_frame_copied_error;
1132 		gsp->glds_dot5_token_error.value.ui32 =
1133 		    stats->glds_dot5_token_error;
1134 		gsp->glds_dot5_freq_error.value.ui32 =
1135 		    stats->glds_dot5_freq_error;
1136 		break;
1137 	case DL_FDDI:
1138 		gsp->glds_fddi_mac_error.value.ui32 =
1139 		    stats->glds_fddi_mac_error;
1140 		gsp->glds_fddi_mac_lost.value.ui32 =
1141 		    stats->glds_fddi_mac_lost;
1142 		gsp->glds_fddi_mac_token.value.ui32 =
1143 		    stats->glds_fddi_mac_token;
1144 		gsp->glds_fddi_mac_tvx_expired.value.ui32 =
1145 		    stats->glds_fddi_mac_tvx_expired;
1146 		gsp->glds_fddi_mac_late.value.ui32 =
1147 		    stats->glds_fddi_mac_late;
1148 		gsp->glds_fddi_mac_ring_op.value.ui32 =
1149 		    stats->glds_fddi_mac_ring_op;
1150 		break;
1151 	case DL_IB:
1152 		break;
1153 	default:
1154 		break;
1155 	}
1156 
1157 	GLDM_UNLOCK(macinfo);
1158 
1159 #ifdef GLD_DEBUG
1160 	gld_check_assertions();
1161 	if (gld_debug & GLDRDE)
1162 		gld_sr_dump(macinfo);
1163 #endif
1164 
1165 	return (0);
1166 }
1167 
1168 static int
1169 gld_init_vlan_stats(gld_vlan_t *vlan)
1170 {
1171 	gld_mac_info_t *mac = vlan->gldv_mac;
1172 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
1173 	struct gldkstats *sp;
1174 	glddev_t *glddev;
1175 	kstat_t *ksp;
1176 	char *name;
1177 	int instance;
1178 
1179 	glddev = mac_pvt->major_dev;
1180 	name = glddev->gld_name;
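	/* Derive a unique kstat instance number from the VLAN id and PPA. */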
1181 	instance = (vlan->gldv_id * GLD_VLAN_SCALE) + mac->gldm_ppa;
1182 
1183 	if ((ksp = kstat_create(name, instance,
1184 	    NULL, "net", KSTAT_TYPE_NAMED,
1185 	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
1186 		cmn_err(CE_WARN,
1187 		    "GLD: failed to create kstat structure for %s%d",
1188 		    name, instance);
1189 		return (GLD_FAILURE);
1190 	}
1191 
1192 	vlan->gldv_kstatp = ksp;
1193 
1194 	ksp->ks_update = gld_update_vlan_kstat;
1195 	ksp->ks_private = (void *)vlan;
1196 
1197 	sp = ksp->ks_data;
1198 	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
1199 	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
1200 	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
1201 	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
1202 	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
1203 	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
1204 	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
1205 	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
1206 	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
1207 	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
1208 	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
1209 	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
1210 	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
1211 	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
1212 	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
1213 	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
1214 	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
1215 	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
1216 	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
1217 	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
1218 	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
1219 	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
1220 	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);
1221 
1222 	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
1223 	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
1224 	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);
1225 
1226 	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
1227 	    KSTAT_DATA_UINT32);
1228 	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
1229 	    KSTAT_DATA_UINT32);
1230 
1231 	kstat_install(ksp);
1232 	return (GLD_SUCCESS);
1233 }
1234 
1235 static int
1236 gld_update_vlan_kstat(kstat_t *ksp, int rw)
1237 {
1238 	gld_vlan_t	*vlan;
1239 	gld_mac_info_t	*macinfo;
1240 	struct gldkstats *gsp;
1241 	struct gld_stats *stats;
1242 
1243 	if (rw == KSTAT_WRITE)
1244 		return (EACCES);
1245 
1246 	vlan = (gld_vlan_t *)ksp->ks_private;
1247 	ASSERT(vlan != NULL);
1248 
1249 	macinfo = vlan->gldv_mac;
1250 	GLDM_LOCK(macinfo, RW_WRITER);
1251 
1252 	gsp = vlan->gldv_kstatp->ks_data;
1253 	ASSERT(gsp);
1254 	stats = vlan->gldv_stats;
1255 
1256 	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
1257 	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
1258 	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
1259 	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
1260 	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
1261 	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;
1262 	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
1263 	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
1264 	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
1265 
1266 	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
1267 	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
1268 	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
1269 	gsp->glds_multircv.value.ul = stats->glds_multircv;
1270 	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
1271 	gsp->glds_blocked.value.ul = stats->glds_blocked;
1272 	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
1273 	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
1274 
1275 	GLDM_UNLOCK(macinfo);
1276 	return (0);
1277 }
1278 
1279 /*
1280  * The device dependent driver specifies gld_getinfo as its getinfo routine.
1281  */
1282 /*ARGSUSED*/
1283 int
1284 gld_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
1285 {
1286 	dev_info_t	*devinfo;
1287 	minor_t		minor = getminor((dev_t)arg);
1288 	int		rc = DDI_FAILURE;
1289 
1290 	switch (cmd) {
1291 	case DDI_INFO_DEVT2DEVINFO:
1292 		if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
1293 			*(dev_info_t **)resultp = devinfo;
1294 			rc = DDI_SUCCESS;
1295 		}
1296 		break;
1297 	case DDI_INFO_DEVT2INSTANCE:
1298 		/* Need static mapping for deferred attach */
1299 		if (minor == GLD_USE_STYLE2) {
1300 			/*
1301 			 * Style 2:  this minor number does not correspond to
1302 			 * any particular instance number.
1303 			 */
1304 			rc = DDI_FAILURE;
1305 		} else if (minor <= GLD_MAX_STYLE1_MINOR) {
1306 			/* Style 1:  calculate the PPA from the minor */
1307 			*(int *)resultp = GLD_STYLE1_MINOR_TO_PPA(minor);
1308 			rc = DDI_SUCCESS;
1309 		} else {
1310 			/* Clone:  look for it.  Not a static mapping */
1311 			if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
1312 				*(int *)resultp = ddi_get_instance(devinfo);
1313 				rc = DDI_SUCCESS;
1314 			}
1315 		}
1316 		break;
1317 	}
1318 
1319 	return (rc);
1320 }
1321 
1322 /* called from gld_getinfo */
1323 dev_info_t *
1324 gld_finddevinfo(dev_t dev)
1325 {
1326 	minor_t		minor = getminor(dev);
1327 	glddev_t	*device;
1328 	gld_mac_info_t	*mac;
1329 	gld_vlan_t	*vlan;
1330 	gld_t		*str;
1331 	dev_info_t	*devinfo = NULL;
1332 	int		i;
1333 
1334 	if (minor == GLD_USE_STYLE2) {
1335 		/*
1336 		 * Style 2:  this minor number does not correspond to
1337 		 * any particular instance number.
1338 		 *
1339 		 * XXX We don't know what to say.  See Bug 1165519.
1340 		 */
1341 		return (NULL);
1342 	}
1343 
1344 	mutex_enter(&gld_device_list.gld_devlock);	/* hold the device */
1345 
1346 	device = gld_devlookup(getmajor(dev));
1347 	if (device == NULL) {
1348 		/* There are no attached instances of this device */
1349 		mutex_exit(&gld_device_list.gld_devlock);
1350 		return (NULL);
1351 	}
1352 
1353 	/*
1354 	 * Search all attached macs and streams.
1355 	 *
1356 	 * XXX We don't bother checking the DL_UNATTACHED streams since
1357 	 * we don't know what devinfo we should report back even if we
1358 	 * found the minor.  Maybe we should associate streams that are
1359 	 * not currently attached to a PPA with the "first" devinfo node
1360 	 * of the major device to attach -- the one that created the
1361 	 * minor node for the generic device.
1362 	 */
1363 	mutex_enter(&device->gld_devlock);
1364 
1365 	for (mac = device->gld_mac_next;
1366 	    mac != (gld_mac_info_t *)&device->gld_mac_next;
1367 	    mac = mac->gldm_next) {
1368 		gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
1369 
1370 		if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
1371 			continue;	/* this one's not ready yet */
1372 		if (minor <= GLD_MAX_STYLE1_MINOR) {
1373 			/* Style 1 -- look for the corresponding PPA */
1374 			if (minor == GLD_STYLE1_PPA_TO_MINOR(mac->gldm_ppa)) {
1375 				devinfo = mac->gldm_devinfo;
1376 				goto out;	/* found it! */
1377 			} else
1378 				continue;	/* not this PPA */
1379 		}
1380 
1381 		/* We are looking for a clone */
1382 		for (i = 0; i < VLAN_HASHSZ; i++) {
1383 			for (vlan = pvt->vlan_hash[i];
1384 			    vlan != NULL; vlan = vlan->gldv_next) {
1385 				for (str = vlan->gldv_str_next;
1386 				    str != (gld_t *)&vlan->gldv_str_next;
1387 				    str = str->gld_next) {
1388 					ASSERT(str->gld_mac_info == mac);
1389 					if (minor == str->gld_minor) {
1390 						devinfo = mac->gldm_devinfo;
1391 						goto out;
1392 					}
1393 				}
1394 			}
1395 		}
1396 	}
1397 out:
1398 	mutex_exit(&device->gld_devlock);
1399 	mutex_exit(&gld_device_list.gld_devlock);
1400 	return (devinfo);
1401 }
1402 
1403 /*
1404  * STREAMS open routine.  The device dependent driver specifies this as its
1405  * open entry point.
1406  */
1407 /*ARGSUSED2*/
1408 int
1409 gld_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *cred)
1410 {
1411 	gld_mac_pvt_t *mac_pvt;
1412 	gld_t *gld;
1413 	glddev_t *glddev;
1414 	gld_mac_info_t *macinfo;
1415 	minor_t minor = getminor(*dev);
1416 	gld_vlan_t *vlan;
1417 	t_uscalar_t ppa;
1418 
1419 	ASSERT(q != NULL);
1420 
1421 	if (minor > GLD_MAX_STYLE1_MINOR)
1422 		return (ENXIO);
1423 
1424 	ASSERT(q->q_ptr == NULL);	/* Clone device gives us a fresh Q */
1425 
1426 	/* Find our per-major glddev_t structure */
1427 	mutex_enter(&gld_device_list.gld_devlock);
1428 	glddev = gld_devlookup(getmajor(*dev));
1429 
1430 	/*
1431 	 * This glddev will hang around since detach (and therefore
1432 	 * gld_unregister) can't run while we're here in the open routine.
1433 	 */
1434 	mutex_exit(&gld_device_list.gld_devlock);
1435 
1436 	if (glddev == NULL)
1437 		return (ENXIO);
1438 
1439 #ifdef GLD_DEBUG
1440 	if (gld_debug & GLDPROT) {
1441 		if (minor == GLD_USE_STYLE2)
1442 			cmn_err(CE_NOTE, "gld_open(%p, Style 2)", (void *)q);
1443 		else
1444 			cmn_err(CE_NOTE, "gld_open(%p, Style 1, minor = %d)",
1445 			    (void *)q, minor);
1446 	}
1447 #endif
1448 
1449 	/*
1450 	 * get a per-stream structure and link things together so we
1451 	 * can easily find them later.
1452 	 */
1453 	gld = kmem_zalloc(sizeof (gld_t), KM_SLEEP);
1454 
1455 	/*
1456 	 * fill in the structure and state info
1457 	 */
1458 	gld->gld_qptr = q;
1459 	gld->gld_device = glddev;
1460 	gld->gld_state = DL_UNATTACHED;
1461 
1462 	/*
1463 	 * we must atomically find a free minor number and add the stream
1464 	 * to a list, because gld_findminor has to traverse the lists to
1465 	 * determine which minor numbers are free.
1466 	 */
1467 	mutex_enter(&glddev->gld_devlock);
1468 
1469 	/* find a free minor device number for the clone */
1470 	gld->gld_minor = gld_findminor(glddev);
1471 	if (gld->gld_minor == 0) {
1472 		mutex_exit(&glddev->gld_devlock);
1473 		kmem_free(gld, sizeof (gld_t));
1474 		return (ENOSR);
1475 	}
1476 
1477 #ifdef GLD_VERBOSE_DEBUG
1478 	if (gld_debug & GLDPROT)
1479 		cmn_err(CE_NOTE, "gld_open() gld ptr: %p minor: %d",
1480 		    (void *)gld, gld->gld_minor);
1481 #endif
1482 
1483 	if (minor == GLD_USE_STYLE2) {
1484 		gld->gld_style = DL_STYLE2;
1485 		*dev = makedevice(getmajor(*dev), gld->gld_minor);
1486 		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
1487 		gldinsque(gld, glddev->gld_str_prev);
1488 #ifdef GLD_VERBOSE_DEBUG
1489 		if (gld_debug & GLDPROT)
1490 			cmn_err(CE_NOTE, "GLDstruct added to device list");
1491 #endif
1492 		(void) qassociate(q, -1);
1493 		goto done;
1494 	}
1495 
1496 	gld->gld_style = DL_STYLE1;
1497 
1498 	/* the PPA is actually 1 less than the minordev */
1499 	ppa = GLD_STYLE1_MINOR_TO_PPA(minor);
1500 
1501 	for (macinfo = glddev->gld_mac_next;
1502 	    macinfo != (gld_mac_info_t *)(&glddev->gld_mac_next);
1503 	    macinfo = macinfo->gldm_next) {
1504 		ASSERT(macinfo != NULL);
1505 		if (macinfo->gldm_ppa != ppa)
1506 			continue;
1507 
1508 		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
1509 			continue;	/* this one's not ready yet */
1510 
1511 		/*
1512 		 * we found the correct PPA
1513 		 */
1514 		GLDM_LOCK(macinfo, RW_WRITER);
1515 
1516 		gld->gld_mac_info = macinfo;
1517 
1518 		if (macinfo->gldm_send_tagged != NULL)
1519 			gld->gld_send = macinfo->gldm_send_tagged;
1520 		else
1521 			gld->gld_send = macinfo->gldm_send;
1522 
1523 		/* now ready for action */
1524 		gld->gld_state = DL_UNBOUND;
1525 
1526 		if ((vlan = gld_get_vlan(macinfo, VLAN_VID_NONE)) == NULL) {
1527 			GLDM_UNLOCK(macinfo);
1528 			mutex_exit(&glddev->gld_devlock);
1529 			kmem_free(gld, sizeof (gld_t));
1530 			return (EIO);
1531 		}
1532 
1533 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1534 		if (!mac_pvt->started) {
1535 			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
1536 				GLDM_UNLOCK(macinfo);
1537 				mutex_exit(&glddev->gld_devlock);
1538 				kmem_free(gld, sizeof (gld_t));
1539 				return (EIO);
1540 			}
1541 		}
1542 
1543 		gld->gld_vlan = vlan;
1544 		vlan->gldv_nstreams++;
1545 		gldinsque(gld, vlan->gldv_str_prev);
1546 		*dev = makedevice(getmajor(*dev), gld->gld_minor);
1547 		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
1548 
1549 		GLDM_UNLOCK(macinfo);
1550 #ifdef GLD_VERBOSE_DEBUG
1551 		if (gld_debug & GLDPROT)
1552 			cmn_err(CE_NOTE,
1553 			    "GLDstruct added to instance list");
1554 #endif
1555 		break;
1556 	}
1557 
1558 	if (gld->gld_state == DL_UNATTACHED) {
1559 		mutex_exit(&glddev->gld_devlock);
1560 		kmem_free(gld, sizeof (gld_t));
1561 		return (ENXIO);
1562 	}
1563 
1564 done:
1565 	mutex_exit(&glddev->gld_devlock);
1566 	noenable(WR(q));	/* We'll do the qenables manually */
1567 	qprocson(q);		/* start the queues running */
1568 	qenable(WR(q));
1569 	return (0);
1570 }
1571 
1572 /*
1573  * normal stream close call checks current status and cleans up
1574  * data structures that were dynamically allocated
1575  */
1576 /*ARGSUSED1*/
1577 int
1578 gld_close(queue_t *q, int flag, cred_t *cred)
1579 {
1580 	gld_t	*gld = (gld_t *)q->q_ptr;
1581 	glddev_t *glddev = gld->gld_device;
1582 
1583 	ASSERT(q);
1584 	ASSERT(gld);
1585 
1586 #ifdef GLD_DEBUG
1587 	if (gld_debug & GLDPROT) {
1588 		cmn_err(CE_NOTE, "gld_close(%p, Style %d)",
1589 		    (void *)q, (gld->gld_style & 0x1) + 1);
1590 	}
1591 #endif
1592 
1593 	/* Hold all device streams lists still while we check for a macinfo */
1594 	mutex_enter(&glddev->gld_devlock);
1595 
1596 	if (gld->gld_mac_info != NULL) {
1597 		/* If there's a macinfo, block recv while we change state */
1598 		GLDM_LOCK(gld->gld_mac_info, RW_WRITER);
1599 		gld->gld_flags |= GLD_STR_CLOSING; /* no more rcv putnexts */
1600 		GLDM_UNLOCK(gld->gld_mac_info);
1601 	} else {
1602 		/* no mac DL_ATTACHED right now */
1603 		gld->gld_flags |= GLD_STR_CLOSING;
1604 	}
1605 
1606 	mutex_exit(&glddev->gld_devlock);
1607 
1608 	/*
1609 	 * qprocsoff before we call gld_unbind/gldunattach, so that
1610 	 * we know wsrv isn't in there trying to undo what we're doing.
1611 	 */
1612 	qprocsoff(q);
1613 
1614 	ASSERT(gld->gld_wput_count == 0);
1615 	gld->gld_wput_count = 0;	/* just in case */
1616 
1617 	if (gld->gld_state == DL_IDLE) {
1618 		/* Need to unbind */
1619 		ASSERT(gld->gld_mac_info != NULL);
1620 		(void) gld_unbind(WR(q), NULL);
1621 	}
1622 
1623 	if (gld->gld_state == DL_UNBOUND) {
1624 		/*
1625 		 * Need to unattach
1626 		 * For style 2 stream, gldunattach also
1627 		 * For a style 2 stream, gldunattach also
1628 		 * associates the queue with a NULL dip.
1629 		ASSERT(gld->gld_mac_info != NULL);
1630 		(void) gldunattach(WR(q), NULL);
1631 	}
1632 
1633 	/* disassociate the stream from the device */
1634 	q->q_ptr = WR(q)->q_ptr = NULL;
1635 
1636 	/*
1637 	 * Since we unattached above (if necessary), we know that we're
1638 	 * on the per-major list of unattached streams, rather than a
1639 	 * per-PPA list.  So we know we should hold the devlock.
1640 	 */
1641 	mutex_enter(&glddev->gld_devlock);
1642 	gldremque(gld);			/* remove from Style 2 list */
1643 	mutex_exit(&glddev->gld_devlock);
1644 
1645 	kmem_free(gld, sizeof (gld_t));
1646 
1647 	return (0);
1648 }
1649 
1650 /*
1651  * gld_rsrv (q)
1652  *	simple read service procedure
1653  *	its purpose is to avoid the time it takes for packets
1654  *	to move through IP, so we can get them off the board
1655  *	as fast as possible given limited PC resources.
1656  *
1657  *	This is not normally used in the current implementation.  It
1658  *	can be selected with the undocumented property "fast_recv".
1659  *	If that property is set, gld_recv will send the packet
1660  *	upstream with a putq() rather than a putnext(), thus causing
1661  *	this routine to be scheduled.
1662  */
1663 int
1664 gld_rsrv(queue_t *q)
1665 {
1666 	mblk_t *mp;
1667 
1668 	while ((mp = getq(q)) != NULL) {
1669 		if (canputnext(q)) {
1670 			putnext(q, mp);
1671 		} else {
1672 			freemsg(mp);
1673 		}
1674 	}
1675 	return (0);
1676 }
1677 
1678 /*
1679  * gld_wput (q, mp)
1680  * general gld stream write put routine. Receives fastpath data from upper
1681  * modules and processes it immediately.  ioctl and M_PROTO/M_PCPROTO are
1682  * queued for later processing by the service procedure.
1683  */
1684 
1685 int
1686 gld_wput(queue_t *q, mblk_t *mp)
1687 {
1688 	gld_t  *gld = (gld_t *)(q->q_ptr);
1689 	int	rc;
1690 	boolean_t multidata = B_TRUE;
1691 
1692 #ifdef GLD_DEBUG
1693 	if (gld_debug & GLDTRACE)
1694 		cmn_err(CE_NOTE, "gld_wput(%p %p): type %x",
1695 		    (void *)q, (void *)mp, DB_TYPE(mp));
1696 #endif
1697 	switch (DB_TYPE(mp)) {
1698 
1699 	case M_DATA:
1700 		/* fast data / raw support */
1701 		/* we must be DL_ATTACHED and DL_BOUND to do this */
1702 		/* Tricky to access memory without taking the mutex */
1703 		if ((gld->gld_flags & (GLD_RAW | GLD_FAST)) == 0 ||
1704 		    gld->gld_state != DL_IDLE) {
1705 			merror(q, mp, EPROTO);
1706 			break;
1707 		}
1708 		multidata = B_FALSE;
1709 		/* LINTED: E_CASE_FALLTHRU */
1710 	case M_MULTIDATA:
1711 		/* Only call gld_start() directly if nothing queued ahead */
1712 		/* No guarantees about ordering with different threads */
1713 		if (q->q_first)
1714 			goto use_wsrv;
1715 
1716 		/*
1717 		 * This can happen if wsrv has taken off the last mblk but
1718 		 * is still processing it.
1719 		 */
1720 		membar_consumer();
1721 		if (gld->gld_in_wsrv)
1722 			goto use_wsrv;
1723 
1724 		/*
1725 		 * Keep a count of current wput calls to start.
1726 		 * Nonzero count delays any attempted DL_UNBIND.
1727 		 * See comments above gld_start().
1728 		 */
1729 		atomic_add_32((uint32_t *)&gld->gld_wput_count, 1);
1730 		membar_enter();
1731 
1732 		/* Recheck state now wput_count is set to prevent DL_UNBIND */
1733 		/* If this Q is in process of DL_UNBIND, don't call start */
1734 		if (gld->gld_state != DL_IDLE || gld->gld_in_unbind) {
1735 			/* Extremely unlikely */
1736 			atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
1737 			goto use_wsrv;
1738 		}
1739 
1740 		rc = (multidata) ? gld_start_mdt(q, mp, GLD_WPUT) :
1741 		    gld_start(q, mp, GLD_WPUT, UPRI(gld, mp->b_band));
1742 
1743 		/* Allow DL_UNBIND again */
1744 		membar_exit();
1745 		atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
1746 
1747 		if (rc == GLD_NORESOURCES)
1748 			qenable(q);
1749 		break;	/*  Done with this packet */
1750 
1751 use_wsrv:
1752 		/* Q not empty, in DL_DETACH, or start gave NORESOURCES */
1753 		(void) putq(q, mp);
1754 		qenable(q);
1755 		break;
1756 
1757 	case M_IOCTL:
1758 		/* ioctl relies on wsrv single threading per queue */
1759 		(void) putq(q, mp);
1760 		qenable(q);
1761 		break;
1762 
1763 	case M_CTL:
1764 		(void) putq(q, mp);
1765 		qenable(q);
1766 		break;
1767 
1768 	case M_FLUSH:		/* canonical flush handling */
1769 		/* XXX Should these be FLUSHALL? */
1770 		if (*mp->b_rptr & FLUSHW)
1771 			flushq(q, 0);
1772 		if (*mp->b_rptr & FLUSHR) {
1773 			flushq(RD(q), 0);
1774 			*mp->b_rptr &= ~FLUSHW;
1775 			qreply(q, mp);
1776 		} else
1777 			freemsg(mp);
1778 		break;
1779 
1780 	case M_PROTO:
1781 	case M_PCPROTO:
1782 		/* these rely on wsrv single threading per queue */
1783 		(void) putq(q, mp);
1784 		qenable(q);
1785 		break;
1786 
1787 	default:
1788 #ifdef GLD_DEBUG
1789 		if (gld_debug & GLDETRACE)
1790 			cmn_err(CE_WARN,
1791 			    "gld: Unexpected packet type from queue: 0x%x",
1792 			    DB_TYPE(mp));
1793 #endif
1794 		freemsg(mp);
1795 	}
1796 	return (0);
1797 }
1798 
1799 /*
1800  * gld_wsrv - Incoming messages are processed according to the DLPI protocol
1801  * specification.
1802  *
1803  * wsrv is single-threaded per Q.  We make use of this to avoid taking the
1804  * lock for reading data items that are only ever written by us.
1805  */
1806 
1807 int
1808 gld_wsrv(queue_t *q)
1809 {
1810 	mblk_t *mp;
1811 	gld_t *gld = (gld_t *)q->q_ptr;
1812 	gld_mac_info_t *macinfo;
1813 	union DL_primitives *prim;
1814 	int err;
1815 	boolean_t multidata;
1816 
1817 #ifdef GLD_DEBUG
1818 	if (gld_debug & GLDTRACE)
1819 		cmn_err(CE_NOTE, "gld_wsrv(%p)", (void *)q);
1820 #endif
1821 
1822 	ASSERT(!gld->gld_in_wsrv);
1823 
1824 	gld->gld_xwait = B_FALSE; /* We are now going to process this Q */
1825 
1826 	if (q->q_first == NULL)
1827 		return (0);
1828 
1829 	macinfo = gld->gld_mac_info;
1830 
1831 	/*
1832 	 * Help wput avoid a call to gld_start if there might be a message
1833 	 * previously queued by that thread being processed here.
1834 	 */
1835 	gld->gld_in_wsrv = B_TRUE;
1836 	membar_enter();
1837 
1838 	while ((mp = getq(q)) != NULL) {
1839 		switch (DB_TYPE(mp)) {
1840 		case M_DATA:
1841 		case M_MULTIDATA:
1842 			multidata = (DB_TYPE(mp) == M_MULTIDATA);
1843 
1844 			/*
1845 			 * This is a retry of a previously processed
1846 			 * DL_UNITDATA_REQ, or a RAW or FAST message from above.
1847 			 */
1848 			if (macinfo == NULL) {
1849 				/* No longer attached to a PPA, drop packet */
1850 				freemsg(mp);
1851 				break;
1852 			}
1853 
1854 			gld->gld_sched_ran = B_FALSE;
1855 			membar_enter();
1856 			err = (multidata) ? gld_start_mdt(q, mp, GLD_WSRV) :
1857 			    gld_start(q, mp, GLD_WSRV, UPRI(gld, mp->b_band));
1858 			if (err == GLD_NORESOURCES) {
1859 				/* gld_sched will qenable us later */
1860 				gld->gld_xwait = B_TRUE; /* want qenable */
1861 				membar_enter();
1862 				/*
1863 				 * v2:  we're not holding the lock; it's
1864 				 * possible that the driver could have already
1865 				 * called gld_sched (following up on its
1866 				 * return of GLD_NORESOURCES), before we got a
1867 				 * chance to do the putbq() and set gld_xwait.
1868 				 * So if we saw a call to gld_sched that
1869 				 * examined this queue, since our call to
1870 				 * gld_start() above, then it's possible we've
1871 				 * already seen the only call to gld_sched()
1872 				 * we're ever going to see.  So we better retry
1873 				 * transmitting this packet right now.
1874 				 */
1875 				if (gld->gld_sched_ran) {
1876 #ifdef GLD_DEBUG
1877 					if (gld_debug & GLDTRACE)
1878 						cmn_err(CE_NOTE, "gld_wsrv: "
1879 						    "sched was called");
1880 #endif
1881 					break;	/* try again right now */
1882 				}
1883 				gld->gld_in_wsrv = B_FALSE;
1884 				return (0);
1885 			}
1886 			break;
1887 
1888 		case M_IOCTL:
1889 			(void) gld_ioctl(q, mp);
1890 			break;
1891 
1892 		case M_CTL:
1893 			if (macinfo == NULL) {
1894 				freemsg(mp);
1895 				break;
1896 			}
1897 
1898 			if (macinfo->gldm_mctl != NULL) {
1899 				GLDM_LOCK(macinfo, RW_WRITER);
1900 				(void) (*macinfo->gldm_mctl) (macinfo, q, mp);
1901 				GLDM_UNLOCK(macinfo);
1902 			} else {
1903 				/* Driver doesn't handle M_CTL; just drop */
1904 				freemsg(mp);
1905 			}
1906 			break;
1907 
1908 		case M_PROTO:	/* Will be a DLPI message of some type */
1909 		case M_PCPROTO:
1910 			if ((err = gld_cmds(q, mp)) != GLDE_OK) {
1911 				if (err == GLDE_RETRY) {
1912 					gld->gld_in_wsrv = B_FALSE;
1913 					return (0); /* quit while we're ahead */
1914 				}
1915 				prim = (union DL_primitives *)mp->b_rptr;
1916 				dlerrorack(q, mp, prim->dl_primitive, err, 0);
1917 			}
1918 			break;
1919 
1920 		default:
1921 			/* This should never happen */
1922 #ifdef GLD_DEBUG
1923 			if (gld_debug & GLDERRS)
1924 				cmn_err(CE_WARN,
1925 				    "gld_wsrv: db_type(%x) not supported",
1926 				    mp->b_datap->db_type);
1927 #endif
1928 			freemsg(mp);	/* unknown types are discarded */
1929 			break;
1930 		}
1931 	}
1932 
1933 	membar_exit();
1934 	gld->gld_in_wsrv = B_FALSE;
1935 	return (0);
1936 }
1937 
1938 /*
1939  * gld_start() can get called from gld_wput(), gld_wsrv(), or gld_unitdata().
1940  *
1941  * We only come directly from wput() in the GLD_FAST (fastpath) or RAW case.
1942  *
1943  * In particular, we must avoid calling gld_precv*() if we came from wput().
1944  * gld_precv*() is where we, on the transmit side, loop back our outgoing
1945  * packets to the receive side if we are in physical promiscuous mode.
1946  * Since the receive side holds a lock across its call to the upstream
1947  * putnext, and that upstream module could well have looped back to our
1948  * wput() routine on the same thread, we cannot call gld_precv* from here
1949  * for fear of causing a recursive lock entry in our receive code.
1950  *
1951  * There is a problem here when coming from gld_wput().  While wput
1952  * only comes here if the queue is attached to a PPA and bound to a SAP
1953  * and there are no messages on the queue ahead of the M_DATA that could
1954  * change that, it is theoretically possible that another thread could
1955  * now wput a DL_UNBIND and a DL_DETACH message, and the wsrv() routine
1956  * could wake up and process them, before we finish processing this
1957  * send of the M_DATA.  This can only possibly happen on a Style 2 RAW or
1958  * FAST (fastpath) stream:  non RAW/FAST streams always go through wsrv(),
1959  * and Style 1 streams only DL_DETACH in the close routine, where
1960  * qprocsoff() protects us.  If this happens we could end up calling
1961  * gldm_send() after we have detached the stream and possibly called
1962  * gldm_stop().  Worse, once the number of attached streams goes to zero,
1963  * detach/unregister could be called, and the macinfo could go away entirely.
1964  *
1965  * No one has ever seen this happen.
1966  *
1967  * It is some trouble to fix this, and we would rather not add any mutex
1968  * logic into the wput() routine, which is supposed to be a "fast"
1969  * path.
1970  *
1971  * What I've done is use an atomic counter to keep a count of the number
1972  * of threads currently calling gld_start() from wput() on this stream.
1973  * If DL_DETACH sees this as nonzero, it putbqs the request back onto
1974  * the queue and qenables, hoping to have better luck next time.  Since
1975  * people shouldn't be trying to send after they've asked to DL_DETACH,
1976  * hopefully very soon all the wput=>start threads should have returned
1977  * and the DL_DETACH will succeed.  It's hard to test this since the odds
1978  * of the failure even trying to happen are so small.  I probably could
1979  * have ignored the whole issue and never been the worse for it.
1980  */
1981 static int
1982 gld_start(queue_t *q, mblk_t *mp, int caller, uint32_t upri)
1983 {
1984 	mblk_t *nmp;
1985 	gld_t *gld = (gld_t *)q->q_ptr;
1986 	gld_mac_info_t *macinfo;
1987 	gld_mac_pvt_t *mac_pvt;
1988 	int rc;
1989 	gld_interface_t *ifp;
1990 	pktinfo_t pktinfo;
1991 	uint32_t vtag;
1992 	gld_vlan_t *vlan;
1993 
1994 	ASSERT(DB_TYPE(mp) == M_DATA);
1995 	macinfo = gld->gld_mac_info;
1996 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1997 	ifp = mac_pvt->interfacep;
1998 	vlan = (gld_vlan_t *)gld->gld_vlan;
1999 
2000 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_TX) != 0) {
2001 		freemsg(mp);
2002 #ifdef GLD_DEBUG
2003 		if (gld_debug & GLDERRS)
2004 			cmn_err(CE_WARN,
2005 			    "gld_start: failed to interpret outbound packet");
2006 #endif
2007 		vlan->gldv_stats->glds_xmtbadinterp++;
2008 		return (GLD_BADARG);
2009 	}
2010 
2011 	/*
2012 	 * We're not holding the lock for this check.  If the promiscuous
2013 	 * state is in flux it doesn't matter much if we get this wrong.
2014 	 */
2015 	if (mac_pvt->nprom > 0) {
2016 		/*
2017 		 * We want to loopback to the receive side, but to avoid
2018 		 * recursive lock entry:  if we came from wput(), which
2019 		 * could have looped back via IP from our own receive
2020 		 * interrupt thread, we decline this request.  wput()
2021 		 * will then queue the packet for wsrv().  This means
2022 		 * that when snoop is running we don't get the advantage
2023 		 * of the wput() multithreaded direct entry to the
2024 		 * driver's send routine.
2025 		 */
2026 		if (caller == GLD_WPUT) {
2027 			(void) putbq(q, mp);
2028 			return (GLD_NORESOURCES);
2029 		}
2030 		if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
2031 			nmp = dupmsg_noloan(mp);
2032 		else
2033 			nmp = dupmsg(mp);
2034 	} else
2035 		nmp = NULL;		/* we need no loopback */
2036 
2037 	vtag = GLD_MK_VTAG(vlan->gldv_ptag, upri);
2038 	if (ifp->hdr_size > 0 &&
2039 	    pktinfo.pktLen > ifp->hdr_size + (vtag == 0 ? 0 : VTAG_SIZE) +
2040 	    macinfo->gldm_maxpkt) {
2041 		freemsg(mp);	/* discard oversized outbound packet */
2042 		if (nmp)
2043 			freemsg(nmp);	/* free the duped message */
2044 #ifdef GLD_DEBUG
2045 		if (gld_debug & GLDERRS)
2046 			cmn_err(CE_WARN,
2047 			    "gld_start: oversize outbound packet, size %d, "
2048 			    "max %d", pktinfo.pktLen,
2049 			    ifp->hdr_size + macinfo->gldm_maxpkt);
2050 #endif
2051 		vlan->gldv_stats->glds_xmtbadinterp++;
2052 		return (GLD_BADARG);
2053 	}
2054 
2055 	rc = (*gld->gld_send)(macinfo, mp, vtag);
2056 
2057 	if (rc != GLD_SUCCESS) {
2058 		if (rc == GLD_NORESOURCES) {
2059 			vlan->gldv_stats->glds_xmtretry++;
2060 			(void) putbq(q, mp);
2061 		} else {
2062 			/* transmit error; drop the packet */
2063 			freemsg(mp);
2064 			/* We're supposed to count failed attempts as well */
2065 			UPDATE_STATS(vlan, pktinfo, 1);
2066 #ifdef GLD_DEBUG
2067 			if (gld_debug & GLDERRS)
2068 				cmn_err(CE_WARN,
2069 				    "gld_start: gldm_send failed %d", rc);
2070 #endif
2071 		}
2072 		if (nmp)
2073 			freemsg(nmp);	/* free the duped message */
2074 		return (rc);
2075 	}
2076 
2077 	UPDATE_STATS(vlan, pktinfo, 1);
2078 
2079 	/*
2080 	 * Loopback case. The message needs to be returned on
2081 	 * the read side. This would silently fail if the dupmsg fails
2082 	 * above. This is probably OK; if there is no memory to dup the
2083 	 * block, then there isn't much we could do anyway.
2084 	 */
2085 	if (nmp) {
2086 		GLDM_LOCK(macinfo, RW_WRITER);
2087 		gld_precv(macinfo, vlan, nmp);
2088 		GLDM_UNLOCK(macinfo);
2089 	}
2090 
2091 	return (GLD_SUCCESS);
2092 }
2093 
2094 /*
2095  * With MDT V.2 a single message mp can have one header area and multiple
2096  * payload areas. A packet is described by dl_pkt_info, and each packet can
2097  * span multiple payload areas (currently with TCP, each packet will have one
2098  * header and at the most two payload areas). MACs might have a limit on the
2099  * number of payload segments (i.e. per packet scatter-gather limit), and
2100  * MDT V.2 has a way of specifying that with mdt_span_limit; the MAC driver
2101  * might also have a limit on the total number of payloads in a message, and
2102  * that is specified by mdt_max_pld.
2103  */
2104 static int
2105 gld_start_mdt(queue_t *q, mblk_t *mp, int caller)
2106 {
2107 	mblk_t *nextmp;
2108 	gld_t *gld = (gld_t *)q->q_ptr;
2109 	gld_mac_info_t *macinfo = gld->gld_mac_info;
2110 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2111 	int numpacks, mdtpacks;
2112 	gld_interface_t *ifp = mac_pvt->interfacep;
2113 	pktinfo_t pktinfo;
2114 	gld_vlan_t *vlan = (gld_vlan_t *)gld->gld_vlan;
2115 	boolean_t doloop = B_FALSE;
2116 	multidata_t *dlmdp;
2117 	pdescinfo_t pinfo;
2118 	pdesc_t *dl_pkt;
2119 	void *cookie;
2120 	uint_t totLen = 0;
2121 
2122 	ASSERT(DB_TYPE(mp) == M_MULTIDATA);
2123 
2124 	/*
2125 	 * We're not holding the lock for this check.  If the promiscuous
2126 	 * state is in flux it doesn't matter much if we get this wrong.
2127 	 */
2128 	if (mac_pvt->nprom > 0) {
2129 		/*
2130 		 * We want to loopback to the receive side, but to avoid
2131 		 * recursive lock entry:  if we came from wput(), which
2132 		 * could have looped back via IP from our own receive
2133 		 * interrupt thread, we decline this request.  wput()
2134 		 * will then queue the packet for wsrv().  This means
2135 		 * that when snoop is running we don't get the advantage
2136 		 * of the wput() multithreaded direct entry to the
2137 		 * driver's send routine.
2138 		 */
2139 		if (caller == GLD_WPUT) {
2140 			(void) putbq(q, mp);
2141 			return (GLD_NORESOURCES);
2142 		}
2143 		doloop = B_TRUE;
2144 
2145 		/*
2146 		 * unlike the M_DATA case, we don't have to call
2147 		 * dupmsg_noloan here because mmd_transform
2148 		 * (called by gld_precv_mdt) will make a copy of
2149 		 * each dblk.
2150 		 */
2151 	}
2152 
2153 	while (mp != NULL) {
2154 		/*
2155 		 * The lower layer driver only gets a single multidata
2156 		 * message; this also makes it easier to handle noresources.
2157 		 */
2158 		nextmp = mp->b_cont;
2159 		mp->b_cont = NULL;
2160 
2161 		/*
2162 		 * Get number of packets in this message; if nothing
2163 		 * to transmit, go to next message.
2164 		 */
2165 		dlmdp = mmd_getmultidata(mp);
2166 		if ((mdtpacks = (int)mmd_getcnt(dlmdp, NULL, NULL)) == 0) {
2167 			freemsg(mp);
2168 			mp = nextmp;
2169 			continue;
2170 		}
2171 
2172 		/*
2173 		 * Run interpreter to populate media specific pktinfo fields.
2174 		 * This collects per MDT message information like sap,
2175 		 * broad/multicast etc.
2176 		 */
2177 		(void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, &pktinfo,
2178 		    GLD_MDT_TX);
2179 
2180 		numpacks = (*macinfo->gldm_mdt_pre)(macinfo, mp, &cookie);
2181 
2182 		if (numpacks > 0) {
2183 			/*
2184 			 * Driver indicates it can transmit at least 1, and
2185 			 * possibly all, packets in MDT message.
2186 			 */
2187 			int count = numpacks;
2188 
2189 			for (dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo);
2190 			    (dl_pkt != NULL);
2191 			    dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo)) {
2192 				/*
2193 				 * Format this packet by adding link header and
2194 				 * adjusting pdescinfo to include it; get
2195 				 * packet length.
2196 				 */
2197 				(void) (*ifp->interpreter_mdt)(macinfo, NULL,
2198 				    &pinfo, &pktinfo, GLD_MDT_TXPKT);
2199 
2200 				totLen += pktinfo.pktLen;
2201 
2202 				/*
2203 				 * Loop back packet before handing to the
2204 				 * driver.
2205 				 */
2206 				if (doloop &&
2207 				    mmd_adjpdesc(dl_pkt, &pinfo) != NULL) {
2208 					GLDM_LOCK(macinfo, RW_WRITER);
2209 					gld_precv_mdt(macinfo, vlan, mp,
2210 					    dl_pkt, &pktinfo);
2211 					GLDM_UNLOCK(macinfo);
2212 				}
2213 
2214 				/*
2215 				 * And send off to driver.
2216 				 */
2217 				(*macinfo->gldm_mdt_send)(macinfo, cookie,
2218 				    &pinfo);
2219 
2220 				/*
2221 				 * Be careful not to invoke getnextpdesc if we
2222 				 * already sent the last packet, since driver
2223 				 * might have posted it to hardware causing a
2224 				 * completion and freemsg() so the MDT data
2225 				 * structures might not be valid anymore.
2226 				 */
2227 				if (--count == 0)
2228 					break;
2229 			}
2230 			(*macinfo->gldm_mdt_post)(macinfo, mp, cookie);
2231 			pktinfo.pktLen = totLen;
2232 			UPDATE_STATS(vlan, pktinfo, numpacks);
2233 
2234 			/*
2235 			 * In the noresources case (when driver indicates it
2236 			 * can not transmit all packets in the MDT message),
2237 			 * adjust to skip the first few packets on retrial.
2238 			 */
2239 			if (numpacks != mdtpacks) {
2240 				/*
2241 				 * Release already processed packet descriptors.
2242 				 */
2243 				for (count = 0; count < numpacks; count++) {
2244 					dl_pkt = mmd_getfirstpdesc(dlmdp,
2245 					    &pinfo);
2246 					mmd_rempdesc(dl_pkt);
2247 				}
2248 				vlan->gldv_stats->glds_xmtretry++;
2249 				mp->b_cont = nextmp;
2250 				(void) putbq(q, mp);
2251 				return (GLD_NORESOURCES);
2252 			}
2253 		} else if (numpacks == 0) {
2254 			/*
2255 			 * Driver indicates it can not transmit any packets
2256 			 * currently and will request retrial later.
2257 			 */
2258 			vlan->gldv_stats->glds_xmtretry++;
2259 			mp->b_cont = nextmp;
2260 			(void) putbq(q, mp);
2261 			return (GLD_NORESOURCES);
2262 		} else {
2263 			ASSERT(numpacks == -1);
2264 			/*
2265 			 * We're supposed to count failed attempts as well.
2266 			 */
2267 			dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo);
2268 			while (dl_pkt != NULL) {
2269 				/*
2270 				 * Call interpreter to determine total packet
2271 				 * bytes that are being dropped.
2272 				 */
2273 				(void) (*ifp->interpreter_mdt)(macinfo, NULL,
2274 				    &pinfo, &pktinfo, GLD_MDT_TXPKT);
2275 
2276 				totLen += pktinfo.pktLen;
2277 
2278 				dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo);
2279 			}
2280 			pktinfo.pktLen = totLen;
2281 			UPDATE_STATS(vlan, pktinfo, mdtpacks);
2282 
2283 			/*
2284 			 * Transmit error; drop the message, move on
2285 			 * to the next one.
2286 			 */
2287 			freemsg(mp);
2288 		}
2289 
2290 		/*
2291 		 * Process the next multidata block, if there is one.
2292 		 */
2293 		mp = nextmp;
2294 	}
2295 
2296 	return (GLD_SUCCESS);
2297 }
2298 
2299 /*
2300  * gld_intr (macinfo)
2301  */
2302 uint_t
2303 gld_intr(gld_mac_info_t *macinfo)
2304 {
2305 	ASSERT(macinfo != NULL);
2306 
2307 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
2308 		return (DDI_INTR_UNCLAIMED);
2309 
2310 	return ((*macinfo->gldm_intr)(macinfo));
2311 }
2312 
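/*
 * Illustrative sketch, not part of GLD: a GLD-based driver would
 * typically register gld_intr() as its interrupt handler, so that the
 * GLD_MAC_READY check above runs before its gldm_intr() entry point.
 * The routine name "xx_add_intr" and its error handling are
 * hypothetical; only the gld_intr()/macinfo pairing is taken from
 * this file.
 *
 *	static int
 *	xx_add_intr(dev_info_t *devinfo, gld_mac_info_t *macinfo)
 *	{
 *		if (ddi_add_intr(devinfo, 0, NULL, NULL,
 *		    (uint_t (*)(caddr_t))gld_intr,
 *		    (caddr_t)macinfo) != DDI_SUCCESS)
 *			return (DDI_FAILURE);
 *		return (DDI_SUCCESS);
 *	}
 */
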
2313 /*
2314  * gld_sched (macinfo)
2315  *
2316  * This routine scans the streams that refer to a specific macinfo
2317  * structure and causes the STREAMS scheduler to try to run them if
2318  * they are marked as waiting for the transmit buffer.
2319  */
2320 void
2321 gld_sched(gld_mac_info_t *macinfo)
2322 {
2323 	gld_mac_pvt_t *mac_pvt;
2324 	gld_t *gld;
2325 	gld_vlan_t *vlan;
2326 	int i;
2327 
2328 	ASSERT(macinfo != NULL);
2329 
2330 	GLDM_LOCK(macinfo, RW_WRITER);
2331 
2332 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
2333 		/* We're probably being called from a leftover interrupt */
2334 		GLDM_UNLOCK(macinfo);
2335 		return;
2336 	}
2337 
2338 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2339 
2340 	for (i = 0; i < VLAN_HASHSZ; i++) {
2341 		for (vlan = mac_pvt->vlan_hash[i];
2342 		    vlan != NULL; vlan = vlan->gldv_next) {
2343 			for (gld = vlan->gldv_str_next;
2344 			    gld != (gld_t *)&vlan->gldv_str_next;
2345 			    gld = gld->gld_next) {
2346 				ASSERT(gld->gld_mac_info == macinfo);
2347 				gld->gld_sched_ran = B_TRUE;
2348 				membar_enter();
2349 				if (gld->gld_xwait) {
2350 					gld->gld_xwait = B_FALSE;
2351 					qenable(WR(gld->gld_qptr));
2352 				}
2353 			}
2354 		}
2355 	}
2356 
2357 	GLDM_UNLOCK(macinfo);
2358 }
2359 
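/*
 * Illustrative sketch, not part of GLD: a driver whose gldm_send()
 * returned GLD_NORESOURCES is expected to call gld_sched() from its
 * transmit-reclaim path once resources free up, so that any stream
 * marked gld_xwait above gets qenabled.  "xx_tx_reclaim" is a
 * hypothetical driver routine.
 *
 *	static void
 *	xx_tx_reclaim(gld_mac_info_t *macinfo)
 *	{
 *		... free completed transmit descriptors ...
 *		gld_sched(macinfo);
 *	}
 */
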
2360 /*
2361  * gld_precv (macinfo, mp)
2362  * called from gld_start to loopback a packet when in promiscuous mode
2363  */
2364 static void
2365 gld_precv(gld_mac_info_t *macinfo, gld_vlan_t *vlan, mblk_t *mp)
2366 {
2367 	gld_mac_pvt_t *mac_pvt;
2368 	gld_interface_t *ifp;
2369 	pktinfo_t pktinfo;
2370 
2371 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
2372 
2373 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2374 	ifp = mac_pvt->interfacep;
2375 
2376 	/*
2377 	 * call the media specific packet interpreter routine
2378 	 */
2379 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RXLOOP) != 0) {
2380 		freemsg(mp);
2381 		BUMP(vlan->gldv_stats->glds_rcvbadinterp, 1);
2382 #ifdef GLD_DEBUG
2383 		if (gld_debug & GLDERRS)
2384 			cmn_err(CE_WARN,
2385 			    "gld_precv: interpreter failed");
2386 #endif
2387 		return;
2388 	}
2389 
2390 	gld_sendup(macinfo, vlan, &pktinfo, mp, gld_paccept);
2391 }
2392 
2393 /*
2394  * called from gld_start_mdt to loopback packet(s) when in promiscuous mode
2395  */
2396 static void
2397 gld_precv_mdt(gld_mac_info_t *macinfo, gld_vlan_t *vlan, mblk_t *mp,
2398     pdesc_t *dl_pkt, pktinfo_t *pktinfo)
2399 {
2400 	mblk_t *adjmp;
2401 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2402 	gld_interface_t *ifp = mac_pvt->interfacep;
2403 
2404 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
2405 
2406 	/*
2407 	 * Get source/destination.
2408 	 */
2409 	(void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, pktinfo,
2410 	    GLD_MDT_RXLOOP);
2411 	if ((adjmp = mmd_transform(dl_pkt)) != NULL)
2412 		gld_sendup(macinfo, vlan, pktinfo, adjmp, gld_paccept);
2413 }
2414 
2415 /*
2416  * gld_recv (macinfo, mp)
2417  * called with a MAC-level packet in an mblk; take the maclock,
2418  * try the ip4q and ip6q hack, and otherwise call gld_sendup.
2419  *
2420  * V0 drivers are already holding the mutex when they call us.
2421  */
2422 void
2423 gld_recv(gld_mac_info_t *macinfo, mblk_t *mp)
2424 {
2425 	gld_recv_tagged(macinfo, mp, VLAN_VTAG_NONE);
2426 }
2427 
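/*
 * Illustrative sketch, not part of GLD: a driver's receive path hands
 * each received frame to GLD in an mblk via gld_recv(), or via
 * gld_recv_tagged() when it has already parsed out a VLAN tag.  The
 * routine "xx_rx" and its copy-based buffer handling are hypothetical.
 *
 *	static void
 *	xx_rx(gld_mac_info_t *macinfo, uchar_t *buf, size_t len)
 *	{
 *		mblk_t *mp;
 *
 *		if ((mp = allocb(len, BPRI_MED)) == NULL)
 *			return;			... receive drop ...
 *		bcopy(buf, mp->b_wptr, len);
 *		mp->b_wptr += len;
 *		gld_recv(macinfo, mp);
 *	}
 */
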
2428 void
2429 gld_recv_tagged(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t vtag)
2430 {
2431 	gld_mac_pvt_t *mac_pvt;
2432 	char pbuf[3*GLD_MAX_ADDRLEN];
2433 	pktinfo_t pktinfo;
2434 	gld_interface_t *ifp;
2435 	queue_t *ipq = NULL;
2436 	gld_vlan_t *vlan;
2437 	uint32_t vid;
2438 
2439 	ASSERT(macinfo != NULL);
2440 	ASSERT(mp->b_datap->db_ref);
2441 
2442 	GLDM_LOCK(macinfo, RW_READER);
2443 
2444 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
2445 		/* We're probably being called from a leftover interrupt */
2446 		freemsg(mp);
2447 		goto done;
2448 	}
2449 
2450 	vid = GLD_VTAG_VID(vtag);
2451 	if ((vlan = gld_find_vlan(macinfo, vid)) == NULL) {
2452 		freemsg(mp);
2453 		goto done;
2454 	}
2455 
2456 	/*
2457 	 * Check whether underlying media code supports the IPQ hack,
2458 	 * and if so, whether the interpreter can quickly parse the
2459 	 * packet to get some relevant parameters.
2460 	 */
2461 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2462 	ifp = mac_pvt->interfacep;
2463 	if (((*ifp->interpreter)(macinfo, mp, &pktinfo,
2464 	    GLD_RXQUICK) == 0) && (vlan->gldv_ipq_flags == 0)) {
2465 		switch (pktinfo.ethertype) {
2466 		case ETHERTYPE_IP:
2467 			ipq = vlan->gldv_ipq;
2468 			break;
2469 		case ETHERTYPE_IPV6:
2470 			ipq = vlan->gldv_ipv6q;
2471 			break;
2472 		}
2473 	}
2474 
2475 	BUMP(vlan->gldv_stats->glds_bytercv64, pktinfo.pktLen);
2476 	BUMP(vlan->gldv_stats->glds_pktrcv64, 1);
2477 
2478 	/*
2479 	 * Special case for IP; we can simply do the putnext here, if:
2480 	 * o ipq != NULL, and therefore:
2481 	 * - the device type supports IPQ (ethernet and IPoIB);
2482 	 * - the interpreter could quickly parse the packet;
2483 	 * - there are no PROMISC_SAP streams (on this VLAN);
2484 	 * - there is one, and only one, IP stream bound (to this VLAN);
2485 	 * - that stream is a "fastpath" stream;
2486 	 * - the packet is of type ETHERTYPE_IP or ETHERTYPE_IPV6
2487 	 *
2488 	 * o the packet is specifically for me, and therefore:
2489 	 * - the packet is not multicast or broadcast (fastpath only
2490 	 *   wants unicast packets).
2491 	 *
2492 	 * o the stream is not asserting flow control.
2493 	 */
2494 	if (ipq != NULL &&
2495 	    pktinfo.isForMe &&
2496 	    canputnext(ipq)) {
2497 		/*
2498 		 * Skip the mac header. We know there is no LLC1/SNAP header
2499 		 * in this packet
2500 		 */
2501 		mp->b_rptr += pktinfo.macLen;
2502 		putnext(ipq, mp);
2503 		goto done;
2504 	}
2505 
2506 	/*
2507 	 * call the media specific packet interpreter routine
2508 	 */
2509 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RX) != 0) {
2510 		BUMP(vlan->gldv_stats->glds_rcvbadinterp, 1);
2511 #ifdef GLD_DEBUG
2512 		if (gld_debug & GLDERRS)
2513 			cmn_err(CE_WARN,
2514 			    "gld_recv_tagged: interpreter failed");
2515 #endif
2516 		freemsg(mp);
2517 		goto done;
2518 	}
2519 
2520 	/*
2521 	 * This is safe even if vtag is VLAN_VTAG_NONE
2522 	 */
2523 
2524 	pktinfo.vid = vid;
2525 	pktinfo.cfi = GLD_VTAG_CFI(vtag);
2526 #ifdef GLD_DEBUG
2527 	if (pktinfo.cfi != VLAN_CFI_ETHER)
2528 		cmn_err(CE_WARN, "gld_recv_tagged: non-ETHER CFI");
2529 #endif
2530 	pktinfo.user_pri = GLD_VTAG_PRI(vtag);
2531 
2532 #ifdef GLD_DEBUG
2533 	if ((gld_debug & GLDRECV) &&
2534 	    (!(gld_debug & GLDNOBR) ||
2535 	    (!pktinfo.isBroadcast && !pktinfo.isMulticast))) {
2536 		char pbuf2[3*GLD_MAX_ADDRLEN];
2537 
2538 		cmn_err(CE_CONT, "gld_recv_tagged: machdr=<%s -> %s>\n",
2539 		    gld_macaddr_sprintf(pbuf, pktinfo.shost,
2540 		    macinfo->gldm_addrlen), gld_macaddr_sprintf(pbuf2,
2541 		    pktinfo.dhost, macinfo->gldm_addrlen));
2542 		cmn_err(CE_CONT, "gld_recv_tagged: VlanId %d UserPri %d\n",
2543 		    pktinfo.vid,
2544 		    pktinfo.user_pri);
2545 		cmn_err(CE_CONT, "gld_recv_tagged: ethertype: %4x Len: %4d "
2546 		    "Hdr: %d,%d isMulticast: %s\n",
2547 		    pktinfo.ethertype,
2548 		    pktinfo.pktLen,
2549 		    pktinfo.macLen,
2550 		    pktinfo.hdrLen,
2551 		    pktinfo.isMulticast ? "Y" : "N");
2552 	}
2553 #endif
2554 
2555 	gld_sendup(macinfo, vlan, &pktinfo, mp, gld_accept);
2556 
2557 done:
2558 	GLDM_UNLOCK(macinfo);
2559 }
2560 
2561 /* =================================================================== */
2562 /* receive group: called from gld_recv and gld_precv* with maclock held */
2563 /* =================================================================== */
2564 
2565 /*
2566  * gld_sendup (macinfo, mp)
2567  * called with an ethernet packet in an mblk; must decide whether
2568  * the packet is for us and which streams to queue it to.
2569  */
2570 static void
2571 gld_sendup(gld_mac_info_t *macinfo, gld_vlan_t *vlan, pktinfo_t *pktinfo,
2572     mblk_t *mp, int (*acceptfunc)())
2573 {
2574 	gld_t *gld;
2575 	gld_t *fgld = NULL;
2576 	mblk_t *nmp;
2577 	void (*send)(queue_t *qp, mblk_t *mp);
2578 	int (*cansend)(queue_t *qp);
2579 
2580 #ifdef GLD_DEBUG
2581 	if (gld_debug & GLDTRACE)
2582 		cmn_err(CE_NOTE, "gld_sendup(%p, %p)", (void *)mp,
2583 		    (void *)macinfo);
2584 #endif
2585 
2586 	ASSERT(mp != NULL);
2587 	ASSERT(macinfo != NULL);
2588 	ASSERT(vlan != NULL);
2589 	ASSERT(pktinfo != NULL);
2590 	ASSERT(GLDM_LOCK_HELD(macinfo));
2591 
2592 	/*
2593 	 * The "fast" in "GLDOPT_FAST_RECV" refers to the speed at which
2594 	 * gld_recv returns to the caller's interrupt routine.  The total
2595 	 * network throughput would normally be lower when selecting this
2596 	 * option, because we putq the messages and process them later,
2597 	 * instead of sending them with putnext now.  Some time critical
2598 	 * device might need this, so it's here but undocumented.
2599 	 */
2600 	if (macinfo->gldm_options & GLDOPT_FAST_RECV) {
2601 		send = (void (*)(queue_t *, mblk_t *))putq;
2602 		cansend = canput;
2603 	} else {
2604 		send = (void (*)(queue_t *, mblk_t *))putnext;
2605 		cansend = canputnext;
2606 	}
2607 
2608 	/*
2609 	 * Search all the streams attached to this macinfo looking for
2610 	 * those eligible to receive the present packet.
2611 	 */
2612 	for (gld = vlan->gldv_str_next;
2613 	    gld != (gld_t *)&vlan->gldv_str_next; gld = gld->gld_next) {
2614 #ifdef GLD_VERBOSE_DEBUG
2615 		cmn_err(CE_NOTE, "gld_sendup: SAP: %4x QPTR: %p QSTATE: %s",
2616 		    gld->gld_sap, (void *)gld->gld_qptr,
2617 		    gld->gld_state == DL_IDLE ? "IDLE": "NOT IDLE");
2618 #endif
2619 		ASSERT(gld->gld_qptr != NULL);
2620 		ASSERT(gld->gld_state == DL_IDLE ||
2621 		    gld->gld_state == DL_UNBOUND);
2622 		ASSERT(gld->gld_mac_info == macinfo);
2623 		ASSERT(gld->gld_vlan == vlan);
2624 
2625 		if (gld->gld_state != DL_IDLE)
2626 			continue;	/* not eligible to receive */
2627 		if (gld->gld_flags & GLD_STR_CLOSING)
2628 			continue;	/* not eligible to receive */
2629 
2630 #ifdef GLD_DEBUG
2631 		if ((gld_debug & GLDRECV) &&
2632 		    (!(gld_debug & GLDNOBR) ||
2633 		    (!pktinfo->isBroadcast && !pktinfo->isMulticast)))
2634 			cmn_err(CE_NOTE,
2635 			    "gld_sendup: queue sap: %4x promis: %s %s %s",
2636 			    gld->gld_sap,
2637 			    gld->gld_flags & GLD_PROM_PHYS ? "phys " : "     ",
2638 			    gld->gld_flags & GLD_PROM_SAP  ? "sap  " : "     ",
2639 			    gld->gld_flags & GLD_PROM_MULT ? "multi" : "     ");
2640 #endif
2641 
2642 		/*
2643 		 * The accept function differs depending on whether this is
2644 		 * a packet that we received from the wire or a loopback.
2645 		 */
2646 		if ((*acceptfunc)(gld, pktinfo)) {
2647 			/* sap matches */
2648 			pktinfo->wasAccepted = 1;	/* known protocol */
2649 
2650 			if (!(*cansend)(gld->gld_qptr)) {
2651 				/*
2652 				 * Upper stream is not accepting messages, i.e.
2653 				 * it is flow controlled, therefore we will
2654 				 * forgo sending the message up this stream.
2655 				 */
2656 #ifdef GLD_DEBUG
2657 				if (gld_debug & GLDETRACE)
2658 					cmn_err(CE_WARN,
2659 					    "gld_sendup: canput failed");
2660 #endif
2661 				BUMP(vlan->gldv_stats->glds_blocked, 1);
2662 				qenable(gld->gld_qptr);
2663 				continue;
2664 			}
2665 
2666 			/*
2667 			 * We are trying to avoid an extra dupmsg() here.
2668 			 * If this is the first eligible queue, remember the
2669 			 * queue and send up the message after the loop.
2670 			 */
2671 			if (!fgld) {
2672 				fgld = gld;
2673 				continue;
2674 			}
2675 
2676 			/* duplicate the packet for this stream */
2677 			nmp = dupmsg(mp);
2678 			if (nmp == NULL) {
2679 				BUMP(vlan->gldv_stats->glds_gldnorcvbuf, 1);
2680 #ifdef GLD_DEBUG
2681 				if (gld_debug & GLDERRS)
2682 					cmn_err(CE_WARN,
2683 					    "gld_sendup: dupmsg failed");
2684 #endif
2685 				break;	/* couldn't get resources; drop it */
2686 			}
2687 			/* pass the message up the stream */
2688 			gld_passon(gld, nmp, pktinfo, send);
2689 		}
2690 	}
2691 
2692 	ASSERT(mp);
2693 	/* send the original dup of the packet up the first stream found */
2694 	if (fgld)
2695 		gld_passon(fgld, mp, pktinfo, send);
2696 	else
2697 		freemsg(mp);	/* no streams matched */
2698 
2699 	/* We do not count looped back packets */
2700 	if (acceptfunc == gld_paccept)
2701 		return;		/* transmit loopback case */
2702 
2703 	if (pktinfo->isBroadcast)
2704 		BUMP(vlan->gldv_stats->glds_brdcstrcv, 1);
2705 	else if (pktinfo->isMulticast)
2706 		BUMP(vlan->gldv_stats->glds_multircv, 1);
2707 
2708 	/* No stream accepted this packet */
2709 	if (!pktinfo->wasAccepted)
2710 		BUMP(vlan->gldv_stats->glds_unknowns, 1);
2711 }
2712 
2713 /*
2714  * A packet matches a stream if:
2715  *     the stream accepts EtherType encoded packets and the type matches
2716  *  or the stream accepts LLC packets and the packet is an LLC packet
2717  */
2718 #define	MATCH(stream, pktinfo) \
2719 	((stream->gld_ethertype && stream->gld_sap == pktinfo->ethertype) || \
2720 	(!stream->gld_ethertype && pktinfo->isLLC))
2721 
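/*
 * Illustrative examples of the MATCH() rule above, not part of GLD:
 * a stream bound with gld_ethertype set and gld_sap == 0x0800 matches
 * packets whose interpreted ethertype is 0x0800 (IPv4), while a stream
 * bound in LLC mode (gld_ethertype clear) matches any packet the
 * interpreter flagged as isLLC.
 */
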
2722 /*
2723  * This function validates a packet for sending up a particular
2724  * stream. The message header has been parsed and its characteristics
2725  * are recorded in the pktinfo data structure. The STREAMS stack
2726  * information is presented in the gld data structures.
2727  */
2728 static int
2729 gld_accept(gld_t *gld, pktinfo_t *pktinfo)
2730 {
2731 	/*
2732 	 * if there is no match do not bother checking further.
2733 	 */
2734 	if (!MATCH(gld, pktinfo) && !(gld->gld_flags & GLD_PROM_SAP))
2735 		return (0);
2736 
2737 	/*
2738 	 * We don't accept any packet from the hardware if we originated it.
2739 	 * (Contrast gld_paccept, the send-loopback accept function.)
2740 	 */
2741 	if (pktinfo->isLooped)
2742 		return (0);
2743 
2744 	/*
2745 	 * If the packet is broadcast or sent to us directly we will accept it.
2746 	 * Also we will accept multicast packets requested by the stream.
2747 	 */
2748 	if (pktinfo->isForMe || pktinfo->isBroadcast ||
2749 	    gld_mcmatch(gld, pktinfo))
2750 		return (1);
2751 
2752 	/*
2753 	 * Finally, accept anything else if we're in promiscuous mode
2754 	 */
2755 	if (gld->gld_flags & GLD_PROM_PHYS)
2756 		return (1);
2757 
2758 	return (0);
2759 }
2760 
2761 /*
2762  * Return TRUE if the given multicast address is one
2763  * of those that this particular Stream is interested in.
2764  */
2765 static int
2766 gld_mcmatch(gld_t *gld, pktinfo_t *pktinfo)
2767 {
2768 	/*
2769 	 * Return FALSE if not a multicast address.
2770 	 */
2771 	if (!pktinfo->isMulticast)
2772 		return (0);
2773 
2774 	/*
2775 	 * Check if all multicasts have been enabled for this Stream
2776 	 */
2777 	if (gld->gld_flags & GLD_PROM_MULT)
2778 		return (1);
2779 
2780 	/*
2781 	 * Return FALSE if no multicast addresses enabled for this Stream.
2782 	 */
2783 	if (!gld->gld_mcast)
2784 		return (0);
2785 
2786 	/*
2787 	 * Otherwise, look for it in the table.
2788 	 */
2789 	return (gld_multicast(pktinfo->dhost, gld));
2790 }
2791 
2792 /*
2793  * gld_multicast determines if the address is a multicast address for
2794  * this stream.
2795  */
2796 static int
2797 gld_multicast(unsigned char *macaddr, gld_t *gld)
2798 {
2799 	int i;
2800 
2801 	ASSERT(GLDM_LOCK_HELD(gld->gld_mac_info));
2802 
2803 	if (!gld->gld_mcast)
2804 		return (0);
2805 
2806 	for (i = 0; i < gld->gld_multicnt; i++) {
2807 		if (gld->gld_mcast[i]) {
2808 			ASSERT(gld->gld_mcast[i]->gldm_refcnt);
2809 			if (mac_eq(gld->gld_mcast[i]->gldm_addr, macaddr,
2810 			    gld->gld_mac_info->gldm_addrlen))
2811 				return (1);
2812 		}
2813 	}
2814 
2815 	return (0);
2816 }
2817 
2818 /*
2819  * accept function for looped back packets
2820  */
2821 static int
2822 gld_paccept(gld_t *gld, pktinfo_t *pktinfo)
2823 {
2824 	return (gld->gld_flags & GLD_PROM_PHYS &&
2825 	    (MATCH(gld, pktinfo) || gld->gld_flags & GLD_PROM_SAP));
2826 }
2827 
2828 static void
2829 gld_passon(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo,
2830 	void (*send)(queue_t *qp, mblk_t *mp))
2831 {
2832 	int skiplen;
2833 
2834 #ifdef GLD_DEBUG
2835 	if (gld_debug & GLDTRACE)
2836 		cmn_err(CE_NOTE, "gld_passon(%p, %p, %p)", (void *)gld,
2837 		    (void *)mp, (void *)pktinfo);
2838 
2839 	if ((gld_debug & GLDRECV) && (!(gld_debug & GLDNOBR) ||
2840 	    (!pktinfo->isBroadcast && !pktinfo->isMulticast)))
2841 		cmn_err(CE_NOTE, "gld_passon: q: %p mblk: %p minor: %d sap: %x",
2842 		    (void *)gld->gld_qptr->q_next, (void *)mp, gld->gld_minor,
2843 		    gld->gld_sap);
2844 #endif
2845 
2846 	/*
2847 	 * Figure out how much of the packet header to throw away.
2848 	 *
2849 	 * RAW streams expect to see the whole packet.
2850 	 *
2851 	 * Other streams expect to see the packet with the MAC header
2852 	 * removed.
2853 	 *
2854 	 * Normal DLPI (non RAW/FAST) streams also want the
2855 	 * DL_UNITDATA_IND M_PROTO message block prepended to the M_DATA.
2856 	 */
2857 	if (gld->gld_flags & GLD_RAW) {
2858 		skiplen = 0;
2859 	} else {
2860 		skiplen = pktinfo->macLen;		/* skip mac header */
2861 		if (gld->gld_ethertype)
2862 			skiplen += pktinfo->hdrLen;	/* skip any extra */
2863 	}
2864 
2865 	if (skiplen >= pktinfo->pktLen) {
2866 		/*
2867 		 * If the interpreter did its job right, then it cannot be
2868 		 * asking us to skip more bytes than are in the packet!
2869 		 * However, there could be zero data bytes left after the
2870 		 * amount to skip.  DLPI specifies that passed M_DATA blocks
2871 		 * should contain at least one byte of data, so if we have
2872 		 * none we just drop it.
2873 		 */
2874 		ASSERT(!(skiplen > pktinfo->pktLen));
2875 		freemsg(mp);
2876 		return;
2877 	}
2878 
2879 	/*
2880 	 * Skip over the header(s), taking care to possibly handle message
2881 	 * fragments shorter than the amount we need to skip.  Hopefully
2882 	 * the driver will put the entire packet, or at least the entire
2883 	 * header, into a single message block.  But we handle it if not.
2884 	 */
2885 	while (skiplen >= MBLKL(mp)) {
2886 		mblk_t *tmp = mp;
2887 		skiplen -= MBLKL(mp);
2888 		mp = mp->b_cont;
2889 		ASSERT(mp != NULL);	/* because skiplen < pktinfo->pktLen */
2890 		freeb(tmp);
2891 	}
2892 	mp->b_rptr += skiplen;
2893 
2894 	/* Add M_PROTO if necessary, and pass upstream */
2895 	if (((gld->gld_flags & GLD_FAST) && !pktinfo->isMulticast &&
2896 	    !pktinfo->isBroadcast) || (gld->gld_flags & GLD_RAW)) {
2897 		/* RAW/FAST: just send up the M_DATA */
2898 		(*send)(gld->gld_qptr, mp);
2899 	} else {
2900 		/* everybody else wants to see a unitdata_ind structure */
2901 		mp = gld_addudind(gld, mp, pktinfo);
2902 		if (mp)
2903 			(*send)(gld->gld_qptr, mp);
2904 		/* if it failed, gld_addudind already bumped statistic */
2905 	}
2906 }
2907 
2908 /*
2909  * gld_addudind(gld, mp, pktinfo)
2910  * format a DL_UNITDATA_IND message to be sent upstream to the user
2911  */
2912 static mblk_t *
2913 gld_addudind(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo)
2914 {
2915 	gld_mac_info_t		*macinfo = gld->gld_mac_info;
2916 	gld_vlan_t		*vlan = (gld_vlan_t *)gld->gld_vlan;
2917 	dl_unitdata_ind_t	*dludindp;
2918 	mblk_t			*nmp;
2919 	int			size;
2920 	int			type;
2921 
2922 #ifdef GLD_DEBUG
2923 	if (gld_debug & GLDTRACE)
2924 		cmn_err(CE_NOTE, "gld_addudind(%p, %p, %p)", (void *)gld,
2925 		    (void *)mp, (void *)pktinfo);
2926 #endif
2927 	ASSERT(macinfo != NULL);
2928 
2929 	/*
2930 	 * Allocate the DL_UNITDATA_IND M_PROTO header; if allocation fails,
2931 	 * we might as well discard, since we can't go further.
2932 	 */
2933 	size = sizeof (dl_unitdata_ind_t) +
2934 	    2 * (macinfo->gldm_addrlen + abs(macinfo->gldm_saplen));
2935 	if ((nmp = allocb(size, BPRI_MED)) == NULL) {
2936 		freemsg(mp);
2937 		BUMP(vlan->gldv_stats->glds_gldnorcvbuf, 1);
2938 #ifdef GLD_DEBUG
2939 		if (gld_debug & GLDERRS)
2940 			cmn_err(CE_WARN,
2941 			    "gld_addudind: allocb failed");
2942 #endif
2943 		return ((mblk_t *)NULL);
2944 	}
2945 	DB_TYPE(nmp) = M_PROTO;
2946 	nmp->b_rptr = nmp->b_datap->db_lim - size;
2947 
2948 	type = (gld->gld_ethertype) ? pktinfo->ethertype : 0;
2949 
2950 	/*
2951 	 * now setup the DL_UNITDATA_IND header
2952 	 *
2953 	 * XXX This looks broken if the saps aren't two bytes.
2954 	 */
2955 	dludindp = (dl_unitdata_ind_t *)nmp->b_rptr;
2956 	dludindp->dl_primitive = DL_UNITDATA_IND;
2957 	dludindp->dl_src_addr_length =
2958 	    dludindp->dl_dest_addr_length = macinfo->gldm_addrlen +
2959 					abs(macinfo->gldm_saplen);
2960 	dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t);
2961 	dludindp->dl_src_addr_offset = dludindp->dl_dest_addr_offset +
2962 					dludindp->dl_dest_addr_length;
2963 
2964 	dludindp->dl_group_address = (pktinfo->isMulticast ||
2965 					pktinfo->isBroadcast);
2966 
2967 	nmp->b_wptr = nmp->b_rptr + dludindp->dl_dest_addr_offset;
2968 
2969 	mac_copy(pktinfo->dhost, nmp->b_wptr, macinfo->gldm_addrlen);
2970 	nmp->b_wptr += macinfo->gldm_addrlen;
2971 
2972 	ASSERT(macinfo->gldm_saplen == -2);	/* XXX following code assumes */
2973 	*(ushort_t *)(nmp->b_wptr) = type;
2974 	nmp->b_wptr += abs(macinfo->gldm_saplen);
2975 
2976 	ASSERT(nmp->b_wptr == nmp->b_rptr + dludindp->dl_src_addr_offset);
2977 
2978 	mac_copy(pktinfo->shost, nmp->b_wptr, macinfo->gldm_addrlen);
2979 	nmp->b_wptr += macinfo->gldm_addrlen;
2980 
2981 	*(ushort_t *)(nmp->b_wptr) = type;
2982 	nmp->b_wptr += abs(macinfo->gldm_saplen);
2983 
2984 	if (pktinfo->nosource)
2985 		dludindp->dl_src_addr_offset = dludindp->dl_src_addr_length = 0;
2986 	linkb(nmp, mp);
2987 	return (nmp);
2988 }
2989 
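/*
 * Worked example, not part of GLD: for an Ethernet-style MAC with
 * gldm_addrlen == 6 and gldm_saplen == -2, the M_PROTO block built
 * above is sizeof (dl_unitdata_ind_t) + 16 bytes, dl_dest_addr_length
 * and dl_src_addr_length are both 8, and each address is laid out as
 * the 6-byte MAC address immediately followed by the 2-byte
 * SAP/ethertype.
 */
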
2990 /* ======================================================= */
2991 /* wsrv group: called from wsrv, single threaded per queue */
2992 /* ======================================================= */
2993 
2994 /*
2995  * We go to some trouble to avoid taking the same lock during normal
2996  * transmit processing as we do during normal receive processing.
2997  *
2998  * Elements of the per-instance macinfo and per-stream gld_t structures
2999  * are for the most part protected by the GLDM_LOCK rwlock/mutex.
3000  * (Elements of the gld_mac_pvt_t structure are considered part of the
3001  * macinfo structure for purposes of this discussion).
3002  *
3003  * However, it is more complicated than that:
3004  *
3005  *	Elements of the macinfo structure that are set before the macinfo
3006  *	structure is added to its device list by gld_register(), and never
3007  *	thereafter modified, are accessed without requiring taking the lock.
3008  *	A similar rule applies to those elements of the gld_t structure that
3009  *	are written by gld_open() before the stream is added to any list.
3010  *
3011  *	Most other elements of the macinfo structure may only be read or
3012  *	written while holding the maclock.
3013  *
3014  *	Most writable elements of the gld_t structure are written only
3015  *	within the single-threaded domain of wsrv() and subsidiaries.
3016  *	(This domain includes open/close while qprocs are not on.)
3017  *	The maclock need not be taken while within that domain
3018  *	simply to read those elements.  Writing to them, even within
3019  *	that domain, or reading from it outside that domain, requires
3020  *	holding the maclock.  Exception:  if the stream is not
3021  *	presently attached to a PPA, there is no associated macinfo,
3022  *	and no maclock need be taken.
3023  *
3024  *	The curr_macaddr element of the mac private structure is also
3025  *      protected by the GLDM_LOCK rwlock/mutex, like most other members
3026  *      of that structure. However, there are a few instances in the
3027  *      transmit path where we choose to forgo lock protection when
3028  *      reading this variable. This is to avoid lock contention between
3029  *      threads executing the DL_UNITDATA_REQ case and receive threads.
3030  *      In doing so we will take a small risk of a few corrupted packets
3031  *      during the short and rare times when someone is changing the interface's
3032  *      physical address. We consider the small cost in this rare case to be
3033  *      worth the benefit of reduced lock contention under normal operating
3034  *      conditions. The risk/cost is small because:
3035  *          1. there is no guarantee at this layer of uncorrupted delivery.
3036  *          2. the physaddr doesn't change very often - no performance hit.
3037  *          3. if the physaddr changes, other stuff is going to be screwed
3038  *             up for a while anyway, while other sites refigure ARP, etc.,
3039  *             so losing a couple of packets is the least of our worries.
3040  *
3041  *	The list of streams associated with a macinfo is protected by
3042  *	two locks:  the per-macinfo maclock, and the per-major-device
3043  *	gld_devlock.  Both must be held to modify the list, but either
3044  *	may be held to protect the list during reading/traversing.  This
3045  *	allows independent locking for multiple instances in the receive
3046  *	path (using macinfo), while facilitating routines that must search
3047  *	the entire set of streams associated with a major device, such as
3048  *	gld_findminor(), gld_finddevinfo(), close().  The "nstreams"
3049  *	macinfo	element, and the gld_mac_info gld_t element, are similarly
3050  *	macinfo element, and the gld_mac_info gld_t element, are similarly
3051  *	protected, since they change at exactly the same time the
3052  *	macinfo streams list does.
3053  *	The list of macinfo structures associated with a major device
3054  *	structure is protected by the gld_devlock, as is the per-major
3055  *	list of Style 2 streams in the DL_UNATTACHED state.
3056  *
3057  *	The list of major devices is kept on a module-global list
3058  *	gld_device_list, which has its own lock to protect the list.
3059  *
3060  *	When it is necessary to hold more than one lock at a time, they
3061  *	are acquired in this "outside in" order:
3062  *		gld_device_list.gld_devlock
3063  *		glddev->gld_devlock
3064  *		GLDM_LOCK(macinfo)
3065  *
3066  *	Finally, there are some "volatile" elements of the gld_t structure
3067  *	used for synchronization between various routines that don't share
3068  *	the same mutexes.  See the routines for details.  These are:
3069  *		gld_xwait	between gld_wsrv() and gld_sched()
3070  *		gld_sched_ran	between gld_wsrv() and gld_sched()
3071  *		gld_in_unbind	between gld_wput() and wsrv's gld_unbind()
3072  *		gld_wput_count	between gld_wput() and wsrv's gld_unbind()
3073  *		gld_in_wsrv	between gld_wput() and gld_wsrv()
3074  *				(used in conjunction with q->q_first)
3075  */
3076 
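/*
 * Illustrative sketch, not part of GLD, of the "outside in" lock order
 * described above for a hypothetical routine that must walk every
 * macinfo on a major device while examining per-mac state:
 *
 *	mutex_enter(&gld_device_list.gld_devlock);
 *	mutex_enter(&glddev->gld_devlock);
 *	GLDM_LOCK(macinfo, RW_READER);
 *	... examine per-mac and per-stream state ...
 *	GLDM_UNLOCK(macinfo);
 *	mutex_exit(&glddev->gld_devlock);
 *	mutex_exit(&gld_device_list.gld_devlock);
 */
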
3077 /*
3078  * gld_ioctl (q, mp)
3079  * handles all ioctl requests passed downstream. This routine is
3080  * passed a pointer to the message block with the ioctl request in it, and a
3081  * pointer to the queue so it can respond to the ioctl request with an ack.
3082  */
3083 int
3084 gld_ioctl(queue_t *q, mblk_t *mp)
3085 {
3086 	struct iocblk *iocp;
3087 	gld_t *gld;
3088 	gld_mac_info_t *macinfo;
3089 
3090 #ifdef GLD_DEBUG
3091 	if (gld_debug & GLDTRACE)
3092 		cmn_err(CE_NOTE, "gld_ioctl(%p %p)", (void *)q, (void *)mp);
3093 #endif
3094 	gld = (gld_t *)q->q_ptr;
3095 	iocp = (struct iocblk *)mp->b_rptr;
3096 	switch (iocp->ioc_cmd) {
3097 	case DLIOCRAW:		/* raw M_DATA mode */
3098 		gld->gld_flags |= GLD_RAW;
3099 		DB_TYPE(mp) = M_IOCACK;
3100 		qreply(q, mp);
3101 		break;
3102 
3103 	case DL_IOC_HDR_INFO:	/* fastpath */
3104 		if (gld_global_options & GLD_OPT_NO_FASTPATH) {
3105 			miocnak(q, mp, 0, EINVAL);
3106 			break;
3107 		}
3108 		gld_fastpath(gld, q, mp);
3109 		break;
3110 
3111 	default:
3112 		macinfo	 = gld->gld_mac_info;
3113 		if (macinfo == NULL || macinfo->gldm_ioctl == NULL) {
3114 			miocnak(q, mp, 0, EINVAL);
3115 			break;
3116 		}
3117 
3118 		GLDM_LOCK(macinfo, RW_WRITER);
3119 		(void) (*macinfo->gldm_ioctl) (macinfo, q, mp);
3120 		GLDM_UNLOCK(macinfo);
3121 		break;
3122 	}
3123 	return (0);
3124 }
3125 
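/*
 * Illustrative user-level sketch, not part of GLD: DLIOCRAW arrives
 * here when an application issues it as an I_STR STREAMS ioctl on an
 * open DLPI stream; "fd" below is the open stream descriptor and no
 * data accompanies the request.
 *
 *	struct strioctl sioc;
 *
 *	sioc.ic_cmd = DLIOCRAW;
 *	sioc.ic_timout = -1;
 *	sioc.ic_len = 0;
 *	sioc.ic_dp = NULL;
 *	if (ioctl(fd, I_STR, &sioc) < 0)
 *		... DLIOCRAW was refused ...
 */
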
3126 /*
3127  * Since the rules for "fastpath" mode don't seem to be documented
3128  * anywhere, I will describe GLD's rules for fastpath users here:
3129  *
3130  * Once in this mode you remain there until close.
3131  * If you unbind/rebind you should get a new header using DL_IOC_HDR_INFO.
3132  * You must be bound (DL_IDLE) to transmit.
3133  * There are other rules not listed above.
3134  */
3135 static void
3136 gld_fastpath(gld_t *gld, queue_t *q, mblk_t *mp)
3137 {
3138 	gld_interface_t *ifp;
3139 	gld_mac_info_t *macinfo;
3140 	dl_unitdata_req_t *dludp;
3141 	mblk_t *nmp;
3142 	t_scalar_t off, len;
3143 	uint_t maclen;
3144 	int error;
3145 	gld_vlan_t *vlan;
3146 
3147 	if (gld->gld_state != DL_IDLE) {
3148 		miocnak(q, mp, 0, EINVAL);
3149 		return;
3150 	}
3151 
3152 	macinfo = gld->gld_mac_info;
3153 	ASSERT(macinfo != NULL);
3154 	maclen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen);
3155 
3156 	error = miocpullup(mp, sizeof (dl_unitdata_req_t) + maclen);
3157 	if (error != 0) {
3158 		miocnak(q, mp, 0, error);
3159 		return;
3160 	}
3161 
3162 	dludp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
3163 	off = dludp->dl_dest_addr_offset;
3164 	len = dludp->dl_dest_addr_length;
3165 	if (dludp->dl_primitive != DL_UNITDATA_REQ ||
3166 	    !MBLKIN(mp->b_cont, off, len) || len != maclen) {
3167 		miocnak(q, mp, 0, EINVAL);
3168 		return;
3169 	}
3170 
3171 	/*
3172 	 * We take his fastpath request as a declaration that he will accept
3173 	 * M_DATA messages from us, whether or not we are willing to accept
3174 	 * them from him.  This allows us to have fastpath in one direction
3175 	 * (flow upstream) even on media with Source Routing, where we are
3176 	 * unable to provide a fixed MAC header to be prepended to downstream
3177 	 * flowing packets.  So we set GLD_FAST whether or not we decide to
3178 	 * allow him to send M_DATA down to us.
3179 	 */
3180 	GLDM_LOCK(macinfo, RW_WRITER);
3181 	gld->gld_flags |= GLD_FAST;
3182 	vlan = (gld_vlan_t *)gld->gld_vlan;
3183 	vlan->gldv_ipq_flags &= ~IPQ_DISABLED;
3184 	GLDM_UNLOCK(macinfo);
3185 
3186 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
3187 
3188 	/* This will fail for Source Routing media */
3189 	/* Also on Ethernet on 802.2 SAPs */
3190 	if ((nmp = (*ifp->mkfastpath)(gld, mp)) == NULL) {
3191 		miocnak(q, mp, 0, ENOMEM);
3192 		return;
3193 	}
3194 
3195 	/*
3196 	 * Link new mblk in after the "request" mblks.
3197 	 */
3198 	linkb(mp, nmp);
3199 	miocack(q, mp, msgdsize(mp->b_cont), 0);
3200 }
3201 
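/*
 * Illustrative sketch, not part of GLD: an upper module requests
 * fastpath by sending down an M_IOCTL with ioc_cmd DL_IOC_HDR_INFO
 * whose b_cont holds a DL_UNITDATA_REQ naming the destination; on
 * success gld_fastpath() above links the prebuilt MAC header mblk
 * after that request in the M_IOCACK.  For a MAC with gldm_saplen
 * of -2 (e.g. Ethernet) the request payload checked above is laid
 * out roughly as:
 *
 *	dl_unitdata_req_t hdr;			fixed DLPI header
 *	uchar_t dest[gldm_addrlen];		destination MAC address
 *	ushort_t sap;				SAP / ethertype
 *
 * with hdr.dl_dest_addr_offset pointing at the address (typically just
 * past the fixed header) and hdr.dl_dest_addr_length equal to
 * gldm_addrlen + abs(gldm_saplen).
 */
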
3202 /*
3203  * gld_cmds (q, mp)
3204  *	process the DL commands as defined in dlpi.h
3205  *	note that the primitives return status which is passed back
3206  *	to the service procedure.  If the value is GLDE_RETRY, then
3207  *	it is assumed that processing must stop and the primitive has
3208  *	been put back onto the queue.  If the value is any other error,
3209  *	then an error ack is generated by the service procedure.
3210  */
3211 static int
3212 gld_cmds(queue_t *q, mblk_t *mp)
3213 {
3214 	union DL_primitives *dlp = (union DL_primitives *)mp->b_rptr;
3215 	gld_t *gld = (gld_t *)(q->q_ptr);
3216 	int result = DL_BADPRIM;
3217 	int mblkl = MBLKL(mp);
3218 	t_uscalar_t dlreq;
3219 
3220 	/* Make sure we have at least dlp->dl_primitive */
3221 	if (mblkl < sizeof (dlp->dl_primitive))
3222 		return (DL_BADPRIM);
3223 
3224 	dlreq = dlp->dl_primitive;
3225 #ifdef	GLD_DEBUG
3226 	if (gld_debug & GLDTRACE)
3227 		cmn_err(CE_NOTE,
3228 		    "gld_cmds(%p, %p):dlp=%p, dlp->dl_primitive=%d",
3229 		    (void *)q, (void *)mp, (void *)dlp, dlreq);
3230 #endif
3231 
3232 	switch (dlreq) {
3233 	case DL_UDQOS_REQ:
3234 		if (mblkl < DL_UDQOS_REQ_SIZE)
3235 			break;
3236 		result = gld_udqos(q, mp);
3237 		break;
3238 
3239 	case DL_BIND_REQ:
3240 		if (mblkl < DL_BIND_REQ_SIZE)
3241 			break;
3242 		result = gld_bind(q, mp);
3243 		break;
3244 
3245 	case DL_UNBIND_REQ:
3246 		if (mblkl < DL_UNBIND_REQ_SIZE)
3247 			break;
3248 		result = gld_unbind(q, mp);
3249 		break;
3250 
3251 	case DL_UNITDATA_REQ:
3252 		if (mblkl < DL_UNITDATA_REQ_SIZE)
3253 			break;
3254 		result = gld_unitdata(q, mp);
3255 		break;
3256 
3257 	case DL_INFO_REQ:
3258 		if (mblkl < DL_INFO_REQ_SIZE)
3259 			break;
3260 		result = gld_inforeq(q, mp);
3261 		break;
3262 
3263 	case DL_ATTACH_REQ:
3264 		if (mblkl < DL_ATTACH_REQ_SIZE)
3265 			break;
3266 		if (gld->gld_style == DL_STYLE2)
3267 			result = gldattach(q, mp);
3268 		else
3269 			result = DL_NOTSUPPORTED;
3270 		break;
3271 
3272 	case DL_DETACH_REQ:
3273 		if (mblkl < DL_DETACH_REQ_SIZE)
3274 			break;
3275 		if (gld->gld_style == DL_STYLE2)
3276 			result = gldunattach(q, mp);
3277 		else
3278 			result = DL_NOTSUPPORTED;
3279 		break;
3280 
3281 	case DL_ENABMULTI_REQ:
3282 		if (mblkl < DL_ENABMULTI_REQ_SIZE)
3283 			break;
3284 		result = gld_enable_multi(q, mp);
3285 		break;
3286 
3287 	case DL_DISABMULTI_REQ:
3288 		if (mblkl < DL_DISABMULTI_REQ_SIZE)
3289 			break;
3290 		result = gld_disable_multi(q, mp);
3291 		break;
3292 
3293 	case DL_PHYS_ADDR_REQ:
3294 		if (mblkl < DL_PHYS_ADDR_REQ_SIZE)
3295 			break;
3296 		result = gld_physaddr(q, mp);
3297 		break;
3298 
3299 	case DL_SET_PHYS_ADDR_REQ:
3300 		if (mblkl < DL_SET_PHYS_ADDR_REQ_SIZE)
3301 			break;
3302 		result = gld_setaddr(q, mp);
3303 		break;
3304 
3305 	case DL_PROMISCON_REQ:
3306 		if (mblkl < DL_PROMISCON_REQ_SIZE)
3307 			break;
3308 		result = gld_promisc(q, mp, dlreq, B_TRUE);
3309 		break;
3310 
3311 	case DL_PROMISCOFF_REQ:
3312 		if (mblkl < DL_PROMISCOFF_REQ_SIZE)
3313 			break;
3314 		result = gld_promisc(q, mp, dlreq, B_FALSE);
3315 		break;
3316 
3317 	case DL_GET_STATISTICS_REQ:
3318 		if (mblkl < DL_GET_STATISTICS_REQ_SIZE)
3319 			break;
3320 		result = gld_get_statistics(q, mp);
3321 		break;
3322 
3323 	case DL_CAPABILITY_REQ:
3324 		if (mblkl < DL_CAPABILITY_REQ_SIZE)
3325 			break;
3326 		result = gld_cap(q, mp);
3327 		break;
3328 
3329 	case DL_NOTIFY_REQ:
3330 		if (mblkl < DL_NOTIFY_REQ_SIZE)
3331 			break;
3332 		result = gld_notify_req(q, mp);
3333 		break;
3334 
3335 	case DL_XID_REQ:
3336 	case DL_XID_RES:
3337 	case DL_TEST_REQ:
3338 	case DL_TEST_RES:
3339 	case DL_CONTROL_REQ:
3340 	case DL_PASSIVE_REQ:
3341 		result = DL_NOTSUPPORTED;
3342 		break;
3343 
3344 	default:
3345 #ifdef	GLD_DEBUG
3346 		if (gld_debug & GLDERRS)
3347 			cmn_err(CE_WARN,
3348 			    "gld_cmds: unknown M_PROTO message: %d",
3349 			    dlreq);
3350 #endif
3351 		result = DL_BADPRIM;
3352 	}
3353 
3354 	return (result);
3355 }
3356 
3357 static int
3358 gld_cap(queue_t *q, mblk_t *mp)
3359 {
3360 	gld_t *gld = (gld_t *)q->q_ptr;
3361 	dl_capability_req_t *dlp = (dl_capability_req_t *)mp->b_rptr;
3362 
3363 	if (gld->gld_state == DL_UNATTACHED)
3364 		return (DL_OUTSTATE);
3365 
3366 	if (dlp->dl_sub_length == 0)
3367 		return (gld_cap_ack(q, mp));
3368 
3369 	return (gld_cap_enable(q, mp));
3370 }
3371 
3372 static int
3373 gld_cap_ack(queue_t *q, mblk_t *mp)
3374 {
3375 	gld_t *gld = (gld_t *)q->q_ptr;
3376 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3377 	gld_interface_t *ifp;
3378 	dl_capability_ack_t *dlap;
3379 	dl_capability_sub_t *dlsp;
3380 	size_t size = sizeof (dl_capability_ack_t);
3381 	size_t subsize = 0;
3382 
3383 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
3384 
3385 	if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY)
3386 		subsize += sizeof (dl_capability_sub_t) +
3387 		    sizeof (dl_capab_hcksum_t);
3388 	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
3389 		subsize += sizeof (dl_capability_sub_t) +
3390 		    sizeof (dl_capab_zerocopy_t);
3391 	if (macinfo->gldm_options & GLDOPT_MDT)
3392 		subsize += (sizeof (dl_capability_sub_t) +
3393 		    sizeof (dl_capab_mdt_t));
3394 
3395 	if ((mp = mexchange(q, mp, size + subsize, M_PROTO,
3396 	    DL_CAPABILITY_ACK)) == NULL)
3397 		return (GLDE_OK);
3398 
3399 	dlap = (dl_capability_ack_t *)mp->b_rptr;
3400 	dlap->dl_sub_offset = 0;
3401 	if ((dlap->dl_sub_length = subsize) != 0)
3402 		dlap->dl_sub_offset = sizeof (dl_capability_ack_t);
3403 	dlsp = (dl_capability_sub_t *)&dlap[1];
3404 
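	/*
	 * Each advertised capability is appended as a dl_capability_sub_t
	 * header immediately followed by its capability-specific structure;
	 * dlsp is advanced past each entry as it is filled in below.
	 */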
3405 	if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY) {
3406 		dl_capab_hcksum_t *dlhp = (dl_capab_hcksum_t *)&dlsp[1];
3407 
3408 		dlsp->dl_cap = DL_CAPAB_HCKSUM;
3409 		dlsp->dl_length = sizeof (dl_capab_hcksum_t);
3410 
3411 		dlhp->hcksum_version = HCKSUM_VERSION_1;
3412 
3413 		dlhp->hcksum_txflags = 0;
3414 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_PARTIAL)
3415 			dlhp->hcksum_txflags |= HCKSUM_INET_PARTIAL;
3416 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V4)
3417 			dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V4;
3418 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_IPHDR)
3419 			dlhp->hcksum_txflags |= HCKSUM_IPHDRCKSUM;
3420 
3421 		dlcapabsetqid(&(dlhp->hcksum_mid), RD(q));
3422 		dlsp = (dl_capability_sub_t *)&dlhp[1];
3423 	}
3424 
3425 	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY) {
3426 		dl_capab_zerocopy_t *dlzp = (dl_capab_zerocopy_t *)&dlsp[1];
3427 
3428 		dlsp->dl_cap = DL_CAPAB_ZEROCOPY;
3429 		dlsp->dl_length = sizeof (dl_capab_zerocopy_t);
3430 		dlzp->zerocopy_version = ZEROCOPY_VERSION_1;
3431 		dlzp->zerocopy_flags = DL_CAPAB_VMSAFE_MEM;
3432 
3433 		dlcapabsetqid(&(dlzp->zerocopy_mid), RD(q));
3434 		dlsp = (dl_capability_sub_t *)&dlzp[1];
3435 	}
3436 
3437 	if (macinfo->gldm_options & GLDOPT_MDT) {
3438 		dl_capab_mdt_t *dlmp = (dl_capab_mdt_t *)&dlsp[1];
3439 
3440 		dlsp->dl_cap = DL_CAPAB_MDT;
3441 		dlsp->dl_length = sizeof (dl_capab_mdt_t);
3442 
3443 		dlmp->mdt_version = MDT_VERSION_2;
3444 		dlmp->mdt_max_pld = macinfo->gldm_mdt_segs;
3445 		dlmp->mdt_span_limit = macinfo->gldm_mdt_sgl;
3446 		dlcapabsetqid(&dlmp->mdt_mid, OTHERQ(q));
3447 		dlmp->mdt_flags = DL_CAPAB_MDT_ENABLE;
3448 		dlmp->mdt_hdr_head = ifp->hdr_size;
3449 		dlmp->mdt_hdr_tail = 0;
3450 	}
3451 
3452 	qreply(q, mp);
3453 	return (GLDE_OK);
3454 }
3455 
3456 static int
3457 gld_cap_enable(queue_t *q, mblk_t *mp)
3458 {
3459 	dl_capability_req_t *dlp;
3460 	dl_capability_sub_t *dlsp;
3461 	dl_capab_hcksum_t *dlhp;
3462 	offset_t off;
3463 	size_t len;
3464 	size_t size;
3465 	offset_t end;
3466 
3467 	dlp = (dl_capability_req_t *)mp->b_rptr;
3468 	dlp->dl_primitive = DL_CAPABILITY_ACK;
3469 
3470 	off = dlp->dl_sub_offset;
3471 	len = dlp->dl_sub_length;
3472 
3473 	if (!MBLKIN(mp, off, len))
3474 		return (DL_BADPRIM);
3475 
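	/*
	 * Walk the list of requested sub-capabilities; each entry is a
	 * dl_capability_sub_t header followed by dl_length bytes of
	 * capability-specific data.
	 */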
3476 	end = off + len;
3477 	while (off < end) {
3478 		dlsp = (dl_capability_sub_t *)(mp->b_rptr + off);
3479 		size = sizeof (dl_capability_sub_t) + dlsp->dl_length;
3480 		if (off + size > end)
3481 			return (DL_BADPRIM);
3482 
3483 		switch (dlsp->dl_cap) {
3484 		case DL_CAPAB_HCKSUM:
3485 			dlhp = (dl_capab_hcksum_t *)&dlsp[1];
3486 			/* nothing useful we can do with the contents */
3487 			dlcapabsetqid(&(dlhp->hcksum_mid), RD(q));
3488 			break;
3489 		default:
3490 			break;
3491 		}
3492 
3493 		off += size;
3494 	}
3495 
3496 	qreply(q, mp);
3497 	return (GLDE_OK);
3498 }
3499 
3500 /*
3501  * Send a copy of the DL_NOTIFY_IND message <mp> to each stream that has
3502  * requested the specific <notification> that the message carries AND is
3503  * eligible and ready to receive the notification immediately.
3504  *
3505  * This routine ignores flow control. Notifications will be sent regardless.
3506  *
3507  * In all cases, the original message passed in is freed at the end of
3508  * the routine.
3509  */
3510 static void
3511 gld_notify_qs(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t notification)
3512 {
3513 	gld_mac_pvt_t *mac_pvt;
3514 	gld_vlan_t *vlan;
3515 	gld_t *gld;
3516 	mblk_t *nmp;
3517 	int i;
3518 
3519 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
3520 
3521 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3522 
3523 	/*
3524 	 * Search all the streams attached to this macinfo looking
3525 	 * for those eligible to receive the present notification.
3526 	 */
3527 	for (i = 0; i < VLAN_HASHSZ; i++) {
3528 		for (vlan = mac_pvt->vlan_hash[i];
3529 		    vlan != NULL; vlan = vlan->gldv_next) {
3530 			for (gld = vlan->gldv_str_next;
3531 			    gld != (gld_t *)&vlan->gldv_str_next;
3532 			    gld = gld->gld_next) {
3533 				ASSERT(gld->gld_qptr != NULL);
3534 				ASSERT(gld->gld_state == DL_IDLE ||
3535 				    gld->gld_state == DL_UNBOUND);
3536 				ASSERT(gld->gld_mac_info == macinfo);
3537 
3538 				if (gld->gld_flags & GLD_STR_CLOSING)
3539 					continue; /* not eligible - skip */
3540 				if (!(notification & gld->gld_notifications))
3541 					continue; /* not wanted - skip */
3542 				if ((nmp = dupmsg(mp)) == NULL)
3543 					continue; /* can't copy - skip */
3544 
3545 				/*
3546 				 * All OK; send dup'd notification up this
3547 				 * stream
3548 				 */
3549 				qreply(WR(gld->gld_qptr), nmp);
3550 			}
3551 		}
3552 	}
3553 
3554 	/*
3555 	 * Drop the original message block now
3556 	 */
3557 	freemsg(mp);
3558 }
3559 
3560 /*
3561  * For each (understood) bit in the <notifications> argument, construct
3562  * a DL_NOTIFY_IND message and send it to the specified <q>, or to all
3563  * eligible queues if <q> is NULL.
3564  */
3565 static void
3566 gld_notify_ind(gld_mac_info_t *macinfo, uint32_t notifications, queue_t *q)
3567 {
3568 	gld_mac_pvt_t *mac_pvt;
3569 	dl_notify_ind_t *dlnip;
3570 	struct gld_stats *stats;
3571 	mblk_t *mp;
3572 	size_t size;
3573 	uint32_t bit;
3574 
3575 	GLDM_LOCK(macinfo, RW_WRITER);
3576 
3577 	/*
3578 	 * The following cases shouldn't happen, but just in case the
3579 	 * MAC driver calls gld_linkstate() at an inappropriate time, we
3580 	 * check anyway ...
3581 	 */
3582 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
3583 		GLDM_UNLOCK(macinfo);
3584 		return;				/* not ready yet	*/
3585 	}
3586 
3587 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
3588 		GLDM_UNLOCK(macinfo);
3589 		return;				/* not ready anymore	*/
3590 	}
3591 
3592 	/*
3593 	 * Make sure the kstats are up to date, because we use some of
3594 	 * the kstat values below, specifically the link speed ...
3595 	 */
3596 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3597 	stats = mac_pvt->statistics;
3598 	if (macinfo->gldm_get_stats)
3599 		(void) (*macinfo->gldm_get_stats)(macinfo, stats);
3600 
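	/*
	 * Process the notification mask one bit at a time: build a
	 * DL_NOTIFY_IND for each bit, and send it to <q> (or to every
	 * eligible stream if q is NULL) only if that notification
	 * currently applies.
	 */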
3601 	for (bit = 1; notifications != 0; bit <<= 1) {
3602 		if ((notifications & bit) == 0)
3603 			continue;
3604 		notifications &= ~bit;
3605 
3606 		size = DL_NOTIFY_IND_SIZE;
3607 		if (bit == DL_NOTE_PHYS_ADDR)
3608 			size += macinfo->gldm_addrlen;
3609 		if ((mp = allocb(size, BPRI_MED)) == NULL)
3610 			continue;
3611 
3612 		mp->b_datap->db_type = M_PROTO;
3613 		mp->b_wptr = mp->b_rptr + size;
3614 		dlnip = (dl_notify_ind_t *)mp->b_rptr;
3615 		dlnip->dl_primitive = DL_NOTIFY_IND;
3616 		dlnip->dl_notification = 0;
3617 		dlnip->dl_data = 0;
3618 		dlnip->dl_addr_length = 0;
3619 		dlnip->dl_addr_offset = 0;
3620 
3621 		switch (bit) {
3622 		case DL_NOTE_PROMISC_ON_PHYS:
3623 		case DL_NOTE_PROMISC_OFF_PHYS:
3624 			if (mac_pvt->nprom != 0)
3625 				dlnip->dl_notification = bit;
3626 			break;
3627 
3628 		case DL_NOTE_LINK_DOWN:
3629 			if (macinfo->gldm_linkstate == GLD_LINKSTATE_DOWN)
3630 				dlnip->dl_notification = bit;
3631 			break;
3632 
3633 		case DL_NOTE_LINK_UP:
3634 			if (macinfo->gldm_linkstate == GLD_LINKSTATE_UP)
3635 				dlnip->dl_notification = bit;
3636 			break;
3637 
3638 		case DL_NOTE_SPEED:
3639 			/*
3640 			 * Conversion required here:
3641 			 *	GLD keeps the speed in bit/s in a uint64
3642 			 *	DLPI wants it in kb/s in a uint32
3643 			 * Fortunately this is still big enough for 10Gb/s!
3644 			 */
3645 			dlnip->dl_notification = bit;
3646 			dlnip->dl_data = stats->glds_speed/1000ULL;
3647 			break;
3648 
3649 		case DL_NOTE_PHYS_ADDR:
3650 			dlnip->dl_notification = bit;
3651 			dlnip->dl_data = DL_CURR_PHYS_ADDR;
3652 			dlnip->dl_addr_offset = sizeof (dl_notify_ind_t);
3653 			dlnip->dl_addr_length = macinfo->gldm_addrlen +
3654 			    abs(macinfo->gldm_saplen);
3655 			mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3656 			mac_copy(mac_pvt->curr_macaddr,
3657 			    mp->b_rptr + sizeof (dl_notify_ind_t),
3658 			    macinfo->gldm_addrlen);
3659 			break;
3660 
3661 		default:
3662 			break;
3663 		}
3664 
3665 		if (dlnip->dl_notification == 0)
3666 			freemsg(mp);
3667 		else if (q != NULL)
3668 			qreply(q, mp);
3669 		else
3670 			gld_notify_qs(macinfo, mp, bit);
3671 	}
3672 
3673 	GLDM_UNLOCK(macinfo);
3674 }
3675 
3676 /*
3677  * gld_notify_req - handle a DL_NOTIFY_REQ message
3678  */
3679 static int
3680 gld_notify_req(queue_t *q, mblk_t *mp)
3681 {
3682 	gld_t *gld = (gld_t *)q->q_ptr;
3683 	gld_mac_info_t *macinfo;
3684 	gld_mac_pvt_t *pvt;
3685 	dl_notify_req_t *dlnrp;
3686 	dl_notify_ack_t *dlnap;
3687 
3688 	ASSERT(gld != NULL);
3689 	ASSERT(gld->gld_qptr == RD(q));
3690 
3691 	dlnrp = (dl_notify_req_t *)mp->b_rptr;
3692 
3693 #ifdef GLD_DEBUG
3694 	if (gld_debug & GLDTRACE)
3695 		cmn_err(CE_NOTE, "gld_notify_req(%p %p)",
3696 			(void *)q, (void *)mp);
3697 #endif
3698 
3699 	if (gld->gld_state == DL_UNATTACHED) {
3700 #ifdef GLD_DEBUG
3701 		if (gld_debug & GLDERRS)
3702 			cmn_err(CE_NOTE, "gld_notify_req: wrong state (%d)",
3703 				gld->gld_state);
3704 #endif
3705 		return (DL_OUTSTATE);
3706 	}
3707 
3708 	/*
3709 	 * Remember what notifications are required by this stream
3710 	 */
3711 	macinfo = gld->gld_mac_info;
3712 	pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3713 
3714 	gld->gld_notifications = dlnrp->dl_notifications & pvt->notifications;
3715 
3716 	/*
3717 	 * The return DL_NOTIFY_ACK carries the bitset of notifications
3718 	 * that this driver can provide, independently of which ones have
3719 	 * previously been or are now being requested.
3720 	 */
3721 	if ((mp = mexchange(q, mp, sizeof (dl_notify_ack_t), M_PCPROTO,
3722 	    DL_NOTIFY_ACK)) == NULL)
3723 		return (DL_SYSERR);
3724 
3725 	dlnap = (dl_notify_ack_t *)mp->b_rptr;
3726 	dlnap->dl_notifications = pvt->notifications;
3727 	qreply(q, mp);
3728 
3729 	/*
3730 	 * A side effect of a DL_NOTIFY_REQ is that after the DL_NOTIFY_ACK
3731 	 * reply, the requestor gets zero or more DL_NOTIFY_IND messages
3732 	 * that provide the current status.
3733 	 */
3734 	gld_notify_ind(macinfo, gld->gld_notifications, q);
3735 
3736 	return (GLDE_OK);
3737 }
3738 
3739 /*
3740  * gld_linkstate()
3741  *	Called by driver to tell GLD the state of the physical link.
3742  *	As a side effect, sends a DL_NOTE_LINK_UP or DL_NOTE_LINK_DOWN
3743  *	notification to each client that has previously requested such
3744  *	notifications.
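 *
 *	A driver would typically call this from its interrupt or
 *	link-poll routine when it detects a change, e.g.
 *		gld_linkstate(macinfo, GLD_LINKSTATE_UP);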
3745  */
3746 void
3747 gld_linkstate(gld_mac_info_t *macinfo, int32_t newstate)
3748 {
3749 	uint32_t notification;
3750 
3751 	switch (newstate) {
3752 	default:
3753 		return;
3754 
3755 	case GLD_LINKSTATE_DOWN:
3756 		notification = DL_NOTE_LINK_DOWN;
3757 		break;
3758 
3759 	case GLD_LINKSTATE_UP:
3760 		notification = DL_NOTE_LINK_UP | DL_NOTE_SPEED;
3761 		break;
3762 
3763 	case GLD_LINKSTATE_UNKNOWN:
3764 		notification = 0;
3765 		break;
3766 	}
3767 
3768 	GLDM_LOCK(macinfo, RW_WRITER);
3769 	if (macinfo->gldm_linkstate == newstate)
3770 		notification = 0;
3771 	else
3772 		macinfo->gldm_linkstate = newstate;
3773 	GLDM_UNLOCK(macinfo);
3774 
3775 	if (notification)
3776 		gld_notify_ind(macinfo, notification, NULL);
3777 }
3778 
3779 /*
3780  * gld_udqos - set the current QoS parameters (priority only at the moment).
3781  */
3782 static int
3783 gld_udqos(queue_t *q, mblk_t *mp)
3784 {
3785 	dl_udqos_req_t *dlp;
3786 	gld_t  *gld = (gld_t *)q->q_ptr;
3787 	int off;
3788 	int len;
3789 	dl_qos_cl_sel1_t *selp;
3790 
3791 	ASSERT(gld);
3792 	ASSERT(gld->gld_qptr == RD(q));
3793 
3794 #ifdef GLD_DEBUG
3795 	if (gld_debug & GLDTRACE)
3796 		cmn_err(CE_NOTE, "gld_udqos(%p %p)", (void *)q, (void *)mp);
3797 #endif
3798 
3799 	if (gld->gld_state != DL_IDLE) {
3800 #ifdef GLD_DEBUG
3801 		if (gld_debug & GLDERRS)
3802 			cmn_err(CE_NOTE, "gld_udqos: wrong state (%d)",
3803 			    gld->gld_state);
3804 #endif
3805 		return (DL_OUTSTATE);
3806 	}
3807 
3808 	dlp = (dl_udqos_req_t *)mp->b_rptr;
3809 	off = dlp->dl_qos_offset;
3810 	len = dlp->dl_qos_length;
3811 
3812 	if (len != sizeof (dl_qos_cl_sel1_t) || !MBLKIN(mp, off, len))
3813 		return (DL_BADQOSTYPE);
3814 
3815 	selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + off);
3816 	if (selp->dl_qos_type != DL_QOS_CL_SEL1)
3817 		return (DL_BADQOSTYPE);
3818 
3819 	if (selp->dl_trans_delay != 0 &&
3820 	    selp->dl_trans_delay != DL_QOS_DONT_CARE)
3821 		return (DL_BADQOSPARAM);
3822 	if (selp->dl_protection != 0 &&
3823 	    selp->dl_protection != DL_QOS_DONT_CARE)
3824 		return (DL_BADQOSPARAM);
3825 	if (selp->dl_residual_error != 0 &&
3826 	    selp->dl_residual_error != DL_QOS_DONT_CARE)
3827 		return (DL_BADQOSPARAM);
3828 	if (selp->dl_priority < 0 || selp->dl_priority > 7)
3829 		return (DL_BADQOSPARAM);
3830 
3831 	gld->gld_upri = selp->dl_priority;
3832 
3833 	dlokack(q, mp, DL_UDQOS_REQ);
3834 	return (GLDE_OK);
3835 }
3836 
3837 static mblk_t *
3838 gld_bindack(queue_t *q, mblk_t *mp)
3839 {
3840 	gld_t *gld = (gld_t *)q->q_ptr;
3841 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3842 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3843 	dl_bind_ack_t *dlp;
3844 	size_t size;
3845 	t_uscalar_t addrlen;
3846 	uchar_t *sapp;
3847 
3848 	addrlen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen);
3849 	size = sizeof (dl_bind_ack_t) + addrlen;
3850 	if ((mp = mexchange(q, mp, size, M_PCPROTO, DL_BIND_ACK)) == NULL)
3851 		return (NULL);
3852 
3853 	dlp = (dl_bind_ack_t *)mp->b_rptr;
3854 	dlp->dl_sap = gld->gld_sap;
3855 	dlp->dl_addr_length = addrlen;
3856 	dlp->dl_addr_offset = sizeof (dl_bind_ack_t);
3857 	dlp->dl_max_conind = 0;
3858 	dlp->dl_xidtest_flg = 0;
3859 
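	/*
	 * The address returned in the ack is the current MAC address with
	 * the bound SAP appended, matching the addrlen + |saplen| layout
	 * advertised in DL_INFO_ACK.
	 */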
3860 	mac_copy(mac_pvt->curr_macaddr, (uchar_t *)&dlp[1],
3861 	    macinfo->gldm_addrlen);
3862 	sapp = mp->b_rptr + dlp->dl_addr_offset + macinfo->gldm_addrlen;
3863 	*(ushort_t *)sapp = gld->gld_sap;
3864 
3865 	return (mp);
3866 }
3867 
3868 /*
3869  * gld_bind - determine if a SAP is already allocated and whether it is legal
3870  * to do the bind at this time
3871  */
3872 static int
3873 gld_bind(queue_t *q, mblk_t *mp)
3874 {
3875 	ulong_t	sap;
3876 	dl_bind_req_t *dlp;
3877 	gld_t *gld = (gld_t *)q->q_ptr;
3878 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3879 
3880 	ASSERT(gld);
3881 	ASSERT(gld->gld_qptr == RD(q));
3882 
3883 #ifdef GLD_DEBUG
3884 	if (gld_debug & GLDTRACE)
3885 		cmn_err(CE_NOTE, "gld_bind(%p %p)", (void *)q, (void *)mp);
3886 #endif
3887 
3888 	dlp = (dl_bind_req_t *)mp->b_rptr;
3889 	sap = dlp->dl_sap;
3890 
3891 #ifdef GLD_DEBUG
3892 	if (gld_debug & GLDPROT)
3893 		cmn_err(CE_NOTE, "gld_bind: lsap=%lx", sap);
3894 #endif
3895 
3896 	if (gld->gld_state != DL_UNBOUND) {
3897 #ifdef GLD_DEBUG
3898 		if (gld_debug & GLDERRS)
3899 			cmn_err(CE_NOTE, "gld_bind: bound or not attached (%d)",
3900 				gld->gld_state);
3901 #endif
3902 		return (DL_OUTSTATE);
3903 	}
3904 	ASSERT(macinfo);
3905 
3906 	if (dlp->dl_service_mode != DL_CLDLS) {
3907 		return (DL_UNSUPPORTED);
3908 	}
3909 	if (dlp->dl_xidtest_flg & (DL_AUTO_XID | DL_AUTO_TEST)) {
3910 		return (DL_NOAUTO);
3911 	}
3912 
3913 	/*
3914 	 * Check sap validity and decide whether this stream accepts
3915 	 * IEEE 802.2 (LLC) packets.
3916 	 */
3917 	if (sap > ETHERTYPE_MAX)
3918 		return (DL_BADSAP);
3919 
3920 	/*
3921 	 * Decide whether the SAP value selects EtherType encoding/decoding.
3922 	 * For compatibility with monolithic ethernet drivers, the range of
3923 	 * SAP values is different for DL_ETHER media.
3924 	 */
3925 	switch (macinfo->gldm_type) {
3926 	case DL_ETHER:
3927 		gld->gld_ethertype = (sap > ETHERMTU);
3928 		break;
3929 	default:
3930 		gld->gld_ethertype = (sap > GLD_MAX_802_SAP);
3931 		break;
3932 	}
3933 
3934 	/* if we get to here, then the SAP is legal enough */
3935 	GLDM_LOCK(macinfo, RW_WRITER);
3936 	gld->gld_state = DL_IDLE;	/* bound and ready */
3937 	gld->gld_sap = sap;
3938 	gld_set_ipq(gld);
3939 
3940 #ifdef GLD_DEBUG
3941 	if (gld_debug & GLDPROT)
3942 		cmn_err(CE_NOTE, "gld_bind: ok - sap = %d", gld->gld_sap);
3943 #endif
3944 
3945 	/* ACK the BIND */
3946 	mp = gld_bindack(q, mp);
3947 	GLDM_UNLOCK(macinfo);
3948 
3949 	if (mp != NULL) {
3950 		qreply(q, mp);
3951 		return (GLDE_OK);
3952 	}
3953 
3954 	return (DL_SYSERR);
3955 }
3956 
3957 /*
3958  * gld_unbind - perform an unbind of an LSAP or ether type on the stream.
3959  * The stream is still open and can be re-bound.
3960  */
3961 static int
3962 gld_unbind(queue_t *q, mblk_t *mp)
3963 {
3964 	gld_t *gld = (gld_t *)q->q_ptr;
3965 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3966 
3967 	ASSERT(gld);
3968 
3969 #ifdef GLD_DEBUG
3970 	if (gld_debug & GLDTRACE)
3971 		cmn_err(CE_NOTE, "gld_unbind(%p %p)", (void *)q, (void *)mp);
3972 #endif
3973 
3974 	if (gld->gld_state != DL_IDLE) {
3975 #ifdef GLD_DEBUG
3976 		if (gld_debug & GLDERRS)
3977 			cmn_err(CE_NOTE, "gld_unbind: wrong state (%d)",
3978 				gld->gld_state);
3979 #endif
3980 		return (DL_OUTSTATE);
3981 	}
3982 	ASSERT(macinfo);
3983 
3984 	/*
3985 	 * Avoid unbinding (DL_UNBIND_REQ) while FAST/RAW is inside wput.
3986 	 * See comments above gld_start().
3987 	 */
3988 	gld->gld_in_unbind = B_TRUE;	/* disallow wput=>start */
3989 	membar_enter();
3990 	if (gld->gld_wput_count != 0) {
3991 		gld->gld_in_unbind = B_FALSE;
3992 		ASSERT(mp);		/* we didn't come from close */
3993 #ifdef GLD_DEBUG
3994 		if (gld_debug & GLDETRACE)
3995 			cmn_err(CE_NOTE, "gld_unbind: defer for wput");
3996 #endif
3997 		(void) putbq(q, mp);
3998 		qenable(q);		/* try again soon */
3999 		return (GLDE_RETRY);
4000 	}
4001 
4002 	GLDM_LOCK(macinfo, RW_WRITER);
4003 	gld->gld_state = DL_UNBOUND;
4004 	gld->gld_sap = 0;
4005 	gld_set_ipq(gld);
4006 	GLDM_UNLOCK(macinfo);
4007 
4008 	membar_exit();
4009 	gld->gld_in_unbind = B_FALSE;
4010 
4011 	/* mp is NULL if we came from close */
4012 	if (mp) {
4013 		gld_flushqueue(q);	/* flush the queues */
4014 		dlokack(q, mp, DL_UNBIND_REQ);
4015 	}
4016 	return (GLDE_OK);
4017 }
4018 
4019 /*
4020  * gld_inforeq - generate the response to an info request
4021  */
4022 static int
4023 gld_inforeq(queue_t *q, mblk_t *mp)
4024 {
4025 	gld_t		*gld;
4026 	dl_info_ack_t	*dlp;
4027 	int		bufsize;
4028 	glddev_t	*glddev;
4029 	gld_mac_info_t	*macinfo;
4030 	gld_mac_pvt_t	*mac_pvt;
4031 	int		sel_offset = 0;
4032 	int		range_offset = 0;
4033 	int		addr_offset;
4034 	int		addr_length;
4035 	int		sap_length;
4036 	int		brdcst_offset;
4037 	int		brdcst_length;
4038 	gld_vlan_t	*vlan;
4039 	uchar_t		*sapp;
4040 
4041 #ifdef GLD_DEBUG
4042 	if (gld_debug & GLDTRACE)
4043 		cmn_err(CE_NOTE, "gld_inforeq(%p %p)", (void *)q, (void *)mp);
4044 #endif
4045 	gld = (gld_t *)q->q_ptr;
4046 	ASSERT(gld);
4047 	glddev = gld->gld_device;
4048 	ASSERT(glddev);
4049 
4050 	if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) {
4051 		macinfo = gld->gld_mac_info;
4052 		ASSERT(macinfo != NULL);
4053 
4054 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4055 
4056 		addr_length = macinfo->gldm_addrlen;
4057 		sap_length = macinfo->gldm_saplen;
4058 		brdcst_length = macinfo->gldm_addrlen;
4059 	} else {
4060 		addr_length = glddev->gld_addrlen;
4061 		sap_length = glddev->gld_saplen;
4062 		brdcst_length = glddev->gld_addrlen;
4063 	}
4064 
4065 	bufsize = sizeof (dl_info_ack_t);
4066 
4067 	addr_offset = bufsize;
4068 	bufsize += addr_length;
4069 	bufsize += abs(sap_length);
4070 
4071 	brdcst_offset = bufsize;
4072 	bufsize += brdcst_length;
4073 
4074 	if ((vlan = (gld_vlan_t *)gld->gld_vlan) != NULL &&
4075 	    vlan->gldv_id != VLAN_VID_NONE) {
4076 		sel_offset = P2ROUNDUP(bufsize, sizeof (int64_t));
4077 		bufsize = sel_offset + sizeof (dl_qos_cl_sel1_t);
4078 
4079 		range_offset = P2ROUNDUP(bufsize, sizeof (int64_t));
4080 		bufsize = range_offset + sizeof (dl_qos_cl_range1_t);
4081 	}
4082 
4083 	if ((mp = mexchange(q, mp, bufsize, M_PCPROTO, DL_INFO_ACK)) == NULL)
4084 		return (GLDE_OK);	/* nothing more to be done */
4085 
4086 	bzero(mp->b_rptr, bufsize);
4087 
4088 	dlp = (dl_info_ack_t *)mp->b_rptr;
4089 	dlp->dl_primitive = DL_INFO_ACK;
4090 	dlp->dl_version = DL_VERSION_2;
4091 	dlp->dl_service_mode = DL_CLDLS;
4092 	dlp->dl_current_state = gld->gld_state;
4093 	dlp->dl_provider_style = gld->gld_style;
4094 
4095 	if (sel_offset != 0) {
4096 		dl_qos_cl_sel1_t	*selp;
4097 		dl_qos_cl_range1_t	*rangep;
4098 
4099 		ASSERT(range_offset != 0);
4100 
4101 		dlp->dl_qos_offset = sel_offset;
4102 		dlp->dl_qos_length = sizeof (dl_qos_cl_sel1_t);
4103 		dlp->dl_qos_range_offset = range_offset;
4104 		dlp->dl_qos_range_length = sizeof (dl_qos_cl_range1_t);
4105 
4106 		selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + sel_offset);
4107 		selp->dl_qos_type = DL_QOS_CL_SEL1;
4108 		selp->dl_priority = gld->gld_upri;
4109 
4110 		rangep = (dl_qos_cl_range1_t *)(mp->b_rptr + range_offset);
4111 		rangep->dl_qos_type = DL_QOS_CL_RANGE1;
4112 		rangep->dl_priority.dl_min = 0;
4113 		rangep->dl_priority.dl_max = 7;
4114 	}
4115 
4116 	if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) {
4117 		dlp->dl_min_sdu = macinfo->gldm_minpkt;
4118 		dlp->dl_max_sdu = macinfo->gldm_maxpkt;
4119 		dlp->dl_mac_type = macinfo->gldm_type;
4120 		dlp->dl_addr_length = addr_length + abs(sap_length);
4121 		dlp->dl_sap_length = sap_length;
4122 
4123 		if (gld->gld_state == DL_IDLE) {
4124 			/*
4125 			 * If we are bound to a non-LLC SAP on any medium
4126 			 * other than Ethernet, then we need room for a
4127 			 * SNAP header.  So we have to adjust the MTU size
4128 			 * accordingly.  XXX I suppose this should be done
4129 			 * in gldutil.c, but it seems likely that this will
4130 			 * always be true for everything GLD supports but
4131 			 * Ethernet.  Check this if you add another medium.
4132 			 */
4133 			if ((macinfo->gldm_type == DL_TPR ||
4134 			    macinfo->gldm_type == DL_FDDI) &&
4135 			    gld->gld_ethertype)
4136 				dlp->dl_max_sdu -= LLC_SNAP_HDR_LEN;
4137 
4138 			/* copy macaddr and sap */
4139 			dlp->dl_addr_offset = addr_offset;
4140 
4141 			mac_copy(mac_pvt->curr_macaddr, mp->b_rptr +
4142 			    addr_offset, macinfo->gldm_addrlen);
4143 			sapp = mp->b_rptr + addr_offset +
4144 			    macinfo->gldm_addrlen;
4145 			*(ushort_t *)sapp = gld->gld_sap;
4146 		} else {
4147 			dlp->dl_addr_offset = 0;
4148 		}
4149 
4150 		/* copy broadcast addr */
4151 		dlp->dl_brdcst_addr_length = macinfo->gldm_addrlen;
4152 		dlp->dl_brdcst_addr_offset = brdcst_offset;
4153 		mac_copy((caddr_t)macinfo->gldm_broadcast_addr,
4154 		    mp->b_rptr + brdcst_offset, brdcst_length);
4155 	} else {
4156 		/*
4157 		 * No PPA is attached.
4158 		 * The best we can do is use the values provided
4159 		 * by the first mac that called gld_register.
4160 		 */
4161 		dlp->dl_min_sdu = glddev->gld_minsdu;
4162 		dlp->dl_max_sdu = glddev->gld_maxsdu;
4163 		dlp->dl_mac_type = glddev->gld_type;
4164 		dlp->dl_addr_length = addr_length + abs(sap_length);
4165 		dlp->dl_sap_length = sap_length;
4166 		dlp->dl_addr_offset = 0;
4167 		dlp->dl_brdcst_addr_offset = brdcst_offset;
4168 		dlp->dl_brdcst_addr_length = brdcst_length;
4169 		mac_copy((caddr_t)glddev->gld_broadcast,
4170 		    mp->b_rptr + brdcst_offset, brdcst_length);
4171 	}
4172 	qreply(q, mp);
4173 	return (GLDE_OK);
4174 }
4175 
4176 /*
4177  * gld_unitdata (q, mp)
4178  * send a datagram.  Destination address/lsap is in M_PROTO
4179  * message (first mblock), data is in remainder of message.
4180  *
4181  */
4182 static int
4183 gld_unitdata(queue_t *q, mblk_t *mp)
4184 {
4185 	gld_t *gld = (gld_t *)q->q_ptr;
4186 	dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr;
4187 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4188 	size_t	msglen;
4189 	mblk_t	*nmp;
4190 	gld_interface_t *ifp;
4191 	uint32_t start;
4192 	uint32_t stuff;
4193 	uint32_t end;
4194 	uint32_t value;
4195 	uint32_t flags;
4196 	uint32_t upri;
4197 
4198 #ifdef GLD_DEBUG
4199 	if (gld_debug & GLDTRACE)
4200 		cmn_err(CE_NOTE, "gld_unitdata(%p %p)", (void *)q, (void *)mp);
4201 #endif
4202 
4203 	if (gld->gld_state != DL_IDLE) {
4204 #ifdef GLD_DEBUG
4205 		if (gld_debug & GLDERRS)
4206 			cmn_err(CE_NOTE, "gld_unitdata: wrong state (%d)",
4207 				gld->gld_state);
4208 #endif
4209 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4210 		    dlp->dl_dest_addr_length, DL_OUTSTATE, 0);
4211 		return (GLDE_OK);
4212 	}
4213 	ASSERT(macinfo != NULL);
4214 
4215 	if (!MBLKIN(mp, dlp->dl_dest_addr_offset, dlp->dl_dest_addr_length) ||
4216 	    dlp->dl_dest_addr_length !=
4217 	    macinfo->gldm_addrlen + abs(macinfo->gldm_saplen)) {
4218 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4219 		    dlp->dl_dest_addr_length, DL_BADADDR, 0);
4220 		return (GLDE_OK);
4221 	}
4222 
4223 	upri = dlp->dl_priority.dl_max;
4224 
4225 	msglen = msgdsize(mp);
4226 	if (msglen == 0 || msglen > macinfo->gldm_maxpkt) {
4227 #ifdef GLD_DEBUG
4228 		if (gld_debug & GLDERRS)
4229 			cmn_err(CE_NOTE, "gld_unitdata: bad msglen (%d)",
4230 				(int)msglen);
4231 #endif
4232 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4233 		    dlp->dl_dest_addr_length, DL_BADDATA, 0);
4234 		return (GLDE_OK);
4235 	}
4236 
4237 	ASSERT(mp->b_cont != NULL);	/* because msgdsize(mp) is nonzero */
4238 
4239 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
4240 
4241 	/* grab any checksum information that may be present */
4242 	hcksum_retrieve(mp->b_cont, NULL, NULL, &start, &stuff, &end,
4243 	    &value, &flags);
4244 
4245 	/*
4246 	 * Prepend a valid header for transmission
4247 	 */
4248 	if ((nmp = (*ifp->mkunitdata)(gld, mp)) == NULL) {
4249 #ifdef GLD_DEBUG
4250 		if (gld_debug & GLDERRS)
4251 			cmn_err(CE_NOTE, "gld_unitdata: mkunitdata failed.");
4252 #endif
4253 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4254 		    dlp->dl_dest_addr_length, DL_SYSERR, ENOSR);
4255 		return (GLDE_OK);
4256 	}
4257 
4258 	/* apply any checksum information to the first block in the chain */
4259 	(void) hcksum_assoc(nmp, NULL, NULL, start, stuff, end, value,
4260 	    flags, 0);
4261 
4262 	if (gld_start(q, nmp, GLD_WSRV, upri) == GLD_NORESOURCES) {
4263 		qenable(q);
4264 		return (GLDE_RETRY);
4265 	}
4266 
4267 	return (GLDE_OK);
4268 }
4269 
4270 /*
4271  * gldattach(q, mp)
4272  * DLPI DL_ATTACH_REQ
4273  * this attaches the stream to a PPA
4274  */
4275 static int
4276 gldattach(queue_t *q, mblk_t *mp)
4277 {
4278 	dl_attach_req_t *at;
4279 	gld_mac_info_t *macinfo;
4280 	gld_t  *gld = (gld_t *)q->q_ptr;
4281 	glddev_t *glddev;
4282 	gld_mac_pvt_t *mac_pvt;
4283 	uint32_t ppa;
4284 	uint32_t vid;
4285 	gld_vlan_t *vlan;
4286 
4287 	at = (dl_attach_req_t *)mp->b_rptr;
4288 
4289 	if (gld->gld_state != DL_UNATTACHED)
4290 		return (DL_OUTSTATE);
4291 
4292 	ASSERT(!gld->gld_mac_info);
4293 
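	/*
	 * The DLPI PPA encodes both the VLAN id and the physical PPA.
	 * With a GLD_VLAN_SCALE of 1000, for example, dl_ppa 5003 selects
	 * VLAN 5 on physical PPA 3, while dl_ppa 3 is the untagged PPA 3.
	 */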
4294 	ppa = at->dl_ppa % GLD_VLAN_SCALE;	/* 0 .. 999	*/
4295 	vid = at->dl_ppa / GLD_VLAN_SCALE;	/* 0 .. 4094	*/
4296 	if (vid > VLAN_VID_MAX)
4297 		return (DL_BADPPA);
4298 
4299 	glddev = gld->gld_device;
4300 	mutex_enter(&glddev->gld_devlock);
4301 	for (macinfo = glddev->gld_mac_next;
4302 	    macinfo != (gld_mac_info_t *)&glddev->gld_mac_next;
4303 	    macinfo = macinfo->gldm_next) {
4304 		int inst;
4305 
4306 		ASSERT(macinfo != NULL);
4307 		if (macinfo->gldm_ppa != ppa)
4308 			continue;
4309 
4310 		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
4311 			continue;	/* this one's not ready yet */
4312 
4313 		/*
4314 		 * VLAN sanity check
4315 		 */
4316 		if (vid != VLAN_VID_NONE && !VLAN_CAPABLE(macinfo)) {
4317 			mutex_exit(&glddev->gld_devlock);
4318 			return (DL_BADPPA);
4319 		}
4320 
4321 		/*
4322 		 * We found the correct PPA, hold the instance
4323 		 */
4324 		inst = ddi_get_instance(macinfo->gldm_devinfo);
4325 		if (inst == -1 || qassociate(q, inst) != 0) {
4326 			mutex_exit(&glddev->gld_devlock);
4327 			return (DL_BADPPA);
4328 		}
4329 
4330 		/* Take the stream off the per-driver-class list */
4331 		gldremque(gld);
4332 
4333 		/*
4334 		 * We must hold the lock to prevent multiple calls
4335 		 * to the reset and start routines.
4336 		 */
4337 		GLDM_LOCK(macinfo, RW_WRITER);
4338 
4339 		gld->gld_mac_info = macinfo;
4340 
4341 		if (macinfo->gldm_send_tagged != NULL)
4342 			gld->gld_send = macinfo->gldm_send_tagged;
4343 		else
4344 			gld->gld_send = macinfo->gldm_send;
4345 
4346 		if ((vlan = gld_get_vlan(macinfo, vid)) == NULL) {
4347 			GLDM_UNLOCK(macinfo);
4348 			gldinsque(gld, glddev->gld_str_prev);
4349 			mutex_exit(&glddev->gld_devlock);
4350 			(void) qassociate(q, -1);
4351 			return (DL_BADPPA);
4352 		}
4353 
4354 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4355 		if (!mac_pvt->started) {
4356 			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
4357 				gld_rem_vlan(vlan);
4358 				GLDM_UNLOCK(macinfo);
4359 				gldinsque(gld, glddev->gld_str_prev);
4360 				mutex_exit(&glddev->gld_devlock);
4361 				dlerrorack(q, mp, DL_ATTACH_REQ, DL_SYSERR,
4362 				    EIO);
4363 				(void) qassociate(q, -1);
4364 				return (GLDE_OK);
4365 			}
4366 		}
4367 
4368 		gld->gld_vlan = vlan;
4369 		vlan->gldv_nstreams++;
4370 		gldinsque(gld, vlan->gldv_str_prev);
4371 		gld->gld_state = DL_UNBOUND;
4372 		GLDM_UNLOCK(macinfo);
4373 
4374 #ifdef GLD_DEBUG
4375 		if (gld_debug & GLDPROT) {
4376 			cmn_err(CE_NOTE, "gldattach(%p, %p, PPA = %d)",
4377 			    (void *)q, (void *)mp, macinfo->gldm_ppa);
4378 		}
4379 #endif
4380 		mutex_exit(&glddev->gld_devlock);
4381 		dlokack(q, mp, DL_ATTACH_REQ);
4382 		return (GLDE_OK);
4383 	}
4384 	mutex_exit(&glddev->gld_devlock);
4385 	return (DL_BADPPA);
4386 }
4387 
4388 /*
4389  * gldunattach(q, mp)
4390  * DLPI DL_DETACH_REQ
4391  * detaches the mac layer from the stream
4392  */
4393 int
4394 gldunattach(queue_t *q, mblk_t *mp)
4395 {
4396 	gld_t  *gld = (gld_t *)q->q_ptr;
4397 	glddev_t *glddev = gld->gld_device;
4398 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4399 	int	state = gld->gld_state;
4400 	int	i;
4401 	gld_mac_pvt_t *mac_pvt;
4402 	gld_vlan_t *vlan;
4403 	boolean_t phys_off;
4404 	boolean_t mult_off;
4405 	int op = GLD_MAC_PROMISC_NOOP;
4406 
4407 	if (state != DL_UNBOUND)
4408 		return (DL_OUTSTATE);
4409 
4410 	ASSERT(macinfo != NULL);
4411 	ASSERT(gld->gld_sap == 0);
4412 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4413 
4414 #ifdef GLD_DEBUG
4415 	if (gld_debug & GLDPROT) {
4416 		cmn_err(CE_NOTE, "gldunattach(%p, %p, PPA = %d)",
4417 		    (void *)q, (void *)mp, macinfo->gldm_ppa);
4418 	}
4419 #endif
4420 
4421 	GLDM_LOCK(macinfo, RW_WRITER);
4422 
4423 	if (gld->gld_mcast) {
4424 		for (i = 0; i < gld->gld_multicnt; i++) {
4425 			gld_mcast_t *mcast;
4426 
4427 			if ((mcast = gld->gld_mcast[i]) != NULL) {
4428 				ASSERT(mcast->gldm_refcnt);
4429 				gld_send_disable_multi(macinfo, mcast);
4430 			}
4431 		}
4432 		kmem_free(gld->gld_mcast,
4433 		    sizeof (gld_mcast_t *) * gld->gld_multicnt);
4434 		gld->gld_mcast = NULL;
4435 		gld->gld_multicnt = 0;
4436 	}
4437 
4438 	/* decide if we need to turn off any promiscuity */
4439 	phys_off = (gld->gld_flags & GLD_PROM_PHYS &&
4440 	    --mac_pvt->nprom == 0);
4441 	mult_off = (gld->gld_flags & GLD_PROM_MULT &&
4442 	    --mac_pvt->nprom_multi == 0);
4443 
4444 	gld->gld_flags &= ~(GLD_PROM_PHYS | GLD_PROM_SAP | GLD_PROM_MULT);
4445 
4446 	if (phys_off) {
4447 		op = (mac_pvt->nprom_multi == 0) ? GLD_MAC_PROMISC_NONE :
4448 		    GLD_MAC_PROMISC_MULTI;
4449 	} else if (mult_off) {
4450 		op = (mac_pvt->nprom == 0) ? GLD_MAC_PROMISC_NONE :
4451 		    GLD_MAC_PROMISC_NOOP;	/* phys overrides multi */
4452 	}
4453 
4454 	if (op != GLD_MAC_PROMISC_NOOP)
4455 		(void) (*macinfo->gldm_set_promiscuous)(macinfo, op);
4456 
4457 	GLDM_UNLOCK(macinfo);
4458 
4459 	if (phys_off)
4460 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL);
4461 
4462 	/*
4463 	 * We need to hold both locks when modifying the mac stream list
4464 	 * to protect findminor as well as everyone else.
4465 	 */
4466 	mutex_enter(&glddev->gld_devlock);
4467 	GLDM_LOCK(macinfo, RW_WRITER);
4468 
4469 	/* disassociate this stream with its vlan and underlying mac */
4470 	gldremque(gld);
4471 
4472 	vlan = (gld_vlan_t *)gld->gld_vlan;
4473 	if (--vlan->gldv_nstreams == 0) {
4474 		gld_rem_vlan(vlan);
4475 		gld->gld_vlan = NULL;
4476 	}
4477 
4478 	gld->gld_mac_info = NULL;
4479 	gld->gld_state = DL_UNATTACHED;
4480 
4481 	/* cleanup mac layer if last vlan */
4482 	if (mac_pvt->nvlan == 0) {
4483 		gld_stop_mac(macinfo);
4484 		macinfo->gldm_GLD_flags &= ~GLD_INTR_WAIT;
4485 	}
4486 
4487 	/* make sure gld_v0_sched retains no reference to this gld */
4488 	if (mac_pvt->last_sched == gld)
4489 		mac_pvt->last_sched = NULL;
4490 
4491 	GLDM_UNLOCK(macinfo);
4492 
4493 	/* put the stream on the unattached Style 2 list */
4494 	gldinsque(gld, glddev->gld_str_prev);
4495 
4496 	mutex_exit(&glddev->gld_devlock);
4497 
4498 	/* There will be no mp if we were called from close */
4499 	if (mp) {
4500 		dlokack(q, mp, DL_DETACH_REQ);
4501 	}
4502 	if (gld->gld_style == DL_STYLE2)
4503 		(void) qassociate(q, -1);
4504 	return (GLDE_OK);
4505 }
4506 
4507 /*
4508  * gld_enable_multi (q, mp)
4509  * Enables a multicast address on the stream.  If the address is not
4510  * yet enabled at the mac layer, enable it there as well.
4511  */
4512 static int
4513 gld_enable_multi(queue_t *q, mblk_t *mp)
4514 {
4515 	gld_t  *gld = (gld_t *)q->q_ptr;
4516 	glddev_t *glddev;
4517 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4518 	unsigned char *maddr;
4519 	dl_enabmulti_req_t *multi;
4520 	gld_mcast_t *mcast;
4521 	int	i, rc;
4522 	gld_mac_pvt_t *mac_pvt;
4523 
4524 #ifdef GLD_DEBUG
4525 	if (gld_debug & GLDPROT) {
4526 		cmn_err(CE_NOTE, "gld_enable_multi(%p, %p)", (void *)q,
4527 		    (void *)mp);
4528 	}
4529 #endif
4530 
4531 	if (gld->gld_state == DL_UNATTACHED)
4532 		return (DL_OUTSTATE);
4533 
4534 	ASSERT(macinfo != NULL);
4535 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4536 
4537 	if (macinfo->gldm_set_multicast == NULL) {
4538 		return (DL_UNSUPPORTED);
4539 	}
4540 
4541 	multi = (dl_enabmulti_req_t *)mp->b_rptr;
4542 
4543 	if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) ||
4544 	    multi->dl_addr_length != macinfo->gldm_addrlen)
4545 		return (DL_BADADDR);
4546 
4547 	/* request appears to be valid */
4548 
4549 	glddev = mac_pvt->major_dev;
4550 	ASSERT(glddev == gld->gld_device);
4551 
4552 	maddr = mp->b_rptr + multi->dl_addr_offset;
4553 
4554 	/*
4555 	 * The multicast addresses live in a per-device table, along
4556 	 * with a reference count.  Each stream has a table that
4557 	 * points to entries in the device table, with the reference
4558 	 * count reflecting the number of streams pointing at it.  If
4559 	 * this multicast address is already in the per-device table,
4560 	 * all we have to do is point at it.
4561 	 */
4562 	GLDM_LOCK(macinfo, RW_WRITER);
4563 
4564 	/* does this address appear in current table? */
4565 	if (gld->gld_mcast == NULL) {
4566 		/* no mcast addresses -- allocate table */
4567 		gld->gld_mcast = GETSTRUCT(gld_mcast_t *,
4568 					    glddev->gld_multisize);
4569 		if (gld->gld_mcast == NULL) {
4570 			GLDM_UNLOCK(macinfo);
4571 			dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR);
4572 			return (GLDE_OK);
4573 		}
4574 		gld->gld_multicnt = glddev->gld_multisize;
4575 	} else {
4576 		for (i = 0; i < gld->gld_multicnt; i++) {
4577 			if (gld->gld_mcast[i] &&
4578 			    mac_eq(gld->gld_mcast[i]->gldm_addr,
4579 				maddr, macinfo->gldm_addrlen)) {
4580 				/* this is a match -- just succeed */
4581 				ASSERT(gld->gld_mcast[i]->gldm_refcnt);
4582 				GLDM_UNLOCK(macinfo);
4583 				dlokack(q, mp, DL_ENABMULTI_REQ);
4584 				return (GLDE_OK);
4585 			}
4586 		}
4587 	}
4588 
4589 	/*
4590 	 * it wasn't in the stream so check to see if the mac layer has it
4591 	 */
4592 	mcast = NULL;
4593 	if (mac_pvt->mcast_table == NULL) {
4594 		mac_pvt->mcast_table = GETSTRUCT(gld_mcast_t,
4595 						glddev->gld_multisize);
4596 		if (mac_pvt->mcast_table == NULL) {
4597 			GLDM_UNLOCK(macinfo);
4598 			dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR);
4599 			return (GLDE_OK);
4600 		}
4601 	} else {
4602 		for (i = 0; i < glddev->gld_multisize; i++) {
4603 			if (mac_pvt->mcast_table[i].gldm_refcnt &&
4604 			    mac_eq(mac_pvt->mcast_table[i].gldm_addr,
4605 			    maddr, macinfo->gldm_addrlen)) {
4606 				mcast = &mac_pvt->mcast_table[i];
4607 				break;
4608 			}
4609 		}
4610 	}
4611 	if (mcast == NULL) {
4612 		/* not in mac layer -- find an empty mac slot to fill in */
4613 		for (i = 0; i < glddev->gld_multisize; i++) {
4614 			if (mac_pvt->mcast_table[i].gldm_refcnt == 0) {
4615 				mcast = &mac_pvt->mcast_table[i];
4616 				mac_copy(maddr, mcast->gldm_addr,
4617 				    macinfo->gldm_addrlen);
4618 				break;
4619 			}
4620 		}
4621 	}
4622 	if (mcast == NULL) {
4623 		/* couldn't get a mac layer slot */
4624 		GLDM_UNLOCK(macinfo);
4625 		return (DL_TOOMANY);
4626 	}
4627 
4628 	/* now we have a mac layer slot in mcast -- get a stream slot */
4629 	for (i = 0; i < gld->gld_multicnt; i++) {
4630 		if (gld->gld_mcast[i] != NULL)
4631 			continue;
4632 		/* found an empty slot */
4633 		if (!mcast->gldm_refcnt) {
4634 			/* set mcast in hardware */
4635 			unsigned char cmaddr[GLD_MAX_ADDRLEN];
4636 
4637 			ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
4638 			cmac_copy(maddr, cmaddr,
4639 			    macinfo->gldm_addrlen, macinfo);
4640 
4641 			rc = (*macinfo->gldm_set_multicast)
4642 			    (macinfo, cmaddr, GLD_MULTI_ENABLE);
4643 			if (rc == GLD_NOTSUPPORTED) {
4644 				GLDM_UNLOCK(macinfo);
4645 				return (DL_NOTSUPPORTED);
4646 			} else if (rc == GLD_NORESOURCES) {
4647 				GLDM_UNLOCK(macinfo);
4648 				return (DL_TOOMANY);
4649 			} else if (rc == GLD_BADARG) {
4650 				GLDM_UNLOCK(macinfo);
4651 				return (DL_BADADDR);
4652 			} else if (rc == GLD_RETRY) {
4653 				/*
4654 				 * The putbq and gld_xwait must be
4655 				 * within the lock to prevent races
4656 				 * with gld_sched.
4657 				 */
4658 				(void) putbq(q, mp);
4659 				gld->gld_xwait = B_TRUE;
4660 				GLDM_UNLOCK(macinfo);
4661 				return (GLDE_RETRY);
4662 			} else if (rc != GLD_SUCCESS) {
4663 				GLDM_UNLOCK(macinfo);
4664 				dlerrorack(q, mp, DL_ENABMULTI_REQ,
4665 				    DL_SYSERR, EIO);
4666 				return (GLDE_OK);
4667 			}
4668 		}
4669 		gld->gld_mcast[i] = mcast;
4670 		mcast->gldm_refcnt++;
4671 		GLDM_UNLOCK(macinfo);
4672 		dlokack(q, mp, DL_ENABMULTI_REQ);
4673 		return (GLDE_OK);
4674 	}
4675 
4676 	/* couldn't get a stream slot */
4677 	GLDM_UNLOCK(macinfo);
4678 	return (DL_TOOMANY);
4679 }
4680 
4681 
4682 /*
4683  * gld_disable_multi (q, mp)
4684  * Disable the multicast address on the stream.  If this is the last
4685  * reference at the mac layer, disable it there as well.
4686  */
4687 static int
4688 gld_disable_multi(queue_t *q, mblk_t *mp)
4689 {
4690 	gld_t  *gld;
4691 	gld_mac_info_t *macinfo;
4692 	unsigned char *maddr;
4693 	dl_disabmulti_req_t *multi;
4694 	int i;
4695 	gld_mcast_t *mcast;
4696 
4697 #ifdef GLD_DEBUG
4698 	if (gld_debug & GLDPROT) {
4699 		cmn_err(CE_NOTE, "gld_disable_multi(%p, %p)", (void *)q,
4700 		    (void *)mp);
4701 	}
4702 #endif
4703 
4704 	gld = (gld_t *)q->q_ptr;
4705 	if (gld->gld_state == DL_UNATTACHED)
4706 		return (DL_OUTSTATE);
4707 
4708 	macinfo = gld->gld_mac_info;
4709 	ASSERT(macinfo != NULL);
4710 	if (macinfo->gldm_set_multicast == NULL) {
4711 		return (DL_UNSUPPORTED);
4712 	}
4713 
4714 	multi = (dl_disabmulti_req_t *)mp->b_rptr;
4715 
4716 	if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) ||
4717 	    multi->dl_addr_length != macinfo->gldm_addrlen)
4718 		return (DL_BADADDR);
4719 
4720 	maddr = mp->b_rptr + multi->dl_addr_offset;
4721 
4722 	/* request appears to be valid */
4723 	/* does this address appear in current table? */
4724 	GLDM_LOCK(macinfo, RW_WRITER);
4725 	if (gld->gld_mcast != NULL) {
4726 		for (i = 0; i < gld->gld_multicnt; i++)
4727 			if (((mcast = gld->gld_mcast[i]) != NULL) &&
4728 			    mac_eq(mcast->gldm_addr,
4729 			    maddr, macinfo->gldm_addrlen)) {
4730 				ASSERT(mcast->gldm_refcnt);
4731 				gld_send_disable_multi(macinfo, mcast);
4732 				gld->gld_mcast[i] = NULL;
4733 				GLDM_UNLOCK(macinfo);
4734 				dlokack(q, mp, DL_DISABMULTI_REQ);
4735 				return (GLDE_OK);
4736 			}
4737 	}
4738 	GLDM_UNLOCK(macinfo);
4739 	return (DL_NOTENAB); /* not an enabled address */
4740 }
4741 
4742 /*
4743  * gld_send_disable_multi(macinfo, mcast)
4744  * this function is used to disable a multicast address if the reference
4745  * count goes to zero.  The disable request is then passed down to the
4746  * MAC driver.
4747  */
4748 static void
4749 gld_send_disable_multi(gld_mac_info_t *macinfo, gld_mcast_t *mcast)
4750 {
4751 	ASSERT(macinfo != NULL);
4752 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
4753 	ASSERT(mcast != NULL);
4754 	ASSERT(mcast->gldm_refcnt);
4755 
4756 	if (!mcast->gldm_refcnt) {
4757 		return;			/* "cannot happen" */
4758 	}
4759 
4760 	if (--mcast->gldm_refcnt > 0) {
4761 		return;
4762 	}
4763 
4764 	/*
4765 	 * This must be converted from canonical form to device form.
4766 	 * The refcnt is now zero so we can trash the data.
4767 	 */
4768 	if (macinfo->gldm_options & GLDOPT_CANONICAL_ADDR)
4769 		gld_bitreverse(mcast->gldm_addr, macinfo->gldm_addrlen);
4770 
4771 	/* XXX Ought to check for GLD_NORESOURCES or GLD_FAILURE */
4772 	(void) (*macinfo->gldm_set_multicast)
4773 	    (macinfo, mcast->gldm_addr, GLD_MULTI_DISABLE);
4774 }
4775 
4776 /*
4777  * gld_promisc (q, mp, req, on)
4778  *	enable or disable the use of promiscuous mode with the hardware
4779  */
4780 static int
4781 gld_promisc(queue_t *q, mblk_t *mp, t_uscalar_t req, boolean_t on)
4782 {
4783 	gld_t *gld;
4784 	gld_mac_info_t *macinfo;
4785 	gld_mac_pvt_t *mac_pvt;
4786 	gld_vlan_t *vlan;
4787 	union DL_primitives *prim;
4788 	int macrc = GLD_SUCCESS;
4789 	int dlerr = GLDE_OK;
4790 	int op = GLD_MAC_PROMISC_NOOP;
4791 
4792 #ifdef GLD_DEBUG
4793 	if (gld_debug & GLDTRACE)
4794 		cmn_err(CE_NOTE, "gld_promisc(%p, %p, %d, %d)",
4795 		    (void *)q, (void *)mp, req, on);
4796 #endif
4797 
4798 	ASSERT(mp != NULL);
4799 	prim = (union DL_primitives *)mp->b_rptr;
4800 
4801 	/* XXX I think spec allows promisc in unattached state */
4802 	gld = (gld_t *)q->q_ptr;
4803 	if (gld->gld_state == DL_UNATTACHED)
4804 		return (DL_OUTSTATE);
4805 
4806 	macinfo = gld->gld_mac_info;
4807 	ASSERT(macinfo != NULL);
4808 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4809 
4810 	vlan = (gld_vlan_t *)gld->gld_vlan;
4811 	ASSERT(vlan != NULL);
4812 
4813 	GLDM_LOCK(macinfo, RW_WRITER);
4814 
4815 	/*
4816 	 * Work out what request (if any) has to be made to the MAC layer
4817 	 */
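	/*
	 * Note that physical promiscuity subsumes multicast promiscuity:
	 * the MAC is asked to change modes only when the relevant refcount
	 * (nprom or nprom_multi) is making a 0 <-> 1 transition and no
	 * stronger mode remains in force.
	 */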
4818 	if (on) {
4819 		switch (prim->promiscon_req.dl_level) {
4820 		default:
4821 			dlerr = DL_UNSUPPORTED;	/* this is an error */
4822 			break;
4823 
4824 		case DL_PROMISC_PHYS:
4825 			if (mac_pvt->nprom == 0)
4826 				op = GLD_MAC_PROMISC_PHYS;
4827 			break;
4828 
4829 		case DL_PROMISC_MULTI:
4830 			if (mac_pvt->nprom_multi == 0)
4831 				if (mac_pvt->nprom == 0)
4832 					op = GLD_MAC_PROMISC_MULTI;
4833 			break;
4834 
4835 		case DL_PROMISC_SAP:
4836 			/* We can do this without reference to the MAC */
4837 			break;
4838 		}
4839 	} else {
4840 		switch (prim->promiscoff_req.dl_level) {
4841 		default:
4842 			dlerr = DL_UNSUPPORTED;	/* this is an error */
4843 			break;
4844 
4845 		case DL_PROMISC_PHYS:
4846 			if (!(gld->gld_flags & GLD_PROM_PHYS))
4847 				dlerr = DL_NOTENAB;
4848 			else if (mac_pvt->nprom == 1)
4849 				if (mac_pvt->nprom_multi)
4850 					op = GLD_MAC_PROMISC_MULTI;
4851 				else
4852 					op = GLD_MAC_PROMISC_NONE;
4853 			break;
4854 
4855 		case DL_PROMISC_MULTI:
4856 			if (!(gld->gld_flags & GLD_PROM_MULT))
4857 				dlerr = DL_NOTENAB;
4858 			else if (mac_pvt->nprom_multi == 1)
4859 				if (mac_pvt->nprom == 0)
4860 					op = GLD_MAC_PROMISC_NONE;
4861 			break;
4862 
4863 		case DL_PROMISC_SAP:
4864 			if (!(gld->gld_flags & GLD_PROM_SAP))
4865 				dlerr = DL_NOTENAB;
4866 
4867 			/* We can do this without reference to the MAC */
4868 			break;
4869 		}
4870 	}
4871 
4872 	/*
4873 	 * The request was invalid in some way so no need to continue.
4874 	 */
4875 	if (dlerr != GLDE_OK) {
4876 		GLDM_UNLOCK(macinfo);
4877 		return (dlerr);
4878 	}
4879 
4880 	/*
4881 	 * Issue the request to the MAC layer, if required
4882 	 */
4883 	if (op != GLD_MAC_PROMISC_NOOP) {
4884 		macrc = (*macinfo->gldm_set_promiscuous)(macinfo, op);
4885 	}
4886 
4887 	/*
4888 	 * On success, update the appropriate flags & refcounts
4889 	 */
4890 	if (macrc == GLD_SUCCESS) {
4891 		if (on) {
4892 			switch (prim->promiscon_req.dl_level) {
4893 			case DL_PROMISC_PHYS:
4894 				mac_pvt->nprom++;
4895 				gld->gld_flags |= GLD_PROM_PHYS;
4896 				break;
4897 
4898 			case DL_PROMISC_MULTI:
4899 				mac_pvt->nprom_multi++;
4900 				gld->gld_flags |= GLD_PROM_MULT;
4901 				break;
4902 
4903 			case DL_PROMISC_SAP:
4904 				gld->gld_flags |= GLD_PROM_SAP;
4905 				break;
4906 
4907 			default:
4908 				break;
4909 			}
4910 		} else {
4911 			switch (prim->promiscoff_req.dl_level) {
4912 			case DL_PROMISC_PHYS:
4913 				mac_pvt->nprom--;
4914 				gld->gld_flags &= ~GLD_PROM_PHYS;
4915 				break;
4916 
4917 			case DL_PROMISC_MULTI:
4918 				mac_pvt->nprom_multi--;
4919 				gld->gld_flags &= ~GLD_PROM_MULT;
4920 				break;
4921 
4922 			case DL_PROMISC_SAP:
4923 				gld->gld_flags &= ~GLD_PROM_SAP;
4924 				break;
4925 
4926 			default:
4927 				break;
4928 			}
4929 		}
4930 	} else if (macrc == GLD_RETRY) {
4931 		/*
4932 		 * The putbq and gld_xwait must be within the lock to
4933 		 * prevent races with gld_sched.
4934 		 */
4935 		(void) putbq(q, mp);
4936 		gld->gld_xwait = B_TRUE;
4937 	}
4938 
4939 	/*
4940 	 * Update VLAN IPQ status -- it may have changed
4941 	 */
4942 	if (gld->gld_flags & (GLD_PROM_SAP | GLD_PROM_MULT | GLD_PROM_PHYS))
4943 		vlan->gldv_ipq_flags |= IPQ_FORBIDDEN;
4944 	else
4945 		vlan->gldv_ipq_flags &= ~IPQ_FORBIDDEN;
4946 
4947 	GLDM_UNLOCK(macinfo);
4948 
4949 	/*
4950 	 * Finally, decide how to reply.
4951 	 *
4952 	 * If <macrc> is not GLD_SUCCESS, the request was put to the MAC
4953 	 * layer but failed.  In such cases, we can return a DL_* error
4954 	 * code and let the caller send an error-ack reply upstream, or
4955 	 * we can send a reply here and then return GLDE_OK so that the
4956 	 * caller doesn't also respond.
4957 	 *
4958 	 * If physical-promiscuous mode was (successfully) switched on or
4959 	 * off, send a notification (DL_NOTIFY_IND) to anyone interested.
4960 	 */
4961 	switch (macrc) {
4962 	case GLD_NOTSUPPORTED:
4963 		return (DL_NOTSUPPORTED);
4964 
4965 	case GLD_NORESOURCES:
4966 		dlerrorack(q, mp, req, DL_SYSERR, ENOSR);
4967 		return (GLDE_OK);
4968 
4969 	case GLD_RETRY:
4970 		return (GLDE_RETRY);
4971 
4972 	default:
4973 		dlerrorack(q, mp, req, DL_SYSERR, EIO);
4974 		return (GLDE_OK);
4975 
4976 	case GLD_SUCCESS:
4977 		dlokack(q, mp, req);
4978 		break;
4979 	}
4980 
4981 	switch (op) {
4982 	case GLD_MAC_PROMISC_NOOP:
4983 		break;
4984 
4985 	case GLD_MAC_PROMISC_PHYS:
4986 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_ON_PHYS, NULL);
4987 		break;
4988 
4989 	default:
4990 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL);
4991 		break;
4992 	}
4993 
4994 	return (GLDE_OK);
4995 }
4996 
4997 /*
4998  * gld_physaddr()
4999  *	get the current or factory physical address value
5000  */
5001 static int
5002 gld_physaddr(queue_t *q, mblk_t *mp)
5003 {
5004 	gld_t *gld = (gld_t *)q->q_ptr;
5005 	gld_mac_info_t *macinfo;
5006 	union DL_primitives *prim = (union DL_primitives *)mp->b_rptr;
5007 	unsigned char addr[GLD_MAX_ADDRLEN];
5008 
5009 	if (gld->gld_state == DL_UNATTACHED)
5010 		return (DL_OUTSTATE);
5011 
5012 	macinfo = (gld_mac_info_t *)gld->gld_mac_info;
5013 	ASSERT(macinfo != NULL);
5014 	ASSERT(macinfo->gldm_addrlen <= GLD_MAX_ADDRLEN);
5015 
5016 	switch (prim->physaddr_req.dl_addr_type) {
5017 	case DL_FACT_PHYS_ADDR:
5018 		mac_copy((caddr_t)macinfo->gldm_vendor_addr,
5019 		    (caddr_t)addr, macinfo->gldm_addrlen);
5020 		break;
5021 	case DL_CURR_PHYS_ADDR:
5022 		/* make a copy so we don't hold the lock across qreply */
5023 		GLDM_LOCK(macinfo, RW_WRITER);
5024 		mac_copy((caddr_t)
5025 		    ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
5026 		    (caddr_t)addr, macinfo->gldm_addrlen);
5027 		GLDM_UNLOCK(macinfo);
5028 		break;
5029 	default:
5030 		return (DL_BADPRIM);
5031 	}
5032 	dlphysaddrack(q, mp, (caddr_t)addr, macinfo->gldm_addrlen);
5033 	return (GLDE_OK);
5034 }
5035 
5036 /*
5037  * gld_setaddr()
5038  *	change the hardware's physical address to a user specified value
5039  */
5040 static int
5041 gld_setaddr(queue_t *q, mblk_t *mp)
5042 {
5043 	gld_t *gld = (gld_t *)q->q_ptr;
5044 	gld_mac_info_t *macinfo;
5045 	gld_mac_pvt_t *mac_pvt;
5046 	union DL_primitives *prim = (union DL_primitives *)mp->b_rptr;
5047 	unsigned char *addr;
5048 	unsigned char cmaddr[GLD_MAX_ADDRLEN];
5049 	int rc;
5050 	gld_vlan_t *vlan;
5051 
5052 	if (gld->gld_state == DL_UNATTACHED)
5053 		return (DL_OUTSTATE);
5054 
5055 	vlan = (gld_vlan_t *)gld->gld_vlan;
5056 	ASSERT(vlan != NULL);
5057 
5058 	if (vlan->gldv_id != VLAN_VID_NONE)
5059 		return (DL_NOTSUPPORTED);
5060 
5061 	macinfo = (gld_mac_info_t *)gld->gld_mac_info;
5062 	ASSERT(macinfo != NULL);
5063 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5064 
5065 	if (!MBLKIN(mp, prim->set_physaddr_req.dl_addr_offset,
5066 	    prim->set_physaddr_req.dl_addr_length) ||
5067 	    prim->set_physaddr_req.dl_addr_length != macinfo->gldm_addrlen)
5068 		return (DL_BADADDR);
5069 
5070 	GLDM_LOCK(macinfo, RW_WRITER);
5071 
5072 	/* now do the set at the hardware level */
5073 	addr = mp->b_rptr + prim->set_physaddr_req.dl_addr_offset;
5074 	ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
5075 	cmac_copy(addr, cmaddr, macinfo->gldm_addrlen, macinfo);
5076 
5077 	rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr);
5078 	if (rc == GLD_SUCCESS)
5079 		mac_copy(addr, mac_pvt->curr_macaddr,
5080 		    macinfo->gldm_addrlen);
5081 
5082 	GLDM_UNLOCK(macinfo);
5083 
5084 	switch (rc) {
5085 	case GLD_SUCCESS:
5086 		break;
5087 	case GLD_NOTSUPPORTED:
5088 		return (DL_NOTSUPPORTED);
5089 	case GLD_BADARG:
5090 		return (DL_BADADDR);
5091 	case GLD_NORESOURCES:
5092 		dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, ENOSR);
5093 		return (GLDE_OK);
5094 	default:
5095 		dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, EIO);
5096 		return (GLDE_OK);
5097 	}
5098 
5099 	gld_notify_ind(macinfo, DL_NOTE_PHYS_ADDR, NULL);
5100 
5101 	dlokack(q, mp, DL_SET_PHYS_ADDR_REQ);
5102 	return (GLDE_OK);
5103 }
5104 
5105 int
5106 gld_get_statistics(queue_t *q, mblk_t *mp)
5107 {
5108 	dl_get_statistics_ack_t *dlsp;
5109 	gld_t  *gld = (gld_t *)q->q_ptr;
5110 	gld_mac_info_t *macinfo = gld->gld_mac_info;
5111 	gld_mac_pvt_t *mac_pvt;
5112 
5113 	if (gld->gld_state == DL_UNATTACHED)
5114 		return (DL_OUTSTATE);
5115 
5116 	ASSERT(macinfo != NULL);
5117 
5118 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5119 	(void) gld_update_kstat(mac_pvt->kstatp, KSTAT_READ);
5120 
5121 	mp = mexchange(q, mp, DL_GET_STATISTICS_ACK_SIZE +
5122 	    sizeof (struct gldkstats), M_PCPROTO, DL_GET_STATISTICS_ACK);
5123 
5124 	if (mp == NULL)
5125 		return (GLDE_OK);	/* mexchange already sent merror */
5126 
5127 	dlsp = (dl_get_statistics_ack_t *)mp->b_rptr;
5128 	dlsp->dl_primitive = DL_GET_STATISTICS_ACK;
5129 	dlsp->dl_stat_length = sizeof (struct gldkstats);
5130 	dlsp->dl_stat_offset = DL_GET_STATISTICS_ACK_SIZE;
5131 
5132 	GLDM_LOCK(macinfo, RW_WRITER);
5133 	bcopy(mac_pvt->kstatp->ks_data,
5134 	    (mp->b_rptr + DL_GET_STATISTICS_ACK_SIZE),
5135 	    sizeof (struct gldkstats));
5136 	GLDM_UNLOCK(macinfo);
5137 
5138 	qreply(q, mp);
5139 	return (GLDE_OK);
5140 }
5141 
5142 /* =================================================== */
5143 /* misc utilities, some requiring various mutexes held */
5144 /* =================================================== */
5145 
5146 /*
5147  * Initialize and start the driver.
5148  */
5149 static int
5150 gld_start_mac(gld_mac_info_t *macinfo)
5151 {
5152 	int	rc;
5153 	unsigned char cmaddr[GLD_MAX_ADDRLEN];
5154 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5155 
5156 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5157 	ASSERT(!mac_pvt->started);
5158 
5159 	rc = (*macinfo->gldm_reset)(macinfo);
5160 	if (rc != GLD_SUCCESS)
5161 		return (GLD_FAILURE);
5162 
5163 	/* set the addr after we reset the device */
5164 	ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
5165 	cmac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)
5166 	    ->curr_macaddr, cmaddr, macinfo->gldm_addrlen, macinfo);
5167 
5168 	rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr);
5169 	ASSERT(rc != GLD_BADARG);  /* this address was good before */
5170 	if (rc != GLD_SUCCESS && rc != GLD_NOTSUPPORTED)
5171 		return (GLD_FAILURE);
5172 
5173 	rc = (*macinfo->gldm_start)(macinfo);
5174 	if (rc != GLD_SUCCESS)
5175 		return (GLD_FAILURE);
5176 
5177 	mac_pvt->started = B_TRUE;
5178 	return (GLD_SUCCESS);
5179 }
5180 
5181 /*
5182  * Stop the driver.
5183  */
5184 static void
5185 gld_stop_mac(gld_mac_info_t *macinfo)
5186 {
5187 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5188 
5189 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5190 	ASSERT(mac_pvt->started);
5191 
5192 	(void) (*macinfo->gldm_stop)(macinfo);
5193 
5194 	mac_pvt->started = B_FALSE;
5195 }
5196 
5197 
5198 /*
5199  * gld_set_ipq will set a pointer to the queue which is bound to the
5200  * IP sap if:
5201  * o the device type is ethernet or IPoIB.
5202  * o there is no stream in SAP promiscuous mode.
5203  * o there is exactly one stream bound to the IP sap.
5204  * o the stream is in "fastpath" mode.
5205  */
5206 static void
5207 gld_set_ipq(gld_t *gld)
5208 {
5209 	gld_vlan_t	*vlan;
5210 	gld_mac_info_t	*macinfo = gld->gld_mac_info;
5211 	gld_t		*ip_gld = NULL;
5212 	uint_t		ipq_candidates = 0;
5213 	gld_t		*ipv6_gld = NULL;
5214 	uint_t		ipv6q_candidates = 0;
5215 
5216 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5217 
5218 	/* The ipq code in gld_recv() is intimate with ethernet/IPoIB */
5219 	if (((macinfo->gldm_type != DL_ETHER) &&
5220 	    (macinfo->gldm_type != DL_IB)) ||
5221 	    (gld_global_options & GLD_OPT_NO_IPQ))
5222 		return;
5223 
5224 	vlan = (gld_vlan_t *)gld->gld_vlan;
5225 	ASSERT(vlan != NULL);
5226 
5227 	/* clear down any previously defined ipqs */
5228 	vlan->gldv_ipq = NULL;
5229 	vlan->gldv_ipv6q = NULL;
5230 
5231 	/* Try to find a single stream eligible to receive IP packets */
5232 	for (gld = vlan->gldv_str_next;
5233 	    gld != (gld_t *)&vlan->gldv_str_next; gld = gld->gld_next) {
5234 		if (gld->gld_state != DL_IDLE)
5235 			continue;	/* not eligible to receive */
5236 		if (gld->gld_flags & GLD_STR_CLOSING)
5237 			continue;	/* not eligible to receive */
5238 
5239 		if (gld->gld_sap == ETHERTYPE_IP) {
5240 			ip_gld = gld;
5241 			ipq_candidates++;
5242 		}
5243 
5244 		if (gld->gld_sap == ETHERTYPE_IPV6) {
5245 			ipv6_gld = gld;
5246 			ipv6q_candidates++;
5247 		}
5248 	}
5249 
5250 	if (ipq_candidates == 1) {
5251 		ASSERT(ip_gld != NULL);
5252 
5253 		if (ip_gld->gld_flags & GLD_FAST)	/* eligible for ipq */
5254 			vlan->gldv_ipq = ip_gld->gld_qptr;
5255 	}
5256 
5257 	if (ipv6q_candidates == 1) {
5258 		ASSERT(ipv6_gld != NULL);
5259 
5260 		if (ipv6_gld->gld_flags & GLD_FAST)	/* eligible for ipq */
5261 			vlan->gldv_ipv6q = ipv6_gld->gld_qptr;
5262 	}
5263 }
5264 
5265 /*
5266  * gld_flushqueue (q)
5267  *	Used by DLPI primitives that require flushing the queues;
5268  *	essentially, this is DL_UNBIND_REQ.
5269  */
5270 static void
5271 gld_flushqueue(queue_t *q)
5272 {
5273 	/* flush all data in both queues */
5274 	/* XXX Should these be FLUSHALL? */
5275 	flushq(q, FLUSHDATA);
5276 	flushq(WR(q), FLUSHDATA);
5277 	/* flush all the queues upstream */
5278 	(void) putctl1(q, M_FLUSH, FLUSHRW);
5279 }
5280 
5281 /*
5282  * gld_devlookup (major)
5283  * Search the device table for the device with the specified
5284  * major number; return a pointer to it, or NULL if it does not exist.
5285  */
5286 static glddev_t *
5287 gld_devlookup(int major)
5288 {
5289 	struct glddevice *dev;
5290 
5291 	ASSERT(mutex_owned(&gld_device_list.gld_devlock));
5292 
5293 	for (dev = gld_device_list.gld_next;
5294 	    dev != &gld_device_list;
5295 	    dev = dev->gld_next) {
5296 		ASSERT(dev);
5297 		if (dev->gld_major == major)
5298 			return (dev);
5299 	}
5300 	return (NULL);
5301 }
5302 
5303 /*
5304  * gld_findminor(device)
5305  * Returns a minor number currently unused by any stream in the current
5306  * device class (major) list.
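 *
 * A monotonically increasing "next minor" hint is tried first; once the
 * hint has passed GLD_MAX_CLONE_MINOR, every unattached and attached
 * stream on the device is scanned for a minor number not currently in
 * use.  Returns 0 if the whole clone range is exhausted.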
5307  */
5308 static int
5309 gld_findminor(glddev_t *device)
5310 {
5311 	gld_t		*next;
5312 	gld_mac_info_t	*nextmac;
5313 	gld_vlan_t	*nextvlan;
5314 	int		minor;
5315 	int		i;
5316 
5317 	ASSERT(mutex_owned(&device->gld_devlock));
5318 
5319 	/* The fast way */
5320 	if (device->gld_nextminor >= GLD_MIN_CLONE_MINOR &&
5321 	    device->gld_nextminor <= GLD_MAX_CLONE_MINOR)
5322 		return (device->gld_nextminor++);
5323 
5324 	/* The steady way */
5325 	for (minor = GLD_MIN_CLONE_MINOR; minor <= GLD_MAX_CLONE_MINOR;
5326 	    minor++) {
5327 		/* Search all unattached streams */
5328 		for (next = device->gld_str_next;
5329 		    next != (gld_t *)&device->gld_str_next;
5330 		    next = next->gld_next) {
5331 			if (minor == next->gld_minor)
5332 				goto nextminor;
5333 		}
5334 		/* Search all attached streams; we don't need maclock because */
5335 		/* mac stream list is protected by devlock as well as maclock */
5336 		for (nextmac = device->gld_mac_next;
5337 		    nextmac != (gld_mac_info_t *)&device->gld_mac_next;
5338 		    nextmac = nextmac->gldm_next) {
5339 			gld_mac_pvt_t *pvt =
5340 			    (gld_mac_pvt_t *)nextmac->gldm_mac_pvt;
5341 
5342 			if (!(nextmac->gldm_GLD_flags & GLD_MAC_READY))
5343 				continue;	/* this one's not ready yet */
5344 
5345 			for (i = 0; i < VLAN_HASHSZ; i++) {
5346 				for (nextvlan = pvt->vlan_hash[i];
5347 				    nextvlan != NULL;
5348 				    nextvlan = nextvlan->gldv_next) {
5349 					for (next = nextvlan->gldv_str_next;
5350 					    next !=
5351 					    (gld_t *)&nextvlan->gldv_str_next;
5352 					    next = next->gld_next) {
5353 						if (minor == next->gld_minor)
5354 							goto nextminor;
5355 					}
5356 				}
5357 			}
5358 		}
5359 
5360 		return (minor);
5361 nextminor:
5362 		/* don't need to do anything */
5363 		;
5364 	}
5365 	cmn_err(CE_WARN, "GLD ran out of minor numbers for %s",
5366 		device->gld_name);
5367 	return (0);
5368 }
5369 
5370 /*
5371  * version of insque/remque for use by this driver
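 *
 * These operate on circular, doubly-linked lists whose head is a bare
 * forward/back pointer pair initialized to point back at the head itself
 * (see the gldv_str_next/gldv_str_prev setup in gld_add_vlan() below).
 * gldinsque() links 'elem' immediately after 'pred'; gldremque() unlinks
 * an element and clears its link fields.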
5372  */
5373 struct qelem {
5374 	struct qelem *q_forw;
5375 	struct qelem *q_back;
5376 	/* rest of structure */
5377 };
5378 
5379 static void
5380 gldinsque(void *elem, void *pred)
5381 {
5382 	struct qelem *pelem = elem;
5383 	struct qelem *ppred = pred;
5384 	struct qelem *pnext = ppred->q_forw;
5385 
5386 	pelem->q_forw = pnext;
5387 	pelem->q_back = ppred;
5388 	ppred->q_forw = pelem;
5389 	pnext->q_back = pelem;
5390 }
5391 
5392 static void
5393 gldremque(void *arg)
5394 {
5395 	struct qelem *pelem = arg;
5396 	struct qelem *elem = arg;
5397 
5398 	pelem->q_forw->q_back = pelem->q_back;
5399 	pelem->q_back->q_forw = pelem->q_forw;
5400 	elem->q_back = elem->q_forw = NULL;
5401 }
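
/*
 * Illustrative use (a sketch, not an actual call site in this file):
 * with a list head initialized as in gld_add_vlan() below,
 *
 *	gldinsque(gld, (void *)&vlan->gldv_str_next);
 *
 * links 'gld' in at the front of the vlan's stream list, and
 *
 *	gldremque(gld);
 *
 * takes it back out.  A traversal then follows gld_next until it returns
 * to the head, as in gld_set_ipq() above.
 */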
5402 
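/*
 * gld_add_vlan (macinfo, vid)
 * Allocate a gld_vlan_t for 'vid' and link it into the per-MAC hash
 * bucket (vid % VLAN_HASHSZ).  The untagged entry (VLAN_VID_NONE) shares
 * the MAC's statistics block; a tagged VLAN gets its own statistics and
 * kstat.  Returns NULL if memory allocation or kstat setup fails.  No
 * locking is done here; callers must serialize additions for a MAC.
 */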
5403 static gld_vlan_t *
5404 gld_add_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5405 {
5406 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5407 	gld_vlan_t	**pp;
5408 	gld_vlan_t	*p;
5409 
5410 	pp = &(mac_pvt->vlan_hash[vid % VLAN_HASHSZ]);
5411 	while ((p = *pp) != NULL) {
5412 		ASSERT(p->gldv_id != vid);
5413 		pp = &(p->gldv_next);
5414 	}
5415 
5416 	if ((p = kmem_zalloc(sizeof (gld_vlan_t), KM_NOSLEEP)) == NULL)
5417 		return (NULL);
5418 
5419 	p->gldv_mac = macinfo;
5420 	p->gldv_id = vid;
5421 
5422 	if (vid == VLAN_VID_NONE) {
5423 		p->gldv_ptag = VLAN_VTAG_NONE;
5424 		p->gldv_stats = mac_pvt->statistics;
5425 		p->gldv_kstatp = NULL;
5426 	} else {
5427 		p->gldv_ptag = GLD_MK_PTAG(VLAN_CFI_ETHER, vid);
5428 		p->gldv_stats = kmem_zalloc(sizeof (struct gld_stats),
5429 		    KM_SLEEP);
5430 
5431 		if (gld_init_vlan_stats(p) != GLD_SUCCESS) {
5432 			kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5433 			kmem_free(p, sizeof (gld_vlan_t));
5434 			return (NULL);
5435 		}
5436 	}
5437 
5438 	p->gldv_str_next = p->gldv_str_prev = (gld_t *)&p->gldv_str_next;
5439 	mac_pvt->nvlan++;
5440 	*pp = p;
5441 
5442 	return (p);
5443 }
5444 
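/*
 * gld_rem_vlan (vlan)
 * Unlink the vlan from its hash bucket and free it.  A tagged VLAN's
 * private statistics block and kstat are released as well; the untagged
 * entry shares the MAC's statistics, which are left untouched.
 */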
5445 static void
5446 gld_rem_vlan(gld_vlan_t *vlan)
5447 {
5448 	gld_mac_info_t	*macinfo = vlan->gldv_mac;
5449 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5450 	gld_vlan_t	**pp;
5451 	gld_vlan_t	*p;
5452 
5453 	pp = &(mac_pvt->vlan_hash[vlan->gldv_id % VLAN_HASHSZ]);
5454 	while ((p = *pp) != NULL) {
5455 		if (p->gldv_id == vlan->gldv_id)
5456 			break;
5457 		pp = &(p->gldv_next);
5458 	}
5459 	ASSERT(p != NULL);
5460 
5461 	*pp = p->gldv_next;
5462 	mac_pvt->nvlan--;
5463 	if (p->gldv_id != VLAN_VID_NONE) {
5464 		ASSERT(p->gldv_kstatp != NULL);
5465 		kstat_delete(p->gldv_kstatp);
5466 		kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5467 	}
5468 	kmem_free(p, sizeof (gld_vlan_t));
5469 }
5470 
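/*
 * gld_find_vlan (macinfo, vid)
 * Look up 'vid' in the per-MAC VLAN hash; return the entry or NULL.
 */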
5471 gld_vlan_t *
5472 gld_find_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5473 {
5474 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5475 	gld_vlan_t	*p;
5476 
5477 	p = mac_pvt->vlan_hash[vid % VLAN_HASHSZ];
5478 	while (p != NULL) {
5479 		if (p->gldv_id == vid)
5480 			return (p);
5481 		p = p->gldv_next;
5482 	}
5483 	return (NULL);
5484 }
5485 
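/*
 * gld_get_vlan (macinfo, vid)
 * Find-or-create wrapper: return the existing entry for 'vid', or one
 * freshly added by gld_add_vlan(); NULL if it cannot be created.
 */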
5486 gld_vlan_t *
5487 gld_get_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5488 {
5489 	gld_vlan_t	*vlan;
5490 
5491 	if ((vlan = gld_find_vlan(macinfo, vid)) == NULL)
5492 		vlan = gld_add_vlan(macinfo, vid);
5493 
5494 	return (vlan);
5495 }
5496 
5497 /*
5498  * gld_bitrevcopy()
5499  * This is essentially bcopy, with the ability to bit reverse
5500  * the source bytes. The MAC address bytes as transmitted by FDDI
5501  * interfaces are bit reversed.
5502  */
5503 void
5504 gld_bitrevcopy(caddr_t src, caddr_t target, size_t n)
5505 {
5506 	while (n--)
5507 		*target++ = bit_rev[(uchar_t)*src++];
5508 }
5509 
5510 /*
5511  * gld_bitreverse()
5512  * Convert the bit order by swapping all the bits, using a
5513  * lookup table.
5514  */
5515 void
5516 gld_bitreverse(uchar_t *rptr, size_t n)
5517 {
5518 	while (n--) {
5519 		*rptr = bit_rev[*rptr];
5520 		rptr++;
5521 	}
5522 }
5523 
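/*
 * gld_macaddr_sprintf (etherbuf, ap, len)
 * Format 'len' address bytes as colon-separated lowercase hex into
 * 'etherbuf' and return it.  The buffer must hold at least 3 * len
 * bytes, and 'len' must be at least 1, since the final separator is
 * overwritten with the terminating NUL.
 */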
5524 char *
5525 gld_macaddr_sprintf(char *etherbuf, unsigned char *ap, int len)
5526 {
5527 	int i;
5528 	char *cp = etherbuf;
5529 	static char digits[] = "0123456789abcdef";
5530 
5531 	for (i = 0; i < len; i++) {
5532 		*cp++ = digits[*ap >> 4];
5533 		*cp++ = digits[*ap++ & 0xf];
5534 		*cp++ = ':';
5535 	}
5536 	*--cp = 0;
5537 	return (etherbuf);
5538 }
5539 
5540 #ifdef GLD_DEBUG
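/*
 * gld_check_assertions()
 * Debug-only consistency walk: taking the device and per-MAC locks as it
 * goes, visit every device, unattached stream, MAC, VLAN and attached
 * stream, and ASSERT the invariants each is expected to maintain.
 */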
5541 static void
5542 gld_check_assertions()
5543 {
5544 	glddev_t	*dev;
5545 	gld_mac_info_t	*mac;
5546 	gld_t		*str;
5547 	gld_vlan_t	*vlan;
5548 	int		i;
5549 
5550 	mutex_enter(&gld_device_list.gld_devlock);
5551 
5552 	for (dev = gld_device_list.gld_next;
5553 	    dev != (glddev_t *)&gld_device_list.gld_next;
5554 	    dev = dev->gld_next) {
5555 		mutex_enter(&dev->gld_devlock);
5556 		ASSERT(dev->gld_broadcast != NULL);
5557 		for (str = dev->gld_str_next;
5558 		    str != (gld_t *)&dev->gld_str_next;
5559 		    str = str->gld_next) {
5560 			ASSERT(str->gld_device == dev);
5561 			ASSERT(str->gld_mac_info == NULL);
5562 			ASSERT(str->gld_qptr != NULL);
5563 			ASSERT(str->gld_minor >= GLD_MIN_CLONE_MINOR);
5564 			ASSERT(str->gld_multicnt == 0);
5565 			ASSERT(str->gld_mcast == NULL);
5566 			ASSERT(!(str->gld_flags &
5567 			    (GLD_PROM_PHYS|GLD_PROM_MULT|GLD_PROM_SAP)));
5568 			ASSERT(str->gld_sap == 0);
5569 			ASSERT(str->gld_state == DL_UNATTACHED);
5570 		}
5571 		for (mac = dev->gld_mac_next;
5572 		    mac != (gld_mac_info_t *)&dev->gld_mac_next;
5573 		    mac = mac->gldm_next) {
5574 			int nvlan = 0;
5575 			gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
5576 
5577 			if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
5578 				continue;	/* this one's not ready yet */
5579 
5580 			GLDM_LOCK(mac, RW_WRITER);
5581 			ASSERT(mac->gldm_devinfo != NULL);
5582 			ASSERT(mac->gldm_mac_pvt != NULL);
5583 			ASSERT(pvt->interfacep != NULL);
5584 			ASSERT(pvt->kstatp != NULL);
5585 			ASSERT(pvt->statistics != NULL);
5586 			ASSERT(pvt->major_dev == dev);
5587 
5588 			for (i = 0; i < VLAN_HASHSZ; i++) {
5589 				for (vlan = pvt->vlan_hash[i];
5590 				    vlan != NULL; vlan = vlan->gldv_next) {
5591 					int nstr = 0;
5592 
5593 					ASSERT(vlan->gldv_mac == mac);
5594 
5595 					for (str = vlan->gldv_str_next;
5596 					    str !=
5597 					    (gld_t *)&vlan->gldv_str_next;
5598 					    str = str->gld_next) {
5599 						ASSERT(str->gld_device == dev);
5600 						ASSERT(str->gld_mac_info ==
5601 						    mac);
5602 						ASSERT(str->gld_qptr != NULL);
5603 						ASSERT(str->gld_minor >=
5604 						    GLD_MIN_CLONE_MINOR);
5605 						ASSERT(
5606 						    str->gld_multicnt == 0 ||
5607 						    str->gld_mcast);
5608 						nstr++;
5609 					}
5610 					ASSERT(vlan->gldv_nstreams == nstr);
5611 					nvlan++;
5612 				}
5613 			}
5614 			ASSERT(pvt->nvlan == nvlan);
5615 			GLDM_UNLOCK(mac);
5616 		}
5617 		mutex_exit(&dev->gld_devlock);
5618 	}
5619 	mutex_exit(&gld_device_list.gld_devlock);
5620 }
5621 #endif
5622