1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * gld - Generic LAN Driver Version 2, PSARC/1997/382
30  *
31  * This is a utility module that provides generic facilities for
32  * LAN drivers.  The DLPI protocol and most STREAMS interfaces
33  * are handled here.
34  *
35  * It no longer provides compatibility with drivers
36  * implemented according to the GLD v0 documentation published
37  * in 1993. (See PSARC 2003/728)
38  */
39 
40 
41 #include <sys/types.h>
42 #include <sys/errno.h>
43 #include <sys/stropts.h>
44 #include <sys/stream.h>
45 #include <sys/kmem.h>
46 #include <sys/stat.h>
47 #include <sys/modctl.h>
48 #include <sys/kstat.h>
49 #include <sys/debug.h>
50 #include <sys/note.h>
51 #include <sys/sysmacros.h>
52 
53 #include <sys/byteorder.h>
54 #include <sys/strsun.h>
55 #include <sys/strsubr.h>
56 #include <sys/dlpi.h>
57 #include <sys/pattr.h>
58 #include <sys/ethernet.h>
59 #include <sys/ib/clients/ibd/ibd.h>
60 #include <sys/policy.h>
61 #include <sys/atomic.h>
62 
63 #include <sys/multidata.h>
64 #include <sys/gld.h>
65 #include <sys/gldpriv.h>
66 
67 #include <sys/ddi.h>
68 #include <sys/sunddi.h>
69 
70 /*
71  * Macro to atomically increment counters of type uint32_t, uint64_t
72  * and ulong_t.
73  */
74 #define	BUMP(stat, delta)	do {				\
75 	_NOTE(CONSTANTCONDITION)				\
76 	if (sizeof (stat) == sizeof (uint32_t))	{		\
77 		atomic_add_32((uint32_t *)&stat, delta);	\
78 	_NOTE(CONSTANTCONDITION)				\
79 	} else if (sizeof (stat) == sizeof (uint64_t)) {	\
80 		atomic_add_64((uint64_t *)&stat, delta);	\
81 	}							\
82 	_NOTE(CONSTANTCONDITION)				\
83 } while (0)
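/*
 * The _NOTE(CONSTANTCONDITION) annotations tell lint that the sizeof
 * comparisons (and the while (0)) are intentionally constant; ulong_t
 * counters are covered because ulong_t always has the same size as
 * either uint32_t or uint64_t.
 */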
84 
85 #define	UPDATE_STATS(vlan, pktinfo, number)	{		\
86 	if ((pktinfo).isBroadcast)				\
87 		(vlan)->gldv_stats->glds_brdcstxmt += (number);	\
88 	else if ((pktinfo).isMulticast)				\
89 		(vlan)->gldv_stats->glds_multixmt += (number);	\
90 	(vlan)->gldv_stats->glds_bytexmt64 += (pktinfo).pktLen;	\
91 	(vlan)->gldv_stats->glds_pktxmt64 += (number);		\
92 }
93 
94 #ifdef GLD_DEBUG
95 int gld_debug = GLDERRS;
96 #endif
97 
98 /* called from gld_register */
99 static int gld_initstats(gld_mac_info_t *);
100 
101 /* called from kstat mechanism, and from wsrv's get_statistics */
102 static int gld_update_kstat(kstat_t *, int);
103 
104 /* statistics for additional vlans */
105 static int gld_init_vlan_stats(gld_vlan_t *);
106 static int gld_update_vlan_kstat(kstat_t *, int);
107 
108 /* called from gld_getinfo */
109 static dev_info_t *gld_finddevinfo(dev_t);
110 
111 /* called from wput, wsrv, unidata, and v0_sched to send a packet */
112 /* also from the source routing stuff for sending RDE protocol packets */
113 static int gld_start(queue_t *, mblk_t *, int, uint32_t);
114 static int gld_start_mdt(queue_t *, mblk_t *, int);
115 
116 /* called from gld_start[_mdt] to loopback packet(s) in promiscuous mode */
117 static void gld_precv(gld_mac_info_t *, gld_vlan_t *, mblk_t *);
118 static void gld_precv_mdt(gld_mac_info_t *, gld_vlan_t *, mblk_t *,
119     pdesc_t *, pktinfo_t *);
120 
121 /* receive group: called from gld_recv and gld_precv* with maclock held */
122 static void gld_sendup(gld_mac_info_t *, gld_vlan_t *, pktinfo_t *, mblk_t *,
123     int (*)());
124 static int gld_accept(gld_t *, pktinfo_t *);
125 static int gld_mcmatch(gld_t *, pktinfo_t *);
126 static int gld_multicast(unsigned char *, gld_t *);
127 static int gld_paccept(gld_t *, pktinfo_t *);
128 static void gld_passon(gld_t *, mblk_t *, pktinfo_t *,
129     void (*)(queue_t *, mblk_t *));
130 static mblk_t *gld_addudind(gld_t *, mblk_t *, pktinfo_t *);
131 
132 /* wsrv group: called from wsrv, single threaded per queue */
133 static int gld_ioctl(queue_t *, mblk_t *);
134 static void gld_fastpath(gld_t *, queue_t *, mblk_t *);
135 static int gld_cmds(queue_t *, mblk_t *);
136 static mblk_t *gld_bindack(queue_t *, mblk_t *);
137 static int gld_notify_req(queue_t *, mblk_t *);
138 static int gld_udqos(queue_t *, mblk_t *);
139 static int gld_bind(queue_t *, mblk_t *);
140 static int gld_unbind(queue_t *, mblk_t *);
141 static int gld_inforeq(queue_t *, mblk_t *);
142 static int gld_unitdata(queue_t *, mblk_t *);
143 static int gldattach(queue_t *, mblk_t *);
144 static int gldunattach(queue_t *, mblk_t *);
145 static int gld_enable_multi(queue_t *, mblk_t *);
146 static int gld_disable_multi(queue_t *, mblk_t *);
147 static void gld_send_disable_multi(gld_mac_info_t *, gld_mcast_t *);
148 static int gld_promisc(queue_t *, mblk_t *, t_uscalar_t, boolean_t);
149 static int gld_physaddr(queue_t *, mblk_t *);
150 static int gld_setaddr(queue_t *, mblk_t *);
151 static int gld_get_statistics(queue_t *, mblk_t *);
152 static int gld_cap(queue_t *, mblk_t *);
153 static int gld_cap_ack(queue_t *, mblk_t *);
154 static int gld_cap_enable(queue_t *, mblk_t *);
155 
156 /* misc utilities, some requiring various mutexes held */
157 static int gld_start_mac(gld_mac_info_t *);
158 static void gld_stop_mac(gld_mac_info_t *);
159 static void gld_set_ipq(gld_t *);
160 static void gld_flushqueue(queue_t *);
161 static glddev_t *gld_devlookup(int);
162 static int gld_findminor(glddev_t *);
163 static void gldinsque(void *, void *);
164 static void gldremque(void *);
165 void gld_bitrevcopy(caddr_t, caddr_t, size_t);
166 void gld_bitreverse(uchar_t *, size_t);
167 char *gld_macaddr_sprintf(char *, unsigned char *, int);
168 static gld_vlan_t *gld_add_vlan(gld_mac_info_t *, uint32_t vid);
169 static void gld_rem_vlan(gld_vlan_t *);
170 gld_vlan_t *gld_find_vlan(gld_mac_info_t *, uint32_t);
171 gld_vlan_t *gld_get_vlan(gld_mac_info_t *, uint32_t);
172 
173 #ifdef GLD_DEBUG
174 static void gld_check_assertions(void);
175 extern void gld_sr_dump(gld_mac_info_t *);
176 #endif
177 
178 /*
179  * Allocate and zero-out "number" structures each of type "structure" in
180  * kernel memory.
181  */
182 #define	GETSTRUCT(structure, number)   \
183 	(kmem_zalloc((uint_t)(sizeof (structure) * (number)), KM_NOSLEEP))
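/*
 * Note that GETSTRUCT uses KM_NOSLEEP, so callers must be prepared
 * for a NULL return (see the glddev_t allocation in gld_register()).
 */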
184 
185 #define	abs(a) ((a) < 0 ? -(a) : (a))
186 
187 uint32_t gld_global_options = GLD_OPT_NO_ETHRXSNAP;
188 
189 /*
190  * VLANs are only supported on ethernet devices that manipulate VLAN headers
191  * themselves.
192  */
193 #define	VLAN_CAPABLE(macinfo) \
194 	((macinfo)->gldm_type == DL_ETHER && \
195 	(macinfo)->gldm_send_tagged != NULL)
196 
197 /*
198  * The set of notifications generatable by GLD itself, the additional
199  * set that can be generated if the MAC driver provides the link-state
200  * tracking callback capability, and the set supported by the GLD
201  * notification code below.
202  *
203  * PLEASE keep these in sync with what the code actually does!
204  */
205 static const uint32_t gld_internal_notes =	DL_NOTE_PROMISC_ON_PHYS |
206 						DL_NOTE_PROMISC_OFF_PHYS |
207 						DL_NOTE_PHYS_ADDR;
208 static const uint32_t gld_linkstate_notes =	DL_NOTE_LINK_DOWN |
209 						DL_NOTE_LINK_UP |
210 						DL_NOTE_SPEED;
211 static const uint32_t gld_supported_notes =	DL_NOTE_PROMISC_ON_PHYS |
212 						DL_NOTE_PROMISC_OFF_PHYS |
213 						DL_NOTE_PHYS_ADDR |
214 						DL_NOTE_LINK_DOWN |
215 						DL_NOTE_LINK_UP |
216 						DL_NOTE_SPEED;
217 
218 /* Media must correspond to #defines in gld.h */
219 static char *gld_media[] = {
220 	"unknown",	/* GLDM_UNKNOWN - driver cannot determine media */
221 	"aui",		/* GLDM_AUI */
222 	"bnc",		/* GLDM_BNC */
223 	"twpair",	/* GLDM_TP */
224 	"fiber",	/* GLDM_FIBER */
225 	"100baseT",	/* GLDM_100BT */
226 	"100vgAnyLan",	/* GLDM_VGANYLAN */
227 	"10baseT",	/* GLDM_10BT */
228 	"ring4",	/* GLDM_RING4 */
229 	"ring16",	/* GLDM_RING16 */
230 	"PHY/MII",	/* GLDM_PHYMII */
231 	"100baseTX",	/* GLDM_100BTX */
232 	"100baseT4",	/* GLDM_100BT4 */
233 	"unknown",	/* skip */
234 	"ipib",		/* GLDM_IB */
235 };
236 
237 /* Must correspond to #defines in gld.h */
238 static char *gld_duplex[] = {
239 	"unknown",	/* GLD_DUPLEX_UNKNOWN - not known or not applicable */
240 	"half",		/* GLD_DUPLEX_HALF */
241 	"full"		/* GLD_DUPLEX_FULL */
242 };
243 
244 extern int gld_interpret_ether(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
245 extern int gld_interpret_fddi(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
246 extern int gld_interpret_tr(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
247 extern int gld_interpret_ib(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
248 extern void gld_interpret_mdt_ib(gld_mac_info_t *, mblk_t *, pdescinfo_t *,
249     pktinfo_t *, int);
250 
251 extern mblk_t *gld_fastpath_ether(gld_t *, mblk_t *);
252 extern mblk_t *gld_fastpath_fddi(gld_t *, mblk_t *);
253 extern mblk_t *gld_fastpath_tr(gld_t *, mblk_t *);
254 extern mblk_t *gld_fastpath_ib(gld_t *, mblk_t *);
255 
256 extern mblk_t *gld_unitdata_ether(gld_t *, mblk_t *);
257 extern mblk_t *gld_unitdata_fddi(gld_t *, mblk_t *);
258 extern mblk_t *gld_unitdata_tr(gld_t *, mblk_t *);
259 extern mblk_t *gld_unitdata_ib(gld_t *, mblk_t *);
260 
261 extern void gld_init_ether(gld_mac_info_t *);
262 extern void gld_init_fddi(gld_mac_info_t *);
263 extern void gld_init_tr(gld_mac_info_t *);
264 extern void gld_init_ib(gld_mac_info_t *);
265 
266 extern void gld_uninit_ether(gld_mac_info_t *);
267 extern void gld_uninit_fddi(gld_mac_info_t *);
268 extern void gld_uninit_tr(gld_mac_info_t *);
269 extern void gld_uninit_ib(gld_mac_info_t *);
270 
271 /*
272  * Interface types currently supported by GLD.
273  * If you add new types, you must check all "XXX" strings in the GLD source
274  * for implementation issues that may affect the support of your new type.
275  * In particular, any type with gldm_addrlen > 6, or gldm_saplen != -2, will
276  * require generalizing this GLD source to handle the new cases.  In other
277  * words there are assumptions built into the code in a few places that must
278  * be fixed.  Be sure to turn on DEBUG/ASSERT code when testing a new type.
279  */
280 static gld_interface_t interfaces[] = {
281 
282 	/* Ethernet Bus */
283 	{
284 		DL_ETHER,
285 		(uint_t)-1,
286 		sizeof (struct ether_mac_frm),
287 		gld_interpret_ether,
288 		NULL,
289 		gld_fastpath_ether,
290 		gld_unitdata_ether,
291 		gld_init_ether,
292 		gld_uninit_ether,
293 		"ether"
294 	},
295 
296 	/* Fiber Distributed data interface */
297 	{
298 		DL_FDDI,
299 		4352,
300 		sizeof (struct fddi_mac_frm),
301 		gld_interpret_fddi,
302 		NULL,
303 		gld_fastpath_fddi,
304 		gld_unitdata_fddi,
305 		gld_init_fddi,
306 		gld_uninit_fddi,
307 		"fddi"
308 	},
309 
310 	/* Token Ring interface */
311 	{
312 		DL_TPR,
313 		17914,
314 		-1,			/* variable header size */
315 		gld_interpret_tr,
316 		NULL,
317 		gld_fastpath_tr,
318 		gld_unitdata_tr,
319 		gld_init_tr,
320 		gld_uninit_tr,
321 		"tpr"
322 	},
323 
324 	/* Infiniband */
325 	{
326 		DL_IB,
327 		4092,
328 		sizeof (struct ipoib_header),
329 		gld_interpret_ib,
330 		gld_interpret_mdt_ib,
331 		gld_fastpath_ib,
332 		gld_unitdata_ib,
333 		gld_init_ib,
334 		gld_uninit_ib,
335 		"ipib"
336 	},
337 };
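/*
 * For reference, the positional fields in each entry above are: DLPI
 * mac type, maximum SDU, media header size (-1 if variable), packet
 * interpreter, MDT packet interpreter (NULL if MDT is unsupported),
 * fastpath header constructor, DL_UNITDATA_REQ header constructor,
 * per-interface init/uninit hooks, and the name string reported in
 * the attach announcement.
 */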
338 
339 /*
340  * bit reversal lookup table.
341  */
342 static	uchar_t bit_rev[] = {
343 	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0,
344 	0x30, 0xb0, 0x70, 0xf0, 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
345 	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 0x04, 0x84, 0x44, 0xc4,
346 	0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
347 	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc,
348 	0x3c, 0xbc, 0x7c, 0xfc, 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
349 	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, 0x0a, 0x8a, 0x4a, 0xca,
350 	0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
351 	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6,
352 	0x36, 0xb6, 0x76, 0xf6, 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
353 	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 0x01, 0x81, 0x41, 0xc1,
354 	0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
355 	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9,
356 	0x39, 0xb9, 0x79, 0xf9, 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
357 	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, 0x0d, 0x8d, 0x4d, 0xcd,
358 	0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
359 	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3,
360 	0x33, 0xb3, 0x73, 0xf3, 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
361 	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 0x07, 0x87, 0x47, 0xc7,
362 	0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
363 	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf,
364 	0x3f, 0xbf, 0x7f, 0xff,
365 };
366 
367 /*
368  * User priorities, mapped from b_band.
369  */
370 static uint32_t user_priority[] = {
371 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
372 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
373 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
374 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
375 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
376 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
377 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
378 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
379 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
380 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
381 	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
382 	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
383 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
384 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
385 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
386 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
387 };
388 
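/*
 * Each of the eight 802.1p user priorities covers a block of 32
 * consecutive b_band values (0-31 map to 0, 32-63 to 1, ..., 224-255
 * to 7).  UPRI() below consults this table for nonzero bands and
 * falls back to the stream's default priority (gld_upri) when b_band
 * is zero.
 */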
389 #define	UPRI(gld, band)	((band != 0) ? user_priority[(band)] : (gld)->gld_upri)
390 
391 static struct glddevice gld_device_list;  /* Per-system root of GLD tables */
392 
393 /*
394  * Module linkage information for the kernel.
395  */
396 
397 static struct modlmisc modlmisc = {
398 	&mod_miscops,		/* Type of module - a utility provider */
399 	"Generic LAN Driver (" GLD_VERSION_STRING ") %I%"
400 #ifdef GLD_DEBUG
401 	" DEBUG"
402 #endif
403 };
404 
405 static struct modlinkage modlinkage = {
406 	MODREV_1, &modlmisc, NULL
407 };
408 
409 int
410 _init(void)
411 {
412 	int e;
413 
414 	/* initialize gld_device_list mutex */
415 	mutex_init(&gld_device_list.gld_devlock, NULL, MUTEX_DRIVER, NULL);
416 
417 	/* initialize device driver (per-major) list */
418 	gld_device_list.gld_next =
419 	    gld_device_list.gld_prev = &gld_device_list;
420 
421 	if ((e = mod_install(&modlinkage)) != 0)
422 		mutex_destroy(&gld_device_list.gld_devlock);
423 
424 	return (e);
425 }
426 
427 int
428 _fini(void)
429 {
430 	int e;
431 
432 	if ((e = mod_remove(&modlinkage)) != 0)
433 		return (e);
434 
435 	ASSERT(gld_device_list.gld_next ==
436 	    (glddev_t *)&gld_device_list.gld_next);
437 	ASSERT(gld_device_list.gld_prev ==
438 	    (glddev_t *)&gld_device_list.gld_next);
439 	mutex_destroy(&gld_device_list.gld_devlock);
440 
441 	return (e);
442 }
443 
444 int
445 _info(struct modinfo *modinfop)
446 {
447 	return (mod_info(&modlinkage, modinfop));
448 }
449 
450 /*
451  * GLD service routines
452  */
453 
454 /* So this gld binary may be forward compatible with future v2 drivers */
455 #define	GLD_MAC_RESERVED (16 * sizeof (caddr_t))
456 
457 /*ARGSUSED*/
458 gld_mac_info_t *
459 gld_mac_alloc(dev_info_t *devinfo)
460 {
461 	gld_mac_info_t *macinfo;
462 
463 	macinfo = kmem_zalloc(sizeof (gld_mac_info_t) + GLD_MAC_RESERVED,
464 	    KM_SLEEP);
465 
466 	/*
467 	 * The setting of gldm_driver_version will not be documented or allowed
468 	 * until a future release.
469 	 */
470 	macinfo->gldm_driver_version = GLD_VERSION_200;
471 
472 	/*
473 	 * GLD's version.  This also is undocumented for now, but will be
474 	 * available if needed in the future.
475 	 */
476 	macinfo->gldm_GLD_version = GLD_VERSION;
477 
478 	return (macinfo);
479 }
480 
481 /*
482  * gld_mac_free must be called after the driver has removed interrupts
483  * and completely stopped calling gld_recv() and gld_sched().  At that
484  * point the interrupt routine is guaranteed by the system to have been
485  * exited and the maclock is no longer needed.  Of course, it is
486  * expected (required) that, assuming gld_register() succeeded,
487  * gld_unregister() was called before gld_mac_free().
488  */
489 void
490 gld_mac_free(gld_mac_info_t *macinfo)
491 {
492 	ASSERT(macinfo);
493 	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
494 
495 	/*
496 	 * Assert that if we made it through gld_register, then we must
497 	 * have unregistered.
498 	 */
499 	ASSERT(!GLDM_LOCK_INITED(macinfo) ||
500 	    (macinfo->gldm_GLD_flags & GLD_UNREGISTERED));
501 
502 	GLDM_LOCK_DESTROY(macinfo);
503 
504 	kmem_free(macinfo, sizeof (gld_mac_info_t) + GLD_MAC_RESERVED);
505 }
506 
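/*
 * For illustration only (this sketch is not part of GLD): a driver's
 * attach(9E) routine is expected to allocate and fill in a macinfo
 * and then register it, roughly as below.  The xx_* names are
 * hypothetical driver-private symbols.
 *
 *	gld_mac_info_t *macinfo = gld_mac_alloc(devinfo);
 *
 *	macinfo->gldm_reset = xx_reset;
 *	macinfo->gldm_start = xx_start;
 *	macinfo->gldm_stop = xx_stop;
 *	macinfo->gldm_set_mac_addr = xx_set_mac_addr;
 *	macinfo->gldm_set_promiscuous = xx_set_promiscuous;
 *	macinfo->gldm_send = xx_send;
 *	macinfo->gldm_ident = "xx fast ethernet";
 *	macinfo->gldm_type = DL_ETHER;
 *	macinfo->gldm_minpkt = 0;
 *	macinfo->gldm_maxpkt = ETHERMTU;
 *	macinfo->gldm_addrlen = ETHERADDRL;
 *	macinfo->gldm_saplen = -2;
 *	macinfo->gldm_broadcast_addr = xx_broadcast_addr;
 *	macinfo->gldm_vendor_addr = xx_factory_addr;
 *	macinfo->gldm_ppa = ddi_get_instance(devinfo);
 *	macinfo->gldm_devinfo = devinfo;
 *	macinfo->gldm_private = (caddr_t)xx_softstate;
 *
 *	if (gld_register(devinfo, "xx", macinfo) != DDI_SUCCESS) {
 *		gld_mac_free(macinfo);
 *		return (DDI_FAILURE);
 *	}
 *
 * The matching detach(9E) path calls gld_unregister() and, only if
 * that succeeds, gld_mac_free().
 */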
507 /*
508  * gld_register -- called once per device instance (PPA)
509  *
510  * During its attach routine, a real device driver will register with GLD
511  * so that later opens and dl_attach_reqs will work.  The arguments are the
512  * devinfo pointer, the device name, and a macinfo structure describing the
513  * physical device instance.
514  */
515 int
516 gld_register(dev_info_t *devinfo, char *devname, gld_mac_info_t *macinfo)
517 {
518 	int mediatype;
519 	int major = ddi_name_to_major(devname), i;
520 	glddev_t *glddev;
521 	gld_mac_pvt_t *mac_pvt;
522 	char minordev[32];
523 	char pbuf[3*GLD_MAX_ADDRLEN];
524 	gld_interface_t *ifp;
525 
526 	ASSERT(devinfo != NULL);
527 	ASSERT(macinfo != NULL);
528 
529 	if (macinfo->gldm_driver_version != GLD_VERSION)
530 		return (DDI_FAILURE);
531 
532 	mediatype = macinfo->gldm_type;
533 
534 	/*
535 	 * Entry points should be ready for us.
536 	 * ioctl is optional.
537 	 * set_multicast and get_stats are optional.
538 	 * intr is only required if you add an interrupt.
539 	 */
540 	ASSERT(macinfo->gldm_reset != NULL);
541 	ASSERT(macinfo->gldm_start != NULL);
542 	ASSERT(macinfo->gldm_stop != NULL);
543 	ASSERT(macinfo->gldm_set_mac_addr != NULL);
544 	ASSERT(macinfo->gldm_set_promiscuous != NULL);
545 	ASSERT(macinfo->gldm_send != NULL);
546 
547 	ASSERT(macinfo->gldm_maxpkt >= macinfo->gldm_minpkt);
548 	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
549 	ASSERT(macinfo->gldm_broadcast_addr != NULL);
550 	ASSERT(macinfo->gldm_vendor_addr != NULL);
551 	ASSERT(macinfo->gldm_ident != NULL);
552 
553 	if (macinfo->gldm_addrlen > GLD_MAX_ADDRLEN) {
554 		cmn_err(CE_WARN, "GLD: %s driver gldm_addrlen %d > %d not sup"
555 		    "ported", devname, macinfo->gldm_addrlen, GLD_MAX_ADDRLEN);
556 		return (DDI_FAILURE);
557 	}
558 
559 	/*
560 	 * GLD only functions properly with saplen == -2
561 	 */
562 	if (macinfo->gldm_saplen != -2) {
563 		cmn_err(CE_WARN, "GLD: %s driver gldm_saplen %d != -2 "
564 		    "not supported", devname, macinfo->gldm_saplen);
565 		return (DDI_FAILURE);
566 	}
567 
568 	/* see gld_rsrv() */
569 	if (ddi_getprop(DDI_DEV_T_NONE, devinfo, 0, "fast_recv", 0))
570 		macinfo->gldm_options |= GLDOPT_FAST_RECV;
571 
572 	mutex_enter(&gld_device_list.gld_devlock);
573 	glddev = gld_devlookup(major);
574 
575 	/*
576 	 *  Allocate per-driver (major) data structure if necessary
577 	 */
578 	if (glddev == NULL) {
579 		/* first occurrence of this device name (major number) */
580 		glddev = GETSTRUCT(glddev_t, 1);
581 		if (glddev == NULL) {
582 			mutex_exit(&gld_device_list.gld_devlock);
583 			return (DDI_FAILURE);
584 		}
585 		(void) strncpy(glddev->gld_name, devname,
586 		    sizeof (glddev->gld_name) - 1);
587 		glddev->gld_major = major;
588 		glddev->gld_nextminor = GLD_MIN_CLONE_MINOR;
589 		glddev->gld_mac_next = glddev->gld_mac_prev =
590 			(gld_mac_info_t *)&glddev->gld_mac_next;
591 		glddev->gld_str_next = glddev->gld_str_prev =
592 			(gld_t *)&glddev->gld_str_next;
593 		mutex_init(&glddev->gld_devlock, NULL, MUTEX_DRIVER, NULL);
594 
595 		/* allow increase of number of supported multicast addrs */
596 		glddev->gld_multisize = ddi_getprop(DDI_DEV_T_NONE,
597 		    devinfo, 0, "multisize", GLD_MAX_MULTICAST);
598 
599 		/*
600 		 * Optionally restrict DLPI provider style
601 		 *
602 		 * -1 - don't create style 1 nodes
603 		 * -2 - don't create style 2 nodes
604 		 */
605 		glddev->gld_styles = ddi_getprop(DDI_DEV_T_NONE, devinfo, 0,
606 		    "gld-provider-styles", 0);
607 
608 		/* Stuff that's needed before any PPA gets attached */
609 		glddev->gld_type = macinfo->gldm_type;
610 		glddev->gld_minsdu = macinfo->gldm_minpkt;
611 		glddev->gld_saplen = macinfo->gldm_saplen;
612 		glddev->gld_addrlen = macinfo->gldm_addrlen;
613 		glddev->gld_broadcast = kmem_zalloc(macinfo->gldm_addrlen,
614 		    KM_SLEEP);
615 		bcopy(macinfo->gldm_broadcast_addr,
616 		    glddev->gld_broadcast, macinfo->gldm_addrlen);
617 		glddev->gld_maxsdu = macinfo->gldm_maxpkt;
618 		gldinsque(glddev, gld_device_list.gld_prev);
619 	}
620 	glddev->gld_ndevice++;
621 	/* Now glddev can't go away until we unregister this mac (or fail) */
622 	mutex_exit(&gld_device_list.gld_devlock);
623 
624 	/*
625 	 *  Per-instance initialization
626 	 */
627 
628 	/*
629 	 * Initialize per-mac structure that is private to GLD.
630 	 * Set up interface pointer. These are device class specific pointers
631 	 * used to handle FDDI/TR/ETHER/IPoIB specific packets.
632 	 */
633 	for (i = 0; i < sizeof (interfaces)/sizeof (*interfaces); i++) {
634 		if (mediatype != interfaces[i].mac_type)
635 			continue;
636 
637 		macinfo->gldm_mac_pvt = kmem_zalloc(sizeof (gld_mac_pvt_t),
638 		    KM_SLEEP);
639 		((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep = ifp =
640 		    &interfaces[i];
641 		break;
642 	}
643 
644 	if (ifp == NULL) {
645 		cmn_err(CE_WARN, "GLD: this version does not support %s driver "
646 		    "of type %d", devname, mediatype);
647 		goto failure;
648 	}
649 
650 	/*
651 	 * Driver can only register MTU within legal media range.
652 	 */
653 	if (macinfo->gldm_maxpkt > ifp->mtu_size) {
654 		cmn_err(CE_WARN, "GLD: oversize MTU is specified by driver %s",
655 		    devname);
656 		goto failure;
657 	}
658 
659 	/*
660 	 * For now, only Infiniband drivers can use MDT. Do not add
661 	 * support for Ethernet, FDDI or TR.
662 	 */
663 	if (macinfo->gldm_mdt_pre != NULL) {
664 		if (mediatype != DL_IB) {
665 			cmn_err(CE_WARN, "GLD: MDT not supported for %s "
666 			    "driver of type %d", devname, mediatype);
667 			goto failure;
668 		}
669 
670 		/*
671 		 * Validate entry points.
672 		 */
673 		if ((macinfo->gldm_mdt_send == NULL) ||
674 		    (macinfo->gldm_mdt_post == NULL)) {
675 			cmn_err(CE_WARN, "GLD: invalid MDT entry points for "
676 			    "%s driver of type %d", devname, mediatype);
677 			goto failure;
678 		}
679 		macinfo->gldm_options |= GLDOPT_MDT;
680 	}
681 
682 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
683 	mac_pvt->major_dev = glddev;
684 
685 	mac_pvt->curr_macaddr = kmem_zalloc(macinfo->gldm_addrlen, KM_SLEEP);
686 	/*
687 	 * XXX Do bit-reversed devices store gldm_vendor in canonical
688 	 * format or in wire format?  Also gldm_broadcast.  For now
689 	 * we are assuming canonical, but I'm not sure that makes the
690 	 * most sense for ease of driver implementation.
691 	 */
692 	bcopy(macinfo->gldm_vendor_addr, mac_pvt->curr_macaddr,
693 	    macinfo->gldm_addrlen);
694 	mac_pvt->statistics = kmem_zalloc(sizeof (struct gld_stats), KM_SLEEP);
695 
696 	/*
697 	 * The available set of notifications is those generatable by GLD
698 	 * itself, plus those corresponding to the capabilities of the MAC
699 	 * driver, intersected with those supported by gld_notify_ind() above.
700 	 */
701 	mac_pvt->notifications = gld_internal_notes;
702 	if (macinfo->gldm_capabilities & GLD_CAP_LINKSTATE)
703 		mac_pvt->notifications |= gld_linkstate_notes;
704 	mac_pvt->notifications &= gld_supported_notes;
705 
706 	GLDM_LOCK_INIT(macinfo);
707 
708 	ddi_set_driver_private(devinfo, macinfo);
709 
710 	/*
711 	 * Now atomically get a PPA and put ourselves on the mac list.
712 	 */
713 	mutex_enter(&glddev->gld_devlock);
714 
715 #ifdef DEBUG
716 	if (macinfo->gldm_ppa != ddi_get_instance(devinfo))
717 		cmn_err(CE_WARN, "%s%d instance != ppa %d",
718 		    ddi_driver_name(devinfo), ddi_get_instance(devinfo),
719 		    macinfo->gldm_ppa);
720 #endif
721 
722 	/*
723 	 * Create style 2 node (gated by gld-provider-styles property).
724 	 *
725 	 * NOTE: When the CLONE_DEV flag is specified to
726 	 *	 ddi_create_minor_node() the minor number argument is
727 	 *	 immaterial. Opens of that node will go via the clone
728 	 *	 driver and gld_open() will always be passed a dev_t with
729 	 *	 minor of zero.
730 	 */
731 	if (glddev->gld_styles != -2) {
732 		if (ddi_create_minor_node(devinfo, glddev->gld_name, S_IFCHR,
733 		    0, DDI_NT_NET, CLONE_DEV) == DDI_FAILURE) {
734 			mutex_exit(&glddev->gld_devlock);
735 			goto late_failure;
736 		}
737 	}
738 
739 	/*
740 	 * Create style 1 node (gated by gld-provider-styles property)
741 	 */
742 	if (glddev->gld_styles != -1) {
743 		(void) sprintf(minordev, "%s%d", glddev->gld_name,
744 		    macinfo->gldm_ppa);
745 		if (ddi_create_minor_node(devinfo, minordev, S_IFCHR,
746 		    GLD_STYLE1_PPA_TO_MINOR(macinfo->gldm_ppa), DDI_NT_NET,
747 		    0) != DDI_SUCCESS) {
748 			mutex_exit(&glddev->gld_devlock);
749 			goto late_failure;
750 		}
751 	}
752 
753 	/* add ourselves to this major device's linked list of instances */
754 	gldinsque(macinfo, glddev->gld_mac_prev);
755 
756 	mutex_exit(&glddev->gld_devlock);
757 
758 	/*
759 	 * Unfortunately we need the ppa before we call gld_initstats();
760 	 * otherwise we would have done this just above the mutex_enter
761 	 * above, in which case we could have set MAC_READY inside the
762 	 * mutex and wouldn't have needed to check it in open and
763 	 * DL_ATTACH.  We can't do the initstats/kstat_create
764 	 * inside the mutex because it might get taken in our kstat_update
765 	 * routine and cause a deadlock with kstat_chain_lock.
766 	 */
767 
768 	/* gld_initstats() calls (*ifp->init)() */
769 	if (gld_initstats(macinfo) != GLD_SUCCESS) {
770 		mutex_enter(&glddev->gld_devlock);
771 		gldremque(macinfo);
772 		mutex_exit(&glddev->gld_devlock);
773 		goto late_failure;
774 	}
775 
776 	/*
777 	 * Need to indicate we are NOW ready to process interrupts;
778 	 * any interrupt before this is set is for someone else.
779 	 * This flag is also used to tell open et al. that this
780 	 * mac is now fully ready and available for use.
781 	 */
782 	GLDM_LOCK(macinfo, RW_WRITER);
783 	macinfo->gldm_GLD_flags |= GLD_MAC_READY;
784 	GLDM_UNLOCK(macinfo);
785 
786 	/* log local ethernet address -- XXX not DDI compliant */
787 	if (macinfo->gldm_addrlen == sizeof (struct ether_addr))
788 		(void) localetheraddr(
789 		    (struct ether_addr *)macinfo->gldm_vendor_addr, NULL);
790 
791 	/* now put announcement into the message buffer */
792 	cmn_err(CE_CONT, "!%s%d: %s: type \"%s\" mac address %s\n",
793 	    glddev->gld_name,
794 	    macinfo->gldm_ppa, macinfo->gldm_ident,
795 	    mac_pvt->interfacep->mac_string,
796 	    gld_macaddr_sprintf(pbuf, macinfo->gldm_vendor_addr,
797 	    macinfo->gldm_addrlen));
798 
799 	ddi_report_dev(devinfo);
800 	return (DDI_SUCCESS);
801 
802 late_failure:
803 	ddi_remove_minor_node(devinfo, NULL);
804 	GLDM_LOCK_DESTROY(macinfo);
805 	if (mac_pvt->curr_macaddr != NULL)
806 	    kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
807 	if (mac_pvt->statistics != NULL)
808 	    kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
809 	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
810 	macinfo->gldm_mac_pvt = NULL;
811 
812 failure:
813 	mutex_enter(&gld_device_list.gld_devlock);
814 	glddev->gld_ndevice--;
815 	/*
816 	 * Note that just because this goes to zero here does not necessarily
817 	 * mean that we were the one who added the glddev above.  It's
818 	 * possible that the first mac unattached while we were in here
819 	 * failing to attach the second mac.  But we're now the last.
820 	 */
821 	if (glddev->gld_ndevice == 0) {
822 		/* There should be no macinfos left */
823 		ASSERT(glddev->gld_mac_next ==
824 		    (gld_mac_info_t *)&glddev->gld_mac_next);
825 		ASSERT(glddev->gld_mac_prev ==
826 		    (gld_mac_info_t *)&glddev->gld_mac_next);
827 
828 		/*
829 		 * There should be no DL_UNATTACHED streams: the system
830 		 * should not have detached the "first" devinfo which has
831 		 * all the open style 2 streams.
832 		 *
833 		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
834 		 */
835 		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
836 		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);
837 
838 		gldremque(glddev);
839 		mutex_destroy(&glddev->gld_devlock);
840 		if (glddev->gld_broadcast != NULL)
841 			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
842 		kmem_free(glddev, sizeof (glddev_t));
843 	}
844 	mutex_exit(&gld_device_list.gld_devlock);
845 
846 	return (DDI_FAILURE);
847 }
848 
849 /*
850  * gld_unregister (macinfo)
851  * remove the macinfo structure from local structures
852  * this is cleanup for a driver to be unloaded
853  */
854 int
855 gld_unregister(gld_mac_info_t *macinfo)
856 {
857 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
858 	glddev_t *glddev = mac_pvt->major_dev;
859 	gld_interface_t *ifp;
860 	int multisize = sizeof (gld_mcast_t) * glddev->gld_multisize;
861 
862 	mutex_enter(&glddev->gld_devlock);
863 	GLDM_LOCK(macinfo, RW_WRITER);
864 
865 	if (mac_pvt->nvlan > 0) {
866 		GLDM_UNLOCK(macinfo);
867 		mutex_exit(&glddev->gld_devlock);
868 		return (DDI_FAILURE);
869 	}
870 
871 #ifdef	GLD_DEBUG
872 	{
873 		int i;
874 
875 		for (i = 0; i < VLAN_HASHSZ; i++) {
876 			if ((mac_pvt->vlan_hash[i] != NULL))
877 				cmn_err(CE_PANIC,
878 				    "%s, line %d: "
879 				    "mac_pvt->vlan_hash[%d] != NULL",
880 				    __FILE__, __LINE__, i);
881 		}
882 	}
883 #endif
884 
885 	/* Delete this mac */
886 	gldremque(macinfo);
887 
888 	/* Disallow further entries to gld_recv() and gld_sched() */
889 	macinfo->gldm_GLD_flags |= GLD_UNREGISTERED;
890 
891 	GLDM_UNLOCK(macinfo);
892 	mutex_exit(&glddev->gld_devlock);
893 
894 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
895 	(*ifp->uninit)(macinfo);
896 
897 	ASSERT(mac_pvt->kstatp);
898 	kstat_delete(mac_pvt->kstatp);
899 
900 	ASSERT(GLDM_LOCK_INITED(macinfo));
901 	kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
902 	kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
903 
904 	if (mac_pvt->mcast_table != NULL)
905 		kmem_free(mac_pvt->mcast_table, multisize);
906 	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
907 	macinfo->gldm_mac_pvt = (caddr_t)NULL;
908 
909 	/* We now have one fewer instance for this major device */
910 	mutex_enter(&gld_device_list.gld_devlock);
911 	glddev->gld_ndevice--;
912 	if (glddev->gld_ndevice == 0) {
913 		/* There should be no macinfos left */
914 		ASSERT(glddev->gld_mac_next ==
915 		    (gld_mac_info_t *)&glddev->gld_mac_next);
916 		ASSERT(glddev->gld_mac_prev ==
917 		    (gld_mac_info_t *)&glddev->gld_mac_next);
918 
919 		/*
920 		 * There should be no DL_UNATTACHED streams: the system
921 		 * should not have detached the "first" devinfo which has
922 		 * all the open style 2 streams.
923 		 *
924 		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
925 		 */
926 		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
927 		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);
928 
929 		ddi_remove_minor_node(macinfo->gldm_devinfo, NULL);
930 		gldremque(glddev);
931 		mutex_destroy(&glddev->gld_devlock);
932 		if (glddev->gld_broadcast != NULL)
933 			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
934 		kmem_free(glddev, sizeof (glddev_t));
935 	}
936 	mutex_exit(&gld_device_list.gld_devlock);
937 
938 	return (DDI_SUCCESS);
939 }
940 
941 /*
942  * gld_initstats
943  * called from gld_register
944  */
945 static int
946 gld_initstats(gld_mac_info_t *macinfo)
947 {
948 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
949 	struct gldkstats *sp;
950 	glddev_t *glddev;
951 	kstat_t *ksp;
952 	gld_interface_t *ifp;
953 
954 	glddev = mac_pvt->major_dev;
955 
956 	if ((ksp = kstat_create(glddev->gld_name, macinfo->gldm_ppa,
957 	    NULL, "net", KSTAT_TYPE_NAMED,
958 	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
959 		cmn_err(CE_WARN,
960 		    "GLD: failed to create kstat structure for %s%d",
961 		    glddev->gld_name, macinfo->gldm_ppa);
962 		return (GLD_FAILURE);
963 	}
964 	mac_pvt->kstatp = ksp;
965 
966 	ksp->ks_update = gld_update_kstat;
967 	ksp->ks_private = (void *)macinfo;
968 
969 	sp = ksp->ks_data;
970 	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
971 	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
972 	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
973 	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
974 	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
975 	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
976 	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
977 	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
978 	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
979 	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
980 	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
981 	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
982 	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
983 	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
984 	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
985 	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
986 	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
987 	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
988 	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
989 	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
990 	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
991 	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
992 	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);
993 
994 	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
995 	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
996 	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);
997 
998 	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
999 	    KSTAT_DATA_UINT32);
1000 	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
1001 	    KSTAT_DATA_UINT32);
1002 
1003 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
1004 
1005 	(*ifp->init)(macinfo);
1006 
1007 	kstat_install(ksp);
1008 
1009 	return (GLD_SUCCESS);
1010 }
1011 
1012 /* called from kstat mechanism, and from wsrv's get_statistics_req */
1013 static int
1014 gld_update_kstat(kstat_t *ksp, int rw)
1015 {
1016 	gld_mac_info_t	*macinfo;
1017 	gld_mac_pvt_t	*mac_pvt;
1018 	struct gldkstats *gsp;
1019 	struct gld_stats *stats;
1020 
1021 	if (rw == KSTAT_WRITE)
1022 		return (EACCES);
1023 
1024 	macinfo = (gld_mac_info_t *)ksp->ks_private;
1025 	ASSERT(macinfo != NULL);
1026 
1027 	GLDM_LOCK(macinfo, RW_WRITER);
1028 
1029 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
1030 		GLDM_UNLOCK(macinfo);
1031 		return (EIO);	/* this one's not ready yet */
1032 	}
1033 
1034 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
1035 		GLDM_UNLOCK(macinfo);
1036 		return (EIO);	/* this one's not ready any more */
1037 	}
1038 
1039 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1040 	gsp = mac_pvt->kstatp->ks_data;
1041 	ASSERT(gsp);
1042 	stats = mac_pvt->statistics;
1043 
1044 	if (macinfo->gldm_get_stats)
1045 		(void) (*macinfo->gldm_get_stats)(macinfo, stats);
1046 
1047 	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
1048 	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
1049 	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
1050 	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
1051 	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;	/* 0 for now */
1052 	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
1053 
1054 	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
1055 	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
1056 	gsp->glds_xmtbadinterp.value.ui32 = stats->glds_xmtbadinterp;
1057 
1058 	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
1059 	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
1060 	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
1061 	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
1062 	gsp->glds_multircv.value.ul = stats->glds_multircv;
1063 	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
1064 	gsp->glds_blocked.value.ul = stats->glds_blocked;
1065 	gsp->glds_overflow.value.ul = stats->glds_overflow;
1066 	gsp->glds_underflow.value.ul = stats->glds_underflow;
1067 	gsp->glds_missed.value.ul = stats->glds_missed;
1068 	gsp->glds_norcvbuf.value.ul = stats->glds_norcvbuf +
1069 	    stats->glds_gldnorcvbuf;
1070 	gsp->glds_intr.value.ul = stats->glds_intr;
1071 
1072 	gsp->glds_speed.value.ui64 = stats->glds_speed;
1073 	gsp->glds_unknowns.value.ul = stats->glds_unknowns;
1074 	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
1075 	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
1076 	gsp->glds_rcvbadinterp.value.ui32 = stats->glds_rcvbadinterp;
1077 
1078 	if (mac_pvt->nprom)
1079 		(void) strcpy(gsp->glds_prom.value.c, "phys");
1080 	else if (mac_pvt->nprom_multi)
1081 		(void) strcpy(gsp->glds_prom.value.c, "multi");
1082 	else
1083 		(void) strcpy(gsp->glds_prom.value.c, "off");
1084 
1085 	(void) strcpy(gsp->glds_media.value.c, gld_media[
1086 	    stats->glds_media < sizeof (gld_media) / sizeof (gld_media[0])
1087 	    ? stats->glds_media : 0]);
1088 
1089 	switch (macinfo->gldm_type) {
1090 	case DL_ETHER:
1091 		gsp->glds_frame.value.ul = stats->glds_frame;
1092 		gsp->glds_crc.value.ul = stats->glds_crc;
1093 		gsp->glds_collisions.value.ul = stats->glds_collisions;
1094 		gsp->glds_excoll.value.ul = stats->glds_excoll;
1095 		gsp->glds_defer.value.ul = stats->glds_defer;
1096 		gsp->glds_short.value.ul = stats->glds_short;
1097 		gsp->glds_xmtlatecoll.value.ul = stats->glds_xmtlatecoll;
1098 		gsp->glds_nocarrier.value.ul = stats->glds_nocarrier;
1099 		gsp->glds_dot3_first_coll.value.ui32 =
1100 		    stats->glds_dot3_first_coll;
1101 		gsp->glds_dot3_multi_coll.value.ui32 =
1102 		    stats->glds_dot3_multi_coll;
1103 		gsp->glds_dot3_sqe_error.value.ui32 =
1104 		    stats->glds_dot3_sqe_error;
1105 		gsp->glds_dot3_mac_xmt_error.value.ui32 =
1106 		    stats->glds_dot3_mac_xmt_error;
1107 		gsp->glds_dot3_mac_rcv_error.value.ui32 =
1108 		    stats->glds_dot3_mac_rcv_error;
1109 		gsp->glds_dot3_frame_too_long.value.ui32 =
1110 		    stats->glds_dot3_frame_too_long;
1111 		(void) strcpy(gsp->glds_duplex.value.c, gld_duplex[
1112 		    stats->glds_duplex <
1113 		    sizeof (gld_duplex) / sizeof (gld_duplex[0]) ?
1114 		    stats->glds_duplex : 0]);
1115 		break;
1116 	case DL_TPR:
1117 		gsp->glds_dot5_line_error.value.ui32 =
1118 		    stats->glds_dot5_line_error;
1119 		gsp->glds_dot5_burst_error.value.ui32 =
1120 		    stats->glds_dot5_burst_error;
1121 		gsp->glds_dot5_signal_loss.value.ui32 =
1122 		    stats->glds_dot5_signal_loss;
1123 		gsp->glds_dot5_ace_error.value.ui32 =
1124 		    stats->glds_dot5_ace_error;
1125 		gsp->glds_dot5_internal_error.value.ui32 =
1126 		    stats->glds_dot5_internal_error;
1127 		gsp->glds_dot5_lost_frame_error.value.ui32 =
1128 		    stats->glds_dot5_lost_frame_error;
1129 		gsp->glds_dot5_frame_copied_error.value.ui32 =
1130 		    stats->glds_dot5_frame_copied_error;
1131 		gsp->glds_dot5_token_error.value.ui32 =
1132 		    stats->glds_dot5_token_error;
1133 		gsp->glds_dot5_freq_error.value.ui32 =
1134 		    stats->glds_dot5_freq_error;
1135 		break;
1136 	case DL_FDDI:
1137 		gsp->glds_fddi_mac_error.value.ui32 =
1138 		    stats->glds_fddi_mac_error;
1139 		gsp->glds_fddi_mac_lost.value.ui32 =
1140 		    stats->glds_fddi_mac_lost;
1141 		gsp->glds_fddi_mac_token.value.ui32 =
1142 		    stats->glds_fddi_mac_token;
1143 		gsp->glds_fddi_mac_tvx_expired.value.ui32 =
1144 		    stats->glds_fddi_mac_tvx_expired;
1145 		gsp->glds_fddi_mac_late.value.ui32 =
1146 		    stats->glds_fddi_mac_late;
1147 		gsp->glds_fddi_mac_ring_op.value.ui32 =
1148 		    stats->glds_fddi_mac_ring_op;
1149 		break;
1150 	case DL_IB:
1151 		break;
1152 	default:
1153 		break;
1154 	}
1155 
1156 	GLDM_UNLOCK(macinfo);
1157 
1158 #ifdef GLD_DEBUG
1159 	gld_check_assertions();
1160 	if (gld_debug & GLDRDE)
1161 		gld_sr_dump(macinfo);
1162 #endif
1163 
1164 	return (0);
1165 }
1166 
1167 static int
1168 gld_init_vlan_stats(gld_vlan_t *vlan)
1169 {
1170 	gld_mac_info_t *mac = vlan->gldv_mac;
1171 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
1172 	struct gldkstats *sp;
1173 	glddev_t *glddev;
1174 	kstat_t *ksp;
1175 	char *name;
1176 	int instance;
1177 
1178 	glddev = mac_pvt->major_dev;
1179 	name = glddev->gld_name;
1180 	instance = (vlan->gldv_id * GLD_VLAN_SCALE) + mac->gldm_ppa;
1181 
1182 	if ((ksp = kstat_create(name, instance,
1183 	    NULL, "net", KSTAT_TYPE_NAMED,
1184 	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
1185 		cmn_err(CE_WARN,
1186 		    "GLD: failed to create kstat structure for %s%d",
1187 		    name, instance);
1188 		return (GLD_FAILURE);
1189 	}
1190 
1191 	vlan->gldv_kstatp = ksp;
1192 
1193 	ksp->ks_update = gld_update_vlan_kstat;
1194 	ksp->ks_private = (void *)vlan;
1195 
1196 	sp = ksp->ks_data;
1197 	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
1198 	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
1199 	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
1200 	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
1201 	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
1202 	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
1203 	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
1204 	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
1205 	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
1206 	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
1207 	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
1208 	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
1209 	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
1210 	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
1211 	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
1212 	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
1213 	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
1214 	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
1215 	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
1216 	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
1217 	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
1218 	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
1219 	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);
1220 
1221 	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
1222 	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
1223 	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);
1224 
1225 	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
1226 	    KSTAT_DATA_UINT32);
1227 	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
1228 	    KSTAT_DATA_UINT32);
1229 
1230 	kstat_install(ksp);
1231 	return (GLD_SUCCESS);
1232 }
1233 
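/*
 * kstat update routine for the per-vlan statistics.  Unlike
 * gld_update_kstat() above, this reports only the software-maintained
 * counters; the driver's gldm_get_stats() entry point is not called.
 */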
1234 static int
1235 gld_update_vlan_kstat(kstat_t *ksp, int rw)
1236 {
1237 	gld_vlan_t	*vlan;
1238 	gld_mac_info_t	*macinfo;
1239 	struct gldkstats *gsp;
1240 	struct gld_stats *stats;
1241 
1242 	if (rw == KSTAT_WRITE)
1243 		return (EACCES);
1244 
1245 	vlan = (gld_vlan_t *)ksp->ks_private;
1246 	ASSERT(vlan != NULL);
1247 
1248 	macinfo = vlan->gldv_mac;
1249 	GLDM_LOCK(macinfo, RW_WRITER);
1250 
1251 	gsp = vlan->gldv_kstatp->ks_data;
1252 	ASSERT(gsp);
1253 	stats = vlan->gldv_stats;
1254 
1255 	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
1256 	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
1257 	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
1258 	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
1259 	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
1260 	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;
1261 	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
1262 	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
1263 	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
1264 
1265 	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
1266 	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
1267 	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
1268 	gsp->glds_multircv.value.ul = stats->glds_multircv;
1269 	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
1270 	gsp->glds_blocked.value.ul = stats->glds_blocked;
1271 	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
1272 	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
1273 
1274 	GLDM_UNLOCK(macinfo);
1275 	return (0);
1276 }
1277 
1278 /*
1279  * The device dependent driver specifies gld_getinfo as its getinfo routine.
1280  */
1281 /*ARGSUSED*/
1282 int
1283 gld_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
1284 {
1285 	dev_info_t	*devinfo;
1286 	minor_t		minor = getminor((dev_t)arg);
1287 	int		rc = DDI_FAILURE;
1288 
1289 	switch (cmd) {
1290 	case DDI_INFO_DEVT2DEVINFO:
1291 		if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
1292 			*(dev_info_t **)resultp = devinfo;
1293 			rc = DDI_SUCCESS;
1294 		}
1295 		break;
1296 	case DDI_INFO_DEVT2INSTANCE:
1297 		/* Need static mapping for deferred attach */
1298 		if (minor == GLD_USE_STYLE2) {
1299 			/*
1300 			 * Style 2:  this minor number does not correspond to
1301 			 * any particular instance number.
1302 			 */
1303 			rc = DDI_FAILURE;
1304 		} else if (minor <= GLD_MAX_STYLE1_MINOR) {
1305 			/* Style 1:  calculate the PPA from the minor */
1306 			*resultp = (void *)(uintptr_t)
1307 			    GLD_STYLE1_MINOR_TO_PPA(minor);
1308 			rc = DDI_SUCCESS;
1309 		} else {
1310 			/* Clone:  look for it.  Not a static mapping */
1311 			if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
1312 				*resultp = (void *)(uintptr_t)
1313 				    ddi_get_instance(devinfo);
1314 				rc = DDI_SUCCESS;
1315 			}
1316 		}
1317 		break;
1318 	}
1319 
1320 	return (rc);
1321 }
1322 
1323 /* called from gld_getinfo */
1324 dev_info_t *
1325 gld_finddevinfo(dev_t dev)
1326 {
1327 	minor_t		minor = getminor(dev);
1328 	glddev_t	*device;
1329 	gld_mac_info_t	*mac;
1330 	gld_vlan_t	*vlan;
1331 	gld_t		*str;
1332 	dev_info_t	*devinfo = NULL;
1333 	int		i;
1334 
1335 	if (minor == GLD_USE_STYLE2) {
1336 		/*
1337 		 * Style 2:  this minor number does not correspond to
1338 		 * any particular instance number.
1339 		 *
1340 		 * XXX We don't know what to say.  See Bug 1165519.
1341 		 */
1342 		return (NULL);
1343 	}
1344 
1345 	mutex_enter(&gld_device_list.gld_devlock);	/* hold the device */
1346 
1347 	device = gld_devlookup(getmajor(dev));
1348 	if (device == NULL) {
1349 		/* There are no attached instances of this device */
1350 		mutex_exit(&gld_device_list.gld_devlock);
1351 		return (NULL);
1352 	}
1353 
1354 	/*
1355 	 * Search all attached macs and streams.
1356 	 *
1357 	 * XXX We don't bother checking the DL_UNATTACHED streams since
1358 	 * we don't know what devinfo we should report back even if we
1359 	 * found the minor.  Maybe we should associate streams that are
1360 	 * not currently attached to a PPA with the "first" devinfo node
1361 	 * of the major device to attach -- the one that created the
1362 	 * minor node for the generic device.
1363 	 */
1364 	mutex_enter(&device->gld_devlock);
1365 
1366 	for (mac = device->gld_mac_next;
1367 	    mac != (gld_mac_info_t *)&device->gld_mac_next;
1368 	    mac = mac->gldm_next) {
1369 		gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
1370 
1371 		if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
1372 			continue;	/* this one's not ready yet */
1373 		if (minor <= GLD_MAX_STYLE1_MINOR) {
1374 			/* Style 1 -- look for the corresponding PPA */
1375 			if (minor == GLD_STYLE1_PPA_TO_MINOR(mac->gldm_ppa)) {
1376 				devinfo = mac->gldm_devinfo;
1377 				goto out;	/* found it! */
1378 			} else
1379 				continue;	/* not this PPA */
1380 		}
1381 
1382 		/* We are looking for a clone */
1383 		for (i = 0; i < VLAN_HASHSZ; i++) {
1384 			for (vlan = pvt->vlan_hash[i];
1385 			    vlan != NULL; vlan = vlan->gldv_next) {
1386 				for (str = vlan->gldv_str_next;
1387 				    str != (gld_t *)&vlan->gldv_str_next;
1388 				    str = str->gld_next) {
1389 					ASSERT(str->gld_mac_info == mac);
1390 					if (minor == str->gld_minor) {
1391 						devinfo = mac->gldm_devinfo;
1392 						goto out;
1393 					}
1394 				}
1395 			}
1396 		}
1397 	}
1398 out:
1399 	mutex_exit(&device->gld_devlock);
1400 	mutex_exit(&gld_device_list.gld_devlock);
1401 	return (devinfo);
1402 }
1403 
1404 /*
1405  * STREAMS open routine.  The device dependent driver specifies this as its
1406  * open entry point.
1407  */
1408 /*ARGSUSED2*/
1409 int
1410 gld_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *cred)
1411 {
1412 	gld_mac_pvt_t *mac_pvt;
1413 	gld_t *gld;
1414 	glddev_t *glddev;
1415 	gld_mac_info_t *macinfo;
1416 	minor_t minor = getminor(*dev);
1417 	gld_vlan_t *vlan;
1418 	t_uscalar_t ppa;
1419 
1420 	ASSERT(q != NULL);
1421 
1422 	if (minor > GLD_MAX_STYLE1_MINOR)
1423 		return (ENXIO);
1424 
1425 	ASSERT(q->q_ptr == NULL);	/* Clone device gives us a fresh Q */
1426 
1427 	/* Find our per-major glddev_t structure */
1428 	mutex_enter(&gld_device_list.gld_devlock);
1429 	glddev = gld_devlookup(getmajor(*dev));
1430 
1431 	/*
1432 	 * This glddev will hang around since detach (and therefore
1433 	 * gld_unregister) can't run while we're here in the open routine.
1434 	 */
1435 	mutex_exit(&gld_device_list.gld_devlock);
1436 
1437 	if (glddev == NULL)
1438 		return (ENXIO);
1439 
1440 #ifdef GLD_DEBUG
1441 	if (gld_debug & GLDPROT) {
1442 		if (minor == GLD_USE_STYLE2)
1443 			cmn_err(CE_NOTE, "gld_open(%p, Style 2)", (void *)q);
1444 		else
1445 			cmn_err(CE_NOTE, "gld_open(%p, Style 1, minor = %d)",
1446 			    (void *)q, minor);
1447 	}
1448 #endif
1449 
1450 	/*
1451 	 * get a per-stream structure and link things together so we
1452 	 * can easily find them later.
1453 	 */
1454 	gld = kmem_zalloc(sizeof (gld_t), KM_SLEEP);
1455 
1456 	/*
1457 	 * fill in the structure and state info
1458 	 */
1459 	gld->gld_qptr = q;
1460 	gld->gld_device = glddev;
1461 	gld->gld_state = DL_UNATTACHED;
1462 
1463 	/*
1464 	 * we must atomically find a free minor number and add the stream
1465 	 * to a list, because gld_findminor has to traverse the lists to
1466 	 * determine which minor numbers are free.
1467 	 */
1468 	mutex_enter(&glddev->gld_devlock);
1469 
1470 	/* find a free minor device number for the clone */
1471 	gld->gld_minor = gld_findminor(glddev);
1472 	if (gld->gld_minor == 0) {
1473 		mutex_exit(&glddev->gld_devlock);
1474 		kmem_free(gld, sizeof (gld_t));
1475 		return (ENOSR);
1476 	}
1477 
1478 #ifdef GLD_VERBOSE_DEBUG
1479 	if (gld_debug & GLDPROT)
1480 		cmn_err(CE_NOTE, "gld_open() gld ptr: %p minor: %d",
1481 		    (void *)gld, gld->gld_minor);
1482 #endif
1483 
1484 	if (minor == GLD_USE_STYLE2) {
1485 		gld->gld_style = DL_STYLE2;
1486 		*dev = makedevice(getmajor(*dev), gld->gld_minor);
1487 		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
1488 		gldinsque(gld, glddev->gld_str_prev);
1489 #ifdef GLD_VERBOSE_DEBUG
1490 		if (gld_debug & GLDPROT)
1491 			cmn_err(CE_NOTE, "GLDstruct added to device list");
1492 #endif
1493 		(void) qassociate(q, -1);
1494 		goto done;
1495 	}
1496 
1497 	gld->gld_style = DL_STYLE1;
1498 
1499 	/* the PPA is actually 1 less than the minordev */
1500 	ppa = GLD_STYLE1_MINOR_TO_PPA(minor);
1501 
1502 	for (macinfo = glddev->gld_mac_next;
1503 	    macinfo != (gld_mac_info_t *)(&glddev->gld_mac_next);
1504 	    macinfo = macinfo->gldm_next) {
1505 		ASSERT(macinfo != NULL);
1506 		if (macinfo->gldm_ppa != ppa)
1507 			continue;
1508 
1509 		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
1510 			continue;	/* this one's not ready yet */
1511 
1512 		/*
1513 		 * we found the correct PPA
1514 		 */
1515 		GLDM_LOCK(macinfo, RW_WRITER);
1516 
1517 		gld->gld_mac_info = macinfo;
1518 
1519 		if (macinfo->gldm_send_tagged != NULL)
1520 			gld->gld_send = macinfo->gldm_send_tagged;
1521 		else
1522 			gld->gld_send = macinfo->gldm_send;
1523 
1524 		/* now ready for action */
1525 		gld->gld_state = DL_UNBOUND;
1526 
1527 		if ((vlan = gld_get_vlan(macinfo, VLAN_VID_NONE)) == NULL) {
1528 			GLDM_UNLOCK(macinfo);
1529 			mutex_exit(&glddev->gld_devlock);
1530 			kmem_free(gld, sizeof (gld_t));
1531 			return (EIO);
1532 		}
1533 
1534 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1535 		if (!mac_pvt->started) {
1536 			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
1537 				gld_rem_vlan(vlan);
1538 				GLDM_UNLOCK(macinfo);
1539 				mutex_exit(&glddev->gld_devlock);
1540 				kmem_free(gld, sizeof (gld_t));
1541 				return (EIO);
1542 			}
1543 		}
1544 
1545 		gld->gld_vlan = vlan;
1546 		vlan->gldv_nstreams++;
1547 		gldinsque(gld, vlan->gldv_str_prev);
1548 		*dev = makedevice(getmajor(*dev), gld->gld_minor);
1549 		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
1550 
1551 		GLDM_UNLOCK(macinfo);
1552 #ifdef GLD_VERBOSE_DEBUG
1553 		if (gld_debug & GLDPROT)
1554 			cmn_err(CE_NOTE,
1555 			    "GLDstruct added to instance list");
1556 #endif
1557 		break;
1558 	}
1559 
1560 	if (gld->gld_state == DL_UNATTACHED) {
1561 		mutex_exit(&glddev->gld_devlock);
1562 		kmem_free(gld, sizeof (gld_t));
1563 		return (ENXIO);
1564 	}
1565 
1566 done:
1567 	mutex_exit(&glddev->gld_devlock);
1568 	noenable(WR(q));	/* We'll do the qenables manually */
1569 	qprocson(q);		/* start the queues running */
1570 	qenable(WR(q));
1571 	return (0);
1572 }
1573 
1574 /*
1575  * normal stream close call checks current status and cleans up
1576  * data structures that were dynamically allocated
1577  */
1578 /*ARGSUSED1*/
1579 int
1580 gld_close(queue_t *q, int flag, cred_t *cred)
1581 {
1582 	gld_t	*gld = (gld_t *)q->q_ptr;
1583 	glddev_t *glddev = gld->gld_device;
1584 
1585 	ASSERT(q);
1586 	ASSERT(gld);
1587 
1588 #ifdef GLD_DEBUG
1589 	if (gld_debug & GLDPROT) {
1590 		cmn_err(CE_NOTE, "gld_close(%p, Style %d)",
1591 		    (void *)q, (gld->gld_style & 0x1) + 1);
1592 	}
1593 #endif
1594 
1595 	/* Hold all device streams lists still while we check for a macinfo */
1596 	mutex_enter(&glddev->gld_devlock);
1597 
1598 	if (gld->gld_mac_info != NULL) {
1599 		/* If there's a macinfo, block recv while we change state */
1600 		GLDM_LOCK(gld->gld_mac_info, RW_WRITER);
1601 		gld->gld_flags |= GLD_STR_CLOSING; /* no more rcv putnexts */
1602 		GLDM_UNLOCK(gld->gld_mac_info);
1603 	} else {
1604 		/* no mac DL_ATTACHED right now */
1605 		gld->gld_flags |= GLD_STR_CLOSING;
1606 	}
1607 
1608 	mutex_exit(&glddev->gld_devlock);
1609 
1610 	/*
1611 	 * qprocsoff before we call gld_unbind/gldunattach, so that
1612 	 * we know wsrv isn't in there trying to undo what we're doing.
1613 	 */
1614 	qprocsoff(q);
1615 
1616 	ASSERT(gld->gld_wput_count == 0);
1617 	gld->gld_wput_count = 0;	/* just in case */
1618 
1619 	if (gld->gld_state == DL_IDLE) {
1620 		/* Need to unbind */
1621 		ASSERT(gld->gld_mac_info != NULL);
1622 		(void) gld_unbind(WR(q), NULL);
1623 	}
1624 
1625 	if (gld->gld_state == DL_UNBOUND) {
1626 		/*
1627 		 * Need to unattach
1628 		 * For a Style 2 stream, gldunattach also
1629 		 * associates the queue with a NULL dip.
1630 		 */
1631 		ASSERT(gld->gld_mac_info != NULL);
1632 		(void) gldunattach(WR(q), NULL);
1633 	}
1634 
1635 	/* disassociate the stream from the device */
1636 	q->q_ptr = WR(q)->q_ptr = NULL;
1637 
1638 	/*
1639 	 * Since we unattached above (if necessary), we know that we're
1640 	 * on the per-major list of unattached streams, rather than a
1641 	 * per-PPA list.  So we know we should hold the devlock.
1642 	 */
1643 	mutex_enter(&glddev->gld_devlock);
1644 	gldremque(gld);			/* remove from Style 2 list */
1645 	mutex_exit(&glddev->gld_devlock);
1646 
1647 	kmem_free(gld, sizeof (gld_t));
1648 
1649 	return (0);
1650 }
1651 
1652 /*
1653  * gld_rsrv (q)
1654  *	simple read service procedure
1655  *	its purpose is to avoid the time it takes for packets
1656  *	to move through IP so we can get them off the board
1657  *	as fast as possible due to limited PC resources.
1658  *
1659  *	This is not normally used in the current implementation.  It
1660  *	can be selected with the undocumented property "fast_recv".
1661  *	If that property is set, gld_recv will send the packet
1662  *	upstream with a putq() rather than a putnext(), thus causing
1663  *	this routine to be scheduled.
1664  */
1665 int
1666 gld_rsrv(queue_t *q)
1667 {
1668 	mblk_t *mp;
1669 
1670 	while ((mp = getq(q)) != NULL) {
1671 		if (canputnext(q)) {
1672 			putnext(q, mp);
1673 		} else {
1674 			freemsg(mp);
1675 		}
1676 	}
1677 	return (0);
1678 }
1679 
1680 /*
1681  * gld_wput (q, mp)
1682  * general gld stream write put routine. Receives fastpath data from upper
1683  * modules and processes it immediately.  ioctl and M_PROTO/M_PCPROTO are
1684  * queued for later processing by the service procedure.
1685  */
1686 
1687 int
1688 gld_wput(queue_t *q, mblk_t *mp)
1689 {
1690 	gld_t  *gld = (gld_t *)(q->q_ptr);
1691 	int	rc;
1692 	boolean_t multidata = B_TRUE;
1693 
1694 #ifdef GLD_DEBUG
1695 	if (gld_debug & GLDTRACE)
1696 		cmn_err(CE_NOTE, "gld_wput(%p %p): type %x",
1697 		    (void *)q, (void *)mp, DB_TYPE(mp));
1698 #endif
1699 	switch (DB_TYPE(mp)) {
1700 
1701 	case M_DATA:
1702 		/* fast data / raw support */
1703 		/* we must be DL_ATTACHED and DL_BOUND to do this */
1704 		/* Tricky to access memory without taking the mutex */
1705 		if ((gld->gld_flags & (GLD_RAW | GLD_FAST)) == 0 ||
1706 		    gld->gld_state != DL_IDLE) {
1707 			merror(q, mp, EPROTO);
1708 			break;
1709 		}
1710 		multidata = B_FALSE;
1711 		/* LINTED: E_CASE_FALLTHRU */
1712 	case M_MULTIDATA:
1713 		/* Only call gld_start() directly if nothing queued ahead */
1714 		/* No guarantees about ordering with different threads */
1715 		if (q->q_first)
1716 			goto use_wsrv;
1717 
1718 		/*
1719 		 * This can happen if wsrv has taken off the last mblk but
1720 		 * is still processing it.
1721 		 */
1722 		membar_consumer();
1723 		if (gld->gld_in_wsrv)
1724 			goto use_wsrv;
1725 
1726 		/*
1727 		 * Keep a count of current wput calls to start.
1728 		 * Nonzero count delays any attempted DL_UNBIND.
1729 		 * See comments above gld_start().
1730 		 */
1731 		atomic_add_32((uint32_t *)&gld->gld_wput_count, 1);
1732 		membar_enter();
1733 
1734 		/* Recheck state now wput_count is set to prevent DL_UNBIND */
1735 		/* Recheck state now that wput_count is set, to prevent DL_UNBIND */
1736 		if (gld->gld_state != DL_IDLE || gld->gld_in_unbind) {
1737 			/* Extremely unlikely */
1738 			atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
1739 			goto use_wsrv;
1740 		}
1741 
1742 		rc = (multidata) ? gld_start_mdt(q, mp, GLD_WPUT) :
1743 		    gld_start(q, mp, GLD_WPUT, UPRI(gld, mp->b_band));
1744 
1745 		/* Allow DL_UNBIND again */
1746 		membar_exit();
1747 		atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
1748 
1749 		if (rc == GLD_NORESOURCES)
1750 			qenable(q);
1751 		break;	/*  Done with this packet */
1752 
1753 use_wsrv:
1754 		/* Q not empty, in DL_DETACH, or start gave NORESOURCES */
1755 		(void) putq(q, mp);
1756 		qenable(q);
1757 		break;
1758 
1759 	case M_IOCTL:
1760 		/* ioctl relies on wsrv single threading per queue */
1761 		(void) putq(q, mp);
1762 		qenable(q);
1763 		break;
1764 
1765 	case M_CTL:
1766 		(void) putq(q, mp);
1767 		qenable(q);
1768 		break;
1769 
1770 	case M_FLUSH:		/* canonical flush handling */
1771 		/* XXX Should these be FLUSHALL? */
1772 		if (*mp->b_rptr & FLUSHW)
1773 			flushq(q, 0);
1774 		if (*mp->b_rptr & FLUSHR) {
1775 			flushq(RD(q), 0);
1776 			*mp->b_rptr &= ~FLUSHW;
1777 			qreply(q, mp);
1778 		} else
1779 			freemsg(mp);
1780 		break;
1781 
1782 	case M_PROTO:
1783 	case M_PCPROTO:
1784 		/* these rely on wsrv single threading per queue */
1785 		(void) putq(q, mp);
1786 		qenable(q);
1787 		break;
1788 
1789 	default:
1790 #ifdef GLD_DEBUG
1791 		if (gld_debug & GLDETRACE)
1792 			cmn_err(CE_WARN,
1793 			    "gld: Unexpected packet type from queue: 0x%x",
1794 			    DB_TYPE(mp));
1795 #endif
1796 		freemsg(mp);
1797 	}
1798 	return (0);
1799 }
1800 
1801 /*
1802  * gld_wsrv - Incoming messages are processed according to the DLPI protocol
1803  * specification.
1804  *
1805  * wsrv is single-threaded per Q.  We make use of this to avoid taking the
1806  * lock for reading data items that are only ever written by us.
1807  */
1808 
1809 int
1810 gld_wsrv(queue_t *q)
1811 {
1812 	mblk_t *mp;
1813 	gld_t *gld = (gld_t *)q->q_ptr;
1814 	gld_mac_info_t *macinfo;
1815 	union DL_primitives *prim;
1816 	int err;
1817 	boolean_t multidata;
1818 
1819 #ifdef GLD_DEBUG
1820 	if (gld_debug & GLDTRACE)
1821 		cmn_err(CE_NOTE, "gld_wsrv(%p)", (void *)q);
1822 #endif
1823 
1824 	ASSERT(!gld->gld_in_wsrv);
1825 
1826 	gld->gld_xwait = B_FALSE; /* We are now going to process this Q */
1827 
1828 	if (q->q_first == NULL)
1829 		return (0);
1830 
1831 	macinfo = gld->gld_mac_info;
1832 
1833 	/*
1834 	 * Help wput avoid a call to gld_start if there might be a message
1835 	 * previously queued by that thread being processed here.
1836 	 */
1837 	gld->gld_in_wsrv = B_TRUE;
1838 	membar_enter();
1839 
1840 	while ((mp = getq(q)) != NULL) {
1841 		switch (DB_TYPE(mp)) {
1842 		case M_DATA:
1843 		case M_MULTIDATA:
1844 			multidata = (DB_TYPE(mp) == M_MULTIDATA);
1845 
1846 			/*
1847 			 * This is either a retry of a previously processed
1848 			 * DL_UNITDATA_REQ, or a RAW or FAST message from above.
1849 			 */
1850 			if (macinfo == NULL) {
1851 				/* No longer attached to a PPA, drop packet */
1852 				freemsg(mp);
1853 				break;
1854 			}
1855 
1856 			gld->gld_sched_ran = B_FALSE;
1857 			membar_enter();
1858 			err = (multidata) ? gld_start_mdt(q, mp, GLD_WSRV) :
1859 			    gld_start(q, mp, GLD_WSRV, UPRI(gld, mp->b_band));
1860 			if (err == GLD_NORESOURCES) {
1861 				/* gld_sched will qenable us later */
1862 				gld->gld_xwait = B_TRUE; /* want qenable */
1863 				membar_enter();
1864 				/*
1865 				 * v2:  we're not holding the lock; it's
1866 				 * possible that the driver could have already
1867 				 * called gld_sched (following up on its
1868 				 * return of GLD_NORESOURCES), before we got a
1869 				 * chance to do the putbq() and set gld_xwait.
1870 				 * So if we saw a call to gld_sched that
1871 				 * examined this queue, since our call to
1872 				 * gld_start() above, then it's possible we've
1873 				 * already seen the only call to gld_sched()
1874 				 * we're ever going to see.  So we better retry
1875 				 * transmitting this packet right now.
1876 				 */
1877 				if (gld->gld_sched_ran) {
1878 #ifdef GLD_DEBUG
1879 					if (gld_debug & GLDTRACE)
1880 						cmn_err(CE_NOTE, "gld_wsrv: "
1881 						    "sched was called");
1882 #endif
1883 					break;	/* try again right now */
1884 				}
1885 				gld->gld_in_wsrv = B_FALSE;
1886 				return (0);
1887 			}
1888 			break;
1889 
1890 		case M_IOCTL:
1891 			(void) gld_ioctl(q, mp);
1892 			break;
1893 
1894 		case M_CTL:
1895 			if (macinfo == NULL) {
1896 				freemsg(mp);
1897 				break;
1898 			}
1899 
1900 			if (macinfo->gldm_mctl != NULL) {
1901 				GLDM_LOCK(macinfo, RW_WRITER);
1902 				(void) (*macinfo->gldm_mctl) (macinfo, q, mp);
1903 				GLDM_UNLOCK(macinfo);
1904 			} else {
1905 				/* The driver doesn't handle M_CTL; just drop it */
1906 				freemsg(mp);
1907 			}
1908 			break;
1909 
1910 		case M_PROTO:	/* Will be a DLPI message of some type */
1911 		case M_PCPROTO:
1912 			if ((err = gld_cmds(q, mp)) != GLDE_OK) {
1913 				if (err == GLDE_RETRY) {
1914 					gld->gld_in_wsrv = B_FALSE;
1915 					return (0); /* quit while we're ahead */
1916 				}
1917 				prim = (union DL_primitives *)mp->b_rptr;
1918 				dlerrorack(q, mp, prim->dl_primitive, err, 0);
1919 			}
1920 			break;
1921 
1922 		default:
1923 			/* This should never happen */
1924 #ifdef GLD_DEBUG
1925 			if (gld_debug & GLDERRS)
1926 				cmn_err(CE_WARN,
1927 				    "gld_wsrv: db_type(%x) not supported",
1928 				    mp->b_datap->db_type);
1929 #endif
1930 			freemsg(mp);	/* unknown types are discarded */
1931 			break;
1932 		}
1933 	}
1934 
1935 	membar_exit();
1936 	gld->gld_in_wsrv = B_FALSE;
1937 	return (0);
1938 }
1939 
1940 /*
1941  * gld_start() can get called from gld_wput(), gld_wsrv(), or gld_unitdata().
1942  *
1943  * We only come directly from wput() in the GLD_FAST (fastpath) or RAW case.
1944  *
1945  * In particular, we must avoid calling gld_precv*() if we came from wput().
1946  * gld_precv*() is where we, on the transmit side, loop back our outgoing
1947  * packets to the receive side if we are in physical promiscuous mode.
1948  * Since the receive side holds a lock across its call to the upstream
1949  * putnext, and that upstream module could well have looped back to our
1950  * wput() routine on the same thread, we cannot call gld_precv* from here
1951  * for fear of causing a recursive lock entry in our receive code.
1952  *
1953  * There is a problem here when coming from gld_wput().  While wput
1954  * only comes here if the queue is attached to a PPA and bound to a SAP
1955  * and there are no messages on the queue ahead of the M_DATA that could
1956  * change that, it is theoretically possible that another thread could
1957  * now wput a DL_UNBIND and a DL_DETACH message, and the wsrv() routine
1958  * could wake up and process them, before we finish processing this
1959  * send of the M_DATA.  This can only possibly happen on a Style 2 RAW or
1960  * FAST (fastpath) stream:  non RAW/FAST streams always go through wsrv(),
1961  * and Style 1 streams only DL_DETACH in the close routine, where
1962  * qprocsoff() protects us.  If this happens we could end up calling
1963  * gldm_send() after we have detached the stream and possibly called
1964  * gldm_stop().  Worse, once the number of attached streams goes to zero,
1965  * detach/unregister could be called, and the macinfo could go away entirely.
1966  *
1967  * No one has ever seen this happen.
1968  *
1969  * It is some trouble to fix this, and we would rather not add any mutex
1970  * logic into the wput() routine, which is supposed to be a "fast"
1971  * path.
1972  *
1973  * What I've done is use an atomic counter to keep a count of the number
1974  * of threads currently calling gld_start() from wput() on this stream.
1975  * If DL_DETACH sees this as nonzero, it putbqs the request back onto
1976  * the queue and qenables, hoping to have better luck next time.  Since
1977  * people shouldn't be trying to send after they've asked to DL_DETACH,
1978  * hopefully very soon all the wput=>start threads should have returned
1979  * and the DL_DETACH will succeed.  It's hard to test this since the odds
1980  * of the failure even trying to happen are so small.  I probably could
1981  * have ignored the whole issue and never been the worse for it.
1982  */
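/*
 * Illustrative sketch only (not a copy of GLD code): the detach-side
 * counterpart of the wput_count protocol described above lives in
 * gldunattach(), elsewhere in this file.  The logic it implements is
 * roughly the following, with names simplified:
 *
 *	if (gld->gld_wput_count != 0) {
 *		(void) putbq(q, mp);	-- a wput=>start thread is active
 *		qenable(q);		-- so retry the DL_DETACH later
 *		return (GLDE_RETRY);
 *	}
 *	-- otherwise no wput=>start thread remains and it is safe to
 *	-- proceed with the detach.
 */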
1983 static int
1984 gld_start(queue_t *q, mblk_t *mp, int caller, uint32_t upri)
1985 {
1986 	mblk_t *nmp;
1987 	gld_t *gld = (gld_t *)q->q_ptr;
1988 	gld_mac_info_t *macinfo;
1989 	gld_mac_pvt_t *mac_pvt;
1990 	int rc;
1991 	gld_interface_t *ifp;
1992 	pktinfo_t pktinfo;
1993 	uint32_t vtag;
1994 	gld_vlan_t *vlan;
1995 
1996 	ASSERT(DB_TYPE(mp) == M_DATA);
1997 	macinfo = gld->gld_mac_info;
1998 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1999 	ifp = mac_pvt->interfacep;
2000 	vlan = (gld_vlan_t *)gld->gld_vlan;
2001 
2002 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_TX) != 0) {
2003 		freemsg(mp);
2004 #ifdef GLD_DEBUG
2005 		if (gld_debug & GLDERRS)
2006 			cmn_err(CE_WARN,
2007 			    "gld_start: failed to interpret outbound packet");
2008 #endif
2009 		vlan->gldv_stats->glds_xmtbadinterp++;
2010 		return (GLD_BADARG);
2011 	}
2012 
2013 	/*
2014 	 * We're not holding the lock for this check.  If the promiscuous
2015 	 * state is in flux it doesn't matter much if we get this wrong.
2016 	 */
2017 	if (mac_pvt->nprom > 0) {
2018 		/*
2019 		 * We want to loopback to the receive side, but to avoid
2020 		 * recursive lock entry:  if we came from wput(), which
2021 		 * could have looped back via IP from our own receive
2022 		 * interrupt thread, we decline this request.  wput()
2023 		 * will then queue the packet for wsrv().  This means
2024 		 * that when snoop is running we don't get the advantage
2025 		 * of the wput() multithreaded direct entry to the
2026 		 * driver's send routine.
2027 		 */
2028 		if (caller == GLD_WPUT) {
2029 			(void) putbq(q, mp);
2030 			return (GLD_NORESOURCES);
2031 		}
2032 		if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
2033 			nmp = dupmsg_noloan(mp);
2034 		else
2035 			nmp = dupmsg(mp);
2036 	} else
2037 		nmp = NULL;		/* we need no loopback */
2038 
2039 	vtag = GLD_MK_VTAG(vlan->gldv_ptag, upri);
2040 	if (ifp->hdr_size > 0 &&
2041 	    pktinfo.pktLen > ifp->hdr_size + (vtag == 0 ? 0 : VTAG_SIZE) +
2042 	    macinfo->gldm_maxpkt) {
2043 		freemsg(mp);	/* discard oversized outbound packet */
2044 		if (nmp)
2045 			freemsg(nmp);	/* free the duped message */
2046 #ifdef GLD_DEBUG
2047 		if (gld_debug & GLDERRS)
2048 			cmn_err(CE_WARN,
2049 			    "gld_start: oversize outbound packet, size %d, "
2050 			    "max %d", pktinfo.pktLen,
2051 			    ifp->hdr_size + macinfo->gldm_maxpkt);
2052 #endif
2053 		vlan->gldv_stats->glds_xmtbadinterp++;
2054 		return (GLD_BADARG);
2055 	}
2056 
2057 	rc = (*gld->gld_send)(macinfo, mp, vtag);
2058 
2059 	if (rc != GLD_SUCCESS) {
2060 		if (rc == GLD_NORESOURCES) {
2061 			vlan->gldv_stats->glds_xmtretry++;
2062 			(void) putbq(q, mp);
2063 		} else {
2064 			/* transmit error; drop the packet */
2065 			freemsg(mp);
2066 			/* We're supposed to count failed attempts as well */
2067 			UPDATE_STATS(vlan, pktinfo, 1);
2068 #ifdef GLD_DEBUG
2069 			if (gld_debug & GLDERRS)
2070 				cmn_err(CE_WARN,
2071 				    "gld_start: gldm_send failed %d", rc);
2072 #endif
2073 		}
2074 		if (nmp)
2075 			freemsg(nmp);	/* free the dupped message */
2076 		return (rc);
2077 	}
2078 
2079 	UPDATE_STATS(vlan, pktinfo, 1);
2080 
2081 	/*
2082 	 * Loopback case.  The message needs to be sent back up on
2083 	 * the read side.  This silently fails if the dupmsg failed
2084 	 * above.  That is probably OK: if there was no memory to dup
2085 	 * the block, there isn't much we could do anyway.
2086 	 */
2087 	if (nmp) {
2088 		GLDM_LOCK(macinfo, RW_WRITER);
2089 		gld_precv(macinfo, vlan, nmp);
2090 		GLDM_UNLOCK(macinfo);
2091 	}
2092 
2093 	return (GLD_SUCCESS);
2094 }
2095 
2096 /*
2097  * With MDT V.2 a single message mp can have one header area and multiple
2098  * payload areas. A packet is described by dl_pkt_info, and each packet can
2099  * span multiple payload areas (currently with TCP, each packet will have one
2100  * header and at the most two payload areas). MACs might have a limit on the
2101  * number of payload segments (i.e. per packet scatter-gather limit), and
2102  * MDT V.2 has a way of specifying that with mdt_span_limit; the MAC driver
2103  * might also have a limit on the total number of payloads in a message, and
2104  * that is specified by mdt_max_pld.
2105  */
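/*
 * Illustrative sketch only (driver side, not GLD code): an MDT-capable
 * driver advertises the limits described above through its macinfo
 * before registering with GLD.  The gldm_mdt_pre/send/post entry points
 * and gldm_mdt_segs are the names used by this file; the span-limit
 * field name and the xx_/XX_ symbols below are assumptions to be
 * checked against gld(9E) and gld.h:
 *
 *	macinfo->gldm_options  |= GLDOPT_MDT;
 *	macinfo->gldm_mdt_segs  = XX_MAX_PLD;	-- advertised as mdt_max_pld
 *	macinfo->gldm_mdt_sgl   = XX_MAX_SPAN;	-- per-packet span limit
 *	macinfo->gldm_mdt_pre   = xx_mdt_pre;	-- reserve tx resources
 *	macinfo->gldm_mdt_send  = xx_mdt_send;	-- transmit one packet descriptor
 *	macinfo->gldm_mdt_post  = xx_mdt_post;	-- finish/release the message
 *
 * gld_start_mdt() below drives exactly this pre/send/post handshake.
 */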
2106 static int
2107 gld_start_mdt(queue_t *q, mblk_t *mp, int caller)
2108 {
2109 	mblk_t *nextmp;
2110 	gld_t *gld = (gld_t *)q->q_ptr;
2111 	gld_mac_info_t *macinfo = gld->gld_mac_info;
2112 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2113 	int numpacks, mdtpacks;
2114 	gld_interface_t *ifp = mac_pvt->interfacep;
2115 	pktinfo_t pktinfo;
2116 	gld_vlan_t *vlan = (gld_vlan_t *)gld->gld_vlan;
2117 	boolean_t doloop = B_FALSE;
2118 	multidata_t *dlmdp;
2119 	pdescinfo_t pinfo;
2120 	pdesc_t *dl_pkt;
2121 	void *cookie;
2122 	uint_t totLen = 0;
2123 
2124 	ASSERT(DB_TYPE(mp) == M_MULTIDATA);
2125 
2126 	/*
2127 	 * We're not holding the lock for this check.  If the promiscuous
2128 	 * state is in flux it doesn't matter much if we get this wrong.
2129 	 */
2130 	if (mac_pvt->nprom > 0) {
2131 		/*
2132 		 * We want to loopback to the receive side, but to avoid
2133 		 * recursive lock entry:  if we came from wput(), which
2134 		 * could have looped back via IP from our own receive
2135 		 * interrupt thread, we decline this request.  wput()
2136 		 * will then queue the packet for wsrv().  This means
2137 		 * that when snoop is running we don't get the advantage
2138 		 * of the wput() multithreaded direct entry to the
2139 		 * driver's send routine.
2140 		 */
2141 		if (caller == GLD_WPUT) {
2142 			(void) putbq(q, mp);
2143 			return (GLD_NORESOURCES);
2144 		}
2145 		doloop = B_TRUE;
2146 
2147 		/*
2148 		 * unlike the M_DATA case, we don't have to call
2149 		 * dupmsg_noloan here because mmd_transform
2150 		 * (called by gld_precv_mdt) will make a copy of
2151 		 * each dblk.
2152 		 */
2153 	}
2154 
2155 	while (mp != NULL) {
2156 		/*
2157 		 * The lower layer driver only gets a single multidata
2158 		 * message; this also makes it easier to handle noresources.
2159 		 */
2160 		nextmp = mp->b_cont;
2161 		mp->b_cont = NULL;
2162 
2163 		/*
2164 		 * Get number of packets in this message; if nothing
2165 		 * to transmit, go to next message.
2166 		 */
2167 		dlmdp = mmd_getmultidata(mp);
2168 		if ((mdtpacks = (int)mmd_getcnt(dlmdp, NULL, NULL)) == 0) {
2169 			freemsg(mp);
2170 			mp = nextmp;
2171 			continue;
2172 		}
2173 
2174 		/*
2175 		 * Run interpreter to populate media specific pktinfo fields.
2176 		 * This collects per MDT message information like sap,
2177 		 * broad/multicast etc.
2178 		 */
2179 		(void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, &pktinfo,
2180 		    GLD_MDT_TX);
2181 
2182 		numpacks = (*macinfo->gldm_mdt_pre)(macinfo, mp, &cookie);
2183 
2184 		if (numpacks > 0) {
2185 			/*
2186 			 * Driver indicates it can transmit at least 1, and
2187 			 * possibly all, packets in MDT message.
2188 			 */
2189 			int count = numpacks;
2190 
2191 			for (dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo);
2192 			    (dl_pkt != NULL);
2193 			    dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo)) {
2194 				/*
2195 				 * Format this packet by adding link header and
2196 				 * adjusting pdescinfo to include it; get
2197 				 * packet length.
2198 				 */
2199 				(void) (*ifp->interpreter_mdt)(macinfo, NULL,
2200 				    &pinfo, &pktinfo, GLD_MDT_TXPKT);
2201 
2202 				totLen += pktinfo.pktLen;
2203 
2204 				/*
2205 				 * Loop back packet before handing to the
2206 				 * driver.
2207 				 */
2208 				if (doloop &&
2209 				    mmd_adjpdesc(dl_pkt, &pinfo) != NULL) {
2210 					GLDM_LOCK(macinfo, RW_WRITER);
2211 					gld_precv_mdt(macinfo, vlan, mp,
2212 					    dl_pkt, &pktinfo);
2213 					GLDM_UNLOCK(macinfo);
2214 				}
2215 
2216 				/*
2217 				 * And send off to driver.
2218 				 */
2219 				(*macinfo->gldm_mdt_send)(macinfo, cookie,
2220 				    &pinfo);
2221 
2222 				/*
2223 				 * Be careful not to invoke getnextpdesc if we
2224 				 * already sent the last packet, since driver
2225 				 * might have posted it to hardware causing a
2226 				 * completion and freemsg() so the MDT data
2227 				 * structures might not be valid anymore.
2228 				 */
2229 				if (--count == 0)
2230 					break;
2231 			}
2232 			(*macinfo->gldm_mdt_post)(macinfo, mp, cookie);
2233 			pktinfo.pktLen = totLen;
2234 			UPDATE_STATS(vlan, pktinfo, numpacks);
2235 
2236 			/*
2237 			 * In the noresources case (when the driver indicates it
2238 			 * cannot transmit all packets in the MDT message),
2239 			 * adjust to skip the already-sent packets on retry.
2240 			 */
2241 			if (numpacks != mdtpacks) {
2242 				/*
2243 				 * Release already processed packet descriptors.
2244 				 */
2245 				for (count = 0; count < numpacks; count++) {
2246 					dl_pkt = mmd_getfirstpdesc(dlmdp,
2247 					    &pinfo);
2248 					mmd_rempdesc(dl_pkt);
2249 				}
2250 				vlan->gldv_stats->glds_xmtretry++;
2251 				mp->b_cont = nextmp;
2252 				(void) putbq(q, mp);
2253 				return (GLD_NORESOURCES);
2254 			}
2255 		} else if (numpacks == 0) {
2256 			/*
2257 			 * Driver indicates it cannot transmit any packets
2258 			 * at present and will request a retry later.
2259 			 */
2260 			vlan->gldv_stats->glds_xmtretry++;
2261 			mp->b_cont = nextmp;
2262 			(void) putbq(q, mp);
2263 			return (GLD_NORESOURCES);
2264 		} else {
2265 			ASSERT(numpacks == -1);
2266 			/*
2267 			 * We're supposed to count failed attempts as well.
2268 			 */
2269 			dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo);
2270 			while (dl_pkt != NULL) {
2271 				/*
2272 				 * Call interpreter to determine total packet
2273 				 * bytes that are being dropped.
2274 				 */
2275 				(void) (*ifp->interpreter_mdt)(macinfo, NULL,
2276 				    &pinfo, &pktinfo, GLD_MDT_TXPKT);
2277 
2278 				totLen += pktinfo.pktLen;
2279 
2280 				dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo);
2281 			}
2282 			pktinfo.pktLen = totLen;
2283 			UPDATE_STATS(vlan, pktinfo, mdtpacks);
2284 
2285 			/*
2286 			 * Transmit error; drop the message, move on
2287 			 * to the next one.
2288 			 */
2289 			freemsg(mp);
2290 		}
2291 
2292 		/*
2293 		 * Process the next multidata block, if there is one.
2294 		 */
2295 		mp = nextmp;
2296 	}
2297 
2298 	return (GLD_SUCCESS);
2299 }
2300 
2301 /*
2302  * gld_intr (macinfo)
2303  */
2304 uint_t
2305 gld_intr(gld_mac_info_t *macinfo)
2306 {
2307 	ASSERT(macinfo != NULL);
2308 
2309 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
2310 		return (DDI_INTR_UNCLAIMED);
2311 
2312 	return ((*macinfo->gldm_intr)(macinfo));
2313 }
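/*
 * Illustrative sketch only (driver side, not GLD code): a GLD-based
 * driver typically registers gld_intr() as its interrupt handler at
 * attach time with its macinfo as the argument, so the call above
 * dispatches to the driver's gldm_intr() entry point.  Roughly:
 *
 *	if (ddi_add_intr(devinfo, 0, NULL, NULL,
 *	    gld_intr, (caddr_t)macinfo) != DDI_SUCCESS)
 *		goto attach_failure;	-- attach_failure is hypothetical
 */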
2314 
2315 /*
2316  * gld_sched (macinfo)
2317  *
2318  * This routine scans the streams that refer to a specific macinfo
2319  * structure and causes the STREAMS scheduler to try to run them if
2320  * they are marked as waiting for the transmit buffer.
2321  */
2322 void
2323 gld_sched(gld_mac_info_t *macinfo)
2324 {
2325 	gld_mac_pvt_t *mac_pvt;
2326 	gld_t *gld;
2327 	gld_vlan_t *vlan;
2328 	int i;
2329 
2330 	ASSERT(macinfo != NULL);
2331 
2332 	GLDM_LOCK(macinfo, RW_WRITER);
2333 
2334 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
2335 		/* We're probably being called from a leftover interrupt */
2336 		GLDM_UNLOCK(macinfo);
2337 		return;
2338 	}
2339 
2340 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2341 
2342 	for (i = 0; i < VLAN_HASHSZ; i++) {
2343 		for (vlan = mac_pvt->vlan_hash[i];
2344 		    vlan != NULL; vlan = vlan->gldv_next) {
2345 			for (gld = vlan->gldv_str_next;
2346 			    gld != (gld_t *)&vlan->gldv_str_next;
2347 			    gld = gld->gld_next) {
2348 				ASSERT(gld->gld_mac_info == macinfo);
2349 				gld->gld_sched_ran = B_TRUE;
2350 				membar_enter();
2351 				if (gld->gld_xwait) {
2352 					gld->gld_xwait = B_FALSE;
2353 					qenable(WR(gld->gld_qptr));
2354 				}
2355 			}
2356 		}
2357 	}
2358 
2359 	GLDM_UNLOCK(macinfo);
2360 }
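/*
 * Illustrative sketch only (driver side, not GLD code): after a
 * gldm_send() has returned GLD_NORESOURCES, the driver calls
 * gld_sched() once transmit resources are available again, typically
 * from its tx-completion path.  The xx_ names are hypothetical:
 *
 *	xx_reclaim_tx_descriptors(xxp);
 *	if (xxp->xx_need_sched) {
 *		xxp->xx_need_sched = B_FALSE;
 *		gld_sched(xxp->xx_macinfo);	-- re-enable waiting streams
 *	}
 */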
2361 
2362 /*
2363  * gld_precv (macinfo, mp)
2364  * called from gld_start to loopback a packet when in promiscuous mode
2365  */
2366 static void
2367 gld_precv(gld_mac_info_t *macinfo, gld_vlan_t *vlan, mblk_t *mp)
2368 {
2369 	gld_mac_pvt_t *mac_pvt;
2370 	gld_interface_t *ifp;
2371 	pktinfo_t pktinfo;
2372 
2373 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
2374 
2375 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2376 	ifp = mac_pvt->interfacep;
2377 
2378 	/*
2379 	 * call the media specific packet interpreter routine
2380 	 */
2381 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RXLOOP) != 0) {
2382 		freemsg(mp);
2383 		BUMP(vlan->gldv_stats->glds_rcvbadinterp, 1);
2384 #ifdef GLD_DEBUG
2385 		if (gld_debug & GLDERRS)
2386 			cmn_err(CE_WARN,
2387 			    "gld_precv: interpreter failed");
2388 #endif
2389 		return;
2390 	}
2391 
2392 	gld_sendup(macinfo, vlan, &pktinfo, mp, gld_paccept);
2393 }
2394 
2395 /*
2396  * called from gld_start_mdt to loopback packet(s) when in promiscuous mode
2397  */
2398 static void
2399 gld_precv_mdt(gld_mac_info_t *macinfo, gld_vlan_t *vlan, mblk_t *mp,
2400     pdesc_t *dl_pkt, pktinfo_t *pktinfo)
2401 {
2402 	mblk_t *adjmp;
2403 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2404 	gld_interface_t *ifp = mac_pvt->interfacep;
2405 
2406 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
2407 
2408 	/*
2409 	 * Get source/destination.
2410 	 */
2411 	(void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, pktinfo,
2412 	    GLD_MDT_RXLOOP);
2413 	if ((adjmp = mmd_transform(dl_pkt)) != NULL)
2414 		gld_sendup(macinfo, vlan, pktinfo, adjmp, gld_paccept);
2415 }
2416 
2417 /*
2418  * gld_recv (macinfo, mp)
2419  * called with an mac-level packet in a mblock; take the maclock,
2420  * called with a MAC-level packet in an mblk; take the maclock,
2421  *
2422  * V0 drivers already are holding the mutex when they call us.
2423  */
2424 void
2425 gld_recv(gld_mac_info_t *macinfo, mblk_t *mp)
2426 {
2427 	gld_recv_tagged(macinfo, mp, VLAN_VTAG_NONE);
2428 }
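/*
 * Illustrative sketch only (driver side, not GLD code): the receive
 * path of a driver hands each inbound frame, MAC header included, to
 * gld_recv(); a driver that has already stripped a VLAN tag uses
 * gld_recv_tagged() below instead.  The xx_ names are hypothetical:
 *
 *	if ((mp = allocb(len, BPRI_MED)) != NULL) {
 *		bcopy(xx_rx_buf, mp->b_wptr, len);
 *		mp->b_wptr += len;
 *		gld_recv(macinfo, mp);	-- GLD now owns mp
 *	}
 */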
2429 
2430 void
2431 gld_recv_tagged(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t vtag)
2432 {
2433 	gld_mac_pvt_t *mac_pvt;
2434 	char pbuf[3*GLD_MAX_ADDRLEN];
2435 	pktinfo_t pktinfo;
2436 	gld_interface_t *ifp;
2437 	queue_t *ipq = NULL;
2438 	gld_vlan_t *vlan;
2439 	uint32_t vid;
2440 
2441 	ASSERT(macinfo != NULL);
2442 	ASSERT(mp->b_datap->db_ref);
2443 
2444 	GLDM_LOCK(macinfo, RW_READER);
2445 
2446 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
2447 		/* We're probably being called from a leftover interrupt */
2448 		freemsg(mp);
2449 		goto done;
2450 	}
2451 
2452 	vid = GLD_VTAG_VID(vtag);
2453 	if ((vlan = gld_find_vlan(macinfo, vid)) == NULL) {
2454 		freemsg(mp);
2455 		goto done;
2456 	}
2457 
2458 	/*
2459 	 * Check whether underlying media code supports the IPQ hack,
2460 	 * and if so, whether the interpreter can quickly parse the
2461 	 * packet to get some relevant parameters.
2462 	 */
2463 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2464 	ifp = mac_pvt->interfacep;
2465 	if (((*ifp->interpreter)(macinfo, mp, &pktinfo,
2466 	    GLD_RXQUICK) == 0) && (vlan->gldv_ipq_flags == 0)) {
2467 		switch (pktinfo.ethertype) {
2468 		case ETHERTYPE_IP:
2469 			ipq = vlan->gldv_ipq;
2470 			break;
2471 		case ETHERTYPE_IPV6:
2472 			ipq = vlan->gldv_ipv6q;
2473 			break;
2474 		}
2475 	}
2476 
2477 	BUMP(vlan->gldv_stats->glds_bytercv64, pktinfo.pktLen);
2478 	BUMP(vlan->gldv_stats->glds_pktrcv64, 1);
2479 
2480 	/*
2481 	 * Special case for IP; we can simply do the putnext here, if:
2482 	 * o ipq != NULL, and therefore:
2483 	 * - the device type supports IPQ (ethernet and IPoIB);
2484 	 * - the interpreter could quickly parse the packet;
2485 	 * - there are no PROMISC_SAP streams (on this VLAN);
2486 	 * - there is one, and only one, IP stream bound (to this VLAN);
2487 	 * - that stream is a "fastpath" stream;
2488 	 * - the packet is of type ETHERTYPE_IP or ETHERTYPE_IPV6
2489 	 *
2490 	 * o the packet is specifically for me, and therefore:
2491 	 * - the packet is not multicast or broadcast (fastpath only
2492 	 *   wants unicast packets).
2493 	 *
2494 	 * o the stream is not asserting flow control.
2495 	 */
2496 	if (ipq != NULL &&
2497 	    pktinfo.isForMe &&
2498 	    canputnext(ipq)) {
2499 		/*
2500 		 * Skip the mac header. We know there is no LLC1/SNAP header
2501 		 * in this packet
2502 		 */
2503 		mp->b_rptr += pktinfo.macLen;
2504 		putnext(ipq, mp);
2505 		goto done;
2506 	}
2507 
2508 	/*
2509 	 * call the media specific packet interpreter routine
2510 	 */
2511 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RX) != 0) {
2512 		BUMP(vlan->gldv_stats->glds_rcvbadinterp, 1);
2513 #ifdef GLD_DEBUG
2514 		if (gld_debug & GLDERRS)
2515 			cmn_err(CE_WARN,
2516 			    "gld_recv_tagged: interpreter failed");
2517 #endif
2518 		freemsg(mp);
2519 		goto done;
2520 	}
2521 
2522 	/*
2523 	 * This is safe even if vtag is VLAN_VTAG_NONE
2524 	 */
2525 
2526 	pktinfo.vid = vid;
2527 	pktinfo.cfi = GLD_VTAG_CFI(vtag);
2528 #ifdef GLD_DEBUG
2529 	if (pktinfo.cfi != VLAN_CFI_ETHER)
2530 		cmn_err(CE_WARN, "gld_recv_tagged: non-ETHER CFI");
2531 #endif
2532 	pktinfo.user_pri = GLD_VTAG_PRI(vtag);
2533 
2534 #ifdef GLD_DEBUG
2535 	if ((gld_debug & GLDRECV) &&
2536 	    (!(gld_debug & GLDNOBR) ||
2537 	    (!pktinfo.isBroadcast && !pktinfo.isMulticast))) {
2538 		char pbuf2[3*GLD_MAX_ADDRLEN];
2539 
2540 		cmn_err(CE_CONT, "gld_recv_tagged: machdr=<%s -> %s>\n",
2541 		    gld_macaddr_sprintf(pbuf, pktinfo.shost,
2542 		    macinfo->gldm_addrlen), gld_macaddr_sprintf(pbuf2,
2543 		    pktinfo.dhost, macinfo->gldm_addrlen));
2544 		cmn_err(CE_CONT, "gld_recv_tagged: VlanId %d UserPri %d\n",
2545 		    pktinfo.vid,
2546 		    pktinfo.user_pri);
2547 		cmn_err(CE_CONT, "gld_recv_tagged: ethertype: %4x Len: %4d "
2548 		    "Hdr: %d,%d isMulticast: %s\n",
2549 		    pktinfo.ethertype,
2550 		    pktinfo.pktLen,
2551 		    pktinfo.macLen,
2552 		    pktinfo.hdrLen,
2553 		    pktinfo.isMulticast ? "Y" : "N");
2554 	}
2555 #endif
2556 
2557 	gld_sendup(macinfo, vlan, &pktinfo, mp, gld_accept);
2558 
2559 done:
2560 	GLDM_UNLOCK(macinfo);
2561 }
2562 
2563 /* =================================================================== */
2564 /* receive group: called from gld_recv and gld_precv* with maclock held */
2565 /* =================================================================== */
2566 
2567 /*
2568  * gld_sendup (macinfo, mp)
2569  * called with an ethernet packet in an mblk; must decide whether
2570  * the packet is for us and which streams to queue it to.
2571  */
2572 static void
2573 gld_sendup(gld_mac_info_t *macinfo, gld_vlan_t *vlan, pktinfo_t *pktinfo,
2574     mblk_t *mp, int (*acceptfunc)())
2575 {
2576 	gld_t *gld;
2577 	gld_t *fgld = NULL;
2578 	mblk_t *nmp;
2579 	void (*send)(queue_t *qp, mblk_t *mp);
2580 	int (*cansend)(queue_t *qp);
2581 
2582 #ifdef GLD_DEBUG
2583 	if (gld_debug & GLDTRACE)
2584 		cmn_err(CE_NOTE, "gld_sendup(%p, %p)", (void *)mp,
2585 		    (void *)macinfo);
2586 #endif
2587 
2588 	ASSERT(mp != NULL);
2589 	ASSERT(macinfo != NULL);
2590 	ASSERT(vlan != NULL);
2591 	ASSERT(pktinfo != NULL);
2592 	ASSERT(GLDM_LOCK_HELD(macinfo));
2593 
2594 	/*
2595 	 * The "fast" in "GLDOPT_FAST_RECV" refers to the speed at which
2596 	 * gld_recv returns to the caller's interrupt routine.  The total
2597 	 * network throughput would normally be lower when selecting this
2598 	 * option, because we putq the messages and process them later,
2599 	 * instead of sending them with putnext now.  Some time critical
2600 	 * device might need this, so it's here but undocumented.
2601 	 */
2602 	if (macinfo->gldm_options & GLDOPT_FAST_RECV) {
2603 		send = (void (*)(queue_t *, mblk_t *))putq;
2604 		cansend = canput;
2605 	} else {
2606 		send = (void (*)(queue_t *, mblk_t *))putnext;
2607 		cansend = canputnext;
2608 	}
2609 
2610 	/*
2611 	 * Search all the streams attached to this macinfo looking for
2612 	 * those eligible to receive the present packet.
2613 	 */
2614 	for (gld = vlan->gldv_str_next;
2615 	    gld != (gld_t *)&vlan->gldv_str_next; gld = gld->gld_next) {
2616 #ifdef GLD_VERBOSE_DEBUG
2617 		cmn_err(CE_NOTE, "gld_sendup: SAP: %4x QPTR: %p QSTATE: %s",
2618 		    gld->gld_sap, (void *)gld->gld_qptr,
2619 		    gld->gld_state == DL_IDLE ? "IDLE": "NOT IDLE");
2620 #endif
2621 		ASSERT(gld->gld_qptr != NULL);
2622 		ASSERT(gld->gld_state == DL_IDLE ||
2623 		    gld->gld_state == DL_UNBOUND);
2624 		ASSERT(gld->gld_mac_info == macinfo);
2625 		ASSERT(gld->gld_vlan == vlan);
2626 
2627 		if (gld->gld_state != DL_IDLE)
2628 			continue;	/* not eligible to receive */
2629 		if (gld->gld_flags & GLD_STR_CLOSING)
2630 			continue;	/* not eligible to receive */
2631 
2632 #ifdef GLD_DEBUG
2633 		if ((gld_debug & GLDRECV) &&
2634 		    (!(gld_debug & GLDNOBR) ||
2635 		    (!pktinfo->isBroadcast && !pktinfo->isMulticast)))
2636 			cmn_err(CE_NOTE,
2637 			    "gld_sendup: queue sap: %4x promis: %s %s %s",
2638 			    gld->gld_sap,
2639 			    gld->gld_flags & GLD_PROM_PHYS ? "phys " : "     ",
2640 			    gld->gld_flags & GLD_PROM_SAP  ? "sap  " : "     ",
2641 			    gld->gld_flags & GLD_PROM_MULT ? "multi" : "     ");
2642 #endif
2643 
2644 		/*
2645 		 * The accept function differs depending on whether this is
2646 		 * a packet that we received from the wire or a loopback.
2647 		 */
2648 		if ((*acceptfunc)(gld, pktinfo)) {
2649 			/* sap matches */
2650 			pktinfo->wasAccepted = 1;	/* known protocol */
2651 
2652 			if (!(*cansend)(gld->gld_qptr)) {
2653 				/*
2654 				 * Upper stream is not accepting messages, i.e.
2655 				 * it is flow controlled, therefore we will
2656 				 * forgo sending the message up this stream.
2657 				 */
2658 #ifdef GLD_DEBUG
2659 				if (gld_debug & GLDETRACE)
2660 					cmn_err(CE_WARN,
2661 					    "gld_sendup: canput failed");
2662 #endif
2663 				BUMP(vlan->gldv_stats->glds_blocked, 1);
2664 				qenable(gld->gld_qptr);
2665 				continue;
2666 			}
2667 
2668 			/*
2669 			 * We are trying to avoid an extra dupmsg() here.
2670 			 * If this is the first eligible queue, remember the
2671 			 * queue and send up the message after the loop.
2672 			 */
2673 			if (!fgld) {
2674 				fgld = gld;
2675 				continue;
2676 			}
2677 
2678 			/* duplicate the packet for this stream */
2679 			nmp = dupmsg(mp);
2680 			if (nmp == NULL) {
2681 				BUMP(vlan->gldv_stats->glds_gldnorcvbuf, 1);
2682 #ifdef GLD_DEBUG
2683 				if (gld_debug & GLDERRS)
2684 					cmn_err(CE_WARN,
2685 					    "gld_sendup: dupmsg failed");
2686 #endif
2687 				break;	/* couldn't get resources; drop it */
2688 			}
2689 			/* pass the message up the stream */
2690 			gld_passon(gld, nmp, pktinfo, send);
2691 		}
2692 	}
2693 
2694 	ASSERT(mp);
2695 	/* send the original dup of the packet up the first stream found */
2696 	if (fgld)
2697 		gld_passon(fgld, mp, pktinfo, send);
2698 	else
2699 		freemsg(mp);	/* no streams matched */
2700 
2701 	/* We do not count looped back packets */
2702 	if (acceptfunc == gld_paccept)
2703 		return;		/* transmit loopback case */
2704 
2705 	if (pktinfo->isBroadcast)
2706 		BUMP(vlan->gldv_stats->glds_brdcstrcv, 1);
2707 	else if (pktinfo->isMulticast)
2708 		BUMP(vlan->gldv_stats->glds_multircv, 1);
2709 
2710 	/* No stream accepted this packet */
2711 	if (!pktinfo->wasAccepted)
2712 		BUMP(vlan->gldv_stats->glds_unknowns, 1);
2713 }
2714 
2715 /*
2716  * A packet matches a stream if:
2717  *     the stream accepts EtherType encoded packets and the type matches
2718  *  or the stream accepts LLC packets and the packet is an LLC packet
2719  */
2720 #define	MATCH(stream, pktinfo) \
2721 	((stream->gld_ethertype && stream->gld_sap == pktinfo->ethertype) || \
2722 	(!stream->gld_ethertype && pktinfo->isLLC))
2723 
2724 /*
2725  * This function validates a packet for sending up a particular
2726  * stream.  The message header has been parsed and its characteristics
2727  * are recorded in the pktinfo data structure.  The stream's state is
2728  * described by the gld data structures.
2729  */
2730 static int
2731 gld_accept(gld_t *gld, pktinfo_t *pktinfo)
2732 {
2733 	/*
2734 	 * if there is no match do not bother checking further.
2735 	 */
2736 	if (!MATCH(gld, pktinfo) && !(gld->gld_flags & GLD_PROM_SAP))
2737 		return (0);
2738 
2739 	/*
2740 	 * We don't accept any packet from the hardware if we originated it.
2741 	 * (Contrast gld_paccept, the send-loopback accept function.)
2742 	 */
2743 	if (pktinfo->isLooped)
2744 		return (0);
2745 
2746 	/*
2747 	 * If the packet is broadcast or sent to us directly we will accept it.
2748 	 * Also we will accept multicast packets requested by the stream.
2749 	 */
2750 	if (pktinfo->isForMe || pktinfo->isBroadcast ||
2751 	    gld_mcmatch(gld, pktinfo))
2752 		return (1);
2753 
2754 	/*
2755 	 * Finally, accept anything else if we're in promiscuous mode
2756 	 */
2757 	if (gld->gld_flags & GLD_PROM_PHYS)
2758 		return (1);
2759 
2760 	return (0);
2761 }
2762 
2763 /*
2764  * Return TRUE if the given multicast address is one
2765  * of those that this particular Stream is interested in.
2766  */
2767 static int
2768 gld_mcmatch(gld_t *gld, pktinfo_t *pktinfo)
2769 {
2770 	/*
2771 	 * Return FALSE if not a multicast address.
2772 	 */
2773 	if (!pktinfo->isMulticast)
2774 		return (0);
2775 
2776 	/*
2777 	 * Check if all multicasts have been enabled for this Stream
2778 	 */
2779 	if (gld->gld_flags & GLD_PROM_MULT)
2780 		return (1);
2781 
2782 	/*
2783 	 * Return FALSE if no multicast addresses enabled for this Stream.
2784 	 */
2785 	if (!gld->gld_mcast)
2786 		return (0);
2787 
2788 	/*
2789 	 * Otherwise, look for it in the table.
2790 	 */
2791 	return (gld_multicast(pktinfo->dhost, gld));
2792 }
2793 
2794 /*
2795  * gld_multicast determines if the address is a multicast address for
2796  * this stream.
2797  */
2798 static int
2799 gld_multicast(unsigned char *macaddr, gld_t *gld)
2800 {
2801 	int i;
2802 
2803 	ASSERT(GLDM_LOCK_HELD(gld->gld_mac_info));
2804 
2805 	if (!gld->gld_mcast)
2806 		return (0);
2807 
2808 	for (i = 0; i < gld->gld_multicnt; i++) {
2809 		if (gld->gld_mcast[i]) {
2810 			ASSERT(gld->gld_mcast[i]->gldm_refcnt);
2811 			if (mac_eq(gld->gld_mcast[i]->gldm_addr, macaddr,
2812 			    gld->gld_mac_info->gldm_addrlen))
2813 				return (1);
2814 		}
2815 	}
2816 
2817 	return (0);
2818 }
2819 
2820 /*
2821  * accept function for looped back packets
2822  */
2823 static int
2824 gld_paccept(gld_t *gld, pktinfo_t *pktinfo)
2825 {
2826 	return (gld->gld_flags & GLD_PROM_PHYS &&
2827 	    (MATCH(gld, pktinfo) || gld->gld_flags & GLD_PROM_SAP));
2828 }
2829 
2830 static void
2831 gld_passon(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo,
2832 	void (*send)(queue_t *qp, mblk_t *mp))
2833 {
2834 	int skiplen;
2835 
2836 #ifdef GLD_DEBUG
2837 	if (gld_debug & GLDTRACE)
2838 		cmn_err(CE_NOTE, "gld_passon(%p, %p, %p)", (void *)gld,
2839 		    (void *)mp, (void *)pktinfo);
2840 
2841 	if ((gld_debug & GLDRECV) && (!(gld_debug & GLDNOBR) ||
2842 	    (!pktinfo->isBroadcast && !pktinfo->isMulticast)))
2843 		cmn_err(CE_NOTE, "gld_passon: q: %p mblk: %p minor: %d sap: %x",
2844 		    (void *)gld->gld_qptr->q_next, (void *)mp, gld->gld_minor,
2845 		    gld->gld_sap);
2846 #endif
2847 
2848 	/*
2849 	 * Figure out how much of the packet header to throw away.
2850 	 *
2851 	 * RAW streams expect to see the whole packet.
2852 	 *
2853 	 * Other streams expect to see the packet with the MAC header
2854 	 * removed.
2855 	 *
2856 	 * Normal DLPI (non RAW/FAST) streams also want the
2857 	 * DL_UNITDATA_IND M_PROTO message block prepended to the M_DATA.
2858 	 */
2859 	if (gld->gld_flags & GLD_RAW) {
2860 		skiplen = 0;
2861 	} else {
2862 		skiplen = pktinfo->macLen;		/* skip mac header */
2863 		if (gld->gld_ethertype)
2864 			skiplen += pktinfo->hdrLen;	/* skip any extra */
2865 	}
2866 
2867 	if (skiplen >= pktinfo->pktLen) {
2868 		/*
2869 		 * If the interpreter did its job right, then it cannot be
2870 		 * asking us to skip more bytes than are in the packet!
2871 		 * However, there could be zero data bytes left after the
2872 		 * amount to skip.  DLPI specifies that passed M_DATA blocks
2873 		 * should contain at least one byte of data, so if we have
2874 		 * none we just drop it.
2875 		 */
2876 		ASSERT(!(skiplen > pktinfo->pktLen));
2877 		freemsg(mp);
2878 		return;
2879 	}
2880 
2881 	/*
2882 	 * Skip over the header(s), taking care to possibly handle message
2883 	 * fragments shorter than the amount we need to skip.  Hopefully
2884 	 * the driver will put the entire packet, or at least the entire
2885 	 * header, into a single message block.  But we handle it if not.
2886 	 */
2887 	while (skiplen >= MBLKL(mp)) {
2888 		mblk_t *tmp = mp;
2889 		skiplen -= MBLKL(mp);
2890 		mp = mp->b_cont;
2891 		ASSERT(mp != NULL);	/* because skiplen < pktinfo->pktLen */
2892 		freeb(tmp);
2893 	}
2894 	mp->b_rptr += skiplen;
2895 
2896 	/* Add M_PROTO if necessary, and pass upstream */
2897 	if (((gld->gld_flags & GLD_FAST) && !pktinfo->isMulticast &&
2898 	    !pktinfo->isBroadcast) || (gld->gld_flags & GLD_RAW)) {
2899 		/* RAW/FAST: just send up the M_DATA */
2900 		(*send)(gld->gld_qptr, mp);
2901 	} else {
2902 		/* everybody else wants to see a unitdata_ind structure */
2903 		mp = gld_addudind(gld, mp, pktinfo);
2904 		if (mp)
2905 			(*send)(gld->gld_qptr, mp);
2906 		/* if it failed, gld_addudind already bumped statistic */
2907 	}
2908 }
2909 
2910 /*
2911  * gld_addudind(gld, mp, pktinfo)
2912  * format a DL_UNITDATA_IND message to be sent upstream to the user
2913  */
2914 static mblk_t *
2915 gld_addudind(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo)
2916 {
2917 	gld_mac_info_t		*macinfo = gld->gld_mac_info;
2918 	gld_vlan_t		*vlan = (gld_vlan_t *)gld->gld_vlan;
2919 	dl_unitdata_ind_t	*dludindp;
2920 	mblk_t			*nmp;
2921 	int			size;
2922 	int			type;
2923 
2924 #ifdef GLD_DEBUG
2925 	if (gld_debug & GLDTRACE)
2926 		cmn_err(CE_NOTE, "gld_addudind(%p, %p, %p)", (void *)gld,
2927 		    (void *)mp, (void *)pktinfo);
2928 #endif
2929 	ASSERT(macinfo != NULL);
2930 
2931 	/*
2932 	 * Allocate the DL_UNITDATA_IND M_PROTO header; if allocation fails
2933 	 * we might as well discard, since we can't go further.
2934 	 */
2935 	size = sizeof (dl_unitdata_ind_t) +
2936 	    2 * (macinfo->gldm_addrlen + abs(macinfo->gldm_saplen));
2937 	if ((nmp = allocb(size, BPRI_MED)) == NULL) {
2938 		freemsg(mp);
2939 		BUMP(vlan->gldv_stats->glds_gldnorcvbuf, 1);
2940 #ifdef GLD_DEBUG
2941 		if (gld_debug & GLDERRS)
2942 			cmn_err(CE_WARN,
2943 			    "gld_addudind: allocb failed");
2944 #endif
2945 		return ((mblk_t *)NULL);
2946 	}
2947 	DB_TYPE(nmp) = M_PROTO;
2948 	nmp->b_rptr = nmp->b_datap->db_lim - size;
2949 
2950 	type = (gld->gld_ethertype) ? pktinfo->ethertype : 0;
2951 
2952 	/*
2953 	 * now setup the DL_UNITDATA_IND header
2954 	 *
2955 	 * XXX This looks broken if the saps aren't two bytes.
2956 	 */
2957 	dludindp = (dl_unitdata_ind_t *)nmp->b_rptr;
2958 	dludindp->dl_primitive = DL_UNITDATA_IND;
2959 	dludindp->dl_src_addr_length =
2960 	    dludindp->dl_dest_addr_length = macinfo->gldm_addrlen +
2961 					abs(macinfo->gldm_saplen);
2962 	dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t);
2963 	dludindp->dl_src_addr_offset = dludindp->dl_dest_addr_offset +
2964 					dludindp->dl_dest_addr_length;
2965 
2966 	dludindp->dl_group_address = (pktinfo->isMulticast ||
2967 					pktinfo->isBroadcast);
2968 
2969 	nmp->b_wptr = nmp->b_rptr + dludindp->dl_dest_addr_offset;
2970 
2971 	mac_copy(pktinfo->dhost, nmp->b_wptr, macinfo->gldm_addrlen);
2972 	nmp->b_wptr += macinfo->gldm_addrlen;
2973 
2974 	ASSERT(macinfo->gldm_saplen == -2);	/* XXX following code assumes */
2975 	*(ushort_t *)(nmp->b_wptr) = type;
2976 	nmp->b_wptr += abs(macinfo->gldm_saplen);
2977 
2978 	ASSERT(nmp->b_wptr == nmp->b_rptr + dludindp->dl_src_addr_offset);
2979 
2980 	mac_copy(pktinfo->shost, nmp->b_wptr, macinfo->gldm_addrlen);
2981 	nmp->b_wptr += macinfo->gldm_addrlen;
2982 
2983 	*(ushort_t *)(nmp->b_wptr) = type;
2984 	nmp->b_wptr += abs(macinfo->gldm_saplen);
2985 
2986 	if (pktinfo->nosource)
2987 		dludindp->dl_src_addr_offset = dludindp->dl_src_addr_length = 0;
2988 	linkb(nmp, mp);
2989 	return (nmp);
2990 }
2991 
2992 /* ======================================================= */
2993 /* wsrv group: called from wsrv, single threaded per queue */
2994 /* ======================================================= */
2995 
2996 /*
2997  * We go to some trouble to avoid taking the same lock during normal
2998  * transmit processing as we do during normal receive processing.
2999  *
3000  * Elements of the per-instance macinfo and per-stream gld_t structures
3001  * are for the most part protected by the GLDM_LOCK rwlock/mutex.
3002  * (Elements of the gld_mac_pvt_t structure are considered part of the
3003  * macinfo structure for purposes of this discussion).
3004  *
3005  * However, it is more complicated than that:
3006  *
3007  *	Elements of the macinfo structure that are set before the macinfo
3008  *	structure is added to its device list by gld_register(), and never
3009  *	thereafter modified, are accessed without requiring taking the lock.
3010  *	A similar rule applies to those elements of the gld_t structure that
3011  *	are written by gld_open() before the stream is added to any list.
3012  *
3013  *	Most other elements of the macinfo structure may only be read or
3014  *	written while holding the maclock.
3015  *
3016  *	Most writable elements of the gld_t structure are written only
3017  *	within the single-threaded domain of wsrv() and subsidiaries.
3018  *	(This domain includes open/close while qprocs are not on.)
3019  *	The maclock need not be taken while within that domain
3020  *	simply to read those elements.  Writing to them, even within
3021  *	that domain, or reading from it outside that domain, requires
3022  *	holding the maclock.  Exception:  if the stream is not
3023  *	presently attached to a PPA, there is no associated macinfo,
3024  *	and no maclock need be taken.
3025  *
3026  *	The curr_macaddr element of the mac private structure is also
3027  *      protected by the GLDM_LOCK rwlock/mutex, like most other members
3028  *      of that structure. However, there are a few instances in the
3029  *      transmit path where we choose to forgo lock protection when
3030  *      reading this variable. This is to avoid lock contention between
3031  *      threads executing the DL_UNITDATA_REQ case and receive threads.
3032  *      In doing so we will take a small risk of a few corrupted packets
3033  *      during the short and rare times when someone is changing the interface's
3034  *      physical address. We consider the small cost in this rare case to be
3035  *      worth the benefit of reduced lock contention under normal operating
3036  *      conditions. The risk/cost is small because:
3037  *          1. there is no guarantee at this layer of uncorrupted delivery.
3038  *          2. the physaddr doesn't change very often - no performance hit.
3039  *          3. if the physaddr changes, other stuff is going to be screwed
3040  *             up for a while anyway, while other sites refigure ARP, etc.,
3041  *             so losing a couple of packets is the least of our worries.
3042  *
3043  *	The list of streams associated with a macinfo is protected by
3044  *	two locks:  the per-macinfo maclock, and the per-major-device
3045  *	gld_devlock.  Both must be held to modify the list, but either
3046  *	may be held to protect the list during reading/traversing.  This
3047  *	allows independent locking for multiple instances in the receive
3048  *	path (using macinfo), while facilitating routines that must search
3049  *	the entire set of streams associated with a major device, such as
3050  *	gld_findminor(), gld_finddevinfo(), close().  The "nstreams"
3051  *	macinfo	element, and the gld_mac_info gld_t element, are similarly
3052  *	macinfo element, and the gld_mac_info gld_t element, are similarly
3053  *	protected, since they change at exactly the same time the macinfo
3054  *
3055  *	The list of macinfo structures associated with a major device
3056  *	structure is protected by the gld_devlock, as is the per-major
3057  *	list of Style 2 streams in the DL_UNATTACHED state.
3058  *
3059  *	The list of major devices is kept on a module-global list
3060  *	gld_device_list, which has its own lock to protect the list.
3061  *
3062  *	When it is necessary to hold more than one lock at a time, they
3063  *	are acquired in this "outside in" order:
3064  *		gld_device_list.gld_devlock
3065  *		glddev->gld_devlock
3066  *		GLDM_LOCK(macinfo)
3067  *
3068  *	Finally, there are some "volatile" elements of the gld_t structure
3069  *	used for synchronization between various routines that don't share
3070  *	the same mutexes.  See the routines for details.  These are:
3071  *		gld_xwait	between gld_wsrv() and gld_sched()
3072  *		gld_sched_ran	between gld_wsrv() and gld_sched()
3073  *		gld_in_unbind	between gld_wput() and wsrv's gld_unbind()
3074  *		gld_wput_count	between gld_wput() and wsrv's gld_unbind()
3075  *		gld_in_wsrv	between gld_wput() and gld_wsrv()
3076  *				(used in conjunction with q->q_first)
3077  */
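/*
 * Illustrative sketch only (not a copy of GLD code): a routine that
 * needs more than one of the locks above takes them in the "outside
 * in" order listed and drops them in the reverse order:
 *
 *	mutex_enter(&gld_device_list.gld_devlock);
 *	mutex_enter(&glddev->gld_devlock);
 *	GLDM_LOCK(macinfo, RW_WRITER);
 *	-- ... manipulate the lists and/or macinfo ...
 *	GLDM_UNLOCK(macinfo);
 *	mutex_exit(&glddev->gld_devlock);
 *	mutex_exit(&gld_device_list.gld_devlock);
 */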
3078 
3079 /*
3080  * gld_ioctl (q, mp)
3081  * handles all ioctl requests passed downstream. This routine is
3082  * passed a pointer to the message block with the ioctl request in it, and a
3083  * pointer to the queue so it can respond to the ioctl request with an ack.
3084  */
3085 int
3086 gld_ioctl(queue_t *q, mblk_t *mp)
3087 {
3088 	struct iocblk *iocp;
3089 	gld_t *gld;
3090 	gld_mac_info_t *macinfo;
3091 
3092 #ifdef GLD_DEBUG
3093 	if (gld_debug & GLDTRACE)
3094 		cmn_err(CE_NOTE, "gld_ioctl(%p %p)", (void *)q, (void *)mp);
3095 #endif
3096 	gld = (gld_t *)q->q_ptr;
3097 	iocp = (struct iocblk *)mp->b_rptr;
3098 	switch (iocp->ioc_cmd) {
3099 	case DLIOCRAW:		/* raw M_DATA mode */
3100 		gld->gld_flags |= GLD_RAW;
3101 		DB_TYPE(mp) = M_IOCACK;
3102 		qreply(q, mp);
3103 		break;
3104 
3105 	case DL_IOC_HDR_INFO:	/* fastpath */
3106 		if (gld_global_options & GLD_OPT_NO_FASTPATH) {
3107 			miocnak(q, mp, 0, EINVAL);
3108 			break;
3109 		}
3110 		gld_fastpath(gld, q, mp);
3111 		break;
3112 
3113 	default:
3114 		macinfo	 = gld->gld_mac_info;
3115 		if (macinfo == NULL || macinfo->gldm_ioctl == NULL) {
3116 			miocnak(q, mp, 0, EINVAL);
3117 			break;
3118 		}
3119 
3120 		GLDM_LOCK(macinfo, RW_WRITER);
3121 		(void) (*macinfo->gldm_ioctl) (macinfo, q, mp);
3122 		GLDM_UNLOCK(macinfo);
3123 		break;
3124 	}
3125 	return (0);
3126 }
3127 
3128 /*
3129  * Since the rules for "fastpath" mode don't seem to be documented
3130  * anywhere, I will describe GLD's rules for fastpath users here:
3131  *
3132  * Once in this mode you remain there until close.
3133  * If you unbind/rebind you should get a new header using DL_IOC_HDR_INFO.
3134  * You must be bound (DL_IDLE) to transmit.
3135  * There are other rules not listed above.
3136  */
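/*
 * Illustrative sketch only (upstream consumer side, not GLD code):
 * the DL_IOC_HDR_INFO request parsed by gld_fastpath() below is an
 * M_IOCTL whose b_cont carries a DL_UNITDATA_REQ naming the intended
 * destination, and GLD acks it with the prebuilt MAC header linked
 * after that request:
 *
 *	request:  M_IOCTL (ioc_cmd = DL_IOC_HDR_INFO)
 *	            b_cont -> dl_unitdata_req_t + dest addr/sap (maclen bytes)
 *
 *	ack:      M_IOCACK
 *	            b_cont -> the original DL_UNITDATA_REQ
 *	              b_cont -> prebuilt fastpath MAC header (M_DATA)
 */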
3137 static void
3138 gld_fastpath(gld_t *gld, queue_t *q, mblk_t *mp)
3139 {
3140 	gld_interface_t *ifp;
3141 	gld_mac_info_t *macinfo;
3142 	dl_unitdata_req_t *dludp;
3143 	mblk_t *nmp;
3144 	t_scalar_t off, len;
3145 	uint_t maclen;
3146 	int error;
3147 	gld_vlan_t *vlan;
3148 
3149 	if (gld->gld_state != DL_IDLE) {
3150 		miocnak(q, mp, 0, EINVAL);
3151 		return;
3152 	}
3153 
3154 	macinfo = gld->gld_mac_info;
3155 	ASSERT(macinfo != NULL);
3156 	maclen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen);
3157 
3158 	error = miocpullup(mp, sizeof (dl_unitdata_req_t) + maclen);
3159 	if (error != 0) {
3160 		miocnak(q, mp, 0, error);
3161 		return;
3162 	}
3163 
3164 	dludp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
3165 	off = dludp->dl_dest_addr_offset;
3166 	len = dludp->dl_dest_addr_length;
3167 	if (dludp->dl_primitive != DL_UNITDATA_REQ ||
3168 	    !MBLKIN(mp->b_cont, off, len) || len != maclen) {
3169 		miocnak(q, mp, 0, EINVAL);
3170 		return;
3171 	}
3172 
3173 	/*
3174 	 * We take his fastpath request as a declaration that he will accept
3175 	 * M_DATA messages from us, whether or not we are willing to accept
3176 	 * them from him.  This allows us to have fastpath in one direction
3177 	 * (flow upstream) even on media with Source Routing, where we are
3178 	 * unable to provide a fixed MAC header to be prepended to downstream
3179 	 * flowing packets.  So we set GLD_FAST whether or not we decide to
3180 	 * allow him to send M_DATA down to us.
3181 	 */
3182 	GLDM_LOCK(macinfo, RW_WRITER);
3183 	gld->gld_flags |= GLD_FAST;
3184 	vlan = (gld_vlan_t *)gld->gld_vlan;
3185 	vlan->gldv_ipq_flags &= ~IPQ_DISABLED;
3186 	GLDM_UNLOCK(macinfo);
3187 
3188 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
3189 
3190 	/* This will fail for Source Routing media */
3191 	/* Also on Ethernet on 802.2 SAPs */
3192 	/* It also fails for 802.2 SAPs on Ethernet */
3193 		miocnak(q, mp, 0, ENOMEM);
3194 		return;
3195 	}
3196 
3197 	/*
3198 	 * Link new mblk in after the "request" mblks.
3199 	 */
3200 	linkb(mp, nmp);
3201 	miocack(q, mp, msgdsize(mp->b_cont), 0);
3202 }
3203 
3204 /*
3205  * gld_cmds (q, mp)
3206  *	process the DL commands as defined in dlpi.h
3207  *	note that the primitives return status which is passed back
3208  *	to the service procedure.  If the value is GLDE_RETRY, then
3209  *	it is assumed that processing must stop and the primitive has
3210  *	been put back onto the queue.  If the value is any other error,
3211  *	then an error ack is generated by the service procedure.
3212  */
3213 static int
3214 gld_cmds(queue_t *q, mblk_t *mp)
3215 {
3216 	union DL_primitives *dlp = (union DL_primitives *)mp->b_rptr;
3217 	gld_t *gld = (gld_t *)(q->q_ptr);
3218 	int result = DL_BADPRIM;
3219 	int mblkl = MBLKL(mp);
3220 	t_uscalar_t dlreq;
3221 
3222 	/* Make sure we have at least dlp->dl_primitive */
3223 	if (mblkl < sizeof (dlp->dl_primitive))
3224 		return (DL_BADPRIM);
3225 
3226 	dlreq = dlp->dl_primitive;
3227 #ifdef	GLD_DEBUG
3228 	if (gld_debug & GLDTRACE)
3229 		cmn_err(CE_NOTE,
3230 		    "gld_cmds(%p, %p):dlp=%p, dlp->dl_primitive=%d",
3231 		    (void *)q, (void *)mp, (void *)dlp, dlreq);
3232 #endif
3233 
3234 	switch (dlreq) {
3235 	case DL_UDQOS_REQ:
3236 		if (mblkl < DL_UDQOS_REQ_SIZE)
3237 			break;
3238 		result = gld_udqos(q, mp);
3239 		break;
3240 
3241 	case DL_BIND_REQ:
3242 		if (mblkl < DL_BIND_REQ_SIZE)
3243 			break;
3244 		result = gld_bind(q, mp);
3245 		break;
3246 
3247 	case DL_UNBIND_REQ:
3248 		if (mblkl < DL_UNBIND_REQ_SIZE)
3249 			break;
3250 		result = gld_unbind(q, mp);
3251 		break;
3252 
3253 	case DL_UNITDATA_REQ:
3254 		if (mblkl < DL_UNITDATA_REQ_SIZE)
3255 			break;
3256 		result = gld_unitdata(q, mp);
3257 		break;
3258 
3259 	case DL_INFO_REQ:
3260 		if (mblkl < DL_INFO_REQ_SIZE)
3261 			break;
3262 		result = gld_inforeq(q, mp);
3263 		break;
3264 
3265 	case DL_ATTACH_REQ:
3266 		if (mblkl < DL_ATTACH_REQ_SIZE)
3267 			break;
3268 		if (gld->gld_style == DL_STYLE2)
3269 			result = gldattach(q, mp);
3270 		else
3271 			result = DL_NOTSUPPORTED;
3272 		break;
3273 
3274 	case DL_DETACH_REQ:
3275 		if (mblkl < DL_DETACH_REQ_SIZE)
3276 			break;
3277 		if (gld->gld_style == DL_STYLE2)
3278 			result = gldunattach(q, mp);
3279 		else
3280 			result = DL_NOTSUPPORTED;
3281 		break;
3282 
3283 	case DL_ENABMULTI_REQ:
3284 		if (mblkl < DL_ENABMULTI_REQ_SIZE)
3285 			break;
3286 		result = gld_enable_multi(q, mp);
3287 		break;
3288 
3289 	case DL_DISABMULTI_REQ:
3290 		if (mblkl < DL_DISABMULTI_REQ_SIZE)
3291 			break;
3292 		result = gld_disable_multi(q, mp);
3293 		break;
3294 
3295 	case DL_PHYS_ADDR_REQ:
3296 		if (mblkl < DL_PHYS_ADDR_REQ_SIZE)
3297 			break;
3298 		result = gld_physaddr(q, mp);
3299 		break;
3300 
3301 	case DL_SET_PHYS_ADDR_REQ:
3302 		if (mblkl < DL_SET_PHYS_ADDR_REQ_SIZE)
3303 			break;
3304 		result = gld_setaddr(q, mp);
3305 		break;
3306 
3307 	case DL_PROMISCON_REQ:
3308 		if (mblkl < DL_PROMISCON_REQ_SIZE)
3309 			break;
3310 		result = gld_promisc(q, mp, dlreq, B_TRUE);
3311 		break;
3312 
3313 	case DL_PROMISCOFF_REQ:
3314 		if (mblkl < DL_PROMISCOFF_REQ_SIZE)
3315 			break;
3316 		result = gld_promisc(q, mp, dlreq, B_FALSE);
3317 		break;
3318 
3319 	case DL_GET_STATISTICS_REQ:
3320 		if (mblkl < DL_GET_STATISTICS_REQ_SIZE)
3321 			break;
3322 		result = gld_get_statistics(q, mp);
3323 		break;
3324 
3325 	case DL_CAPABILITY_REQ:
3326 		if (mblkl < DL_CAPABILITY_REQ_SIZE)
3327 			break;
3328 		result = gld_cap(q, mp);
3329 		break;
3330 
3331 	case DL_NOTIFY_REQ:
3332 		if (mblkl < DL_NOTIFY_REQ_SIZE)
3333 			break;
3334 		result = gld_notify_req(q, mp);
3335 		break;
3336 
3337 	case DL_XID_REQ:
3338 	case DL_XID_RES:
3339 	case DL_TEST_REQ:
3340 	case DL_TEST_RES:
3341 	case DL_CONTROL_REQ:
3342 	case DL_PASSIVE_REQ:
3343 		result = DL_NOTSUPPORTED;
3344 		break;
3345 
3346 	default:
3347 #ifdef	GLD_DEBUG
3348 		if (gld_debug & GLDERRS)
3349 			cmn_err(CE_WARN,
3350 			    "gld_cmds: unknown M_PROTO message: %d",
3351 			    dlreq);
3352 #endif
3353 		result = DL_BADPRIM;
3354 	}
3355 
3356 	return (result);
3357 }
3358 
3359 static int
3360 gld_cap(queue_t *q, mblk_t *mp)
3361 {
3362 	gld_t *gld = (gld_t *)q->q_ptr;
3363 	dl_capability_req_t *dlp = (dl_capability_req_t *)mp->b_rptr;
3364 
3365 	if (gld->gld_state == DL_UNATTACHED)
3366 		return (DL_OUTSTATE);
3367 
3368 	if (dlp->dl_sub_length == 0)
3369 		return (gld_cap_ack(q, mp));
3370 
3371 	return (gld_cap_enable(q, mp));
3372 }
3373 
3374 static int
3375 gld_cap_ack(queue_t *q, mblk_t *mp)
3376 {
3377 	gld_t *gld = (gld_t *)q->q_ptr;
3378 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3379 	gld_interface_t *ifp;
3380 	dl_capability_ack_t *dlap;
3381 	dl_capability_sub_t *dlsp;
3382 	size_t size = sizeof (dl_capability_ack_t);
3383 	size_t subsize = 0;
3384 
3385 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
3386 
3387 	if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY)
3388 		subsize += sizeof (dl_capability_sub_t) +
3389 		    sizeof (dl_capab_hcksum_t);
3390 	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
3391 		subsize += sizeof (dl_capability_sub_t) +
3392 		    sizeof (dl_capab_zerocopy_t);
3393 	if (macinfo->gldm_options & GLDOPT_MDT)
3394 		subsize += (sizeof (dl_capability_sub_t) +
3395 		    sizeof (dl_capab_mdt_t));
3396 
3397 	if ((mp = mexchange(q, mp, size + subsize, M_PROTO,
3398 	    DL_CAPABILITY_ACK)) == NULL)
3399 		return (GLDE_OK);
3400 
3401 	dlap = (dl_capability_ack_t *)mp->b_rptr;
3402 	dlap->dl_sub_offset = 0;
3403 	if ((dlap->dl_sub_length = subsize) != 0)
3404 		dlap->dl_sub_offset = sizeof (dl_capability_ack_t);
3405 	dlsp = (dl_capability_sub_t *)&dlap[1];
3406 
3407 	if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY) {
3408 		dl_capab_hcksum_t *dlhp = (dl_capab_hcksum_t *)&dlsp[1];
3409 
3410 		dlsp->dl_cap = DL_CAPAB_HCKSUM;
3411 		dlsp->dl_length = sizeof (dl_capab_hcksum_t);
3412 
3413 		dlhp->hcksum_version = HCKSUM_VERSION_1;
3414 
3415 		dlhp->hcksum_txflags = 0;
3416 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_PARTIAL)
3417 			dlhp->hcksum_txflags |= HCKSUM_INET_PARTIAL;
3418 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V4)
3419 			dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V4;
3420 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V6)
3421 			dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V6;
3422 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_IPHDR)
3423 			dlhp->hcksum_txflags |= HCKSUM_IPHDRCKSUM;
3424 
3425 		dlcapabsetqid(&(dlhp->hcksum_mid), RD(q));
3426 		dlsp = (dl_capability_sub_t *)&dlhp[1];
3427 	}
3428 
3429 	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY) {
3430 		dl_capab_zerocopy_t *dlzp = (dl_capab_zerocopy_t *)&dlsp[1];
3431 
3432 		dlsp->dl_cap = DL_CAPAB_ZEROCOPY;
3433 		dlsp->dl_length = sizeof (dl_capab_zerocopy_t);
3434 		dlzp->zerocopy_version = ZEROCOPY_VERSION_1;
3435 		dlzp->zerocopy_flags = DL_CAPAB_VMSAFE_MEM;
3436 
3437 		dlcapabsetqid(&(dlzp->zerocopy_mid), RD(q));
3438 		dlsp = (dl_capability_sub_t *)&dlzp[1];
3439 	}
3440 
3441 	if (macinfo->gldm_options & GLDOPT_MDT) {
3442 		dl_capab_mdt_t *dlmp = (dl_capab_mdt_t *)&dlsp[1];
3443 
3444 		dlsp->dl_cap = DL_CAPAB_MDT;
3445 		dlsp->dl_length = sizeof (dl_capab_mdt_t);
3446 
3447 		dlmp->mdt_version = MDT_VERSION_2;
3448 		dlmp->mdt_max_pld = macinfo->gldm_mdt_segs;
3449 		dlmp->mdt_span_limit = macinfo->gldm_mdt_sgl;
3450 		dlcapabsetqid(&dlmp->mdt_mid, OTHERQ(q));
3451 		dlmp->mdt_flags = DL_CAPAB_MDT_ENABLE;
3452 		dlmp->mdt_hdr_head = ifp->hdr_size;
3453 		dlmp->mdt_hdr_tail = 0;
3454 	}
3455 
3456 	qreply(q, mp);
3457 	return (GLDE_OK);
3458 }
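
/*
 * For reference (descriptive note, not code from the original source):
 * the DL_CAPABILITY_ACK built above is a dl_capability_ack_t followed
 * immediately by one (dl_capability_sub_t, payload) pair per advertised
 * capability.  For a MAC with hardware checksum and zero-copy support
 * the message looks like:
 *
 *	dl_capability_ack_t	(dl_sub_offset = sizeof (ack),
 *				 dl_sub_length = total size of the pairs)
 *	dl_capability_sub_t	(dl_cap = DL_CAPAB_HCKSUM)
 *	dl_capab_hcksum_t
 *	dl_capability_sub_t	(dl_cap = DL_CAPAB_ZEROCOPY)
 *	dl_capab_zerocopy_t
 */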
3459 
3460 static int
3461 gld_cap_enable(queue_t *q, mblk_t *mp)
3462 {
3463 	dl_capability_req_t *dlp;
3464 	dl_capability_sub_t *dlsp;
3465 	dl_capab_hcksum_t *dlhp;
3466 	offset_t off;
3467 	size_t len;
3468 	size_t size;
3469 	offset_t end;
3470 
3471 	dlp = (dl_capability_req_t *)mp->b_rptr;
3472 	dlp->dl_primitive = DL_CAPABILITY_ACK;
3473 
3474 	off = dlp->dl_sub_offset;
3475 	len = dlp->dl_sub_length;
3476 
3477 	if (!MBLKIN(mp, off, len))
3478 		return (DL_BADPRIM);
3479 
3480 	end = off + len;
3481 	while (off < end) {
3482 		dlsp = (dl_capability_sub_t *)(mp->b_rptr + off);
3483 		size = sizeof (dl_capability_sub_t) + dlsp->dl_length;
3484 		if (off + size > end)
3485 			return (DL_BADPRIM);
3486 
3487 		switch (dlsp->dl_cap) {
3488 		case DL_CAPAB_HCKSUM:
3489 			dlhp = (dl_capab_hcksum_t *)&dlsp[1];
3490 			/* nothing useful we can do with the contents */
3491 			dlcapabsetqid(&(dlhp->hcksum_mid), RD(q));
3492 			break;
3493 		default:
3494 			break;
3495 		}
3496 
3497 		off += size;
3498 	}
3499 
3500 	qreply(q, mp);
3501 	return (GLDE_OK);
3502 }
3503 
3504 /*
3505  * Send a copy of the DL_NOTIFY_IND message <mp> to each stream that has
3506  * requested the specific <notification> that the message carries AND is
3507  * eligible and ready to receive the notification immediately.
3508  *
3509  * This routine ignores flow control. Notifications will be sent regardless.
3510  *
3511  * In all cases, the original message passed in is freed at the end of
3512  * the routine.
3513  */
3514 static void
3515 gld_notify_qs(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t notification)
3516 {
3517 	gld_mac_pvt_t *mac_pvt;
3518 	gld_vlan_t *vlan;
3519 	gld_t *gld;
3520 	mblk_t *nmp;
3521 	int i;
3522 
3523 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
3524 
3525 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3526 
3527 	/*
3528 	 * Search all the streams attached to this macinfo looking
3529 	 * for those eligible to receive the present notification.
3530 	 */
3531 	for (i = 0; i < VLAN_HASHSZ; i++) {
3532 		for (vlan = mac_pvt->vlan_hash[i];
3533 		    vlan != NULL; vlan = vlan->gldv_next) {
3534 			for (gld = vlan->gldv_str_next;
3535 			    gld != (gld_t *)&vlan->gldv_str_next;
3536 			    gld = gld->gld_next) {
3537 				ASSERT(gld->gld_qptr != NULL);
3538 				ASSERT(gld->gld_state == DL_IDLE ||
3539 				    gld->gld_state == DL_UNBOUND);
3540 				ASSERT(gld->gld_mac_info == macinfo);
3541 
3542 				if (gld->gld_flags & GLD_STR_CLOSING)
3543 					continue; /* not eligible - skip */
3544 				if (!(notification & gld->gld_notifications))
3545 					continue; /* not wanted - skip */
3546 				if ((nmp = dupmsg(mp)) == NULL)
3547 					continue; /* can't copy - skip */
3548 
3549 				/*
3550 				 * All OK; send dup'd notification up this
3551 				 * stream
3552 				 */
3553 				qreply(WR(gld->gld_qptr), nmp);
3554 			}
3555 		}
3556 	}
3557 
3558 	/*
3559 	 * Drop the original message block now
3560 	 */
3561 	freemsg(mp);
3562 }
3563 
3564 /*
3565  * For each (understood) bit in the <notifications> argument, construct
3566  * a DL_NOTIFY_IND message and send it to the specified <q>, or to all
3567  * eligible queues if <q> is NULL.
3568  */
3569 static void
3570 gld_notify_ind(gld_mac_info_t *macinfo, uint32_t notifications, queue_t *q)
3571 {
3572 	gld_mac_pvt_t *mac_pvt;
3573 	dl_notify_ind_t *dlnip;
3574 	struct gld_stats *stats;
3575 	mblk_t *mp;
3576 	size_t size;
3577 	uint32_t bit;
3578 
3579 	GLDM_LOCK(macinfo, RW_WRITER);
3580 
3581 	/*
3582 	 * The following cases shouldn't happen, but just in case the
3583 	 * MAC driver calls gld_linkstate() at an inappropriate time, we
3584 	 * check anyway ...
3585 	 */
3586 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
3587 		GLDM_UNLOCK(macinfo);
3588 		return;				/* not ready yet	*/
3589 	}
3590 
3591 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
3592 		GLDM_UNLOCK(macinfo);
3593 		return;				/* not ready anymore	*/
3594 	}
3595 
3596 	/*
3597 	 * Make sure the kstats are up to date, 'cos we use some of
3598 	 * the kstat values below, specifically the link speed ...
3599 	 */
3600 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3601 	stats = mac_pvt->statistics;
3602 	if (macinfo->gldm_get_stats)
3603 		(void) (*macinfo->gldm_get_stats)(macinfo, stats);
3604 
3605 	for (bit = 1; notifications != 0; bit <<= 1) {
3606 		if ((notifications & bit) == 0)
3607 			continue;
3608 		notifications &= ~bit;
3609 
3610 		size = DL_NOTIFY_IND_SIZE;
3611 		if (bit == DL_NOTE_PHYS_ADDR)
3612 			size += macinfo->gldm_addrlen;
3613 		if ((mp = allocb(size, BPRI_MED)) == NULL)
3614 			continue;
3615 
3616 		mp->b_datap->db_type = M_PROTO;
3617 		mp->b_wptr = mp->b_rptr + size;
3618 		dlnip = (dl_notify_ind_t *)mp->b_rptr;
3619 		dlnip->dl_primitive = DL_NOTIFY_IND;
3620 		dlnip->dl_notification = 0;
3621 		dlnip->dl_data = 0;
3622 		dlnip->dl_addr_length = 0;
3623 		dlnip->dl_addr_offset = 0;
3624 
3625 		switch (bit) {
3626 		case DL_NOTE_PROMISC_ON_PHYS:
3627 		case DL_NOTE_PROMISC_OFF_PHYS:
3628 			if (mac_pvt->nprom != 0)
3629 				dlnip->dl_notification = bit;
3630 			break;
3631 
3632 		case DL_NOTE_LINK_DOWN:
3633 			if (macinfo->gldm_linkstate == GLD_LINKSTATE_DOWN)
3634 				dlnip->dl_notification = bit;
3635 			break;
3636 
3637 		case DL_NOTE_LINK_UP:
3638 			if (macinfo->gldm_linkstate == GLD_LINKSTATE_UP)
3639 				dlnip->dl_notification = bit;
3640 			break;
3641 
3642 		case DL_NOTE_SPEED:
3643 			/*
3644 			 * Conversion required here:
3645 			 *	GLD keeps the speed in bit/s in a uint64
3646 			 *	DLPI wants it in kb/s in a uint32
3647 			 * Fortunately this is still big enough for 10Gb/s!
3648 			 */
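			/*
			 * Worked example (explanatory note only):
			 * 10 Gb/s = 10,000,000,000 bit/s, which the
			 * division below turns into 10,000,000 kb/s --
			 * comfortably below UINT32_MAX (~4.29e9), so the
			 * uint32 only overflows above roughly 4.29 Tb/s.
			 */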
3649 			dlnip->dl_notification = bit;
3650 			dlnip->dl_data = stats->glds_speed/1000ULL;
3651 			break;
3652 
3653 		case DL_NOTE_PHYS_ADDR:
3654 			dlnip->dl_notification = bit;
3655 			dlnip->dl_data = DL_CURR_PHYS_ADDR;
3656 			dlnip->dl_addr_offset = sizeof (dl_notify_ind_t);
3657 			dlnip->dl_addr_length = macinfo->gldm_addrlen +
3658 			    abs(macinfo->gldm_saplen);
3659 			mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3660 			mac_copy(mac_pvt->curr_macaddr,
3661 			    mp->b_rptr + sizeof (dl_notify_ind_t),
3662 			    macinfo->gldm_addrlen);
3663 			break;
3664 
3665 		default:
3666 			break;
3667 		}
3668 
3669 		if (dlnip->dl_notification == 0)
3670 			freemsg(mp);
3671 		else if (q != NULL)
3672 			qreply(q, mp);
3673 		else
3674 			gld_notify_qs(macinfo, mp, bit);
3675 	}
3676 
3677 	GLDM_UNLOCK(macinfo);
3678 }
3679 
3680 /*
3681  * gld_notify_req - handle a DL_NOTIFY_REQ message
3682  */
3683 static int
3684 gld_notify_req(queue_t *q, mblk_t *mp)
3685 {
3686 	gld_t *gld = (gld_t *)q->q_ptr;
3687 	gld_mac_info_t *macinfo;
3688 	gld_mac_pvt_t *pvt;
3689 	dl_notify_req_t *dlnrp;
3690 	dl_notify_ack_t *dlnap;
3691 
3692 	ASSERT(gld != NULL);
3693 	ASSERT(gld->gld_qptr == RD(q));
3694 
3695 	dlnrp = (dl_notify_req_t *)mp->b_rptr;
3696 
3697 #ifdef GLD_DEBUG
3698 	if (gld_debug & GLDTRACE)
3699 		cmn_err(CE_NOTE, "gld_notify_req(%p %p)",
3700 		    (void *)q, (void *)mp);
3701 #endif
3702 
3703 	if (gld->gld_state == DL_UNATTACHED) {
3704 #ifdef GLD_DEBUG
3705 		if (gld_debug & GLDERRS)
3706 			cmn_err(CE_NOTE, "gld_notify_req: wrong state (%d)",
3707 			    gld->gld_state);
3708 #endif
3709 		return (DL_OUTSTATE);
3710 	}
3711 
3712 	/*
3713 	 * Remember what notifications are required by this stream
3714 	 */
3715 	macinfo = gld->gld_mac_info;
3716 	pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3717 
3718 	gld->gld_notifications = dlnrp->dl_notifications & pvt->notifications;
3719 
3720 	/*
3721 	 * The return DL_NOTIFY_ACK carries the bitset of notifications
3722 	 * that this driver can provide, independently of which ones have
3723 	 * previously been or are now being requested.
3724 	 */
3725 	if ((mp = mexchange(q, mp, sizeof (dl_notify_ack_t), M_PCPROTO,
3726 	    DL_NOTIFY_ACK)) == NULL)
3727 		return (DL_SYSERR);
3728 
3729 	dlnap = (dl_notify_ack_t *)mp->b_rptr;
3730 	dlnap->dl_notifications = pvt->notifications;
3731 	qreply(q, mp);
3732 
3733 	/*
3734 	 * A side effect of a DL_NOTIFY_REQ is that after the DL_NOTIFY_ACK
3735 	 * reply, the requestor gets zero or more DL_NOTIFY_IND messages
3736 	 * that provide the current status.
3737 	 */
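	/*
	 * Example exchange (explanatory note only): a client that sends
	 * DL_NOTIFY_REQ asking for DL_NOTE_LINK_UP | DL_NOTE_LINK_DOWN
	 * first receives a DL_NOTIFY_ACK carrying the full set the driver
	 * supports, then the call below immediately delivers whichever of
	 * the two indications matches the current link state.
	 */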
3738 	gld_notify_ind(macinfo, gld->gld_notifications, q);
3739 
3740 	return (GLDE_OK);
3741 }
3742 
3743 /*
3744  * gld_linkstate()
3745  *	Called by driver to tell GLD the state of the physical link.
3746  *	As a side effect, sends a DL_NOTE_LINK_UP or DL_NOTE_LINK_DOWN
3747  *	notification to each client that has previously requested such
3748  *	notifications
3749  */
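/*
 * Typical driver usage, shown for illustration only -- "xx_intr" and
 * "link_is_up" are made-up names, not part of GLD:
 *
 *	static uint_t
 *	xx_intr(gld_mac_info_t *macinfo)
 *	{
 *		...
 *		gld_linkstate(macinfo, link_is_up ?
 *		    GLD_LINKSTATE_UP : GLD_LINKSTATE_DOWN);
 *		...
 *	}
 *
 * Repeated calls with an unchanged state are cheap: the code below
 * suppresses the notification when the state has not actually changed.
 */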
3750 void
3751 gld_linkstate(gld_mac_info_t *macinfo, int32_t newstate)
3752 {
3753 	uint32_t notification;
3754 
3755 	switch (newstate) {
3756 	default:
3757 		return;
3758 
3759 	case GLD_LINKSTATE_DOWN:
3760 		notification = DL_NOTE_LINK_DOWN;
3761 		break;
3762 
3763 	case GLD_LINKSTATE_UP:
3764 		notification = DL_NOTE_LINK_UP | DL_NOTE_SPEED;
3765 		break;
3766 
3767 	case GLD_LINKSTATE_UNKNOWN:
3768 		notification = 0;
3769 		break;
3770 	}
3771 
3772 	GLDM_LOCK(macinfo, RW_WRITER);
3773 	if (macinfo->gldm_linkstate == newstate)
3774 		notification = 0;
3775 	else
3776 		macinfo->gldm_linkstate = newstate;
3777 	GLDM_UNLOCK(macinfo);
3778 
3779 	if (notification)
3780 		gld_notify_ind(macinfo, notification, NULL);
3781 }
3782 
3783 /*
3784  * gld_udqos - set the current QoS parameters (priority only at the moment).
3785  */
3786 static int
3787 gld_udqos(queue_t *q, mblk_t *mp)
3788 {
3789 	dl_udqos_req_t *dlp;
3790 	gld_t  *gld = (gld_t *)q->q_ptr;
3791 	int off;
3792 	int len;
3793 	dl_qos_cl_sel1_t *selp;
3794 
3795 	ASSERT(gld);
3796 	ASSERT(gld->gld_qptr == RD(q));
3797 
3798 #ifdef GLD_DEBUG
3799 	if (gld_debug & GLDTRACE)
3800 		cmn_err(CE_NOTE, "gld_udqos(%p %p)", (void *)q, (void *)mp);
3801 #endif
3802 
3803 	if (gld->gld_state != DL_IDLE) {
3804 #ifdef GLD_DEBUG
3805 		if (gld_debug & GLDERRS)
3806 			cmn_err(CE_NOTE, "gld_udqos: wrong state (%d)",
3807 			    gld->gld_state);
3808 #endif
3809 		return (DL_OUTSTATE);
3810 	}
3811 
3812 	dlp = (dl_udqos_req_t *)mp->b_rptr;
3813 	off = dlp->dl_qos_offset;
3814 	len = dlp->dl_qos_length;
3815 
3816 	if (len != sizeof (dl_qos_cl_sel1_t) || !MBLKIN(mp, off, len))
3817 		return (DL_BADQOSTYPE);
3818 
3819 	selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + off);
3820 	if (selp->dl_qos_type != DL_QOS_CL_SEL1)
3821 		return (DL_BADQOSTYPE);
3822 
3823 	if (selp->dl_trans_delay != 0 &&
3824 	    selp->dl_trans_delay != DL_QOS_DONT_CARE)
3825 		return (DL_BADQOSPARAM);
3826 	if (selp->dl_protection != 0 &&
3827 	    selp->dl_protection != DL_QOS_DONT_CARE)
3828 		return (DL_BADQOSPARAM);
3829 	if (selp->dl_residual_error != 0 &&
3830 	    selp->dl_residual_error != DL_QOS_DONT_CARE)
3831 		return (DL_BADQOSPARAM);
3832 	if (selp->dl_priority < 0 || selp->dl_priority > 7)
3833 		return (DL_BADQOSPARAM);
3834 
3835 	gld->gld_upri = selp->dl_priority;
3836 
3837 	dlokack(q, mp, DL_UDQOS_REQ);
3838 	return (GLDE_OK);
3839 }
3840 
3841 static mblk_t *
3842 gld_bindack(queue_t *q, mblk_t *mp)
3843 {
3844 	gld_t *gld = (gld_t *)q->q_ptr;
3845 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3846 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3847 	dl_bind_ack_t *dlp;
3848 	size_t size;
3849 	t_uscalar_t addrlen;
3850 	uchar_t *sapp;
3851 
3852 	addrlen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen);
3853 	size = sizeof (dl_bind_ack_t) + addrlen;
3854 	if ((mp = mexchange(q, mp, size, M_PCPROTO, DL_BIND_ACK)) == NULL)
3855 		return (NULL);
3856 
3857 	dlp = (dl_bind_ack_t *)mp->b_rptr;
3858 	dlp->dl_sap = gld->gld_sap;
3859 	dlp->dl_addr_length = addrlen;
3860 	dlp->dl_addr_offset = sizeof (dl_bind_ack_t);
3861 	dlp->dl_max_conind = 0;
3862 	dlp->dl_xidtest_flg = 0;
3863 
3864 	mac_copy(mac_pvt->curr_macaddr, (uchar_t *)&dlp[1],
3865 	    macinfo->gldm_addrlen);
3866 	sapp = mp->b_rptr + dlp->dl_addr_offset + macinfo->gldm_addrlen;
3867 	*(ushort_t *)sapp = gld->gld_sap;
3868 
3869 	return (mp);
3870 }
3871 
3872 /*
3873  * gld_bind - determine if a SAP is already allocated and whether it is legal
3874  * to do the bind at this time
3875  */
3876 static int
3877 gld_bind(queue_t *q, mblk_t *mp)
3878 {
3879 	ulong_t	sap;
3880 	dl_bind_req_t *dlp;
3881 	gld_t *gld = (gld_t *)q->q_ptr;
3882 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3883 
3884 	ASSERT(gld);
3885 	ASSERT(gld->gld_qptr == RD(q));
3886 
3887 #ifdef GLD_DEBUG
3888 	if (gld_debug & GLDTRACE)
3889 		cmn_err(CE_NOTE, "gld_bind(%p %p)", (void *)q, (void *)mp);
3890 #endif
3891 
3892 	dlp = (dl_bind_req_t *)mp->b_rptr;
3893 	sap = dlp->dl_sap;
3894 
3895 #ifdef GLD_DEBUG
3896 	if (gld_debug & GLDPROT)
3897 		cmn_err(CE_NOTE, "gld_bind: lsap=%lx", sap);
3898 #endif
3899 
3900 	if (gld->gld_state != DL_UNBOUND) {
3901 #ifdef GLD_DEBUG
3902 		if (gld_debug & GLDERRS)
3903 			cmn_err(CE_NOTE, "gld_bind: bound or not attached (%d)",
3904 			    gld->gld_state);
3905 #endif
3906 		return (DL_OUTSTATE);
3907 	}
3908 	ASSERT(macinfo);
3909 
3910 	if (dlp->dl_service_mode != DL_CLDLS) {
3911 		return (DL_UNSUPPORTED);
3912 	}
3913 	if (dlp->dl_xidtest_flg & (DL_AUTO_XID | DL_AUTO_TEST)) {
3914 		return (DL_NOAUTO);
3915 	}
3916 
3917 	/*
3918 	 * Check sap validity and decide whether this stream accepts
3919 	 * IEEE 802.2 (LLC) packets.
3920 	 */
3921 	if (sap > ETHERTYPE_MAX)
3922 		return (DL_BADSAP);
3923 
3924 	/*
3925 	 * Decide whether the SAP value selects EtherType encoding/decoding.
3926 	 * For compatibility with monolithic ethernet drivers, the range of
3927 	 * SAP values is different for DL_ETHER media.
3928 	 */
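	/*
	 * Example (explanatory note only): on DL_ETHER, a bind to sap
	 * 0x0800 (IP) exceeds ETHERMTU (1500) and therefore selects
	 * EtherType encoding, while a bind to a small value such as 0xAA
	 * is treated as an IEEE 802.2 LLC SAP.  On non-Ethernet media the
	 * same decision is made against GLD_MAX_802_SAP instead.
	 */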
3929 	switch (macinfo->gldm_type) {
3930 	case DL_ETHER:
3931 		gld->gld_ethertype = (sap > ETHERMTU);
3932 		break;
3933 	default:
3934 		gld->gld_ethertype = (sap > GLD_MAX_802_SAP);
3935 		break;
3936 	}
3937 
3938 	/* if we get to here, then the SAP is legal enough */
3939 	GLDM_LOCK(macinfo, RW_WRITER);
3940 	gld->gld_state = DL_IDLE;	/* bound and ready */
3941 	gld->gld_sap = sap;
3942 	gld_set_ipq(gld);
3943 
3944 #ifdef GLD_DEBUG
3945 	if (gld_debug & GLDPROT)
3946 		cmn_err(CE_NOTE, "gld_bind: ok - sap = %d", gld->gld_sap);
3947 #endif
3948 
3949 	/* ACK the BIND */
3950 	mp = gld_bindack(q, mp);
3951 	GLDM_UNLOCK(macinfo);
3952 
3953 	if (mp != NULL) {
3954 		qreply(q, mp);
3955 		return (GLDE_OK);
3956 	}
3957 
3958 	return (DL_SYSERR);
3959 }
3960 
3961 /*
3962  * gld_unbind - perform an unbind of an LSAP or ether type on the stream.
3963  * The stream is still open and can be re-bound.
3964  */
3965 static int
3966 gld_unbind(queue_t *q, mblk_t *mp)
3967 {
3968 	gld_t *gld = (gld_t *)q->q_ptr;
3969 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3970 
3971 	ASSERT(gld);
3972 
3973 #ifdef GLD_DEBUG
3974 	if (gld_debug & GLDTRACE)
3975 		cmn_err(CE_NOTE, "gld_unbind(%p %p)", (void *)q, (void *)mp);
3976 #endif
3977 
3978 	if (gld->gld_state != DL_IDLE) {
3979 #ifdef GLD_DEBUG
3980 		if (gld_debug & GLDERRS)
3981 			cmn_err(CE_NOTE, "gld_unbind: wrong state (%d)",
3982 			    gld->gld_state);
3983 #endif
3984 		return (DL_OUTSTATE);
3985 	}
3986 	ASSERT(macinfo);
3987 
3988 	/*
3989 	 * Avoid unbinding (DL_UNBIND_REQ) while FAST/RAW is inside wput.
3990 	 * See comments above gld_start().
3991 	 */
3992 	gld->gld_in_unbind = B_TRUE;	/* disallow wput=>start */
3993 	membar_enter();
3994 	if (gld->gld_wput_count != 0) {
3995 		gld->gld_in_unbind = B_FALSE;
3996 		ASSERT(mp);		/* we didn't come from close */
3997 #ifdef GLD_DEBUG
3998 		if (gld_debug & GLDETRACE)
3999 			cmn_err(CE_NOTE, "gld_unbind: defer for wput");
4000 #endif
4001 		(void) putbq(q, mp);
4002 		qenable(q);		/* try again soon */
4003 		return (GLDE_RETRY);
4004 	}
4005 
4006 	GLDM_LOCK(macinfo, RW_WRITER);
4007 	gld->gld_state = DL_UNBOUND;
4008 	gld->gld_sap = 0;
4009 	gld_set_ipq(gld);
4010 	GLDM_UNLOCK(macinfo);
4011 
4012 	membar_exit();
4013 	gld->gld_in_unbind = B_FALSE;
4014 
4015 	/* mp is NULL if we came from close */
4016 	if (mp) {
4017 		gld_flushqueue(q);	/* flush the queues */
4018 		dlokack(q, mp, DL_UNBIND_REQ);
4019 	}
4020 	return (GLDE_OK);
4021 }
4022 
4023 /*
4024  * gld_inforeq - generate the response to an info request
4025  */
4026 static int
4027 gld_inforeq(queue_t *q, mblk_t *mp)
4028 {
4029 	gld_t		*gld;
4030 	dl_info_ack_t	*dlp;
4031 	int		bufsize;
4032 	glddev_t	*glddev;
4033 	gld_mac_info_t	*macinfo;
4034 	gld_mac_pvt_t	*mac_pvt;
4035 	int		sel_offset = 0;
4036 	int		range_offset = 0;
4037 	int		addr_offset;
4038 	int		addr_length;
4039 	int		sap_length;
4040 	int		brdcst_offset;
4041 	int		brdcst_length;
4042 	gld_vlan_t	*vlan;
4043 	uchar_t		*sapp;
4044 
4045 #ifdef GLD_DEBUG
4046 	if (gld_debug & GLDTRACE)
4047 		cmn_err(CE_NOTE, "gld_inforeq(%p %p)", (void *)q, (void *)mp);
4048 #endif
4049 	gld = (gld_t *)q->q_ptr;
4050 	ASSERT(gld);
4051 	glddev = gld->gld_device;
4052 	ASSERT(glddev);
4053 
4054 	if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) {
4055 		macinfo = gld->gld_mac_info;
4056 		ASSERT(macinfo != NULL);
4057 
4058 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4059 
4060 		addr_length = macinfo->gldm_addrlen;
4061 		sap_length = macinfo->gldm_saplen;
4062 		brdcst_length = macinfo->gldm_addrlen;
4063 	} else {
4064 		addr_length = glddev->gld_addrlen;
4065 		sap_length = glddev->gld_saplen;
4066 		brdcst_length = glddev->gld_addrlen;
4067 	}
4068 
4069 	bufsize = sizeof (dl_info_ack_t);
4070 
4071 	addr_offset = bufsize;
4072 	bufsize += addr_length;
4073 	bufsize += abs(sap_length);
4074 
4075 	brdcst_offset = bufsize;
4076 	bufsize += brdcst_length;
4077 
4078 	if ((vlan = (gld_vlan_t *)gld->gld_vlan) != NULL &&
4079 	    vlan->gldv_id != VLAN_VID_NONE) {
4080 		sel_offset = P2ROUNDUP(bufsize, sizeof (int64_t));
4081 		bufsize = sel_offset + sizeof (dl_qos_cl_sel1_t);
4082 
4083 		range_offset = P2ROUNDUP(bufsize, sizeof (int64_t));
4084 		bufsize = range_offset + sizeof (dl_qos_cl_range1_t);
4085 	}
4086 
4087 	if ((mp = mexchange(q, mp, bufsize, M_PCPROTO, DL_INFO_ACK)) == NULL)
4088 		return (GLDE_OK);	/* nothing more to be done */
4089 
4090 	bzero(mp->b_rptr, bufsize);
4091 
4092 	dlp = (dl_info_ack_t *)mp->b_rptr;
4093 	dlp->dl_primitive = DL_INFO_ACK;
4094 	dlp->dl_version = DL_VERSION_2;
4095 	dlp->dl_service_mode = DL_CLDLS;
4096 	dlp->dl_current_state = gld->gld_state;
4097 	dlp->dl_provider_style = gld->gld_style;
4098 
4099 	if (sel_offset != 0) {
4100 		dl_qos_cl_sel1_t	*selp;
4101 		dl_qos_cl_range1_t	*rangep;
4102 
4103 		ASSERT(range_offset != 0);
4104 
4105 		dlp->dl_qos_offset = sel_offset;
4106 		dlp->dl_qos_length = sizeof (dl_qos_cl_sel1_t);
4107 		dlp->dl_qos_range_offset = range_offset;
4108 		dlp->dl_qos_range_length = sizeof (dl_qos_cl_range1_t);
4109 
4110 		selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + sel_offset);
4111 		selp->dl_qos_type = DL_QOS_CL_SEL1;
4112 		selp->dl_priority = gld->gld_upri;
4113 
4114 		rangep = (dl_qos_cl_range1_t *)(mp->b_rptr + range_offset);
4115 		rangep->dl_qos_type = DL_QOS_CL_RANGE1;
4116 		rangep->dl_priority.dl_min = 0;
4117 		rangep->dl_priority.dl_max = 7;
4118 	}
4119 
4120 	if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) {
4121 		dlp->dl_min_sdu = macinfo->gldm_minpkt;
4122 		dlp->dl_max_sdu = macinfo->gldm_maxpkt;
4123 		dlp->dl_mac_type = macinfo->gldm_type;
4124 		dlp->dl_addr_length = addr_length + abs(sap_length);
4125 		dlp->dl_sap_length = sap_length;
4126 
4127 		if (gld->gld_state == DL_IDLE) {
4128 			/*
4129 			 * If we are bound to a non-LLC SAP on any medium
4130 			 * other than Ethernet, then we need room for a
4131 			 * SNAP header.  So we have to adjust the MTU size
4132 			 * accordingly.  XXX I suppose this should be done
4133 			 * in gldutil.c, but it seems likely that this will
4134 			 * always be true for everything GLD supports but
4135 			 * Ethernet.  Check this if you add another medium.
4136 			 */
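			/*
			 * Example (explanatory note only): an FDDI
			 * stream bound to an EtherType SAP sends its
			 * payload behind an LLC/SNAP header, so the
			 * advertised dl_max_sdu is reduced below by
			 * LLC_SNAP_HDR_LEN, the 8-byte LLC+SNAP
			 * encapsulation.
			 */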
4137 			if ((macinfo->gldm_type == DL_TPR ||
4138 			    macinfo->gldm_type == DL_FDDI) &&
4139 			    gld->gld_ethertype)
4140 				dlp->dl_max_sdu -= LLC_SNAP_HDR_LEN;
4141 
4142 			/* copy macaddr and sap */
4143 			dlp->dl_addr_offset = addr_offset;
4144 
4145 			mac_copy(mac_pvt->curr_macaddr, mp->b_rptr +
4146 			    addr_offset, macinfo->gldm_addrlen);
4147 			sapp = mp->b_rptr + addr_offset +
4148 			    macinfo->gldm_addrlen;
4149 			*(ushort_t *)sapp = gld->gld_sap;
4150 		} else {
4151 			dlp->dl_addr_offset = 0;
4152 		}
4153 
4154 		/* copy broadcast addr */
4155 		dlp->dl_brdcst_addr_length = macinfo->gldm_addrlen;
4156 		dlp->dl_brdcst_addr_offset = brdcst_offset;
4157 		mac_copy((caddr_t)macinfo->gldm_broadcast_addr,
4158 		    mp->b_rptr + brdcst_offset, brdcst_length);
4159 	} else {
4160 		/*
4161 		 * No PPA is attached.
4162 		 * The best we can do is use the values provided
4163 		 * by the first mac that called gld_register.
4164 		 */
4165 		dlp->dl_min_sdu = glddev->gld_minsdu;
4166 		dlp->dl_max_sdu = glddev->gld_maxsdu;
4167 		dlp->dl_mac_type = glddev->gld_type;
4168 		dlp->dl_addr_length = addr_length + abs(sap_length);
4169 		dlp->dl_sap_length = sap_length;
4170 		dlp->dl_addr_offset = 0;
4171 		dlp->dl_brdcst_addr_offset = brdcst_offset;
4172 		dlp->dl_brdcst_addr_length = brdcst_length;
4173 		mac_copy((caddr_t)glddev->gld_broadcast,
4174 		    mp->b_rptr + brdcst_offset, brdcst_length);
4175 	}
4176 	qreply(q, mp);
4177 	return (GLDE_OK);
4178 }
4179 
4180 /*
4181  * gld_unitdata (q, mp)
4182  * send a datagram.  Destination address/lsap is in M_PROTO
4183  * message (first mblk), data is in the remainder of the message.
4184  *
4185  */
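/*
 * For reference (descriptive note, not code from the original source),
 * the expected DL_UNITDATA_REQ layout is:
 *
 *	mp		M_PROTO: dl_unitdata_req_t, with the destination
 *			address (MAC address + SAP) at dl_dest_addr_offset
 *	mp->b_cont	M_DATA mblk chain carrying the payload
 *
 * which is why the code below insists that msgdsize() be nonzero and
 * that the destination address fit entirely within the first mblk.
 */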
4186 static int
4187 gld_unitdata(queue_t *q, mblk_t *mp)
4188 {
4189 	gld_t *gld = (gld_t *)q->q_ptr;
4190 	dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr;
4191 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4192 	size_t	msglen;
4193 	mblk_t	*nmp;
4194 	gld_interface_t *ifp;
4195 	uint32_t start;
4196 	uint32_t stuff;
4197 	uint32_t end;
4198 	uint32_t value;
4199 	uint32_t flags;
4200 	uint32_t upri;
4201 
4202 #ifdef GLD_DEBUG
4203 	if (gld_debug & GLDTRACE)
4204 		cmn_err(CE_NOTE, "gld_unitdata(%p %p)", (void *)q, (void *)mp);
4205 #endif
4206 
4207 	if (gld->gld_state != DL_IDLE) {
4208 #ifdef GLD_DEBUG
4209 		if (gld_debug & GLDERRS)
4210 			cmn_err(CE_NOTE, "gld_unitdata: wrong state (%d)",
4211 			    gld->gld_state);
4212 #endif
4213 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4214 		    dlp->dl_dest_addr_length, DL_OUTSTATE, 0);
4215 		return (GLDE_OK);
4216 	}
4217 	ASSERT(macinfo != NULL);
4218 
4219 	if (!MBLKIN(mp, dlp->dl_dest_addr_offset, dlp->dl_dest_addr_length) ||
4220 	    dlp->dl_dest_addr_length !=
4221 	    macinfo->gldm_addrlen + abs(macinfo->gldm_saplen)) {
4222 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4223 		    dlp->dl_dest_addr_length, DL_BADADDR, 0);
4224 		return (GLDE_OK);
4225 	}
4226 
4227 	upri = dlp->dl_priority.dl_max;
4228 
4229 	msglen = msgdsize(mp);
4230 	if (msglen == 0 || msglen > macinfo->gldm_maxpkt) {
4231 #ifdef GLD_DEBUG
4232 		if (gld_debug & GLDERRS)
4233 			cmn_err(CE_NOTE, "gld_unitdata: bad msglen (%d)",
4234 			    (int)msglen);
4235 #endif
4236 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4237 		    dlp->dl_dest_addr_length, DL_BADDATA, 0);
4238 		return (GLDE_OK);
4239 	}
4240 
4241 	ASSERT(mp->b_cont != NULL);	/* because msgdsize(mp) is nonzero */
4242 
4243 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
4244 
4245 	/* grab any checksum information that may be present */
4246 	hcksum_retrieve(mp->b_cont, NULL, NULL, &start, &stuff, &end,
4247 	    &value, &flags);
4248 
4249 	/*
4250 	 * Prepend a valid header for transmission
4251 	 */
4252 	if ((nmp = (*ifp->mkunitdata)(gld, mp)) == NULL) {
4253 #ifdef GLD_DEBUG
4254 		if (gld_debug & GLDERRS)
4255 			cmn_err(CE_NOTE, "gld_unitdata: mkunitdata failed.");
4256 #endif
4257 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4258 		    dlp->dl_dest_addr_length, DL_SYSERR, ENOSR);
4259 		return (GLDE_OK);
4260 	}
4261 
4262 	/* apply any checksum information to the first block in the chain */
4263 	(void) hcksum_assoc(nmp, NULL, NULL, start, stuff, end, value,
4264 	    flags, 0);
4265 
4266 	if (gld_start(q, nmp, GLD_WSRV, upri) == GLD_NORESOURCES) {
4267 		qenable(q);
4268 		return (GLDE_RETRY);
4269 	}
4270 
4271 	return (GLDE_OK);
4272 }
4273 
4274 /*
4275  * gldattach(q, mp)
4276  * DLPI DL_ATTACH_REQ
4277  * this attaches the stream to a PPA
4278  */
4279 static int
4280 gldattach(queue_t *q, mblk_t *mp)
4281 {
4282 	dl_attach_req_t *at;
4283 	gld_mac_info_t *macinfo;
4284 	gld_t  *gld = (gld_t *)q->q_ptr;
4285 	glddev_t *glddev;
4286 	gld_mac_pvt_t *mac_pvt;
4287 	uint32_t ppa;
4288 	uint32_t vid;
4289 	gld_vlan_t *vlan;
4290 
4291 	at = (dl_attach_req_t *)mp->b_rptr;
4292 
4293 	if (gld->gld_state != DL_UNATTACHED)
4294 		return (DL_OUTSTATE);
4295 
4296 	ASSERT(!gld->gld_mac_info);
4297 
4298 	ppa = at->dl_ppa % GLD_VLAN_SCALE;	/* 0 .. 999	*/
4299 	vid = at->dl_ppa / GLD_VLAN_SCALE;	/* 0 .. 4094	*/
4300 	if (vid > VLAN_VID_MAX)
4301 		return (DL_BADPPA);
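
	/*
	 * Worked example (explanatory note only), assuming GLD_VLAN_SCALE
	 * is 1000 as the ranges noted above imply: dl_ppa 5002 attaches to
	 * driver instance 2 with VLAN id 5, while a plain dl_ppa of 2
	 * leaves vid at VLAN_VID_NONE, i.e. the untagged stream on
	 * instance 2.
	 */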
4302 
4303 	glddev = gld->gld_device;
4304 	mutex_enter(&glddev->gld_devlock);
4305 	for (macinfo = glddev->gld_mac_next;
4306 	    macinfo != (gld_mac_info_t *)&glddev->gld_mac_next;
4307 	    macinfo = macinfo->gldm_next) {
4308 		int inst;
4309 
4310 		ASSERT(macinfo != NULL);
4311 		if (macinfo->gldm_ppa != ppa)
4312 			continue;
4313 
4314 		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
4315 			continue;	/* this one's not ready yet */
4316 
4317 		/*
4318 		 * VLAN sanity check
4319 		 */
4320 		if (vid != VLAN_VID_NONE && !VLAN_CAPABLE(macinfo)) {
4321 			mutex_exit(&glddev->gld_devlock);
4322 			return (DL_BADPPA);
4323 		}
4324 
4325 		/*
4326 		 * We found the correct PPA, hold the instance
4327 		 */
4328 		inst = ddi_get_instance(macinfo->gldm_devinfo);
4329 		if (inst == -1 || qassociate(q, inst) != 0) {
4330 			mutex_exit(&glddev->gld_devlock);
4331 			return (DL_BADPPA);
4332 		}
4333 
4334 		/* Take the stream off the per-driver-class list */
4335 		gldremque(gld);
4336 
4337 		/*
4338 		 * We must hold the lock to prevent multiple calls
4339 		 * to the reset and start routines.
4340 		 */
4341 		GLDM_LOCK(macinfo, RW_WRITER);
4342 
4343 		gld->gld_mac_info = macinfo;
4344 
4345 		if (macinfo->gldm_send_tagged != NULL)
4346 			gld->gld_send = macinfo->gldm_send_tagged;
4347 		else
4348 			gld->gld_send = macinfo->gldm_send;
4349 
4350 		if ((vlan = gld_get_vlan(macinfo, vid)) == NULL) {
4351 			GLDM_UNLOCK(macinfo);
4352 			gldinsque(gld, glddev->gld_str_prev);
4353 			mutex_exit(&glddev->gld_devlock);
4354 			(void) qassociate(q, -1);
4355 			return (DL_BADPPA);
4356 		}
4357 
4358 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4359 		if (!mac_pvt->started) {
4360 			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
4361 				gld_rem_vlan(vlan);
4362 				GLDM_UNLOCK(macinfo);
4363 				gldinsque(gld, glddev->gld_str_prev);
4364 				mutex_exit(&glddev->gld_devlock);
4365 				dlerrorack(q, mp, DL_ATTACH_REQ, DL_SYSERR,
4366 				    EIO);
4367 				(void) qassociate(q, -1);
4368 				return (GLDE_OK);
4369 			}
4370 		}
4371 
4372 		gld->gld_vlan = vlan;
4373 		vlan->gldv_nstreams++;
4374 		gldinsque(gld, vlan->gldv_str_prev);
4375 		gld->gld_state = DL_UNBOUND;
4376 		GLDM_UNLOCK(macinfo);
4377 
4378 #ifdef GLD_DEBUG
4379 		if (gld_debug & GLDPROT) {
4380 			cmn_err(CE_NOTE, "gldattach(%p, %p, PPA = %d)",
4381 			    (void *)q, (void *)mp, macinfo->gldm_ppa);
4382 		}
4383 #endif
4384 		mutex_exit(&glddev->gld_devlock);
4385 		dlokack(q, mp, DL_ATTACH_REQ);
4386 		return (GLDE_OK);
4387 	}
4388 	mutex_exit(&glddev->gld_devlock);
4389 	return (DL_BADPPA);
4390 }
4391 
4392 /*
4393  * gldunattach(q, mp)
4394  * DLPI DL_DETACH_REQ
4395  * detaches the mac layer from the stream
4396  */
4397 int
4398 gldunattach(queue_t *q, mblk_t *mp)
4399 {
4400 	gld_t  *gld = (gld_t *)q->q_ptr;
4401 	glddev_t *glddev = gld->gld_device;
4402 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4403 	int	state = gld->gld_state;
4404 	int	i;
4405 	gld_mac_pvt_t *mac_pvt;
4406 	gld_vlan_t *vlan;
4407 	boolean_t phys_off;
4408 	boolean_t mult_off;
4409 	int op = GLD_MAC_PROMISC_NOOP;
4410 
4411 	if (state != DL_UNBOUND)
4412 		return (DL_OUTSTATE);
4413 
4414 	ASSERT(macinfo != NULL);
4415 	ASSERT(gld->gld_sap == 0);
4416 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4417 
4418 #ifdef GLD_DEBUG
4419 	if (gld_debug & GLDPROT) {
4420 		cmn_err(CE_NOTE, "gldunattach(%p, %p, PPA = %d)",
4421 		    (void *)q, (void *)mp, macinfo->gldm_ppa);
4422 	}
4423 #endif
4424 
4425 	GLDM_LOCK(macinfo, RW_WRITER);
4426 
4427 	if (gld->gld_mcast) {
4428 		for (i = 0; i < gld->gld_multicnt; i++) {
4429 			gld_mcast_t *mcast;
4430 
4431 			if ((mcast = gld->gld_mcast[i]) != NULL) {
4432 				ASSERT(mcast->gldm_refcnt);
4433 				gld_send_disable_multi(macinfo, mcast);
4434 			}
4435 		}
4436 		kmem_free(gld->gld_mcast,
4437 		    sizeof (gld_mcast_t *) * gld->gld_multicnt);
4438 		gld->gld_mcast = NULL;
4439 		gld->gld_multicnt = 0;
4440 	}
4441 
4442 	/* decide if we need to turn off any promiscuity */
4443 	phys_off = (gld->gld_flags & GLD_PROM_PHYS &&
4444 	    --mac_pvt->nprom == 0);
4445 	mult_off = (gld->gld_flags & GLD_PROM_MULT &&
4446 	    --mac_pvt->nprom_multi == 0);
4447 
4448 	gld->gld_flags &= ~(GLD_PROM_PHYS | GLD_PROM_SAP | GLD_PROM_MULT);
4449 
4450 	if (phys_off) {
4451 		op = (mac_pvt->nprom_multi == 0) ? GLD_MAC_PROMISC_NONE :
4452 		    GLD_MAC_PROMISC_MULTI;
4453 	} else if (mult_off) {
4454 		op = (mac_pvt->nprom == 0) ? GLD_MAC_PROMISC_NONE :
4455 		    GLD_MAC_PROMISC_NOOP;	/* phys overrides multi */
4456 	}
4457 
4458 	if (op != GLD_MAC_PROMISC_NOOP)
4459 		(void) (*macinfo->gldm_set_promiscuous)(macinfo, op);
4460 
4461 	GLDM_UNLOCK(macinfo);
4462 
4463 	if (phys_off)
4464 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL);
4465 
4466 	/*
4467 	 * We need to hold both locks when modifying the mac stream list
4468 	 * to protect findminor as well as everyone else.
4469 	 */
4470 	mutex_enter(&glddev->gld_devlock);
4471 	GLDM_LOCK(macinfo, RW_WRITER);
4472 
4473 	/* disassociate this stream with its vlan and underlying mac */
4474 	gldremque(gld);
4475 
4476 	vlan = (gld_vlan_t *)gld->gld_vlan;
4477 	if (--vlan->gldv_nstreams == 0) {
4478 		gld_rem_vlan(vlan);
4479 		gld->gld_vlan = NULL;
4480 	}
4481 
4482 	gld->gld_mac_info = NULL;
4483 	gld->gld_state = DL_UNATTACHED;
4484 
4485 	/* cleanup mac layer if last vlan */
4486 	if (mac_pvt->nvlan == 0) {
4487 		gld_stop_mac(macinfo);
4488 		macinfo->gldm_GLD_flags &= ~GLD_INTR_WAIT;
4489 	}
4490 
4491 	/* make sure no references to this gld for gld_v0_sched */
4492 	if (mac_pvt->last_sched == gld)
4493 		mac_pvt->last_sched = NULL;
4494 
4495 	GLDM_UNLOCK(macinfo);
4496 
4497 	/* put the stream on the unattached Style 2 list */
4498 	gldinsque(gld, glddev->gld_str_prev);
4499 
4500 	mutex_exit(&glddev->gld_devlock);
4501 
4502 	/* There will be no mp if we were called from close */
4503 	if (mp) {
4504 		dlokack(q, mp, DL_DETACH_REQ);
4505 	}
4506 	if (gld->gld_style == DL_STYLE2)
4507 		(void) qassociate(q, -1);
4508 	return (GLDE_OK);
4509 }
4510 
4511 /*
4512  * gld_enable_multi (q, mp)
4513  * Enables a multicast address on the stream.  If the address is not
4514  * yet enabled at the mac layer, enable it there as well.
4515  */
4516 static int
4517 gld_enable_multi(queue_t *q, mblk_t *mp)
4518 {
4519 	gld_t  *gld = (gld_t *)q->q_ptr;
4520 	glddev_t *glddev;
4521 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4522 	unsigned char *maddr;
4523 	dl_enabmulti_req_t *multi;
4524 	gld_mcast_t *mcast;
4525 	int	i, rc;
4526 	gld_mac_pvt_t *mac_pvt;
4527 
4528 #ifdef GLD_DEBUG
4529 	if (gld_debug & GLDPROT) {
4530 		cmn_err(CE_NOTE, "gld_enable_multi(%p, %p)", (void *)q,
4531 		    (void *)mp);
4532 	}
4533 #endif
4534 
4535 	if (gld->gld_state == DL_UNATTACHED)
4536 		return (DL_OUTSTATE);
4537 
4538 	ASSERT(macinfo != NULL);
4539 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4540 
4541 	if (macinfo->gldm_set_multicast == NULL) {
4542 		return (DL_UNSUPPORTED);
4543 	}
4544 
4545 	multi = (dl_enabmulti_req_t *)mp->b_rptr;
4546 
4547 	if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) ||
4548 	    multi->dl_addr_length != macinfo->gldm_addrlen)
4549 		return (DL_BADADDR);
4550 
4551 	/* request appears to be valid */
4552 
4553 	glddev = mac_pvt->major_dev;
4554 	ASSERT(glddev == gld->gld_device);
4555 
4556 	maddr = mp->b_rptr + multi->dl_addr_offset;
4557 
4558 	/*
4559 	 * The multicast addresses live in a per-device table, along
4560 	 * with a reference count.  Each stream has a table that
4561 	 * points to entries in the device table, with the reference
4562 	 * count reflecting the number of streams pointing at it.  If
4563 	 * this multicast address is already in the per-device table,
4564 	 * all we have to do is point at it.
4565 	 */
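	/*
	 * Example (explanatory note only): if two streams on this device
	 * enable the same group address, the per-device table ends up with
	 * a single entry whose gldm_refcnt is 2.  The hardware is only
	 * programmed when the first enable takes the refcnt from 0 to 1,
	 * and only deprogrammed when the last disable drops it back to 0
	 * (see gld_send_disable_multi()).
	 */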
4566 	GLDM_LOCK(macinfo, RW_WRITER);
4567 
4568 	/* does this address appear in current table? */
4569 	if (gld->gld_mcast == NULL) {
4570 		/* no mcast addresses -- allocate table */
4571 		gld->gld_mcast = GETSTRUCT(gld_mcast_t *,
4572 		    glddev->gld_multisize);
4573 		if (gld->gld_mcast == NULL) {
4574 			GLDM_UNLOCK(macinfo);
4575 			dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR);
4576 			return (GLDE_OK);
4577 		}
4578 		gld->gld_multicnt = glddev->gld_multisize;
4579 	} else {
4580 		for (i = 0; i < gld->gld_multicnt; i++) {
4581 			if (gld->gld_mcast[i] &&
4582 			    mac_eq(gld->gld_mcast[i]->gldm_addr,
4583 			    maddr, macinfo->gldm_addrlen)) {
4584 				/* this is a match -- just succeed */
4585 				ASSERT(gld->gld_mcast[i]->gldm_refcnt);
4586 				GLDM_UNLOCK(macinfo);
4587 				dlokack(q, mp, DL_ENABMULTI_REQ);
4588 				return (GLDE_OK);
4589 			}
4590 		}
4591 	}
4592 
4593 	/*
4594 	 * it wasn't in the stream so check to see if the mac layer has it
4595 	 */
4596 	mcast = NULL;
4597 	if (mac_pvt->mcast_table == NULL) {
4598 		mac_pvt->mcast_table = GETSTRUCT(gld_mcast_t,
4599 		    glddev->gld_multisize);
4600 		if (mac_pvt->mcast_table == NULL) {
4601 			GLDM_UNLOCK(macinfo);
4602 			dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR);
4603 			return (GLDE_OK);
4604 		}
4605 	} else {
4606 		for (i = 0; i < glddev->gld_multisize; i++) {
4607 			if (mac_pvt->mcast_table[i].gldm_refcnt &&
4608 			    mac_eq(mac_pvt->mcast_table[i].gldm_addr,
4609 			    maddr, macinfo->gldm_addrlen)) {
4610 				mcast = &mac_pvt->mcast_table[i];
4611 				break;
4612 			}
4613 		}
4614 	}
4615 	if (mcast == NULL) {
4616 		/* not in mac layer -- find an empty mac slot to fill in */
4617 		for (i = 0; i < glddev->gld_multisize; i++) {
4618 			if (mac_pvt->mcast_table[i].gldm_refcnt == 0) {
4619 				mcast = &mac_pvt->mcast_table[i];
4620 				mac_copy(maddr, mcast->gldm_addr,
4621 				    macinfo->gldm_addrlen);
4622 				break;
4623 			}
4624 		}
4625 	}
4626 	if (mcast == NULL) {
4627 		/* couldn't get a mac layer slot */
4628 		GLDM_UNLOCK(macinfo);
4629 		return (DL_TOOMANY);
4630 	}
4631 
4632 	/* now we have a mac layer slot in mcast -- get a stream slot */
4633 	for (i = 0; i < gld->gld_multicnt; i++) {
4634 		if (gld->gld_mcast[i] != NULL)
4635 			continue;
4636 		/* found an empty slot */
4637 		if (!mcast->gldm_refcnt) {
4638 			/* set mcast in hardware */
4639 			unsigned char cmaddr[GLD_MAX_ADDRLEN];
4640 
4641 			ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
4642 			cmac_copy(maddr, cmaddr,
4643 			    macinfo->gldm_addrlen, macinfo);
4644 
4645 			rc = (*macinfo->gldm_set_multicast)
4646 			    (macinfo, cmaddr, GLD_MULTI_ENABLE);
4647 			if (rc == GLD_NOTSUPPORTED) {
4648 				GLDM_UNLOCK(macinfo);
4649 				return (DL_NOTSUPPORTED);
4650 			} else if (rc == GLD_NORESOURCES) {
4651 				GLDM_UNLOCK(macinfo);
4652 				return (DL_TOOMANY);
4653 			} else if (rc == GLD_BADARG) {
4654 				GLDM_UNLOCK(macinfo);
4655 				return (DL_BADADDR);
4656 			} else if (rc == GLD_RETRY) {
4657 				/*
4658 				 * The putbq and gld_xwait must be
4659 				 * within the lock to prevent races
4660 				 * with gld_sched.
4661 				 */
4662 				(void) putbq(q, mp);
4663 				gld->gld_xwait = B_TRUE;
4664 				GLDM_UNLOCK(macinfo);
4665 				return (GLDE_RETRY);
4666 			} else if (rc != GLD_SUCCESS) {
4667 				GLDM_UNLOCK(macinfo);
4668 				dlerrorack(q, mp, DL_ENABMULTI_REQ,
4669 				    DL_SYSERR, EIO);
4670 				return (GLDE_OK);
4671 			}
4672 		}
4673 		gld->gld_mcast[i] = mcast;
4674 		mcast->gldm_refcnt++;
4675 		GLDM_UNLOCK(macinfo);
4676 		dlokack(q, mp, DL_ENABMULTI_REQ);
4677 		return (GLDE_OK);
4678 	}
4679 
4680 	/* couldn't get a stream slot */
4681 	GLDM_UNLOCK(macinfo);
4682 	return (DL_TOOMANY);
4683 }
4684 
4685 
4686 /*
4687  * gld_disable_multi (q, mp)
4688  * Disable the multicast address on the stream.  If this is the last
4689  * reference at the mac layer, disable it there as well.
4690  */
4691 static int
4692 gld_disable_multi(queue_t *q, mblk_t *mp)
4693 {
4694 	gld_t  *gld;
4695 	gld_mac_info_t *macinfo;
4696 	unsigned char *maddr;
4697 	dl_disabmulti_req_t *multi;
4698 	int i;
4699 	gld_mcast_t *mcast;
4700 
4701 #ifdef GLD_DEBUG
4702 	if (gld_debug & GLDPROT) {
4703 		cmn_err(CE_NOTE, "gld_disable_multi(%p, %p)", (void *)q,
4704 		    (void *)mp);
4705 	}
4706 #endif
4707 
4708 	gld = (gld_t *)q->q_ptr;
4709 	if (gld->gld_state == DL_UNATTACHED)
4710 		return (DL_OUTSTATE);
4711 
4712 	macinfo = gld->gld_mac_info;
4713 	ASSERT(macinfo != NULL);
4714 	if (macinfo->gldm_set_multicast == NULL) {
4715 		return (DL_UNSUPPORTED);
4716 	}
4717 
4718 	multi = (dl_disabmulti_req_t *)mp->b_rptr;
4719 
4720 	if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) ||
4721 	    multi->dl_addr_length != macinfo->gldm_addrlen)
4722 		return (DL_BADADDR);
4723 
4724 	maddr = mp->b_rptr + multi->dl_addr_offset;
4725 
4726 	/* request appears to be valid */
4727 	/* does this address appear in current table? */
4728 	GLDM_LOCK(macinfo, RW_WRITER);
4729 	if (gld->gld_mcast != NULL) {
4730 		for (i = 0; i < gld->gld_multicnt; i++)
4731 			if (((mcast = gld->gld_mcast[i]) != NULL) &&
4732 			    mac_eq(mcast->gldm_addr,
4733 			    maddr, macinfo->gldm_addrlen)) {
4734 				ASSERT(mcast->gldm_refcnt);
4735 				gld_send_disable_multi(macinfo, mcast);
4736 				gld->gld_mcast[i] = NULL;
4737 				GLDM_UNLOCK(macinfo);
4738 				dlokack(q, mp, DL_DISABMULTI_REQ);
4739 				return (GLDE_OK);
4740 			}
4741 	}
4742 	GLDM_UNLOCK(macinfo);
4743 	return (DL_NOTENAB); /* not an enabled address */
4744 }
4745 
4746 /*
4747  * gld_send_disable_multi(macinfo, mcast)
4748  * this function is used to disable a multicast address if the reference
4749  * count goes to zero.  The disable request is then passed down to the
4750  * device via its gldm_set_multicast() entry point.
4751  */
4752 static void
4753 gld_send_disable_multi(gld_mac_info_t *macinfo, gld_mcast_t *mcast)
4754 {
4755 	ASSERT(macinfo != NULL);
4756 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
4757 	ASSERT(mcast != NULL);
4758 	ASSERT(mcast->gldm_refcnt);
4759 
4760 	if (!mcast->gldm_refcnt) {
4761 		return;			/* "cannot happen" */
4762 	}
4763 
4764 	if (--mcast->gldm_refcnt > 0) {
4765 		return;
4766 	}
4767 
4768 	/*
4769 	 * This must be converted from canonical form to device form.
4770 	 * The refcnt is now zero so we can trash the data.
4771 	 */
4772 	if (macinfo->gldm_options & GLDOPT_CANONICAL_ADDR)
4773 		gld_bitreverse(mcast->gldm_addr, macinfo->gldm_addrlen);
4774 
4775 	/* XXX Ought to check for GLD_NORESOURCES or GLD_FAILURE */
4776 	(void) (*macinfo->gldm_set_multicast)
4777 	    (macinfo, mcast->gldm_addr, GLD_MULTI_DISABLE);
4778 }
4779 
4780 /*
4781  * gld_promisc (q, mp, req, on)
4782  *	enable or disable the use of promiscuous mode with the hardware
4783  */
4784 static int
4785 gld_promisc(queue_t *q, mblk_t *mp, t_uscalar_t req, boolean_t on)
4786 {
4787 	gld_t *gld;
4788 	gld_mac_info_t *macinfo;
4789 	gld_mac_pvt_t *mac_pvt;
4790 	gld_vlan_t *vlan;
4791 	union DL_primitives *prim;
4792 	int macrc = GLD_SUCCESS;
4793 	int dlerr = GLDE_OK;
4794 	int op = GLD_MAC_PROMISC_NOOP;
4795 
4796 #ifdef GLD_DEBUG
4797 	if (gld_debug & GLDTRACE)
4798 		cmn_err(CE_NOTE, "gld_promisc(%p, %p, %d, %d)",
4799 		    (void *)q, (void *)mp, req, on);
4800 #endif
4801 
4802 	ASSERT(mp != NULL);
4803 	prim = (union DL_primitives *)mp->b_rptr;
4804 
4805 	/* XXX I think spec allows promisc in unattached state */
4806 	gld = (gld_t *)q->q_ptr;
4807 	if (gld->gld_state == DL_UNATTACHED)
4808 		return (DL_OUTSTATE);
4809 
4810 	macinfo = gld->gld_mac_info;
4811 	ASSERT(macinfo != NULL);
4812 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4813 
4814 	vlan = (gld_vlan_t *)gld->gld_vlan;
4815 	ASSERT(vlan != NULL);
4816 
4817 	GLDM_LOCK(macinfo, RW_WRITER);
4818 
4819 	/*
4820 	 * Work out what request (if any) has to be made to the MAC layer
4821 	 */
4822 	if (on) {
4823 		switch (prim->promiscon_req.dl_level) {
4824 		default:
4825 			dlerr = DL_UNSUPPORTED;	/* this is an error */
4826 			break;
4827 
4828 		case DL_PROMISC_PHYS:
4829 			if (mac_pvt->nprom == 0)
4830 				op = GLD_MAC_PROMISC_PHYS;
4831 			break;
4832 
4833 		case DL_PROMISC_MULTI:
4834 			if (mac_pvt->nprom_multi == 0)
4835 				if (mac_pvt->nprom == 0)
4836 					op = GLD_MAC_PROMISC_MULTI;
4837 			break;
4838 
4839 		case DL_PROMISC_SAP:
4840 			/* We can do this without reference to the MAC */
4841 			break;
4842 		}
4843 	} else {
4844 		switch (prim->promiscoff_req.dl_level) {
4845 		default:
4846 			dlerr = DL_UNSUPPORTED;	/* this is an error */
4847 			break;
4848 
4849 		case DL_PROMISC_PHYS:
4850 			if (!(gld->gld_flags & GLD_PROM_PHYS))
4851 				dlerr = DL_NOTENAB;
4852 			else if (mac_pvt->nprom == 1)
4853 				if (mac_pvt->nprom_multi)
4854 					op = GLD_MAC_PROMISC_MULTI;
4855 				else
4856 					op = GLD_MAC_PROMISC_NONE;
4857 			break;
4858 
4859 		case DL_PROMISC_MULTI:
4860 			if (!(gld->gld_flags & GLD_PROM_MULT))
4861 				dlerr = DL_NOTENAB;
4862 			else if (mac_pvt->nprom_multi == 1)
4863 				if (mac_pvt->nprom == 0)
4864 					op = GLD_MAC_PROMISC_NONE;
4865 			break;
4866 
4867 		case DL_PROMISC_SAP:
4868 			if (!(gld->gld_flags & GLD_PROM_SAP))
4869 				dlerr = DL_NOTENAB;
4870 
4871 			/* We can do this without reference to the MAC */
4872 			break;
4873 		}
4874 	}
4875 
4876 	/*
4877 	 * The request was invalid in some way so no need to continue.
4878 	 */
4879 	if (dlerr != GLDE_OK) {
4880 		GLDM_UNLOCK(macinfo);
4881 		return (dlerr);
4882 	}
4883 
4884 	/*
4885 	 * Issue the request to the MAC layer, if required
4886 	 */
4887 	if (op != GLD_MAC_PROMISC_NOOP) {
4888 		macrc = (*macinfo->gldm_set_promiscuous)(macinfo, op);
4889 	}
4890 
4891 	/*
4892 	 * On success, update the appropriate flags & refcounts
4893 	 */
4894 	if (macrc == GLD_SUCCESS) {
4895 		if (on) {
4896 			switch (prim->promiscon_req.dl_level) {
4897 			case DL_PROMISC_PHYS:
4898 				mac_pvt->nprom++;
4899 				gld->gld_flags |= GLD_PROM_PHYS;
4900 				break;
4901 
4902 			case DL_PROMISC_MULTI:
4903 				mac_pvt->nprom_multi++;
4904 				gld->gld_flags |= GLD_PROM_MULT;
4905 				break;
4906 
4907 			case DL_PROMISC_SAP:
4908 				gld->gld_flags |= GLD_PROM_SAP;
4909 				break;
4910 
4911 			default:
4912 				break;
4913 			}
4914 		} else {
4915 			switch (prim->promiscoff_req.dl_level) {
4916 			case DL_PROMISC_PHYS:
4917 				mac_pvt->nprom--;
4918 				gld->gld_flags &= ~GLD_PROM_PHYS;
4919 				break;
4920 
4921 			case DL_PROMISC_MULTI:
4922 				mac_pvt->nprom_multi--;
4923 				gld->gld_flags &= ~GLD_PROM_MULT;
4924 				break;
4925 
4926 			case DL_PROMISC_SAP:
4927 				gld->gld_flags &= ~GLD_PROM_SAP;
4928 				break;
4929 
4930 			default:
4931 				break;
4932 			}
4933 		}
4934 	} else if (macrc == GLD_RETRY) {
4935 		/*
4936 		 * The putbq and gld_xwait must be within the lock to
4937 		 * prevent races with gld_sched.
4938 		 */
4939 		(void) putbq(q, mp);
4940 		gld->gld_xwait = B_TRUE;
4941 	}
4942 
4943 	/*
4944 	 * Update VLAN IPQ status -- it may have changed
4945 	 */
4946 	if (gld->gld_flags & (GLD_PROM_SAP | GLD_PROM_MULT | GLD_PROM_PHYS))
4947 		vlan->gldv_ipq_flags |= IPQ_FORBIDDEN;
4948 	else
4949 		vlan->gldv_ipq_flags &= ~IPQ_FORBIDDEN;
4950 
4951 	GLDM_UNLOCK(macinfo);
4952 
4953 	/*
4954 	 * Finally, decide how to reply.
4955 	 *
4956 	 * If <macrc> is not GLD_SUCCESS, the request was put to the MAC
4957 	 * layer but failed.  In such cases, we can return a DL_* error
4958 	 * code and let the caller send an error-ack reply upstream, or
4959 	 * we can send a reply here and then return GLDE_OK so that the
4960 	 * caller doesn't also respond.
4961 	 *
4962 	 * If physical-promiscuous mode was (successfully) switched on or
4963 	 * off, send a notification (DL_NOTIFY_IND) to anyone interested.
4964 	 */
4965 	switch (macrc) {
4966 	case GLD_NOTSUPPORTED:
4967 		return (DL_NOTSUPPORTED);
4968 
4969 	case GLD_NORESOURCES:
4970 		dlerrorack(q, mp, req, DL_SYSERR, ENOSR);
4971 		return (GLDE_OK);
4972 
4973 	case GLD_RETRY:
4974 		return (GLDE_RETRY);
4975 
4976 	default:
4977 		dlerrorack(q, mp, req, DL_SYSERR, EIO);
4978 		return (GLDE_OK);
4979 
4980 	case GLD_SUCCESS:
4981 		dlokack(q, mp, req);
4982 		break;
4983 	}
4984 
4985 	switch (op) {
4986 	case GLD_MAC_PROMISC_NOOP:
4987 		break;
4988 
4989 	case GLD_MAC_PROMISC_PHYS:
4990 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_ON_PHYS, NULL);
4991 		break;
4992 
4993 	default:
4994 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL);
4995 		break;
4996 	}
4997 
4998 	return (GLDE_OK);
4999 }
5000 
5001 /*
5002  * gld_physaddr()
5003  *	get the current or factory physical address value
5004  */
5005 static int
5006 gld_physaddr(queue_t *q, mblk_t *mp)
5007 {
5008 	gld_t *gld = (gld_t *)q->q_ptr;
5009 	gld_mac_info_t *macinfo;
5010 	union DL_primitives *prim = (union DL_primitives *)mp->b_rptr;
5011 	unsigned char addr[GLD_MAX_ADDRLEN];
5012 
5013 	if (gld->gld_state == DL_UNATTACHED)
5014 		return (DL_OUTSTATE);
5015 
5016 	macinfo = (gld_mac_info_t *)gld->gld_mac_info;
5017 	ASSERT(macinfo != NULL);
5018 	ASSERT(macinfo->gldm_addrlen <= GLD_MAX_ADDRLEN);
5019 
5020 	switch (prim->physaddr_req.dl_addr_type) {
5021 	case DL_FACT_PHYS_ADDR:
5022 		mac_copy((caddr_t)macinfo->gldm_vendor_addr,
5023 		    (caddr_t)addr, macinfo->gldm_addrlen);
5024 		break;
5025 	case DL_CURR_PHYS_ADDR:
5026 		/* make a copy so we don't hold the lock across qreply */
5027 		GLDM_LOCK(macinfo, RW_WRITER);
5028 		mac_copy((caddr_t)
5029 		    ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
5030 		    (caddr_t)addr, macinfo->gldm_addrlen);
5031 		GLDM_UNLOCK(macinfo);
5032 		break;
5033 	default:
5034 		return (DL_BADPRIM);
5035 	}
5036 	dlphysaddrack(q, mp, (caddr_t)addr, macinfo->gldm_addrlen);
5037 	return (GLDE_OK);
5038 }
5039 
5040 /*
5041  * gld_setaddr()
5042  *	change the hardware's physical address to a user specified value
5043  */
5044 static int
5045 gld_setaddr(queue_t *q, mblk_t *mp)
5046 {
5047 	gld_t *gld = (gld_t *)q->q_ptr;
5048 	gld_mac_info_t *macinfo;
5049 	gld_mac_pvt_t *mac_pvt;
5050 	union DL_primitives *prim = (union DL_primitives *)mp->b_rptr;
5051 	unsigned char *addr;
5052 	unsigned char cmaddr[GLD_MAX_ADDRLEN];
5053 	int rc;
5054 	gld_vlan_t *vlan;
5055 
5056 	if (gld->gld_state == DL_UNATTACHED)
5057 		return (DL_OUTSTATE);
5058 
5059 	vlan = (gld_vlan_t *)gld->gld_vlan;
5060 	ASSERT(vlan != NULL);
5061 
5062 	if (vlan->gldv_id != VLAN_VID_NONE)
5063 		return (DL_NOTSUPPORTED);
5064 
5065 	macinfo = (gld_mac_info_t *)gld->gld_mac_info;
5066 	ASSERT(macinfo != NULL);
5067 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5068 
5069 	if (!MBLKIN(mp, prim->set_physaddr_req.dl_addr_offset,
5070 	    prim->set_physaddr_req.dl_addr_length) ||
5071 	    prim->set_physaddr_req.dl_addr_length != macinfo->gldm_addrlen)
5072 		return (DL_BADADDR);
5073 
5074 	GLDM_LOCK(macinfo, RW_WRITER);
5075 
5076 	/* now do the set at the hardware level */
5077 	addr = mp->b_rptr + prim->set_physaddr_req.dl_addr_offset;
5078 	ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
5079 	cmac_copy(addr, cmaddr, macinfo->gldm_addrlen, macinfo);
5080 
5081 	rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr);
5082 	if (rc == GLD_SUCCESS)
5083 		mac_copy(addr, mac_pvt->curr_macaddr,
5084 		    macinfo->gldm_addrlen);
5085 
5086 	GLDM_UNLOCK(macinfo);
5087 
5088 	switch (rc) {
5089 	case GLD_SUCCESS:
5090 		break;
5091 	case GLD_NOTSUPPORTED:
5092 		return (DL_NOTSUPPORTED);
5093 	case GLD_BADARG:
5094 		return (DL_BADADDR);
5095 	case GLD_NORESOURCES:
5096 		dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, ENOSR);
5097 		return (GLDE_OK);
5098 	default:
5099 		dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, EIO);
5100 		return (GLDE_OK);
5101 	}
5102 
5103 	gld_notify_ind(macinfo, DL_NOTE_PHYS_ADDR, NULL);
5104 
5105 	dlokack(q, mp, DL_SET_PHYS_ADDR_REQ);
5106 	return (GLDE_OK);
5107 }
5108 
5109 int
5110 gld_get_statistics(queue_t *q, mblk_t *mp)
5111 {
5112 	dl_get_statistics_ack_t *dlsp;
5113 	gld_t  *gld = (gld_t *)q->q_ptr;
5114 	gld_mac_info_t *macinfo = gld->gld_mac_info;
5115 	gld_mac_pvt_t *mac_pvt;
5116 
5117 	if (gld->gld_state == DL_UNATTACHED)
5118 		return (DL_OUTSTATE);
5119 
5120 	ASSERT(macinfo != NULL);
5121 
5122 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5123 	(void) gld_update_kstat(mac_pvt->kstatp, KSTAT_READ);
5124 
5125 	mp = mexchange(q, mp, DL_GET_STATISTICS_ACK_SIZE +
5126 	    sizeof (struct gldkstats), M_PCPROTO, DL_GET_STATISTICS_ACK);
5127 
5128 	if (mp == NULL)
5129 		return (GLDE_OK);	/* mexchange already sent merror */
5130 
5131 	dlsp = (dl_get_statistics_ack_t *)mp->b_rptr;
5132 	dlsp->dl_primitive = DL_GET_STATISTICS_ACK;
5133 	dlsp->dl_stat_length = sizeof (struct gldkstats);
5134 	dlsp->dl_stat_offset = DL_GET_STATISTICS_ACK_SIZE;
5135 
5136 	GLDM_LOCK(macinfo, RW_WRITER);
5137 	bcopy(mac_pvt->kstatp->ks_data,
5138 	    (mp->b_rptr + DL_GET_STATISTICS_ACK_SIZE),
5139 	    sizeof (struct gldkstats));
5140 	GLDM_UNLOCK(macinfo);
5141 
5142 	qreply(q, mp);
5143 	return (GLDE_OK);
5144 }
5145 
5146 /* =================================================== */
5147 /* misc utilities, some requiring various mutexes held */
5148 /* =================================================== */
5149 
5150 /*
5151  * Initialize and start the driver.
5152  */
5153 static int
5154 gld_start_mac(gld_mac_info_t *macinfo)
5155 {
5156 	int	rc;
5157 	unsigned char cmaddr[GLD_MAX_ADDRLEN];
5158 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5159 
5160 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5161 	ASSERT(!mac_pvt->started);
5162 
5163 	rc = (*macinfo->gldm_reset)(macinfo);
5164 	if (rc != GLD_SUCCESS)
5165 		return (GLD_FAILURE);
5166 
5167 	/* set the addr after we reset the device */
5168 	ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
5169 	cmac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)
5170 	    ->curr_macaddr, cmaddr, macinfo->gldm_addrlen, macinfo);
5171 
5172 	rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr);
5173 	ASSERT(rc != GLD_BADARG);  /* this address was good before */
5174 	if (rc != GLD_SUCCESS && rc != GLD_NOTSUPPORTED)
5175 		return (GLD_FAILURE);
5176 
5177 	rc = (*macinfo->gldm_start)(macinfo);
5178 	if (rc != GLD_SUCCESS)
5179 		return (GLD_FAILURE);
5180 
5181 	mac_pvt->started = B_TRUE;
5182 	return (GLD_SUCCESS);
5183 }
5184 
5185 /*
5186  * Stop the driver.
5187  */
5188 static void
5189 gld_stop_mac(gld_mac_info_t *macinfo)
5190 {
5191 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5192 
5193 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5194 	ASSERT(mac_pvt->started);
5195 
5196 	(void) (*macinfo->gldm_stop)(macinfo);
5197 
5198 	mac_pvt->started = B_FALSE;
5199 }
5200 
5201 
5202 /*
5203  * gld_set_ipq will set a pointer to the queue which is bound to the
5204  * IP sap if:
5205  * o the device type is ethernet or IPoIB.
5206  * o there is no stream in SAP promiscuous mode.
5207  * o there is exactly one stream bound to the IP sap.
5208  * o the stream is in "fastpath" mode.
5209  */
5210 static void
5211 gld_set_ipq(gld_t *gld)
5212 {
5213 	gld_vlan_t	*vlan;
5214 	gld_mac_info_t	*macinfo = gld->gld_mac_info;
5215 	gld_t		*ip_gld = NULL;
5216 	uint_t		ipq_candidates = 0;
5217 	gld_t		*ipv6_gld = NULL;
5218 	uint_t		ipv6q_candidates = 0;
5219 
5220 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5221 
5222 	/* The ipq code in gld_recv() is intimate with ethernet/IPoIB */
5223 	if (((macinfo->gldm_type != DL_ETHER) &&
5224 	    (macinfo->gldm_type != DL_IB)) ||
5225 	    (gld_global_options & GLD_OPT_NO_IPQ))
5226 		return;
5227 
5228 	vlan = (gld_vlan_t *)gld->gld_vlan;
5229 	ASSERT(vlan != NULL);
5230 
5231 	/* clear down any previously defined ipqs */
5232 	vlan->gldv_ipq = NULL;
5233 	vlan->gldv_ipv6q = NULL;
5234 
5235 	/* Try to find a single stream eligible to receive IP packets */
5236 	for (gld = vlan->gldv_str_next;
5237 	    gld != (gld_t *)&vlan->gldv_str_next; gld = gld->gld_next) {
5238 		if (gld->gld_state != DL_IDLE)
5239 			continue;	/* not eligible to receive */
5240 		if (gld->gld_flags & GLD_STR_CLOSING)
5241 			continue;	/* not eligible to receive */
5242 
5243 		if (gld->gld_sap == ETHERTYPE_IP) {
5244 			ip_gld = gld;
5245 			ipq_candidates++;
5246 		}
5247 
5248 		if (gld->gld_sap == ETHERTYPE_IPV6) {
5249 			ipv6_gld = gld;
5250 			ipv6q_candidates++;
5251 		}
5252 	}
5253 
5254 	if (ipq_candidates == 1) {
5255 		ASSERT(ip_gld != NULL);
5256 
5257 		if (ip_gld->gld_flags & GLD_FAST)	/* eligible for ipq */
5258 			vlan->gldv_ipq = ip_gld->gld_qptr;
5259 	}
5260 
5261 	if (ipv6q_candidates == 1) {
5262 		ASSERT(ipv6_gld != NULL);
5263 
5264 		if (ipv6_gld->gld_flags & GLD_FAST)	/* eligible for ipq */
5265 			vlan->gldv_ipv6q = ipv6_gld->gld_qptr;
5266 	}
5267 }
5268 
5269 /*
5270  * gld_flushqueue (q)
5271  *	Used by DLPI primitives that require flushing the queues;
5272  *	essentially, this is DL_UNBIND_REQ.
5273  */
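/*
 * Data already queued locally is flushed from both the read and write
 * queues; the M_FLUSH sent upstream asks the modules above to flush
 * their queues as well.
 */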
5274 static void
5275 gld_flushqueue(queue_t *q)
5276 {
5277 	/* flush all data in both queues */
5278 	/* XXX Should these be FLUSHALL? */
5279 	flushq(q, FLUSHDATA);
5280 	flushq(WR(q), FLUSHDATA);
5281 	/* flush all the queues upstream */
5282 	(void) putctl1(q, M_FLUSH, FLUSHRW);
5283 }
5284 
5285 /*
5286  * gld_devlookup (major)
5287  * Search the device table for the device with the specified
5288  * major number and return a pointer to it if it exists.
5289  */
5290 static glddev_t *
5291 gld_devlookup(int major)
5292 {
5293 	struct glddevice *dev;
5294 
5295 	ASSERT(mutex_owned(&gld_device_list.gld_devlock));
5296 
5297 	for (dev = gld_device_list.gld_next;
5298 	    dev != &gld_device_list;
5299 	    dev = dev->gld_next) {
5300 		ASSERT(dev);
5301 		if (dev->gld_major == major)
5302 			return (dev);
5303 	}
5304 	return (NULL);
5305 }
5306 
5307 /*
5308  * gld_findminor(device)
5309  * Returns a minor number currently unused by any stream in the current
5310  * device class (major) list.
5311  */
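/*
 * Minors are normally handed out from a monotonically increasing
 * counter ("the fast way" below).  Once that counter runs past
 * GLD_MAX_CLONE_MINOR, every call falls back to a linear scan of all
 * unattached and attached streams for an unused value ("the steady
 * way").  A return of 0 indicates that no clone minor is available.
 */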
5312 static int
5313 gld_findminor(glddev_t *device)
5314 {
5315 	gld_t		*next;
5316 	gld_mac_info_t	*nextmac;
5317 	gld_vlan_t	*nextvlan;
5318 	int		minor;
5319 	int		i;
5320 
5321 	ASSERT(mutex_owned(&device->gld_devlock));
5322 
5323 	/* The fast way */
5324 	if (device->gld_nextminor >= GLD_MIN_CLONE_MINOR &&
5325 	    device->gld_nextminor <= GLD_MAX_CLONE_MINOR)
5326 		return (device->gld_nextminor++);
5327 
5328 	/* The steady way */
5329 	for (minor = GLD_MIN_CLONE_MINOR; minor <= GLD_MAX_CLONE_MINOR;
5330 	    minor++) {
5331 		/* Search all unattached streams */
5332 		for (next = device->gld_str_next;
5333 		    next != (gld_t *)&device->gld_str_next;
5334 		    next = next->gld_next) {
5335 			if (minor == next->gld_minor)
5336 				goto nextminor;
5337 		}
5338 		/* Search all attached streams; we don't need maclock because */
5339 		/* mac stream list is protected by devlock as well as maclock */
5340 		for (nextmac = device->gld_mac_next;
5341 		    nextmac != (gld_mac_info_t *)&device->gld_mac_next;
5342 		    nextmac = nextmac->gldm_next) {
5343 			gld_mac_pvt_t *pvt =
5344 			    (gld_mac_pvt_t *)nextmac->gldm_mac_pvt;
5345 
5346 			if (!(nextmac->gldm_GLD_flags & GLD_MAC_READY))
5347 				continue;	/* this one's not ready yet */
5348 
5349 			for (i = 0; i < VLAN_HASHSZ; i++) {
5350 				for (nextvlan = pvt->vlan_hash[i];
5351 				    nextvlan != NULL;
5352 				    nextvlan = nextvlan->gldv_next) {
5353 					for (next = nextvlan->gldv_str_next;
5354 					    next !=
5355 					    (gld_t *)&nextvlan->gldv_str_next;
5356 					    next = next->gld_next) {
5357 						if (minor == next->gld_minor)
5358 							goto nextminor;
5359 					}
5360 				}
5361 			}
5362 		}
5363 
5364 		return (minor);
5365 nextminor:
5366 		/* don't need to do anything */
5367 		;
5368 	}
5369 	cmn_err(CE_WARN, "GLD ran out of minor numbers for %s",
5370 	    device->gld_name);
5371 	return (0);
5372 }
5373 
5374 /*
5375  * version of insque/remque for use by this driver
5376  */
5377 struct qelem {
5378 	struct qelem *q_forw;
5379 	struct qelem *q_back;
5380 	/* rest of structure */
5381 };
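/*
 * These routines operate on circular, doubly linked lists whose head
 * is a sentinel embedded in the owning structure (see, for example,
 * how gldv_str_next/gldv_str_prev are initialized to point back at
 * the head in gld_add_vlan() below).  That is why the traversal loops
 * elsewhere compare the cursor against the address of the list head
 * rather than against NULL.
 */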
5382 
5383 static void
5384 gldinsque(void *elem, void *pred)
5385 {
5386 	struct qelem *pelem = elem;
5387 	struct qelem *ppred = pred;
5388 	struct qelem *pnext = ppred->q_forw;
5389 
5390 	pelem->q_forw = pnext;
5391 	pelem->q_back = ppred;
5392 	ppred->q_forw = pelem;
5393 	pnext->q_back = pelem;
5394 }
5395 
5396 static void
5397 gldremque(void *arg)
5398 {
5399 	struct qelem *pelem = arg;
5400 	struct qelem *elem = arg;
5401 
5402 	pelem->q_forw->q_back = pelem->q_back;
5403 	pelem->q_back->q_forw = pelem->q_forw;
5404 	elem->q_back = elem->q_forw = NULL;
5405 }
5406 
5407 static gld_vlan_t *
5408 gld_add_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5409 {
5410 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5411 	gld_vlan_t	**pp;
5412 	gld_vlan_t	*p;
5413 
5414 	pp = &(mac_pvt->vlan_hash[vid % VLAN_HASHSZ]);
5415 	while ((p = *pp) != NULL) {
5416 		ASSERT(p->gldv_id != vid);
5417 		pp = &(p->gldv_next);
5418 	}
5419 
5420 	if ((p = kmem_zalloc(sizeof (gld_vlan_t), KM_NOSLEEP)) == NULL)
5421 		return (NULL);
5422 
5423 	p->gldv_mac = macinfo;
5424 	p->gldv_id = vid;
5425 
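	/*
	 * The untagged pseudo-VLAN (VLAN_VID_NONE) shares the MAC-level
	 * statistics block and has no kstat of its own; tagged VLANs
	 * get a private gld_stats structure and a per-VLAN kstat.
	 */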
5426 	if (vid == VLAN_VID_NONE) {
5427 		p->gldv_ptag = VLAN_VTAG_NONE;
5428 		p->gldv_stats = mac_pvt->statistics;
5429 		p->gldv_kstatp = NULL;
5430 	} else {
5431 		p->gldv_ptag = GLD_MK_PTAG(VLAN_CFI_ETHER, vid);
5432 		p->gldv_stats = kmem_zalloc(sizeof (struct gld_stats),
5433 		    KM_SLEEP);
5434 
5435 		if (gld_init_vlan_stats(p) != GLD_SUCCESS) {
5436 			kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5437 			kmem_free(p, sizeof (gld_vlan_t));
5438 			return (NULL);
5439 		}
5440 	}
5441 
5442 	p->gldv_str_next = p->gldv_str_prev = (gld_t *)&p->gldv_str_next;
5443 	mac_pvt->nvlan++;
5444 	*pp = p;
5445 
5446 	return (p);
5447 }
5448 
5449 static void
5450 gld_rem_vlan(gld_vlan_t *vlan)
5451 {
5452 	gld_mac_info_t	*macinfo = vlan->gldv_mac;
5453 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5454 	gld_vlan_t	**pp;
5455 	gld_vlan_t	*p;
5456 
5457 	pp = &(mac_pvt->vlan_hash[vlan->gldv_id % VLAN_HASHSZ]);
5458 	while ((p = *pp) != NULL) {
5459 		if (p->gldv_id == vlan->gldv_id)
5460 			break;
5461 		pp = &(p->gldv_next);
5462 	}
5463 	ASSERT(p != NULL);
5464 
5465 	*pp = p->gldv_next;
5466 	mac_pvt->nvlan--;
5467 	if (p->gldv_id != VLAN_VID_NONE) {
5468 		ASSERT(p->gldv_kstatp != NULL);
5469 		kstat_delete(p->gldv_kstatp);
5470 		kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5471 	}
5472 	kmem_free(p, sizeof (gld_vlan_t));
5473 }
5474 
5475 gld_vlan_t *
5476 gld_find_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5477 {
5478 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5479 	gld_vlan_t	*p;
5480 
5481 	p = mac_pvt->vlan_hash[vid % VLAN_HASHSZ];
5482 	while (p != NULL) {
5483 		if (p->gldv_id == vid)
5484 			return (p);
5485 		p = p->gldv_next;
5486 	}
5487 	return (NULL);
5488 }
5489 
5490 gld_vlan_t *
5491 gld_get_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5492 {
5493 	gld_vlan_t	*vlan;
5494 
5495 	if ((vlan = gld_find_vlan(macinfo, vid)) == NULL)
5496 		vlan = gld_add_vlan(macinfo, vid);
5497 
5498 	return (vlan);
5499 }
5500 
5501 /*
5502  * gld_bitrevcopy()
5503  * This is essentially bcopy, with the ability to bit reverse the
5504  * the source bytes. The MAC addresses bytes as transmitted by FDDI
5505  * interfaces are bit reversed.
5506  */
5507 void
5508 gld_bitrevcopy(caddr_t src, caddr_t target, size_t n)
5509 {
5510 	while (n--)
5511 		*target++ = bit_rev[(uchar_t)*src++];
5512 }
5513 
5514 /*
5515  * gld_bitreverse()
5516  * Reverse the bit order of each byte in place, using a
5517  * lookup table.
5518  */
5519 void
5520 gld_bitreverse(uchar_t *rptr, size_t n)
5521 {
5522 	while (n--) {
5523 		*rptr = bit_rev[*rptr];
5524 		rptr++;
5525 	}
5526 }
5527 
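/*
 * gld_macaddr_sprintf()
 * Format a MAC address as lowercase, colon-separated hex octets,
 * e.g. "08:00:20:ab:cd:ef" for a six byte address.  The caller must
 * supply a buffer of at least 3 * len bytes; the final separator is
 * overwritten by the terminating NUL.
 */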
5528 char *
5529 gld_macaddr_sprintf(char *etherbuf, unsigned char *ap, int len)
5530 {
5531 	int i;
5532 	char *cp = etherbuf;
5533 	static char digits[] = "0123456789abcdef";
5534 
5535 	for (i = 0; i < len; i++) {
5536 		*cp++ = digits[*ap >> 4];
5537 		*cp++ = digits[*ap++ & 0xf];
5538 		*cp++ = ':';
5539 	}
5540 	*--cp = 0;
5541 	return (etherbuf);
5542 }
5543 
5544 #ifdef GLD_DEBUG
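/*
 * Debug-only sanity walk: visit every device, unattached stream, MAC,
 * VLAN and attached stream, and verify the linkage and state
 * invariants that the rest of GLD relies on.  Compiled in only when
 * GLD_DEBUG is defined.
 */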
5545 static void
5546 gld_check_assertions()
5547 {
5548 	glddev_t	*dev;
5549 	gld_mac_info_t	*mac;
5550 	gld_t		*str;
5551 	gld_vlan_t	*vlan;
5552 	int		i;
5553 
5554 	mutex_enter(&gld_device_list.gld_devlock);
5555 
5556 	for (dev = gld_device_list.gld_next;
5557 	    dev != (glddev_t *)&gld_device_list.gld_next;
5558 	    dev = dev->gld_next) {
5559 		mutex_enter(&dev->gld_devlock);
5560 		ASSERT(dev->gld_broadcast != NULL);
5561 		for (str = dev->gld_str_next;
5562 		    str != (gld_t *)&dev->gld_str_next;
5563 		    str = str->gld_next) {
5564 			ASSERT(str->gld_device == dev);
5565 			ASSERT(str->gld_mac_info == NULL);
5566 			ASSERT(str->gld_qptr != NULL);
5567 			ASSERT(str->gld_minor >= GLD_MIN_CLONE_MINOR);
5568 			ASSERT(str->gld_multicnt == 0);
5569 			ASSERT(str->gld_mcast == NULL);
5570 			ASSERT(!(str->gld_flags &
5571 			    (GLD_PROM_PHYS|GLD_PROM_MULT|GLD_PROM_SAP)));
5572 			ASSERT(str->gld_sap == 0);
5573 			ASSERT(str->gld_state == DL_UNATTACHED);
5574 		}
5575 		for (mac = dev->gld_mac_next;
5576 		    mac != (gld_mac_info_t *)&dev->gld_mac_next;
5577 		    mac = mac->gldm_next) {
5578 			int nvlan = 0;
5579 			gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
5580 
5581 			if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
5582 				continue;	/* this one's not ready yet */
5583 
5584 			GLDM_LOCK(mac, RW_WRITER);
5585 			ASSERT(mac->gldm_devinfo != NULL);
5586 			ASSERT(mac->gldm_mac_pvt != NULL);
5587 			ASSERT(pvt->interfacep != NULL);
5588 			ASSERT(pvt->kstatp != NULL);
5589 			ASSERT(pvt->statistics != NULL);
5590 			ASSERT(pvt->major_dev == dev);
5591 
5592 			for (i = 0; i < VLAN_HASHSZ; i++) {
5593 				for (vlan = pvt->vlan_hash[i];
5594 				    vlan != NULL; vlan = vlan->gldv_next) {
5595 					int nstr = 0;
5596 
5597 					ASSERT(vlan->gldv_mac == mac);
5598 
5599 					for (str = vlan->gldv_str_next;
5600 					    str !=
5601 					    (gld_t *)&vlan->gldv_str_next;
5602 					    str = str->gld_next) {
5603 						ASSERT(str->gld_device == dev);
5604 						ASSERT(str->gld_mac_info ==
5605 						    mac);
5606 						ASSERT(str->gld_qptr != NULL);
5607 						ASSERT(str->gld_minor >=
5608 						    GLD_MIN_CLONE_MINOR);
5609 						ASSERT(
5610 						    str->gld_multicnt == 0 ||
5611 						    str->gld_mcast);
5612 						nstr++;
5613 					}
5614 					ASSERT(vlan->gldv_nstreams == nstr);
5615 					nvlan++;
5616 				}
5617 			}
5618 			ASSERT(pvt->nvlan == nvlan);
5619 			GLDM_UNLOCK(mac);
5620 		}
5621 		mutex_exit(&dev->gld_devlock);
5622 	}
5623 	mutex_exit(&gld_device_list.gld_devlock);
5624 }
5625 #endif
5626