xref: /illumos-gate/usr/src/uts/common/io/gld.c (revision 622200ad88c6c6382403a01985a94e22484baac6)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * gld - Generic LAN Driver Version 2, PSARC/1997/382
30  *
31  * This is a utility module that provides generic facilities for
32  * LAN	drivers.  The DLPI protocol and most STREAMS interfaces
33  * are handled here.
34  *
35  * It no longer provides compatibility with drivers
36  * implemented according to the GLD v0 documentation published
37  * in 1993. (See PSARC 2003/728)
38  */
39 
40 
41 #include <sys/types.h>
42 #include <sys/errno.h>
43 #include <sys/stropts.h>
44 #include <sys/stream.h>
45 #include <sys/kmem.h>
46 #include <sys/stat.h>
47 #include <sys/modctl.h>
48 #include <sys/kstat.h>
49 #include <sys/debug.h>
50 #include <sys/note.h>
51 #include <sys/sysmacros.h>
52 
53 #include <sys/byteorder.h>
54 #include <sys/strsun.h>
55 #include <sys/strsubr.h>
56 #include <sys/dlpi.h>
57 #include <sys/pattr.h>
58 #include <sys/ethernet.h>
59 #include <sys/ib/clients/ibd/ibd.h>
60 #include <sys/policy.h>
61 #include <sys/atomic.h>
62 
63 #include <sys/multidata.h>
64 #include <sys/gld.h>
65 #include <sys/gldpriv.h>
66 
67 #include <sys/ddi.h>
68 #include <sys/sunddi.h>
69 
70 /*
71  * Macro to atomically increment counters of type uint32_t, uint64_t
72  * and ulong_t.
73  */
74 #define	BUMP(stat, delta)	do {				\
75 	_NOTE(CONSTANTCONDITION)				\
76 	if (sizeof (stat) == sizeof (uint32_t))	{		\
77 		atomic_add_32((uint32_t *)&stat, delta);	\
78 	_NOTE(CONSTANTCONDITION)				\
79 	} else if (sizeof (stat) == sizeof (uint64_t)) {	\
80 		atomic_add_64((uint64_t *)&stat, delta);	\
81 	}							\
82 	_NOTE(CONSTANTCONDITION)				\
83 } while (0)
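
/*
 * Illustrative usage sketch: the sizeof dispatch above selects the matching
 * atomic primitive at compile time, so one macro covers all counter widths,
 * e.g.
 *
 *	BUMP(vlan->gldv_stats->glds_pktxmt64, 1);	uses atomic_add_64()
 *	BUMP(vlan->gldv_stats->glds_multircv, 1);	width of ulong_t decides
 */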
84 
85 #define	UPDATE_STATS(vlan, pktinfo, number)	{		\
86 	if ((pktinfo).isBroadcast)				\
87 		(vlan)->gldv_stats->glds_brdcstxmt += (number);	\
88 	else if ((pktinfo).isMulticast)				\
89 		(vlan)->gldv_stats->glds_multixmt += (number);	\
90 	(vlan)->gldv_stats->glds_bytexmt64 += (pktinfo).pktLen;	\
91 	(vlan)->gldv_stats->glds_pktxmt64 += (number);		\
92 }
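
/*
 * Sketch of typical use (illustrative): after transmitting "cnt" packets
 * whose interpreted header information is in "pktinfo", the per-VLAN
 * counters are all bumped in one place:
 *
 *	UPDATE_STATS(vlan, pktinfo, cnt);
 *
 * Note that the body is a bare { } block rather than do { } while (0), so
 * the macro should be used as a full statement, not as the consequent of
 * an unbraced if/else.
 */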
93 
94 #ifdef GLD_DEBUG
95 int gld_debug = GLDERRS;
96 #endif
97 
98 /* called from gld_register */
99 static int gld_initstats(gld_mac_info_t *);
100 
101 /* called from kstat mechanism, and from wsrv's get_statistics */
102 static int gld_update_kstat(kstat_t *, int);
103 
104 /* statistics for additional vlans */
105 static int gld_init_vlan_stats(gld_vlan_t *);
106 static int gld_update_vlan_kstat(kstat_t *, int);
107 
108 /* called from gld_getinfo */
109 static dev_info_t *gld_finddevinfo(dev_t);
110 
111 /* called from wput, wsrv, unidata, and v0_sched to send a packet */
112 /* also from the source routing stuff for sending RDE protocol packets */
113 static int gld_start(queue_t *, mblk_t *, int, uint32_t);
114 static int gld_start_mdt(queue_t *, mblk_t *, int);
115 
116 /* called from gld_start[_mdt] to loopback packet(s) in promiscuous mode */
117 static void gld_precv(gld_mac_info_t *, gld_vlan_t *, mblk_t *);
118 static void gld_precv_mdt(gld_mac_info_t *, gld_vlan_t *, mblk_t *,
119     pdesc_t *, pktinfo_t *);
120 
121 /* receive group: called from gld_recv and gld_precv* with maclock held */
122 static void gld_sendup(gld_mac_info_t *, gld_vlan_t *, pktinfo_t *, mblk_t *,
123     int (*)());
124 static int gld_accept(gld_t *, pktinfo_t *);
125 static int gld_mcmatch(gld_t *, pktinfo_t *);
126 static int gld_multicast(unsigned char *, gld_t *);
127 static int gld_paccept(gld_t *, pktinfo_t *);
128 static void gld_passon(gld_t *, mblk_t *, pktinfo_t *,
129     void (*)(queue_t *, mblk_t *));
130 static mblk_t *gld_addudind(gld_t *, mblk_t *, pktinfo_t *);
131 
132 /* wsrv group: called from wsrv, single threaded per queue */
133 static int gld_ioctl(queue_t *, mblk_t *);
134 static void gld_fastpath(gld_t *, queue_t *, mblk_t *);
135 static int gld_cmds(queue_t *, mblk_t *);
136 static mblk_t *gld_bindack(queue_t *, mblk_t *);
137 static int gld_notify_req(queue_t *, mblk_t *);
138 static int gld_udqos(queue_t *, mblk_t *);
139 static int gld_bind(queue_t *, mblk_t *);
140 static int gld_unbind(queue_t *, mblk_t *);
141 static int gld_inforeq(queue_t *, mblk_t *);
142 static int gld_unitdata(queue_t *, mblk_t *);
143 static int gldattach(queue_t *, mblk_t *);
144 static int gldunattach(queue_t *, mblk_t *);
145 static int gld_enable_multi(queue_t *, mblk_t *);
146 static int gld_disable_multi(queue_t *, mblk_t *);
147 static void gld_send_disable_multi(gld_mac_info_t *, gld_mcast_t *);
148 static int gld_promisc(queue_t *, mblk_t *, t_uscalar_t, boolean_t);
149 static int gld_physaddr(queue_t *, mblk_t *);
150 static int gld_setaddr(queue_t *, mblk_t *);
151 static int gld_get_statistics(queue_t *, mblk_t *);
152 static int gld_cap(queue_t *, mblk_t *);
153 static int gld_cap_ack(queue_t *, mblk_t *);
154 static int gld_cap_enable(queue_t *, mblk_t *);
155 
156 /* misc utilities, some requiring various mutexes held */
157 static int gld_start_mac(gld_mac_info_t *);
158 static void gld_stop_mac(gld_mac_info_t *);
159 static void gld_set_ipq(gld_t *);
160 static void gld_flushqueue(queue_t *);
161 static glddev_t *gld_devlookup(int);
162 static int gld_findminor(glddev_t *);
163 static void gldinsque(void *, void *);
164 static void gldremque(void *);
165 void gld_bitrevcopy(caddr_t, caddr_t, size_t);
166 void gld_bitreverse(uchar_t *, size_t);
167 char *gld_macaddr_sprintf(char *, unsigned char *, int);
168 static gld_vlan_t *gld_add_vlan(gld_mac_info_t *, uint32_t vid);
169 static void gld_rem_vlan(gld_vlan_t *);
170 gld_vlan_t *gld_find_vlan(gld_mac_info_t *, uint32_t);
171 gld_vlan_t *gld_get_vlan(gld_mac_info_t *, uint32_t);
172 
173 #ifdef GLD_DEBUG
174 static void gld_check_assertions(void);
175 extern void gld_sr_dump(gld_mac_info_t *);
176 #endif
177 
178 /*
179  * Allocate and zero-out "number" structures each of type "structure" in
180  * kernel memory.
181  */
182 #define	GETSTRUCT(structure, number)   \
183 	(kmem_zalloc((uint_t)(sizeof (structure) * (number)), KM_NOSLEEP))
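
/*
 * Since GETSTRUCT uses KM_NOSLEEP, callers must handle allocation failure;
 * a minimal sketch (mirroring the glddev_t allocation in gld_register()
 * below):
 *
 *	glddev = GETSTRUCT(glddev_t, 1);
 *	if (glddev == NULL)
 *		return (DDI_FAILURE);
 */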
184 
185 #define	abs(a) ((a) < 0 ? -(a) : (a))
186 
187 uint32_t gld_global_options = GLD_OPT_NO_ETHRXSNAP;
188 
189 /*
190  * VLANs are only supported on ethernet devices that manipulate VLAN headers
191  * themselves.
192  */
193 #define	VLAN_CAPABLE(macinfo) \
194 	((macinfo)->gldm_type == DL_ETHER && \
195 	(macinfo)->gldm_send_tagged != NULL)
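
/*
 * Illustrative sketch (hypothetical driver "xx"): an Ethernet driver makes
 * VLAN_CAPABLE() true simply by supplying a tagged-send entry point in its
 * macinfo before calling gld_register():
 *
 *	macinfo->gldm_type = DL_ETHER;
 *	macinfo->gldm_send = xx_send;
 *	macinfo->gldm_send_tagged = xx_send_tagged;
 */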
196 
197 /*
198  * The set of notifications generatable by GLD itself, the additional
199  * set that can be generated if the MAC driver provides the link-state
200  * tracking callback capability, and the set supported by the GLD
201  * notification code below.
202  *
203  * PLEASE keep these in sync with what the code actually does!
204  */
205 static const uint32_t gld_internal_notes =	DL_NOTE_PROMISC_ON_PHYS |
206 						DL_NOTE_PROMISC_OFF_PHYS |
207 						DL_NOTE_PHYS_ADDR;
208 static const uint32_t gld_linkstate_notes =	DL_NOTE_LINK_DOWN |
209 						DL_NOTE_LINK_UP |
210 						DL_NOTE_SPEED;
211 static const uint32_t gld_supported_notes =	DL_NOTE_PROMISC_ON_PHYS |
212 						DL_NOTE_PROMISC_OFF_PHYS |
213 						DL_NOTE_PHYS_ADDR |
214 						DL_NOTE_LINK_DOWN |
215 						DL_NOTE_LINK_UP |
216 						DL_NOTE_SPEED;
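
/*
 * Worked example (values follow from the definitions above): a driver that
 * sets GLD_CAP_LINKSTATE in gldm_capabilities ends up advertising
 * (gld_internal_notes | gld_linkstate_notes) & gld_supported_notes, i.e.
 * all six notifications; a driver without that capability advertises only
 * the three internal ones.  See the corresponding code in gld_register().
 */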
217 
218 /* Media must correspond to #defines in gld.h */
219 static char *gld_media[] = {
220 	"unknown",	/* GLDM_UNKNOWN - driver cannot determine media */
221 	"aui",		/* GLDM_AUI */
222 	"bnc",		/* GLDM_BNC */
223 	"twpair",	/* GLDM_TP */
224 	"fiber",	/* GLDM_FIBER */
225 	"100baseT",	/* GLDM_100BT */
226 	"100vgAnyLan",	/* GLDM_VGANYLAN */
227 	"10baseT",	/* GLDM_10BT */
228 	"ring4",	/* GLDM_RING4 */
229 	"ring16",	/* GLDM_RING16 */
230 	"PHY/MII",	/* GLDM_PHYMII */
231 	"100baseTX",	/* GLDM_100BTX */
232 	"100baseT4",	/* GLDM_100BT4 */
233 	"unknown",	/* skip */
234 	"ipib",		/* GLDM_IB */
235 };
236 
237 /* Must correspond to #defines in gld.h */
238 static char *gld_duplex[] = {
239 	"unknown",	/* GLD_DUPLEX_UNKNOWN - not known or not applicable */
240 	"half",		/* GLD_DUPLEX_HALF */
241 	"full"		/* GLD_DUPLEX_FULL */
242 };
243 
244 extern int gld_interpret_ether(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
245 extern int gld_interpret_fddi(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
246 extern int gld_interpret_tr(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
247 extern int gld_interpret_ib(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
248 extern void gld_interpret_mdt_ib(gld_mac_info_t *, mblk_t *, pdescinfo_t *,
249     pktinfo_t *, int);
250 
251 extern mblk_t *gld_fastpath_ether(gld_t *, mblk_t *);
252 extern mblk_t *gld_fastpath_fddi(gld_t *, mblk_t *);
253 extern mblk_t *gld_fastpath_tr(gld_t *, mblk_t *);
254 extern mblk_t *gld_fastpath_ib(gld_t *, mblk_t *);
255 
256 extern mblk_t *gld_unitdata_ether(gld_t *, mblk_t *);
257 extern mblk_t *gld_unitdata_fddi(gld_t *, mblk_t *);
258 extern mblk_t *gld_unitdata_tr(gld_t *, mblk_t *);
259 extern mblk_t *gld_unitdata_ib(gld_t *, mblk_t *);
260 
261 extern void gld_init_ether(gld_mac_info_t *);
262 extern void gld_init_fddi(gld_mac_info_t *);
263 extern void gld_init_tr(gld_mac_info_t *);
264 extern void gld_init_ib(gld_mac_info_t *);
265 
266 extern void gld_uninit_ether(gld_mac_info_t *);
267 extern void gld_uninit_fddi(gld_mac_info_t *);
268 extern void gld_uninit_tr(gld_mac_info_t *);
269 extern void gld_uninit_ib(gld_mac_info_t *);
270 
271 /*
272  * Interface types currently supported by GLD.
273  * If you add new types, you must check all "XXX" strings in the GLD source
274  * for implementation issues that may affect the support of your new type.
275  * In particular, any type with gldm_addrlen > 6, or gldm_saplen != -2, will
276  * require generalizing this GLD source to handle the new cases.  In other
277  * words there are assumptions built into the code in a few places that must
278  * be fixed.  Be sure to turn on DEBUG/ASSERT code when testing a new type.
279  */
280 static gld_interface_t interfaces[] = {
281 
282 	/* Ethernet Bus */
283 	{
284 		DL_ETHER,
285 		(uint_t)-1,
286 		sizeof (struct ether_mac_frm),
287 		gld_interpret_ether,
288 		NULL,
289 		gld_fastpath_ether,
290 		gld_unitdata_ether,
291 		gld_init_ether,
292 		gld_uninit_ether,
293 		"ether"
294 	},
295 
296 	/* Fiber Distributed data interface */
297 	{
298 		DL_FDDI,
299 		4352,
300 		sizeof (struct fddi_mac_frm),
301 		gld_interpret_fddi,
302 		NULL,
303 		gld_fastpath_fddi,
304 		gld_unitdata_fddi,
305 		gld_init_fddi,
306 		gld_uninit_fddi,
307 		"fddi"
308 	},
309 
310 	/* Token Ring interface */
311 	{
312 		DL_TPR,
313 		17914,
314 		-1,			/* variable header size */
315 		gld_interpret_tr,
316 		NULL,
317 		gld_fastpath_tr,
318 		gld_unitdata_tr,
319 		gld_init_tr,
320 		gld_uninit_tr,
321 		"tpr"
322 	},
323 
324 	/* Infiniband */
325 	{
326 		DL_IB,
327 		4092,
328 		sizeof (struct ipoib_header),
329 		gld_interpret_ib,
330 		gld_interpret_mdt_ib,
331 		gld_fastpath_ib,
332 		gld_unitdata_ib,
333 		gld_init_ib,
334 		gld_uninit_ib,
335 		"ipib"
336 	},
337 };
338 
339 /*
340  * bit reversal lookup table.
341  */
342 static	uchar_t bit_rev[] = {
343 	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0,
344 	0x30, 0xb0, 0x70, 0xf0, 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
345 	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 0x04, 0x84, 0x44, 0xc4,
346 	0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
347 	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc,
348 	0x3c, 0xbc, 0x7c, 0xfc, 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
349 	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, 0x0a, 0x8a, 0x4a, 0xca,
350 	0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
351 	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6,
352 	0x36, 0xb6, 0x76, 0xf6, 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
353 	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 0x01, 0x81, 0x41, 0xc1,
354 	0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
355 	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9,
356 	0x39, 0xb9, 0x79, 0xf9, 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
357 	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, 0x0d, 0x8d, 0x4d, 0xcd,
358 	0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
359 	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3,
360 	0x33, 0xb3, 0x73, 0xf3, 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
361 	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 0x07, 0x87, 0x47, 0xc7,
362 	0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
363 	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf,
364 	0x3f, 0xbf, 0x7f, 0xff,
365 };
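
/*
 * Usage example: each byte indexes its mirror image, e.g. bit_rev[0x01] is
 * 0x80 and bit_rev[0xd4] is 0x2b.  gld_bitreverse()/gld_bitrevcopy() walk a
 * buffer through this table to convert MAC addresses between canonical
 * format and the bit-reversed order used by the Token Ring/FDDI support
 * code.
 */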
366 
367 /*
368  * User priorities, mapped from b_band.
369  */
370 static uint32_t user_priority[] = {
371 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
372 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
373 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
374 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
375 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
376 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
377 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
378 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
379 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
380 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
381 	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
382 	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
383 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
384 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
385 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
386 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
387 };
388 
389 #define	UPRI(gld, band)	((band != 0) ? user_priority[(band)] : (gld)->gld_upri)
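
/*
 * Example mapping (read off the table above): b_band values 0x20-0x3f map
 * to user priority 1 and 0xe0-0xff map to priority 7, while UPRI() falls
 * back to the stream's default (gld_upri) when b_band is 0; a typical
 * (illustrative) use on the send path is
 *
 *	upri = UPRI(gld, mp->b_band);
 */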
390 
391 static struct glddevice gld_device_list;  /* Per-system root of GLD tables */
392 
393 /*
394  * Module linkage information for the kernel.
395  */
396 
397 static struct modlmisc modlmisc = {
398 	&mod_miscops,		/* Type of module - a utility provider */
399 	"Generic LAN Driver (" GLD_VERSION_STRING ") %I%"
400 #ifdef GLD_DEBUG
401 	" DEBUG"
402 #endif
403 };
404 
405 static struct modlinkage modlinkage = {
406 	MODREV_1, &modlmisc, NULL
407 };
408 
409 int
410 _init(void)
411 {
412 	int e;
413 
414 	/* initialize gld_device_list mutex */
415 	mutex_init(&gld_device_list.gld_devlock, NULL, MUTEX_DRIVER, NULL);
416 
417 	/* initialize device driver (per-major) list */
418 	gld_device_list.gld_next =
419 	    gld_device_list.gld_prev = &gld_device_list;
420 
421 	if ((e = mod_install(&modlinkage)) != 0)
422 		mutex_destroy(&gld_device_list.gld_devlock);
423 
424 	return (e);
425 }
426 
427 int
428 _fini(void)
429 {
430 	int e;
431 
432 	if ((e = mod_remove(&modlinkage)) != 0)
433 		return (e);
434 
435 	ASSERT(gld_device_list.gld_next ==
436 	    (glddev_t *)&gld_device_list.gld_next);
437 	ASSERT(gld_device_list.gld_prev ==
438 	    (glddev_t *)&gld_device_list.gld_next);
439 	mutex_destroy(&gld_device_list.gld_devlock);
440 
441 	return (e);
442 }
443 
444 int
445 _info(struct modinfo *modinfop)
446 {
447 	return (mod_info(&modlinkage, modinfop));
448 }
449 
450 /*
451  * GLD service routines
452  */
453 
454 /* So that this GLD binary may be forward-compatible with future v2 drivers */
455 #define	GLD_MAC_RESERVED (16 * sizeof (caddr_t))
456 
457 /*ARGSUSED*/
458 gld_mac_info_t *
459 gld_mac_alloc(dev_info_t *devinfo)
460 {
461 	gld_mac_info_t *macinfo;
462 
463 	macinfo = kmem_zalloc(sizeof (gld_mac_info_t) + GLD_MAC_RESERVED,
464 	    KM_SLEEP);
465 
466 	/*
467 	 * The setting of gldm_driver_version will not be documented or allowed
468 	 * until a future release.
469 	 */
470 	macinfo->gldm_driver_version = GLD_VERSION_200;
471 
472 	/*
473 	 * GLD's version.  This also is undocumented for now, but will be
474 	 * available if needed in the future.
475 	 */
476 	macinfo->gldm_GLD_version = GLD_VERSION;
477 
478 	return (macinfo);
479 }
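
/*
 * Illustrative sketch of the expected call sequence in a driver's
 * attach(9E) routine; the "xx" driver name and entry points are
 * hypothetical and error handling is omitted:
 *
 *	gld_mac_info_t *macinfo = gld_mac_alloc(devinfo);
 *
 *	macinfo->gldm_devinfo = devinfo;
 *	macinfo->gldm_ppa = ddi_get_instance(devinfo);
 *	macinfo->gldm_type = DL_ETHER;
 *	macinfo->gldm_minpkt = 0;
 *	macinfo->gldm_maxpkt = ETHERMTU;
 *	macinfo->gldm_addrlen = ETHERADDRL;
 *	macinfo->gldm_saplen = -2;
 *	macinfo->gldm_broadcast_addr = xx_broadcastaddr;
 *	macinfo->gldm_vendor_addr = xx_factory_addr;
 *	macinfo->gldm_ident = "xx ethernet";
 *	macinfo->gldm_reset = xx_reset;
 *	macinfo->gldm_start = xx_start;
 *	macinfo->gldm_stop = xx_stop;
 *	macinfo->gldm_set_mac_addr = xx_set_mac_addr;
 *	macinfo->gldm_set_promiscuous = xx_set_promiscuous;
 *	macinfo->gldm_send = xx_send;
 *
 *	if (gld_register(devinfo, "xx", macinfo) != DDI_SUCCESS)
 *		gld_mac_free(macinfo);
 */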
480 
481 /*
482  * gld_mac_free must be called after the driver has removed interrupts
483  * and completely stopped calling gld_recv() and gld_sched().  At that
484  * point the interrupt routine is guaranteed by the system to have been
485  * exited and the maclock is no longer needed.  Of course, it is
486  * expected (required) that, assuming gld_register() succeeded,
487  * gld_unregister() was called before gld_mac_free().
488  */
489 void
490 gld_mac_free(gld_mac_info_t *macinfo)
491 {
492 	ASSERT(macinfo);
493 	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
494 
495 	/*
496 	 * Assert that if we made it through gld_register, then we must
497 	 * have unregistered.
498 	 */
499 	ASSERT(!GLDM_LOCK_INITED(macinfo) ||
500 	    (macinfo->gldm_GLD_flags & GLD_UNREGISTERED));
501 
502 	GLDM_LOCK_DESTROY(macinfo);
503 
504 	kmem_free(macinfo, sizeof (gld_mac_info_t) + GLD_MAC_RESERVED);
505 }
506 
507 /*
508  * gld_register -- called once per device instance (PPA)
509  *
510  * During its attach routine, a real device driver will register with GLD
511  * so that later opens and dl_attach_reqs will work.  The arguments are the
512  * devinfo pointer, the device name, and a macinfo structure describing the
513  * physical device instance.
514  */
515 int
516 gld_register(dev_info_t *devinfo, char *devname, gld_mac_info_t *macinfo)
517 {
518 	int mediatype;
519 	int major = ddi_name_to_major(devname), i;
520 	glddev_t *glddev;
521 	gld_mac_pvt_t *mac_pvt;
522 	char minordev[32];
523 	char pbuf[3*GLD_MAX_ADDRLEN];
524 	gld_interface_t *ifp = NULL;
525 
526 	ASSERT(devinfo != NULL);
527 	ASSERT(macinfo != NULL);
528 
529 	if (macinfo->gldm_driver_version != GLD_VERSION)
530 		return (DDI_FAILURE);
531 
532 	mediatype = macinfo->gldm_type;
533 
534 	/*
535 	 * Entry points should be ready for us.
536 	 * ioctl is optional.
537 	 * set_multicast and get_stats are optional in v0.
538 	 * intr is only required if you add an interrupt.
539 	 */
540 	ASSERT(macinfo->gldm_reset != NULL);
541 	ASSERT(macinfo->gldm_start != NULL);
542 	ASSERT(macinfo->gldm_stop != NULL);
543 	ASSERT(macinfo->gldm_set_mac_addr != NULL);
544 	ASSERT(macinfo->gldm_set_promiscuous != NULL);
545 	ASSERT(macinfo->gldm_send != NULL);
546 
547 	ASSERT(macinfo->gldm_maxpkt >= macinfo->gldm_minpkt);
548 	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
549 	ASSERT(macinfo->gldm_broadcast_addr != NULL);
550 	ASSERT(macinfo->gldm_vendor_addr != NULL);
551 	ASSERT(macinfo->gldm_ident != NULL);
552 
553 	if (macinfo->gldm_addrlen > GLD_MAX_ADDRLEN) {
554 		cmn_err(CE_WARN, "GLD: %s driver gldm_addrlen %d > %d not sup"
555 		    "ported", devname, macinfo->gldm_addrlen, GLD_MAX_ADDRLEN);
556 		return (DDI_FAILURE);
557 	}
558 
559 	/*
560 	 * GLD only functions properly with saplen == -2
561 	 */
562 	if (macinfo->gldm_saplen != -2) {
563 		cmn_err(CE_WARN, "GLD: %s driver gldm_saplen %d != -2 "
564 		    "not supported", devname, macinfo->gldm_saplen);
565 		return (DDI_FAILURE);
566 	}
567 
568 	/* see gld_rsrv() */
569 	if (ddi_getprop(DDI_DEV_T_NONE, devinfo, 0, "fast_recv", 0))
570 		macinfo->gldm_options |= GLDOPT_FAST_RECV;
571 
572 	mutex_enter(&gld_device_list.gld_devlock);
573 	glddev = gld_devlookup(major);
574 
575 	/*
576 	 *  Allocate per-driver (major) data structure if necessary
577 	 */
578 	if (glddev == NULL) {
579 		/* first occurrence of this device name (major number) */
580 		glddev = GETSTRUCT(glddev_t, 1);
581 		if (glddev == NULL) {
582 			mutex_exit(&gld_device_list.gld_devlock);
583 			return (DDI_FAILURE);
584 		}
585 		(void) strncpy(glddev->gld_name, devname,
586 		    sizeof (glddev->gld_name) - 1);
587 		glddev->gld_major = major;
588 		glddev->gld_nextminor = GLD_MIN_CLONE_MINOR;
589 		glddev->gld_mac_next = glddev->gld_mac_prev =
590 			(gld_mac_info_t *)&glddev->gld_mac_next;
591 		glddev->gld_str_next = glddev->gld_str_prev =
592 			(gld_t *)&glddev->gld_str_next;
593 		mutex_init(&glddev->gld_devlock, NULL, MUTEX_DRIVER, NULL);
594 
595 		/* allow increase of number of supported multicast addrs */
596 		glddev->gld_multisize = ddi_getprop(DDI_DEV_T_NONE,
597 		    devinfo, 0, "multisize", GLD_MAX_MULTICAST);
598 
599 		/*
600 		 * Optionally restrict DLPI provider style
601 		 *
602 		 * -1 - don't create style 1 nodes
603 		 * -2 - don't create style 2 nodes
604 		 */
605 		glddev->gld_styles = ddi_getprop(DDI_DEV_T_NONE, devinfo, 0,
606 		    "gld-provider-styles", 0);
607 
608 		/* Stuff that's needed before any PPA gets attached */
609 		glddev->gld_type = macinfo->gldm_type;
610 		glddev->gld_minsdu = macinfo->gldm_minpkt;
611 		glddev->gld_saplen = macinfo->gldm_saplen;
612 		glddev->gld_addrlen = macinfo->gldm_addrlen;
613 		glddev->gld_broadcast = kmem_zalloc(macinfo->gldm_addrlen,
614 		    KM_SLEEP);
615 		bcopy(macinfo->gldm_broadcast_addr,
616 		    glddev->gld_broadcast, macinfo->gldm_addrlen);
617 		glddev->gld_maxsdu = macinfo->gldm_maxpkt;
618 		gldinsque(glddev, gld_device_list.gld_prev);
619 	}
620 	glddev->gld_ndevice++;
621 	/* Now glddev can't go away until we unregister this mac (or fail) */
622 	mutex_exit(&gld_device_list.gld_devlock);
623 
624 	/*
625 	 *  Per-instance initialization
626 	 */
627 
628 	/*
629 	 * Initialize per-mac structure that is private to GLD.
630 	 * Set up interface pointer. These are device class specific pointers
631 	 * used to handle FDDI/TR/ETHER/IPoIB specific packets.
632 	 */
633 	for (i = 0; i < sizeof (interfaces)/sizeof (*interfaces); i++) {
634 		if (mediatype != interfaces[i].mac_type)
635 			continue;
636 
637 		macinfo->gldm_mac_pvt = kmem_zalloc(sizeof (gld_mac_pvt_t),
638 		    KM_SLEEP);
639 		((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep = ifp =
640 		    &interfaces[i];
641 		break;
642 	}
643 
644 	if (ifp == NULL) {
645 		cmn_err(CE_WARN, "GLD: this version does not support %s driver "
646 		    "of type %d", devname, mediatype);
647 		goto failure;
648 	}
649 
650 	/*
651 	 * Driver can only register MTU within legal media range.
652 	 */
653 	if (macinfo->gldm_maxpkt > ifp->mtu_size) {
654 		cmn_err(CE_WARN, "GLD: oversize MTU is specified by driver %s",
655 		    devname);
656 		goto failure;
657 	}
658 
659 	/*
660 	 * For now, only Infiniband drivers can use MDT. Do not add
661 	 * support for Ethernet, FDDI or TR.
662 	 */
663 	if (macinfo->gldm_mdt_pre != NULL) {
664 		if (mediatype != DL_IB) {
665 			cmn_err(CE_WARN, "GLD: MDT not supported for %s "
666 			    "driver of type %d", devname, mediatype);
667 			goto failure;
668 		}
669 
670 		/*
671 		 * Validate entry points.
672 		 */
673 		if ((macinfo->gldm_mdt_send == NULL) ||
674 		    (macinfo->gldm_mdt_post == NULL)) {
675 			cmn_err(CE_WARN, "GLD: invalid MDT entry points for "
676 			    "%s driver of type %d", devname, mediatype);
677 			goto failure;
678 		}
679 		macinfo->gldm_options |= GLDOPT_MDT;
680 	}
681 
682 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
683 	mac_pvt->major_dev = glddev;
684 
685 	mac_pvt->curr_macaddr = kmem_zalloc(macinfo->gldm_addrlen, KM_SLEEP);
686 	/*
687 	 * XXX Do bit-reversed devices store gldm_vendor in canonical
688 	 * format or in wire format?  Also gldm_broadcast.  For now
689 	 * we are assuming canonical, but I'm not sure that makes the
690 	 * most sense for ease of driver implementation.
691 	 */
692 	bcopy(macinfo->gldm_vendor_addr, mac_pvt->curr_macaddr,
693 	    macinfo->gldm_addrlen);
694 	mac_pvt->statistics = kmem_zalloc(sizeof (struct gld_stats), KM_SLEEP);
695 
696 	/*
697 	 * The available set of notifications is those generatable by GLD
698 	 * itself, plus those corresponding to the capabilities of the MAC
699 	 * driver, intersected with those supported by gld_notify_ind() above.
700 	 */
701 	mac_pvt->notifications = gld_internal_notes;
702 	if (macinfo->gldm_capabilities & GLD_CAP_LINKSTATE)
703 		mac_pvt->notifications |= gld_linkstate_notes;
704 	mac_pvt->notifications &= gld_supported_notes;
705 
706 	GLDM_LOCK_INIT(macinfo);
707 
708 	ddi_set_driver_private(devinfo, macinfo);
709 
710 	/*
711 	 * Now atomically get a PPA and put ourselves on the mac list.
712 	 */
713 	mutex_enter(&glddev->gld_devlock);
714 
715 #ifdef DEBUG
716 	if (macinfo->gldm_ppa != ddi_get_instance(devinfo))
717 		cmn_err(CE_WARN, "%s%d instance != ppa %d",
718 		    ddi_driver_name(devinfo), ddi_get_instance(devinfo),
719 		    macinfo->gldm_ppa);
720 #endif
721 
722 	/*
723 	 * Create style 2 node (gated by gld-provider-styles property).
724 	 *
725 	 * NOTE: When the CLONE_DEV flag is specified to
726 	 *	 ddi_create_minor_node() the minor number argument is
727 	 *	 immaterial. Opens of that node will go via the clone
728 	 *	 driver and gld_open() will always be passed a dev_t with
729 	 *	 minor of zero.
730 	 */
731 	if (glddev->gld_styles != -2) {
732 		if (ddi_create_minor_node(devinfo, glddev->gld_name, S_IFCHR,
733 		    0, DDI_NT_NET, CLONE_DEV) == DDI_FAILURE) {
734 			mutex_exit(&glddev->gld_devlock);
735 			goto late_failure;
736 		}
737 	}
738 
739 	/*
740 	 * Create style 1 node (gated by gld-provider-styles property)
741 	 */
742 	if (glddev->gld_styles != -1) {
743 		(void) sprintf(minordev, "%s%d", glddev->gld_name,
744 		    macinfo->gldm_ppa);
745 		if (ddi_create_minor_node(devinfo, minordev, S_IFCHR,
746 		    GLD_STYLE1_PPA_TO_MINOR(macinfo->gldm_ppa), DDI_NT_NET,
747 		    0) != DDI_SUCCESS) {
748 			mutex_exit(&glddev->gld_devlock);
749 			goto late_failure;
750 		}
751 	}
752 
753 	/* add ourselves to this major device's linked list of instances */
754 	gldinsque(macinfo, glddev->gld_mac_prev);
755 
756 	mutex_exit(&glddev->gld_devlock);
757 
758 	/*
759 	 * Unfortunately we need the ppa before we call gld_initstats();
760  * otherwise we would do this just above the mutex_enter above,
761  * in which case we could have set MAC_READY inside the mutex and
762  * wouldn't have needed to check it in open and DL_ATTACH.  We
763  * don't want to do the initstats/kstat_create
764 	 * inside the mutex because it might get taken in our kstat_update
765 	 * routine and cause a deadlock with kstat_chain_lock.
766 	 */
767 
768 	/* gld_initstats() calls (*ifp->init)() */
769 	if (gld_initstats(macinfo) != GLD_SUCCESS) {
770 		mutex_enter(&glddev->gld_devlock);
771 		gldremque(macinfo);
772 		mutex_exit(&glddev->gld_devlock);
773 		goto late_failure;
774 	}
775 
776 	/*
777 	 * Need to indicate we are NOW ready to process interrupts;
778 	 * any interrupt before this is set is for someone else.
779  * This flag is also now used to tell open, et al. that this
780 	 * mac is now fully ready and available for use.
781 	 */
782 	GLDM_LOCK(macinfo, RW_WRITER);
783 	macinfo->gldm_GLD_flags |= GLD_MAC_READY;
784 	GLDM_UNLOCK(macinfo);
785 
786 	/* log local ethernet address -- XXX not DDI compliant */
787 	if (macinfo->gldm_addrlen == sizeof (struct ether_addr))
788 		(void) localetheraddr(
789 		    (struct ether_addr *)macinfo->gldm_vendor_addr, NULL);
790 
791 	/* now put announcement into the message buffer */
792 	cmn_err(CE_CONT, "!%s%d: %s: type \"%s\" mac address %s\n",
793 	    glddev->gld_name,
794 	    macinfo->gldm_ppa, macinfo->gldm_ident,
795 	    mac_pvt->interfacep->mac_string,
796 	    gld_macaddr_sprintf(pbuf, macinfo->gldm_vendor_addr,
797 	    macinfo->gldm_addrlen));
798 
799 	ddi_report_dev(devinfo);
800 	return (DDI_SUCCESS);
801 
802 late_failure:
803 	ddi_remove_minor_node(devinfo, NULL);
804 	GLDM_LOCK_DESTROY(macinfo);
805 	if (mac_pvt->curr_macaddr != NULL)
806 	    kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
807 	if (mac_pvt->statistics != NULL)
808 	    kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
809 	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
810 	macinfo->gldm_mac_pvt = NULL;
811 
812 failure:
813 	mutex_enter(&gld_device_list.gld_devlock);
814 	glddev->gld_ndevice--;
815 	/*
816 	 * Note that just because this goes to zero here does not necessarily
817 	 * mean that we were the one who added the glddev above.  It's
818  * possible that the first mac unattached while we were in here
819 	 * failing to attach the second mac.  But we're now the last.
820 	 */
821 	if (glddev->gld_ndevice == 0) {
822 		/* There should be no macinfos left */
823 		ASSERT(glddev->gld_mac_next ==
824 		    (gld_mac_info_t *)&glddev->gld_mac_next);
825 		ASSERT(glddev->gld_mac_prev ==
826 		    (gld_mac_info_t *)&glddev->gld_mac_next);
827 
828 		/*
829 		 * There should be no DL_UNATTACHED streams: the system
830 		 * should not have detached the "first" devinfo which has
831 		 * all the open style 2 streams.
832 		 *
833 		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
834 		 */
835 		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
836 		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);
837 
838 		gldremque(glddev);
839 		mutex_destroy(&glddev->gld_devlock);
840 		if (glddev->gld_broadcast != NULL)
841 			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
842 		kmem_free(glddev, sizeof (glddev_t));
843 	}
844 	mutex_exit(&gld_device_list.gld_devlock);
845 
846 	return (DDI_FAILURE);
847 }
848 
849 /*
850  * gld_unregister (macinfo)
851  * remove the macinfo structure from local structures
852  * this is cleanup for a driver to be unloaded
853  */
854 int
855 gld_unregister(gld_mac_info_t *macinfo)
856 {
857 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
858 	glddev_t *glddev = mac_pvt->major_dev;
859 	gld_interface_t *ifp;
860 	int multisize = sizeof (gld_mcast_t) * glddev->gld_multisize;
861 
862 	mutex_enter(&glddev->gld_devlock);
863 	GLDM_LOCK(macinfo, RW_WRITER);
864 
865 	if (mac_pvt->nvlan > 0) {
866 		GLDM_UNLOCK(macinfo);
867 		mutex_exit(&glddev->gld_devlock);
868 		return (DDI_FAILURE);
869 	}
870 
871 #ifdef	GLD_DEBUG
872 	{
873 		int i;
874 
875 		for (i = 0; i < VLAN_HASHSZ; i++) {
876 			if ((mac_pvt->vlan_hash[i] != NULL))
877 				cmn_err(CE_PANIC,
878 				    "%s, line %d: "
879 				    "mac_pvt->vlan_hash[%d] != NULL",
880 				    __FILE__, __LINE__, i);
881 		}
882 	}
883 #endif
884 
885 	/* Delete this mac */
886 	gldremque(macinfo);
887 
888 	/* Disallow further entries to gld_recv() and gld_sched() */
889 	macinfo->gldm_GLD_flags |= GLD_UNREGISTERED;
890 
891 	GLDM_UNLOCK(macinfo);
892 	mutex_exit(&glddev->gld_devlock);
893 
894 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
895 	(*ifp->uninit)(macinfo);
896 
897 	ASSERT(mac_pvt->kstatp);
898 	kstat_delete(mac_pvt->kstatp);
899 
900 	ASSERT(GLDM_LOCK_INITED(macinfo));
901 	kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
902 	kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
903 
904 	if (mac_pvt->mcast_table != NULL)
905 		kmem_free(mac_pvt->mcast_table, multisize);
906 	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
907 	macinfo->gldm_mac_pvt = (caddr_t)NULL;
908 
909 	/* We now have one fewer instance for this major device */
910 	mutex_enter(&gld_device_list.gld_devlock);
911 	glddev->gld_ndevice--;
912 	if (glddev->gld_ndevice == 0) {
913 		/* There should be no macinfos left */
914 		ASSERT(glddev->gld_mac_next ==
915 		    (gld_mac_info_t *)&glddev->gld_mac_next);
916 		ASSERT(glddev->gld_mac_prev ==
917 		    (gld_mac_info_t *)&glddev->gld_mac_next);
918 
919 		/*
920 		 * There should be no DL_UNATTACHED streams: the system
921 		 * should not have detached the "first" devinfo which has
922 		 * all the open style 2 streams.
923 		 *
924 		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
925 		 */
926 		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
927 		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);
928 
929 		ddi_remove_minor_node(macinfo->gldm_devinfo, NULL);
930 		gldremque(glddev);
931 		mutex_destroy(&glddev->gld_devlock);
932 		if (glddev->gld_broadcast != NULL)
933 			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
934 		kmem_free(glddev, sizeof (glddev_t));
935 	}
936 	mutex_exit(&gld_device_list.gld_devlock);
937 
938 	return (DDI_SUCCESS);
939 }
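
/*
 * Illustrative detach(9E) counterpart to the attach sketch above
 * (hypothetical driver "xx"): gld_unregister() fails, and detach must
 * fail, while streams are still attached to this PPA; once it succeeds,
 * the driver removes its interrupt so gld_recv()/gld_sched() can no
 * longer be called, and only then frees the macinfo:
 *
 *	if (gld_unregister(macinfo) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	ddi_remove_intr(devinfo, 0, xx_iblock);
 *	gld_mac_free(macinfo);
 *	return (DDI_SUCCESS);
 */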
940 
941 /*
942  * gld_initstats
943  * called from gld_register
944  */
945 static int
946 gld_initstats(gld_mac_info_t *macinfo)
947 {
948 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
949 	struct gldkstats *sp;
950 	glddev_t *glddev;
951 	kstat_t *ksp;
952 	gld_interface_t *ifp;
953 
954 	glddev = mac_pvt->major_dev;
955 
956 	if ((ksp = kstat_create(glddev->gld_name, macinfo->gldm_ppa,
957 	    NULL, "net", KSTAT_TYPE_NAMED,
958 	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
959 		cmn_err(CE_WARN,
960 		    "GLD: failed to create kstat structure for %s%d",
961 		    glddev->gld_name, macinfo->gldm_ppa);
962 		return (GLD_FAILURE);
963 	}
964 	mac_pvt->kstatp = ksp;
965 
966 	ksp->ks_update = gld_update_kstat;
967 	ksp->ks_private = (void *)macinfo;
968 
969 	sp = ksp->ks_data;
970 	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
971 	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
972 	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
973 	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
974 	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
975 	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
976 	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
977 	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
978 	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
979 	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
980 	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
981 	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
982 	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
983 	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
984 	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
985 	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
986 	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
987 	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
988 	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
989 	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
990 	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
991 	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
992 	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);
993 
994 	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
995 	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
996 	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);
997 
998 	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
999 	    KSTAT_DATA_UINT32);
1000 	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
1001 	    KSTAT_DATA_UINT32);
1002 
1003 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
1004 
1005 	(*ifp->init)(macinfo);
1006 
1007 	kstat_install(ksp);
1008 
1009 	return (GLD_SUCCESS);
1010 }
1011 
1012 /* called from kstat mechanism, and from wsrv's get_statistics_req */
1013 static int
1014 gld_update_kstat(kstat_t *ksp, int rw)
1015 {
1016 	gld_mac_info_t	*macinfo;
1017 	gld_mac_pvt_t	*mac_pvt;
1018 	struct gldkstats *gsp;
1019 	struct gld_stats *stats;
1020 
1021 	if (rw == KSTAT_WRITE)
1022 		return (EACCES);
1023 
1024 	macinfo = (gld_mac_info_t *)ksp->ks_private;
1025 	ASSERT(macinfo != NULL);
1026 
1027 	GLDM_LOCK(macinfo, RW_WRITER);
1028 
1029 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
1030 		GLDM_UNLOCK(macinfo);
1031 		return (EIO);	/* this one's not ready yet */
1032 	}
1033 
1034 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
1035 		GLDM_UNLOCK(macinfo);
1036 		return (EIO);	/* this one's not ready any more */
1037 	}
1038 
1039 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1040 	gsp = mac_pvt->kstatp->ks_data;
1041 	ASSERT(gsp);
1042 	stats = mac_pvt->statistics;
1043 
1044 	if (macinfo->gldm_get_stats)
1045 		(void) (*macinfo->gldm_get_stats)(macinfo, stats);
1046 
1047 	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
1048 	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
1049 	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
1050 	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
1051 	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;	/* 0 for now */
1052 	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
1053 
1054 	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
1055 	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
1056 	gsp->glds_xmtbadinterp.value.ui32 = stats->glds_xmtbadinterp;
1057 
1058 	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
1059 	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
1060 	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
1061 	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
1062 	gsp->glds_multircv.value.ul = stats->glds_multircv;
1063 	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
1064 	gsp->glds_blocked.value.ul = stats->glds_blocked;
1065 	gsp->glds_overflow.value.ul = stats->glds_overflow;
1066 	gsp->glds_underflow.value.ul = stats->glds_underflow;
1067 	gsp->glds_missed.value.ul = stats->glds_missed;
1068 	gsp->glds_norcvbuf.value.ul = stats->glds_norcvbuf +
1069 	    stats->glds_gldnorcvbuf;
1070 	gsp->glds_intr.value.ul = stats->glds_intr;
1071 
1072 	gsp->glds_speed.value.ui64 = stats->glds_speed;
1073 	gsp->glds_unknowns.value.ul = stats->glds_unknowns;
1074 	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
1075 	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
1076 	gsp->glds_rcvbadinterp.value.ui32 = stats->glds_rcvbadinterp;
1077 
1078 	if (mac_pvt->nprom)
1079 		(void) strcpy(gsp->glds_prom.value.c, "phys");
1080 	else if (mac_pvt->nprom_multi)
1081 		(void) strcpy(gsp->glds_prom.value.c, "multi");
1082 	else
1083 		(void) strcpy(gsp->glds_prom.value.c, "off");
1084 
1085 	(void) strcpy(gsp->glds_media.value.c, gld_media[
1086 	    stats->glds_media < sizeof (gld_media) / sizeof (gld_media[0])
1087 	    ? stats->glds_media : 0]);
1088 
1089 	switch (macinfo->gldm_type) {
1090 	case DL_ETHER:
1091 		gsp->glds_frame.value.ul = stats->glds_frame;
1092 		gsp->glds_crc.value.ul = stats->glds_crc;
1093 		gsp->glds_collisions.value.ul = stats->glds_collisions;
1094 		gsp->glds_excoll.value.ul = stats->glds_excoll;
1095 		gsp->glds_defer.value.ul = stats->glds_defer;
1096 		gsp->glds_short.value.ul = stats->glds_short;
1097 		gsp->glds_xmtlatecoll.value.ul = stats->glds_xmtlatecoll;
1098 		gsp->glds_nocarrier.value.ul = stats->glds_nocarrier;
1099 		gsp->glds_dot3_first_coll.value.ui32 =
1100 		    stats->glds_dot3_first_coll;
1101 		gsp->glds_dot3_multi_coll.value.ui32 =
1102 		    stats->glds_dot3_multi_coll;
1103 		gsp->glds_dot3_sqe_error.value.ui32 =
1104 		    stats->glds_dot3_sqe_error;
1105 		gsp->glds_dot3_mac_xmt_error.value.ui32 =
1106 		    stats->glds_dot3_mac_xmt_error;
1107 		gsp->glds_dot3_mac_rcv_error.value.ui32 =
1108 		    stats->glds_dot3_mac_rcv_error;
1109 		gsp->glds_dot3_frame_too_long.value.ui32 =
1110 		    stats->glds_dot3_frame_too_long;
1111 		(void) strcpy(gsp->glds_duplex.value.c, gld_duplex[
1112 		    stats->glds_duplex <
1113 		    sizeof (gld_duplex) / sizeof (gld_duplex[0]) ?
1114 		    stats->glds_duplex : 0]);
1115 		break;
1116 	case DL_TPR:
1117 		gsp->glds_dot5_line_error.value.ui32 =
1118 		    stats->glds_dot5_line_error;
1119 		gsp->glds_dot5_burst_error.value.ui32 =
1120 		    stats->glds_dot5_burst_error;
1121 		gsp->glds_dot5_signal_loss.value.ui32 =
1122 		    stats->glds_dot5_signal_loss;
1123 		gsp->glds_dot5_ace_error.value.ui32 =
1124 		    stats->glds_dot5_ace_error;
1125 		gsp->glds_dot5_internal_error.value.ui32 =
1126 		    stats->glds_dot5_internal_error;
1127 		gsp->glds_dot5_lost_frame_error.value.ui32 =
1128 		    stats->glds_dot5_lost_frame_error;
1129 		gsp->glds_dot5_frame_copied_error.value.ui32 =
1130 		    stats->glds_dot5_frame_copied_error;
1131 		gsp->glds_dot5_token_error.value.ui32 =
1132 		    stats->glds_dot5_token_error;
1133 		gsp->glds_dot5_freq_error.value.ui32 =
1134 		    stats->glds_dot5_freq_error;
1135 		break;
1136 	case DL_FDDI:
1137 		gsp->glds_fddi_mac_error.value.ui32 =
1138 		    stats->glds_fddi_mac_error;
1139 		gsp->glds_fddi_mac_lost.value.ui32 =
1140 		    stats->glds_fddi_mac_lost;
1141 		gsp->glds_fddi_mac_token.value.ui32 =
1142 		    stats->glds_fddi_mac_token;
1143 		gsp->glds_fddi_mac_tvx_expired.value.ui32 =
1144 		    stats->glds_fddi_mac_tvx_expired;
1145 		gsp->glds_fddi_mac_late.value.ui32 =
1146 		    stats->glds_fddi_mac_late;
1147 		gsp->glds_fddi_mac_ring_op.value.ui32 =
1148 		    stats->glds_fddi_mac_ring_op;
1149 		break;
1150 	case DL_IB:
1151 		break;
1152 	default:
1153 		break;
1154 	}
1155 
1156 	GLDM_UNLOCK(macinfo);
1157 
1158 #ifdef GLD_DEBUG
1159 	gld_check_assertions();
1160 	if (gld_debug & GLDRDE)
1161 		gld_sr_dump(macinfo);
1162 #endif
1163 
1164 	return (0);
1165 }
1166 
1167 static int
1168 gld_init_vlan_stats(gld_vlan_t *vlan)
1169 {
1170 	gld_mac_info_t *mac = vlan->gldv_mac;
1171 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
1172 	struct gldkstats *sp;
1173 	glddev_t *glddev;
1174 	kstat_t *ksp;
1175 	char *name;
1176 	int instance;
1177 
1178 	glddev = mac_pvt->major_dev;
1179 	name = glddev->gld_name;
1180 	instance = (vlan->gldv_id * GLD_VLAN_SCALE) + mac->gldm_ppa;
1181 
1182 	if ((ksp = kstat_create(name, instance,
1183 	    NULL, "net", KSTAT_TYPE_NAMED,
1184 	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
1185 		cmn_err(CE_WARN,
1186 		    "GLD: failed to create kstat structure for %s%d",
1187 		    name, instance);
1188 		return (GLD_FAILURE);
1189 	}
1190 
1191 	vlan->gldv_kstatp = ksp;
1192 
1193 	ksp->ks_update = gld_update_vlan_kstat;
1194 	ksp->ks_private = (void *)vlan;
1195 
1196 	sp = ksp->ks_data;
1197 	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
1198 	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
1199 	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
1200 	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
1201 	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
1202 	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
1203 	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
1204 	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
1205 	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
1206 	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
1207 	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
1208 	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
1209 	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
1210 	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
1211 	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
1212 	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
1213 	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
1214 	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
1215 	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
1216 	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
1217 	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
1218 	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
1219 	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);
1220 
1221 	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
1222 	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
1223 	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);
1224 
1225 	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
1226 	    KSTAT_DATA_UINT32);
1227 	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
1228 	    KSTAT_DATA_UINT32);
1229 
1230 	kstat_install(ksp);
1231 	return (GLD_SUCCESS);
1232 }
1233 
1234 static int
1235 gld_update_vlan_kstat(kstat_t *ksp, int rw)
1236 {
1237 	gld_vlan_t	*vlan;
1238 	gld_mac_info_t	*macinfo;
1239 	struct gldkstats *gsp;
1240 	struct gld_stats *stats;
1241 
1242 	if (rw == KSTAT_WRITE)
1243 		return (EACCES);
1244 
1245 	vlan = (gld_vlan_t *)ksp->ks_private;
1246 	ASSERT(vlan != NULL);
1247 
1248 	macinfo = vlan->gldv_mac;
1249 	GLDM_LOCK(macinfo, RW_WRITER);
1250 
1251 	gsp = vlan->gldv_kstatp->ks_data;
1252 	ASSERT(gsp);
1253 	stats = vlan->gldv_stats;
1254 
1255 	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
1256 	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
1257 	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
1258 	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
1259 	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
1260 	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;
1261 	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
1262 	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
1263 	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
1264 
1265 	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
1266 	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
1267 	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
1268 	gsp->glds_multircv.value.ul = stats->glds_multircv;
1269 	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
1270 	gsp->glds_blocked.value.ul = stats->glds_blocked;
1271 	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
1272 	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
1273 
1274 	GLDM_UNLOCK(macinfo);
1275 	return (0);
1276 }
1277 
1278 /*
1279  * The device dependent driver specifies gld_getinfo as its getinfo routine.
1280  */
1281 /*ARGSUSED*/
1282 int
1283 gld_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
1284 {
1285 	dev_info_t	*devinfo;
1286 	minor_t		minor = getminor((dev_t)arg);
1287 	int		rc = DDI_FAILURE;
1288 
1289 	switch (cmd) {
1290 	case DDI_INFO_DEVT2DEVINFO:
1291 		if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
1292 			*(dev_info_t **)resultp = devinfo;
1293 			rc = DDI_SUCCESS;
1294 		}
1295 		break;
1296 	case DDI_INFO_DEVT2INSTANCE:
1297 		/* Need static mapping for deferred attach */
1298 		if (minor == GLD_USE_STYLE2) {
1299 			/*
1300 			 * Style 2:  this minor number does not correspond to
1301 			 * any particular instance number.
1302 			 */
1303 			rc = DDI_FAILURE;
1304 		} else if (minor <= GLD_MAX_STYLE1_MINOR) {
1305 			/* Style 1:  calculate the PPA from the minor */
1306 			*resultp = (void *)GLD_STYLE1_MINOR_TO_PPA(minor);
1307 			rc = DDI_SUCCESS;
1308 		} else {
1309 			/* Clone:  look for it.  Not a static mapping */
1310 			if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
1311 				*resultp = (void *)ddi_get_instance(devinfo);
1312 				rc = DDI_SUCCESS;
1313 			}
1314 		}
1315 		break;
1316 	}
1317 
1318 	return (rc);
1319 }
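
/*
 * Sketch of how a GLD driver typically wires these entry points into its
 * STREAMS and DDI structures (the "xx" names and watermark values are
 * hypothetical):
 *
 *	static struct module_info xx_minfo = {
 *		0, "xx", 0, INFPSZ, XX_HIWAT, XX_LOWAT
 *	};
 *	static struct qinit xx_rinit = {
 *		NULL, gld_rsrv, gld_open, gld_close, NULL, &xx_minfo, NULL
 *	};
 *	static struct qinit xx_winit = {
 *		gld_wput, gld_wsrv, NULL, NULL, NULL, &xx_minfo, NULL
 *	};
 *	static struct streamtab xx_streamtab = {
 *		&xx_rinit, &xx_winit, NULL, NULL
 *	};
 *
 * with the driver's dev_ops pointing devo_getinfo at gld_getinfo and its
 * cb_ops pointing cb_str at &xx_streamtab.
 */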
1320 
1321 /* called from gld_getinfo */
1322 dev_info_t *
1323 gld_finddevinfo(dev_t dev)
1324 {
1325 	minor_t		minor = getminor(dev);
1326 	glddev_t	*device;
1327 	gld_mac_info_t	*mac;
1328 	gld_vlan_t	*vlan;
1329 	gld_t		*str;
1330 	dev_info_t	*devinfo = NULL;
1331 	int		i;
1332 
1333 	if (minor == GLD_USE_STYLE2) {
1334 		/*
1335 		 * Style 2:  this minor number does not correspond to
1336 		 * any particular instance number.
1337 		 *
1338 		 * XXX We don't know what to say.  See Bug 1165519.
1339 		 */
1340 		return (NULL);
1341 	}
1342 
1343 	mutex_enter(&gld_device_list.gld_devlock);	/* hold the device */
1344 
1345 	device = gld_devlookup(getmajor(dev));
1346 	if (device == NULL) {
1347 		/* There are no attached instances of this device */
1348 		mutex_exit(&gld_device_list.gld_devlock);
1349 		return (NULL);
1350 	}
1351 
1352 	/*
1353 	 * Search all attached macs and streams.
1354 	 *
1355 	 * XXX We don't bother checking the DL_UNATTACHED streams since
1356 	 * we don't know what devinfo we should report back even if we
1357 	 * found the minor.  Maybe we should associate streams that are
1358 	 * not currently attached to a PPA with the "first" devinfo node
1359 	 * of the major device to attach -- the one that created the
1360 	 * minor node for the generic device.
1361 	 */
1362 	mutex_enter(&device->gld_devlock);
1363 
1364 	for (mac = device->gld_mac_next;
1365 	    mac != (gld_mac_info_t *)&device->gld_mac_next;
1366 	    mac = mac->gldm_next) {
1367 		gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
1368 
1369 		if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
1370 			continue;	/* this one's not ready yet */
1371 		if (minor <= GLD_MAX_STYLE1_MINOR) {
1372 			/* Style 1 -- look for the corresponding PPA */
1373 			if (minor == GLD_STYLE1_PPA_TO_MINOR(mac->gldm_ppa)) {
1374 				devinfo = mac->gldm_devinfo;
1375 				goto out;	/* found it! */
1376 			} else
1377 				continue;	/* not this PPA */
1378 		}
1379 
1380 		/* We are looking for a clone */
1381 		for (i = 0; i < VLAN_HASHSZ; i++) {
1382 			for (vlan = pvt->vlan_hash[i];
1383 			    vlan != NULL; vlan = vlan->gldv_next) {
1384 				for (str = vlan->gldv_str_next;
1385 				    str != (gld_t *)&vlan->gldv_str_next;
1386 				    str = str->gld_next) {
1387 					ASSERT(str->gld_mac_info == mac);
1388 					if (minor == str->gld_minor) {
1389 						devinfo = mac->gldm_devinfo;
1390 						goto out;
1391 					}
1392 				}
1393 			}
1394 		}
1395 	}
1396 out:
1397 	mutex_exit(&device->gld_devlock);
1398 	mutex_exit(&gld_device_list.gld_devlock);
1399 	return (devinfo);
1400 }
1401 
1402 /*
1403  * STREAMS open routine.  The device dependent driver specifies this as its
1404  * open entry point.
1405  */
1406 /*ARGSUSED2*/
1407 int
1408 gld_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *cred)
1409 {
1410 	gld_mac_pvt_t *mac_pvt;
1411 	gld_t *gld;
1412 	glddev_t *glddev;
1413 	gld_mac_info_t *macinfo;
1414 	minor_t minor = getminor(*dev);
1415 	gld_vlan_t *vlan;
1416 	t_uscalar_t ppa;
1417 
1418 	ASSERT(q != NULL);
1419 
1420 	if (minor > GLD_MAX_STYLE1_MINOR)
1421 		return (ENXIO);
1422 
1423 	ASSERT(q->q_ptr == NULL);	/* Clone device gives us a fresh Q */
1424 
1425 	/* Find our per-major glddev_t structure */
1426 	mutex_enter(&gld_device_list.gld_devlock);
1427 	glddev = gld_devlookup(getmajor(*dev));
1428 
1429 	/*
1430 	 * This glddev will hang around since detach (and therefore
1431 	 * gld_unregister) can't run while we're here in the open routine.
1432 	 */
1433 	mutex_exit(&gld_device_list.gld_devlock);
1434 
1435 	if (glddev == NULL)
1436 		return (ENXIO);
1437 
1438 #ifdef GLD_DEBUG
1439 	if (gld_debug & GLDPROT) {
1440 		if (minor == GLD_USE_STYLE2)
1441 			cmn_err(CE_NOTE, "gld_open(%p, Style 2)", (void *)q);
1442 		else
1443 			cmn_err(CE_NOTE, "gld_open(%p, Style 1, minor = %d)",
1444 			    (void *)q, minor);
1445 	}
1446 #endif
1447 
1448 	/*
1449 	 * get a per-stream structure and link things together so we
1450 	 * can easily find them later.
1451 	 */
1452 	gld = kmem_zalloc(sizeof (gld_t), KM_SLEEP);
1453 
1454 	/*
1455 	 * fill in the structure and state info
1456 	 */
1457 	gld->gld_qptr = q;
1458 	gld->gld_device = glddev;
1459 	gld->gld_state = DL_UNATTACHED;
1460 
1461 	/*
1462 	 * we must atomically find a free minor number and add the stream
1463 	 * to a list, because gld_findminor has to traverse the lists to
1464 	 * determine which minor numbers are free.
1465 	 */
1466 	mutex_enter(&glddev->gld_devlock);
1467 
1468 	/* find a free minor device number for the clone */
1469 	gld->gld_minor = gld_findminor(glddev);
1470 	if (gld->gld_minor == 0) {
1471 		mutex_exit(&glddev->gld_devlock);
1472 		kmem_free(gld, sizeof (gld_t));
1473 		return (ENOSR);
1474 	}
1475 
1476 #ifdef GLD_VERBOSE_DEBUG
1477 	if (gld_debug & GLDPROT)
1478 		cmn_err(CE_NOTE, "gld_open() gld ptr: %p minor: %d",
1479 		    (void *)gld, gld->gld_minor);
1480 #endif
1481 
1482 	if (minor == GLD_USE_STYLE2) {
1483 		gld->gld_style = DL_STYLE2;
1484 		*dev = makedevice(getmajor(*dev), gld->gld_minor);
1485 		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
1486 		gldinsque(gld, glddev->gld_str_prev);
1487 #ifdef GLD_VERBOSE_DEBUG
1488 		if (gld_debug & GLDPROT)
1489 			cmn_err(CE_NOTE, "GLDstruct added to device list");
1490 #endif
1491 		(void) qassociate(q, -1);
1492 		goto done;
1493 	}
1494 
1495 	gld->gld_style = DL_STYLE1;
1496 
1497 	/* the PPA is actually 1 less than the minordev */
1498 	ppa = GLD_STYLE1_MINOR_TO_PPA(minor);
1499 
1500 	for (macinfo = glddev->gld_mac_next;
1501 	    macinfo != (gld_mac_info_t *)(&glddev->gld_mac_next);
1502 	    macinfo = macinfo->gldm_next) {
1503 		ASSERT(macinfo != NULL);
1504 		if (macinfo->gldm_ppa != ppa)
1505 			continue;
1506 
1507 		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
1508 			continue;	/* this one's not ready yet */
1509 
1510 		/*
1511 		 * we found the correct PPA
1512 		 */
1513 		GLDM_LOCK(macinfo, RW_WRITER);
1514 
1515 		gld->gld_mac_info = macinfo;
1516 
1517 		if (macinfo->gldm_send_tagged != NULL)
1518 			gld->gld_send = macinfo->gldm_send_tagged;
1519 		else
1520 			gld->gld_send = macinfo->gldm_send;
1521 
1522 		/* now ready for action */
1523 		gld->gld_state = DL_UNBOUND;
1524 
1525 		if ((vlan = gld_get_vlan(macinfo, VLAN_VID_NONE)) == NULL) {
1526 			GLDM_UNLOCK(macinfo);
1527 			mutex_exit(&glddev->gld_devlock);
1528 			kmem_free(gld, sizeof (gld_t));
1529 			return (EIO);
1530 		}
1531 
1532 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1533 		if (!mac_pvt->started) {
1534 			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
1535 				gld_rem_vlan(vlan);
1536 				GLDM_UNLOCK(macinfo);
1537 				mutex_exit(&glddev->gld_devlock);
1538 				kmem_free(gld, sizeof (gld_t));
1539 				return (EIO);
1540 			}
1541 		}
1542 
1543 		gld->gld_vlan = vlan;
1544 		vlan->gldv_nstreams++;
1545 		gldinsque(gld, vlan->gldv_str_prev);
1546 		*dev = makedevice(getmajor(*dev), gld->gld_minor);
1547 		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
1548 
1549 		GLDM_UNLOCK(macinfo);
1550 #ifdef GLD_VERBOSE_DEBUG
1551 		if (gld_debug & GLDPROT)
1552 			cmn_err(CE_NOTE,
1553 			    "GLDstruct added to instance list");
1554 #endif
1555 		break;
1556 	}
1557 
1558 	if (gld->gld_state == DL_UNATTACHED) {
1559 		mutex_exit(&glddev->gld_devlock);
1560 		kmem_free(gld, sizeof (gld_t));
1561 		return (ENXIO);
1562 	}
1563 
1564 done:
1565 	mutex_exit(&glddev->gld_devlock);
1566 	noenable(WR(q));	/* We'll do the qenables manually */
1567 	qprocson(q);		/* start the queues running */
1568 	qenable(WR(q));
1569 	return (0);
1570 }
1571 
1572 /*
1573  * normal stream close call checks current status and cleans up
1574  * data structures that were dynamically allocated
1575  */
1576 /*ARGSUSED1*/
1577 int
1578 gld_close(queue_t *q, int flag, cred_t *cred)
1579 {
1580 	gld_t	*gld = (gld_t *)q->q_ptr;
1581 	glddev_t *glddev = gld->gld_device;
1582 
1583 	ASSERT(q);
1584 	ASSERT(gld);
1585 
1586 #ifdef GLD_DEBUG
1587 	if (gld_debug & GLDPROT) {
1588 		cmn_err(CE_NOTE, "gld_close(%p, Style %d)",
1589 		    (void *)q, (gld->gld_style & 0x1) + 1);
1590 	}
1591 #endif
1592 
1593 	/* Hold all device streams lists still while we check for a macinfo */
1594 	mutex_enter(&glddev->gld_devlock);
1595 
1596 	if (gld->gld_mac_info != NULL) {
1597 		/* If there's a macinfo, block recv while we change state */
1598 		GLDM_LOCK(gld->gld_mac_info, RW_WRITER);
1599 		gld->gld_flags |= GLD_STR_CLOSING; /* no more rcv putnexts */
1600 		GLDM_UNLOCK(gld->gld_mac_info);
1601 	} else {
1602 		/* no mac DL_ATTACHED right now */
1603 		gld->gld_flags |= GLD_STR_CLOSING;
1604 	}
1605 
1606 	mutex_exit(&glddev->gld_devlock);
1607 
1608 	/*
1609 	 * qprocsoff before we call gld_unbind/gldunattach, so that
1610 	 * we know wsrv isn't in there trying to undo what we're doing.
1611 	 */
1612 	qprocsoff(q);
1613 
1614 	ASSERT(gld->gld_wput_count == 0);
1615 	gld->gld_wput_count = 0;	/* just in case */
1616 
1617 	if (gld->gld_state == DL_IDLE) {
1618 		/* Need to unbind */
1619 		ASSERT(gld->gld_mac_info != NULL);
1620 		(void) gld_unbind(WR(q), NULL);
1621 	}
1622 
1623 	if (gld->gld_state == DL_UNBOUND) {
1624 		/*
1625 		 * Need to unattach.
1626 		 * For a Style 2 stream, gldunattach also
1627 		 * associates the queue with a NULL dip.
1628 		 */
1629 		ASSERT(gld->gld_mac_info != NULL);
1630 		(void) gldunattach(WR(q), NULL);
1631 	}
1632 
1633 	/* disassociate the stream from the device */
1634 	q->q_ptr = WR(q)->q_ptr = NULL;
1635 
1636 	/*
1637 	 * Since we unattached above (if necessary), we know that we're
1638 	 * on the per-major list of unattached streams, rather than a
1639 	 * per-PPA list.  So we know we should hold the devlock.
1640 	 */
1641 	mutex_enter(&glddev->gld_devlock);
1642 	gldremque(gld);			/* remove from Style 2 list */
1643 	mutex_exit(&glddev->gld_devlock);
1644 
1645 	kmem_free(gld, sizeof (gld_t));
1646 
1647 	return (0);
1648 }
1649 
1650 /*
1651  * gld_rsrv (q)
1652  *	simple read service procedure
1653  *	its purpose is to avoid the time it takes for packets
1654  *	to move through IP, so we can get them off the board
1655  *	as fast as possible given limited PC resources.
1656  *
1657  *	This is not normally used in the current implementation.  It
1658  *	can be selected with the undocumented property "fast_recv".
1659  *	If that property is set, gld_recv will send the packet
1660  *	upstream with a putq() rather than a putnext(), thus causing
1661  *	this routine to be scheduled.
1662  */
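/*
 * Illustrative sketch only: with the "fast_recv" property in effect,
 * delivery from gld_sendup() effectively becomes
 *
 *	send = (void (*)(queue_t *, mblk_t *))putq;	(queue for gld_rsrv)
 *
 * instead of
 *
 *	send = (void (*)(queue_t *, mblk_t *))putnext;	(deliver inline)
 *
 * so the interrupt thread returns quickly and this service routine
 * drains the queue later.  See the GLDOPT_FAST_RECV check in
 * gld_sendup() below.
 */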
1663 int
1664 gld_rsrv(queue_t *q)
1665 {
1666 	mblk_t *mp;
1667 
1668 	while ((mp = getq(q)) != NULL) {
1669 		if (canputnext(q)) {
1670 			putnext(q, mp);
1671 		} else {
1672 			freemsg(mp);
1673 		}
1674 	}
1675 	return (0);
1676 }
1677 
1678 /*
1679  * gld_wput (q, mp)
1680  * general gld stream write put routine. Receives fastpath data from upper
1681  * modules and processes it immediately.  ioctl and M_PROTO/M_PCPROTO are
1682  * queued for later processing by the service procedure.
1683  */
1684 
1685 int
1686 gld_wput(queue_t *q, mblk_t *mp)
1687 {
1688 	gld_t  *gld = (gld_t *)(q->q_ptr);
1689 	int	rc;
1690 	boolean_t multidata = B_TRUE;
1691 
1692 #ifdef GLD_DEBUG
1693 	if (gld_debug & GLDTRACE)
1694 		cmn_err(CE_NOTE, "gld_wput(%p %p): type %x",
1695 		    (void *)q, (void *)mp, DB_TYPE(mp));
1696 #endif
1697 	switch (DB_TYPE(mp)) {
1698 
1699 	case M_DATA:
1700 		/* fast data / raw support */
1701 		/* we must be DL_ATTACHED and DL_BOUND to do this */
1702 		/* Tricky to access memory without taking the mutex */
1703 		if ((gld->gld_flags & (GLD_RAW | GLD_FAST)) == 0 ||
1704 		    gld->gld_state != DL_IDLE) {
1705 			merror(q, mp, EPROTO);
1706 			break;
1707 		}
1708 		multidata = B_FALSE;
1709 		/* LINTED: E_CASE_FALLTHRU */
1710 	case M_MULTIDATA:
1711 		/* Only call gld_start() directly if nothing queued ahead */
1712 		/* No guarantees about ordering with different threads */
1713 		if (q->q_first)
1714 			goto use_wsrv;
1715 
1716 		/*
1717 		 * This can happen if wsrv has taken off the last mblk but
1718 		 * is still processing it.
1719 		 */
1720 		membar_consumer();
1721 		if (gld->gld_in_wsrv)
1722 			goto use_wsrv;
1723 
1724 		/*
1725 		 * Keep a count of current wput calls to start.
1726 		 * Nonzero count delays any attempted DL_UNBIND.
1727 		 * See comments above gld_start().
1728 		 */
1729 		atomic_add_32((uint32_t *)&gld->gld_wput_count, 1);
1730 		membar_enter();
1731 
1732 		/* Recheck state now wput_count is set to prevent DL_UNBIND */
1733 		/* If this Q is in process of DL_UNBIND, don't call start */
1734 		if (gld->gld_state != DL_IDLE || gld->gld_in_unbind) {
1735 			/* Extremely unlikely */
1736 			atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
1737 			goto use_wsrv;
1738 		}
1739 
1740 		rc = (multidata) ? gld_start_mdt(q, mp, GLD_WPUT) :
1741 		    gld_start(q, mp, GLD_WPUT, UPRI(gld, mp->b_band));
1742 
1743 		/* Allow DL_UNBIND again */
1744 		membar_exit();
1745 		atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
1746 
1747 		if (rc == GLD_NORESOURCES)
1748 			qenable(q);
1749 		break;	/*  Done with this packet */
1750 
1751 use_wsrv:
1752 		/* Q not empty, in DL_DETACH, or start gave NORESOURCES */
1753 		(void) putq(q, mp);
1754 		qenable(q);
1755 		break;
1756 
1757 	case M_IOCTL:
1758 		/* ioctl relies on wsrv single threading per queue */
1759 		(void) putq(q, mp);
1760 		qenable(q);
1761 		break;
1762 
1763 	case M_CTL:
1764 		(void) putq(q, mp);
1765 		qenable(q);
1766 		break;
1767 
1768 	case M_FLUSH:		/* canonical flush handling */
1769 		/* XXX Should these be FLUSHALL? */
1770 		if (*mp->b_rptr & FLUSHW)
1771 			flushq(q, 0);
1772 		if (*mp->b_rptr & FLUSHR) {
1773 			flushq(RD(q), 0);
1774 			*mp->b_rptr &= ~FLUSHW;
1775 			qreply(q, mp);
1776 		} else
1777 			freemsg(mp);
1778 		break;
1779 
1780 	case M_PROTO:
1781 	case M_PCPROTO:
1782 		/* these rely on wsrv single threading per queue */
1783 		(void) putq(q, mp);
1784 		qenable(q);
1785 		break;
1786 
1787 	default:
1788 #ifdef GLD_DEBUG
1789 		if (gld_debug & GLDETRACE)
1790 			cmn_err(CE_WARN,
1791 			    "gld: Unexpected packet type from queue: 0x%x",
1792 			    DB_TYPE(mp));
1793 #endif
1794 		freemsg(mp);
1795 	}
1796 	return (0);
1797 }
1798 
1799 /*
1800  * gld_wsrv - Incoming messages are processed according to the DLPI protocol
1801  * specification.
1802  *
1803  * wsrv is single-threaded per Q.  We make use of this to avoid taking the
1804  * lock for reading data items that are only ever written by us.
1805  */
1806 
1807 int
1808 gld_wsrv(queue_t *q)
1809 {
1810 	mblk_t *mp;
1811 	gld_t *gld = (gld_t *)q->q_ptr;
1812 	gld_mac_info_t *macinfo;
1813 	union DL_primitives *prim;
1814 	int err;
1815 	boolean_t multidata;
1816 
1817 #ifdef GLD_DEBUG
1818 	if (gld_debug & GLDTRACE)
1819 		cmn_err(CE_NOTE, "gld_wsrv(%p)", (void *)q);
1820 #endif
1821 
1822 	ASSERT(!gld->gld_in_wsrv);
1823 
1824 	gld->gld_xwait = B_FALSE; /* We are now going to process this Q */
1825 
1826 	if (q->q_first == NULL)
1827 		return (0);
1828 
1829 	macinfo = gld->gld_mac_info;
1830 
1831 	/*
1832 	 * Help wput avoid a call to gld_start if there might be a message
1833 	 * previously queued by that thread being processed here.
1834 	 */
1835 	gld->gld_in_wsrv = B_TRUE;
1836 	membar_enter();
1837 
1838 	while ((mp = getq(q)) != NULL) {
1839 		switch (DB_TYPE(mp)) {
1840 		case M_DATA:
1841 		case M_MULTIDATA:
1842 			multidata = (DB_TYPE(mp) == M_MULTIDATA);
1843 
1844 			/*
1845 			 * This is a retry of a previously processed
1846 			 * UNITDATA_REQ, or a RAW or FAST message from above.
1847 			 */
1848 			if (macinfo == NULL) {
1849 				/* No longer attached to a PPA, drop packet */
1850 				freemsg(mp);
1851 				break;
1852 			}
1853 
1854 			gld->gld_sched_ran = B_FALSE;
1855 			membar_enter();
1856 			err = (multidata) ? gld_start_mdt(q, mp, GLD_WSRV) :
1857 			    gld_start(q, mp, GLD_WSRV, UPRI(gld, mp->b_band));
1858 			if (err == GLD_NORESOURCES) {
1859 				/* gld_sched will qenable us later */
1860 				gld->gld_xwait = B_TRUE; /* want qenable */
1861 				membar_enter();
1862 				/*
1863 				 * v2:  we're not holding the lock; it's
1864 				 * possible that the driver could have already
1865 				 * called gld_sched (following up on its
1866 				 * return of GLD_NORESOURCES), before we got a
1867 				 * chance to do the putbq() and set gld_xwait.
1868 				 * So if we saw a call to gld_sched that
1869 				 * examined this queue, since our call to
1870 				 * gld_start() above, then it's possible we've
1871 				 * already seen the only call to gld_sched()
1872 				 * we're ever going to see.  So we better retry
1873 				 * transmitting this packet right now.
1874 				 */
1875 				if (gld->gld_sched_ran) {
1876 #ifdef GLD_DEBUG
1877 					if (gld_debug & GLDTRACE)
1878 						cmn_err(CE_NOTE, "gld_wsrv: "
1879 						    "sched was called");
1880 #endif
1881 					break;	/* try again right now */
1882 				}
1883 				gld->gld_in_wsrv = B_FALSE;
1884 				return (0);
1885 			}
1886 			break;
1887 
1888 		case M_IOCTL:
1889 			(void) gld_ioctl(q, mp);
1890 			break;
1891 
1892 		case M_CTL:
1893 			if (macinfo == NULL) {
1894 				freemsg(mp);
1895 				break;
1896 			}
1897 
1898 			if (macinfo->gldm_mctl != NULL) {
1899 				GLDM_LOCK(macinfo, RW_WRITER);
1900 				(void) (*macinfo->gldm_mctl) (macinfo, q, mp);
1901 				GLDM_UNLOCK(macinfo);
1902 			} else {
1903 				/* This driver doesn't recognize, just drop */
1904 				freemsg(mp);
1905 			}
1906 			break;
1907 
1908 		case M_PROTO:	/* Will be an DLPI message of some type */
1909 		case M_PCPROTO:
1910 			if ((err = gld_cmds(q, mp)) != GLDE_OK) {
1911 				if (err == GLDE_RETRY) {
1912 					gld->gld_in_wsrv = B_FALSE;
1913 					return (0); /* quit while we're ahead */
1914 				}
1915 				prim = (union DL_primitives *)mp->b_rptr;
1916 				dlerrorack(q, mp, prim->dl_primitive, err, 0);
1917 			}
1918 			break;
1919 
1920 		default:
1921 			/* This should never happen */
1922 #ifdef GLD_DEBUG
1923 			if (gld_debug & GLDERRS)
1924 				cmn_err(CE_WARN,
1925 				    "gld_wsrv: db_type(%x) not supported",
1926 				    mp->b_datap->db_type);
1927 #endif
1928 			freemsg(mp);	/* unknown types are discarded */
1929 			break;
1930 		}
1931 	}
1932 
1933 	membar_exit();
1934 	gld->gld_in_wsrv = B_FALSE;
1935 	return (0);
1936 }
1937 
1938 /*
1939  * gld_start() can get called from gld_wput(), gld_wsrv(), or gld_unitdata().
1940  *
1941  * We only come directly from wput() in the GLD_FAST (fastpath) or RAW case.
1942  *
1943  * In particular, we must avoid calling gld_precv*() if we came from wput().
1944  * gld_precv*() is where we, on the transmit side, loop back our outgoing
1945  * packets to the receive side if we are in physical promiscuous mode.
1946  * Since the receive side holds a lock across its call to the upstream
1947  * putnext, and that upstream module could well have looped back to our
1948  * wput() routine on the same thread, we cannot call gld_precv* from here
1949  * for fear of causing a recursive lock entry in our receive code.
1950  *
1951  * There is a problem here when coming from gld_wput().  While wput
1952  * only comes here if the queue is attached to a PPA and bound to a SAP
1953  * and there are no messages on the queue ahead of the M_DATA that could
1954  * change that, it is theoretically possible that another thread could
1955  * now wput a DL_UNBIND and a DL_DETACH message, and the wsrv() routine
1956  * could wake up and process them, before we finish processing this
1957  * send of the M_DATA.  This can only possibly happen on a Style 2 RAW or
1958  * FAST (fastpath) stream:  non RAW/FAST streams always go through wsrv(),
1959  * and Style 1 streams only DL_DETACH in the close routine, where
1960  * qprocsoff() protects us.  If this happens we could end up calling
1961  * gldm_send() after we have detached the stream and possibly called
1962  * gldm_stop().  Worse, once the number of attached streams goes to zero,
1963  * detach/unregister could be called, and the macinfo could go away entirely.
1964  *
1965  * No one has ever seen this happen.
1966  *
1967  * It is some trouble to fix this, and we would rather not add any mutex
1968  * logic into the wput() routine, which is supposed to be a "fast"
1969  * path.
1970  *
1971  * What I've done is use an atomic counter to keep a count of the number
1972  * of threads currently calling gld_start() from wput() on this stream.
1973  * If DL_DETACH sees this as nonzero, it putbqs the request back onto
1974  * the queue and qenables, hoping to have better luck next time.  Since
1975  * people shouldn't be trying to send after they've asked to DL_DETACH,
1976  * hopefully very soon all the wput=>start threads should have returned
1977  * and the DL_DETACH will succeed.  It's hard to test this since the odds
1978  * of the failure even trying to happen are so small.  I probably could
1979  * have ignored the whole issue and never been the worse for it.
1980  */
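/*
 * For reference, the wput() side of that counter protocol is, in
 * outline (this is a summary of the code in gld_wput() above, not
 * additional logic):
 *
 *	atomic_add_32((uint32_t *)&gld->gld_wput_count, 1);
 *	membar_enter();
 *	if (gld->gld_state != DL_IDLE || gld->gld_in_unbind)
 *		back out the count, requeue, and let wsrv() handle it;
 *	else
 *		rc = gld_start(q, mp, GLD_WPUT, ...);
 *	membar_exit();
 *	atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
 */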
1981 static int
1982 gld_start(queue_t *q, mblk_t *mp, int caller, uint32_t upri)
1983 {
1984 	mblk_t *nmp;
1985 	gld_t *gld = (gld_t *)q->q_ptr;
1986 	gld_mac_info_t *macinfo;
1987 	gld_mac_pvt_t *mac_pvt;
1988 	int rc;
1989 	gld_interface_t *ifp;
1990 	pktinfo_t pktinfo;
1991 	uint32_t vtag;
1992 	gld_vlan_t *vlan;
1993 
1994 	ASSERT(DB_TYPE(mp) == M_DATA);
1995 	macinfo = gld->gld_mac_info;
1996 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1997 	ifp = mac_pvt->interfacep;
1998 	vlan = (gld_vlan_t *)gld->gld_vlan;
1999 
2000 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_TX) != 0) {
2001 		freemsg(mp);
2002 #ifdef GLD_DEBUG
2003 		if (gld_debug & GLDERRS)
2004 			cmn_err(CE_WARN,
2005 			    "gld_start: failed to interpret outbound packet");
2006 #endif
2007 		vlan->gldv_stats->glds_xmtbadinterp++;
2008 		return (GLD_BADARG);
2009 	}
2010 
2011 	/*
2012 	 * We're not holding the lock for this check.  If the promiscuous
2013 	 * state is in flux it doesn't matter much if we get this wrong.
2014 	 */
2015 	if (mac_pvt->nprom > 0) {
2016 		/*
2017 		 * We want to loopback to the receive side, but to avoid
2018 		 * recursive lock entry:  if we came from wput(), which
2019 		 * could have looped back via IP from our own receive
2020 		 * interrupt thread, we decline this request.  wput()
2021 		 * will then queue the packet for wsrv().  This means
2022 		 * that when snoop is running we don't get the advantage
2023 		 * of the wput() multithreaded direct entry to the
2024 		 * driver's send routine.
2025 		 */
2026 		if (caller == GLD_WPUT) {
2027 			(void) putbq(q, mp);
2028 			return (GLD_NORESOURCES);
2029 		}
2030 		if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
2031 			nmp = dupmsg_noloan(mp);
2032 		else
2033 			nmp = dupmsg(mp);
2034 	} else
2035 		nmp = NULL;		/* we need no loopback */
2036 
2037 	vtag = GLD_MK_VTAG(vlan->gldv_ptag, upri);
2038 	if (ifp->hdr_size > 0 &&
2039 	    pktinfo.pktLen > ifp->hdr_size + (vtag == 0 ? 0 : VTAG_SIZE) +
2040 	    macinfo->gldm_maxpkt) {
2041 		freemsg(mp);	/* discard oversized outbound packet */
2042 		if (nmp)
2043 			freemsg(nmp);	/* free the duped message */
2044 #ifdef GLD_DEBUG
2045 		if (gld_debug & GLDERRS)
2046 			cmn_err(CE_WARN,
2047 			    "gld_start: oversize outbound packet, size %d, "
2048 			    "max %d", pktinfo.pktLen,
2049 			    ifp->hdr_size + macinfo->gldm_maxpkt);
2050 #endif
2051 		vlan->gldv_stats->glds_xmtbadinterp++;
2052 		return (GLD_BADARG);
2053 	}
2054 
2055 	rc = (*gld->gld_send)(macinfo, mp, vtag);
2056 
2057 	if (rc != GLD_SUCCESS) {
2058 		if (rc == GLD_NORESOURCES) {
2059 			vlan->gldv_stats->glds_xmtretry++;
2060 			(void) putbq(q, mp);
2061 		} else {
2062 			/* transmit error; drop the packet */
2063 			freemsg(mp);
2064 			/* We're supposed to count failed attempts as well */
2065 			UPDATE_STATS(vlan, pktinfo, 1);
2066 #ifdef GLD_DEBUG
2067 			if (gld_debug & GLDERRS)
2068 				cmn_err(CE_WARN,
2069 				    "gld_start: gldm_send failed %d", rc);
2070 #endif
2071 		}
2072 		if (nmp)
2073 			freemsg(nmp);	/* free the dupped message */
2074 		return (rc);
2075 	}
2076 
2077 	UPDATE_STATS(vlan, pktinfo, 1);
2078 
2079 	/*
2080 	 * Loopback case. The message needs to be sent back up on the
2081 	 * read side. This silently fails if the dupmsg above failed.
2082 	 * That is probably OK: if there is no memory to dup the
2083 	 * block, there isn't much we could do anyway.
2084 	 */
2085 	if (nmp) {
2086 		GLDM_LOCK(macinfo, RW_WRITER);
2087 		gld_precv(macinfo, vlan, nmp);
2088 		GLDM_UNLOCK(macinfo);
2089 	}
2090 
2091 	return (GLD_SUCCESS);
2092 }
2093 
2094 /*
2095  * With MDT V.2 a single message mp can have one header area and multiple
2096  * payload areas. A packet is described by dl_pkt_info, and each packet can
2097  * span multiple payload areas (currently with TCP, each packet will have one
2098  * header and at the most two payload areas). MACs might have a limit on the
2099  * number of payload segments (i.e. per packet scatter-gather limit), and
2100  * MDT V.2 has a way of specifying that with mdt_span_limit; the MAC driver
2101  * might also have a limit on the total number of payloads in a message, and
2102  * that is specified by mdt_max_pld.
2103  */
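/*
 * Sketch of how an MDT-capable driver might advertise these limits;
 * the specific values below are illustrative assumptions, not
 * requirements:
 *
 *	macinfo->gldm_options |= GLDOPT_MDT;
 *	macinfo->gldm_mdt_segs = 16;	(reported upstream as mdt_max_pld)
 *	macinfo->gldm_mdt_sgl = 4;	(reported as mdt_span_limit)
 *
 * together with gldm_mdt_pre(), gldm_mdt_send() and gldm_mdt_post()
 * entry points, which this routine calls below.  See gld_cap_ack() for
 * how the limits are passed upstream in DL_CAPAB_MDT.
 */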
2104 static int
2105 gld_start_mdt(queue_t *q, mblk_t *mp, int caller)
2106 {
2107 	mblk_t *nextmp;
2108 	gld_t *gld = (gld_t *)q->q_ptr;
2109 	gld_mac_info_t *macinfo = gld->gld_mac_info;
2110 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2111 	int numpacks, mdtpacks;
2112 	gld_interface_t *ifp = mac_pvt->interfacep;
2113 	pktinfo_t pktinfo;
2114 	gld_vlan_t *vlan = (gld_vlan_t *)gld->gld_vlan;
2115 	boolean_t doloop = B_FALSE;
2116 	multidata_t *dlmdp;
2117 	pdescinfo_t pinfo;
2118 	pdesc_t *dl_pkt;
2119 	void *cookie;
2120 	uint_t totLen = 0;
2121 
2122 	ASSERT(DB_TYPE(mp) == M_MULTIDATA);
2123 
2124 	/*
2125 	 * We're not holding the lock for this check.  If the promiscuous
2126 	 * state is in flux it doesn't matter much if we get this wrong.
2127 	 */
2128 	if (mac_pvt->nprom > 0) {
2129 		/*
2130 		 * We want to loopback to the receive side, but to avoid
2131 		 * recursive lock entry:  if we came from wput(), which
2132 		 * could have looped back via IP from our own receive
2133 		 * interrupt thread, we decline this request.  wput()
2134 		 * will then queue the packet for wsrv().  This means
2135 		 * that when snoop is running we don't get the advantage
2136 		 * of the wput() multithreaded direct entry to the
2137 		 * driver's send routine.
2138 		 */
2139 		if (caller == GLD_WPUT) {
2140 			(void) putbq(q, mp);
2141 			return (GLD_NORESOURCES);
2142 		}
2143 		doloop = B_TRUE;
2144 
2145 		/*
2146 		 * unlike the M_DATA case, we don't have to call
2147 		 * dupmsg_noloan here because mmd_transform
2148 		 * (called by gld_precv_mdt) will make a copy of
2149 		 * each dblk.
2150 		 */
2151 	}
2152 
2153 	while (mp != NULL) {
2154 		/*
2155 		 * The lower layer driver only gets a single multidata
2156 		 * message; this also makes it easier to handle noresources.
2157 		 */
2158 		nextmp = mp->b_cont;
2159 		mp->b_cont = NULL;
2160 
2161 		/*
2162 		 * Get number of packets in this message; if nothing
2163 		 * to transmit, go to next message.
2164 		 */
2165 		dlmdp = mmd_getmultidata(mp);
2166 		if ((mdtpacks = (int)mmd_getcnt(dlmdp, NULL, NULL)) == 0) {
2167 			freemsg(mp);
2168 			mp = nextmp;
2169 			continue;
2170 		}
2171 
2172 		/*
2173 		 * Run interpreter to populate media specific pktinfo fields.
2174 		 * This collects per MDT message information like sap,
2175 		 * broad/multicast etc.
2176 		 */
2177 		(void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, &pktinfo,
2178 		    GLD_MDT_TX);
2179 
2180 		numpacks = (*macinfo->gldm_mdt_pre)(macinfo, mp, &cookie);
2181 
2182 		if (numpacks > 0) {
2183 			/*
2184 			 * Driver indicates it can transmit at least 1, and
2185 			 * possibly all, packets in MDT message.
2186 			 */
2187 			int count = numpacks;
2188 
2189 			for (dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo);
2190 			    (dl_pkt != NULL);
2191 			    dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo)) {
2192 				/*
2193 				 * Format this packet by adding link header and
2194 				 * adjusting pdescinfo to include it; get
2195 				 * packet length.
2196 				 */
2197 				(void) (*ifp->interpreter_mdt)(macinfo, NULL,
2198 				    &pinfo, &pktinfo, GLD_MDT_TXPKT);
2199 
2200 				totLen += pktinfo.pktLen;
2201 
2202 				/*
2203 				 * Loop back packet before handing to the
2204 				 * driver.
2205 				 */
2206 				if (doloop &&
2207 				    mmd_adjpdesc(dl_pkt, &pinfo) != NULL) {
2208 					GLDM_LOCK(macinfo, RW_WRITER);
2209 					gld_precv_mdt(macinfo, vlan, mp,
2210 					    dl_pkt, &pktinfo);
2211 					GLDM_UNLOCK(macinfo);
2212 				}
2213 
2214 				/*
2215 				 * And send off to driver.
2216 				 */
2217 				(*macinfo->gldm_mdt_send)(macinfo, cookie,
2218 				    &pinfo);
2219 
2220 				/*
2221 				 * Be careful not to invoke getnextpdesc if we
2222 				 * already sent the last packet, since driver
2223 				 * might have posted it to hardware causing a
2224 				 * completion and freemsg() so the MDT data
2225 				 * structures might not be valid anymore.
2226 				 */
2227 				if (--count == 0)
2228 					break;
2229 			}
2230 			(*macinfo->gldm_mdt_post)(macinfo, mp, cookie);
2231 			pktinfo.pktLen = totLen;
2232 			UPDATE_STATS(vlan, pktinfo, numpacks);
2233 
2234 			/*
2235 			 * In the noresources case (when driver indicates it
2236 			 * cannot transmit all packets in the MDT message),
2237 			 * adjust to skip the first few packets on retrial.
2238 			 */
2239 			if (numpacks != mdtpacks) {
2240 				/*
2241 				 * Release already processed packet descriptors.
2242 				 */
2243 				for (count = 0; count < numpacks; count++) {
2244 					dl_pkt = mmd_getfirstpdesc(dlmdp,
2245 					    &pinfo);
2246 					mmd_rempdesc(dl_pkt);
2247 				}
2248 				vlan->gldv_stats->glds_xmtretry++;
2249 				mp->b_cont = nextmp;
2250 				(void) putbq(q, mp);
2251 				return (GLD_NORESOURCES);
2252 			}
2253 		} else if (numpacks == 0) {
2254 			/*
2255 			 * Driver indicates it cannot transmit any packets
2256 			 * currently and will request retrial later.
2257 			 */
2258 			vlan->gldv_stats->glds_xmtretry++;
2259 			mp->b_cont = nextmp;
2260 			(void) putbq(q, mp);
2261 			return (GLD_NORESOURCES);
2262 		} else {
2263 			ASSERT(numpacks == -1);
2264 			/*
2265 			 * We're supposed to count failed attempts as well.
2266 			 */
2267 			dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo);
2268 			while (dl_pkt != NULL) {
2269 				/*
2270 				 * Call interpreter to determine total packet
2271 				 * bytes that are being dropped.
2272 				 */
2273 				(void) (*ifp->interpreter_mdt)(macinfo, NULL,
2274 				    &pinfo, &pktinfo, GLD_MDT_TXPKT);
2275 
2276 				totLen += pktinfo.pktLen;
2277 
2278 				dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo);
2279 			}
2280 			pktinfo.pktLen = totLen;
2281 			UPDATE_STATS(vlan, pktinfo, mdtpacks);
2282 
2283 			/*
2284 			 * Transmit error; drop the message, move on
2285 			 * to the next one.
2286 			 */
2287 			freemsg(mp);
2288 		}
2289 
2290 		/*
2291 		 * Process the next multidata block, if there is one.
2292 		 */
2293 		mp = nextmp;
2294 	}
2295 
2296 	return (GLD_SUCCESS);
2297 }
2298 
2299 /*
2300  * gld_intr (macinfo)
2301  */
2302 uint_t
2303 gld_intr(gld_mac_info_t *macinfo)
2304 {
2305 	ASSERT(macinfo != NULL);
2306 
2307 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
2308 		return (DDI_INTR_UNCLAIMED);
2309 
2310 	return ((*macinfo->gldm_intr)(macinfo));
2311 }
2312 
2313 /*
2314  * gld_sched (macinfo)
2315  *
2316  * This routine scans the streams that refer to a specific macinfo
2317  * structure and causes the STREAMS scheduler to try to run them if
2318  * they are marked as waiting for the transmit buffer.
2319  */
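/*
 * Typical usage from a driver (illustrative): after the driver has
 * reported GLD_NORESOURCES and transmit resources later become
 * available, e.g. in its tx-reclaim interrupt path, it calls
 *
 *	gld_sched(macinfo);
 *
 * which qenables any stream that set gld_xwait while waiting.
 */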
2320 void
2321 gld_sched(gld_mac_info_t *macinfo)
2322 {
2323 	gld_mac_pvt_t *mac_pvt;
2324 	gld_t *gld;
2325 	gld_vlan_t *vlan;
2326 	int i;
2327 
2328 	ASSERT(macinfo != NULL);
2329 
2330 	GLDM_LOCK(macinfo, RW_WRITER);
2331 
2332 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
2333 		/* We're probably being called from a leftover interrupt */
2334 		GLDM_UNLOCK(macinfo);
2335 		return;
2336 	}
2337 
2338 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2339 
2340 	for (i = 0; i < VLAN_HASHSZ; i++) {
2341 		for (vlan = mac_pvt->vlan_hash[i];
2342 		    vlan != NULL; vlan = vlan->gldv_next) {
2343 			for (gld = vlan->gldv_str_next;
2344 			    gld != (gld_t *)&vlan->gldv_str_next;
2345 			    gld = gld->gld_next) {
2346 				ASSERT(gld->gld_mac_info == macinfo);
2347 				gld->gld_sched_ran = B_TRUE;
2348 				membar_enter();
2349 				if (gld->gld_xwait) {
2350 					gld->gld_xwait = B_FALSE;
2351 					qenable(WR(gld->gld_qptr));
2352 				}
2353 			}
2354 		}
2355 	}
2356 
2357 	GLDM_UNLOCK(macinfo);
2358 }
2359 
2360 /*
2361  * gld_precv (macinfo, mp)
2362  * called from gld_start to loopback a packet when in promiscuous mode
2363  */
2364 static void
2365 gld_precv(gld_mac_info_t *macinfo, gld_vlan_t *vlan, mblk_t *mp)
2366 {
2367 	gld_mac_pvt_t *mac_pvt;
2368 	gld_interface_t *ifp;
2369 	pktinfo_t pktinfo;
2370 
2371 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
2372 
2373 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2374 	ifp = mac_pvt->interfacep;
2375 
2376 	/*
2377 	 * call the media specific packet interpreter routine
2378 	 */
2379 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RXLOOP) != 0) {
2380 		freemsg(mp);
2381 		BUMP(vlan->gldv_stats->glds_rcvbadinterp, 1);
2382 #ifdef GLD_DEBUG
2383 		if (gld_debug & GLDERRS)
2384 			cmn_err(CE_WARN,
2385 			    "gld_precv: interpreter failed");
2386 #endif
2387 		return;
2388 	}
2389 
2390 	gld_sendup(macinfo, vlan, &pktinfo, mp, gld_paccept);
2391 }
2392 
2393 /*
2394  * called from gld_start_mdt to loopback packet(s) when in promiscuous mode
2395  */
2396 static void
2397 gld_precv_mdt(gld_mac_info_t *macinfo, gld_vlan_t *vlan, mblk_t *mp,
2398     pdesc_t *dl_pkt, pktinfo_t *pktinfo)
2399 {
2400 	mblk_t *adjmp;
2401 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2402 	gld_interface_t *ifp = mac_pvt->interfacep;
2403 
2404 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
2405 
2406 	/*
2407 	 * Get source/destination.
2408 	 */
2409 	(void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, pktinfo,
2410 	    GLD_MDT_RXLOOP);
2411 	if ((adjmp = mmd_transform(dl_pkt)) != NULL)
2412 		gld_sendup(macinfo, vlan, pktinfo, adjmp, gld_paccept);
2413 }
2414 
2415 /*
2416  * gld_recv (macinfo, mp)
2417  * called with a MAC-level packet in an mblk; take the maclock,
2418  * try the ip4q and ip6q hack, and otherwise call gld_sendup.
2419  *
2420  * V0 drivers already are holding the mutex when they call us.
2421  */
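/*
 * Typical usage from a driver's receive interrupt handler
 * (illustrative sketch):
 *
 *	gld_recv(macinfo, mp);			(untagged frame)
 *	gld_recv_tagged(macinfo, mp, vtag);	(VLAN-tagged frame)
 *
 * where vtag carries the VLAN id/priority extracted by the driver.
 */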
2422 void
2423 gld_recv(gld_mac_info_t *macinfo, mblk_t *mp)
2424 {
2425 	gld_recv_tagged(macinfo, mp, VLAN_VTAG_NONE);
2426 }
2427 
2428 void
2429 gld_recv_tagged(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t vtag)
2430 {
2431 	gld_mac_pvt_t *mac_pvt;
2432 	char pbuf[3*GLD_MAX_ADDRLEN];
2433 	pktinfo_t pktinfo;
2434 	gld_interface_t *ifp;
2435 	queue_t *ipq = NULL;
2436 	gld_vlan_t *vlan;
2437 	uint32_t vid;
2438 
2439 	ASSERT(macinfo != NULL);
2440 	ASSERT(mp->b_datap->db_ref);
2441 
2442 	GLDM_LOCK(macinfo, RW_READER);
2443 
2444 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
2445 		/* We're probably being called from a leftover interrupt */
2446 		freemsg(mp);
2447 		goto done;
2448 	}
2449 
2450 	vid = GLD_VTAG_VID(vtag);
2451 	if ((vlan = gld_find_vlan(macinfo, vid)) == NULL) {
2452 		freemsg(mp);
2453 		goto done;
2454 	}
2455 
2456 	/*
2457 	 * Check whether underlying media code supports the IPQ hack,
2458 	 * and if so, whether the interpreter can quickly parse the
2459 	 * packet to get some relevant parameters.
2460 	 */
2461 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2462 	ifp = mac_pvt->interfacep;
2463 	if (((*ifp->interpreter)(macinfo, mp, &pktinfo,
2464 	    GLD_RXQUICK) == 0) && (vlan->gldv_ipq_flags == 0)) {
2465 		switch (pktinfo.ethertype) {
2466 		case ETHERTYPE_IP:
2467 			ipq = vlan->gldv_ipq;
2468 			break;
2469 		case ETHERTYPE_IPV6:
2470 			ipq = vlan->gldv_ipv6q;
2471 			break;
2472 		}
2473 	}
2474 
2475 	BUMP(vlan->gldv_stats->glds_bytercv64, pktinfo.pktLen);
2476 	BUMP(vlan->gldv_stats->glds_pktrcv64, 1);
2477 
2478 	/*
2479 	 * Special case for IP; we can simply do the putnext here, if:
2480 	 * o ipq != NULL, and therefore:
2481 	 * - the device type supports IPQ (ethernet and IPoIB);
2482 	 * - the interpreter could quickly parse the packet;
2483 	 * - there are no PROMISC_SAP streams (on this VLAN);
2484 	 * - there is one, and only one, IP stream bound (to this VLAN);
2485 	 * - that stream is a "fastpath" stream;
2486 	 * - the packet is of type ETHERTYPE_IP or ETHERTYPE_IPV6
2487 	 *
2488 	 * o the packet is specifically for me, and therefore:
2489 	 * - the packet is not multicast or broadcast (fastpath only
2490 	 *   wants unicast packets).
2491 	 *
2492 	 * o the stream is not asserting flow control.
2493 	 */
2494 	if (ipq != NULL &&
2495 	    pktinfo.isForMe &&
2496 	    canputnext(ipq)) {
2497 		/*
2498 		 * Skip the mac header. We know there is no LLC1/SNAP header
2499 		 * in this packet
2500 		 */
2501 		mp->b_rptr += pktinfo.macLen;
2502 		putnext(ipq, mp);
2503 		goto done;
2504 	}
2505 
2506 	/*
2507 	 * call the media specific packet interpreter routine
2508 	 */
2509 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RX) != 0) {
2510 		BUMP(vlan->gldv_stats->glds_rcvbadinterp, 1);
2511 #ifdef GLD_DEBUG
2512 		if (gld_debug & GLDERRS)
2513 			cmn_err(CE_WARN,
2514 			    "gld_recv_tagged: interpreter failed");
2515 #endif
2516 		freemsg(mp);
2517 		goto done;
2518 	}
2519 
2520 	/*
2521 	 * This is safe even if vtag is VLAN_VTAG_NONE
2522 	 */
2523 
2524 	pktinfo.vid = vid;
2525 	pktinfo.cfi = GLD_VTAG_CFI(vtag);
2526 #ifdef GLD_DEBUG
2527 	if (pktinfo.cfi != VLAN_CFI_ETHER)
2528 		cmn_err(CE_WARN, "gld_recv_tagged: non-ETHER CFI");
2529 #endif
2530 	pktinfo.user_pri = GLD_VTAG_PRI(vtag);
2531 
2532 #ifdef GLD_DEBUG
2533 	if ((gld_debug & GLDRECV) &&
2534 	    (!(gld_debug & GLDNOBR) ||
2535 	    (!pktinfo.isBroadcast && !pktinfo.isMulticast))) {
2536 		char pbuf2[3*GLD_MAX_ADDRLEN];
2537 
2538 		cmn_err(CE_CONT, "gld_recv_tagged: machdr=<%s -> %s>\n",
2539 		    gld_macaddr_sprintf(pbuf, pktinfo.shost,
2540 		    macinfo->gldm_addrlen), gld_macaddr_sprintf(pbuf2,
2541 		    pktinfo.dhost, macinfo->gldm_addrlen));
2542 		cmn_err(CE_CONT, "gld_recv_tagged: VlanId %d UserPri %d\n",
2543 		    pktinfo.vid,
2544 		    pktinfo.user_pri);
2545 		cmn_err(CE_CONT, "gld_recv_tagged: ethertype: %4x Len: %4d "
2546 		    "Hdr: %d,%d isMulticast: %s\n",
2547 		    pktinfo.ethertype,
2548 		    pktinfo.pktLen,
2549 		    pktinfo.macLen,
2550 		    pktinfo.hdrLen,
2551 		    pktinfo.isMulticast ? "Y" : "N");
2552 	}
2553 #endif
2554 
2555 	gld_sendup(macinfo, vlan, &pktinfo, mp, gld_accept);
2556 
2557 done:
2558 	GLDM_UNLOCK(macinfo);
2559 }
2560 
2561 /* =================================================================== */
2562 /* receive group: called from gld_recv and gld_precv* with maclock held */
2563 /* =================================================================== */
2564 
2565 /*
2566  * gld_sendup (macinfo, mp)
2567  * called with an ethernet packet in an mblk; must decide whether
2568  * the packet is for us and which streams to queue it to.
2569  */
2570 static void
2571 gld_sendup(gld_mac_info_t *macinfo, gld_vlan_t *vlan, pktinfo_t *pktinfo,
2572     mblk_t *mp, int (*acceptfunc)())
2573 {
2574 	gld_t *gld;
2575 	gld_t *fgld = NULL;
2576 	mblk_t *nmp;
2577 	void (*send)(queue_t *qp, mblk_t *mp);
2578 	int (*cansend)(queue_t *qp);
2579 
2580 #ifdef GLD_DEBUG
2581 	if (gld_debug & GLDTRACE)
2582 		cmn_err(CE_NOTE, "gld_sendup(%p, %p)", (void *)mp,
2583 		    (void *)macinfo);
2584 #endif
2585 
2586 	ASSERT(mp != NULL);
2587 	ASSERT(macinfo != NULL);
2588 	ASSERT(vlan != NULL);
2589 	ASSERT(pktinfo != NULL);
2590 	ASSERT(GLDM_LOCK_HELD(macinfo));
2591 
2592 	/*
2593 	 * The "fast" in "GLDOPT_FAST_RECV" refers to the speed at which
2594 	 * gld_recv returns to the caller's interrupt routine.  The total
2595 	 * network throughput would normally be lower when selecting this
2596 	 * option, because we putq the messages and process them later,
2597 	 * instead of sending them with putnext now.  Some time critical
2598 	 * device might need this, so it's here but undocumented.
2599 	 */
2600 	if (macinfo->gldm_options & GLDOPT_FAST_RECV) {
2601 		send = (void (*)(queue_t *, mblk_t *))putq;
2602 		cansend = canput;
2603 	} else {
2604 		send = (void (*)(queue_t *, mblk_t *))putnext;
2605 		cansend = canputnext;
2606 	}
2607 
2608 	/*
2609 	 * Search all the streams attached to this macinfo looking for
2610 	 * those eligible to receive the present packet.
2611 	 */
2612 	for (gld = vlan->gldv_str_next;
2613 	    gld != (gld_t *)&vlan->gldv_str_next; gld = gld->gld_next) {
2614 #ifdef GLD_VERBOSE_DEBUG
2615 		cmn_err(CE_NOTE, "gld_sendup: SAP: %4x QPTR: %p QSTATE: %s",
2616 		    gld->gld_sap, (void *)gld->gld_qptr,
2617 		    gld->gld_state == DL_IDLE ? "IDLE": "NOT IDLE");
2618 #endif
2619 		ASSERT(gld->gld_qptr != NULL);
2620 		ASSERT(gld->gld_state == DL_IDLE ||
2621 		    gld->gld_state == DL_UNBOUND);
2622 		ASSERT(gld->gld_mac_info == macinfo);
2623 		ASSERT(gld->gld_vlan == vlan);
2624 
2625 		if (gld->gld_state != DL_IDLE)
2626 			continue;	/* not eligible to receive */
2627 		if (gld->gld_flags & GLD_STR_CLOSING)
2628 			continue;	/* not eligible to receive */
2629 
2630 #ifdef GLD_DEBUG
2631 		if ((gld_debug & GLDRECV) &&
2632 		    (!(gld_debug & GLDNOBR) ||
2633 		    (!pktinfo->isBroadcast && !pktinfo->isMulticast)))
2634 			cmn_err(CE_NOTE,
2635 			    "gld_sendup: queue sap: %4x promis: %s %s %s",
2636 			    gld->gld_sap,
2637 			    gld->gld_flags & GLD_PROM_PHYS ? "phys " : "     ",
2638 			    gld->gld_flags & GLD_PROM_SAP  ? "sap  " : "     ",
2639 			    gld->gld_flags & GLD_PROM_MULT ? "multi" : "     ");
2640 #endif
2641 
2642 		/*
2643 		 * The accept function differs depending on whether this is
2644 		 * a packet that we received from the wire or a loopback.
2645 		 */
2646 		if ((*acceptfunc)(gld, pktinfo)) {
2647 			/* sap matches */
2648 			pktinfo->wasAccepted = 1;	/* known protocol */
2649 
2650 			if (!(*cansend)(gld->gld_qptr)) {
2651 				/*
2652 				 * The upper stream is not accepting messages,
2653 				 * i.e. it is flow controlled, so we forgo
2654 				 * sending the message up this stream.
2655 				 */
2656 #ifdef GLD_DEBUG
2657 				if (gld_debug & GLDETRACE)
2658 					cmn_err(CE_WARN,
2659 					    "gld_sendup: canput failed");
2660 #endif
2661 				BUMP(vlan->gldv_stats->glds_blocked, 1);
2662 				qenable(gld->gld_qptr);
2663 				continue;
2664 			}
2665 
2666 			/*
2667 			 * We are trying to avoid an extra dupmsg() here.
2668 			 * If this is the first eligible queue, remember the
2669 			 * queue and send up the message after the loop.
2670 			 */
2671 			if (!fgld) {
2672 				fgld = gld;
2673 				continue;
2674 			}
2675 
2676 			/* duplicate the packet for this stream */
2677 			nmp = dupmsg(mp);
2678 			if (nmp == NULL) {
2679 				BUMP(vlan->gldv_stats->glds_gldnorcvbuf, 1);
2680 #ifdef GLD_DEBUG
2681 				if (gld_debug & GLDERRS)
2682 					cmn_err(CE_WARN,
2683 					    "gld_sendup: dupmsg failed");
2684 #endif
2685 				break;	/* couldn't get resources; drop it */
2686 			}
2687 			/* pass the message up the stream */
2688 			gld_passon(gld, nmp, pktinfo, send);
2689 		}
2690 	}
2691 
2692 	ASSERT(mp);
2693 	/* send the original dup of the packet up the first stream found */
2694 	if (fgld)
2695 		gld_passon(fgld, mp, pktinfo, send);
2696 	else
2697 		freemsg(mp);	/* no streams matched */
2698 
2699 	/* We do not count looped back packets */
2700 	if (acceptfunc == gld_paccept)
2701 		return;		/* transmit loopback case */
2702 
2703 	if (pktinfo->isBroadcast)
2704 		BUMP(vlan->gldv_stats->glds_brdcstrcv, 1);
2705 	else if (pktinfo->isMulticast)
2706 		BUMP(vlan->gldv_stats->glds_multircv, 1);
2707 
2708 	/* No stream accepted this packet */
2709 	if (!pktinfo->wasAccepted)
2710 		BUMP(vlan->gldv_stats->glds_unknowns, 1);
2711 }
2712 
2713 /*
2714  * A packet matches a stream if:
2715  *     the stream accepts EtherType encoded packets and the type matches
2716  *  or the stream accepts LLC packets and the packet is an LLC packet
2717  */
2718 #define	MATCH(stream, pktinfo) \
2719 	((stream->gld_ethertype && stream->gld_sap == pktinfo->ethertype) || \
2720 	(!stream->gld_ethertype && pktinfo->isLLC))
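/*
 * Worked example (illustrative): a stream bound in EtherType mode to
 * SAP 0x0800 matches a packet whose interpreter set
 * pktinfo->ethertype == 0x0800 (IPv4); a stream bound in LLC mode
 * (gld_ethertype == 0) matches any packet flagged isLLC.
 */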
2721 
2722 /*
2723  * This function validates a packet for sending up a particular
2724  * stream. The message header has been parsed and its characteristics
2725  * are recorded in the pktinfo data structure. The STREAMS stack info
2726  * is presented in the gld data structures.
2727  */
2728 static int
2729 gld_accept(gld_t *gld, pktinfo_t *pktinfo)
2730 {
2731 	/*
2732 	 * if there is no match do not bother checking further.
2733 	 */
2734 	if (!MATCH(gld, pktinfo) && !(gld->gld_flags & GLD_PROM_SAP))
2735 		return (0);
2736 
2737 	/*
2738 	 * We don't accept any packet from the hardware if we originated it.
2739 	 * (Contrast gld_paccept, the send-loopback accept function.)
2740 	 */
2741 	if (pktinfo->isLooped)
2742 		return (0);
2743 
2744 	/*
2745 	 * If the packet is broadcast or sent to us directly we will accept it.
2746 	 * Also we will accept multicast packets requested by the stream.
2747 	 */
2748 	if (pktinfo->isForMe || pktinfo->isBroadcast ||
2749 	    gld_mcmatch(gld, pktinfo))
2750 		return (1);
2751 
2752 	/*
2753 	 * Finally, accept anything else if we're in promiscuous mode
2754 	 */
2755 	if (gld->gld_flags & GLD_PROM_PHYS)
2756 		return (1);
2757 
2758 	return (0);
2759 }
2760 
2761 /*
2762  * Return TRUE if the given multicast address is one
2763  * of those that this particular Stream is interested in.
2764  */
2765 static int
2766 gld_mcmatch(gld_t *gld, pktinfo_t *pktinfo)
2767 {
2768 	/*
2769 	 * Return FALSE if not a multicast address.
2770 	 */
2771 	if (!pktinfo->isMulticast)
2772 		return (0);
2773 
2774 	/*
2775 	 * Check if all multicasts have been enabled for this Stream
2776 	 */
2777 	if (gld->gld_flags & GLD_PROM_MULT)
2778 		return (1);
2779 
2780 	/*
2781 	 * Return FALSE if no multicast addresses enabled for this Stream.
2782 	 */
2783 	if (!gld->gld_mcast)
2784 		return (0);
2785 
2786 	/*
2787 	 * Otherwise, look for it in the table.
2788 	 */
2789 	return (gld_multicast(pktinfo->dhost, gld));
2790 }
2791 
2792 /*
2793  * gld_multicast determines if the address is a multicast address for
2794  * this stream.
2795  */
2796 static int
2797 gld_multicast(unsigned char *macaddr, gld_t *gld)
2798 {
2799 	int i;
2800 
2801 	ASSERT(GLDM_LOCK_HELD(gld->gld_mac_info));
2802 
2803 	if (!gld->gld_mcast)
2804 		return (0);
2805 
2806 	for (i = 0; i < gld->gld_multicnt; i++) {
2807 		if (gld->gld_mcast[i]) {
2808 			ASSERT(gld->gld_mcast[i]->gldm_refcnt);
2809 			if (mac_eq(gld->gld_mcast[i]->gldm_addr, macaddr,
2810 			    gld->gld_mac_info->gldm_addrlen))
2811 				return (1);
2812 		}
2813 	}
2814 
2815 	return (0);
2816 }
2817 
2818 /*
2819  * accept function for looped back packets
2820  */
2821 static int
2822 gld_paccept(gld_t *gld, pktinfo_t *pktinfo)
2823 {
2824 	return (gld->gld_flags & GLD_PROM_PHYS &&
2825 	    (MATCH(gld, pktinfo) || gld->gld_flags & GLD_PROM_SAP));
2826 }
2827 
2828 static void
2829 gld_passon(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo,
2830 	void (*send)(queue_t *qp, mblk_t *mp))
2831 {
2832 	int skiplen;
2833 
2834 #ifdef GLD_DEBUG
2835 	if (gld_debug & GLDTRACE)
2836 		cmn_err(CE_NOTE, "gld_passon(%p, %p, %p)", (void *)gld,
2837 		    (void *)mp, (void *)pktinfo);
2838 
2839 	if ((gld_debug & GLDRECV) && (!(gld_debug & GLDNOBR) ||
2840 	    (!pktinfo->isBroadcast && !pktinfo->isMulticast)))
2841 		cmn_err(CE_NOTE, "gld_passon: q: %p mblk: %p minor: %d sap: %x",
2842 		    (void *)gld->gld_qptr->q_next, (void *)mp, gld->gld_minor,
2843 		    gld->gld_sap);
2844 #endif
2845 
2846 	/*
2847 	 * Figure out how much of the packet header to throw away.
2848 	 *
2849 	 * RAW streams expect to see the whole packet.
2850 	 *
2851 	 * Other streams expect to see the packet with the MAC header
2852 	 * removed.
2853 	 *
2854 	 * Normal DLPI (non RAW/FAST) streams also want the
2855 	 * DL_UNITDATA_IND M_PROTO message block prepended to the M_DATA.
2856 	 */
2857 	if (gld->gld_flags & GLD_RAW) {
2858 		skiplen = 0;
2859 	} else {
2860 		skiplen = pktinfo->macLen;		/* skip mac header */
2861 		if (gld->gld_ethertype)
2862 			skiplen += pktinfo->hdrLen;	/* skip any extra */
2863 	}
2864 
2865 	if (skiplen >= pktinfo->pktLen) {
2866 		/*
2867 		 * If the interpreter did its job right, then it cannot be
2868 		 * asking us to skip more bytes than are in the packet!
2869 		 * However, there could be zero data bytes left after the
2870 		 * amount to skip.  DLPI specifies that passed M_DATA blocks
2871 		 * should contain at least one byte of data, so if we have
2872 		 * none we just drop it.
2873 		 */
2874 		ASSERT(!(skiplen > pktinfo->pktLen));
2875 		freemsg(mp);
2876 		return;
2877 	}
2878 
2879 	/*
2880 	 * Skip over the header(s), taking care to possibly handle message
2881 	 * fragments shorter than the amount we need to skip.  Hopefully
2882 	 * the driver will put the entire packet, or at least the entire
2883 	 * header, into a single message block.  But we handle it if not.
2884 	 */
2885 	while (skiplen >= MBLKL(mp)) {
2886 		mblk_t *tmp = mp;
2887 		skiplen -= MBLKL(mp);
2888 		mp = mp->b_cont;
2889 		ASSERT(mp != NULL);	/* because skiplen < pktinfo->pktLen */
2890 		freeb(tmp);
2891 	}
2892 	mp->b_rptr += skiplen;
2893 
2894 	/* Add M_PROTO if necessary, and pass upstream */
2895 	if (((gld->gld_flags & GLD_FAST) && !pktinfo->isMulticast &&
2896 	    !pktinfo->isBroadcast) || (gld->gld_flags & GLD_RAW)) {
2897 		/* RAW/FAST: just send up the M_DATA */
2898 		(*send)(gld->gld_qptr, mp);
2899 	} else {
2900 		/* everybody else wants to see a unitdata_ind structure */
2901 		mp = gld_addudind(gld, mp, pktinfo);
2902 		if (mp)
2903 			(*send)(gld->gld_qptr, mp);
2904 		/* if it failed, gld_addudind already bumped statistic */
2905 	}
2906 }
2907 
2908 /*
2909  * gld_addudind(gld, mp, pktinfo)
2910  * format a DL_UNITDATA_IND message to be sent upstream to the user
2911  */
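/*
 * The resulting message (built below) is laid out roughly as:
 *
 *	M_PROTO: [ dl_unitdata_ind_t | dest addr + sap | src addr + sap ]
 *	   b_cont --> original M_DATA payload (MAC header already skipped)
 *
 * with the sap fields holding the EtherType for EtherType-bound streams
 * and 0 otherwise.
 */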
2912 static mblk_t *
2913 gld_addudind(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo)
2914 {
2915 	gld_mac_info_t		*macinfo = gld->gld_mac_info;
2916 	gld_vlan_t		*vlan = (gld_vlan_t *)gld->gld_vlan;
2917 	dl_unitdata_ind_t	*dludindp;
2918 	mblk_t			*nmp;
2919 	int			size;
2920 	int			type;
2921 
2922 #ifdef GLD_DEBUG
2923 	if (gld_debug & GLDTRACE)
2924 		cmn_err(CE_NOTE, "gld_addudind(%p, %p, %p)", (void *)gld,
2925 		    (void *)mp, (void *)pktinfo);
2926 #endif
2927 	ASSERT(macinfo != NULL);
2928 
2929 	/*
2930 	 * Allocate the DL_UNITDATA_IND M_PROTO header; if allocation fails,
2931 	 * we might as well discard the packet, since we can't go further.
2932 	 */
2933 	size = sizeof (dl_unitdata_ind_t) +
2934 	    2 * (macinfo->gldm_addrlen + abs(macinfo->gldm_saplen));
2935 	if ((nmp = allocb(size, BPRI_MED)) == NULL) {
2936 		freemsg(mp);
2937 		BUMP(vlan->gldv_stats->glds_gldnorcvbuf, 1);
2938 #ifdef GLD_DEBUG
2939 		if (gld_debug & GLDERRS)
2940 			cmn_err(CE_WARN,
2941 			    "gld_addudind: allocb failed");
2942 #endif
2943 		return ((mblk_t *)NULL);
2944 	}
2945 	DB_TYPE(nmp) = M_PROTO;
2946 	nmp->b_rptr = nmp->b_datap->db_lim - size;
2947 
2948 	type = (gld->gld_ethertype) ? pktinfo->ethertype : 0;
2949 
2950 	/*
2951 	 * now setup the DL_UNITDATA_IND header
2952 	 *
2953 	 * XXX This looks broken if the saps aren't two bytes.
2954 	 */
2955 	dludindp = (dl_unitdata_ind_t *)nmp->b_rptr;
2956 	dludindp->dl_primitive = DL_UNITDATA_IND;
2957 	dludindp->dl_src_addr_length =
2958 	    dludindp->dl_dest_addr_length = macinfo->gldm_addrlen +
2959 					abs(macinfo->gldm_saplen);
2960 	dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t);
2961 	dludindp->dl_src_addr_offset = dludindp->dl_dest_addr_offset +
2962 					dludindp->dl_dest_addr_length;
2963 
2964 	dludindp->dl_group_address = (pktinfo->isMulticast ||
2965 					pktinfo->isBroadcast);
2966 
2967 	nmp->b_wptr = nmp->b_rptr + dludindp->dl_dest_addr_offset;
2968 
2969 	mac_copy(pktinfo->dhost, nmp->b_wptr, macinfo->gldm_addrlen);
2970 	nmp->b_wptr += macinfo->gldm_addrlen;
2971 
2972 	ASSERT(macinfo->gldm_saplen == -2);	/* XXX following code assumes */
2973 	*(ushort_t *)(nmp->b_wptr) = type;
2974 	nmp->b_wptr += abs(macinfo->gldm_saplen);
2975 
2976 	ASSERT(nmp->b_wptr == nmp->b_rptr + dludindp->dl_src_addr_offset);
2977 
2978 	mac_copy(pktinfo->shost, nmp->b_wptr, macinfo->gldm_addrlen);
2979 	nmp->b_wptr += macinfo->gldm_addrlen;
2980 
2981 	*(ushort_t *)(nmp->b_wptr) = type;
2982 	nmp->b_wptr += abs(macinfo->gldm_saplen);
2983 
2984 	if (pktinfo->nosource)
2985 		dludindp->dl_src_addr_offset = dludindp->dl_src_addr_length = 0;
2986 	linkb(nmp, mp);
2987 	return (nmp);
2988 }
2989 
2990 /* ======================================================= */
2991 /* wsrv group: called from wsrv, single threaded per queue */
2992 /* ======================================================= */
2993 
2994 /*
2995  * We go to some trouble to avoid taking the same lock during normal
2996  * transmit processing as we do during normal receive processing.
2997  *
2998  * Elements of the per-instance macinfo and per-stream gld_t structures
2999  * are for the most part protected by the GLDM_LOCK rwlock/mutex.
3000  * (Elements of the gld_mac_pvt_t structure are considered part of the
3001  * macinfo structure for purposes of this discussion).
3002  *
3003  * However, it is more complicated than that:
3004  *
3005  *	Elements of the macinfo structure that are set before the macinfo
3006  *	structure is added to its device list by gld_register(), and never
3007  *	thereafter modified, are accessed without requiring taking the lock.
3008  *	A similar rule applies to those elements of the gld_t structure that
3009  *	are written by gld_open() before the stream is added to any list.
3010  *
3011  *	Most other elements of the macinfo structure may only be read or
3012  *	written while holding the maclock.
3013  *
3014  *	Most writable elements of the gld_t structure are written only
3015  *	within the single-threaded domain of wsrv() and subsidiaries.
3016  *	(This domain includes open/close while qprocs are not on.)
3017  *	The maclock need not be taken while within that domain
3018  *	simply to read those elements.  Writing to them, even within
3019  *	that domain, or reading from it outside that domain, requires
3020  *	holding the maclock.  Exception:  if the stream is not
3021  *	presently attached to a PPA, there is no associated macinfo,
3022  *	and no maclock need be taken.
3023  *
3024  *	The curr_macaddr element of the mac private structure is also
3025  *      protected by the GLDM_LOCK rwlock/mutex, like most other members
3026  *      of that structure. However, there are a few instances in the
3027  *      transmit path where we choose to forgo lock protection when
3028  *      reading this variable. This is to avoid lock contention between
3029  *      threads executing the DL_UNITDATA_REQ case and receive threads.
3030  *      In doing so we will take a small risk of a few corrupted packets
3031  *      during the short and rare times when someone is changing the interface's
3032  *      physical address. We consider the small cost in this rare case to be
3033  *      worth the benefit of reduced lock contention under normal operating
3034  *      conditions. The risk/cost is small because:
3035  *          1. there is no guarantee at this layer of uncorrupted delivery.
3036  *          2. the physaddr doesn't change very often - no performance hit.
3037  *          3. if the physaddr changes, other stuff is going to be screwed
3038  *             up for a while anyway, while other sites refigure ARP, etc.,
3039  *             so losing a couple of packets is the least of our worries.
3040  *
3041  *	The list of streams associated with a macinfo is protected by
3042  *	two locks:  the per-macinfo maclock, and the per-major-device
3043  *	gld_devlock.  Both must be held to modify the list, but either
3044  *	may be held to protect the list during reading/traversing.  This
3045  *	allows independent locking for multiple instances in the receive
3046  *	path (using macinfo), while facilitating routines that must search
3047  *	the entire set of streams associated with a major device, such as
3048  *	gld_findminor(), gld_finddevinfo(), close().  The "nstreams"
3049  *	macinfo	element, and the gld_mac_info gld_t element, are similarly
3050  *	protected, since they change at exactly the same time macinfo
3051  *	streams list does.
3052  *
3053  *	The list of macinfo structures associated with a major device
3054  *	structure is protected by the gld_devlock, as is the per-major
3055  *	list of Style 2 streams in the DL_UNATTACHED state.
3056  *
3057  *	The list of major devices is kept on a module-global list
3058  *	gld_device_list, which has its own lock to protect the list.
3059  *
3060  *	When it is necessary to hold more than one lock at a time, they
3061  *	are acquired in this "outside in" order:
3062  *		gld_device_list.gld_devlock
3063  *		glddev->gld_devlock
3064  *		GLDM_LOCK(macinfo)
3065  *
3066  *	Finally, there are some "volatile" elements of the gld_t structure
3067  *	used for synchronization between various routines that don't share
3068  *	the same mutexes.  See the routines for details.  These are:
3069  *		gld_xwait	between gld_wsrv() and gld_sched()
3070  *		gld_sched_ran	between gld_wsrv() and gld_sched()
3071  *		gld_in_unbind	between gld_wput() and wsrv's gld_unbind()
3072  *		gld_wput_count	between gld_wput() and wsrv's gld_unbind()
3073  *		gld_in_wsrv	between gld_wput() and gld_wsrv()
3074  *				(used in conjunction with q->q_first)
3075  */
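/*
 * A minimal sketch of that "outside in" ordering when all three locks
 * are needed (illustrative only):
 *
 *	mutex_enter(&gld_device_list.gld_devlock);
 *	mutex_enter(&glddev->gld_devlock);
 *	GLDM_LOCK(macinfo, RW_WRITER);
 *	... critical section ...
 *	GLDM_UNLOCK(macinfo);
 *	mutex_exit(&glddev->gld_devlock);
 *	mutex_exit(&gld_device_list.gld_devlock);
 */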
3076 
3077 /*
3078  * gld_ioctl (q, mp)
3079  * handles all ioctl requests passed downstream. This routine is
3080  * passed a pointer to the message block with the ioctl request in it, and a
3081  * pointer to the queue so it can respond to the ioctl request with an ack.
3082  */
3083 int
3084 gld_ioctl(queue_t *q, mblk_t *mp)
3085 {
3086 	struct iocblk *iocp;
3087 	gld_t *gld;
3088 	gld_mac_info_t *macinfo;
3089 
3090 #ifdef GLD_DEBUG
3091 	if (gld_debug & GLDTRACE)
3092 		cmn_err(CE_NOTE, "gld_ioctl(%p %p)", (void *)q, (void *)mp);
3093 #endif
3094 	gld = (gld_t *)q->q_ptr;
3095 	iocp = (struct iocblk *)mp->b_rptr;
3096 	switch (iocp->ioc_cmd) {
3097 	case DLIOCRAW:		/* raw M_DATA mode */
3098 		gld->gld_flags |= GLD_RAW;
3099 		DB_TYPE(mp) = M_IOCACK;
3100 		qreply(q, mp);
3101 		break;
3102 
3103 	case DL_IOC_HDR_INFO:	/* fastpath */
3104 		if (gld_global_options & GLD_OPT_NO_FASTPATH) {
3105 			miocnak(q, mp, 0, EINVAL);
3106 			break;
3107 		}
3108 		gld_fastpath(gld, q, mp);
3109 		break;
3110 
3111 	default:
3112 		macinfo	 = gld->gld_mac_info;
3113 		if (macinfo == NULL || macinfo->gldm_ioctl == NULL) {
3114 			miocnak(q, mp, 0, EINVAL);
3115 			break;
3116 		}
3117 
3118 		GLDM_LOCK(macinfo, RW_WRITER);
3119 		(void) (*macinfo->gldm_ioctl) (macinfo, q, mp);
3120 		GLDM_UNLOCK(macinfo);
3121 		break;
3122 	}
3123 	return (0);
3124 }
3125 
3126 /*
3127  * Since the rules for "fastpath" mode don't seem to be documented
3128  * anywhere, I will describe GLD's rules for fastpath users here:
3129  *
3130  * Once in this mode you remain there until close.
3131  * If you unbind/rebind you should get a new header using DL_IOC_HDR_INFO.
3132  * You must be bound (DL_IDLE) to transmit.
3133  * There are other rules not listed above.
3134  */
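/*
 * A fastpath user requests the header template with an M_IOCTL of
 * DL_IOC_HDR_INFO whose payload is a DL_UNITDATA_REQ plus the
 * destination address/sap.  A hedged sketch of the exchange handled
 * below:
 *
 *	downstream:  M_IOCTL(DL_IOC_HDR_INFO) -> dl_unitdata_req_t + addr
 *	upstream:    M_IOCACK with the same blocks, plus an M_DATA
 *	             template MAC header linked at the end
 *
 * after which the stream may exchange bare M_DATA with GLD.
 */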
3135 static void
3136 gld_fastpath(gld_t *gld, queue_t *q, mblk_t *mp)
3137 {
3138 	gld_interface_t *ifp;
3139 	gld_mac_info_t *macinfo;
3140 	dl_unitdata_req_t *dludp;
3141 	mblk_t *nmp;
3142 	t_scalar_t off, len;
3143 	uint_t maclen;
3144 	int error;
3145 	gld_vlan_t *vlan;
3146 
3147 	if (gld->gld_state != DL_IDLE) {
3148 		miocnak(q, mp, 0, EINVAL);
3149 		return;
3150 	}
3151 
3152 	macinfo = gld->gld_mac_info;
3153 	ASSERT(macinfo != NULL);
3154 	maclen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen);
3155 
3156 	error = miocpullup(mp, sizeof (dl_unitdata_req_t) + maclen);
3157 	if (error != 0) {
3158 		miocnak(q, mp, 0, error);
3159 		return;
3160 	}
3161 
3162 	dludp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
3163 	off = dludp->dl_dest_addr_offset;
3164 	len = dludp->dl_dest_addr_length;
3165 	if (dludp->dl_primitive != DL_UNITDATA_REQ ||
3166 	    !MBLKIN(mp->b_cont, off, len) || len != maclen) {
3167 		miocnak(q, mp, 0, EINVAL);
3168 		return;
3169 	}
3170 
3171 	/*
3172 	 * We take his fastpath request as a declaration that he will accept
3173 	 * M_DATA messages from us, whether or not we are willing to accept
3174 	 * them from him.  This allows us to have fastpath in one direction
3175 	 * (flow upstream) even on media with Source Routing, where we are
3176 	 * unable to provide a fixed MAC header to be prepended to downstream
3177 	 * flowing packets.  So we set GLD_FAST whether or not we decide to
3178 	 * allow him to send M_DATA down to us.
3179 	 */
3180 	GLDM_LOCK(macinfo, RW_WRITER);
3181 	gld->gld_flags |= GLD_FAST;
3182 	vlan = (gld_vlan_t *)gld->gld_vlan;
3183 	vlan->gldv_ipq_flags &= ~IPQ_DISABLED;
3184 	GLDM_UNLOCK(macinfo);
3185 
3186 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
3187 
3188 	/* This will fail for Source Routing media */
3189 	/* Also on Ethernet on 802.2 SAPs */
3190 	if ((nmp = (*ifp->mkfastpath)(gld, mp)) == NULL) {
3191 		miocnak(q, mp, 0, ENOMEM);
3192 		return;
3193 	}
3194 
3195 	/*
3196 	 * Link new mblk in after the "request" mblks.
3197 	 */
3198 	linkb(mp, nmp);
3199 	miocack(q, mp, msgdsize(mp->b_cont), 0);
3200 }
3201 
3202 /*
3203  * gld_cmds (q, mp)
3204  *	process the DL commands as defined in dlpi.h
3205  *	note that the primitives return status which is passed back
3206  *	to the service procedure.  If the value is GLDE_RETRY, then
3207  *	it is assumed that processing must stop and the primitive has
3208  *	been put back onto the queue.  If the value is any other error,
3209  *	then an error ack is generated by the service procedure.
3210  */
3211 static int
3212 gld_cmds(queue_t *q, mblk_t *mp)
3213 {
3214 	union DL_primitives *dlp = (union DL_primitives *)mp->b_rptr;
3215 	gld_t *gld = (gld_t *)(q->q_ptr);
3216 	int result = DL_BADPRIM;
3217 	int mblkl = MBLKL(mp);
3218 	t_uscalar_t dlreq;
3219 
3220 	/* Make sure we have at least dlp->dl_primitive */
3221 	if (mblkl < sizeof (dlp->dl_primitive))
3222 		return (DL_BADPRIM);
3223 
3224 	dlreq = dlp->dl_primitive;
3225 #ifdef	GLD_DEBUG
3226 	if (gld_debug & GLDTRACE)
3227 		cmn_err(CE_NOTE,
3228 		    "gld_cmds(%p, %p):dlp=%p, dlp->dl_primitive=%d",
3229 		    (void *)q, (void *)mp, (void *)dlp, dlreq);
3230 #endif
3231 
3232 	switch (dlreq) {
3233 	case DL_UDQOS_REQ:
3234 		if (mblkl < DL_UDQOS_REQ_SIZE)
3235 			break;
3236 		result = gld_udqos(q, mp);
3237 		break;
3238 
3239 	case DL_BIND_REQ:
3240 		if (mblkl < DL_BIND_REQ_SIZE)
3241 			break;
3242 		result = gld_bind(q, mp);
3243 		break;
3244 
3245 	case DL_UNBIND_REQ:
3246 		if (mblkl < DL_UNBIND_REQ_SIZE)
3247 			break;
3248 		result = gld_unbind(q, mp);
3249 		break;
3250 
3251 	case DL_UNITDATA_REQ:
3252 		if (mblkl < DL_UNITDATA_REQ_SIZE)
3253 			break;
3254 		result = gld_unitdata(q, mp);
3255 		break;
3256 
3257 	case DL_INFO_REQ:
3258 		if (mblkl < DL_INFO_REQ_SIZE)
3259 			break;
3260 		result = gld_inforeq(q, mp);
3261 		break;
3262 
3263 	case DL_ATTACH_REQ:
3264 		if (mblkl < DL_ATTACH_REQ_SIZE)
3265 			break;
3266 		if (gld->gld_style == DL_STYLE2)
3267 			result = gldattach(q, mp);
3268 		else
3269 			result = DL_NOTSUPPORTED;
3270 		break;
3271 
3272 	case DL_DETACH_REQ:
3273 		if (mblkl < DL_DETACH_REQ_SIZE)
3274 			break;
3275 		if (gld->gld_style == DL_STYLE2)
3276 			result = gldunattach(q, mp);
3277 		else
3278 			result = DL_NOTSUPPORTED;
3279 		break;
3280 
3281 	case DL_ENABMULTI_REQ:
3282 		if (mblkl < DL_ENABMULTI_REQ_SIZE)
3283 			break;
3284 		result = gld_enable_multi(q, mp);
3285 		break;
3286 
3287 	case DL_DISABMULTI_REQ:
3288 		if (mblkl < DL_DISABMULTI_REQ_SIZE)
3289 			break;
3290 		result = gld_disable_multi(q, mp);
3291 		break;
3292 
3293 	case DL_PHYS_ADDR_REQ:
3294 		if (mblkl < DL_PHYS_ADDR_REQ_SIZE)
3295 			break;
3296 		result = gld_physaddr(q, mp);
3297 		break;
3298 
3299 	case DL_SET_PHYS_ADDR_REQ:
3300 		if (mblkl < DL_SET_PHYS_ADDR_REQ_SIZE)
3301 			break;
3302 		result = gld_setaddr(q, mp);
3303 		break;
3304 
3305 	case DL_PROMISCON_REQ:
3306 		if (mblkl < DL_PROMISCON_REQ_SIZE)
3307 			break;
3308 		result = gld_promisc(q, mp, dlreq, B_TRUE);
3309 		break;
3310 
3311 	case DL_PROMISCOFF_REQ:
3312 		if (mblkl < DL_PROMISCOFF_REQ_SIZE)
3313 			break;
3314 		result = gld_promisc(q, mp, dlreq, B_FALSE);
3315 		break;
3316 
3317 	case DL_GET_STATISTICS_REQ:
3318 		if (mblkl < DL_GET_STATISTICS_REQ_SIZE)
3319 			break;
3320 		result = gld_get_statistics(q, mp);
3321 		break;
3322 
3323 	case DL_CAPABILITY_REQ:
3324 		if (mblkl < DL_CAPABILITY_REQ_SIZE)
3325 			break;
3326 		result = gld_cap(q, mp);
3327 		break;
3328 
3329 	case DL_NOTIFY_REQ:
3330 		if (mblkl < DL_NOTIFY_REQ_SIZE)
3331 			break;
3332 		result = gld_notify_req(q, mp);
3333 		break;
3334 
3335 	case DL_XID_REQ:
3336 	case DL_XID_RES:
3337 	case DL_TEST_REQ:
3338 	case DL_TEST_RES:
3339 	case DL_CONTROL_REQ:
3340 	case DL_PASSIVE_REQ:
3341 		result = DL_NOTSUPPORTED;
3342 		break;
3343 
3344 	default:
3345 #ifdef	GLD_DEBUG
3346 		if (gld_debug & GLDERRS)
3347 			cmn_err(CE_WARN,
3348 			    "gld_cmds: unknown M_PROTO message: %d",
3349 			    dlreq);
3350 #endif
3351 		result = DL_BADPRIM;
3352 	}
3353 
3354 	return (result);
3355 }
3356 
3357 static int
3358 gld_cap(queue_t *q, mblk_t *mp)
3359 {
3360 	gld_t *gld = (gld_t *)q->q_ptr;
3361 	dl_capability_req_t *dlp = (dl_capability_req_t *)mp->b_rptr;
3362 
3363 	if (gld->gld_state == DL_UNATTACHED)
3364 		return (DL_OUTSTATE);
3365 
3366 	if (dlp->dl_sub_length == 0)
3367 		return (gld_cap_ack(q, mp));
3368 
3369 	return (gld_cap_enable(q, mp));
3370 }
3371 
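/*
 * gld_cap_ack - acknowledge a DL_CAPABILITY_REQ that carried no
 * subcapabilities by listing everything this MAC supports.  Each
 * supported capability (hardware checksum, zerocopy, MDT) is appended
 * as a dl_capability_sub_t header plus its payload, packed immediately
 * after the dl_capability_ack_t.
 */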
3372 static int
3373 gld_cap_ack(queue_t *q, mblk_t *mp)
3374 {
3375 	gld_t *gld = (gld_t *)q->q_ptr;
3376 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3377 	gld_interface_t *ifp;
3378 	dl_capability_ack_t *dlap;
3379 	dl_capability_sub_t *dlsp;
3380 	size_t size = sizeof (dl_capability_ack_t);
3381 	size_t subsize = 0;
3382 
3383 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
3384 
3385 	if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY)
3386 		subsize += sizeof (dl_capability_sub_t) +
3387 		    sizeof (dl_capab_hcksum_t);
3388 	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
3389 		subsize += sizeof (dl_capability_sub_t) +
3390 		    sizeof (dl_capab_zerocopy_t);
3391 	if (macinfo->gldm_options & GLDOPT_MDT)
3392 		subsize += (sizeof (dl_capability_sub_t) +
3393 		    sizeof (dl_capab_mdt_t));
3394 
3395 	if ((mp = mexchange(q, mp, size + subsize, M_PROTO,
3396 	    DL_CAPABILITY_ACK)) == NULL)
3397 		return (GLDE_OK);
3398 
3399 	dlap = (dl_capability_ack_t *)mp->b_rptr;
3400 	dlap->dl_sub_offset = 0;
3401 	if ((dlap->dl_sub_length = subsize) != 0)
3402 		dlap->dl_sub_offset = sizeof (dl_capability_ack_t);
3403 	dlsp = (dl_capability_sub_t *)&dlap[1];
3404 
3405 	if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY) {
3406 		dl_capab_hcksum_t *dlhp = (dl_capab_hcksum_t *)&dlsp[1];
3407 
3408 		dlsp->dl_cap = DL_CAPAB_HCKSUM;
3409 		dlsp->dl_length = sizeof (dl_capab_hcksum_t);
3410 
3411 		dlhp->hcksum_version = HCKSUM_VERSION_1;
3412 
3413 		dlhp->hcksum_txflags = 0;
3414 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_PARTIAL)
3415 			dlhp->hcksum_txflags |= HCKSUM_INET_PARTIAL;
3416 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V4)
3417 			dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V4;
3418 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V6)
3419 			dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V6;
3420 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_IPHDR)
3421 			dlhp->hcksum_txflags |= HCKSUM_IPHDRCKSUM;
3422 
3423 		dlcapabsetqid(&(dlhp->hcksum_mid), RD(q));
3424 		dlsp = (dl_capability_sub_t *)&dlhp[1];
3425 	}
3426 
3427 	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY) {
3428 		dl_capab_zerocopy_t *dlzp = (dl_capab_zerocopy_t *)&dlsp[1];
3429 
3430 		dlsp->dl_cap = DL_CAPAB_ZEROCOPY;
3431 		dlsp->dl_length = sizeof (dl_capab_zerocopy_t);
3432 		dlzp->zerocopy_version = ZEROCOPY_VERSION_1;
3433 		dlzp->zerocopy_flags = DL_CAPAB_VMSAFE_MEM;
3434 
3435 		dlcapabsetqid(&(dlzp->zerocopy_mid), RD(q));
3436 		dlsp = (dl_capability_sub_t *)&dlzp[1];
3437 	}
3438 
3439 	if (macinfo->gldm_options & GLDOPT_MDT) {
3440 		dl_capab_mdt_t *dlmp = (dl_capab_mdt_t *)&dlsp[1];
3441 
3442 		dlsp->dl_cap = DL_CAPAB_MDT;
3443 		dlsp->dl_length = sizeof (dl_capab_mdt_t);
3444 
3445 		dlmp->mdt_version = MDT_VERSION_2;
3446 		dlmp->mdt_max_pld = macinfo->gldm_mdt_segs;
3447 		dlmp->mdt_span_limit = macinfo->gldm_mdt_sgl;
3448 		dlcapabsetqid(&dlmp->mdt_mid, OTHERQ(q));
3449 		dlmp->mdt_flags = DL_CAPAB_MDT_ENABLE;
3450 		dlmp->mdt_hdr_head = ifp->hdr_size;
3451 		dlmp->mdt_hdr_tail = 0;
3452 	}
3453 
3454 	qreply(q, mp);
3455 	return (GLDE_OK);
3456 }
3457 
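/*
 * gld_cap_enable - handle a DL_CAPABILITY_REQ carrying a list of
 * subcapabilities to enable.  Walk the embedded list, checking that
 * each dl_capability_sub_t and its payload lie within the message,
 * then relabel the message as a DL_CAPABILITY_ACK and echo it upstream.
 */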
3458 static int
3459 gld_cap_enable(queue_t *q, mblk_t *mp)
3460 {
3461 	dl_capability_req_t *dlp;
3462 	dl_capability_sub_t *dlsp;
3463 	dl_capab_hcksum_t *dlhp;
3464 	offset_t off;
3465 	size_t len;
3466 	size_t size;
3467 	offset_t end;
3468 
3469 	dlp = (dl_capability_req_t *)mp->b_rptr;
3470 	dlp->dl_primitive = DL_CAPABILITY_ACK;
3471 
3472 	off = dlp->dl_sub_offset;
3473 	len = dlp->dl_sub_length;
3474 
3475 	if (!MBLKIN(mp, off, len))
3476 		return (DL_BADPRIM);
3477 
3478 	end = off + len;
3479 	while (off < end) {
3480 		dlsp = (dl_capability_sub_t *)(mp->b_rptr + off);
3481 		size = sizeof (dl_capability_sub_t) + dlsp->dl_length;
3482 		if (off + size > end)
3483 			return (DL_BADPRIM);
3484 
3485 		switch (dlsp->dl_cap) {
3486 		case DL_CAPAB_HCKSUM:
3487 			dlhp = (dl_capab_hcksum_t *)&dlsp[1];
3488 			/* nothing useful we can do with the contents */
3489 			dlcapabsetqid(&(dlhp->hcksum_mid), RD(q));
3490 			break;
3491 		default:
3492 			break;
3493 		}
3494 
3495 		off += size;
3496 	}
3497 
3498 	qreply(q, mp);
3499 	return (GLDE_OK);
3500 }
3501 
3502 /*
3503  * Send a copy of the DL_NOTIFY_IND message <mp> to each stream that has
3504  * requested the specific <notification> that the message carries AND is
3505  * eligible and ready to receive the notification immediately.
3506  *
3507  * This routine ignores flow control. Notifications will be sent regardless.
3508  *
3509  * In all cases, the original message passed in is freed at the end of
3510  * the routine.
3511  */
3512 static void
3513 gld_notify_qs(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t notification)
3514 {
3515 	gld_mac_pvt_t *mac_pvt;
3516 	gld_vlan_t *vlan;
3517 	gld_t *gld;
3518 	mblk_t *nmp;
3519 	int i;
3520 
3521 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
3522 
3523 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3524 
3525 	/*
3526 	 * Search all the streams attached to this macinfo looking
3527 	 * for those eligible to receive the present notification.
3528 	 */
3529 	for (i = 0; i < VLAN_HASHSZ; i++) {
3530 		for (vlan = mac_pvt->vlan_hash[i];
3531 		    vlan != NULL; vlan = vlan->gldv_next) {
3532 			for (gld = vlan->gldv_str_next;
3533 			    gld != (gld_t *)&vlan->gldv_str_next;
3534 			    gld = gld->gld_next) {
3535 				ASSERT(gld->gld_qptr != NULL);
3536 				ASSERT(gld->gld_state == DL_IDLE ||
3537 				    gld->gld_state == DL_UNBOUND);
3538 				ASSERT(gld->gld_mac_info == macinfo);
3539 
3540 				if (gld->gld_flags & GLD_STR_CLOSING)
3541 					continue; /* not eligible - skip */
3542 				if (!(notification & gld->gld_notifications))
3543 					continue; /* not wanted - skip */
3544 				if ((nmp = dupmsg(mp)) == NULL)
3545 					continue; /* can't copy - skip */
3546 
3547 				/*
3548 				 * All OK; send dup'd notification up this
3549 				 * stream
3550 				 */
3551 				qreply(WR(gld->gld_qptr), nmp);
3552 			}
3553 		}
3554 	}
3555 
3556 	/*
3557 	 * Drop the original message block now
3558 	 */
3559 	freemsg(mp);
3560 }
3561 
3562 /*
3563  * For each (understood) bit in the <notifications> argument, construct
3564  * a DL_NOTIFY_IND message and send it to the specified <q>, or to all
3565  * eligible queues if <q> is NULL.
3566  */
3567 static void
3568 gld_notify_ind(gld_mac_info_t *macinfo, uint32_t notifications, queue_t *q)
3569 {
3570 	gld_mac_pvt_t *mac_pvt;
3571 	dl_notify_ind_t *dlnip;
3572 	struct gld_stats *stats;
3573 	mblk_t *mp;
3574 	size_t size;
3575 	uint32_t bit;
3576 
3577 	GLDM_LOCK(macinfo, RW_WRITER);
3578 
3579 	/*
3580 	 * The following cases shouldn't happen, but just in case the
3581 	 * MAC driver calls gld_linkstate() at an inappropriate time, we
3582 	 * check anyway ...
3583 	 */
3584 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
3585 		GLDM_UNLOCK(macinfo);
3586 		return;				/* not ready yet	*/
3587 	}
3588 
3589 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
3590 		GLDM_UNLOCK(macinfo);
3591 		return;				/* not ready anymore	*/
3592 	}
3593 
3594 	/*
3595 	 * Make sure the kstats are up to date, 'cos we use some of
3596 	 * the kstat values below, specifically the link speed ...
3597 	 */
3598 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3599 	stats = mac_pvt->statistics;
3600 	if (macinfo->gldm_get_stats)
3601 		(void) (*macinfo->gldm_get_stats)(macinfo, stats);
3602 
3603 	for (bit = 1; notifications != 0; bit <<= 1) {
3604 		if ((notifications & bit) == 0)
3605 			continue;
3606 		notifications &= ~bit;
3607 
3608 		size = DL_NOTIFY_IND_SIZE;
3609 		if (bit == DL_NOTE_PHYS_ADDR)
3610 			size += macinfo->gldm_addrlen;
3611 		if ((mp = allocb(size, BPRI_MED)) == NULL)
3612 			continue;
3613 
3614 		mp->b_datap->db_type = M_PROTO;
3615 		mp->b_wptr = mp->b_rptr + size;
3616 		dlnip = (dl_notify_ind_t *)mp->b_rptr;
3617 		dlnip->dl_primitive = DL_NOTIFY_IND;
3618 		dlnip->dl_notification = 0;
3619 		dlnip->dl_data = 0;
3620 		dlnip->dl_addr_length = 0;
3621 		dlnip->dl_addr_offset = 0;
3622 
3623 		switch (bit) {
3624 		case DL_NOTE_PROMISC_ON_PHYS:
3625 		case DL_NOTE_PROMISC_OFF_PHYS:
3626 			if (mac_pvt->nprom != 0)
3627 				dlnip->dl_notification = bit;
3628 			break;
3629 
3630 		case DL_NOTE_LINK_DOWN:
3631 			if (macinfo->gldm_linkstate == GLD_LINKSTATE_DOWN)
3632 				dlnip->dl_notification = bit;
3633 			break;
3634 
3635 		case DL_NOTE_LINK_UP:
3636 			if (macinfo->gldm_linkstate == GLD_LINKSTATE_UP)
3637 				dlnip->dl_notification = bit;
3638 			break;
3639 
3640 		case DL_NOTE_SPEED:
3641 			/*
3642 			 * Conversion required here:
3643 			 *	GLD keeps the speed in bit/s in a uint64
3644 			 *	DLPI wants it in kb/s in a uint32
3645 			 * Fortunately this is still big enough for 10Gb/s!
3646 			 */
3647 			dlnip->dl_notification = bit;
3648 			dlnip->dl_data = stats->glds_speed/1000ULL;
3649 			break;
3650 
3651 		case DL_NOTE_PHYS_ADDR:
3652 			dlnip->dl_notification = bit;
3653 			dlnip->dl_data = DL_CURR_PHYS_ADDR;
3654 			dlnip->dl_addr_offset = sizeof (dl_notify_ind_t);
3655 			dlnip->dl_addr_length = macinfo->gldm_addrlen +
3656 			    abs(macinfo->gldm_saplen);
3657 			mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3658 			mac_copy(mac_pvt->curr_macaddr,
3659 			    mp->b_rptr + sizeof (dl_notify_ind_t),
3660 			    macinfo->gldm_addrlen);
3661 			break;
3662 
3663 		default:
3664 			break;
3665 		}
3666 
3667 		if (dlnip->dl_notification == 0)
3668 			freemsg(mp);
3669 		else if (q != NULL)
3670 			qreply(q, mp);
3671 		else
3672 			gld_notify_qs(macinfo, mp, bit);
3673 	}
3674 
3675 	GLDM_UNLOCK(macinfo);
3676 }
3677 
3678 /*
3679  * gld_notify_req - handle a DL_NOTIFY_REQ message
3680  */
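/*
 * A DLPI client enables notifications by sending DL_NOTIFY_REQ with the
 * set of DL_NOTE_* bits it wants (for example DL_NOTE_LINK_UP |
 * DL_NOTE_LINK_DOWN | DL_NOTE_SPEED).  The DL_NOTIFY_ACK reply carries
 * the set this driver can supply, and any notifications whose current
 * state is known are then reported immediately via DL_NOTIFY_IND.
 */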
3681 static int
3682 gld_notify_req(queue_t *q, mblk_t *mp)
3683 {
3684 	gld_t *gld = (gld_t *)q->q_ptr;
3685 	gld_mac_info_t *macinfo;
3686 	gld_mac_pvt_t *pvt;
3687 	dl_notify_req_t *dlnrp;
3688 	dl_notify_ack_t *dlnap;
3689 
3690 	ASSERT(gld != NULL);
3691 	ASSERT(gld->gld_qptr == RD(q));
3692 
3693 	dlnrp = (dl_notify_req_t *)mp->b_rptr;
3694 
3695 #ifdef GLD_DEBUG
3696 	if (gld_debug & GLDTRACE)
3697 		cmn_err(CE_NOTE, "gld_notify_req(%p %p)",
3698 			(void *)q, (void *)mp);
3699 #endif
3700 
3701 	if (gld->gld_state == DL_UNATTACHED) {
3702 #ifdef GLD_DEBUG
3703 		if (gld_debug & GLDERRS)
3704 			cmn_err(CE_NOTE, "gld_notify_req: wrong state (%d)",
3705 				gld->gld_state);
3706 #endif
3707 		return (DL_OUTSTATE);
3708 	}
3709 
3710 	/*
3711 	 * Remember what notifications are required by this stream
3712 	 */
3713 	macinfo = gld->gld_mac_info;
3714 	pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3715 
3716 	gld->gld_notifications = dlnrp->dl_notifications & pvt->notifications;
3717 
3718 	/*
3719 	 * The return DL_NOTIFY_ACK carries the bitset of notifications
3720 	 * that this driver can provide, independently of which ones have
3721 	 * previously been or are now being requested.
3722 	 */
3723 	if ((mp = mexchange(q, mp, sizeof (dl_notify_ack_t), M_PCPROTO,
3724 	    DL_NOTIFY_ACK)) == NULL)
3725 		return (DL_SYSERR);
3726 
3727 	dlnap = (dl_notify_ack_t *)mp->b_rptr;
3728 	dlnap->dl_notifications = pvt->notifications;
3729 	qreply(q, mp);
3730 
3731 	/*
3732 	 * A side effect of a DL_NOTIFY_REQ is that after the DL_NOTIFY_ACK
3733 	 * reply, the requestor gets zero or more DL_NOTIFY_IND messages
3734 	 * that provide the current status.
3735 	 */
3736 	gld_notify_ind(macinfo, gld->gld_notifications, q);
3737 
3738 	return (GLDE_OK);
3739 }
3740 
3741 /*
3742  * gld_linkstate()
3743  *	Called by driver to tell GLD the state of the physical link.
3744  *	As a side effect, sends a DL_NOTE_LINK_UP or DL_NOTE_LINK_DOWN
3745  *	notification to each client that has previously requested such
3746  *	notifications
3747  */
3748 void
3749 gld_linkstate(gld_mac_info_t *macinfo, int32_t newstate)
3750 {
3751 	uint32_t notification;
3752 
3753 	switch (newstate) {
3754 	default:
3755 		return;
3756 
3757 	case GLD_LINKSTATE_DOWN:
3758 		notification = DL_NOTE_LINK_DOWN;
3759 		break;
3760 
3761 	case GLD_LINKSTATE_UP:
3762 		notification = DL_NOTE_LINK_UP | DL_NOTE_SPEED;
3763 		break;
3764 
3765 	case GLD_LINKSTATE_UNKNOWN:
3766 		notification = 0;
3767 		break;
3768 	}
3769 
3770 	GLDM_LOCK(macinfo, RW_WRITER);
3771 	if (macinfo->gldm_linkstate == newstate)
3772 		notification = 0;
3773 	else
3774 		macinfo->gldm_linkstate = newstate;
3775 	GLDM_UNLOCK(macinfo);
3776 
3777 	if (notification)
3778 		gld_notify_ind(macinfo, notification, NULL);
3779 }
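
/*
 * Illustrative sketch (not part of GLD itself): a GLD v2 driver would
 * typically call gld_linkstate() from its own link-status handling,
 * e.g. an interrupt or timeout routine.  The driver structure and
 * helper names below (xx_softc, xx_link_check, xx_link_is_up) are
 * hypothetical.
 *
 *	static void
 *	xx_link_check(struct xx_softc *sc)
 *	{
 *		if (xx_link_is_up(sc))
 *			gld_linkstate(sc->xx_macinfo, GLD_LINKSTATE_UP);
 *		else
 *			gld_linkstate(sc->xx_macinfo, GLD_LINKSTATE_DOWN);
 *	}
 *
 * Calling with an unchanged state is harmless: gld_linkstate() simply
 * suppresses the notification in that case.
 */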
3780 
3781 /*
3782  * gld_udqos - set the current QoS parameters (priority only at the moment).
3783  */
3784 static int
3785 gld_udqos(queue_t *q, mblk_t *mp)
3786 {
3787 	dl_udqos_req_t *dlp;
3788 	gld_t  *gld = (gld_t *)q->q_ptr;
3789 	int off;
3790 	int len;
3791 	dl_qos_cl_sel1_t *selp;
3792 
3793 	ASSERT(gld);
3794 	ASSERT(gld->gld_qptr == RD(q));
3795 
3796 #ifdef GLD_DEBUG
3797 	if (gld_debug & GLDTRACE)
3798 		cmn_err(CE_NOTE, "gld_udqos(%p %p)", (void *)q, (void *)mp);
3799 #endif
3800 
3801 	if (gld->gld_state != DL_IDLE) {
3802 #ifdef GLD_DEBUG
3803 		if (gld_debug & GLDERRS)
3804 			cmn_err(CE_NOTE, "gld_udqos: wrong state (%d)",
3805 			    gld->gld_state);
3806 #endif
3807 		return (DL_OUTSTATE);
3808 	}
3809 
3810 	dlp = (dl_udqos_req_t *)mp->b_rptr;
3811 	off = dlp->dl_qos_offset;
3812 	len = dlp->dl_qos_length;
3813 
3814 	if (len != sizeof (dl_qos_cl_sel1_t) || !MBLKIN(mp, off, len))
3815 		return (DL_BADQOSTYPE);
3816 
3817 	selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + off);
3818 	if (selp->dl_qos_type != DL_QOS_CL_SEL1)
3819 		return (DL_BADQOSTYPE);
3820 
3821 	if (selp->dl_trans_delay != 0 &&
3822 	    selp->dl_trans_delay != DL_QOS_DONT_CARE)
3823 		return (DL_BADQOSPARAM);
3824 	if (selp->dl_protection != 0 &&
3825 	    selp->dl_protection != DL_QOS_DONT_CARE)
3826 		return (DL_BADQOSPARAM);
3827 	if (selp->dl_residual_error != 0 &&
3828 	    selp->dl_residual_error != DL_QOS_DONT_CARE)
3829 		return (DL_BADQOSPARAM);
3830 	if (selp->dl_priority < 0 || selp->dl_priority > 7)
3831 		return (DL_BADQOSPARAM);
3832 
3833 	gld->gld_upri = selp->dl_priority;
3834 
3835 	dlokack(q, mp, DL_UDQOS_REQ);
3836 	return (GLDE_OK);
3837 }
3838 
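/*
 * gld_bindack - build the DL_BIND_ACK for a successful bind.  The bound
 * DLSAP address returned to the user is the current MAC address
 * immediately followed by the SAP, for a total of
 * gldm_addrlen + abs(gldm_saplen) bytes.
 */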
3839 static mblk_t *
3840 gld_bindack(queue_t *q, mblk_t *mp)
3841 {
3842 	gld_t *gld = (gld_t *)q->q_ptr;
3843 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3844 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3845 	dl_bind_ack_t *dlp;
3846 	size_t size;
3847 	t_uscalar_t addrlen;
3848 	uchar_t *sapp;
3849 
3850 	addrlen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen);
3851 	size = sizeof (dl_bind_ack_t) + addrlen;
3852 	if ((mp = mexchange(q, mp, size, M_PCPROTO, DL_BIND_ACK)) == NULL)
3853 		return (NULL);
3854 
3855 	dlp = (dl_bind_ack_t *)mp->b_rptr;
3856 	dlp->dl_sap = gld->gld_sap;
3857 	dlp->dl_addr_length = addrlen;
3858 	dlp->dl_addr_offset = sizeof (dl_bind_ack_t);
3859 	dlp->dl_max_conind = 0;
3860 	dlp->dl_xidtest_flg = 0;
3861 
3862 	mac_copy(mac_pvt->curr_macaddr, (uchar_t *)&dlp[1],
3863 	    macinfo->gldm_addrlen);
3864 	sapp = mp->b_rptr + dlp->dl_addr_offset + macinfo->gldm_addrlen;
3865 	*(ushort_t *)sapp = gld->gld_sap;
3866 
3867 	return (mp);
3868 }
3869 
3870 /*
3871  * gld_bind - determine if a SAP is already allocated and whether it is legal
3872  * to do the bind at this time
3873  */
3874 static int
3875 gld_bind(queue_t *q, mblk_t *mp)
3876 {
3877 	ulong_t	sap;
3878 	dl_bind_req_t *dlp;
3879 	gld_t *gld = (gld_t *)q->q_ptr;
3880 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3881 
3882 	ASSERT(gld);
3883 	ASSERT(gld->gld_qptr == RD(q));
3884 
3885 #ifdef GLD_DEBUG
3886 	if (gld_debug & GLDTRACE)
3887 		cmn_err(CE_NOTE, "gld_bind(%p %p)", (void *)q, (void *)mp);
3888 #endif
3889 
3890 	dlp = (dl_bind_req_t *)mp->b_rptr;
3891 	sap = dlp->dl_sap;
3892 
3893 #ifdef GLD_DEBUG
3894 	if (gld_debug & GLDPROT)
3895 		cmn_err(CE_NOTE, "gld_bind: lsap=%lx", sap);
3896 #endif
3897 
3898 	if (gld->gld_state != DL_UNBOUND) {
3899 #ifdef GLD_DEBUG
3900 		if (gld_debug & GLDERRS)
3901 			cmn_err(CE_NOTE, "gld_bind: bound or not attached (%d)",
3902 				gld->gld_state);
3903 #endif
3904 		return (DL_OUTSTATE);
3905 	}
3906 	ASSERT(macinfo);
3907 
3908 	if (dlp->dl_service_mode != DL_CLDLS) {
3909 		return (DL_UNSUPPORTED);
3910 	}
3911 	if (dlp->dl_xidtest_flg & (DL_AUTO_XID | DL_AUTO_TEST)) {
3912 		return (DL_NOAUTO);
3913 	}
3914 
3915 	/*
3916 	 * Check sap validity and decide whether this stream accepts
3917 	 * IEEE 802.2 (LLC) packets.
3918 	 */
3919 	if (sap > ETHERTYPE_MAX)
3920 		return (DL_BADSAP);
3921 
3922 	/*
3923 	 * Decide whether the SAP value selects EtherType encoding/decoding.
3924 	 * For compatibility with monolithic ethernet drivers, the range of
3925 	 * SAP values is different for DL_ETHER media.
3926 	 */
3927 	switch (macinfo->gldm_type) {
3928 	case DL_ETHER:
3929 		gld->gld_ethertype = (sap > ETHERMTU);
3930 		break;
3931 	default:
3932 		gld->gld_ethertype = (sap > GLD_MAX_802_SAP);
3933 		break;
3934 	}
3935 
3936 	/* if we get to here, then the SAP is legal enough */
3937 	GLDM_LOCK(macinfo, RW_WRITER);
3938 	gld->gld_state = DL_IDLE;	/* bound and ready */
3939 	gld->gld_sap = sap;
3940 	gld_set_ipq(gld);
3941 
3942 #ifdef GLD_DEBUG
3943 	if (gld_debug & GLDPROT)
3944 		cmn_err(CE_NOTE, "gld_bind: ok - sap = %d", gld->gld_sap);
3945 #endif
3946 
3947 	/* ACK the BIND */
3948 	mp = gld_bindack(q, mp);
3949 	GLDM_UNLOCK(macinfo);
3950 
3951 	if (mp != NULL) {
3952 		qreply(q, mp);
3953 		return (GLDE_OK);
3954 	}
3955 
3956 	return (DL_SYSERR);
3957 }
3958 
3959 /*
3960  * gld_unbind - perform an unbind of an LSAP or ether type on the stream.
3961  * The stream is still open and can be re-bound.
3962  */
3963 static int
3964 gld_unbind(queue_t *q, mblk_t *mp)
3965 {
3966 	gld_t *gld = (gld_t *)q->q_ptr;
3967 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3968 
3969 	ASSERT(gld);
3970 
3971 #ifdef GLD_DEBUG
3972 	if (gld_debug & GLDTRACE)
3973 		cmn_err(CE_NOTE, "gld_unbind(%p %p)", (void *)q, (void *)mp);
3974 #endif
3975 
3976 	if (gld->gld_state != DL_IDLE) {
3977 #ifdef GLD_DEBUG
3978 		if (gld_debug & GLDERRS)
3979 			cmn_err(CE_NOTE, "gld_unbind: wrong state (%d)",
3980 				gld->gld_state);
3981 #endif
3982 		return (DL_OUTSTATE);
3983 	}
3984 	ASSERT(macinfo);
3985 
3986 	/*
3987 	 * Avoid unbinding (DL_UNBIND_REQ) while FAST/RAW is inside wput.
3988 	 * See comments above gld_start().
3989 	 */
3990 	gld->gld_in_unbind = B_TRUE;	/* disallow wput=>start */
3991 	membar_enter();
3992 	if (gld->gld_wput_count != 0) {
3993 		gld->gld_in_unbind = B_FALSE;
3994 		ASSERT(mp);		/* we didn't come from close */
3995 #ifdef GLD_DEBUG
3996 		if (gld_debug & GLDETRACE)
3997 			cmn_err(CE_NOTE, "gld_unbind: defer for wput");
3998 #endif
3999 		(void) putbq(q, mp);
4000 		qenable(q);		/* try again soon */
4001 		return (GLDE_RETRY);
4002 	}
4003 
4004 	GLDM_LOCK(macinfo, RW_WRITER);
4005 	gld->gld_state = DL_UNBOUND;
4006 	gld->gld_sap = 0;
4007 	gld_set_ipq(gld);
4008 	GLDM_UNLOCK(macinfo);
4009 
4010 	membar_exit();
4011 	gld->gld_in_unbind = B_FALSE;
4012 
4013 	/* mp is NULL if we came from close */
4014 	if (mp) {
4015 		gld_flushqueue(q);	/* flush the queues */
4016 		dlokack(q, mp, DL_UNBIND_REQ);
4017 	}
4018 	return (GLDE_OK);
4019 }
4020 
4021 /*
4022  * gld_inforeq - generate the response to an info request
4023  */
4024 static int
4025 gld_inforeq(queue_t *q, mblk_t *mp)
4026 {
4027 	gld_t		*gld;
4028 	dl_info_ack_t	*dlp;
4029 	int		bufsize;
4030 	glddev_t	*glddev;
4031 	gld_mac_info_t	*macinfo;
4032 	gld_mac_pvt_t	*mac_pvt;
4033 	int		sel_offset = 0;
4034 	int		range_offset = 0;
4035 	int		addr_offset;
4036 	int		addr_length;
4037 	int		sap_length;
4038 	int		brdcst_offset;
4039 	int		brdcst_length;
4040 	gld_vlan_t	*vlan;
4041 	uchar_t		*sapp;
4042 
4043 #ifdef GLD_DEBUG
4044 	if (gld_debug & GLDTRACE)
4045 		cmn_err(CE_NOTE, "gld_inforeq(%p %p)", (void *)q, (void *)mp);
4046 #endif
4047 	gld = (gld_t *)q->q_ptr;
4048 	ASSERT(gld);
4049 	glddev = gld->gld_device;
4050 	ASSERT(glddev);
4051 
4052 	if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) {
4053 		macinfo = gld->gld_mac_info;
4054 		ASSERT(macinfo != NULL);
4055 
4056 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4057 
4058 		addr_length = macinfo->gldm_addrlen;
4059 		sap_length = macinfo->gldm_saplen;
4060 		brdcst_length = macinfo->gldm_addrlen;
4061 	} else {
4062 		addr_length = glddev->gld_addrlen;
4063 		sap_length = glddev->gld_saplen;
4064 		brdcst_length = glddev->gld_addrlen;
4065 	}
4066 
4067 	bufsize = sizeof (dl_info_ack_t);
4068 
4069 	addr_offset = bufsize;
4070 	bufsize += addr_length;
4071 	bufsize += abs(sap_length);
4072 
4073 	brdcst_offset = bufsize;
4074 	bufsize += brdcst_length;
4075 
4076 	if ((vlan = (gld_vlan_t *)gld->gld_vlan) != NULL &&
4077 	    vlan->gldv_id != VLAN_VID_NONE) {
4078 		sel_offset = P2ROUNDUP(bufsize, sizeof (int64_t));
4079 		bufsize = sel_offset + sizeof (dl_qos_cl_sel1_t);
4080 
4081 		range_offset = P2ROUNDUP(bufsize, sizeof (int64_t));
4082 		bufsize = range_offset + sizeof (dl_qos_cl_range1_t);
4083 	}
4084 
4085 	if ((mp = mexchange(q, mp, bufsize, M_PCPROTO, DL_INFO_ACK)) == NULL)
4086 		return (GLDE_OK);	/* nothing more to be done */
4087 
4088 	bzero(mp->b_rptr, bufsize);
4089 
4090 	dlp = (dl_info_ack_t *)mp->b_rptr;
4091 	dlp->dl_primitive = DL_INFO_ACK;
4092 	dlp->dl_version = DL_VERSION_2;
4093 	dlp->dl_service_mode = DL_CLDLS;
4094 	dlp->dl_current_state = gld->gld_state;
4095 	dlp->dl_provider_style = gld->gld_style;
4096 
4097 	if (sel_offset != 0) {
4098 		dl_qos_cl_sel1_t	*selp;
4099 		dl_qos_cl_range1_t	*rangep;
4100 
4101 		ASSERT(range_offset != 0);
4102 
4103 		dlp->dl_qos_offset = sel_offset;
4104 		dlp->dl_qos_length = sizeof (dl_qos_cl_sel1_t);
4105 		dlp->dl_qos_range_offset = range_offset;
4106 		dlp->dl_qos_range_length = sizeof (dl_qos_cl_range1_t);
4107 
4108 		selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + sel_offset);
4109 		selp->dl_qos_type = DL_QOS_CL_SEL1;
4110 		selp->dl_priority = gld->gld_upri;
4111 
4112 		rangep = (dl_qos_cl_range1_t *)(mp->b_rptr + range_offset);
4113 		rangep->dl_qos_type = DL_QOS_CL_RANGE1;
4114 		rangep->dl_priority.dl_min = 0;
4115 		rangep->dl_priority.dl_max = 7;
4116 	}
4117 
4118 	if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) {
4119 		dlp->dl_min_sdu = macinfo->gldm_minpkt;
4120 		dlp->dl_max_sdu = macinfo->gldm_maxpkt;
4121 		dlp->dl_mac_type = macinfo->gldm_type;
4122 		dlp->dl_addr_length = addr_length + abs(sap_length);
4123 		dlp->dl_sap_length = sap_length;
4124 
4125 		if (gld->gld_state == DL_IDLE) {
4126 			/*
4127 			 * If we are bound to a non-LLC SAP on any medium
4128 			 * other than Ethernet, then we need room for a
4129 			 * SNAP header.  So we have to adjust the MTU size
4130 			 * accordingly.  XXX I suppose this should be done
4131 			 * in gldutil.c, but it seems likely that this will
4132 			 * always be true for everything GLD supports but
4133 			 * Ethernet.  Check this if you add another medium.
4134 			 */
4135 			if ((macinfo->gldm_type == DL_TPR ||
4136 			    macinfo->gldm_type == DL_FDDI) &&
4137 			    gld->gld_ethertype)
4138 				dlp->dl_max_sdu -= LLC_SNAP_HDR_LEN;
4139 
4140 			/* copy macaddr and sap */
4141 			dlp->dl_addr_offset = addr_offset;
4142 
4143 			mac_copy(mac_pvt->curr_macaddr, mp->b_rptr +
4144 			    addr_offset, macinfo->gldm_addrlen);
4145 			sapp = mp->b_rptr + addr_offset +
4146 			    macinfo->gldm_addrlen;
4147 			*(ushort_t *)sapp = gld->gld_sap;
4148 		} else {
4149 			dlp->dl_addr_offset = 0;
4150 		}
4151 
4152 		/* copy broadcast addr */
4153 		dlp->dl_brdcst_addr_length = macinfo->gldm_addrlen;
4154 		dlp->dl_brdcst_addr_offset = brdcst_offset;
4155 		mac_copy((caddr_t)macinfo->gldm_broadcast_addr,
4156 		    mp->b_rptr + brdcst_offset, brdcst_length);
4157 	} else {
4158 		/*
4159 		 * No PPA is attached.
4160 		 * The best we can do is use the values provided
4161 		 * by the first mac that called gld_register.
4162 		 */
4163 		dlp->dl_min_sdu = glddev->gld_minsdu;
4164 		dlp->dl_max_sdu = glddev->gld_maxsdu;
4165 		dlp->dl_mac_type = glddev->gld_type;
4166 		dlp->dl_addr_length = addr_length + abs(sap_length);
4167 		dlp->dl_sap_length = sap_length;
4168 		dlp->dl_addr_offset = 0;
4169 		dlp->dl_brdcst_addr_offset = brdcst_offset;
4170 		dlp->dl_brdcst_addr_length = brdcst_length;
4171 		mac_copy((caddr_t)glddev->gld_broadcast,
4172 		    mp->b_rptr + brdcst_offset, brdcst_length);
4173 	}
4174 	qreply(q, mp);
4175 	return (GLDE_OK);
4176 }
4177 
4178 /*
4179  * gld_unitdata (q, mp)
4180  * send a datagram.  Destination address/lsap is in M_PROTO
4181  * message (first mblock), data is in remainder of message.
4182  *
4183  */
4184 static int
4185 gld_unitdata(queue_t *q, mblk_t *mp)
4186 {
4187 	gld_t *gld = (gld_t *)q->q_ptr;
4188 	dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr;
4189 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4190 	size_t	msglen;
4191 	mblk_t	*nmp;
4192 	gld_interface_t *ifp;
4193 	uint32_t start;
4194 	uint32_t stuff;
4195 	uint32_t end;
4196 	uint32_t value;
4197 	uint32_t flags;
4198 	uint32_t upri;
4199 
4200 #ifdef GLD_DEBUG
4201 	if (gld_debug & GLDTRACE)
4202 		cmn_err(CE_NOTE, "gld_unitdata(%p %p)", (void *)q, (void *)mp);
4203 #endif
4204 
4205 	if (gld->gld_state != DL_IDLE) {
4206 #ifdef GLD_DEBUG
4207 		if (gld_debug & GLDERRS)
4208 			cmn_err(CE_NOTE, "gld_unitdata: wrong state (%d)",
4209 				gld->gld_state);
4210 #endif
4211 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4212 		    dlp->dl_dest_addr_length, DL_OUTSTATE, 0);
4213 		return (GLDE_OK);
4214 	}
4215 	ASSERT(macinfo != NULL);
4216 
4217 	if (!MBLKIN(mp, dlp->dl_dest_addr_offset, dlp->dl_dest_addr_length) ||
4218 	    dlp->dl_dest_addr_length !=
4219 	    macinfo->gldm_addrlen + abs(macinfo->gldm_saplen)) {
4220 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4221 		    dlp->dl_dest_addr_length, DL_BADADDR, 0);
4222 		return (GLDE_OK);
4223 	}
4224 
4225 	upri = dlp->dl_priority.dl_max;
4226 
4227 	msglen = msgdsize(mp);
4228 	if (msglen == 0 || msglen > macinfo->gldm_maxpkt) {
4229 #ifdef GLD_DEBUG
4230 		if (gld_debug & GLDERRS)
4231 			cmn_err(CE_NOTE, "gld_unitdata: bad msglen (%d)",
4232 				(int)msglen);
4233 #endif
4234 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4235 		    dlp->dl_dest_addr_length, DL_BADDATA, 0);
4236 		return (GLDE_OK);
4237 	}
4238 
4239 	ASSERT(mp->b_cont != NULL);	/* because msgdsize(mp) is nonzero */
4240 
4241 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
4242 
4243 	/* grab any checksum information that may be present */
4244 	hcksum_retrieve(mp->b_cont, NULL, NULL, &start, &stuff, &end,
4245 	    &value, &flags);
4246 
4247 	/*
4248 	 * Prepend a valid header for transmission
4249 	 */
4250 	if ((nmp = (*ifp->mkunitdata)(gld, mp)) == NULL) {
4251 #ifdef GLD_DEBUG
4252 		if (gld_debug & GLDERRS)
4253 			cmn_err(CE_NOTE, "gld_unitdata: mkunitdata failed.");
4254 #endif
4255 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4256 		    dlp->dl_dest_addr_length, DL_SYSERR, ENOSR);
4257 		return (GLDE_OK);
4258 	}
4259 
4260 	/* apply any checksum information to the first block in the chain */
4261 	(void) hcksum_assoc(nmp, NULL, NULL, start, stuff, end, value,
4262 	    flags, 0);
4263 
4264 	if (gld_start(q, nmp, GLD_WSRV, upri) == GLD_NORESOURCES) {
4265 		qenable(q);
4266 		return (GLDE_RETRY);
4267 	}
4268 
4269 	return (GLDE_OK);
4270 }
4271 
4272 /*
4273  * gldattach(q, mp)
4274  * DLPI DL_ATTACH_REQ
4275  * this attaches the stream to a PPA
4276  */
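/*
 * For a style-2 attach the PPA encodes both the device PPA and the
 * VLAN id: dl_ppa = vid * GLD_VLAN_SCALE + ppa.  For example, assuming
 * GLD_VLAN_SCALE is 1000, dl_ppa 2001 attaches to PPA 1 with VLAN id 2,
 * while dl_ppa 1 attaches to PPA 1 untagged.
 */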
4277 static int
4278 gldattach(queue_t *q, mblk_t *mp)
4279 {
4280 	dl_attach_req_t *at;
4281 	gld_mac_info_t *macinfo;
4282 	gld_t  *gld = (gld_t *)q->q_ptr;
4283 	glddev_t *glddev;
4284 	gld_mac_pvt_t *mac_pvt;
4285 	uint32_t ppa;
4286 	uint32_t vid;
4287 	gld_vlan_t *vlan;
4288 
4289 	at = (dl_attach_req_t *)mp->b_rptr;
4290 
4291 	if (gld->gld_state != DL_UNATTACHED)
4292 		return (DL_OUTSTATE);
4293 
4294 	ASSERT(!gld->gld_mac_info);
4295 
4296 	ppa = at->dl_ppa % GLD_VLAN_SCALE;	/* 0 .. 999	*/
4297 	vid = at->dl_ppa / GLD_VLAN_SCALE;	/* 0 .. 4094	*/
4298 	if (vid > VLAN_VID_MAX)
4299 		return (DL_BADPPA);
4300 
4301 	glddev = gld->gld_device;
4302 	mutex_enter(&glddev->gld_devlock);
4303 	for (macinfo = glddev->gld_mac_next;
4304 	    macinfo != (gld_mac_info_t *)&glddev->gld_mac_next;
4305 	    macinfo = macinfo->gldm_next) {
4306 		int inst;
4307 
4308 		ASSERT(macinfo != NULL);
4309 		if (macinfo->gldm_ppa != ppa)
4310 			continue;
4311 
4312 		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
4313 			continue;	/* this one's not ready yet */
4314 
4315 		/*
4316 		 * VLAN sanity check
4317 		 */
4318 		if (vid != VLAN_VID_NONE && !VLAN_CAPABLE(macinfo)) {
4319 			mutex_exit(&glddev->gld_devlock);
4320 			return (DL_BADPPA);
4321 		}
4322 
4323 		/*
4324 		 * We found the correct PPA, hold the instance
4325 		 */
4326 		inst = ddi_get_instance(macinfo->gldm_devinfo);
4327 		if (inst == -1 || qassociate(q, inst) != 0) {
4328 			mutex_exit(&glddev->gld_devlock);
4329 			return (DL_BADPPA);
4330 		}
4331 
4332 		/* Take the stream off the per-driver-class list */
4333 		gldremque(gld);
4334 
4335 		/*
4336 		 * We must hold the lock to prevent multiple calls
4337 		 * to the reset and start routines.
4338 		 */
4339 		GLDM_LOCK(macinfo, RW_WRITER);
4340 
4341 		gld->gld_mac_info = macinfo;
4342 
4343 		if (macinfo->gldm_send_tagged != NULL)
4344 			gld->gld_send = macinfo->gldm_send_tagged;
4345 		else
4346 			gld->gld_send = macinfo->gldm_send;
4347 
4348 		if ((vlan = gld_get_vlan(macinfo, vid)) == NULL) {
4349 			GLDM_UNLOCK(macinfo);
4350 			gldinsque(gld, glddev->gld_str_prev);
4351 			mutex_exit(&glddev->gld_devlock);
4352 			(void) qassociate(q, -1);
4353 			return (DL_BADPPA);
4354 		}
4355 
4356 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4357 		if (!mac_pvt->started) {
4358 			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
4359 				gld_rem_vlan(vlan);
4360 				GLDM_UNLOCK(macinfo);
4361 				gldinsque(gld, glddev->gld_str_prev);
4362 				mutex_exit(&glddev->gld_devlock);
4363 				dlerrorack(q, mp, DL_ATTACH_REQ, DL_SYSERR,
4364 				    EIO);
4365 				(void) qassociate(q, -1);
4366 				return (GLDE_OK);
4367 			}
4368 		}
4369 
4370 		gld->gld_vlan = vlan;
4371 		vlan->gldv_nstreams++;
4372 		gldinsque(gld, vlan->gldv_str_prev);
4373 		gld->gld_state = DL_UNBOUND;
4374 		GLDM_UNLOCK(macinfo);
4375 
4376 #ifdef GLD_DEBUG
4377 		if (gld_debug & GLDPROT) {
4378 			cmn_err(CE_NOTE, "gldattach(%p, %p, PPA = %d)",
4379 			    (void *)q, (void *)mp, macinfo->gldm_ppa);
4380 		}
4381 #endif
4382 		mutex_exit(&glddev->gld_devlock);
4383 		dlokack(q, mp, DL_ATTACH_REQ);
4384 		return (GLDE_OK);
4385 	}
4386 	mutex_exit(&glddev->gld_devlock);
4387 	return (DL_BADPPA);
4388 }
4389 
4390 /*
4391  * gldunattach(q, mp)
4392  * DLPI DL_DETACH_REQ
4393  * detaches the mac layer from the stream
4394  */
4395 int
4396 gldunattach(queue_t *q, mblk_t *mp)
4397 {
4398 	gld_t  *gld = (gld_t *)q->q_ptr;
4399 	glddev_t *glddev = gld->gld_device;
4400 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4401 	int	state = gld->gld_state;
4402 	int	i;
4403 	gld_mac_pvt_t *mac_pvt;
4404 	gld_vlan_t *vlan;
4405 	boolean_t phys_off;
4406 	boolean_t mult_off;
4407 	int op = GLD_MAC_PROMISC_NOOP;
4408 
4409 	if (state != DL_UNBOUND)
4410 		return (DL_OUTSTATE);
4411 
4412 	ASSERT(macinfo != NULL);
4413 	ASSERT(gld->gld_sap == 0);
4414 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4415 
4416 #ifdef GLD_DEBUG
4417 	if (gld_debug & GLDPROT) {
4418 		cmn_err(CE_NOTE, "gldunattach(%p, %p, PPA = %d)",
4419 		    (void *)q, (void *)mp, macinfo->gldm_ppa);
4420 	}
4421 #endif
4422 
4423 	GLDM_LOCK(macinfo, RW_WRITER);
4424 
4425 	if (gld->gld_mcast) {
4426 		for (i = 0; i < gld->gld_multicnt; i++) {
4427 			gld_mcast_t *mcast;
4428 
4429 			if ((mcast = gld->gld_mcast[i]) != NULL) {
4430 				ASSERT(mcast->gldm_refcnt);
4431 				gld_send_disable_multi(macinfo, mcast);
4432 			}
4433 		}
4434 		kmem_free(gld->gld_mcast,
4435 		    sizeof (gld_mcast_t *) * gld->gld_multicnt);
4436 		gld->gld_mcast = NULL;
4437 		gld->gld_multicnt = 0;
4438 	}
4439 
4440 	/* decide if we need to turn off any promiscuity */
4441 	phys_off = (gld->gld_flags & GLD_PROM_PHYS &&
4442 	    --mac_pvt->nprom == 0);
4443 	mult_off = (gld->gld_flags & GLD_PROM_MULT &&
4444 	    --mac_pvt->nprom_multi == 0);
4445 
4446 	gld->gld_flags &= ~(GLD_PROM_PHYS | GLD_PROM_SAP | GLD_PROM_MULT);
4447 
4448 	if (phys_off) {
4449 		op = (mac_pvt->nprom_multi == 0) ? GLD_MAC_PROMISC_NONE :
4450 		    GLD_MAC_PROMISC_MULTI;
4451 	} else if (mult_off) {
4452 		op = (mac_pvt->nprom == 0) ? GLD_MAC_PROMISC_NONE :
4453 		    GLD_MAC_PROMISC_NOOP;	/* phys overrides multi */
4454 	}
4455 
4456 	if (op != GLD_MAC_PROMISC_NOOP)
4457 		(void) (*macinfo->gldm_set_promiscuous)(macinfo, op);
4458 
4459 	GLDM_UNLOCK(macinfo);
4460 
4461 	if (phys_off)
4462 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL);
4463 
4464 	/*
4465 	 * We need to hold both locks when modifying the mac stream list
4466 	 * to protect findminor as well as everyone else.
4467 	 */
4468 	mutex_enter(&glddev->gld_devlock);
4469 	GLDM_LOCK(macinfo, RW_WRITER);
4470 
4471 	/* disassociate this stream with its vlan and underlying mac */
4472 	gldremque(gld);
4473 
4474 	vlan = (gld_vlan_t *)gld->gld_vlan;
4475 	if (--vlan->gldv_nstreams == 0) {
4476 		gld_rem_vlan(vlan);
4477 		gld->gld_vlan = NULL;
4478 	}
4479 
4480 	gld->gld_mac_info = NULL;
4481 	gld->gld_state = DL_UNATTACHED;
4482 
4483 	/* cleanup mac layer if last vlan */
4484 	if (mac_pvt->nvlan == 0) {
4485 		gld_stop_mac(macinfo);
4486 		macinfo->gldm_GLD_flags &= ~GLD_INTR_WAIT;
4487 	}
4488 
4489 	/* make sure no references to this gld for gld_v0_sched */
4490 	if (mac_pvt->last_sched == gld)
4491 		mac_pvt->last_sched = NULL;
4492 
4493 	GLDM_UNLOCK(macinfo);
4494 
4495 	/* put the stream on the unattached Style 2 list */
4496 	gldinsque(gld, glddev->gld_str_prev);
4497 
4498 	mutex_exit(&glddev->gld_devlock);
4499 
4500 	/* There will be no mp if we were called from close */
4501 	if (mp) {
4502 		dlokack(q, mp, DL_DETACH_REQ);
4503 	}
4504 	if (gld->gld_style == DL_STYLE2)
4505 		(void) qassociate(q, -1);
4506 	return (GLDE_OK);
4507 }
4508 
4509 /*
4510  * gld_enable_multi (q, mp)
4511  * Enables a multicast address on the stream.  If the mac layer
4512  * isn't yet enabled for this address, enable it at that level as well.
4513  */
4514 static int
4515 gld_enable_multi(queue_t *q, mblk_t *mp)
4516 {
4517 	gld_t  *gld = (gld_t *)q->q_ptr;
4518 	glddev_t *glddev;
4519 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4520 	unsigned char *maddr;
4521 	dl_enabmulti_req_t *multi;
4522 	gld_mcast_t *mcast;
4523 	int	i, rc;
4524 	gld_mac_pvt_t *mac_pvt;
4525 
4526 #ifdef GLD_DEBUG
4527 	if (gld_debug & GLDPROT) {
4528 		cmn_err(CE_NOTE, "gld_enable_multi(%p, %p)", (void *)q,
4529 		    (void *)mp);
4530 	}
4531 #endif
4532 
4533 	if (gld->gld_state == DL_UNATTACHED)
4534 		return (DL_OUTSTATE);
4535 
4536 	ASSERT(macinfo != NULL);
4537 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4538 
4539 	if (macinfo->gldm_set_multicast == NULL) {
4540 		return (DL_UNSUPPORTED);
4541 	}
4542 
4543 	multi = (dl_enabmulti_req_t *)mp->b_rptr;
4544 
4545 	if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) ||
4546 	    multi->dl_addr_length != macinfo->gldm_addrlen)
4547 		return (DL_BADADDR);
4548 
4549 	/* request appears to be valid */
4550 
4551 	glddev = mac_pvt->major_dev;
4552 	ASSERT(glddev == gld->gld_device);
4553 
4554 	maddr = mp->b_rptr + multi->dl_addr_offset;
4555 
4556 	/*
4557 	 * The multicast addresses live in a per-device table, along
4558 	 * with a reference count.  Each stream has a table that
4559 	 * points to entries in the device table, with the reference
4560 	 * count reflecting the number of streams pointing at it.  If
4561 	 * this multicast address is already in the per-device table,
4562 	 * all we have to do is point at it.
4563 	 */
4564 	GLDM_LOCK(macinfo, RW_WRITER);
4565 
4566 	/* does this address appear in current table? */
4567 	if (gld->gld_mcast == NULL) {
4568 		/* no mcast addresses -- allocate table */
4569 		gld->gld_mcast = GETSTRUCT(gld_mcast_t *,
4570 					    glddev->gld_multisize);
4571 		if (gld->gld_mcast == NULL) {
4572 			GLDM_UNLOCK(macinfo);
4573 			dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR);
4574 			return (GLDE_OK);
4575 		}
4576 		gld->gld_multicnt = glddev->gld_multisize;
4577 	} else {
4578 		for (i = 0; i < gld->gld_multicnt; i++) {
4579 			if (gld->gld_mcast[i] &&
4580 			    mac_eq(gld->gld_mcast[i]->gldm_addr,
4581 				maddr, macinfo->gldm_addrlen)) {
4582 				/* this is a match -- just succeed */
4583 				ASSERT(gld->gld_mcast[i]->gldm_refcnt);
4584 				GLDM_UNLOCK(macinfo);
4585 				dlokack(q, mp, DL_ENABMULTI_REQ);
4586 				return (GLDE_OK);
4587 			}
4588 		}
4589 	}
4590 
4591 	/*
4592 	 * it wasn't in the stream so check to see if the mac layer has it
4593 	 */
4594 	mcast = NULL;
4595 	if (mac_pvt->mcast_table == NULL) {
4596 		mac_pvt->mcast_table = GETSTRUCT(gld_mcast_t,
4597 						glddev->gld_multisize);
4598 		if (mac_pvt->mcast_table == NULL) {
4599 			GLDM_UNLOCK(macinfo);
4600 			dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR);
4601 			return (GLDE_OK);
4602 		}
4603 	} else {
4604 		for (i = 0; i < glddev->gld_multisize; i++) {
4605 			if (mac_pvt->mcast_table[i].gldm_refcnt &&
4606 			    mac_eq(mac_pvt->mcast_table[i].gldm_addr,
4607 			    maddr, macinfo->gldm_addrlen)) {
4608 				mcast = &mac_pvt->mcast_table[i];
4609 				break;
4610 			}
4611 		}
4612 	}
4613 	if (mcast == NULL) {
4614 		/* not in mac layer -- find an empty mac slot to fill in */
4615 		for (i = 0; i < glddev->gld_multisize; i++) {
4616 			if (mac_pvt->mcast_table[i].gldm_refcnt == 0) {
4617 				mcast = &mac_pvt->mcast_table[i];
4618 				mac_copy(maddr, mcast->gldm_addr,
4619 				    macinfo->gldm_addrlen);
4620 				break;
4621 			}
4622 		}
4623 	}
4624 	if (mcast == NULL) {
4625 		/* couldn't get a mac layer slot */
4626 		GLDM_UNLOCK(macinfo);
4627 		return (DL_TOOMANY);
4628 	}
4629 
4630 	/* now we have a mac layer slot in mcast -- get a stream slot */
4631 	for (i = 0; i < gld->gld_multicnt; i++) {
4632 		if (gld->gld_mcast[i] != NULL)
4633 			continue;
4634 		/* found an empty slot */
4635 		if (!mcast->gldm_refcnt) {
4636 			/* set mcast in hardware */
4637 			unsigned char cmaddr[GLD_MAX_ADDRLEN];
4638 
4639 			ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
4640 			cmac_copy(maddr, cmaddr,
4641 			    macinfo->gldm_addrlen, macinfo);
4642 
4643 			rc = (*macinfo->gldm_set_multicast)
4644 			    (macinfo, cmaddr, GLD_MULTI_ENABLE);
4645 			if (rc == GLD_NOTSUPPORTED) {
4646 				GLDM_UNLOCK(macinfo);
4647 				return (DL_NOTSUPPORTED);
4648 			} else if (rc == GLD_NORESOURCES) {
4649 				GLDM_UNLOCK(macinfo);
4650 				return (DL_TOOMANY);
4651 			} else if (rc == GLD_BADARG) {
4652 				GLDM_UNLOCK(macinfo);
4653 				return (DL_BADADDR);
4654 			} else if (rc == GLD_RETRY) {
4655 				/*
4656 				 * The putbq and gld_xwait must be
4657 				 * within the lock to prevent races
4658 				 * with gld_sched.
4659 				 */
4660 				(void) putbq(q, mp);
4661 				gld->gld_xwait = B_TRUE;
4662 				GLDM_UNLOCK(macinfo);
4663 				return (GLDE_RETRY);
4664 			} else if (rc != GLD_SUCCESS) {
4665 				GLDM_UNLOCK(macinfo);
4666 				dlerrorack(q, mp, DL_ENABMULTI_REQ,
4667 				    DL_SYSERR, EIO);
4668 				return (GLDE_OK);
4669 			}
4670 		}
4671 		gld->gld_mcast[i] = mcast;
4672 		mcast->gldm_refcnt++;
4673 		GLDM_UNLOCK(macinfo);
4674 		dlokack(q, mp, DL_ENABMULTI_REQ);
4675 		return (GLDE_OK);
4676 	}
4677 
4678 	/* couldn't get a stream slot */
4679 	GLDM_UNLOCK(macinfo);
4680 	return (DL_TOOMANY);
4681 }
4682 
4683 
4684 /*
4685  * gld_disable_multi (q, mp)
4686  * Disable the multicast address on the stream.  If this is the last
4687  * reference at the mac layer, disable it there as well.
4688  */
4689 static int
4690 gld_disable_multi(queue_t *q, mblk_t *mp)
4691 {
4692 	gld_t  *gld;
4693 	gld_mac_info_t *macinfo;
4694 	unsigned char *maddr;
4695 	dl_disabmulti_req_t *multi;
4696 	int i;
4697 	gld_mcast_t *mcast;
4698 
4699 #ifdef GLD_DEBUG
4700 	if (gld_debug & GLDPROT) {
4701 		cmn_err(CE_NOTE, "gld_disable_multi(%p, %p)", (void *)q,
4702 		    (void *)mp);
4703 	}
4704 #endif
4705 
4706 	gld = (gld_t *)q->q_ptr;
4707 	if (gld->gld_state == DL_UNATTACHED)
4708 		return (DL_OUTSTATE);
4709 
4710 	macinfo = gld->gld_mac_info;
4711 	ASSERT(macinfo != NULL);
4712 	if (macinfo->gldm_set_multicast == NULL) {
4713 		return (DL_UNSUPPORTED);
4714 	}
4715 
4716 	multi = (dl_disabmulti_req_t *)mp->b_rptr;
4717 
4718 	if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) ||
4719 	    multi->dl_addr_length != macinfo->gldm_addrlen)
4720 		return (DL_BADADDR);
4721 
4722 	maddr = mp->b_rptr + multi->dl_addr_offset;
4723 
4724 	/* request appears to be valid */
4725 	/* does this address appear in current table? */
4726 	GLDM_LOCK(macinfo, RW_WRITER);
4727 	if (gld->gld_mcast != NULL) {
4728 		for (i = 0; i < gld->gld_multicnt; i++)
4729 			if (((mcast = gld->gld_mcast[i]) != NULL) &&
4730 			    mac_eq(mcast->gldm_addr,
4731 			    maddr, macinfo->gldm_addrlen)) {
4732 				ASSERT(mcast->gldm_refcnt);
4733 				gld_send_disable_multi(macinfo, mcast);
4734 				gld->gld_mcast[i] = NULL;
4735 				GLDM_UNLOCK(macinfo);
4736 				dlokack(q, mp, DL_DISABMULTI_REQ);
4737 				return (GLDE_OK);
4738 			}
4739 	}
4740 	GLDM_UNLOCK(macinfo);
4741 	return (DL_NOTENAB); /* not an enabled address */
4742 }
4743 
4744 /*
4745  * gld_send_disable_multi(macinfo, mcast)
4746  * this function is used to disable a multicast address when its reference
4747  * count drops to zero.  The disable request is then passed on to the
4748  * device via the gldm_set_multicast() entry point.
4749  */
4750 static void
4751 gld_send_disable_multi(gld_mac_info_t *macinfo, gld_mcast_t *mcast)
4752 {
4753 	ASSERT(macinfo != NULL);
4754 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
4755 	ASSERT(mcast != NULL);
4756 	ASSERT(mcast->gldm_refcnt);
4757 
4758 	if (!mcast->gldm_refcnt) {
4759 		return;			/* "cannot happen" */
4760 	}
4761 
4762 	if (--mcast->gldm_refcnt > 0) {
4763 		return;
4764 	}
4765 
4766 	/*
4767 	 * This must be converted from canonical form to device form.
4768 	 * The refcnt is now zero so we can trash the data.
4769 	 */
4770 	if (macinfo->gldm_options & GLDOPT_CANONICAL_ADDR)
4771 		gld_bitreverse(mcast->gldm_addr, macinfo->gldm_addrlen);
4772 
4773 	/* XXX Ought to check for GLD_NORESOURCES or GLD_FAILURE */
4774 	(void) (*macinfo->gldm_set_multicast)
4775 	    (macinfo, mcast->gldm_addr, GLD_MULTI_DISABLE);
4776 }
4777 
4778 /*
4779  * gld_promisc (q, mp, req, on)
4780  *	enable or disable the use of promiscuous mode with the hardware
4781  */
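/*
 * The MAC is only asked to change its promiscuity when the per-device
 * reference counts (nprom for physical, nprom_multi for multicast)
 * cross zero.  Physical promiscuity subsumes multicast promiscuity, so
 * while any stream still holds DL_PROMISC_PHYS, multicast transitions
 * need no MAC call at all.
 */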
4782 static int
4783 gld_promisc(queue_t *q, mblk_t *mp, t_uscalar_t req, boolean_t on)
4784 {
4785 	gld_t *gld;
4786 	gld_mac_info_t *macinfo;
4787 	gld_mac_pvt_t *mac_pvt;
4788 	gld_vlan_t *vlan;
4789 	union DL_primitives *prim;
4790 	int macrc = GLD_SUCCESS;
4791 	int dlerr = GLDE_OK;
4792 	int op = GLD_MAC_PROMISC_NOOP;
4793 
4794 #ifdef GLD_DEBUG
4795 	if (gld_debug & GLDTRACE)
4796 		cmn_err(CE_NOTE, "gld_promisc(%p, %p, %d, %d)",
4797 		    (void *)q, (void *)mp, req, on);
4798 #endif
4799 
4800 	ASSERT(mp != NULL);
4801 	prim = (union DL_primitives *)mp->b_rptr;
4802 
4803 	/* XXX I think spec allows promisc in unattached state */
4804 	gld = (gld_t *)q->q_ptr;
4805 	if (gld->gld_state == DL_UNATTACHED)
4806 		return (DL_OUTSTATE);
4807 
4808 	macinfo = gld->gld_mac_info;
4809 	ASSERT(macinfo != NULL);
4810 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4811 
4812 	vlan = (gld_vlan_t *)gld->gld_vlan;
4813 	ASSERT(vlan != NULL);
4814 
4815 	GLDM_LOCK(macinfo, RW_WRITER);
4816 
4817 	/*
4818 	 * Work out what request (if any) has to be made to the MAC layer
4819 	 */
4820 	if (on) {
4821 		switch (prim->promiscon_req.dl_level) {
4822 		default:
4823 			dlerr = DL_UNSUPPORTED;	/* this is an error */
4824 			break;
4825 
4826 		case DL_PROMISC_PHYS:
4827 			if (mac_pvt->nprom == 0)
4828 				op = GLD_MAC_PROMISC_PHYS;
4829 			break;
4830 
4831 		case DL_PROMISC_MULTI:
4832 			if (mac_pvt->nprom_multi == 0)
4833 				if (mac_pvt->nprom == 0)
4834 					op = GLD_MAC_PROMISC_MULTI;
4835 			break;
4836 
4837 		case DL_PROMISC_SAP:
4838 			/* We can do this without reference to the MAC */
4839 			break;
4840 		}
4841 	} else {
4842 		switch (prim->promiscoff_req.dl_level) {
4843 		default:
4844 			dlerr = DL_UNSUPPORTED;	/* this is an error */
4845 			break;
4846 
4847 		case DL_PROMISC_PHYS:
4848 			if (!(gld->gld_flags & GLD_PROM_PHYS))
4849 				dlerr = DL_NOTENAB;
4850 			else if (mac_pvt->nprom == 1)
4851 				if (mac_pvt->nprom_multi)
4852 					op = GLD_MAC_PROMISC_MULTI;
4853 				else
4854 					op = GLD_MAC_PROMISC_NONE;
4855 			break;
4856 
4857 		case DL_PROMISC_MULTI:
4858 			if (!(gld->gld_flags & GLD_PROM_MULT))
4859 				dlerr = DL_NOTENAB;
4860 			else if (mac_pvt->nprom_multi == 1)
4861 				if (mac_pvt->nprom == 0)
4862 					op = GLD_MAC_PROMISC_NONE;
4863 			break;
4864 
4865 		case DL_PROMISC_SAP:
4866 			if (!(gld->gld_flags & GLD_PROM_SAP))
4867 				dlerr = DL_NOTENAB;
4868 
4869 			/* We can do this without reference to the MAC */
4870 			break;
4871 		}
4872 	}
4873 
4874 	/*
4875 	 * The request was invalid in some way so no need to continue.
4876 	 */
4877 	if (dlerr != GLDE_OK) {
4878 		GLDM_UNLOCK(macinfo);
4879 		return (dlerr);
4880 	}
4881 
4882 	/*
4883 	 * Issue the request to the MAC layer, if required
4884 	 */
4885 	if (op != GLD_MAC_PROMISC_NOOP) {
4886 		macrc = (*macinfo->gldm_set_promiscuous)(macinfo, op);
4887 	}
4888 
4889 	/*
4890 	 * On success, update the appropriate flags & refcounts
4891 	 */
4892 	if (macrc == GLD_SUCCESS) {
4893 		if (on) {
4894 			switch (prim->promiscon_req.dl_level) {
4895 			case DL_PROMISC_PHYS:
4896 				mac_pvt->nprom++;
4897 				gld->gld_flags |= GLD_PROM_PHYS;
4898 				break;
4899 
4900 			case DL_PROMISC_MULTI:
4901 				mac_pvt->nprom_multi++;
4902 				gld->gld_flags |= GLD_PROM_MULT;
4903 				break;
4904 
4905 			case DL_PROMISC_SAP:
4906 				gld->gld_flags |= GLD_PROM_SAP;
4907 				break;
4908 
4909 			default:
4910 				break;
4911 			}
4912 		} else {
4913 			switch (prim->promiscoff_req.dl_level) {
4914 			case DL_PROMISC_PHYS:
4915 				mac_pvt->nprom--;
4916 				gld->gld_flags &= ~GLD_PROM_PHYS;
4917 				break;
4918 
4919 			case DL_PROMISC_MULTI:
4920 				mac_pvt->nprom_multi--;
4921 				gld->gld_flags &= ~GLD_PROM_MULT;
4922 				break;
4923 
4924 			case DL_PROMISC_SAP:
4925 				gld->gld_flags &= ~GLD_PROM_SAP;
4926 				break;
4927 
4928 			default:
4929 				break;
4930 			}
4931 		}
4932 	} else if (macrc == GLD_RETRY) {
4933 		/*
4934 		 * The putbq and gld_xwait must be within the lock to
4935 		 * prevent races with gld_sched.
4936 		 */
4937 		(void) putbq(q, mp);
4938 		gld->gld_xwait = B_TRUE;
4939 	}
4940 
4941 	/*
4942 	 * Update VLAN IPQ status -- it may have changed
4943 	 */
4944 	if (gld->gld_flags & (GLD_PROM_SAP | GLD_PROM_MULT | GLD_PROM_PHYS))
4945 		vlan->gldv_ipq_flags |= IPQ_FORBIDDEN;
4946 	else
4947 		vlan->gldv_ipq_flags &= ~IPQ_FORBIDDEN;
4948 
4949 	GLDM_UNLOCK(macinfo);
4950 
4951 	/*
4952 	 * Finally, decide how to reply.
4953 	 *
4954 	 * If <macrc> is not GLD_SUCCESS, the request was put to the MAC
4955 	 * layer but failed.  In such cases, we can return a DL_* error
4956 	 * code and let the caller send an error-ack reply upstream, or
4957 	 * we can send a reply here and then return GLDE_OK so that the
4958 	 * caller doesn't also respond.
4959 	 *
4960 	 * If physical-promiscuous mode was (successfully) switched on or
4961 	 * off, send a notification (DL_NOTIFY_IND) to anyone interested.
4962 	 */
4963 	switch (macrc) {
4964 	case GLD_NOTSUPPORTED:
4965 		return (DL_NOTSUPPORTED);
4966 
4967 	case GLD_NORESOURCES:
4968 		dlerrorack(q, mp, req, DL_SYSERR, ENOSR);
4969 		return (GLDE_OK);
4970 
4971 	case GLD_RETRY:
4972 		return (GLDE_RETRY);
4973 
4974 	default:
4975 		dlerrorack(q, mp, req, DL_SYSERR, EIO);
4976 		return (GLDE_OK);
4977 
4978 	case GLD_SUCCESS:
4979 		dlokack(q, mp, req);
4980 		break;
4981 	}
4982 
4983 	switch (op) {
4984 	case GLD_MAC_PROMISC_NOOP:
4985 		break;
4986 
4987 	case GLD_MAC_PROMISC_PHYS:
4988 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_ON_PHYS, NULL);
4989 		break;
4990 
4991 	default:
4992 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL);
4993 		break;
4994 	}
4995 
4996 	return (GLDE_OK);
4997 }
4998 
4999 /*
5000  * gld_physaddr()
5001  *	get the current or factory physical address value
5002  */
5003 static int
5004 gld_physaddr(queue_t *q, mblk_t *mp)
5005 {
5006 	gld_t *gld = (gld_t *)q->q_ptr;
5007 	gld_mac_info_t *macinfo;
5008 	union DL_primitives *prim = (union DL_primitives *)mp->b_rptr;
5009 	unsigned char addr[GLD_MAX_ADDRLEN];
5010 
5011 	if (gld->gld_state == DL_UNATTACHED)
5012 		return (DL_OUTSTATE);
5013 
5014 	macinfo = (gld_mac_info_t *)gld->gld_mac_info;
5015 	ASSERT(macinfo != NULL);
5016 	ASSERT(macinfo->gldm_addrlen <= GLD_MAX_ADDRLEN);
5017 
5018 	switch (prim->physaddr_req.dl_addr_type) {
5019 	case DL_FACT_PHYS_ADDR:
5020 		mac_copy((caddr_t)macinfo->gldm_vendor_addr,
5021 		    (caddr_t)addr, macinfo->gldm_addrlen);
5022 		break;
5023 	case DL_CURR_PHYS_ADDR:
5024 		/* make a copy so we don't hold the lock across qreply */
5025 		GLDM_LOCK(macinfo, RW_WRITER);
5026 		mac_copy((caddr_t)
5027 		    ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
5028 		    (caddr_t)addr, macinfo->gldm_addrlen);
5029 		GLDM_UNLOCK(macinfo);
5030 		break;
5031 	default:
5032 		return (DL_BADPRIM);
5033 	}
5034 	dlphysaddrack(q, mp, (caddr_t)addr, macinfo->gldm_addrlen);
5035 	return (GLDE_OK);
5036 }
5037 
5038 /*
5039  * gld_setaddr()
5040  *	change the hardware's physical address to a user specified value
5041  */
5042 static int
5043 gld_setaddr(queue_t *q, mblk_t *mp)
5044 {
5045 	gld_t *gld = (gld_t *)q->q_ptr;
5046 	gld_mac_info_t *macinfo;
5047 	gld_mac_pvt_t *mac_pvt;
5048 	union DL_primitives *prim = (union DL_primitives *)mp->b_rptr;
5049 	unsigned char *addr;
5050 	unsigned char cmaddr[GLD_MAX_ADDRLEN];
5051 	int rc;
5052 	gld_vlan_t *vlan;
5053 
5054 	if (gld->gld_state == DL_UNATTACHED)
5055 		return (DL_OUTSTATE);
5056 
5057 	vlan = (gld_vlan_t *)gld->gld_vlan;
5058 	ASSERT(vlan != NULL);
5059 
5060 	if (vlan->gldv_id != VLAN_VID_NONE)
5061 		return (DL_NOTSUPPORTED);
5062 
5063 	macinfo = (gld_mac_info_t *)gld->gld_mac_info;
5064 	ASSERT(macinfo != NULL);
5065 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5066 
5067 	if (!MBLKIN(mp, prim->set_physaddr_req.dl_addr_offset,
5068 	    prim->set_physaddr_req.dl_addr_length) ||
5069 	    prim->set_physaddr_req.dl_addr_length != macinfo->gldm_addrlen)
5070 		return (DL_BADADDR);
5071 
5072 	GLDM_LOCK(macinfo, RW_WRITER);
5073 
5074 	/* now do the set at the hardware level */
5075 	addr = mp->b_rptr + prim->set_physaddr_req.dl_addr_offset;
5076 	ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
5077 	cmac_copy(addr, cmaddr, macinfo->gldm_addrlen, macinfo);
5078 
5079 	rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr);
5080 	if (rc == GLD_SUCCESS)
5081 		mac_copy(addr, mac_pvt->curr_macaddr,
5082 		    macinfo->gldm_addrlen);
5083 
5084 	GLDM_UNLOCK(macinfo);
5085 
5086 	switch (rc) {
5087 	case GLD_SUCCESS:
5088 		break;
5089 	case GLD_NOTSUPPORTED:
5090 		return (DL_NOTSUPPORTED);
5091 	case GLD_BADARG:
5092 		return (DL_BADADDR);
5093 	case GLD_NORESOURCES:
5094 		dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, ENOSR);
5095 		return (GLDE_OK);
5096 	default:
5097 		dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, EIO);
5098 		return (GLDE_OK);
5099 	}
5100 
5101 	gld_notify_ind(macinfo, DL_NOTE_PHYS_ADDR, NULL);
5102 
5103 	dlokack(q, mp, DL_SET_PHYS_ADDR_REQ);
5104 	return (GLDE_OK);
5105 }
5106 
5107 int
5108 gld_get_statistics(queue_t *q, mblk_t *mp)
5109 {
5110 	dl_get_statistics_ack_t *dlsp;
5111 	gld_t  *gld = (gld_t *)q->q_ptr;
5112 	gld_mac_info_t *macinfo = gld->gld_mac_info;
5113 	gld_mac_pvt_t *mac_pvt;
5114 
5115 	if (gld->gld_state == DL_UNATTACHED)
5116 		return (DL_OUTSTATE);
5117 
5118 	ASSERT(macinfo != NULL);
5119 
5120 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5121 	(void) gld_update_kstat(mac_pvt->kstatp, KSTAT_READ);
5122 
5123 	mp = mexchange(q, mp, DL_GET_STATISTICS_ACK_SIZE +
5124 	    sizeof (struct gldkstats), M_PCPROTO, DL_GET_STATISTICS_ACK);
5125 
5126 	if (mp == NULL)
5127 		return (GLDE_OK);	/* mexchange already sent merror */
5128 
5129 	dlsp = (dl_get_statistics_ack_t *)mp->b_rptr;
5130 	dlsp->dl_primitive = DL_GET_STATISTICS_ACK;
5131 	dlsp->dl_stat_length = sizeof (struct gldkstats);
5132 	dlsp->dl_stat_offset = DL_GET_STATISTICS_ACK_SIZE;
5133 
5134 	GLDM_LOCK(macinfo, RW_WRITER);
5135 	bcopy(mac_pvt->kstatp->ks_data,
5136 	    (mp->b_rptr + DL_GET_STATISTICS_ACK_SIZE),
5137 	    sizeof (struct gldkstats));
5138 	GLDM_UNLOCK(macinfo);
5139 
5140 	qreply(q, mp);
5141 	return (GLDE_OK);
5142 }
5143 
5144 /* =================================================== */
5145 /* misc utilities, some requiring various mutexes held */
5146 /* =================================================== */
5147 
5148 /*
5149  * Initialize and start the driver.
5150  */
5151 static int
5152 gld_start_mac(gld_mac_info_t *macinfo)
5153 {
5154 	int	rc;
5155 	unsigned char cmaddr[GLD_MAX_ADDRLEN];
5156 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5157 
5158 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5159 	ASSERT(!mac_pvt->started);
5160 
5161 	rc = (*macinfo->gldm_reset)(macinfo);
5162 	if (rc != GLD_SUCCESS)
5163 		return (GLD_FAILURE);
5164 
5165 	/* set the addr after we reset the device */
5166 	ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
5167 	cmac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)
5168 	    ->curr_macaddr, cmaddr, macinfo->gldm_addrlen, macinfo);
5169 
5170 	rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr);
5171 	ASSERT(rc != GLD_BADARG);  /* this address was good before */
5172 	if (rc != GLD_SUCCESS && rc != GLD_NOTSUPPORTED)
5173 		return (GLD_FAILURE);
5174 
5175 	rc = (*macinfo->gldm_start)(macinfo);
5176 	if (rc != GLD_SUCCESS)
5177 		return (GLD_FAILURE);
5178 
5179 	mac_pvt->started = B_TRUE;
5180 	return (GLD_SUCCESS);
5181 }
5182 
5183 /*
5184  * Stop the driver.
5185  */
5186 static void
5187 gld_stop_mac(gld_mac_info_t *macinfo)
5188 {
5189 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5190 
5191 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5192 	ASSERT(mac_pvt->started);
5193 
5194 	(void) (*macinfo->gldm_stop)(macinfo);
5195 
5196 	mac_pvt->started = B_FALSE;
5197 }
5198 
5199 
5200 /*
5201  * gld_set_ipq will set a pointer to the queue which is bound to the
5202  * IP sap if:
5203  * o the device type is ethernet or IPoIB.
5204  * o there is no stream in SAP promiscuous mode.
5205  * o there is exactly one stream bound to the IP sap.
5206  * o the stream is in "fastpath" mode.
5207  */
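/*
 * The "ipq" shortcut lets gld_recv() pass inbound IP (and, via
 * gldv_ipv6q, IPv6) packets straight to the one bound fastpath stream;
 * the conditions above ensure the shortcut is only taken when that
 * single eligible consumer is unambiguous.
 */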
5208 static void
5209 gld_set_ipq(gld_t *gld)
5210 {
5211 	gld_vlan_t	*vlan;
5212 	gld_mac_info_t	*macinfo = gld->gld_mac_info;
5213 	gld_t		*ip_gld = NULL;
5214 	uint_t		ipq_candidates = 0;
5215 	gld_t		*ipv6_gld = NULL;
5216 	uint_t		ipv6q_candidates = 0;
5217 
5218 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5219 
5220 	/* The ipq code in gld_recv() is intimate with ethernet/IPoIB */
5221 	if (((macinfo->gldm_type != DL_ETHER) &&
5222 	    (macinfo->gldm_type != DL_IB)) ||
5223 	    (gld_global_options & GLD_OPT_NO_IPQ))
5224 		return;
5225 
5226 	vlan = (gld_vlan_t *)gld->gld_vlan;
5227 	ASSERT(vlan != NULL);
5228 
5229 	/* clear down any previously defined ipqs */
5230 	vlan->gldv_ipq = NULL;
5231 	vlan->gldv_ipv6q = NULL;
5232 
5233 	/* Try to find a single stream eligible to receive IP packets */
5234 	for (gld = vlan->gldv_str_next;
5235 	    gld != (gld_t *)&vlan->gldv_str_next; gld = gld->gld_next) {
5236 		if (gld->gld_state != DL_IDLE)
5237 			continue;	/* not eligible to receive */
5238 		if (gld->gld_flags & GLD_STR_CLOSING)
5239 			continue;	/* not eligible to receive */
5240 
5241 		if (gld->gld_sap == ETHERTYPE_IP) {
5242 			ip_gld = gld;
5243 			ipq_candidates++;
5244 		}
5245 
5246 		if (gld->gld_sap == ETHERTYPE_IPV6) {
5247 			ipv6_gld = gld;
5248 			ipv6q_candidates++;
5249 		}
5250 	}
5251 
5252 	if (ipq_candidates == 1) {
5253 		ASSERT(ip_gld != NULL);
5254 
5255 		if (ip_gld->gld_flags & GLD_FAST)	/* eligible for ipq */
5256 			vlan->gldv_ipq = ip_gld->gld_qptr;
5257 	}
5258 
5259 	if (ipv6q_candidates == 1) {
5260 		ASSERT(ipv6_gld != NULL);
5261 
5262 		if (ipv6_gld->gld_flags & GLD_FAST)	/* eligible for ipq */
5263 			vlan->gldv_ipv6q = ipv6_gld->gld_qptr;
5264 	}
5265 }
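
/*
 * Editor's note: the per-stream test applied by the candidate scan above,
 * restated as a hypothetical predicate for clarity; it is not part of the
 * driver.  A stream counts as a candidate for a given SAP only when it is
 * bound and idle and not closing; the single surviving candidate must also
 * have GLD_FAST set before the vlan's ipq (or ipv6q) pointer is used.
 */
static boolean_t
example_ipq_candidate(gld_t *str, uint32_t sap)
{
	if (str->gld_state != DL_IDLE)
		return (B_FALSE);		/* not eligible to receive */
	if (str->gld_flags & GLD_STR_CLOSING)
		return (B_FALSE);		/* stream is being torn down */
	return (str->gld_sap == sap ? B_TRUE : B_FALSE);
}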
5266 
5267 /*
5268  * gld_flushqueue (q)
5269  *	Used by DLPI primitives that require flushing the queues;
5270  *	in practice this means DL_UNBIND_REQ.
5271  */
5272 static void
5273 gld_flushqueue(queue_t *q)
5274 {
5275 	/* flush all data in both queues */
5276 	/* XXX Should these be FLUSHALL? */
5277 	flushq(q, FLUSHDATA);
5278 	flushq(WR(q), FLUSHDATA);
5279 	/* flush all the queues upstream */
5280 	(void) putctl1(q, M_FLUSH, FLUSHRW);
5281 }
5282 
5283 /*
5284  * gld_devlookup (major)
5285  * Search the device table for the device with the specified
5286  * major number and return a pointer to it if it exists, else NULL.
5287  */
5288 static glddev_t *
5289 gld_devlookup(int major)
5290 {
5291 	struct glddevice *dev;
5292 
5293 	ASSERT(mutex_owned(&gld_device_list.gld_devlock));
5294 
5295 	for (dev = gld_device_list.gld_next;
5296 	    dev != &gld_device_list;
5297 	    dev = dev->gld_next) {
5298 		ASSERT(dev);
5299 		if (dev->gld_major == major)
5300 			return (dev);
5301 	}
5302 	return (NULL);
5303 }
5304 
5305 /*
5306  * gld_findminor(device)
5307  * Returns a minor number currently unused by any stream in the given
5308  * device class (major) list, or 0 if none is available.
5309  */
5310 static int
5311 gld_findminor(glddev_t *device)
5312 {
5313 	gld_t		*next;
5314 	gld_mac_info_t	*nextmac;
5315 	gld_vlan_t	*nextvlan;
5316 	int		minor;
5317 	int		i;
5318 
5319 	ASSERT(mutex_owned(&device->gld_devlock));
5320 
5321 	/* The fast way */
5322 	if (device->gld_nextminor >= GLD_MIN_CLONE_MINOR &&
5323 	    device->gld_nextminor <= GLD_MAX_CLONE_MINOR)
5324 		return (device->gld_nextminor++);
5325 
5326 	/* The steady way */
5327 	for (minor = GLD_MIN_CLONE_MINOR; minor <= GLD_MAX_CLONE_MINOR;
5328 	    minor++) {
5329 		/* Search all unattached streams */
5330 		for (next = device->gld_str_next;
5331 		    next != (gld_t *)&device->gld_str_next;
5332 		    next = next->gld_next) {
5333 			if (minor == next->gld_minor)
5334 				goto nextminor;
5335 		}
5336 		/* Search all attached streams; we don't need maclock because */
5337 		/* mac stream list is protected by devlock as well as maclock */
5338 		for (nextmac = device->gld_mac_next;
5339 		    nextmac != (gld_mac_info_t *)&device->gld_mac_next;
5340 		    nextmac = nextmac->gldm_next) {
5341 			gld_mac_pvt_t *pvt =
5342 			    (gld_mac_pvt_t *)nextmac->gldm_mac_pvt;
5343 
5344 			if (!(nextmac->gldm_GLD_flags & GLD_MAC_READY))
5345 				continue;	/* this one's not ready yet */
5346 
5347 			for (i = 0; i < VLAN_HASHSZ; i++) {
5348 				for (nextvlan = pvt->vlan_hash[i];
5349 				    nextvlan != NULL;
5350 				    nextvlan = nextvlan->gldv_next) {
5351 					for (next = nextvlan->gldv_str_next;
5352 					    next !=
5353 					    (gld_t *)&nextvlan->gldv_str_next;
5354 					    next = next->gld_next) {
5355 						if (minor == next->gld_minor)
5356 							goto nextminor;
5357 					}
5358 				}
5359 			}
5360 		}
5361 
5362 		return (minor);
5363 nextminor:
5364 		/* don't need to do anything */
5365 		;
5366 	}
5367 	cmn_err(CE_WARN, "GLD ran out of minor numbers for %s",
5368 		device->gld_name);
5369 	return (0);
5370 }
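
/*
 * Editor's note: an illustrative caller sketch, not part of the driver.
 * gld_findminor() asserts that the per-device gld_devlock is held and
 * returns 0 once the clone minor range is exhausted.
 */
static int
example_alloc_clone_minor(glddev_t *device)
{
	int minor;

	mutex_enter(&device->gld_devlock);
	minor = gld_findminor(device);
	mutex_exit(&device->gld_devlock);

	return (minor);		/* 0 means no clone minors are available */
}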
5371 
5372 /*
5373  * version of insque/remque for use by this driver
5374  */
5375 struct qelem {
5376 	struct qelem *q_forw;
5377 	struct qelem *q_back;
5378 	/* rest of structure */
5379 };
5380 
5381 static void
5382 gldinsque(void *elem, void *pred)
5383 {
5384 	struct qelem *pelem = elem;
5385 	struct qelem *ppred = pred;
5386 	struct qelem *pnext = ppred->q_forw;
5387 
5388 	pelem->q_forw = pnext;
5389 	pelem->q_back = ppred;
5390 	ppred->q_forw = pelem;
5391 	pnext->q_back = pelem;
5392 }
5393 
5394 static void
5395 gldremque(void *arg)
5396 {
5397 	struct qelem *pelem = arg;
5398 	struct qelem *elem = arg;
5399 
5400 	pelem->q_forw->q_back = pelem->q_back;
5401 	pelem->q_back->q_forw = pelem->q_forw;
5402 	elem->q_back = elem->q_forw = NULL;
5403 }
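
/*
 * Editor's note: a small usage sketch of gldinsque()/gldremque(), not part
 * of the driver.  The lists in this file are circular and doubly linked,
 * with the list head acting as a sentinel (an empty per-vlan stream list
 * has gldv_str_next and gldv_str_prev pointing back at &gldv_str_next).
 * The hypothetical helper below appends a stream at the tail by inserting
 * it after the current last element.
 */
static void
example_append_stream(gld_vlan_t *vlan, gld_t *str)
{
	/* pred is the sentinel's back pointer, i.e. the current tail */
	gldinsque(str, vlan->gldv_str_prev);
}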
5404 
5405 static gld_vlan_t *
5406 gld_add_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5407 {
5408 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5409 	gld_vlan_t	**pp;
5410 	gld_vlan_t	*p;
5411 
5412 	pp = &(mac_pvt->vlan_hash[vid % VLAN_HASHSZ]);
5413 	while ((p = *pp) != NULL) {
5414 		ASSERT(p->gldv_id != vid);
5415 		pp = &(p->gldv_next);
5416 	}
5417 
5418 	if ((p = kmem_zalloc(sizeof (gld_vlan_t), KM_NOSLEEP)) == NULL)
5419 		return (NULL);
5420 
5421 	p->gldv_mac = macinfo;
5422 	p->gldv_id = vid;
5423 
5424 	if (vid == VLAN_VID_NONE) {
5425 		p->gldv_ptag = VLAN_VTAG_NONE;
5426 		p->gldv_stats = mac_pvt->statistics;
5427 		p->gldv_kstatp = NULL;
5428 	} else {
5429 		p->gldv_ptag = GLD_MK_PTAG(VLAN_CFI_ETHER, vid);
5430 		p->gldv_stats = kmem_zalloc(sizeof (struct gld_stats),
5431 		    KM_SLEEP);
5432 
5433 		if (gld_init_vlan_stats(p) != GLD_SUCCESS) {
5434 			kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5435 			kmem_free(p, sizeof (gld_vlan_t));
5436 			return (NULL);
5437 		}
5438 	}
5439 
5440 	p->gldv_str_next = p->gldv_str_prev = (gld_t *)&p->gldv_str_next;
5441 	mac_pvt->nvlan++;
5442 	*pp = p;
5443 
5444 	return (p);
5445 }
5446 
5447 static void
5448 gld_rem_vlan(gld_vlan_t *vlan)
5449 {
5450 	gld_mac_info_t	*macinfo = vlan->gldv_mac;
5451 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5452 	gld_vlan_t	**pp;
5453 	gld_vlan_t	*p;
5454 
5455 	pp = &(mac_pvt->vlan_hash[vlan->gldv_id % VLAN_HASHSZ]);
5456 	while ((p = *pp) != NULL) {
5457 		if (p->gldv_id == vlan->gldv_id)
5458 			break;
5459 		pp = &(p->gldv_next);
5460 	}
5461 	ASSERT(p != NULL);
5462 
5463 	*pp = p->gldv_next;
5464 	mac_pvt->nvlan--;
5465 	if (p->gldv_id != VLAN_VID_NONE) {
5466 		ASSERT(p->gldv_kstatp != NULL);
5467 		kstat_delete(p->gldv_kstatp);
5468 		kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5469 	}
5470 	kmem_free(p, sizeof (gld_vlan_t));
5471 }
5472 
5473 gld_vlan_t *
5474 gld_find_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5475 {
5476 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5477 	gld_vlan_t	*p;
5478 
5479 	p = mac_pvt->vlan_hash[vid % VLAN_HASHSZ];
5480 	while (p != NULL) {
5481 		if (p->gldv_id == vid)
5482 			return (p);
5483 		p = p->gldv_next;
5484 	}
5485 	return (NULL);
5486 }
5487 
5488 gld_vlan_t *
5489 gld_get_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5490 {
5491 	gld_vlan_t	*vlan;
5492 
5493 	if ((vlan = gld_find_vlan(macinfo, vid)) == NULL)
5494 		vlan = gld_add_vlan(macinfo, vid);
5495 
5496 	return (vlan);
5497 }
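
/*
 * Editor's note: an illustrative sketch, not part of the driver.  VLANs
 * hang off a small per-MAC hash table indexed by (vid % VLAN_HASHSZ);
 * gld_find_vlan() is a pure lookup, while gld_get_vlan() creates the entry
 * on demand and can return NULL if allocation or kstat setup fails.
 */
static boolean_t
example_vlan_configured(gld_mac_info_t *macinfo, uint32_t vid)
{
	/* lookup only; no side effects, unlike gld_get_vlan() */
	return (gld_find_vlan(macinfo, vid) != NULL ? B_TRUE : B_FALSE);
}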
5498 
5499 /*
5500  * gld_bitrevcopy()
5501  * This is essentially bcopy, with the ability to bit reverse the
5502  * source bytes. The MAC address bytes as transmitted by FDDI
5503  * interfaces are bit reversed.
5504  */
5505 void
5506 gld_bitrevcopy(caddr_t src, caddr_t target, size_t n)
5507 {
5508 	while (n--)
5509 		*target++ = bit_rev[(uchar_t)*src++];
5510 }
5511 
5512 /*
5513  * gld_bitreverse()
5514  * Convert the bit order by swapping all the bits, using a
5515  * lookup table.
5516  */
5517 void
5518 gld_bitreverse(uchar_t *rptr, size_t n)
5519 {
5520 	while (n--) {
5521 		*rptr = bit_rev[*rptr];
5522 		rptr++;
5523 	}
5524 }
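
/*
 * Editor's note: a table-free equivalent of the bit_rev[] lookup used above,
 * shown only to illustrate what the table encodes (the table itself is the
 * faster choice).  For example, 0x01 (00000001) reverses to 0x80 (10000000)
 * and 0xac reverses to 0x35.  The function name is hypothetical.
 */
static uchar_t
example_reverse_byte(uchar_t b)
{
	uchar_t r = 0;
	int i;

	for (i = 0; i < 8; i++) {
		r = (uchar_t)((r << 1) | (b & 1));	/* shift in lsb of b */
		b >>= 1;
	}
	return (r);
}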
5525 
5526 char *
5527 gld_macaddr_sprintf(char *etherbuf, unsigned char *ap, int len)
5528 {
5529 	int i;
5530 	char *cp = etherbuf;
5531 	static char digits[] = "0123456789abcdef";
5532 
5533 	for (i = 0; i < len; i++) {
5534 		*cp++ = digits[*ap >> 4];
5535 		*cp++ = digits[*ap++ & 0xf];
5536 		*cp++ = ':';
5537 	}
5538 	*--cp = 0;
5539 	return (etherbuf);
5540 }
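
/*
 * Editor's note: an illustrative use of gld_macaddr_sprintf(), not part of
 * the driver.  The routine emits three characters per address byte ("xx:")
 * and overwrites the final ':' with a NUL, so 3 * addrlen bytes of buffer
 * are always enough.  The function name below is hypothetical.
 */
static void
example_log_macaddr(gld_mac_info_t *macinfo)
{
	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
	char addrbuf[3 * GLD_MAX_ADDRLEN];

	cmn_err(CE_CONT, "!current MAC address: %s\n",
	    gld_macaddr_sprintf(addrbuf, mac_pvt->curr_macaddr,
	    macinfo->gldm_addrlen));
}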
5541 
5542 #ifdef GLD_DEBUG
5543 static void
5544 gld_check_assertions(void)
5545 {
5546 	glddev_t	*dev;
5547 	gld_mac_info_t	*mac;
5548 	gld_t		*str;
5549 	gld_vlan_t	*vlan;
5550 	int		i;
5551 
5552 	mutex_enter(&gld_device_list.gld_devlock);
5553 
5554 	for (dev = gld_device_list.gld_next;
5555 	    dev != (glddev_t *)&gld_device_list.gld_next;
5556 	    dev = dev->gld_next) {
5557 		mutex_enter(&dev->gld_devlock);
5558 		ASSERT(dev->gld_broadcast != NULL);
5559 		for (str = dev->gld_str_next;
5560 		    str != (gld_t *)&dev->gld_str_next;
5561 		    str = str->gld_next) {
5562 			ASSERT(str->gld_device == dev);
5563 			ASSERT(str->gld_mac_info == NULL);
5564 			ASSERT(str->gld_qptr != NULL);
5565 			ASSERT(str->gld_minor >= GLD_MIN_CLONE_MINOR);
5566 			ASSERT(str->gld_multicnt == 0);
5567 			ASSERT(str->gld_mcast == NULL);
5568 			ASSERT(!(str->gld_flags &
5569 			    (GLD_PROM_PHYS|GLD_PROM_MULT|GLD_PROM_SAP)));
5570 			ASSERT(str->gld_sap == 0);
5571 			ASSERT(str->gld_state == DL_UNATTACHED);
5572 		}
5573 		for (mac = dev->gld_mac_next;
5574 		    mac != (gld_mac_info_t *)&dev->gld_mac_next;
5575 		    mac = mac->gldm_next) {
5576 			int nvlan = 0;
5577 			gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
5578 
5579 			if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
5580 				continue;	/* this one's not ready yet */
5581 
5582 			GLDM_LOCK(mac, RW_WRITER);
5583 			ASSERT(mac->gldm_devinfo != NULL);
5584 			ASSERT(mac->gldm_mac_pvt != NULL);
5585 			ASSERT(pvt->interfacep != NULL);
5586 			ASSERT(pvt->kstatp != NULL);
5587 			ASSERT(pvt->statistics != NULL);
5588 			ASSERT(pvt->major_dev == dev);
5589 
5590 			for (i = 0; i < VLAN_HASHSZ; i++) {
5591 				for (vlan = pvt->vlan_hash[i];
5592 				    vlan != NULL; vlan = vlan->gldv_next) {
5593 					int nstr = 0;
5594 
5595 					ASSERT(vlan->gldv_mac == mac);
5596 
5597 					for (str = vlan->gldv_str_next;
5598 					    str !=
5599 					    (gld_t *)&vlan->gldv_str_next;
5600 					    str = str->gld_next) {
5601 						ASSERT(str->gld_device == dev);
5602 						ASSERT(str->gld_mac_info ==
5603 						    mac);
5604 						ASSERT(str->gld_qptr != NULL);
5605 						ASSERT(str->gld_minor >=
5606 						    GLD_MIN_CLONE_MINOR);
5607 						ASSERT(
5608 						    str->gld_multicnt == 0 ||
5609 						    str->gld_mcast);
5610 						nstr++;
5611 					}
5612 					ASSERT(vlan->gldv_nstreams == nstr);
5613 					nvlan++;
5614 				}
5615 			}
5616 			ASSERT(pvt->nvlan == nvlan);
5617 			GLDM_UNLOCK(mac);
5618 		}
5619 		mutex_exit(&dev->gld_devlock);
5620 	}
5621 	mutex_exit(&gld_device_list.gld_devlock);
5622 }
5623 #endif
5624