xref: /illumos-gate/usr/src/uts/common/io/gld.c (revision 7ae111d47a973fff4c6e231cc31f271dd9cef473)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * gld - Generic LAN Driver Version 2, PSARC/1997/382
30  *
31  * This is a utility module that provides generic facilities for
32  * LAN drivers.  The DLPI protocol and most STREAMS interfaces
33  * are handled here.
34  *
35  * It no longer provides compatibility with drivers
36  * implemented according to the GLD v0 documentation published
37  * in 1993. (See PSARC 2003/728)
38  */
39 
40 
41 #include <sys/types.h>
42 #include <sys/errno.h>
43 #include <sys/stropts.h>
44 #include <sys/stream.h>
45 #include <sys/kmem.h>
46 #include <sys/stat.h>
47 #include <sys/modctl.h>
48 #include <sys/kstat.h>
49 #include <sys/debug.h>
50 #include <sys/note.h>
51 #include <sys/sysmacros.h>
52 
53 #include <sys/byteorder.h>
54 #include <sys/strsun.h>
55 #include <sys/strsubr.h>
56 #include <sys/dlpi.h>
57 #include <sys/pattr.h>
58 #include <sys/ethernet.h>
59 #include <sys/ib/clients/ibd/ibd.h>
60 #include <sys/policy.h>
61 #include <sys/atomic.h>
62 
63 #include <sys/multidata.h>
64 #include <sys/gld.h>
65 #include <sys/gldpriv.h>
66 
67 #include <sys/ddi.h>
68 #include <sys/sunddi.h>
69 
70 /*
71  * Macro to atomically increment counters of type uint32_t, uint64_t
72  * and ulong_t.
73  */
74 #define	BUMP(stat, delta)	do {				\
75 	_NOTE(CONSTANTCONDITION)				\
76 	if (sizeof (stat) == sizeof (uint32_t))	{		\
77 		atomic_add_32((uint32_t *)&stat, delta);	\
78 	_NOTE(CONSTANTCONDITION)				\
79 	} else if (sizeof (stat) == sizeof (uint64_t)) {	\
80 		atomic_add_64((uint64_t *)&stat, delta);	\
81 	}							\
82 	_NOTE(CONSTANTCONDITION)				\
83 } while (0)
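/*
 * Illustrative usage (a sketch, not part of the original source): the
 * sizeof-based dispatch above lets callers bump 32-bit, 64-bit, or
 * ulong_t counters with the same macro, e.g. given a
 * struct gld_stats *stats:
 *
 *	BUMP(stats->glds_intr, 1);	(ulong_t: 32 or 64 bits by platform)
 *	BUMP(stats->glds_pktrcv64, 1);	(uint64_t)
 */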
84 
85 #define	UPDATE_STATS(vlan, pktinfo, number)	{		\
86 	if ((pktinfo).isBroadcast)				\
87 		(vlan)->gldv_stats->glds_brdcstxmt += (number);	\
88 	else if ((pktinfo).isMulticast)				\
89 		(vlan)->gldv_stats->glds_multixmt += (number);	\
90 	(vlan)->gldv_stats->glds_bytexmt64 += (pktinfo).pktLen;	\
91 	(vlan)->gldv_stats->glds_pktxmt64 += (number);		\
92 }
93 
94 #ifdef GLD_DEBUG
95 int gld_debug = GLDERRS;
96 #endif
97 
98 /* called from gld_register */
99 static int gld_initstats(gld_mac_info_t *);
100 
101 /* called from kstat mechanism, and from wsrv's get_statistics */
102 static int gld_update_kstat(kstat_t *, int);
103 
104 /* statistics for additional vlans */
105 static int gld_init_vlan_stats(gld_vlan_t *);
106 static int gld_update_vlan_kstat(kstat_t *, int);
107 
108 /* called from gld_getinfo */
109 static dev_info_t *gld_finddevinfo(dev_t);
110 
111 /* called from wput, wsrv, unidata, and v0_sched to send a packet */
112 /* also from the source routing stuff for sending RDE protocol packets */
113 static int gld_start(queue_t *, mblk_t *, int, uint32_t);
114 static int gld_start_mdt(queue_t *, mblk_t *, int);
115 
116 /* called from gld_start[_mdt] to loopback packet(s) in promiscuous mode */
117 static void gld_precv(gld_mac_info_t *, gld_vlan_t *, mblk_t *);
118 static void gld_precv_mdt(gld_mac_info_t *, gld_vlan_t *, mblk_t *,
119     pdesc_t *, pktinfo_t *);
120 
121 /* receive group: called from gld_recv and gld_precv* with maclock held */
122 static void gld_sendup(gld_mac_info_t *, gld_vlan_t *, pktinfo_t *, mblk_t *,
123     int (*)());
124 static int gld_accept(gld_t *, pktinfo_t *);
125 static int gld_mcmatch(gld_t *, pktinfo_t *);
126 static int gld_multicast(unsigned char *, gld_t *);
127 static int gld_paccept(gld_t *, pktinfo_t *);
128 static void gld_passon(gld_t *, mblk_t *, pktinfo_t *,
129     void (*)(queue_t *, mblk_t *));
130 static mblk_t *gld_addudind(gld_t *, mblk_t *, pktinfo_t *);
131 
132 /* wsrv group: called from wsrv, single threaded per queue */
133 static int gld_ioctl(queue_t *, mblk_t *);
134 static void gld_fastpath(gld_t *, queue_t *, mblk_t *);
135 static int gld_cmds(queue_t *, mblk_t *);
136 static mblk_t *gld_bindack(queue_t *, mblk_t *);
137 static int gld_notify_req(queue_t *, mblk_t *);
138 static int gld_udqos(queue_t *, mblk_t *);
139 static int gld_bind(queue_t *, mblk_t *);
140 static int gld_unbind(queue_t *, mblk_t *);
141 static int gld_inforeq(queue_t *, mblk_t *);
142 static int gld_unitdata(queue_t *, mblk_t *);
143 static int gldattach(queue_t *, mblk_t *);
144 static int gldunattach(queue_t *, mblk_t *);
145 static int gld_enable_multi(queue_t *, mblk_t *);
146 static int gld_disable_multi(queue_t *, mblk_t *);
147 static void gld_send_disable_multi(gld_mac_info_t *, gld_mcast_t *);
148 static int gld_promisc(queue_t *, mblk_t *, t_uscalar_t, boolean_t);
149 static int gld_physaddr(queue_t *, mblk_t *);
150 static int gld_setaddr(queue_t *, mblk_t *);
151 static int gld_get_statistics(queue_t *, mblk_t *);
152 static int gld_cap(queue_t *, mblk_t *);
153 static int gld_cap_ack(queue_t *, mblk_t *);
154 static int gld_cap_enable(queue_t *, mblk_t *);
155 
156 /* misc utilities, some requiring various mutexes held */
157 static int gld_start_mac(gld_mac_info_t *);
158 static void gld_stop_mac(gld_mac_info_t *);
159 static void gld_set_ipq(gld_t *);
160 static void gld_flushqueue(queue_t *);
161 static glddev_t *gld_devlookup(int);
162 static int gld_findminor(glddev_t *);
163 static void gldinsque(void *, void *);
164 static void gldremque(void *);
165 void gld_bitrevcopy(caddr_t, caddr_t, size_t);
166 void gld_bitreverse(uchar_t *, size_t);
167 char *gld_macaddr_sprintf(char *, unsigned char *, int);
168 static gld_vlan_t *gld_add_vlan(gld_mac_info_t *, uint32_t vid);
169 static void gld_rem_vlan(gld_vlan_t *);
170 gld_vlan_t *gld_find_vlan(gld_mac_info_t *, uint32_t);
171 gld_vlan_t *gld_get_vlan(gld_mac_info_t *, uint32_t);
172 
173 #ifdef GLD_DEBUG
174 static void gld_check_assertions(void);
175 extern void gld_sr_dump(gld_mac_info_t *);
176 #endif
177 
178 /*
179  * Allocate and zero-out "number" structures each of type "structure" in
180  * kernel memory.
181  */
182 #define	GETSTRUCT(structure, number)   \
183 	(kmem_zalloc((uint_t)(sizeof (structure) * (number)), KM_NOSLEEP))
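/*
 * For example, gld_register() below allocates its per-major structure
 * with GETSTRUCT(glddev_t, 1); since the allocation is KM_NOSLEEP, the
 * result must be checked against NULL.
 */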
184 
185 #define	abs(a) ((a) < 0 ? -(a) : (a))
186 
187 uint32_t gld_global_options = GLD_OPT_NO_ETHRXSNAP;
188 
189 /*
190  * VLANs are only supported on ethernet devices that manipulate VLAN headers
191  * themselves.
192  */
193 #define	VLAN_CAPABLE(macinfo) \
194 	((macinfo)->gldm_type == DL_ETHER && \
195 	(macinfo)->gldm_send_tagged != NULL)
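/*
 * For example, an Ethernet driver that supplies both gldm_send and
 * gldm_send_tagged is treated as VLAN-capable; gld_open() below then
 * prefers gldm_send_tagged for streams opened on that mac.
 */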
196 
197 /*
198  * The set of notifications generatable by GLD itself, the additional
199  * set that can be generated if the MAC driver provides the link-state
200  * tracking callback capability, and the set supported by the GLD
201  * notification code below.
202  *
203  * PLEASE keep these in sync with what the code actually does!
204  */
205 static const uint32_t gld_internal_notes =	DL_NOTE_PROMISC_ON_PHYS |
206 						DL_NOTE_PROMISC_OFF_PHYS |
207 						DL_NOTE_PHYS_ADDR;
208 static const uint32_t gld_linkstate_notes =	DL_NOTE_LINK_DOWN |
209 						DL_NOTE_LINK_UP |
210 						DL_NOTE_SPEED;
211 static const uint32_t gld_supported_notes =	DL_NOTE_PROMISC_ON_PHYS |
212 						DL_NOTE_PROMISC_OFF_PHYS |
213 						DL_NOTE_PHYS_ADDR |
214 						DL_NOTE_LINK_DOWN |
215 						DL_NOTE_LINK_UP |
216 						DL_NOTE_SPEED;
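/*
 * For example, a driver that advertises GLD_CAP_LINKSTATE ends up with
 * (gld_internal_notes | gld_linkstate_notes) & gld_supported_notes,
 * i.e. all six notifications above; see the computation of
 * mac_pvt->notifications in gld_register() below.
 */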
217 
218 /* Media must correspond to #defines in gld.h */
219 static char *gld_media[] = {
220 	"unknown",	/* GLDM_UNKNOWN - driver cannot determine media */
221 	"aui",		/* GLDM_AUI */
222 	"bnc",		/* GLDM_BNC */
223 	"twpair",	/* GLDM_TP */
224 	"fiber",	/* GLDM_FIBER */
225 	"100baseT",	/* GLDM_100BT */
226 	"100vgAnyLan",	/* GLDM_VGANYLAN */
227 	"10baseT",	/* GLDM_10BT */
228 	"ring4",	/* GLDM_RING4 */
229 	"ring16",	/* GLDM_RING16 */
230 	"PHY/MII",	/* GLDM_PHYMII */
231 	"100baseTX",	/* GLDM_100BTX */
232 	"100baseT4",	/* GLDM_100BT4 */
233 	"unknown",	/* skip */
234 	"ipib",		/* GLDM_IB */
235 };
236 
237 /* Must correspond to #defines in gld.h */
238 static char *gld_duplex[] = {
239 	"unknown",	/* GLD_DUPLEX_UNKNOWN - not known or not applicable */
240 	"half",		/* GLD_DUPLEX_HALF */
241 	"full"		/* GLD_DUPLEX_FULL */
242 };
243 
244 extern int gld_interpret_ether(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
245 extern int gld_interpret_fddi(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
246 extern int gld_interpret_tr(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
247 extern int gld_interpret_ib(gld_mac_info_t *, mblk_t *, pktinfo_t *, int);
248 extern void gld_interpret_mdt_ib(gld_mac_info_t *, mblk_t *, pdescinfo_t *,
249     pktinfo_t *, int);
250 
251 extern mblk_t *gld_fastpath_ether(gld_t *, mblk_t *);
252 extern mblk_t *gld_fastpath_fddi(gld_t *, mblk_t *);
253 extern mblk_t *gld_fastpath_tr(gld_t *, mblk_t *);
254 extern mblk_t *gld_fastpath_ib(gld_t *, mblk_t *);
255 
256 extern mblk_t *gld_unitdata_ether(gld_t *, mblk_t *);
257 extern mblk_t *gld_unitdata_fddi(gld_t *, mblk_t *);
258 extern mblk_t *gld_unitdata_tr(gld_t *, mblk_t *);
259 extern mblk_t *gld_unitdata_ib(gld_t *, mblk_t *);
260 
261 extern void gld_init_ether(gld_mac_info_t *);
262 extern void gld_init_fddi(gld_mac_info_t *);
263 extern void gld_init_tr(gld_mac_info_t *);
264 extern void gld_init_ib(gld_mac_info_t *);
265 
266 extern void gld_uninit_ether(gld_mac_info_t *);
267 extern void gld_uninit_fddi(gld_mac_info_t *);
268 extern void gld_uninit_tr(gld_mac_info_t *);
269 extern void gld_uninit_ib(gld_mac_info_t *);
270 
271 /*
272  * Interface types currently supported by GLD.
273  * If you add new types, you must check all "XXX" strings in the GLD source
274  * for implementation issues that may affect the support of your new type.
275  * In particular, any type with gldm_addrlen > 6, or gldm_saplen != -2, will
276  * require generalizing this GLD source to handle the new cases.  In other
277  * words there are assumptions built into the code in a few places that must
278  * be fixed.  Be sure to turn on DEBUG/ASSERT code when testing a new type.
279  */
280 static gld_interface_t interfaces[] = {
281 
282 	/* Ethernet Bus */
283 	{
284 		DL_ETHER,
285 		(uint_t)-1,
286 		sizeof (struct ether_mac_frm),
287 		gld_interpret_ether,
288 		NULL,
289 		gld_fastpath_ether,
290 		gld_unitdata_ether,
291 		gld_init_ether,
292 		gld_uninit_ether,
293 		"ether"
294 	},
295 
296 	/* Fiber Distributed Data Interface */
297 	{
298 		DL_FDDI,
299 		4352,
300 		sizeof (struct fddi_mac_frm),
301 		gld_interpret_fddi,
302 		NULL,
303 		gld_fastpath_fddi,
304 		gld_unitdata_fddi,
305 		gld_init_fddi,
306 		gld_uninit_fddi,
307 		"fddi"
308 	},
309 
310 	/* Token Ring interface */
311 	{
312 		DL_TPR,
313 		17914,
314 		-1,			/* variable header size */
315 		gld_interpret_tr,
316 		NULL,
317 		gld_fastpath_tr,
318 		gld_unitdata_tr,
319 		gld_init_tr,
320 		gld_uninit_tr,
321 		"tpr"
322 	},
323 
324 	/* InfiniBand */
325 	{
326 		DL_IB,
327 		4092,
328 		sizeof (struct ipoib_header),
329 		gld_interpret_ib,
330 		gld_interpret_mdt_ib,
331 		gld_fastpath_ib,
332 		gld_unitdata_ib,
333 		gld_init_ib,
334 		gld_uninit_ib,
335 		"ipib"
336 	},
337 };
338 
339 /*
340  * bit reversal lookup table.
341  */
342 static	uchar_t bit_rev[] = {
343 	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0,
344 	0x30, 0xb0, 0x70, 0xf0, 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
345 	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 0x04, 0x84, 0x44, 0xc4,
346 	0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
347 	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc,
348 	0x3c, 0xbc, 0x7c, 0xfc, 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
349 	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, 0x0a, 0x8a, 0x4a, 0xca,
350 	0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
351 	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6,
352 	0x36, 0xb6, 0x76, 0xf6, 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
353 	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 0x01, 0x81, 0x41, 0xc1,
354 	0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
355 	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9,
356 	0x39, 0xb9, 0x79, 0xf9, 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
357 	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, 0x0d, 0x8d, 0x4d, 0xcd,
358 	0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
359 	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3,
360 	0x33, 0xb3, 0x73, 0xf3, 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
361 	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 0x07, 0x87, 0x47, 0xc7,
362 	0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
363 	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf,
364 	0x3f, 0xbf, 0x7f, 0xff,
365 };
366 
367 /*
368  * User priorities, mapped from b_band.
369  */
370 static uint32_t user_priority[] = {
371 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
372 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
373 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
374 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
375 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
376 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
377 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
378 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
379 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
380 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
381 	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
382 	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
383 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
384 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
385 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
386 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
387 };
388 
389 #define	UPRI(gld, band)	(((band) != 0) ? user_priority[(band)] : (gld)->gld_upri)
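/*
 * For example, a message with b_band 0 inherits the stream's default
 * priority (gld_upri), while b_band 100 maps to user priority 3
 * (entries 96 through 127 of the table above).
 */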
390 
391 static struct glddevice gld_device_list;  /* Per-system root of GLD tables */
392 
393 /*
394  * Module linkage information for the kernel.
395  */
396 
397 static struct modlmisc modlmisc = {
398 	&mod_miscops,		/* Type of module - a utility provider */
399 	"Generic LAN Driver (" GLD_VERSION_STRING ") %I%"
400 #ifdef GLD_DEBUG
401 	" DEBUG"
402 #endif
403 };
404 
405 static struct modlinkage modlinkage = {
406 	MODREV_1, &modlmisc, NULL
407 };
408 
409 int
410 _init(void)
411 {
412 	int e;
413 
414 	/* initialize gld_device_list mutex */
415 	mutex_init(&gld_device_list.gld_devlock, NULL, MUTEX_DRIVER, NULL);
416 
417 	/* initialize device driver (per-major) list */
418 	gld_device_list.gld_next =
419 	    gld_device_list.gld_prev = &gld_device_list;
420 
421 	if ((e = mod_install(&modlinkage)) != 0)
422 		mutex_destroy(&gld_device_list.gld_devlock);
423 
424 	return (e);
425 }
426 
427 int
428 _fini(void)
429 {
430 	int e;
431 
432 	if ((e = mod_remove(&modlinkage)) != 0)
433 		return (e);
434 
435 	ASSERT(gld_device_list.gld_next ==
436 	    (glddev_t *)&gld_device_list.gld_next);
437 	ASSERT(gld_device_list.gld_prev ==
438 	    (glddev_t *)&gld_device_list.gld_next);
439 	mutex_destroy(&gld_device_list.gld_devlock);
440 
441 	return (e);
442 }
443 
444 int
445 _info(struct modinfo *modinfop)
446 {
447 	return (mod_info(&modlinkage, modinfop));
448 }
449 
450 /*
451  * GLD service routines
452  */
453 
454 /* So this GLD binary may be forward compatible with future v2 drivers */
455 #define	GLD_MAC_RESERVED (16 * sizeof (caddr_t))
456 
457 /*ARGSUSED*/
458 gld_mac_info_t *
459 gld_mac_alloc(dev_info_t *devinfo)
460 {
461 	gld_mac_info_t *macinfo;
462 
463 	macinfo = kmem_zalloc(sizeof (gld_mac_info_t) + GLD_MAC_RESERVED,
464 	    KM_SLEEP);
465 
466 	/*
467 	 * The setting of gldm_driver_version will not be documented or allowed
468 	 * until a future release.
469 	 */
470 	macinfo->gldm_driver_version = GLD_VERSION_200;
471 
472 	/*
473 	 * GLD's version.  This also is undocumented for now, but will be
474 	 * available if needed in the future.
475 	 */
476 	macinfo->gldm_GLD_version = GLD_VERSION;
477 
478 	return (macinfo);
479 }
480 
481 /*
482  * gld_mac_free must be called after the driver has removed interrupts
483  * and completely stopped calling gld_recv() and gld_sched().  At that
484  * point the interrupt routine is guaranteed by the system to have been
485  * exited and the maclock is no longer needed.  It is required that,
486  * assuming gld_register() succeeded, gld_unregister() has been called
487  * before gld_mac_free().
488  */
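/*
 * A sketch of the expected teardown order in a driver's detach(9E)
 * routine (illustrative only; xx_iblock is a hypothetical cookie and
 * error handling is abbreviated):
 *
 *	if (gld_unregister(macinfo) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	ddi_remove_intr(devinfo, 0, xx_iblock);
 *	gld_mac_free(macinfo);
 *	return (DDI_SUCCESS);
 */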
489 void
490 gld_mac_free(gld_mac_info_t *macinfo)
491 {
492 	ASSERT(macinfo);
493 	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
494 
495 	/*
496 	 * Assert that if we made it through gld_register, then we must
497 	 * have unregistered.
498 	 */
499 	ASSERT(!GLDM_LOCK_INITED(macinfo) ||
500 	    (macinfo->gldm_GLD_flags & GLD_UNREGISTERED));
501 
502 	GLDM_LOCK_DESTROY(macinfo);
503 
504 	kmem_free(macinfo, sizeof (gld_mac_info_t) + GLD_MAC_RESERVED);
505 }
506 
507 /*
508  * gld_register -- called once per device instance (PPA)
509  *
510  * During its attach routine, a real device driver will register with GLD
511  * so that later opens and dl_attach_reqs will work.  The arguments are the
512  * devinfo pointer, the device name, and a macinfo structure describing the
513  * physical device instance.
514  */
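/*
 * A minimal attach-time sketch (illustrative only; the xx_* names are
 * hypothetical driver entry points/data and error handling is omitted):
 *
 *	macinfo = gld_mac_alloc(devinfo);
 *	macinfo->gldm_reset = xx_reset;
 *	macinfo->gldm_start = xx_start;
 *	macinfo->gldm_stop = xx_stop;
 *	macinfo->gldm_set_mac_addr = xx_set_mac_addr;
 *	macinfo->gldm_set_promiscuous = xx_set_promiscuous;
 *	macinfo->gldm_send = xx_send;
 *	macinfo->gldm_type = DL_ETHER;
 *	macinfo->gldm_minpkt = 0;
 *	macinfo->gldm_maxpkt = ETHERMTU;
 *	macinfo->gldm_addrlen = ETHERADDRL;
 *	macinfo->gldm_saplen = -2;
 *	macinfo->gldm_broadcast_addr = xx_broadcast_addr;
 *	macinfo->gldm_vendor_addr = xx_vendor_addr;
 *	macinfo->gldm_ident = "xx fast ethernet";
 *	macinfo->gldm_ppa = ddi_get_instance(devinfo);
 *	macinfo->gldm_devinfo = devinfo;
 *	if (gld_register(devinfo, "xx", macinfo) != DDI_SUCCESS)
 *		gld_mac_free(macinfo);
 */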
515 int
516 gld_register(dev_info_t *devinfo, char *devname, gld_mac_info_t *macinfo)
517 {
518 	int mediatype;
519 	int major = ddi_name_to_major(devname), i;
520 	glddev_t *glddev;
521 	gld_mac_pvt_t *mac_pvt;
522 	char minordev[32];
523 	char pbuf[3*GLD_MAX_ADDRLEN];
524 	gld_interface_t *ifp = NULL;	/* stays NULL if no interface matches */
525 
526 	ASSERT(devinfo != NULL);
527 	ASSERT(macinfo != NULL);
528 
529 	if (macinfo->gldm_driver_version != GLD_VERSION)
530 		return (DDI_FAILURE);
531 
532 	mediatype = macinfo->gldm_type;
533 
534 	/*
535 	 * Entry points should be ready for us.
536 	 * ioctl is optional.
537 	 * set_multicast and get_stats are optional in v0.
538 	 * intr is only required if you add an interrupt.
539 	 */
540 	ASSERT(macinfo->gldm_reset != NULL);
541 	ASSERT(macinfo->gldm_start != NULL);
542 	ASSERT(macinfo->gldm_stop != NULL);
543 	ASSERT(macinfo->gldm_set_mac_addr != NULL);
544 	ASSERT(macinfo->gldm_set_promiscuous != NULL);
545 	ASSERT(macinfo->gldm_send != NULL);
546 
547 	ASSERT(macinfo->gldm_maxpkt >= macinfo->gldm_minpkt);
548 	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
549 	ASSERT(macinfo->gldm_broadcast_addr != NULL);
550 	ASSERT(macinfo->gldm_vendor_addr != NULL);
551 	ASSERT(macinfo->gldm_ident != NULL);
552 
553 	if (macinfo->gldm_addrlen > GLD_MAX_ADDRLEN) {
554 		cmn_err(CE_WARN, "GLD: %s driver gldm_addrlen %d > %d not sup"
555 		    "ported", devname, macinfo->gldm_addrlen, GLD_MAX_ADDRLEN);
556 		return (DDI_FAILURE);
557 	}
558 
559 	/*
560 	 * GLD only functions properly with saplen == -2
561 	 */
562 	if (macinfo->gldm_saplen != -2) {
563 		cmn_err(CE_WARN, "GLD: %s driver gldm_saplen %d != -2 "
564 		    "not supported", devname, macinfo->gldm_saplen);
565 		return (DDI_FAILURE);
566 	}
567 
568 	/* see gld_rsrv() */
569 	if (ddi_getprop(DDI_DEV_T_NONE, devinfo, 0, "fast_recv", 0))
570 		macinfo->gldm_options |= GLDOPT_FAST_RECV;
571 
572 	mutex_enter(&gld_device_list.gld_devlock);
573 	glddev = gld_devlookup(major);
574 
575 	/*
576 	 *  Allocate per-driver (major) data structure if necessary
577 	 */
578 	if (glddev == NULL) {
579 		/* first occurrence of this device name (major number) */
580 		glddev = GETSTRUCT(glddev_t, 1);
581 		if (glddev == NULL) {
582 			mutex_exit(&gld_device_list.gld_devlock);
583 			return (DDI_FAILURE);
584 		}
585 		(void) strncpy(glddev->gld_name, devname,
586 		    sizeof (glddev->gld_name) - 1);
587 		glddev->gld_major = major;
588 		glddev->gld_nextminor = GLD_MIN_CLONE_MINOR;
589 		glddev->gld_mac_next = glddev->gld_mac_prev =
590 			(gld_mac_info_t *)&glddev->gld_mac_next;
591 		glddev->gld_str_next = glddev->gld_str_prev =
592 			(gld_t *)&glddev->gld_str_next;
593 		mutex_init(&glddev->gld_devlock, NULL, MUTEX_DRIVER, NULL);
594 
595 		/* allow increase of number of supported multicast addrs */
596 		glddev->gld_multisize = ddi_getprop(DDI_DEV_T_NONE,
597 		    devinfo, 0, "multisize", GLD_MAX_MULTICAST);
598 
599 		/*
600 		 * Optionally restrict DLPI provider style
601 		 *
602 		 * -1 - don't create style 1 nodes
603 		 * -2 - don't create style 2 nodes
604 		 */
605 		glddev->gld_styles = ddi_getprop(DDI_DEV_T_NONE, devinfo, 0,
606 		    "gld-provider-styles", 0);
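		/*
		 * For example, "gld-provider-styles=-2;" in a hypothetical
		 * xx.conf suppresses creation of the style 2 (clone) node
		 * for that driver; "-1" suppresses the style 1 node.
		 */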
607 
608 		/* Stuff that's needed before any PPA gets attached */
609 		glddev->gld_type = macinfo->gldm_type;
610 		glddev->gld_minsdu = macinfo->gldm_minpkt;
611 		glddev->gld_saplen = macinfo->gldm_saplen;
612 		glddev->gld_addrlen = macinfo->gldm_addrlen;
613 		glddev->gld_broadcast = kmem_zalloc(macinfo->gldm_addrlen,
614 		    KM_SLEEP);
615 		bcopy(macinfo->gldm_broadcast_addr,
616 		    glddev->gld_broadcast, macinfo->gldm_addrlen);
617 		glddev->gld_maxsdu = macinfo->gldm_maxpkt;
618 		gldinsque(glddev, gld_device_list.gld_prev);
619 	}
620 	glddev->gld_ndevice++;
621 	/* Now glddev can't go away until we unregister this mac (or fail) */
622 	mutex_exit(&gld_device_list.gld_devlock);
623 
624 	/*
625 	 *  Per-instance initialization
626 	 */
627 
628 	/*
629 	 * Initialize per-mac structure that is private to GLD.
630 	 * Set up interface pointer. These are device class specific pointers
631 	 * used to handle FDDI/TR/ETHER/IPoIB specific packets.
632 	 */
633 	for (i = 0; i < sizeof (interfaces)/sizeof (*interfaces); i++) {
634 		if (mediatype != interfaces[i].mac_type)
635 			continue;
636 
637 		macinfo->gldm_mac_pvt = kmem_zalloc(sizeof (gld_mac_pvt_t),
638 		    KM_SLEEP);
639 		((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep = ifp =
640 		    &interfaces[i];
641 		break;
642 	}
643 
644 	if (ifp == NULL) {
645 		cmn_err(CE_WARN, "GLD: this version does not support %s driver "
646 		    "of type %d", devname, mediatype);
647 		goto failure;
648 	}
649 
650 	/*
651 	 * Driver can only register MTU within legal media range.
652 	 */
653 	if (macinfo->gldm_maxpkt > ifp->mtu_size) {
654 		cmn_err(CE_WARN, "GLD: oversize MTU is specified by driver %s",
655 		    devname);
656 		goto failure;
657 	}
658 
659 	/*
660 	 * For now, only Infiniband drivers can use MDT. Do not add
661 	 * support for Ethernet, FDDI or TR.
662 	 */
663 	if (macinfo->gldm_mdt_pre != NULL) {
664 		if (mediatype != DL_IB) {
665 			cmn_err(CE_WARN, "GLD: MDT not supported for %s "
666 			    "driver of type %d", devname, mediatype);
667 			goto failure;
668 		}
669 
670 		/*
671 		 * Validate entry points.
672 		 */
673 		if ((macinfo->gldm_mdt_send == NULL) ||
674 		    (macinfo->gldm_mdt_post == NULL)) {
675 			cmn_err(CE_WARN, "GLD: invalid MDT entry points for "
676 			    "%s driver of type %d", devname, mediatype);
677 			goto failure;
678 		}
679 		macinfo->gldm_options |= GLDOPT_MDT;
680 	}
681 
682 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
683 	mac_pvt->major_dev = glddev;
684 
685 	mac_pvt->curr_macaddr = kmem_zalloc(macinfo->gldm_addrlen, KM_SLEEP);
686 	/*
687 	 * XXX Do bit-reversed devices store gldm_vendor in canonical
688 	 * format or in wire format?  Also gldm_broadcast.  For now
689 	 * we are assuming canonical, but I'm not sure that makes the
690 	 * most sense for ease of driver implementation.
691 	 */
692 	bcopy(macinfo->gldm_vendor_addr, mac_pvt->curr_macaddr,
693 	    macinfo->gldm_addrlen);
694 	mac_pvt->statistics = kmem_zalloc(sizeof (struct gld_stats), KM_SLEEP);
695 
696 	/*
697 	 * The available set of notifications is those generatable by GLD
698 	 * itself, plus those corresponding to the capabilities of the MAC
699 	 * driver, intersected with those supported by gld_notify_ind() above.
700 	 */
701 	mac_pvt->notifications = gld_internal_notes;
702 	if (macinfo->gldm_capabilities & GLD_CAP_LINKSTATE)
703 		mac_pvt->notifications |= gld_linkstate_notes;
704 	mac_pvt->notifications &= gld_supported_notes;
705 
706 	GLDM_LOCK_INIT(macinfo);
707 
708 	ddi_set_driver_private(devinfo, macinfo);
709 
710 	/*
711 	 * Now atomically get a PPA and put ourselves on the mac list.
712 	 */
713 	mutex_enter(&glddev->gld_devlock);
714 
715 #ifdef DEBUG
716 	if (macinfo->gldm_ppa != ddi_get_instance(devinfo))
717 		cmn_err(CE_WARN, "%s%d instance != ppa %d",
718 		    ddi_driver_name(devinfo), ddi_get_instance(devinfo),
719 		    macinfo->gldm_ppa);
720 #endif
721 
722 	/*
723 	 * Create style 2 node (gated by gld-provider-styles property).
724 	 *
725 	 * NOTE: When the CLONE_DEV flag is specified to
726 	 *	 ddi_create_minor_node() the minor number argument is
727 	 *	 immaterial. Opens of that node will go via the clone
728 	 *	 driver and gld_open() will always be passed a dev_t with
729 	 *	 minor of zero.
730 	 */
731 	if (glddev->gld_styles != -2) {
732 		if (ddi_create_minor_node(devinfo, glddev->gld_name, S_IFCHR,
733 		    0, DDI_NT_NET, CLONE_DEV) == DDI_FAILURE) {
734 			mutex_exit(&glddev->gld_devlock);
735 			goto late_failure;
736 		}
737 	}
738 
739 	/*
740 	 * Create style 1 node (gated by gld-provider-styles property)
741 	 */
742 	if (glddev->gld_styles != -1) {
743 		(void) sprintf(minordev, "%s%d", glddev->gld_name,
744 		    macinfo->gldm_ppa);
745 		if (ddi_create_minor_node(devinfo, minordev, S_IFCHR,
746 		    GLD_STYLE1_PPA_TO_MINOR(macinfo->gldm_ppa), DDI_NT_NET,
747 		    0) != DDI_SUCCESS) {
748 			mutex_exit(&glddev->gld_devlock);
749 			goto late_failure;
750 		}
751 	}
752 
753 	/* add ourselves to this major device's linked list of instances */
754 	gldinsque(macinfo, glddev->gld_mac_prev);
755 
756 	mutex_exit(&glddev->gld_devlock);
757 
758 	/*
759 	 * Unfortunately we need the ppa before we call gld_initstats();
760 	 * otherwise we would do this just above the mutex_enter above,
761 	 * in which case we could have set MAC_READY inside the mutex and
762 	 * would not have needed to check it in open and DL_ATTACH.  We
763 	 * avoid doing the initstats/kstat_create inside the mutex because
764 	 * it might be taken in our kstat_update routine and cause a
765 	 * deadlock with kstat_chain_lock.
766 	 */
767 
768 	/* gld_initstats() calls (*ifp->init)() */
769 	if (gld_initstats(macinfo) != GLD_SUCCESS) {
770 		mutex_enter(&glddev->gld_devlock);
771 		gldremque(macinfo);
772 		mutex_exit(&glddev->gld_devlock);
773 		goto late_failure;
774 	}
775 
776 	/*
777 	 * Need to indicate we are NOW ready to process interrupts;
778 	 * any interrupt before this is set is for someone else.
779 	 * This flag is also now used to tell open et al. that this
780 	 * mac is now fully ready and available for use.
781 	 */
782 	GLDM_LOCK(macinfo, RW_WRITER);
783 	macinfo->gldm_GLD_flags |= GLD_MAC_READY;
784 	GLDM_UNLOCK(macinfo);
785 
786 	/* log local ethernet address -- XXX not DDI compliant */
787 	if (macinfo->gldm_addrlen == sizeof (struct ether_addr))
788 		(void) localetheraddr(
789 		    (struct ether_addr *)macinfo->gldm_vendor_addr, NULL);
790 
791 	/* now put announcement into the message buffer */
792 	cmn_err(CE_CONT, "!%s%d: %s: type \"%s\" mac address %s\n",
793 	    glddev->gld_name,
794 	    macinfo->gldm_ppa, macinfo->gldm_ident,
795 	    mac_pvt->interfacep->mac_string,
796 	    gld_macaddr_sprintf(pbuf, macinfo->gldm_vendor_addr,
797 	    macinfo->gldm_addrlen));
798 
799 	ddi_report_dev(devinfo);
800 	return (DDI_SUCCESS);
801 
802 late_failure:
803 	ddi_remove_minor_node(devinfo, NULL);
804 	GLDM_LOCK_DESTROY(macinfo);
805 	if (mac_pvt->curr_macaddr != NULL)
806 		kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
807 	if (mac_pvt->statistics != NULL)
808 		kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
809 	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
810 	macinfo->gldm_mac_pvt = NULL;
811 
812 failure:
813 	mutex_enter(&gld_device_list.gld_devlock);
814 	glddev->gld_ndevice--;
815 	/*
816 	 * Note that just because this goes to zero here does not necessarily
817 	 * mean that we were the one who added the glddev above.  It's
818 	 * possible that the first mac unattached while we were in here
819 	 * failing to attach the second mac.  But we're now the last.
820 	 */
821 	if (glddev->gld_ndevice == 0) {
822 		/* There should be no macinfos left */
823 		ASSERT(glddev->gld_mac_next ==
824 		    (gld_mac_info_t *)&glddev->gld_mac_next);
825 		ASSERT(glddev->gld_mac_prev ==
826 		    (gld_mac_info_t *)&glddev->gld_mac_next);
827 
828 		/*
829 		 * There should be no DL_UNATTACHED streams: the system
830 		 * should not have detached the "first" devinfo which has
831 		 * all the open style 2 streams.
832 		 *
833 		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
834 		 */
835 		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
836 		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);
837 
838 		gldremque(glddev);
839 		mutex_destroy(&glddev->gld_devlock);
840 		if (glddev->gld_broadcast != NULL)
841 			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
842 		kmem_free(glddev, sizeof (glddev_t));
843 	}
844 	mutex_exit(&gld_device_list.gld_devlock);
845 
846 	return (DDI_FAILURE);
847 }
848 
849 /*
850  * gld_unregister (macinfo)
851  * remove the macinfo structure from local structures
852  * this is cleanup for a driver to be unloaded
853  */
854 int
855 gld_unregister(gld_mac_info_t *macinfo)
856 {
857 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
858 	glddev_t *glddev = mac_pvt->major_dev;
859 	gld_interface_t *ifp;
860 	int multisize = sizeof (gld_mcast_t) * glddev->gld_multisize;
861 
862 	mutex_enter(&glddev->gld_devlock);
863 	GLDM_LOCK(macinfo, RW_WRITER);
864 
865 	if (mac_pvt->nvlan > 0) {
866 		GLDM_UNLOCK(macinfo);
867 		mutex_exit(&glddev->gld_devlock);
868 		return (DDI_FAILURE);
869 	}
870 
871 #ifdef	GLD_DEBUG
872 	{
873 		int i;
874 
875 		for (i = 0; i < VLAN_HASHSZ; i++) {
876 			if ((mac_pvt->vlan_hash[i] != NULL))
877 				cmn_err(CE_PANIC,
878 				    "%s, line %d: "
879 				    "mac_pvt->vlan_hash[%d] != NULL",
880 				    __FILE__, __LINE__, i);
881 		}
882 	}
883 #endif
884 
885 	/* Delete this mac */
886 	gldremque(macinfo);
887 
888 	/* Disallow further entries to gld_recv() and gld_sched() */
889 	macinfo->gldm_GLD_flags |= GLD_UNREGISTERED;
890 
891 	GLDM_UNLOCK(macinfo);
892 	mutex_exit(&glddev->gld_devlock);
893 
894 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
895 	(*ifp->uninit)(macinfo);
896 
897 	ASSERT(mac_pvt->kstatp);
898 	kstat_delete(mac_pvt->kstatp);
899 
900 	ASSERT(GLDM_LOCK_INITED(macinfo));
901 	kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
902 	kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
903 
904 	if (mac_pvt->mcast_table != NULL)
905 		kmem_free(mac_pvt->mcast_table, multisize);
906 	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
907 	macinfo->gldm_mac_pvt = (caddr_t)NULL;
908 
909 	/* We now have one fewer instance for this major device */
910 	mutex_enter(&gld_device_list.gld_devlock);
911 	glddev->gld_ndevice--;
912 	if (glddev->gld_ndevice == 0) {
913 		/* There should be no macinfos left */
914 		ASSERT(glddev->gld_mac_next ==
915 		    (gld_mac_info_t *)&glddev->gld_mac_next);
916 		ASSERT(glddev->gld_mac_prev ==
917 		    (gld_mac_info_t *)&glddev->gld_mac_next);
918 
919 		/*
920 		 * There should be no DL_UNATTACHED streams: the system
921 		 * should not have detached the "first" devinfo which has
922 		 * all the open style 2 streams.
923 		 *
924 		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
925 		 */
926 		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
927 		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);
928 
929 		ddi_remove_minor_node(macinfo->gldm_devinfo, NULL);
930 		gldremque(glddev);
931 		mutex_destroy(&glddev->gld_devlock);
932 		if (glddev->gld_broadcast != NULL)
933 			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
934 		kmem_free(glddev, sizeof (glddev_t));
935 	}
936 	mutex_exit(&gld_device_list.gld_devlock);
937 
938 	return (DDI_SUCCESS);
939 }
940 
941 /*
942  * gld_initstats
943  * called from gld_register
944  */
945 static int
946 gld_initstats(gld_mac_info_t *macinfo)
947 {
948 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
949 	struct gldkstats *sp;
950 	glddev_t *glddev;
951 	kstat_t *ksp;
952 	gld_interface_t *ifp;
953 
954 	glddev = mac_pvt->major_dev;
955 
956 	if ((ksp = kstat_create(glddev->gld_name, macinfo->gldm_ppa,
957 	    NULL, "net", KSTAT_TYPE_NAMED,
958 	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
959 		cmn_err(CE_WARN,
960 		    "GLD: failed to create kstat structure for %s%d",
961 		    glddev->gld_name, macinfo->gldm_ppa);
962 		return (GLD_FAILURE);
963 	}
964 	mac_pvt->kstatp = ksp;
965 
966 	ksp->ks_update = gld_update_kstat;
967 	ksp->ks_private = (void *)macinfo;
968 
969 	sp = ksp->ks_data;
970 	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
971 	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
972 	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
973 	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
974 	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
975 	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
976 	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
977 	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
978 	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
979 	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
980 	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
981 	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
982 	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
983 	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
984 	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
985 	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
986 	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
987 	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
988 	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
989 	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
990 	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
991 	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
992 	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);
993 
994 	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
995 	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
996 	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);
997 
998 	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
999 	    KSTAT_DATA_UINT32);
1000 	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
1001 	    KSTAT_DATA_UINT32);
1002 
1003 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
1004 
1005 	(*ifp->init)(macinfo);
1006 
1007 	kstat_install(ksp);
1008 
1009 	return (GLD_SUCCESS);
1010 }
1011 
1012 /* called from kstat mechanism, and from wsrv's get_statistics_req */
1013 static int
1014 gld_update_kstat(kstat_t *ksp, int rw)
1015 {
1016 	gld_mac_info_t	*macinfo;
1017 	gld_mac_pvt_t	*mac_pvt;
1018 	struct gldkstats *gsp;
1019 	struct gld_stats *stats;
1020 
1021 	if (rw == KSTAT_WRITE)
1022 		return (EACCES);
1023 
1024 	macinfo = (gld_mac_info_t *)ksp->ks_private;
1025 	ASSERT(macinfo != NULL);
1026 
1027 	GLDM_LOCK(macinfo, RW_WRITER);
1028 
1029 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
1030 		GLDM_UNLOCK(macinfo);
1031 		return (EIO);	/* this one's not ready yet */
1032 	}
1033 
1034 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
1035 		GLDM_UNLOCK(macinfo);
1036 		return (EIO);	/* this one's not ready any more */
1037 	}
1038 
1039 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1040 	gsp = mac_pvt->kstatp->ks_data;
1041 	ASSERT(gsp);
1042 	stats = mac_pvt->statistics;
1043 
1044 	if (macinfo->gldm_get_stats)
1045 		(void) (*macinfo->gldm_get_stats)(macinfo, stats);
1046 
1047 	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
1048 	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
1049 	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
1050 	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
1051 	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;	/* 0 for now */
1052 	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
1053 
1054 	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
1055 	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
1056 	gsp->glds_xmtbadinterp.value.ui32 = stats->glds_xmtbadinterp;
1057 
1058 	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
1059 	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
1060 	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
1061 	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
1062 	gsp->glds_multircv.value.ul = stats->glds_multircv;
1063 	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
1064 	gsp->glds_blocked.value.ul = stats->glds_blocked;
1065 	gsp->glds_overflow.value.ul = stats->glds_overflow;
1066 	gsp->glds_underflow.value.ul = stats->glds_underflow;
1067 	gsp->glds_missed.value.ul = stats->glds_missed;
1068 	gsp->glds_norcvbuf.value.ul = stats->glds_norcvbuf +
1069 	    stats->glds_gldnorcvbuf;
1070 	gsp->glds_intr.value.ul = stats->glds_intr;
1071 
1072 	gsp->glds_speed.value.ui64 = stats->glds_speed;
1073 	gsp->glds_unknowns.value.ul = stats->glds_unknowns;
1074 	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
1075 	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
1076 	gsp->glds_rcvbadinterp.value.ui32 = stats->glds_rcvbadinterp;
1077 
1078 	if (mac_pvt->nprom)
1079 		(void) strcpy(gsp->glds_prom.value.c, "phys");
1080 	else if (mac_pvt->nprom_multi)
1081 		(void) strcpy(gsp->glds_prom.value.c, "multi");
1082 	else
1083 		(void) strcpy(gsp->glds_prom.value.c, "off");
1084 
1085 	(void) strcpy(gsp->glds_media.value.c, gld_media[
1086 	    stats->glds_media < sizeof (gld_media) / sizeof (gld_media[0])
1087 	    ? stats->glds_media : 0]);
1088 
1089 	switch (macinfo->gldm_type) {
1090 	case DL_ETHER:
1091 		gsp->glds_frame.value.ul = stats->glds_frame;
1092 		gsp->glds_crc.value.ul = stats->glds_crc;
1093 		gsp->glds_collisions.value.ul = stats->glds_collisions;
1094 		gsp->glds_excoll.value.ul = stats->glds_excoll;
1095 		gsp->glds_defer.value.ul = stats->glds_defer;
1096 		gsp->glds_short.value.ul = stats->glds_short;
1097 		gsp->glds_xmtlatecoll.value.ul = stats->glds_xmtlatecoll;
1098 		gsp->glds_nocarrier.value.ul = stats->glds_nocarrier;
1099 		gsp->glds_dot3_first_coll.value.ui32 =
1100 		    stats->glds_dot3_first_coll;
1101 		gsp->glds_dot3_multi_coll.value.ui32 =
1102 		    stats->glds_dot3_multi_coll;
1103 		gsp->glds_dot3_sqe_error.value.ui32 =
1104 		    stats->glds_dot3_sqe_error;
1105 		gsp->glds_dot3_mac_xmt_error.value.ui32 =
1106 		    stats->glds_dot3_mac_xmt_error;
1107 		gsp->glds_dot3_mac_rcv_error.value.ui32 =
1108 		    stats->glds_dot3_mac_rcv_error;
1109 		gsp->glds_dot3_frame_too_long.value.ui32 =
1110 		    stats->glds_dot3_frame_too_long;
1111 		(void) strcpy(gsp->glds_duplex.value.c, gld_duplex[
1112 		    stats->glds_duplex <
1113 		    sizeof (gld_duplex) / sizeof (gld_duplex[0]) ?
1114 		    stats->glds_duplex : 0]);
1115 		break;
1116 	case DL_TPR:
1117 		gsp->glds_dot5_line_error.value.ui32 =
1118 		    stats->glds_dot5_line_error;
1119 		gsp->glds_dot5_burst_error.value.ui32 =
1120 		    stats->glds_dot5_burst_error;
1121 		gsp->glds_dot5_signal_loss.value.ui32 =
1122 		    stats->glds_dot5_signal_loss;
1123 		gsp->glds_dot5_ace_error.value.ui32 =
1124 		    stats->glds_dot5_ace_error;
1125 		gsp->glds_dot5_internal_error.value.ui32 =
1126 		    stats->glds_dot5_internal_error;
1127 		gsp->glds_dot5_lost_frame_error.value.ui32 =
1128 		    stats->glds_dot5_lost_frame_error;
1129 		gsp->glds_dot5_frame_copied_error.value.ui32 =
1130 		    stats->glds_dot5_frame_copied_error;
1131 		gsp->glds_dot5_token_error.value.ui32 =
1132 		    stats->glds_dot5_token_error;
1133 		gsp->glds_dot5_freq_error.value.ui32 =
1134 		    stats->glds_dot5_freq_error;
1135 		break;
1136 	case DL_FDDI:
1137 		gsp->glds_fddi_mac_error.value.ui32 =
1138 		    stats->glds_fddi_mac_error;
1139 		gsp->glds_fddi_mac_lost.value.ui32 =
1140 		    stats->glds_fddi_mac_lost;
1141 		gsp->glds_fddi_mac_token.value.ui32 =
1142 		    stats->glds_fddi_mac_token;
1143 		gsp->glds_fddi_mac_tvx_expired.value.ui32 =
1144 		    stats->glds_fddi_mac_tvx_expired;
1145 		gsp->glds_fddi_mac_late.value.ui32 =
1146 		    stats->glds_fddi_mac_late;
1147 		gsp->glds_fddi_mac_ring_op.value.ui32 =
1148 		    stats->glds_fddi_mac_ring_op;
1149 		break;
1150 	case DL_IB:
1151 		break;
1152 	default:
1153 		break;
1154 	}
1155 
1156 	GLDM_UNLOCK(macinfo);
1157 
1158 #ifdef GLD_DEBUG
1159 	gld_check_assertions();
1160 	if (gld_debug & GLDRDE)
1161 		gld_sr_dump(macinfo);
1162 #endif
1163 
1164 	return (0);
1165 }
1166 
1167 static int
1168 gld_init_vlan_stats(gld_vlan_t *vlan)
1169 {
1170 	gld_mac_info_t *mac = vlan->gldv_mac;
1171 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
1172 	struct gldkstats *sp;
1173 	glddev_t *glddev;
1174 	kstat_t *ksp;
1175 	char *name;
1176 	int instance;
1177 
1178 	glddev = mac_pvt->major_dev;
1179 	name = glddev->gld_name;
1180 	instance = (vlan->gldv_id * GLD_VLAN_SCALE) + mac->gldm_ppa;
1181 
1182 	if ((ksp = kstat_create(name, instance,
1183 	    NULL, "net", KSTAT_TYPE_NAMED,
1184 	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
1185 		cmn_err(CE_WARN,
1186 		    "GLD: failed to create kstat structure for %s%d",
1187 		    name, instance);
1188 		return (GLD_FAILURE);
1189 	}
1190 
1191 	vlan->gldv_kstatp = ksp;
1192 
1193 	ksp->ks_update = gld_update_vlan_kstat;
1194 	ksp->ks_private = (void *)vlan;
1195 
1196 	sp = ksp->ks_data;
1197 	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
1198 	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
1199 	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
1200 	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
1201 	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
1202 	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
1203 	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
1204 	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
1205 	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
1206 	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
1207 	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
1208 	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
1209 	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
1210 	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
1211 	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
1212 	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
1213 	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
1214 	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
1215 	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
1216 	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
1217 	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
1218 	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
1219 	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);
1220 
1221 	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
1222 	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
1223 	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);
1224 
1225 	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
1226 	    KSTAT_DATA_UINT32);
1227 	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
1228 	    KSTAT_DATA_UINT32);
1229 
1230 	kstat_install(ksp);
1231 	return (GLD_SUCCESS);
1232 }
1233 
1234 static int
1235 gld_update_vlan_kstat(kstat_t *ksp, int rw)
1236 {
1237 	gld_vlan_t	*vlan;
1238 	gld_mac_info_t	*macinfo;
1239 	struct gldkstats *gsp;
1240 	struct gld_stats *stats;
1241 	gld_mac_pvt_t *mac_pvt;
1242 	uint32_t media;
1243 
1244 	if (rw == KSTAT_WRITE)
1245 		return (EACCES);
1246 
1247 	vlan = (gld_vlan_t *)ksp->ks_private;
1248 	ASSERT(vlan != NULL);
1249 
1250 	macinfo = vlan->gldv_mac;
1251 	GLDM_LOCK(macinfo, RW_WRITER);
1252 
1253 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1254 
1255 	gsp = vlan->gldv_kstatp->ks_data;
1256 	ASSERT(gsp);
1257 	stats = vlan->gldv_stats;
1258 
1259 	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
1260 	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
1261 	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
1262 	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
1263 	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
1264 	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;
1265 	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
1266 	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
1267 	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
1268 
1269 	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
1270 	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
1271 	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
1272 	gsp->glds_multircv.value.ul = stats->glds_multircv;
1273 	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
1274 	gsp->glds_blocked.value.ul = stats->glds_blocked;
1275 	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
1276 	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
1277 	gsp->glds_unknowns.value.ul = stats->glds_unknowns;
1278 	gsp->glds_xmtbadinterp.value.ui32 = stats->glds_xmtbadinterp;
1279 	gsp->glds_rcvbadinterp.value.ui32 = stats->glds_rcvbadinterp;
1280 
1281 	gsp->glds_speed.value.ui64 = mac_pvt->statistics->glds_speed;
1282 	media = mac_pvt->statistics->glds_media;
1283 	(void) strcpy(gsp->glds_media.value.c,
1284 	    gld_media[media < sizeof (gld_media) / sizeof (gld_media[0]) ?
1285 	    media : 0]);
1286 
1287 	GLDM_UNLOCK(macinfo);
1288 	return (0);
1289 }
1290 
1291 /*
1292  * The device-dependent driver specifies gld_getinfo as its getinfo routine.
1293  */
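/*
 * A sketch of how a driver typically wires this up (illustrative only;
 * xx_ops and the elided dev_ops members are hypothetical and
 * abbreviated):
 *
 *	static struct dev_ops xx_ops = {
 *		DEVO_REV,		(devo_rev)
 *		0,			(devo_refcnt)
 *		gld_getinfo,		(devo_getinfo)
 *		...
 *	};
 */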
1294 /*ARGSUSED*/
1295 int
1296 gld_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
1297 {
1298 	dev_info_t	*devinfo;
1299 	minor_t		minor = getminor((dev_t)arg);
1300 	int		rc = DDI_FAILURE;
1301 
1302 	switch (cmd) {
1303 	case DDI_INFO_DEVT2DEVINFO:
1304 		if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
1305 			*(dev_info_t **)resultp = devinfo;
1306 			rc = DDI_SUCCESS;
1307 		}
1308 		break;
1309 	case DDI_INFO_DEVT2INSTANCE:
1310 		/* Need static mapping for deferred attach */
1311 		if (minor == GLD_USE_STYLE2) {
1312 			/*
1313 			 * Style 2:  this minor number does not correspond to
1314 			 * any particular instance number.
1315 			 */
1316 			rc = DDI_FAILURE;
1317 		} else if (minor <= GLD_MAX_STYLE1_MINOR) {
1318 			/* Style 1:  calculate the PPA from the minor */
1319 			*resultp = (void *)(uintptr_t)
1320 			    GLD_STYLE1_MINOR_TO_PPA(minor);
1321 			rc = DDI_SUCCESS;
1322 		} else {
1323 			/* Clone:  look for it.  Not a static mapping */
1324 			if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
1325 				*resultp = (void *)(uintptr_t)
1326 				    ddi_get_instance(devinfo);
1327 				rc = DDI_SUCCESS;
1328 			}
1329 		}
1330 		break;
1331 	}
1332 
1333 	return (rc);
1334 }
1335 
1336 /* called from gld_getinfo */
1337 dev_info_t *
1338 gld_finddevinfo(dev_t dev)
1339 {
1340 	minor_t		minor = getminor(dev);
1341 	glddev_t	*device;
1342 	gld_mac_info_t	*mac;
1343 	gld_vlan_t	*vlan;
1344 	gld_t		*str;
1345 	dev_info_t	*devinfo = NULL;
1346 	int		i;
1347 
1348 	if (minor == GLD_USE_STYLE2) {
1349 		/*
1350 		 * Style 2:  this minor number does not correspond to
1351 		 * any particular instance number.
1352 		 *
1353 		 * XXX We don't know what to say.  See Bug 1165519.
1354 		 */
1355 		return (NULL);
1356 	}
1357 
1358 	mutex_enter(&gld_device_list.gld_devlock);	/* hold the device */
1359 
1360 	device = gld_devlookup(getmajor(dev));
1361 	if (device == NULL) {
1362 		/* There are no attached instances of this device */
1363 		mutex_exit(&gld_device_list.gld_devlock);
1364 		return (NULL);
1365 	}
1366 
1367 	/*
1368 	 * Search all attached macs and streams.
1369 	 *
1370 	 * XXX We don't bother checking the DL_UNATTACHED streams since
1371 	 * we don't know what devinfo we should report back even if we
1372 	 * found the minor.  Maybe we should associate streams that are
1373 	 * not currently attached to a PPA with the "first" devinfo node
1374 	 * of the major device to attach -- the one that created the
1375 	 * minor node for the generic device.
1376 	 */
1377 	mutex_enter(&device->gld_devlock);
1378 
1379 	for (mac = device->gld_mac_next;
1380 	    mac != (gld_mac_info_t *)&device->gld_mac_next;
1381 	    mac = mac->gldm_next) {
1382 		gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
1383 
1384 		if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
1385 			continue;	/* this one's not ready yet */
1386 		if (minor <= GLD_MAX_STYLE1_MINOR) {
1387 			/* Style 1 -- look for the corresponding PPA */
1388 			if (minor == GLD_STYLE1_PPA_TO_MINOR(mac->gldm_ppa)) {
1389 				devinfo = mac->gldm_devinfo;
1390 				goto out;	/* found it! */
1391 			} else
1392 				continue;	/* not this PPA */
1393 		}
1394 
1395 		/* We are looking for a clone */
1396 		for (i = 0; i < VLAN_HASHSZ; i++) {
1397 			for (vlan = pvt->vlan_hash[i];
1398 			    vlan != NULL; vlan = vlan->gldv_next) {
1399 				for (str = vlan->gldv_str_next;
1400 				    str != (gld_t *)&vlan->gldv_str_next;
1401 				    str = str->gld_next) {
1402 					ASSERT(str->gld_mac_info == mac);
1403 					if (minor == str->gld_minor) {
1404 						devinfo = mac->gldm_devinfo;
1405 						goto out;
1406 					}
1407 				}
1408 			}
1409 		}
1410 	}
1411 out:
1412 	mutex_exit(&device->gld_devlock);
1413 	mutex_exit(&gld_device_list.gld_devlock);
1414 	return (devinfo);
1415 }
1416 
1417 /*
1418  * STREAMS open routine.  The device-dependent driver specifies this as its
1419  * open entry point.
1420  */
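/*
 * A sketch of the STREAMS plumbing a GLD-based driver typically
 * declares (illustrative only; xx_minfo and the other xx_* names are
 * hypothetical):
 *
 *	static struct qinit xx_rinit = {
 *		NULL, gld_rsrv, gld_open, gld_close, NULL, &xx_minfo, NULL
 *	};
 *	static struct qinit xx_winit = {
 *		gld_wput, gld_wsrv, NULL, NULL, NULL, &xx_minfo, NULL
 *	};
 *	static struct streamtab xx_info = { &xx_rinit, &xx_winit, NULL, NULL };
 */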
1421 /*ARGSUSED2*/
1422 int
1423 gld_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *cred)
1424 {
1425 	gld_mac_pvt_t *mac_pvt;
1426 	gld_t *gld;
1427 	glddev_t *glddev;
1428 	gld_mac_info_t *macinfo;
1429 	minor_t minor = getminor(*dev);
1430 	gld_vlan_t *vlan;
1431 	t_uscalar_t ppa;
1432 
1433 	ASSERT(q != NULL);
1434 
1435 	if (minor > GLD_MAX_STYLE1_MINOR)
1436 		return (ENXIO);
1437 
1438 	ASSERT(q->q_ptr == NULL);	/* Clone device gives us a fresh Q */
1439 
1440 	/* Find our per-major glddev_t structure */
1441 	mutex_enter(&gld_device_list.gld_devlock);
1442 	glddev = gld_devlookup(getmajor(*dev));
1443 
1444 	/*
1445 	 * This glddev will hang around since detach (and therefore
1446 	 * gld_unregister) can't run while we're here in the open routine.
1447 	 */
1448 	mutex_exit(&gld_device_list.gld_devlock);
1449 
1450 	if (glddev == NULL)
1451 		return (ENXIO);
1452 
1453 #ifdef GLD_DEBUG
1454 	if (gld_debug & GLDPROT) {
1455 		if (minor == GLD_USE_STYLE2)
1456 			cmn_err(CE_NOTE, "gld_open(%p, Style 2)", (void *)q);
1457 		else
1458 			cmn_err(CE_NOTE, "gld_open(%p, Style 1, minor = %d)",
1459 			    (void *)q, minor);
1460 	}
1461 #endif
1462 
1463 	/*
1464 	 * get a per-stream structure and link things together so we
1465 	 * can easily find them later.
1466 	 */
1467 	gld = kmem_zalloc(sizeof (gld_t), KM_SLEEP);
1468 
1469 	/*
1470 	 * fill in the structure and state info
1471 	 */
1472 	gld->gld_qptr = q;
1473 	gld->gld_device = glddev;
1474 	gld->gld_state = DL_UNATTACHED;
1475 
1476 	/*
1477 	 * we must atomically find a free minor number and add the stream
1478 	 * to a list, because gld_findminor has to traverse the lists to
1479 	 * determine which minor numbers are free.
1480 	 */
1481 	mutex_enter(&glddev->gld_devlock);
1482 
1483 	/* find a free minor device number for the clone */
1484 	gld->gld_minor = gld_findminor(glddev);
1485 	if (gld->gld_minor == 0) {
1486 		mutex_exit(&glddev->gld_devlock);
1487 		kmem_free(gld, sizeof (gld_t));
1488 		return (ENOSR);
1489 	}
1490 
1491 #ifdef GLD_VERBOSE_DEBUG
1492 	if (gld_debug & GLDPROT)
1493 		cmn_err(CE_NOTE, "gld_open() gld ptr: %p minor: %d",
1494 		    (void *)gld, gld->gld_minor);
1495 #endif
1496 
1497 	if (minor == GLD_USE_STYLE2) {
1498 		gld->gld_style = DL_STYLE2;
1499 		*dev = makedevice(getmajor(*dev), gld->gld_minor);
1500 		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
1501 		gldinsque(gld, glddev->gld_str_prev);
1502 #ifdef GLD_VERBOSE_DEBUG
1503 		if (gld_debug & GLDPROT)
1504 			cmn_err(CE_NOTE, "GLDstruct added to device list");
1505 #endif
1506 		(void) qassociate(q, -1);
1507 		goto done;
1508 	}
1509 
1510 	gld->gld_style = DL_STYLE1;
1511 
1512 	/* the PPA is actually 1 less than the minordev */
1513 	ppa = GLD_STYLE1_MINOR_TO_PPA(minor);
1514 
1515 	for (macinfo = glddev->gld_mac_next;
1516 	    macinfo != (gld_mac_info_t *)(&glddev->gld_mac_next);
1517 	    macinfo = macinfo->gldm_next) {
1518 		ASSERT(macinfo != NULL);
1519 		if (macinfo->gldm_ppa != ppa)
1520 			continue;
1521 
1522 		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
1523 			continue;	/* this one's not ready yet */
1524 
1525 		/*
1526 		 * we found the correct PPA
1527 		 */
1528 		GLDM_LOCK(macinfo, RW_WRITER);
1529 
1530 		gld->gld_mac_info = macinfo;
1531 
1532 		if (macinfo->gldm_send_tagged != NULL)
1533 			gld->gld_send = macinfo->gldm_send_tagged;
1534 		else
1535 			gld->gld_send = macinfo->gldm_send;
1536 
1537 		/* now ready for action */
1538 		gld->gld_state = DL_UNBOUND;
1539 
1540 		if ((vlan = gld_get_vlan(macinfo, VLAN_VID_NONE)) == NULL) {
1541 			GLDM_UNLOCK(macinfo);
1542 			mutex_exit(&glddev->gld_devlock);
1543 			kmem_free(gld, sizeof (gld_t));
1544 			return (EIO);
1545 		}
1546 
1547 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1548 		if (!mac_pvt->started) {
1549 			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
1550 				gld_rem_vlan(vlan);
1551 				GLDM_UNLOCK(macinfo);
1552 				mutex_exit(&glddev->gld_devlock);
1553 				kmem_free(gld, sizeof (gld_t));
1554 				return (EIO);
1555 			}
1556 		}
1557 
1558 		gld->gld_vlan = vlan;
1559 		vlan->gldv_nstreams++;
1560 		gldinsque(gld, vlan->gldv_str_prev);
1561 		*dev = makedevice(getmajor(*dev), gld->gld_minor);
1562 		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
1563 
1564 		GLDM_UNLOCK(macinfo);
1565 #ifdef GLD_VERBOSE_DEBUG
1566 		if (gld_debug & GLDPROT)
1567 			cmn_err(CE_NOTE,
1568 			    "GLDstruct added to instance list");
1569 #endif
1570 		break;
1571 	}
1572 
1573 	if (gld->gld_state == DL_UNATTACHED) {
1574 		mutex_exit(&glddev->gld_devlock);
1575 		kmem_free(gld, sizeof (gld_t));
1576 		return (ENXIO);
1577 	}
1578 
1579 done:
1580 	mutex_exit(&glddev->gld_devlock);
1581 	noenable(WR(q));	/* We'll do the qenables manually */
1582 	qprocson(q);		/* start the queues running */
1583 	qenable(WR(q));
1584 	return (0);
1585 }
1586 
1587 /*
1588  * normal stream close call checks current status and cleans up
1589  * data structures that were dynamically allocated
1590  */
1591 /*ARGSUSED1*/
1592 int
1593 gld_close(queue_t *q, int flag, cred_t *cred)
1594 {
1595 	gld_t	*gld = (gld_t *)q->q_ptr;
1596 	glddev_t *glddev = gld->gld_device;
1597 
1598 	ASSERT(q);
1599 	ASSERT(gld);
1600 
1601 #ifdef GLD_DEBUG
1602 	if (gld_debug & GLDPROT) {
1603 		cmn_err(CE_NOTE, "gld_close(%p, Style %d)",
1604 		    (void *)q, (gld->gld_style & 0x1) + 1);
1605 	}
1606 #endif
1607 
1608 	/* Hold all device streams lists still while we check for a macinfo */
1609 	mutex_enter(&glddev->gld_devlock);
1610 
1611 	if (gld->gld_mac_info != NULL) {
1612 		/* If there's a macinfo, block recv while we change state */
1613 		GLDM_LOCK(gld->gld_mac_info, RW_WRITER);
1614 		gld->gld_flags |= GLD_STR_CLOSING; /* no more rcv putnexts */
1615 		GLDM_UNLOCK(gld->gld_mac_info);
1616 	} else {
1617 		/* no mac DL_ATTACHED right now */
1618 		gld->gld_flags |= GLD_STR_CLOSING;
1619 	}
1620 
1621 	mutex_exit(&glddev->gld_devlock);
1622 
1623 	/*
1624 	 * qprocsoff before we call gld_unbind/gldunattach, so that
1625 	 * we know wsrv isn't in there trying to undo what we're doing.
1626 	 */
1627 	qprocsoff(q);
1628 
1629 	ASSERT(gld->gld_wput_count == 0);
1630 	gld->gld_wput_count = 0;	/* just in case */
1631 
1632 	if (gld->gld_state == DL_IDLE) {
1633 		/* Need to unbind */
1634 		ASSERT(gld->gld_mac_info != NULL);
1635 		(void) gld_unbind(WR(q), NULL);
1636 	}
1637 
1638 	if (gld->gld_state == DL_UNBOUND) {
1639 		/*
1640 		 * Need to unattach.
1641 		 * For a Style 2 stream, gldunattach also
1642 		 * associates the queue with a NULL dip.
1643 		 */
1644 		ASSERT(gld->gld_mac_info != NULL);
1645 		(void) gldunattach(WR(q), NULL);
1646 	}
1647 
1648 	/* disassociate the stream from the device */
1649 	q->q_ptr = WR(q)->q_ptr = NULL;
1650 
1651 	/*
1652 	 * Since we unattached above (if necessary), we know that we're
1653 	 * on the per-major list of unattached streams, rather than a
1654 	 * per-PPA list.  So we know we should hold the devlock.
1655 	 */
1656 	mutex_enter(&glddev->gld_devlock);
1657 	gldremque(gld);			/* remove from Style 2 list */
1658 	mutex_exit(&glddev->gld_devlock);
1659 
1660 	kmem_free(gld, sizeof (gld_t));
1661 
1662 	return (0);
1663 }
1664 
1665 /*
1666  * gld_rsrv (q)
1667  *	simple read service procedure
1668  *	its purpose is to avoid the time it takes for packets
1669  *	to move through IP, so we can get them off the board
1670  *	as fast as possible given limited PC resources.
1671  *
1672  *	This is not normally used in the current implementation.  It
1673  *	can be selected with the undocumented property "fast_recv".
1674  *	If that property is set, gld_recv will send the packet
1675  *	upstream with a putq() rather than a putnext(), thus causing
1676  *	this routine to be scheduled.
1677  */
1678 int
1679 gld_rsrv(queue_t *q)
1680 {
1681 	mblk_t *mp;
1682 
1683 	while ((mp = getq(q)) != NULL) {
1684 		if (canputnext(q)) {
1685 			putnext(q, mp);
1686 		} else {
1687 			freemsg(mp);
1688 		}
1689 	}
1690 	return (0);
1691 }
1692 
1693 /*
1694  * gld_wput (q, mp)
1695  * general gld stream write put routine. Receives fastpath data from upper
1696  * modules and processes it immediately.  ioctl and M_PROTO/M_PCPROTO are
1697  * queued for later processing by the service procedure.
1698  */
1699 
1700 int
1701 gld_wput(queue_t *q, mblk_t *mp)
1702 {
1703 	gld_t  *gld = (gld_t *)(q->q_ptr);
1704 	int	rc;
1705 	boolean_t multidata = B_TRUE;
1706 
1707 #ifdef GLD_DEBUG
1708 	if (gld_debug & GLDTRACE)
1709 		cmn_err(CE_NOTE, "gld_wput(%p %p): type %x",
1710 		    (void *)q, (void *)mp, DB_TYPE(mp));
1711 #endif
1712 	switch (DB_TYPE(mp)) {
1713 
1714 	case M_DATA:
1715 		/* fast data / raw support */
1716 		/* we must be DL_ATTACHED and DL_BOUND to do this */
1717 		/* Tricky to access memory without taking the mutex */
1718 		if ((gld->gld_flags & (GLD_RAW | GLD_FAST)) == 0 ||
1719 		    gld->gld_state != DL_IDLE) {
1720 			merror(q, mp, EPROTO);
1721 			break;
1722 		}
1723 		multidata = B_FALSE;
1724 		/* LINTED: E_CASE_FALLTHRU */
1725 	case M_MULTIDATA:
1726 		/* Only call gld_start() directly if nothing queued ahead */
1727 		/* No guarantees about ordering with different threads */
1728 		if (q->q_first)
1729 			goto use_wsrv;
1730 
1731 		/*
1732 		 * This can happen if wsrv has taken off the last mblk but
1733 		 * is still processing it.
1734 		 */
1735 		membar_consumer();
1736 		if (gld->gld_in_wsrv)
1737 			goto use_wsrv;
1738 
1739 		/*
1740 		 * Keep a count of current wput calls to start.
1741 		 * Nonzero count delays any attempted DL_UNBIND.
1742 		 * See comments above gld_start().
1743 		 */
1744 		atomic_add_32((uint32_t *)&gld->gld_wput_count, 1);
1745 		membar_enter();
1746 
1747 		/* Recheck state now wput_count is set to prevent DL_UNBIND */
1748 		/* If this Q is in process of DL_UNBIND, don't call start */
1749 		if (gld->gld_state != DL_IDLE || gld->gld_in_unbind) {
1750 			/* Extremely unlikely */
1751 			atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
1752 			goto use_wsrv;
1753 		}
1754 
1755 		rc = (multidata) ? gld_start_mdt(q, mp, GLD_WPUT) :
1756 		    gld_start(q, mp, GLD_WPUT, UPRI(gld, mp->b_band));
1757 
1758 		/* Allow DL_UNBIND again */
1759 		membar_exit();
1760 		atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
1761 
1762 		if (rc == GLD_NORESOURCES)
1763 			qenable(q);
1764 		break;	/*  Done with this packet */
1765 
1766 use_wsrv:
1767 		/* Q not empty, in DL_DETACH, or start gave NORESOURCES */
1768 		(void) putq(q, mp);
1769 		qenable(q);
1770 		break;
1771 
1772 	case M_IOCTL:
1773 		/* ioctl relies on wsrv single threading per queue */
1774 		(void) putq(q, mp);
1775 		qenable(q);
1776 		break;
1777 
1778 	case M_CTL:
1779 		(void) putq(q, mp);
1780 		qenable(q);
1781 		break;
1782 
1783 	case M_FLUSH:		/* canonical flush handling */
1784 		/* XXX Should these be FLUSHALL? */
1785 		if (*mp->b_rptr & FLUSHW)
1786 			flushq(q, 0);
1787 		if (*mp->b_rptr & FLUSHR) {
1788 			flushq(RD(q), 0);
1789 			*mp->b_rptr &= ~FLUSHW;
1790 			qreply(q, mp);
1791 		} else
1792 			freemsg(mp);
1793 		break;
1794 
1795 	case M_PROTO:
1796 	case M_PCPROTO:
1797 		/* these rely on wsrv single threading per queue */
1798 		(void) putq(q, mp);
1799 		qenable(q);
1800 		break;
1801 
1802 	default:
1803 #ifdef GLD_DEBUG
1804 		if (gld_debug & GLDETRACE)
1805 			cmn_err(CE_WARN,
1806 			    "gld: Unexpected packet type from queue: 0x%x",
1807 			    DB_TYPE(mp));
1808 #endif
1809 		freemsg(mp);
1810 	}
1811 	return (0);
1812 }
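/*
 * Illustrative sketch (not part of the driver): the other half of the
 * gld_wput_count handshake used above.  The DL_UNBIND/DL_DETACH processing
 * is outside this excerpt, so the helper below only shows, under that
 * assumption, the "putbq and retry later" check described in the block
 * comment above gld_start().  The name wput_count_busy_sketch is
 * hypothetical.
 */
#if 0	/* sketch only, not compiled */
static boolean_t
wput_count_busy_sketch(queue_t *q, mblk_t *mp)
{
	gld_t *gld = (gld_t *)q->q_ptr;

	membar_consumer();
	if (gld->gld_wput_count != 0) {
		/* wput=>gld_start threads still active; try again later */
		(void) putbq(q, mp);
		qenable(q);
		return (B_TRUE);
	}
	return (B_FALSE);
}
#endif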
1813 
1814 /*
1815  * gld_wsrv - Incoming messages are processed according to the DLPI protocol
1816  * specification.
1817  *
1818  * wsrv is single-threaded per Q.  We make use of this to avoid taking the
1819  * lock for reading data items that are only ever written by us.
1820  */
1821 
1822 int
1823 gld_wsrv(queue_t *q)
1824 {
1825 	mblk_t *mp;
1826 	gld_t *gld = (gld_t *)q->q_ptr;
1827 	gld_mac_info_t *macinfo;
1828 	union DL_primitives *prim;
1829 	int err;
1830 	boolean_t multidata;
1831 
1832 #ifdef GLD_DEBUG
1833 	if (gld_debug & GLDTRACE)
1834 		cmn_err(CE_NOTE, "gld_wsrv(%p)", (void *)q);
1835 #endif
1836 
1837 	ASSERT(!gld->gld_in_wsrv);
1838 
1839 	gld->gld_xwait = B_FALSE; /* We are now going to process this Q */
1840 
1841 	if (q->q_first == NULL)
1842 		return (0);
1843 
1844 	macinfo = gld->gld_mac_info;
1845 
1846 	/*
1847 	 * Help wput avoid a call to gld_start if there might be a message
1848 	 * previously queued by that thread being processed here.
1849 	 */
1850 	gld->gld_in_wsrv = B_TRUE;
1851 	membar_enter();
1852 
1853 	while ((mp = getq(q)) != NULL) {
1854 		switch (DB_TYPE(mp)) {
1855 		case M_DATA:
1856 		case M_MULTIDATA:
1857 			multidata = (DB_TYPE(mp) == M_MULTIDATA);
1858 
1859 			/*
1860 			 * retry of a previously processed UNITDATA_REQ
1861 			 * This is either a retry of a previously processed
1862 			 * UNITDATA_REQ, or a RAW or FAST message from above.
1863 			if (macinfo == NULL) {
1864 				/* No longer attached to a PPA, drop packet */
1865 				freemsg(mp);
1866 				break;
1867 			}
1868 
1869 			gld->gld_sched_ran = B_FALSE;
1870 			membar_enter();
1871 			err = (multidata) ? gld_start_mdt(q, mp, GLD_WSRV) :
1872 			    gld_start(q, mp, GLD_WSRV, UPRI(gld, mp->b_band));
1873 			if (err == GLD_NORESOURCES) {
1874 				/* gld_sched will qenable us later */
1875 				gld->gld_xwait = B_TRUE; /* want qenable */
1876 				membar_enter();
1877 				/*
1878 				 * v2:  we're not holding the lock; it's
1879 				 * possible that the driver could have already
1880 				 * called gld_sched (following up on its
1881 				 * return of GLD_NORESOURCES), before we got a
1882 				 * chance to do the putbq() and set gld_xwait.
1883 				 * So if we saw a call to gld_sched that
1884 				 * examined this queue, since our call to
1885 				 * gld_start() above, then it's possible we've
1886 				 * already seen the only call to gld_sched()
1887 				 * we're ever going to see.  So we better retry
1888 				 * transmitting this packet right now.
1889 				 */
1890 				if (gld->gld_sched_ran) {
1891 #ifdef GLD_DEBUG
1892 					if (gld_debug & GLDTRACE)
1893 						cmn_err(CE_NOTE, "gld_wsrv: "
1894 						    "sched was called");
1895 #endif
1896 					break;	/* try again right now */
1897 				}
1898 				gld->gld_in_wsrv = B_FALSE;
1899 				return (0);
1900 			}
1901 			break;
1902 
1903 		case M_IOCTL:
1904 			(void) gld_ioctl(q, mp);
1905 			break;
1906 
1907 		case M_CTL:
1908 			if (macinfo == NULL) {
1909 				freemsg(mp);
1910 				break;
1911 			}
1912 
1913 			if (macinfo->gldm_mctl != NULL) {
1914 				GLDM_LOCK(macinfo, RW_WRITER);
1915 				(void) (*macinfo->gldm_mctl) (macinfo, q, mp);
1916 				GLDM_UNLOCK(macinfo);
1917 			} else {
1918 				/* The driver doesn't recognize it; just drop */
1919 				freemsg(mp);
1920 			}
1921 			break;
1922 
1923 		case M_PROTO:	/* Will be a DLPI message of some type */
1924 		case M_PCPROTO:
1925 			if ((err = gld_cmds(q, mp)) != GLDE_OK) {
1926 				if (err == GLDE_RETRY) {
1927 					gld->gld_in_wsrv = B_FALSE;
1928 					return (0); /* quit while we're ahead */
1929 				}
1930 				prim = (union DL_primitives *)mp->b_rptr;
1931 				dlerrorack(q, mp, prim->dl_primitive, err, 0);
1932 			}
1933 			break;
1934 
1935 		default:
1936 			/* This should never happen */
1937 #ifdef GLD_DEBUG
1938 			if (gld_debug & GLDERRS)
1939 				cmn_err(CE_WARN,
1940 				    "gld_wsrv: db_type(%x) not supported",
1941 				    mp->b_datap->db_type);
1942 #endif
1943 			freemsg(mp);	/* unknown types are discarded */
1944 			break;
1945 		}
1946 	}
1947 
1948 	membar_exit();
1949 	gld->gld_in_wsrv = B_FALSE;
1950 	return (0);
1951 }
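/*
 * Illustrative sketch (not part of the driver): the flow-control handshake
 * between gld_wsrv() above and gld_sched() below, reduced to its essentials.
 * The two helper names are hypothetical; the flags and memory barriers are
 * the ones the real routines use.
 */
#if 0	/* sketch only, not compiled */
/* transmit side, after gld_start() has returned GLD_NORESOURCES */
static void
tx_noresources_sketch(gld_t *gld)
{
	gld->gld_xwait = B_TRUE;	/* ask gld_sched() for a qenable */
	membar_enter();
	if (gld->gld_sched_ran) {
		/* gld_sched() already ran; we may never see another one, */
		/* so retry the transmit right now instead of sleeping     */
	}
}

/* driver side, inside gld_sched(), for each stream on the macinfo */
static void
sched_side_sketch(gld_t *gld)
{
	gld->gld_sched_ran = B_TRUE;
	membar_enter();
	if (gld->gld_xwait) {
		gld->gld_xwait = B_FALSE;
		qenable(WR(gld->gld_qptr));
	}
}
#endif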
1952 
1953 /*
1954  * gld_start() can get called from gld_wput(), gld_wsrv(), or gld_unitdata().
1955  *
1956  * We only come directly from wput() in the GLD_FAST (fastpath) or RAW case.
1957  *
1958  * In particular, we must avoid calling gld_precv*() if we came from wput().
1959  * gld_precv*() is where we, on the transmit side, loop back our outgoing
1960  * packets to the receive side if we are in physical promiscuous mode.
1961  * Since the receive side holds a lock across its call to the upstream
1962  * putnext, and that upstream module could well have looped back to our
1963  * wput() routine on the same thread, we cannot call gld_precv* from here
1964  * for fear of causing a recursive lock entry in our receive code.
1965  *
1966  * There is a problem here when coming from gld_wput().  While wput
1967  * only comes here if the queue is attached to a PPA and bound to a SAP
1968  * and there are no messages on the queue ahead of the M_DATA that could
1969  * change that, it is theoretically possible that another thread could
1970  * now wput a DL_UNBIND and a DL_DETACH message, and the wsrv() routine
1971  * could wake up and process them, before we finish processing this
1972  * send of the M_DATA.  This can only possibly happen on a Style 2 RAW or
1973  * FAST (fastpath) stream:  non RAW/FAST streams always go through wsrv(),
1974  * and Style 1 streams only DL_DETACH in the close routine, where
1975  * qprocsoff() protects us.  If this happens we could end up calling
1976  * gldm_send() after we have detached the stream and possibly called
1977  * gldm_stop().  Worse, once the number of attached streams goes to zero,
1978  * detach/unregister could be called, and the macinfo could go away entirely.
1979  *
1980  * No one has ever seen this happen.
1981  *
1982  * It is some trouble to fix this, and we would rather not add any mutex
1983  * logic into the wput() routine, which is supposed to be a "fast"
1984  * path.
1985  *
1986  * What I've done is use an atomic counter to keep a count of the number
1987  * of threads currently calling gld_start() from wput() on this stream.
1988  * If DL_DETACH sees this as nonzero, it putbqs the request back onto
1989  * the queue and qenables, hoping to have better luck next time.  Since
1990  * people shouldn't be trying to send after they've asked to DL_DETACH,
1991  * hopefully very soon all the wput=>start threads should have returned
1992  * and the DL_DETACH will succeed.  It's hard to test this since the odds
1993  * of the failure even trying to happen are so small.  I probably could
1994  * have ignored the whole issue and never been the worse for it.
1995  */
1996 static int
1997 gld_start(queue_t *q, mblk_t *mp, int caller, uint32_t upri)
1998 {
1999 	mblk_t *nmp;
2000 	gld_t *gld = (gld_t *)q->q_ptr;
2001 	gld_mac_info_t *macinfo;
2002 	gld_mac_pvt_t *mac_pvt;
2003 	int rc;
2004 	gld_interface_t *ifp;
2005 	pktinfo_t pktinfo;
2006 	uint32_t vtag;
2007 	gld_vlan_t *vlan;
2008 
2009 	ASSERT(DB_TYPE(mp) == M_DATA);
2010 	macinfo = gld->gld_mac_info;
2011 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2012 	ifp = mac_pvt->interfacep;
2013 	vlan = (gld_vlan_t *)gld->gld_vlan;
2014 
2015 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_TX) != 0) {
2016 		freemsg(mp);
2017 #ifdef GLD_DEBUG
2018 		if (gld_debug & GLDERRS)
2019 			cmn_err(CE_WARN,
2020 			    "gld_start: failed to interpret outbound packet");
2021 #endif
2022 		vlan->gldv_stats->glds_xmtbadinterp++;
2023 		return (GLD_BADARG);
2024 	}
2025 
2026 	/*
2027 	 * We're not holding the lock for this check.  If the promiscuous
2028 	 * state is in flux it doesn't matter much if we get this wrong.
2029 	 */
2030 	if (mac_pvt->nprom > 0) {
2031 		/*
2032 		 * We want to loopback to the receive side, but to avoid
2033 		 * recursive lock entry:  if we came from wput(), which
2034 		 * could have looped back via IP from our own receive
2035 		 * interrupt thread, we decline this request.  wput()
2036 		 * will then queue the packet for wsrv().  This means
2037 		 * that when snoop is running we don't get the advantage
2038 		 * of the wput() multithreaded direct entry to the
2039 		 * driver's send routine.
2040 		 */
2041 		if (caller == GLD_WPUT) {
2042 			(void) putbq(q, mp);
2043 			return (GLD_NORESOURCES);
2044 		}
2045 		if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
2046 			nmp = dupmsg_noloan(mp);
2047 		else
2048 			nmp = dupmsg(mp);
2049 	} else
2050 		nmp = NULL;		/* we need no loopback */
2051 
2052 	vtag = GLD_MK_VTAG(vlan->gldv_ptag, upri);
2053 	if (ifp->hdr_size > 0 &&
2054 	    pktinfo.pktLen > ifp->hdr_size + (vtag == 0 ? 0 : VTAG_SIZE) +
2055 	    macinfo->gldm_maxpkt) {
2056 		freemsg(mp);	/* discard oversized outbound packet */
2057 		if (nmp)
2058 			freemsg(nmp);	/* free the duped message */
2059 #ifdef GLD_DEBUG
2060 		if (gld_debug & GLDERRS)
2061 			cmn_err(CE_WARN,
2062 			    "gld_start: oversize outbound packet, size %d, "
2063 			    "max %d", pktinfo.pktLen,
2064 			    ifp->hdr_size + macinfo->gldm_maxpkt);
2065 #endif
2066 		vlan->gldv_stats->glds_xmtbadinterp++;
2067 		return (GLD_BADARG);
2068 	}
2069 
2070 	rc = (*gld->gld_send)(macinfo, mp, vtag);
2071 
2072 	if (rc != GLD_SUCCESS) {
2073 		if (rc == GLD_NORESOURCES) {
2074 			vlan->gldv_stats->glds_xmtretry++;
2075 			(void) putbq(q, mp);
2076 		} else {
2077 			/* transmit error; drop the packet */
2078 			freemsg(mp);
2079 			/* We're supposed to count failed attempts as well */
2080 			UPDATE_STATS(vlan, pktinfo, 1);
2081 #ifdef GLD_DEBUG
2082 			if (gld_debug & GLDERRS)
2083 				cmn_err(CE_WARN,
2084 				    "gld_start: gldm_send failed %d", rc);
2085 #endif
2086 		}
2087 		if (nmp)
2088 			freemsg(nmp);	/* free the dupped message */
2089 		return (rc);
2090 	}
2091 
2092 	UPDATE_STATS(vlan, pktinfo, 1);
2093 
2094 	/*
2095 	 * Loopback case. The message needs to be returned back on
2096 	 * the read side. This silently fails if the dupmsg above
2097 	 * failed. That is probably OK: if there is no memory to dup the
2098 	 * block, then there isn't much we could do anyway.
2099 	 */
2100 	if (nmp) {
2101 		GLDM_LOCK(macinfo, RW_WRITER);
2102 		gld_precv(macinfo, vlan, nmp);
2103 		GLDM_UNLOCK(macinfo);
2104 	}
2105 
2106 	return (GLD_SUCCESS);
2107 }
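/*
 * Illustrative sketch (not part of the driver): how gld_start() above picks
 * the loopback copy when a promiscuous stream is present.  Zero-copy capable
 * drivers may be transmitting loaned (user) buffers, so the loopback copy
 * must not reference them.  The helper name is hypothetical.
 */
#if 0	/* sketch only, not compiled */
static mblk_t *
loopback_copy_sketch(gld_mac_info_t *macinfo, mblk_t *mp)
{
	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
		return (dupmsg_noloan(mp));	/* private copy of loaned data */
	return (dupmsg(mp));
}
#endif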
2108 
2109 /*
2110  * With MDT V.2 a single message mp can have one header area and multiple
2111  * payload areas. A packet is described by dl_pkt_info, and each packet can
2112  * span multiple payload areas (currently with TCP, each packet will have one
2113  * header and at the most two payload areas). MACs might have a limit on the
2114  * header and at most two payload areas). MACs might have a limit on the
2115  * MDT V.2 has a way of specifying that with mdt_span_limit; the MAC driver
2116  * might also have a limit on the total number of payloads in a message, and
2117  * that is specified by mdt_max_pld.
2118  */
2119 static int
2120 gld_start_mdt(queue_t *q, mblk_t *mp, int caller)
2121 {
2122 	mblk_t *nextmp;
2123 	gld_t *gld = (gld_t *)q->q_ptr;
2124 	gld_mac_info_t *macinfo = gld->gld_mac_info;
2125 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2126 	int numpacks, mdtpacks;
2127 	gld_interface_t *ifp = mac_pvt->interfacep;
2128 	pktinfo_t pktinfo;
2129 	gld_vlan_t *vlan = (gld_vlan_t *)gld->gld_vlan;
2130 	boolean_t doloop = B_FALSE;
2131 	multidata_t *dlmdp;
2132 	pdescinfo_t pinfo;
2133 	pdesc_t *dl_pkt;
2134 	void *cookie;
2135 	uint_t totLen = 0;
2136 
2137 	ASSERT(DB_TYPE(mp) == M_MULTIDATA);
2138 
2139 	/*
2140 	 * We're not holding the lock for this check.  If the promiscuous
2141 	 * state is in flux it doesn't matter much if we get this wrong.
2142 	 */
2143 	if (mac_pvt->nprom > 0) {
2144 		/*
2145 		 * We want to loopback to the receive side, but to avoid
2146 		 * recursive lock entry:  if we came from wput(), which
2147 		 * could have looped back via IP from our own receive
2148 		 * interrupt thread, we decline this request.  wput()
2149 		 * will then queue the packet for wsrv().  This means
2150 		 * that when snoop is running we don't get the advantage
2151 		 * of the wput() multithreaded direct entry to the
2152 		 * driver's send routine.
2153 		 */
2154 		if (caller == GLD_WPUT) {
2155 			(void) putbq(q, mp);
2156 			return (GLD_NORESOURCES);
2157 		}
2158 		doloop = B_TRUE;
2159 
2160 		/*
2161 		 * unlike the M_DATA case, we don't have to call
2162 		 * dupmsg_noloan here because mmd_transform
2163 		 * (called by gld_precv_mdt) will make a copy of
2164 		 * each dblk.
2165 		 */
2166 	}
2167 
2168 	while (mp != NULL) {
2169 		/*
2170 		 * The lower layer driver only gets a single multidata
2171 		 * message; this also makes it easier to handle noresources.
2172 		 */
2173 		nextmp = mp->b_cont;
2174 		mp->b_cont = NULL;
2175 
2176 		/*
2177 		 * Get number of packets in this message; if nothing
2178 		 * to transmit, go to next message.
2179 		 */
2180 		dlmdp = mmd_getmultidata(mp);
2181 		if ((mdtpacks = (int)mmd_getcnt(dlmdp, NULL, NULL)) == 0) {
2182 			freemsg(mp);
2183 			mp = nextmp;
2184 			continue;
2185 		}
2186 
2187 		/*
2188 		 * Run interpreter to populate media specific pktinfo fields.
2189 		 * This collects per MDT message information like sap,
2190 		 * broad/multicast etc.
2191 		 */
2192 		(void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, &pktinfo,
2193 		    GLD_MDT_TX);
2194 
2195 		numpacks = (*macinfo->gldm_mdt_pre)(macinfo, mp, &cookie);
2196 
2197 		if (numpacks > 0) {
2198 			/*
2199 			 * Driver indicates it can transmit at least 1, and
2200 			 * possibly all, packets in MDT message.
2201 			 */
2202 			int count = numpacks;
2203 
2204 			for (dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo);
2205 			    (dl_pkt != NULL);
2206 			    dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo)) {
2207 				/*
2208 				 * Format this packet by adding link header and
2209 				 * adjusting pdescinfo to include it; get
2210 				 * packet length.
2211 				 */
2212 				(void) (*ifp->interpreter_mdt)(macinfo, NULL,
2213 				    &pinfo, &pktinfo, GLD_MDT_TXPKT);
2214 
2215 				totLen += pktinfo.pktLen;
2216 
2217 				/*
2218 				 * Loop back packet before handing to the
2219 				 * driver.
2220 				 */
2221 				if (doloop &&
2222 				    mmd_adjpdesc(dl_pkt, &pinfo) != NULL) {
2223 					GLDM_LOCK(macinfo, RW_WRITER);
2224 					gld_precv_mdt(macinfo, vlan, mp,
2225 					    dl_pkt, &pktinfo);
2226 					GLDM_UNLOCK(macinfo);
2227 				}
2228 
2229 				/*
2230 				 * And send off to driver.
2231 				 */
2232 				(*macinfo->gldm_mdt_send)(macinfo, cookie,
2233 				    &pinfo);
2234 
2235 				/*
2236 				 * Be careful not to invoke getnextpdesc if we
2237 				 * already sent the last packet, since driver
2238 				 * might have posted it to hardware causing a
2239 				 * completion and freemsg() so the MDT data
2240 				 * structures might not be valid anymore.
2241 				 */
2242 				if (--count == 0)
2243 					break;
2244 			}
2245 			(*macinfo->gldm_mdt_post)(macinfo, mp, cookie);
2246 			pktinfo.pktLen = totLen;
2247 			UPDATE_STATS(vlan, pktinfo, numpacks);
2248 
2249 			/*
2250 			 * In the noresources case (when driver indicates it
2251 			 * can not transmit all packets in the MDT message),
2252 			 * adjust to skip the first few packets on retrial.
2253 			 */
2254 			if (numpacks != mdtpacks) {
2255 				/*
2256 				 * Release already processed packet descriptors.
2257 				 */
2258 				for (count = 0; count < numpacks; count++) {
2259 					dl_pkt = mmd_getfirstpdesc(dlmdp,
2260 					    &pinfo);
2261 					mmd_rempdesc(dl_pkt);
2262 				}
2263 				vlan->gldv_stats->glds_xmtretry++;
2264 				mp->b_cont = nextmp;
2265 				(void) putbq(q, mp);
2266 				return (GLD_NORESOURCES);
2267 			}
2268 		} else if (numpacks == 0) {
2269 			/*
2270 			 * Driver indicates it can not transmit any packets
2271 			 * currently and will request retrial later.
2272 			 */
2273 			vlan->gldv_stats->glds_xmtretry++;
2274 			mp->b_cont = nextmp;
2275 			(void) putbq(q, mp);
2276 			return (GLD_NORESOURCES);
2277 		} else {
2278 			ASSERT(numpacks == -1);
2279 			/*
2280 			 * We're supposed to count failed attempts as well.
2281 			 */
2282 			dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo);
2283 			while (dl_pkt != NULL) {
2284 				/*
2285 				 * Call interpreter to determine total packet
2286 				 * bytes that are being dropped.
2287 				 */
2288 				(void) (*ifp->interpreter_mdt)(macinfo, NULL,
2289 				    &pinfo, &pktinfo, GLD_MDT_TXPKT);
2290 
2291 				totLen += pktinfo.pktLen;
2292 
2293 				dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo);
2294 			}
2295 			pktinfo.pktLen = totLen;
2296 			UPDATE_STATS(vlan, pktinfo, mdtpacks);
2297 
2298 			/*
2299 			 * Transmit error; drop the message, move on
2300 			 * to the next one.
2301 			 */
2302 			freemsg(mp);
2303 		}
2304 
2305 		/*
2306 		 * Process the next multidata block, if there is one.
2307 		 */
2308 		mp = nextmp;
2309 	}
2310 
2311 	return (GLD_SUCCESS);
2312 }
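/*
 * Illustrative sketch (not part of the driver): the packet-descriptor walk
 * that gld_start_mdt() above performs on each multidata block, with the
 * per-packet formatting and transmit work elided.  Only routines already
 * used above are assumed; the helper name is hypothetical.
 */
#if 0	/* sketch only, not compiled */
static int
mdt_walk_sketch(mblk_t *mp)
{
	multidata_t *dlmdp = mmd_getmultidata(mp);
	pdescinfo_t pinfo;
	pdesc_t *dl_pkt;
	int pkts = 0;

	for (dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo); dl_pkt != NULL;
	    dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo)) {
		/* format and hand one packet descriptor to the driver here */
		pkts++;
	}
	return (pkts);
}
#endif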
2313 
2314 /*
2315  * gld_intr (macinfo)
2316  */
2317 uint_t
2318 gld_intr(gld_mac_info_t *macinfo)
2319 {
2320 	ASSERT(macinfo != NULL);
2321 
2322 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
2323 		return (DDI_INTR_UNCLAIMED);
2324 
2325 	return ((*macinfo->gldm_intr)(macinfo));
2326 }
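/*
 * Illustrative sketch (not part of this module): the usual shape of a
 * GLD-based driver's gldm_intr() entry point, showing where gld_recv() is
 * typically called from.  The device-specific names (xx_claim_interrupt,
 * xx_next_rx_frame) are hypothetical.
 */
#if 0	/* sketch only, not compiled */
static uint_t
xx_intr_sketch(gld_mac_info_t *macinfo)
{
	mblk_t *mp;

	if (!xx_claim_interrupt(macinfo))
		return (DDI_INTR_UNCLAIMED);

	while ((mp = xx_next_rx_frame(macinfo)) != NULL)
		gld_recv(macinfo, mp);	/* hand each received frame to GLD */

	return (DDI_INTR_CLAIMED);
}
#endif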
2327 
2328 /*
2329  * gld_sched (macinfo)
2330  *
2331  * This routine scans the streams that refer to a specific macinfo
2332  * structure and causes the STREAMS scheduler to try to run them if
2333  * they are marked as waiting for the transmit buffer.
2334  */
2335 void
2336 gld_sched(gld_mac_info_t *macinfo)
2337 {
2338 	gld_mac_pvt_t *mac_pvt;
2339 	gld_t *gld;
2340 	gld_vlan_t *vlan;
2341 	int i;
2342 
2343 	ASSERT(macinfo != NULL);
2344 
2345 	GLDM_LOCK(macinfo, RW_WRITER);
2346 
2347 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
2348 		/* We're probably being called from a leftover interrupt */
2349 		GLDM_UNLOCK(macinfo);
2350 		return;
2351 	}
2352 
2353 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2354 
2355 	for (i = 0; i < VLAN_HASHSZ; i++) {
2356 		for (vlan = mac_pvt->vlan_hash[i];
2357 		    vlan != NULL; vlan = vlan->gldv_next) {
2358 			for (gld = vlan->gldv_str_next;
2359 			    gld != (gld_t *)&vlan->gldv_str_next;
2360 			    gld = gld->gld_next) {
2361 				ASSERT(gld->gld_mac_info == macinfo);
2362 				gld->gld_sched_ran = B_TRUE;
2363 				membar_enter();
2364 				if (gld->gld_xwait) {
2365 					gld->gld_xwait = B_FALSE;
2366 					qenable(WR(gld->gld_qptr));
2367 				}
2368 			}
2369 		}
2370 	}
2371 
2372 	GLDM_UNLOCK(macinfo);
2373 }
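/*
 * Illustrative sketch (not part of this module): a GLD-based driver that has
 * returned GLD_NORESOURCES from its send routine calls gld_sched() once
 * transmit resources free up, typically from its transmit-reclaim path.
 * The device-specific name xx_reclaim_tx_descriptors is hypothetical.
 */
#if 0	/* sketch only, not compiled */
static void
xx_tx_reclaim_sketch(gld_mac_info_t *macinfo)
{
	if (xx_reclaim_tx_descriptors(macinfo) > 0)
		gld_sched(macinfo);	/* qenable streams waiting to send */
}
#endif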
2374 
2375 /*
2376  * gld_precv (macinfo, mp)
2377  * called from gld_start to loopback a packet when in promiscuous mode
2378  */
2379 static void
2380 gld_precv(gld_mac_info_t *macinfo, gld_vlan_t *vlan, mblk_t *mp)
2381 {
2382 	gld_mac_pvt_t *mac_pvt;
2383 	gld_interface_t *ifp;
2384 	pktinfo_t pktinfo;
2385 
2386 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
2387 
2388 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2389 	ifp = mac_pvt->interfacep;
2390 
2391 	/*
2392 	 * call the media specific packet interpreter routine
2393 	 */
2394 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RXLOOP) != 0) {
2395 		freemsg(mp);
2396 		BUMP(vlan->gldv_stats->glds_rcvbadinterp, 1);
2397 #ifdef GLD_DEBUG
2398 		if (gld_debug & GLDERRS)
2399 			cmn_err(CE_WARN,
2400 			    "gld_precv: interpreter failed");
2401 #endif
2402 		return;
2403 	}
2404 
2405 	gld_sendup(macinfo, vlan, &pktinfo, mp, gld_paccept);
2406 }
2407 
2408 /*
2409  * called from gld_start_mdt to loopback packet(s) when in promiscuous mode
2410  */
2411 static void
2412 gld_precv_mdt(gld_mac_info_t *macinfo, gld_vlan_t *vlan, mblk_t *mp,
2413     pdesc_t *dl_pkt, pktinfo_t *pktinfo)
2414 {
2415 	mblk_t *adjmp;
2416 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2417 	gld_interface_t *ifp = mac_pvt->interfacep;
2418 
2419 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
2420 
2421 	/*
2422 	 * Get source/destination.
2423 	 */
2424 	(void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, pktinfo,
2425 	    GLD_MDT_RXLOOP);
2426 	if ((adjmp = mmd_transform(dl_pkt)) != NULL)
2427 		gld_sendup(macinfo, vlan, pktinfo, adjmp, gld_paccept);
2428 }
2429 
2430 /*
2431  * gld_recv (macinfo, mp)
2432  * called with a MAC-level packet in an mblk; take the maclock,
2433  * try the ip4q and ip6q hack, and otherwise call gld_sendup.
2434  *
2435  * V0 drivers already are holding the mutex when they call us.
2436  */
2437 void
2438 gld_recv(gld_mac_info_t *macinfo, mblk_t *mp)
2439 {
2440 	gld_recv_tagged(macinfo, mp, VLAN_VTAG_NONE);
2441 }
2442 
2443 void
2444 gld_recv_tagged(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t vtag)
2445 {
2446 	gld_mac_pvt_t *mac_pvt;
2447 	char pbuf[3*GLD_MAX_ADDRLEN];
2448 	pktinfo_t pktinfo;
2449 	gld_interface_t *ifp;
2450 	queue_t *ipq = NULL;
2451 	gld_vlan_t *vlan;
2452 	uint32_t vid;
2453 
2454 	ASSERT(macinfo != NULL);
2455 	ASSERT(mp->b_datap->db_ref);
2456 
2457 	GLDM_LOCK(macinfo, RW_READER);
2458 
2459 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
2460 		/* We're probably being called from a leftover interrupt */
2461 		freemsg(mp);
2462 		goto done;
2463 	}
2464 
2465 	vid = GLD_VTAG_VID(vtag);
2466 	if ((vlan = gld_find_vlan(macinfo, vid)) == NULL) {
2467 		freemsg(mp);
2468 		goto done;
2469 	}
2470 
2471 	/*
2472 	 * Check whether underlying media code supports the IPQ hack,
2473 	 * and if so, whether the interpreter can quickly parse the
2474 	 * packet to get some relevant parameters.
2475 	 */
2476 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2477 	ifp = mac_pvt->interfacep;
2478 	if (((*ifp->interpreter)(macinfo, mp, &pktinfo,
2479 	    GLD_RXQUICK) == 0) && (vlan->gldv_ipq_flags == 0)) {
2480 		switch (pktinfo.ethertype) {
2481 		case ETHERTYPE_IP:
2482 			ipq = vlan->gldv_ipq;
2483 			break;
2484 		case ETHERTYPE_IPV6:
2485 			ipq = vlan->gldv_ipv6q;
2486 			break;
2487 		}
2488 	}
2489 
2490 	BUMP(vlan->gldv_stats->glds_bytercv64, pktinfo.pktLen);
2491 	BUMP(vlan->gldv_stats->glds_pktrcv64, 1);
2492 
2493 	/*
2494 	 * Special case for IP; we can simply do the putnext here, if:
2495 	 * o ipq != NULL, and therefore:
2496 	 * - the device type supports IPQ (ethernet and IPoIB);
2497 	 * - the interpreter could quickly parse the packet;
2498 	 * - there are no PROMISC_SAP streams (on this VLAN);
2499 	 * - there is one, and only one, IP stream bound (to this VLAN);
2500 	 * - that stream is a "fastpath" stream;
2501 	 * - the packet is of type ETHERTYPE_IP or ETHERTYPE_IPV6
2502 	 *
2503 	 * o the packet is specifically for me, and therefore:
2504 	 * - the packet is not multicast or broadcast (fastpath only
2505 	 *   wants unicast packets).
2506 	 *
2507 	 * o the stream is not asserting flow control.
2508 	 */
2509 	if (ipq != NULL &&
2510 	    pktinfo.isForMe &&
2511 	    canputnext(ipq)) {
2512 		/*
2513 		 * Skip the mac header. We know there is no LLC1/SNAP header
2514 		 * in this packet
2515 		 */
2516 		mp->b_rptr += pktinfo.macLen;
2517 		putnext(ipq, mp);
2518 		goto done;
2519 	}
2520 
2521 	/*
2522 	 * call the media specific packet interpreter routine
2523 	 */
2524 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RX) != 0) {
2525 		BUMP(vlan->gldv_stats->glds_rcvbadinterp, 1);
2526 #ifdef GLD_DEBUG
2527 		if (gld_debug & GLDERRS)
2528 			cmn_err(CE_WARN,
2529 			    "gld_recv_tagged: interpreter failed");
2530 #endif
2531 		freemsg(mp);
2532 		goto done;
2533 	}
2534 
2535 	/*
2536 	 * This is safe even if vtag is VLAN_VTAG_NONE
2537 	 */
2538 
2539 	pktinfo.vid = vid;
2540 	pktinfo.cfi = GLD_VTAG_CFI(vtag);
2541 #ifdef GLD_DEBUG
2542 	if (pktinfo.cfi != VLAN_CFI_ETHER)
2543 		cmn_err(CE_WARN, "gld_recv_tagged: non-ETHER CFI");
2544 #endif
2545 	pktinfo.user_pri = GLD_VTAG_PRI(vtag);
2546 
2547 #ifdef GLD_DEBUG
2548 	if ((gld_debug & GLDRECV) &&
2549 	    (!(gld_debug & GLDNOBR) ||
2550 	    (!pktinfo.isBroadcast && !pktinfo.isMulticast))) {
2551 		char pbuf2[3*GLD_MAX_ADDRLEN];
2552 
2553 		cmn_err(CE_CONT, "gld_recv_tagged: machdr=<%s -> %s>\n",
2554 		    gld_macaddr_sprintf(pbuf, pktinfo.shost,
2555 		    macinfo->gldm_addrlen), gld_macaddr_sprintf(pbuf2,
2556 		    pktinfo.dhost, macinfo->gldm_addrlen));
2557 		cmn_err(CE_CONT, "gld_recv_tagged: VlanId %d UserPri %d\n",
2558 		    pktinfo.vid,
2559 		    pktinfo.user_pri);
2560 		cmn_err(CE_CONT, "gld_recv_tagged: ethertype: %4x Len: %4d "
2561 		    "Hdr: %d,%d isMulticast: %s\n",
2562 		    pktinfo.ethertype,
2563 		    pktinfo.pktLen,
2564 		    pktinfo.macLen,
2565 		    pktinfo.hdrLen,
2566 		    pktinfo.isMulticast ? "Y" : "N");
2567 	}
2568 #endif
2569 
2570 	gld_sendup(macinfo, vlan, &pktinfo, mp, gld_accept);
2571 
2572 done:
2573 	GLDM_UNLOCK(macinfo);
2574 }
2575 
2576 /* =================================================================== */
2577 /* receive group: called from gld_recv and gld_precv* with maclock held */
2578 /* =================================================================== */
2579 
2580 /*
2581  * gld_sendup (macinfo, mp)
2582  * called with an ethernet packet in an mblk; must decide whether
2583  * packet is for us and which streams to queue it to.
2584  */
2585 static void
2586 gld_sendup(gld_mac_info_t *macinfo, gld_vlan_t *vlan, pktinfo_t *pktinfo,
2587     mblk_t *mp, int (*acceptfunc)())
2588 {
2589 	gld_t *gld;
2590 	gld_t *fgld = NULL;
2591 	mblk_t *nmp;
2592 	void (*send)(queue_t *qp, mblk_t *mp);
2593 	int (*cansend)(queue_t *qp);
2594 
2595 #ifdef GLD_DEBUG
2596 	if (gld_debug & GLDTRACE)
2597 		cmn_err(CE_NOTE, "gld_sendup(%p, %p)", (void *)mp,
2598 		    (void *)macinfo);
2599 #endif
2600 
2601 	ASSERT(mp != NULL);
2602 	ASSERT(macinfo != NULL);
2603 	ASSERT(vlan != NULL);
2604 	ASSERT(pktinfo != NULL);
2605 	ASSERT(GLDM_LOCK_HELD(macinfo));
2606 
2607 	/*
2608 	 * The "fast" in "GLDOPT_FAST_RECV" refers to the speed at which
2609 	 * gld_recv returns to the caller's interrupt routine.  The total
2610 	 * network throughput would normally be lower when selecting this
2611 	 * option, because we putq the messages and process them later,
2612 	 * instead of sending them with putnext now.  Some time critical
2613 	 * device might need this, so it's here but undocumented.
2614 	 */
2615 	if (macinfo->gldm_options & GLDOPT_FAST_RECV) {
2616 		send = (void (*)(queue_t *, mblk_t *))putq;
2617 		cansend = canput;
2618 	} else {
2619 		send = (void (*)(queue_t *, mblk_t *))putnext;
2620 		cansend = canputnext;
2621 	}
2622 
2623 	/*
2624 	 * Search all the streams attached to this macinfo looking for
2625 	 * those eligible to receive the present packet.
2626 	 */
2627 	for (gld = vlan->gldv_str_next;
2628 	    gld != (gld_t *)&vlan->gldv_str_next; gld = gld->gld_next) {
2629 #ifdef GLD_VERBOSE_DEBUG
2630 		cmn_err(CE_NOTE, "gld_sendup: SAP: %4x QPTR: %p QSTATE: %s",
2631 		    gld->gld_sap, (void *)gld->gld_qptr,
2632 		    gld->gld_state == DL_IDLE ? "IDLE": "NOT IDLE");
2633 #endif
2634 		ASSERT(gld->gld_qptr != NULL);
2635 		ASSERT(gld->gld_state == DL_IDLE ||
2636 		    gld->gld_state == DL_UNBOUND);
2637 		ASSERT(gld->gld_mac_info == macinfo);
2638 		ASSERT(gld->gld_vlan == vlan);
2639 
2640 		if (gld->gld_state != DL_IDLE)
2641 			continue;	/* not eligible to receive */
2642 		if (gld->gld_flags & GLD_STR_CLOSING)
2643 			continue;	/* not eligible to receive */
2644 
2645 #ifdef GLD_DEBUG
2646 		if ((gld_debug & GLDRECV) &&
2647 		    (!(gld_debug & GLDNOBR) ||
2648 		    (!pktinfo->isBroadcast && !pktinfo->isMulticast)))
2649 			cmn_err(CE_NOTE,
2650 			    "gld_sendup: queue sap: %4x promis: %s %s %s",
2651 			    gld->gld_sap,
2652 			    gld->gld_flags & GLD_PROM_PHYS ? "phys " : "     ",
2653 			    gld->gld_flags & GLD_PROM_SAP  ? "sap  " : "     ",
2654 			    gld->gld_flags & GLD_PROM_MULT ? "multi" : "     ");
2655 #endif
2656 
2657 		/*
2658 		 * The accept function differs depending on whether this is
2659 		 * a packet that we received from the wire or a loopback.
2660 		 */
2661 		if ((*acceptfunc)(gld, pktinfo)) {
2662 			/* sap matches */
2663 			pktinfo->wasAccepted = 1;	/* known protocol */
2664 
2665 			if (!(*cansend)(gld->gld_qptr)) {
2666 				/*
2667 				 * Upper stream is not accepting messages, i.e.
2668 				 * it is flow controlled, therefore we will
2669 				 * forgo sending the message up this stream.
2670 				 */
2671 #ifdef GLD_DEBUG
2672 				if (gld_debug & GLDETRACE)
2673 					cmn_err(CE_WARN,
2674 					    "gld_sendup: canput failed");
2675 #endif
2676 				BUMP(vlan->gldv_stats->glds_blocked, 1);
2677 				qenable(gld->gld_qptr);
2678 				continue;
2679 			}
2680 
2681 			/*
2682 			 * we are trying to avoid an extra dupmsg() here.
2683 			 * If this is the first eligible queue, remember the
2684 			 * queue and send up the message after the loop.
2685 			 */
2686 			if (!fgld) {
2687 				fgld = gld;
2688 				continue;
2689 			}
2690 
2691 			/* duplicate the packet for this stream */
2692 			nmp = dupmsg(mp);
2693 			if (nmp == NULL) {
2694 				BUMP(vlan->gldv_stats->glds_gldnorcvbuf, 1);
2695 #ifdef GLD_DEBUG
2696 				if (gld_debug & GLDERRS)
2697 					cmn_err(CE_WARN,
2698 					    "gld_sendup: dupmsg failed");
2699 #endif
2700 				break;	/* couldn't get resources; drop it */
2701 			}
2702 			/* pass the message up the stream */
2703 			gld_passon(gld, nmp, pktinfo, send);
2704 		}
2705 	}
2706 
2707 	ASSERT(mp);
2708 	/* send the original dup of the packet up the first stream found */
2709 	if (fgld)
2710 		gld_passon(fgld, mp, pktinfo, send);
2711 	else
2712 		freemsg(mp);	/* no streams matched */
2713 
2714 	/* We do not count looped back packets */
2715 	if (acceptfunc == gld_paccept)
2716 		return;		/* transmit loopback case */
2717 
2718 	if (pktinfo->isBroadcast)
2719 		BUMP(vlan->gldv_stats->glds_brdcstrcv, 1);
2720 	else if (pktinfo->isMulticast)
2721 		BUMP(vlan->gldv_stats->glds_multircv, 1);
2722 
2723 	/* No stream accepted this packet */
2724 	if (!pktinfo->wasAccepted)
2725 		BUMP(vlan->gldv_stats->glds_unknowns, 1);
2726 }
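/*
 * Illustrative sketch (not part of the driver): the "defer the first match"
 * fan-out used by gld_sendup() above, which avoids one dupmsg() in the
 * common case where only a single stream accepts the packet.  The helper
 * and its matches[] argument are hypothetical; gld_passon(), dupmsg() and
 * freemsg() are the real routines used above.
 */
#if 0	/* sketch only, not compiled */
static void
fanout_sketch(gld_t *matches[], int n, mblk_t *mp, pktinfo_t *pktinfo,
    void (*send)(queue_t *, mblk_t *))
{
	gld_t *fgld = NULL;
	mblk_t *nmp;
	int i;

	for (i = 0; i < n; i++) {
		if (fgld == NULL) {
			fgld = matches[i];	/* defer; no copy needed yet */
			continue;
		}
		if ((nmp = dupmsg(mp)) == NULL)
			break;			/* out of resources; stop */
		gld_passon(matches[i], nmp, pktinfo, send);
	}
	if (fgld != NULL)
		gld_passon(fgld, mp, pktinfo, send);	/* original, no dup */
	else
		freemsg(mp);			/* nobody wanted it */
}
#endif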
2727 
2728 /*
2729  * A packet matches a stream if:
2730  *     the stream accepts EtherType encoded packets and the type matches
2731  *  or the stream accepts LLC packets and the packet is an LLC packet
2732  */
2733 #define	MATCH(stream, pktinfo) \
2734 	((stream->gld_ethertype && stream->gld_sap == pktinfo->ethertype) || \
2735 	(!stream->gld_ethertype && pktinfo->isLLC))
2736 
2737 /*
2738  * stream. The message header has been parsed and its characteristics
2739  * are recorded in the pktinfo data structure. The STREAMS stack info
2740  * is presented in the gld data structures.
2741  * are presented in gld data structures.
2742  */
2743 static int
2744 gld_accept(gld_t *gld, pktinfo_t *pktinfo)
2745 {
2746 	/*
2747 	 * if there is no match do not bother checking further.
2748 	 */
2749 	if (!MATCH(gld, pktinfo) && !(gld->gld_flags & GLD_PROM_SAP))
2750 		return (0);
2751 
2752 	/*
2753 	 * We don't accept any packet from the hardware if we originated it.
2754 	 * (Contrast gld_paccept, the send-loopback accept function.)
2755 	 */
2756 	if (pktinfo->isLooped)
2757 		return (0);
2758 
2759 	/*
2760 	 * If the packet is broadcast or sent to us directly we will accept it.
2761 	 * Also we will accept multicast packets requested by the stream.
2762 	 */
2763 	if (pktinfo->isForMe || pktinfo->isBroadcast ||
2764 	    gld_mcmatch(gld, pktinfo))
2765 		return (1);
2766 
2767 	/*
2768 	 * Finally, accept anything else if we're in promiscuous mode
2769 	 */
2770 	if (gld->gld_flags & GLD_PROM_PHYS)
2771 		return (1);
2772 
2773 	return (0);
2774 }
2775 
2776 /*
2777  * Return TRUE if the given multicast address is one
2778  * of those that this particular Stream is interested in.
2779  */
2780 static int
2781 gld_mcmatch(gld_t *gld, pktinfo_t *pktinfo)
2782 {
2783 	/*
2784 	 * Return FALSE if not a multicast address.
2785 	 */
2786 	if (!pktinfo->isMulticast)
2787 		return (0);
2788 
2789 	/*
2790 	 * Check if all multicasts have been enabled for this Stream
2791 	 */
2792 	if (gld->gld_flags & GLD_PROM_MULT)
2793 		return (1);
2794 
2795 	/*
2796 	 * Return FALSE if no multicast addresses enabled for this Stream.
2797 	 */
2798 	if (!gld->gld_mcast)
2799 		return (0);
2800 
2801 	/*
2802 	 * Otherwise, look for it in the table.
2803 	 */
2804 	return (gld_multicast(pktinfo->dhost, gld));
2805 }
2806 
2807 /*
2808  * gld_multicast determines if the address is a multicast address for
2809  * this stream.
2810  */
2811 static int
2812 gld_multicast(unsigned char *macaddr, gld_t *gld)
2813 {
2814 	int i;
2815 
2816 	ASSERT(GLDM_LOCK_HELD(gld->gld_mac_info));
2817 
2818 	if (!gld->gld_mcast)
2819 		return (0);
2820 
2821 	for (i = 0; i < gld->gld_multicnt; i++) {
2822 		if (gld->gld_mcast[i]) {
2823 			ASSERT(gld->gld_mcast[i]->gldm_refcnt);
2824 			if (mac_eq(gld->gld_mcast[i]->gldm_addr, macaddr,
2825 			    gld->gld_mac_info->gldm_addrlen))
2826 				return (1);
2827 		}
2828 	}
2829 
2830 	return (0);
2831 }
2832 
2833 /*
2834  * accept function for looped back packets
2835  */
2836 static int
2837 gld_paccept(gld_t *gld, pktinfo_t *pktinfo)
2838 {
2839 	return (gld->gld_flags & GLD_PROM_PHYS &&
2840 	    (MATCH(gld, pktinfo) || gld->gld_flags & GLD_PROM_SAP));
2841 }
2842 
2843 static void
2844 gld_passon(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo,
2845 	void (*send)(queue_t *qp, mblk_t *mp))
2846 {
2847 	int skiplen;
2848 
2849 #ifdef GLD_DEBUG
2850 	if (gld_debug & GLDTRACE)
2851 		cmn_err(CE_NOTE, "gld_passon(%p, %p, %p)", (void *)gld,
2852 		    (void *)mp, (void *)pktinfo);
2853 
2854 	if ((gld_debug & GLDRECV) && (!(gld_debug & GLDNOBR) ||
2855 	    (!pktinfo->isBroadcast && !pktinfo->isMulticast)))
2856 		cmn_err(CE_NOTE, "gld_passon: q: %p mblk: %p minor: %d sap: %x",
2857 		    (void *)gld->gld_qptr->q_next, (void *)mp, gld->gld_minor,
2858 		    gld->gld_sap);
2859 #endif
2860 
2861 	/*
2862 	 * Figure out how much of the packet header to throw away.
2863 	 *
2864 	 * RAW streams expect to see the whole packet.
2865 	 *
2866 	 * Other streams expect to see the packet with the MAC header
2867 	 * removed.
2868 	 *
2869 	 * Normal DLPI (non RAW/FAST) streams also want the
2870 	 * DL_UNITDATA_IND M_PROTO message block prepended to the M_DATA.
2871 	 */
2872 	if (gld->gld_flags & GLD_RAW) {
2873 		skiplen = 0;
2874 	} else {
2875 		skiplen = pktinfo->macLen;		/* skip mac header */
2876 		if (gld->gld_ethertype)
2877 			skiplen += pktinfo->hdrLen;	/* skip any extra */
2878 	}
2879 
2880 	if (skiplen >= pktinfo->pktLen) {
2881 		/*
2882 		 * If the interpreter did its job right, then it cannot be
2883 		 * asking us to skip more bytes than are in the packet!
2884 		 * However, there could be zero data bytes left after the
2885 		 * amount to skip.  DLPI specifies that passed M_DATA blocks
2886 		 * should contain at least one byte of data, so if we have
2887 		 * none we just drop it.
2888 		 */
2889 		ASSERT(!(skiplen > pktinfo->pktLen));
2890 		freemsg(mp);
2891 		return;
2892 	}
2893 
2894 	/*
2895 	 * Skip over the header(s), taking care to possibly handle message
2896 	 * fragments shorter than the amount we need to skip.  Hopefully
2897 	 * the driver will put the entire packet, or at least the entire
2898 	 * header, into a single message block.  But we handle it if not.
2899 	 */
2900 	while (skiplen >= MBLKL(mp)) {
2901 		mblk_t *tmp = mp;
2902 		skiplen -= MBLKL(mp);
2903 		mp = mp->b_cont;
2904 		ASSERT(mp != NULL);	/* because skiplen < pktinfo->pktLen */
2905 		freeb(tmp);
2906 	}
2907 	mp->b_rptr += skiplen;
2908 
2909 	/* Add M_PROTO if necessary, and pass upstream */
2910 	if (((gld->gld_flags & GLD_FAST) && !pktinfo->isMulticast &&
2911 	    !pktinfo->isBroadcast) || (gld->gld_flags & GLD_RAW)) {
2912 		/* RAW/FAST: just send up the M_DATA */
2913 		(*send)(gld->gld_qptr, mp);
2914 	} else {
2915 		/* everybody else wants to see a unitdata_ind structure */
2916 		mp = gld_addudind(gld, mp, pktinfo);
2917 		if (mp)
2918 			(*send)(gld->gld_qptr, mp);
2919 		/* if it failed, gld_addudind already bumped statistic */
2920 	}
2921 }
2922 
2923 /*
2924  * gld_addudind(gld, mp, pktinfo)
2925  * format a DL_UNITDATA_IND message to be sent upstream to the user
2926  */
2927 static mblk_t *
2928 gld_addudind(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo)
2929 {
2930 	gld_mac_info_t		*macinfo = gld->gld_mac_info;
2931 	gld_vlan_t		*vlan = (gld_vlan_t *)gld->gld_vlan;
2932 	dl_unitdata_ind_t	*dludindp;
2933 	mblk_t			*nmp;
2934 	int			size;
2935 	int			type;
2936 
2937 #ifdef GLD_DEBUG
2938 	if (gld_debug & GLDTRACE)
2939 		cmn_err(CE_NOTE, "gld_addudind(%p, %p, %p)", (void *)gld,
2940 		    (void *)mp, (void *)pktinfo);
2941 #endif
2942 	ASSERT(macinfo != NULL);
2943 
2944 	/*
2945 	 * Allocate the DL_UNITDATA_IND M_PROTO header; if allocation fails
2946 	 * we might as well discard, since we can't go further.
2947 	 */
2948 	size = sizeof (dl_unitdata_ind_t) +
2949 	    2 * (macinfo->gldm_addrlen + abs(macinfo->gldm_saplen));
2950 	if ((nmp = allocb(size, BPRI_MED)) == NULL) {
2951 		freemsg(mp);
2952 		BUMP(vlan->gldv_stats->glds_gldnorcvbuf, 1);
2953 #ifdef GLD_DEBUG
2954 		if (gld_debug & GLDERRS)
2955 			cmn_err(CE_WARN,
2956 			    "gld_addudind: allocb failed");
2957 #endif
2958 		return ((mblk_t *)NULL);
2959 	}
2960 	DB_TYPE(nmp) = M_PROTO;
2961 	nmp->b_rptr = nmp->b_datap->db_lim - size;
2962 
2963 	type = (gld->gld_ethertype) ? pktinfo->ethertype : 0;
2964 
2965 	/*
2966 	 * now setup the DL_UNITDATA_IND header
2967 	 *
2968 	 * XXX This looks broken if the saps aren't two bytes.
2969 	 */
2970 	dludindp = (dl_unitdata_ind_t *)nmp->b_rptr;
2971 	dludindp->dl_primitive = DL_UNITDATA_IND;
2972 	dludindp->dl_src_addr_length =
2973 	    dludindp->dl_dest_addr_length = macinfo->gldm_addrlen +
2974 					abs(macinfo->gldm_saplen);
2975 	dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t);
2976 	dludindp->dl_src_addr_offset = dludindp->dl_dest_addr_offset +
2977 					dludindp->dl_dest_addr_length;
2978 
2979 	dludindp->dl_group_address = (pktinfo->isMulticast ||
2980 					pktinfo->isBroadcast);
2981 
2982 	nmp->b_wptr = nmp->b_rptr + dludindp->dl_dest_addr_offset;
2983 
2984 	mac_copy(pktinfo->dhost, nmp->b_wptr, macinfo->gldm_addrlen);
2985 	nmp->b_wptr += macinfo->gldm_addrlen;
2986 
2987 	ASSERT(macinfo->gldm_saplen == -2);	/* XXX following code assumes */
2988 	*(ushort_t *)(nmp->b_wptr) = type;
2989 	nmp->b_wptr += abs(macinfo->gldm_saplen);
2990 
2991 	ASSERT(nmp->b_wptr == nmp->b_rptr + dludindp->dl_src_addr_offset);
2992 
2993 	mac_copy(pktinfo->shost, nmp->b_wptr, macinfo->gldm_addrlen);
2994 	nmp->b_wptr += macinfo->gldm_addrlen;
2995 
2996 	*(ushort_t *)(nmp->b_wptr) = type;
2997 	nmp->b_wptr += abs(macinfo->gldm_saplen);
2998 
2999 	if (pktinfo->nosource)
3000 		dludindp->dl_src_addr_offset = dludindp->dl_src_addr_length = 0;
3001 	linkb(nmp, mp);
3002 	return (nmp);
3003 }
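/*
 * Illustrative sketch (not part of the driver): the DLSAP address layout
 * that gld_addudind() above builds -- the gldm_addrlen-byte MAC address
 * immediately followed by a 2-byte sap, per the ASSERTed gldm_saplen of -2.
 * The helper name is hypothetical.
 */
#if 0	/* sketch only, not compiled */
static ushort_t
dlsap_sap_sketch(gld_mac_info_t *macinfo, uchar_t *dlsap)
{
	/* the sap follows the MAC address within the DLSAP address */
	return (*(ushort_t *)(dlsap + macinfo->gldm_addrlen));
}
#endif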
3004 
3005 /* ======================================================= */
3006 /* wsrv group: called from wsrv, single threaded per queue */
3007 /* ======================================================= */
3008 
3009 /*
3010  * We go to some trouble to avoid taking the same lock during normal
3011  * transmit processing as we do during normal receive processing.
3012  *
3013  * Elements of the per-instance macinfo and per-stream gld_t structures
3014  * are for the most part protected by the GLDM_LOCK rwlock/mutex.
3015  * (Elements of the gld_mac_pvt_t structure are considered part of the
3016  * macinfo structure for purposes of this discussion).
3017  *
3018  * However, it is more complicated than that:
3019  *
3020  *	Elements of the macinfo structure that are set before the macinfo
3021  *	structure is added to its device list by gld_register(), and never
3022  *	thereafter modified, are accessed without requiring taking the lock.
3023  *	A similar rule applies to those elements of the gld_t structure that
3024  *	are written by gld_open() before the stream is added to any list.
3025  *
3026  *	Most other elements of the macinfo structure may only be read or
3027  *	written while holding the maclock.
3028  *
3029  *	Most writable elements of the gld_t structure are written only
3030  *	within the single-threaded domain of wsrv() and subsidiaries.
3031  *	(This domain includes open/close while qprocs are not on.)
3032  *	The maclock need not be taken while within that domain
3033  *	simply to read those elements.  Writing to them, even within
3034  *	that domain, or reading from it outside that domain, requires
3035  *	holding the maclock.  Exception:  if the stream is not
3036  *	presently attached to a PPA, there is no associated macinfo,
3037  *	and no maclock need be taken.
3038  *
3039  *	The curr_macaddr element of the mac private structure is also
3040  *      protected by the GLDM_LOCK rwlock/mutex, like most other members
3041  *      of that structure. However, there are a few instances in the
3042  *      transmit path where we choose to forgo lock protection when
3043  *      reading this variable. This is to avoid lock contention between
3044  *      threads executing the DL_UNITDATA_REQ case and receive threads.
3045  *      In doing so we will take a small risk of a few corrupted packets
3046  *      during the short and rare times when someone is changing the interface's
3047  *      physical address. We consider the small cost in this rare case to be
3048  *      worth the benefit of reduced lock contention under normal operating
3049  *      conditions. The risk/cost is small because:
3050  *          1. there is no guarantee at this layer of uncorrupted delivery.
3051  *          2. the physaddr doesn't change very often - no performance hit.
3052  *          3. if the physaddr changes, other stuff is going to be screwed
3053  *             up for a while anyway, while other sites refigure ARP, etc.,
3054  *             so losing a couple of packets is the least of our worries.
3055  *
3056  *	The list of streams associated with a macinfo is protected by
3057  *	two locks:  the per-macinfo maclock, and the per-major-device
3058  *	gld_devlock.  Both must be held to modify the list, but either
3059  *	may be held to protect the list during reading/traversing.  This
3060  *	allows independent locking for multiple instances in the receive
3061  *	path (using macinfo), while facilitating routines that must search
3062  *	the entire set of streams associated with a major device, such as
3063  *	gld_findminor(), gld_finddevinfo(), close().  The "nstreams"
3064  *	macinfo	element, and the gld_mac_info gld_t element, are similarly
3065  *	protected, since they change at exactly the same time the macinfo
3066  *	streams list does.
3067  *
3068  *	The list of macinfo structures associated with a major device
3069  *	structure is protected by the gld_devlock, as is the per-major
3070  *	list of Style 2 streams in the DL_UNATTACHED state.
3071  *
3072  *	The list of major devices is kept on a module-global list
3073  *	gld_device_list, which has its own lock to protect the list.
3074  *
3075  *	When it is necessary to hold more than one lock at a time, they
3076  *	are acquired in this "outside in" order:
3077  *		gld_device_list.gld_devlock
3078  *		glddev->gld_devlock
3079  *		GLDM_LOCK(macinfo)
3080  *
3081  *	Finally, there are some "volatile" elements of the gld_t structure
3082  *	used for synchronization between various routines that don't share
3083  *	the same mutexes.  See the routines for details.  These are:
3084  *		gld_xwait	between gld_wsrv() and gld_sched()
3085  *		gld_sched_ran	between gld_wsrv() and gld_sched()
3086  *		gld_in_unbind	between gld_wput() and wsrv's gld_unbind()
3087  *		gld_wput_count	between gld_wput() and wsrv's gld_unbind()
3088  *		gld_in_wsrv	between gld_wput() and gld_wsrv()
3089  *				(used in conjunction with q->q_first)
3090  */
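/*
 * Illustrative sketch (not part of the driver): acquiring more than one of
 * the locks described above in the documented "outside in" order.  The
 * helper name is hypothetical; gld_device_list is assumed to be the
 * module-global list head declared elsewhere in this file, and the other
 * locks are the ones named in the comment.
 */
#if 0	/* sketch only, not compiled */
static void
lock_order_sketch(glddev_t *glddev, gld_mac_info_t *macinfo)
{
	mutex_enter(&gld_device_list.gld_devlock);	/* module-global list */
	mutex_enter(&glddev->gld_devlock);		/* per-major device */
	GLDM_LOCK(macinfo, RW_WRITER);			/* per-instance */

	/* ... work that requires all three locks ... */

	GLDM_UNLOCK(macinfo);
	mutex_exit(&glddev->gld_devlock);
	mutex_exit(&gld_device_list.gld_devlock);
}
#endif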
3091 
3092 /*
3093  * gld_ioctl (q, mp)
3094  * handles all ioctl requests passed downstream. This routine is
3095  * passed a pointer to the message block with the ioctl request in it, and a
3096  * pointer to the queue so it can respond to the ioctl request with an ack.
3097  */
3098 int
3099 gld_ioctl(queue_t *q, mblk_t *mp)
3100 {
3101 	struct iocblk *iocp;
3102 	gld_t *gld;
3103 	gld_mac_info_t *macinfo;
3104 
3105 #ifdef GLD_DEBUG
3106 	if (gld_debug & GLDTRACE)
3107 		cmn_err(CE_NOTE, "gld_ioctl(%p %p)", (void *)q, (void *)mp);
3108 #endif
3109 	gld = (gld_t *)q->q_ptr;
3110 	iocp = (struct iocblk *)mp->b_rptr;
3111 	switch (iocp->ioc_cmd) {
3112 	case DLIOCRAW:		/* raw M_DATA mode */
3113 		gld->gld_flags |= GLD_RAW;
3114 		DB_TYPE(mp) = M_IOCACK;
3115 		qreply(q, mp);
3116 		break;
3117 
3118 	case DL_IOC_HDR_INFO:	/* fastpath */
3119 		if (gld_global_options & GLD_OPT_NO_FASTPATH) {
3120 			miocnak(q, mp, 0, EINVAL);
3121 			break;
3122 		}
3123 		gld_fastpath(gld, q, mp);
3124 		break;
3125 
3126 	default:
3127 		macinfo	 = gld->gld_mac_info;
3128 		if (macinfo == NULL || macinfo->gldm_ioctl == NULL) {
3129 			miocnak(q, mp, 0, EINVAL);
3130 			break;
3131 		}
3132 
3133 		GLDM_LOCK(macinfo, RW_WRITER);
3134 		(void) (*macinfo->gldm_ioctl) (macinfo, q, mp);
3135 		GLDM_UNLOCK(macinfo);
3136 		break;
3137 	}
3138 	return (0);
3139 }
3140 
3141 /*
3142  * Since the rules for "fastpath" mode don't seem to be documented
3143  * anywhere, I will describe GLD's rules for fastpath users here:
3144  *
3145  * Once in this mode you remain there until close.
3146  * If you unbind/rebind you should get a new header using DL_IOC_HDR_INFO.
3147  * You must be bound (DL_IDLE) to transmit.
3148  * There are other rules not listed above.
3149  */
3150 static void
3151 gld_fastpath(gld_t *gld, queue_t *q, mblk_t *mp)
3152 {
3153 	gld_interface_t *ifp;
3154 	gld_mac_info_t *macinfo;
3155 	dl_unitdata_req_t *dludp;
3156 	mblk_t *nmp;
3157 	t_scalar_t off, len;
3158 	uint_t maclen;
3159 	int error;
3160 	gld_vlan_t *vlan;
3161 
3162 	if (gld->gld_state != DL_IDLE) {
3163 		miocnak(q, mp, 0, EINVAL);
3164 		return;
3165 	}
3166 
3167 	macinfo = gld->gld_mac_info;
3168 	ASSERT(macinfo != NULL);
3169 	maclen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen);
3170 
3171 	error = miocpullup(mp, sizeof (dl_unitdata_req_t) + maclen);
3172 	if (error != 0) {
3173 		miocnak(q, mp, 0, error);
3174 		return;
3175 	}
3176 
3177 	dludp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
3178 	off = dludp->dl_dest_addr_offset;
3179 	len = dludp->dl_dest_addr_length;
3180 	if (dludp->dl_primitive != DL_UNITDATA_REQ ||
3181 	    !MBLKIN(mp->b_cont, off, len) || len != maclen) {
3182 		miocnak(q, mp, 0, EINVAL);
3183 		return;
3184 	}
3185 
3186 	/*
3187 	 * We take this fastpath request as a declaration that the caller
3188 	 * will accept M_DATA messages from us, whether or not we are willing
3189 	 * to accept them in return.  This allows us to have fastpath in one
3190 	 * direction (flow upstream) even on media with Source Routing, where
3191 	 * we are unable to provide a fixed MAC header to be prepended to
3192 	 * downstream flowing packets.  So we set GLD_FAST whether or not we
3193 	 * decide to allow the caller to send M_DATA down to us.
3194 	 */
3195 	GLDM_LOCK(macinfo, RW_WRITER);
3196 	gld->gld_flags |= GLD_FAST;
3197 	vlan = (gld_vlan_t *)gld->gld_vlan;
3198 	vlan->gldv_ipq_flags &= ~IPQ_DISABLED;
3199 	GLDM_UNLOCK(macinfo);
3200 
3201 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
3202 
3203 	/* This will fail for Source Routing media */
3204 	/* Also on Ethernet on 802.2 SAPs */
3205 	if ((nmp = (*ifp->mkfastpath)(gld, mp)) == NULL) {
3206 		miocnak(q, mp, 0, ENOMEM);
3207 		return;
3208 	}
3209 
3210 	/*
3211 	 * Link new mblk in after the "request" mblks.
3212 	 */
3213 	linkb(mp, nmp);
3214 	miocack(q, mp, msgdsize(mp->b_cont), 0);
3215 }
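/*
 * Illustrative sketch (not part of the driver): the shape of the
 * DL_IOC_HDR_INFO request that gld_fastpath() above validates -- an M_IOCTL
 * whose payload is a dl_unitdata_req_t followed by the full DLSAP address
 * (gldm_addrlen + |gldm_saplen| bytes).  mkiocb(9F) is assumed to be
 * available to the upstream module; the helper name is hypothetical.
 */
#if 0	/* sketch only, not compiled */
static mblk_t *
fastpath_req_sketch(uchar_t *dlsap, uint_t dlsap_len)
{
	mblk_t *iocmp, *datamp;
	dl_unitdata_req_t *dludp;

	if ((iocmp = mkiocb(DL_IOC_HDR_INFO)) == NULL)
		return (NULL);
	if ((datamp = allocb(sizeof (*dludp) + dlsap_len, BPRI_MED)) == NULL) {
		freemsg(iocmp);
		return (NULL);
	}
	dludp = (dl_unitdata_req_t *)datamp->b_rptr;
	bzero(dludp, sizeof (*dludp));
	dludp->dl_primitive = DL_UNITDATA_REQ;
	dludp->dl_dest_addr_length = dlsap_len;
	dludp->dl_dest_addr_offset = sizeof (*dludp);
	bcopy(dlsap, datamp->b_rptr + sizeof (*dludp), dlsap_len);
	datamp->b_wptr = datamp->b_rptr + sizeof (*dludp) + dlsap_len;

	((struct iocblk *)iocmp->b_rptr)->ioc_count =
	    sizeof (*dludp) + dlsap_len;
	iocmp->b_cont = datamp;	/* payload checked by miocpullup() above */
	return (iocmp);
}
#endif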
3216 
3217 /*
3218  * gld_cmds (q, mp)
3219  *	process the DL commands as defined in dlpi.h
3220  *	note that the primitives return status which is passed back
3221  *	to the service procedure.  If the value is GLDE_RETRY, then
3222  *	it is assumed that processing must stop and the primitive has
3223  *	been put back onto the queue.  If the value is any other error,
3224  *	then an error ack is generated by the service procedure.
3225  */
3226 static int
3227 gld_cmds(queue_t *q, mblk_t *mp)
3228 {
3229 	union DL_primitives *dlp = (union DL_primitives *)mp->b_rptr;
3230 	gld_t *gld = (gld_t *)(q->q_ptr);
3231 	int result = DL_BADPRIM;
3232 	int mblkl = MBLKL(mp);
3233 	t_uscalar_t dlreq;
3234 
3235 	/* Make sure we have at least dlp->dl_primitive */
3236 	if (mblkl < sizeof (dlp->dl_primitive))
3237 		return (DL_BADPRIM);
3238 
3239 	dlreq = dlp->dl_primitive;
3240 #ifdef	GLD_DEBUG
3241 	if (gld_debug & GLDTRACE)
3242 		cmn_err(CE_NOTE,
3243 		    "gld_cmds(%p, %p):dlp=%p, dlp->dl_primitive=%d",
3244 		    (void *)q, (void *)mp, (void *)dlp, dlreq);
3245 #endif
3246 
3247 	switch (dlreq) {
3248 	case DL_UDQOS_REQ:
3249 		if (mblkl < DL_UDQOS_REQ_SIZE)
3250 			break;
3251 		result = gld_udqos(q, mp);
3252 		break;
3253 
3254 	case DL_BIND_REQ:
3255 		if (mblkl < DL_BIND_REQ_SIZE)
3256 			break;
3257 		result = gld_bind(q, mp);
3258 		break;
3259 
3260 	case DL_UNBIND_REQ:
3261 		if (mblkl < DL_UNBIND_REQ_SIZE)
3262 			break;
3263 		result = gld_unbind(q, mp);
3264 		break;
3265 
3266 	case DL_UNITDATA_REQ:
3267 		if (mblkl < DL_UNITDATA_REQ_SIZE)
3268 			break;
3269 		result = gld_unitdata(q, mp);
3270 		break;
3271 
3272 	case DL_INFO_REQ:
3273 		if (mblkl < DL_INFO_REQ_SIZE)
3274 			break;
3275 		result = gld_inforeq(q, mp);
3276 		break;
3277 
3278 	case DL_ATTACH_REQ:
3279 		if (mblkl < DL_ATTACH_REQ_SIZE)
3280 			break;
3281 		if (gld->gld_style == DL_STYLE2)
3282 			result = gldattach(q, mp);
3283 		else
3284 			result = DL_NOTSUPPORTED;
3285 		break;
3286 
3287 	case DL_DETACH_REQ:
3288 		if (mblkl < DL_DETACH_REQ_SIZE)
3289 			break;
3290 		if (gld->gld_style == DL_STYLE2)
3291 			result = gldunattach(q, mp);
3292 		else
3293 			result = DL_NOTSUPPORTED;
3294 		break;
3295 
3296 	case DL_ENABMULTI_REQ:
3297 		if (mblkl < DL_ENABMULTI_REQ_SIZE)
3298 			break;
3299 		result = gld_enable_multi(q, mp);
3300 		break;
3301 
3302 	case DL_DISABMULTI_REQ:
3303 		if (mblkl < DL_DISABMULTI_REQ_SIZE)
3304 			break;
3305 		result = gld_disable_multi(q, mp);
3306 		break;
3307 
3308 	case DL_PHYS_ADDR_REQ:
3309 		if (mblkl < DL_PHYS_ADDR_REQ_SIZE)
3310 			break;
3311 		result = gld_physaddr(q, mp);
3312 		break;
3313 
3314 	case DL_SET_PHYS_ADDR_REQ:
3315 		if (mblkl < DL_SET_PHYS_ADDR_REQ_SIZE)
3316 			break;
3317 		result = gld_setaddr(q, mp);
3318 		break;
3319 
3320 	case DL_PROMISCON_REQ:
3321 		if (mblkl < DL_PROMISCON_REQ_SIZE)
3322 			break;
3323 		result = gld_promisc(q, mp, dlreq, B_TRUE);
3324 		break;
3325 
3326 	case DL_PROMISCOFF_REQ:
3327 		if (mblkl < DL_PROMISCOFF_REQ_SIZE)
3328 			break;
3329 		result = gld_promisc(q, mp, dlreq, B_FALSE);
3330 		break;
3331 
3332 	case DL_GET_STATISTICS_REQ:
3333 		if (mblkl < DL_GET_STATISTICS_REQ_SIZE)
3334 			break;
3335 		result = gld_get_statistics(q, mp);
3336 		break;
3337 
3338 	case DL_CAPABILITY_REQ:
3339 		if (mblkl < DL_CAPABILITY_REQ_SIZE)
3340 			break;
3341 		result = gld_cap(q, mp);
3342 		break;
3343 
3344 	case DL_NOTIFY_REQ:
3345 		if (mblkl < DL_NOTIFY_REQ_SIZE)
3346 			break;
3347 		result = gld_notify_req(q, mp);
3348 		break;
3349 
3350 	case DL_XID_REQ:
3351 	case DL_XID_RES:
3352 	case DL_TEST_REQ:
3353 	case DL_TEST_RES:
3354 	case DL_CONTROL_REQ:
3355 	case DL_PASSIVE_REQ:
3356 		result = DL_NOTSUPPORTED;
3357 		break;
3358 
3359 	default:
3360 #ifdef	GLD_DEBUG
3361 		if (gld_debug & GLDERRS)
3362 			cmn_err(CE_WARN,
3363 			    "gld_cmds: unknown M_PROTO message: %d",
3364 			    dlreq);
3365 #endif
3366 		result = DL_BADPRIM;
3367 	}
3368 
3369 	return (result);
3370 }
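
/*
 * Illustrative only: a caller of gld_cmds() (such as the write service
 * procedure) is expected to act on the returned value roughly as sketched
 * below.  GLDE_RETRY means the primitive was already put back on the
 * queue, GLDE_OK means any reply has been sent, and anything else is a
 * DLPI error to be turned into a DL_ERROR_ACK:
 *
 *	result = gld_cmds(q, mp);
 *	if (result != GLDE_OK && result != GLDE_RETRY)
 *		dlerrorack(q, mp, ((union DL_primitives *)
 *		    mp->b_rptr)->dl_primitive, result, 0);
 *
 * (on GLDE_RETRY, mp is already back on the queue and processing simply
 * stops until the queue is re-enabled)
 */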
3371 
3372 static int
3373 gld_cap(queue_t *q, mblk_t *mp)
3374 {
3375 	gld_t *gld = (gld_t *)q->q_ptr;
3376 	dl_capability_req_t *dlp = (dl_capability_req_t *)mp->b_rptr;
3377 
3378 	if (gld->gld_state == DL_UNATTACHED)
3379 		return (DL_OUTSTATE);
3380 
3381 	if (dlp->dl_sub_length == 0)
3382 		return (gld_cap_ack(q, mp));
3383 
3384 	return (gld_cap_enable(q, mp));
3385 }
3386 
3387 static int
3388 gld_cap_ack(queue_t *q, mblk_t *mp)
3389 {
3390 	gld_t *gld = (gld_t *)q->q_ptr;
3391 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3392 	gld_interface_t *ifp;
3393 	dl_capability_ack_t *dlap;
3394 	dl_capability_sub_t *dlsp;
3395 	size_t size = sizeof (dl_capability_ack_t);
3396 	size_t subsize = 0;
3397 
3398 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
3399 
3400 	if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY)
3401 		subsize += sizeof (dl_capability_sub_t) +
3402 		    sizeof (dl_capab_hcksum_t);
3403 	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
3404 		subsize += sizeof (dl_capability_sub_t) +
3405 		    sizeof (dl_capab_zerocopy_t);
3406 	if (macinfo->gldm_options & GLDOPT_MDT)
3407 		subsize += (sizeof (dl_capability_sub_t) +
3408 		    sizeof (dl_capab_mdt_t));
3409 
3410 	if ((mp = mexchange(q, mp, size + subsize, M_PROTO,
3411 	    DL_CAPABILITY_ACK)) == NULL)
3412 		return (GLDE_OK);
3413 
3414 	dlap = (dl_capability_ack_t *)mp->b_rptr;
3415 	dlap->dl_sub_offset = 0;
3416 	if ((dlap->dl_sub_length = subsize) != 0)
3417 		dlap->dl_sub_offset = sizeof (dl_capability_ack_t);
3418 	dlsp = (dl_capability_sub_t *)&dlap[1];
3419 
3420 	if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY) {
3421 		dl_capab_hcksum_t *dlhp = (dl_capab_hcksum_t *)&dlsp[1];
3422 
3423 		dlsp->dl_cap = DL_CAPAB_HCKSUM;
3424 		dlsp->dl_length = sizeof (dl_capab_hcksum_t);
3425 
3426 		dlhp->hcksum_version = HCKSUM_VERSION_1;
3427 
3428 		dlhp->hcksum_txflags = 0;
3429 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_PARTIAL)
3430 			dlhp->hcksum_txflags |= HCKSUM_INET_PARTIAL;
3431 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V4)
3432 			dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V4;
3433 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V6)
3434 			dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V6;
3435 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_IPHDR)
3436 			dlhp->hcksum_txflags |= HCKSUM_IPHDRCKSUM;
3437 
3438 		dlcapabsetqid(&(dlhp->hcksum_mid), RD(q));
3439 		dlsp = (dl_capability_sub_t *)&dlhp[1];
3440 	}
3441 
3442 	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY) {
3443 		dl_capab_zerocopy_t *dlzp = (dl_capab_zerocopy_t *)&dlsp[1];
3444 
3445 		dlsp->dl_cap = DL_CAPAB_ZEROCOPY;
3446 		dlsp->dl_length = sizeof (dl_capab_zerocopy_t);
3447 		dlzp->zerocopy_version = ZEROCOPY_VERSION_1;
3448 		dlzp->zerocopy_flags = DL_CAPAB_VMSAFE_MEM;
3449 
3450 		dlcapabsetqid(&(dlzp->zerocopy_mid), RD(q));
3451 		dlsp = (dl_capability_sub_t *)&dlzp[1];
3452 	}
3453 
3454 	if (macinfo->gldm_options & GLDOPT_MDT) {
3455 		dl_capab_mdt_t *dlmp = (dl_capab_mdt_t *)&dlsp[1];
3456 
3457 		dlsp->dl_cap = DL_CAPAB_MDT;
3458 		dlsp->dl_length = sizeof (dl_capab_mdt_t);
3459 
3460 		dlmp->mdt_version = MDT_VERSION_2;
3461 		dlmp->mdt_max_pld = macinfo->gldm_mdt_segs;
3462 		dlmp->mdt_span_limit = macinfo->gldm_mdt_sgl;
3463 		dlcapabsetqid(&dlmp->mdt_mid, OTHERQ(q));
3464 		dlmp->mdt_flags = DL_CAPAB_MDT_ENABLE;
3465 		dlmp->mdt_hdr_head = ifp->hdr_size;
3466 		dlmp->mdt_hdr_tail = 0;
3467 	}
3468 
3469 	qreply(q, mp);
3470 	return (GLDE_OK);
3471 }
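
/*
 * The sub-capabilities advertised above are driven entirely by flags the
 * device driver set in its gld_mac_info_t before registering.  A minimal,
 * hedged sketch of the driver side (the "xx" driver name and xx_wants_mdt
 * are hypothetical):
 *
 *	gld_mac_info_t *macinfo = gld_mac_alloc(devinfo);
 *
 *	macinfo->gldm_capabilities = GLD_CAP_CKSUM_FULL_V4 |
 *	    GLD_CAP_ZEROCOPY;
 *	if (xx_wants_mdt)
 *		macinfo->gldm_options |= GLDOPT_MDT;
 *	...
 *	if (gld_register(devinfo, "xx", macinfo) != DDI_SUCCESS)
 *		goto fail;
 */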
3472 
3473 static int
3474 gld_cap_enable(queue_t *q, mblk_t *mp)
3475 {
3476 	dl_capability_req_t *dlp;
3477 	dl_capability_sub_t *dlsp;
3478 	dl_capab_hcksum_t *dlhp;
3479 	offset_t off;
3480 	size_t len;
3481 	size_t size;
3482 	offset_t end;
3483 
3484 	dlp = (dl_capability_req_t *)mp->b_rptr;
3485 	dlp->dl_primitive = DL_CAPABILITY_ACK;
3486 
3487 	off = dlp->dl_sub_offset;
3488 	len = dlp->dl_sub_length;
3489 
3490 	if (!MBLKIN(mp, off, len))
3491 		return (DL_BADPRIM);
3492 
3493 	end = off + len;
3494 	while (off < end) {
3495 		dlsp = (dl_capability_sub_t *)(mp->b_rptr + off);
3496 		size = sizeof (dl_capability_sub_t) + dlsp->dl_length;
3497 		if (off + size > end)
3498 			return (DL_BADPRIM);
3499 
3500 		switch (dlsp->dl_cap) {
3501 		case DL_CAPAB_HCKSUM:
3502 			dlhp = (dl_capab_hcksum_t *)&dlsp[1];
3503 			/* nothing useful we can do with the contents */
3504 			dlcapabsetqid(&(dlhp->hcksum_mid), RD(q));
3505 			break;
3506 		default:
3507 			break;
3508 		}
3509 
3510 		off += size;
3511 	}
3512 
3513 	qreply(q, mp);
3514 	return (GLDE_OK);
3515 }
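
/*
 * For reference, the sub-capability area walked above is a simple
 * TLV-style sequence laid out after the dl_capability_req_t:
 *
 *	dl_sub_offset -> dl_capability_sub_t { dl_cap, dl_length }
 *	                 dl_length bytes of capability-specific data
 *	                 dl_capability_sub_t { dl_cap, dl_length }
 *	                 dl_length bytes of capability-specific data
 *	                 ...
 *
 * so each iteration advances "off" by sizeof (dl_capability_sub_t) +
 * dlsp->dl_length until it reaches dl_sub_offset + dl_sub_length.
 */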
3516 
3517 /*
3518  * Send a copy of the DL_NOTIFY_IND message <mp> to each stream that has
3519  * requested the specific <notification> that the message carries AND is
3520  * eligible and ready to receive the notification immediately.
3521  *
3522  * This routine ignores flow control. Notifications will be sent regardless.
3523  *
3524  * In all cases, the original message passed in is freed at the end of
3525  * the routine.
3526  */
3527 static void
3528 gld_notify_qs(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t notification)
3529 {
3530 	gld_mac_pvt_t *mac_pvt;
3531 	gld_vlan_t *vlan;
3532 	gld_t *gld;
3533 	mblk_t *nmp;
3534 	int i;
3535 
3536 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
3537 
3538 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3539 
3540 	/*
3541 	 * Search all the streams attached to this macinfo looking
3542 	 * for those eligible to receive the present notification.
3543 	 */
3544 	for (i = 0; i < VLAN_HASHSZ; i++) {
3545 		for (vlan = mac_pvt->vlan_hash[i];
3546 		    vlan != NULL; vlan = vlan->gldv_next) {
3547 			for (gld = vlan->gldv_str_next;
3548 			    gld != (gld_t *)&vlan->gldv_str_next;
3549 			    gld = gld->gld_next) {
3550 				ASSERT(gld->gld_qptr != NULL);
3551 				ASSERT(gld->gld_state == DL_IDLE ||
3552 				    gld->gld_state == DL_UNBOUND);
3553 				ASSERT(gld->gld_mac_info == macinfo);
3554 
3555 				if (gld->gld_flags & GLD_STR_CLOSING)
3556 					continue; /* not eligible - skip */
3557 				if (!(notification & gld->gld_notifications))
3558 					continue; /* not wanted - skip */
3559 				if ((nmp = dupmsg(mp)) == NULL)
3560 					continue; /* can't copy - skip */
3561 
3562 				/*
3563 				 * All OK; send dup'd notification up this
3564 				 * stream
3565 				 */
3566 				qreply(WR(gld->gld_qptr), nmp);
3567 			}
3568 		}
3569 	}
3570 
3571 	/*
3572 	 * Drop the original message block now
3573 	 */
3574 	freemsg(mp);
3575 }
3576 
3577 /*
3578  * For each (understood) bit in the <notifications> argument, construct
3579  * a DL_NOTIFY_IND message and send it to the specified <q>, or to all
3580  * eligible queues if <q> is NULL.
3581  */
3582 static void
3583 gld_notify_ind(gld_mac_info_t *macinfo, uint32_t notifications, queue_t *q)
3584 {
3585 	gld_mac_pvt_t *mac_pvt;
3586 	dl_notify_ind_t *dlnip;
3587 	struct gld_stats *stats;
3588 	mblk_t *mp;
3589 	size_t size;
3590 	uint32_t bit;
3591 
3592 	GLDM_LOCK(macinfo, RW_WRITER);
3593 
3594 	/*
3595 	 * The following cases shouldn't happen, but just in case the
3596 	 * MAC driver calls gld_linkstate() at an inappropriate time, we
3597 	 * check anyway ...
3598 	 */
3599 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
3600 		GLDM_UNLOCK(macinfo);
3601 		return;				/* not ready yet	*/
3602 	}
3603 
3604 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
3605 		GLDM_UNLOCK(macinfo);
3606 		return;				/* not ready anymore	*/
3607 	}
3608 
3609 	/*
3610 	 * Make sure the kstats are up to date, 'cos we use some of
3611 	 * the kstat values below, specifically the link speed ...
3612 	 */
3613 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3614 	stats = mac_pvt->statistics;
3615 	if (macinfo->gldm_get_stats)
3616 		(void) (*macinfo->gldm_get_stats)(macinfo, stats);
3617 
3618 	for (bit = 1; notifications != 0; bit <<= 1) {
3619 		if ((notifications & bit) == 0)
3620 			continue;
3621 		notifications &= ~bit;
3622 
3623 		size = DL_NOTIFY_IND_SIZE;
3624 		if (bit == DL_NOTE_PHYS_ADDR)
3625 			size += macinfo->gldm_addrlen;
3626 		if ((mp = allocb(size, BPRI_MED)) == NULL)
3627 			continue;
3628 
3629 		mp->b_datap->db_type = M_PROTO;
3630 		mp->b_wptr = mp->b_rptr + size;
3631 		dlnip = (dl_notify_ind_t *)mp->b_rptr;
3632 		dlnip->dl_primitive = DL_NOTIFY_IND;
3633 		dlnip->dl_notification = 0;
3634 		dlnip->dl_data = 0;
3635 		dlnip->dl_addr_length = 0;
3636 		dlnip->dl_addr_offset = 0;
3637 
3638 		switch (bit) {
3639 		case DL_NOTE_PROMISC_ON_PHYS:
3640 		case DL_NOTE_PROMISC_OFF_PHYS:
3641 			if (mac_pvt->nprom != 0)
3642 				dlnip->dl_notification = bit;
3643 			break;
3644 
3645 		case DL_NOTE_LINK_DOWN:
3646 			if (macinfo->gldm_linkstate == GLD_LINKSTATE_DOWN)
3647 				dlnip->dl_notification = bit;
3648 			break;
3649 
3650 		case DL_NOTE_LINK_UP:
3651 			if (macinfo->gldm_linkstate == GLD_LINKSTATE_UP)
3652 				dlnip->dl_notification = bit;
3653 			break;
3654 
3655 		case DL_NOTE_SPEED:
3656 			/*
3657 			 * Conversion required here:
3658 			 *	GLD keeps the speed in bit/s in a uint64
3659 			 *	DLPI wants it in kb/s in a uint32
3660 			 * Fortunately this is still big enough for 10Gb/s!
3661 			 */
3662 			dlnip->dl_notification = bit;
3663 			dlnip->dl_data = stats->glds_speed/1000ULL;
3664 			break;
3665 
3666 		case DL_NOTE_PHYS_ADDR:
3667 			dlnip->dl_notification = bit;
3668 			dlnip->dl_data = DL_CURR_PHYS_ADDR;
3669 			dlnip->dl_addr_offset = sizeof (dl_notify_ind_t);
3670 			dlnip->dl_addr_length = macinfo->gldm_addrlen +
3671 			    abs(macinfo->gldm_saplen);
3672 			mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3673 			mac_copy(mac_pvt->curr_macaddr,
3674 			    mp->b_rptr + sizeof (dl_notify_ind_t),
3675 			    macinfo->gldm_addrlen);
3676 			break;
3677 
3678 		default:
3679 			break;
3680 		}
3681 
3682 		if (dlnip->dl_notification == 0)
3683 			freemsg(mp);
3684 		else if (q != NULL)
3685 			qreply(q, mp);
3686 		else
3687 			gld_notify_qs(macinfo, mp, bit);
3688 	}
3689 
3690 	GLDM_UNLOCK(macinfo);
3691 }
3692 
3693 /*
3694  * gld_notify_req - handle a DL_NOTIFY_REQ message
3695  */
3696 static int
3697 gld_notify_req(queue_t *q, mblk_t *mp)
3698 {
3699 	gld_t *gld = (gld_t *)q->q_ptr;
3700 	gld_mac_info_t *macinfo;
3701 	gld_mac_pvt_t *pvt;
3702 	dl_notify_req_t *dlnrp;
3703 	dl_notify_ack_t *dlnap;
3704 
3705 	ASSERT(gld != NULL);
3706 	ASSERT(gld->gld_qptr == RD(q));
3707 
3708 	dlnrp = (dl_notify_req_t *)mp->b_rptr;
3709 
3710 #ifdef GLD_DEBUG
3711 	if (gld_debug & GLDTRACE)
3712 		cmn_err(CE_NOTE, "gld_notify_req(%p %p)",
3713 		    (void *)q, (void *)mp);
3714 #endif
3715 
3716 	if (gld->gld_state == DL_UNATTACHED) {
3717 #ifdef GLD_DEBUG
3718 		if (gld_debug & GLDERRS)
3719 			cmn_err(CE_NOTE, "gld_notify_req: wrong state (%d)",
3720 			    gld->gld_state);
3721 #endif
3722 		return (DL_OUTSTATE);
3723 	}
3724 
3725 	/*
3726 	 * Remember what notifications are required by this stream
3727 	 */
3728 	macinfo = gld->gld_mac_info;
3729 	pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3730 
3731 	gld->gld_notifications = dlnrp->dl_notifications & pvt->notifications;
3732 
3733 	/*
3734 	 * The return DL_NOTIFY_ACK carries the bitset of notifications
3735 	 * that this driver can provide, independently of which ones have
3736 	 * previously been or are now being requested.
3737 	 */
3738 	if ((mp = mexchange(q, mp, sizeof (dl_notify_ack_t), M_PCPROTO,
3739 	    DL_NOTIFY_ACK)) == NULL)
3740 		return (DL_SYSERR);
3741 
3742 	dlnap = (dl_notify_ack_t *)mp->b_rptr;
3743 	dlnap->dl_notifications = pvt->notifications;
3744 	qreply(q, mp);
3745 
3746 	/*
3747 	 * A side effect of a DL_NOTIFY_REQ is that after the DL_NOTIFY_ACK
3748  * reply, the requestor gets zero or more DL_NOTIFY_IND messages
3749 	 * that provide the current status.
3750 	 */
3751 	gld_notify_ind(macinfo, gld->gld_notifications, q);
3752 
3753 	return (GLDE_OK);
3754 }
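
/*
 * For reference, the exchange above is driven by a DL_NOTIFY_REQ that the
 * client sends down as an M_PROTO message.  A hedged user-level sketch
 * ("fd" is an already-attached DLPI stream; error handling omitted):
 *
 *	dl_notify_req_t req;
 *	struct strbuf ctl;
 *
 *	req.dl_primitive = DL_NOTIFY_REQ;
 *	req.dl_notifications = DL_NOTE_LINK_UP | DL_NOTE_LINK_DOWN |
 *	    DL_NOTE_SPEED | DL_NOTE_PHYS_ADDR;
 *	req.dl_timelimit = 0;
 *	ctl.buf = (char *)&req;
 *	ctl.len = DL_NOTIFY_REQ_SIZE;
 *	(void) putmsg(fd, &ctl, NULL, 0);
 *
 * The DL_NOTIFY_ACK returned above carries pvt->notifications, and is
 * followed by zero or more DL_NOTIFY_IND messages giving current state.
 */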
3755 
3756 /*
3757  * gld_linkstate()
3758  *	Called by driver to tell GLD the state of the physical link.
3759  *	As a side effect, sends a DL_NOTE_LINK_UP or DL_NOTE_LINK_DOWN
3760  *	notification to each client that has previously requested such
3761  *	notifications
3762  */
3763 void
3764 gld_linkstate(gld_mac_info_t *macinfo, int32_t newstate)
3765 {
3766 	uint32_t notification;
3767 
3768 	switch (newstate) {
3769 	default:
3770 		return;
3771 
3772 	case GLD_LINKSTATE_DOWN:
3773 		notification = DL_NOTE_LINK_DOWN;
3774 		break;
3775 
3776 	case GLD_LINKSTATE_UP:
3777 		notification = DL_NOTE_LINK_UP | DL_NOTE_SPEED;
3778 		break;
3779 
3780 	case GLD_LINKSTATE_UNKNOWN:
3781 		notification = 0;
3782 		break;
3783 	}
3784 
3785 	GLDM_LOCK(macinfo, RW_WRITER);
3786 	if (macinfo->gldm_linkstate == newstate)
3787 		notification = 0;
3788 	else
3789 		macinfo->gldm_linkstate = newstate;
3790 	GLDM_UNLOCK(macinfo);
3791 
3792 	if (notification)
3793 		gld_notify_ind(macinfo, notification, NULL);
3794 }
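
/*
 * A driver calls gld_linkstate() whenever it learns that the physical
 * link has changed state, typically from its interrupt or link-poll path.
 * A minimal sketch (the xx_* names are hypothetical):
 *
 *	static void
 *	xx_link_check(struct xx_softc *sc)
 *	{
 *		if (xx_phy_link_up(sc))
 *			gld_linkstate(sc->xx_macinfo, GLD_LINKSTATE_UP);
 *		else
 *			gld_linkstate(sc->xx_macinfo, GLD_LINKSTATE_DOWN);
 *	}
 *
 * Calling with an unchanged state is harmless: the code above clears the
 * notification when gldm_linkstate already equals the new state.
 */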
3795 
3796 /*
3797  * gld_udqos - set the current QoS parameters (priority only at the moment).
3798  */
3799 static int
3800 gld_udqos(queue_t *q, mblk_t *mp)
3801 {
3802 	dl_udqos_req_t *dlp;
3803 	gld_t  *gld = (gld_t *)q->q_ptr;
3804 	int off;
3805 	int len;
3806 	dl_qos_cl_sel1_t *selp;
3807 
3808 	ASSERT(gld);
3809 	ASSERT(gld->gld_qptr == RD(q));
3810 
3811 #ifdef GLD_DEBUG
3812 	if (gld_debug & GLDTRACE)
3813 		cmn_err(CE_NOTE, "gld_udqos(%p %p)", (void *)q, (void *)mp);
3814 #endif
3815 
3816 	if (gld->gld_state != DL_IDLE) {
3817 #ifdef GLD_DEBUG
3818 		if (gld_debug & GLDERRS)
3819 			cmn_err(CE_NOTE, "gld_udqos: wrong state (%d)",
3820 			    gld->gld_state);
3821 #endif
3822 		return (DL_OUTSTATE);
3823 	}
3824 
3825 	dlp = (dl_udqos_req_t *)mp->b_rptr;
3826 	off = dlp->dl_qos_offset;
3827 	len = dlp->dl_qos_length;
3828 
3829 	if (len != sizeof (dl_qos_cl_sel1_t) || !MBLKIN(mp, off, len))
3830 		return (DL_BADQOSTYPE);
3831 
3832 	selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + off);
3833 	if (selp->dl_qos_type != DL_QOS_CL_SEL1)
3834 		return (DL_BADQOSTYPE);
3835 
3836 	if (selp->dl_trans_delay != 0 &&
3837 	    selp->dl_trans_delay != DL_QOS_DONT_CARE)
3838 		return (DL_BADQOSPARAM);
3839 	if (selp->dl_protection != 0 &&
3840 	    selp->dl_protection != DL_QOS_DONT_CARE)
3841 		return (DL_BADQOSPARAM);
3842 	if (selp->dl_residual_error != 0 &&
3843 	    selp->dl_residual_error != DL_QOS_DONT_CARE)
3844 		return (DL_BADQOSPARAM);
3845 	if (selp->dl_priority < 0 || selp->dl_priority > 7)
3846 		return (DL_BADQOSPARAM);
3847 
3848 	gld->gld_upri = selp->dl_priority;
3849 
3850 	dlokack(q, mp, DL_UDQOS_REQ);
3851 	return (GLDE_OK);
3852 }
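
/*
 * The only QoS parameter honoured above is the priority (0..7) in a
 * DL_QOS_CL_SEL1 selection; everything else must be 0 or
 * DL_QOS_DONT_CARE.  A hedged sketch of the message a client builds:
 *
 *	struct udqos_msg {
 *		dl_udqos_req_t req;
 *		dl_qos_cl_sel1_t sel;
 *	} msg;
 *
 *	msg.req.dl_primitive = DL_UDQOS_REQ;
 *	msg.req.dl_qos_length = sizeof (msg.sel);
 *	msg.req.dl_qos_offset = offsetof(struct udqos_msg, sel);
 *	msg.sel.dl_qos_type = DL_QOS_CL_SEL1;
 *	msg.sel.dl_trans_delay = DL_QOS_DONT_CARE;
 *	msg.sel.dl_priority = 6;
 *	msg.sel.dl_protection = DL_QOS_DONT_CARE;
 *	msg.sel.dl_residual_error = DL_QOS_DONT_CARE;
 *
 * The whole struct is then sent down in the control part of an M_PROTO
 * message, exactly as for any other DLPI request.
 */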
3853 
3854 static mblk_t *
3855 gld_bindack(queue_t *q, mblk_t *mp)
3856 {
3857 	gld_t *gld = (gld_t *)q->q_ptr;
3858 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3859 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3860 	dl_bind_ack_t *dlp;
3861 	size_t size;
3862 	t_uscalar_t addrlen;
3863 	uchar_t *sapp;
3864 
3865 	addrlen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen);
3866 	size = sizeof (dl_bind_ack_t) + addrlen;
3867 	if ((mp = mexchange(q, mp, size, M_PCPROTO, DL_BIND_ACK)) == NULL)
3868 		return (NULL);
3869 
3870 	dlp = (dl_bind_ack_t *)mp->b_rptr;
3871 	dlp->dl_sap = gld->gld_sap;
3872 	dlp->dl_addr_length = addrlen;
3873 	dlp->dl_addr_offset = sizeof (dl_bind_ack_t);
3874 	dlp->dl_max_conind = 0;
3875 	dlp->dl_xidtest_flg = 0;
3876 
3877 	mac_copy(mac_pvt->curr_macaddr, (uchar_t *)&dlp[1],
3878 	    macinfo->gldm_addrlen);
3879 	sapp = mp->b_rptr + dlp->dl_addr_offset + macinfo->gldm_addrlen;
3880 	*(ushort_t *)sapp = gld->gld_sap;
3881 
3882 	return (mp);
3883 }
3884 
3885 /*
3886  * gld_bind - determine if a SAP is already allocated and whether it is legal
3887  * to do the bind at this time
3888  */
3889 static int
3890 gld_bind(queue_t *q, mblk_t *mp)
3891 {
3892 	ulong_t	sap;
3893 	dl_bind_req_t *dlp;
3894 	gld_t *gld = (gld_t *)q->q_ptr;
3895 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3896 
3897 	ASSERT(gld);
3898 	ASSERT(gld->gld_qptr == RD(q));
3899 
3900 #ifdef GLD_DEBUG
3901 	if (gld_debug & GLDTRACE)
3902 		cmn_err(CE_NOTE, "gld_bind(%p %p)", (void *)q, (void *)mp);
3903 #endif
3904 
3905 	dlp = (dl_bind_req_t *)mp->b_rptr;
3906 	sap = dlp->dl_sap;
3907 
3908 #ifdef GLD_DEBUG
3909 	if (gld_debug & GLDPROT)
3910 		cmn_err(CE_NOTE, "gld_bind: lsap=%lx", sap);
3911 #endif
3912 
3913 	if (gld->gld_state != DL_UNBOUND) {
3914 #ifdef GLD_DEBUG
3915 		if (gld_debug & GLDERRS)
3916 			cmn_err(CE_NOTE, "gld_bind: bound or not attached (%d)",
3917 			    gld->gld_state);
3918 #endif
3919 		return (DL_OUTSTATE);
3920 	}
3921 	ASSERT(macinfo);
3922 
3923 	if (dlp->dl_service_mode != DL_CLDLS) {
3924 		return (DL_UNSUPPORTED);
3925 	}
3926 	if (dlp->dl_xidtest_flg & (DL_AUTO_XID | DL_AUTO_TEST)) {
3927 		return (DL_NOAUTO);
3928 	}
3929 
3930 	/*
3931 	 * Check sap validity and decide whether this stream accepts
3932 	 * IEEE 802.2 (LLC) packets.
3933 	 */
3934 	if (sap > ETHERTYPE_MAX)
3935 		return (DL_BADSAP);
3936 
3937 	/*
3938 	 * Decide whether the SAP value selects EtherType encoding/decoding.
3939 	 * For compatibility with monolithic ethernet drivers, the range of
3940 	 * SAP values is different for DL_ETHER media.
3941 	 */
3942 	switch (macinfo->gldm_type) {
3943 	case DL_ETHER:
3944 		gld->gld_ethertype = (sap > ETHERMTU);
3945 		break;
3946 	default:
3947 		gld->gld_ethertype = (sap > GLD_MAX_802_SAP);
3948 		break;
3949 	}
3950 
3951 	/* if we get to here, then the SAP is legal enough */
3952 	GLDM_LOCK(macinfo, RW_WRITER);
3953 	gld->gld_state = DL_IDLE;	/* bound and ready */
3954 	gld->gld_sap = sap;
3955 	gld_set_ipq(gld);
3956 
3957 #ifdef GLD_DEBUG
3958 	if (gld_debug & GLDPROT)
3959 		cmn_err(CE_NOTE, "gld_bind: ok - sap = %d", gld->gld_sap);
3960 #endif
3961 
3962 	/* ACK the BIND */
3963 	mp = gld_bindack(q, mp);
3964 	GLDM_UNLOCK(macinfo);
3965 
3966 	if (mp != NULL) {
3967 		qreply(q, mp);
3968 		return (GLDE_OK);
3969 	}
3970 
3971 	return (DL_SYSERR);
3972 }
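
/*
 * Worked examples of the classification above on DL_ETHER media
 * (ETHERMTU is 1500): binding to dl_sap 0x0800 (ETHERTYPE_IP) sets
 * gld_ethertype, so frames carry a 16-bit EtherType; binding to
 * dl_sap 0xAA leaves it clear, so the stream sends and receives
 * 802.2 LLC frames with SAP 0xAA.  A hedged sketch of the client
 * request ("fd" is an attached DLPI stream; error handling omitted):
 *
 *	dl_bind_req_t breq;
 *	struct strbuf ctl;
 *
 *	breq.dl_primitive = DL_BIND_REQ;
 *	breq.dl_sap = ETHERTYPE_IP;
 *	breq.dl_max_conind = 0;
 *	breq.dl_service_mode = DL_CLDLS;
 *	breq.dl_conn_mgmt = 0;
 *	breq.dl_xidtest_flg = 0;
 *	ctl.buf = (char *)&breq;
 *	ctl.len = DL_BIND_REQ_SIZE;
 *	(void) putmsg(fd, &ctl, NULL, 0);
 */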
3973 
3974 /*
3975  * gld_unbind - perform an unbind of an LSAP or ether type on the stream.
3976  * The stream is still open and can be re-bound.
3977  */
3978 static int
3979 gld_unbind(queue_t *q, mblk_t *mp)
3980 {
3981 	gld_t *gld = (gld_t *)q->q_ptr;
3982 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3983 
3984 	ASSERT(gld);
3985 
3986 #ifdef GLD_DEBUG
3987 	if (gld_debug & GLDTRACE)
3988 		cmn_err(CE_NOTE, "gld_unbind(%p %p)", (void *)q, (void *)mp);
3989 #endif
3990 
3991 	if (gld->gld_state != DL_IDLE) {
3992 #ifdef GLD_DEBUG
3993 		if (gld_debug & GLDERRS)
3994 			cmn_err(CE_NOTE, "gld_unbind: wrong state (%d)",
3995 			    gld->gld_state);
3996 #endif
3997 		return (DL_OUTSTATE);
3998 	}
3999 	ASSERT(macinfo);
4000 
4001 	/*
4002 	 * Avoid unbinding (DL_UNBIND_REQ) while FAST/RAW is inside wput.
4003 	 * See comments above gld_start().
4004 	 */
4005 	gld->gld_in_unbind = B_TRUE;	/* disallow wput=>start */
4006 	membar_enter();
4007 	if (gld->gld_wput_count != 0) {
4008 		gld->gld_in_unbind = B_FALSE;
4009 		ASSERT(mp);		/* we didn't come from close */
4010 #ifdef GLD_DEBUG
4011 		if (gld_debug & GLDETRACE)
4012 			cmn_err(CE_NOTE, "gld_unbind: defer for wput");
4013 #endif
4014 		(void) putbq(q, mp);
4015 		qenable(q);		/* try again soon */
4016 		return (GLDE_RETRY);
4017 	}
4018 
4019 	GLDM_LOCK(macinfo, RW_WRITER);
4020 	gld->gld_state = DL_UNBOUND;
4021 	gld->gld_sap = 0;
4022 	gld_set_ipq(gld);
4023 	GLDM_UNLOCK(macinfo);
4024 
4025 	membar_exit();
4026 	gld->gld_in_unbind = B_FALSE;
4027 
4028 	/* mp is NULL if we came from close */
4029 	if (mp) {
4030 		gld_flushqueue(q);	/* flush the queues */
4031 		dlokack(q, mp, DL_UNBIND_REQ);
4032 	}
4033 	return (GLDE_OK);
4034 }
4035 
4036 /*
4037  * gld_inforeq - generate the response to an info request
4038  */
4039 static int
4040 gld_inforeq(queue_t *q, mblk_t *mp)
4041 {
4042 	gld_t		*gld;
4043 	dl_info_ack_t	*dlp;
4044 	int		bufsize;
4045 	glddev_t	*glddev;
4046 	gld_mac_info_t	*macinfo;
4047 	gld_mac_pvt_t	*mac_pvt;
4048 	int		sel_offset = 0;
4049 	int		range_offset = 0;
4050 	int		addr_offset;
4051 	int		addr_length;
4052 	int		sap_length;
4053 	int		brdcst_offset;
4054 	int		brdcst_length;
4055 	gld_vlan_t	*vlan;
4056 	uchar_t		*sapp;
4057 
4058 #ifdef GLD_DEBUG
4059 	if (gld_debug & GLDTRACE)
4060 		cmn_err(CE_NOTE, "gld_inforeq(%p %p)", (void *)q, (void *)mp);
4061 #endif
4062 	gld = (gld_t *)q->q_ptr;
4063 	ASSERT(gld);
4064 	glddev = gld->gld_device;
4065 	ASSERT(glddev);
4066 
4067 	if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) {
4068 		macinfo = gld->gld_mac_info;
4069 		ASSERT(macinfo != NULL);
4070 
4071 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4072 
4073 		addr_length = macinfo->gldm_addrlen;
4074 		sap_length = macinfo->gldm_saplen;
4075 		brdcst_length = macinfo->gldm_addrlen;
4076 	} else {
4077 		addr_length = glddev->gld_addrlen;
4078 		sap_length = glddev->gld_saplen;
4079 		brdcst_length = glddev->gld_addrlen;
4080 	}
4081 
4082 	bufsize = sizeof (dl_info_ack_t);
4083 
4084 	addr_offset = bufsize;
4085 	bufsize += addr_length;
4086 	bufsize += abs(sap_length);
4087 
4088 	brdcst_offset = bufsize;
4089 	bufsize += brdcst_length;
4090 
4091 	if ((vlan = (gld_vlan_t *)gld->gld_vlan) != NULL &&
4092 	    vlan->gldv_id != VLAN_VID_NONE) {
4093 		sel_offset = P2ROUNDUP(bufsize, sizeof (int64_t));
4094 		bufsize = sel_offset + sizeof (dl_qos_cl_sel1_t);
4095 
4096 		range_offset = P2ROUNDUP(bufsize, sizeof (int64_t));
4097 		bufsize = range_offset + sizeof (dl_qos_cl_range1_t);
4098 	}
4099 
4100 	if ((mp = mexchange(q, mp, bufsize, M_PCPROTO, DL_INFO_ACK)) == NULL)
4101 		return (GLDE_OK);	/* nothing more to be done */
4102 
4103 	bzero(mp->b_rptr, bufsize);
4104 
4105 	dlp = (dl_info_ack_t *)mp->b_rptr;
4106 	dlp->dl_primitive = DL_INFO_ACK;
4107 	dlp->dl_version = DL_VERSION_2;
4108 	dlp->dl_service_mode = DL_CLDLS;
4109 	dlp->dl_current_state = gld->gld_state;
4110 	dlp->dl_provider_style = gld->gld_style;
4111 
4112 	if (sel_offset != 0) {
4113 		dl_qos_cl_sel1_t	*selp;
4114 		dl_qos_cl_range1_t	*rangep;
4115 
4116 		ASSERT(range_offset != 0);
4117 
4118 		dlp->dl_qos_offset = sel_offset;
4119 		dlp->dl_qos_length = sizeof (dl_qos_cl_sel1_t);
4120 		dlp->dl_qos_range_offset = range_offset;
4121 		dlp->dl_qos_range_length = sizeof (dl_qos_cl_range1_t);
4122 
4123 		selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + sel_offset);
4124 		selp->dl_qos_type = DL_QOS_CL_SEL1;
4125 		selp->dl_priority = gld->gld_upri;
4126 
4127 		rangep = (dl_qos_cl_range1_t *)(mp->b_rptr + range_offset);
4128 		rangep->dl_qos_type = DL_QOS_CL_RANGE1;
4129 		rangep->dl_priority.dl_min = 0;
4130 		rangep->dl_priority.dl_max = 7;
4131 	}
4132 
4133 	if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) {
4134 		dlp->dl_min_sdu = macinfo->gldm_minpkt;
4135 		dlp->dl_max_sdu = macinfo->gldm_maxpkt;
4136 		dlp->dl_mac_type = macinfo->gldm_type;
4137 		dlp->dl_addr_length = addr_length + abs(sap_length);
4138 		dlp->dl_sap_length = sap_length;
4139 
4140 		if (gld->gld_state == DL_IDLE) {
4141 			/*
4142 			 * If we are bound to a non-LLC SAP on any medium
4143 			 * other than Ethernet, then we need room for a
4144 			 * SNAP header.  So we have to adjust the MTU size
4145 			 * accordingly.  XXX I suppose this should be done
4146 			 * in gldutil.c, but it seems likely that this will
4147 			 * always be true for everything GLD supports but
4148 			 * Ethernet.  Check this if you add another medium.
4149 			 */
4150 			if ((macinfo->gldm_type == DL_TPR ||
4151 			    macinfo->gldm_type == DL_FDDI) &&
4152 			    gld->gld_ethertype)
4153 				dlp->dl_max_sdu -= LLC_SNAP_HDR_LEN;
4154 
4155 			/* copy macaddr and sap */
4156 			dlp->dl_addr_offset = addr_offset;
4157 
4158 			mac_copy(mac_pvt->curr_macaddr, mp->b_rptr +
4159 			    addr_offset, macinfo->gldm_addrlen);
4160 			sapp = mp->b_rptr + addr_offset +
4161 			    macinfo->gldm_addrlen;
4162 			*(ushort_t *)sapp = gld->gld_sap;
4163 		} else {
4164 			dlp->dl_addr_offset = 0;
4165 		}
4166 
4167 		/* copy broadcast addr */
4168 		dlp->dl_brdcst_addr_length = macinfo->gldm_addrlen;
4169 		dlp->dl_brdcst_addr_offset = brdcst_offset;
4170 		mac_copy((caddr_t)macinfo->gldm_broadcast_addr,
4171 		    mp->b_rptr + brdcst_offset, brdcst_length);
4172 	} else {
4173 		/*
4174 		 * No PPA is attached.
4175 		 * The best we can do is use the values provided
4176 		 * by the first mac that called gld_register.
4177 		 */
4178 		dlp->dl_min_sdu = glddev->gld_minsdu;
4179 		dlp->dl_max_sdu = glddev->gld_maxsdu;
4180 		dlp->dl_mac_type = glddev->gld_type;
4181 		dlp->dl_addr_length = addr_length + abs(sap_length);
4182 		dlp->dl_sap_length = sap_length;
4183 		dlp->dl_addr_offset = 0;
4184 		dlp->dl_brdcst_addr_offset = brdcst_offset;
4185 		dlp->dl_brdcst_addr_length = brdcst_length;
4186 		mac_copy((caddr_t)glddev->gld_broadcast,
4187 		    mp->b_rptr + brdcst_offset, brdcst_length);
4188 	}
4189 	qreply(q, mp);
4190 	return (GLDE_OK);
4191 }
4192 
4193 /*
4194  * gld_unitdata (q, mp)
4195  * send a datagram.  Destination address/lsap is in M_PROTO
4196  * message (first mblk), data is in the remainder of the message.
4197  *
4198  */
4199 static int
4200 gld_unitdata(queue_t *q, mblk_t *mp)
4201 {
4202 	gld_t *gld = (gld_t *)q->q_ptr;
4203 	dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr;
4204 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4205 	size_t	msglen;
4206 	mblk_t	*nmp;
4207 	gld_interface_t *ifp;
4208 	uint32_t start;
4209 	uint32_t stuff;
4210 	uint32_t end;
4211 	uint32_t value;
4212 	uint32_t flags;
4213 	uint32_t upri;
4214 
4215 #ifdef GLD_DEBUG
4216 	if (gld_debug & GLDTRACE)
4217 		cmn_err(CE_NOTE, "gld_unitdata(%p %p)", (void *)q, (void *)mp);
4218 #endif
4219 
4220 	if (gld->gld_state != DL_IDLE) {
4221 #ifdef GLD_DEBUG
4222 		if (gld_debug & GLDERRS)
4223 			cmn_err(CE_NOTE, "gld_unitdata: wrong state (%d)",
4224 			    gld->gld_state);
4225 #endif
4226 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4227 		    dlp->dl_dest_addr_length, DL_OUTSTATE, 0);
4228 		return (GLDE_OK);
4229 	}
4230 	ASSERT(macinfo != NULL);
4231 
4232 	if (!MBLKIN(mp, dlp->dl_dest_addr_offset, dlp->dl_dest_addr_length) ||
4233 	    dlp->dl_dest_addr_length !=
4234 	    macinfo->gldm_addrlen + abs(macinfo->gldm_saplen)) {
4235 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4236 		    dlp->dl_dest_addr_length, DL_BADADDR, 0);
4237 		return (GLDE_OK);
4238 	}
4239 
4240 	upri = dlp->dl_priority.dl_max;
4241 
4242 	msglen = msgdsize(mp);
4243 	if (msglen == 0 || msglen > macinfo->gldm_maxpkt) {
4244 #ifdef GLD_DEBUG
4245 		if (gld_debug & GLDERRS)
4246 			cmn_err(CE_NOTE, "gld_unitdata: bad msglen (%d)",
4247 			    (int)msglen);
4248 #endif
4249 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4250 		    dlp->dl_dest_addr_length, DL_BADDATA, 0);
4251 		return (GLDE_OK);
4252 	}
4253 
4254 	ASSERT(mp->b_cont != NULL);	/* because msgdsize(mp) is nonzero */
4255 
4256 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
4257 
4258 	/* grab any checksum information that may be present */
4259 	hcksum_retrieve(mp->b_cont, NULL, NULL, &start, &stuff, &end,
4260 	    &value, &flags);
4261 
4262 	/*
4263 	 * Prepend a valid header for transmission
4264 	 */
4265 	if ((nmp = (*ifp->mkunitdata)(gld, mp)) == NULL) {
4266 #ifdef GLD_DEBUG
4267 		if (gld_debug & GLDERRS)
4268 			cmn_err(CE_NOTE, "gld_unitdata: mkunitdata failed.");
4269 #endif
4270 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4271 		    dlp->dl_dest_addr_length, DL_SYSERR, ENOSR);
4272 		return (GLDE_OK);
4273 	}
4274 
4275 	/* apply any checksum information to the first block in the chain */
4276 	(void) hcksum_assoc(nmp, NULL, NULL, start, stuff, end, value,
4277 	    flags, 0);
4278 
4279 	if (gld_start(q, nmp, GLD_WSRV, upri) == GLD_NORESOURCES) {
4280 		qenable(q);
4281 		return (GLDE_RETRY);
4282 	}
4283 
4284 	return (GLDE_OK);
4285 }
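
/*
 * The destination address validated above is in DLPI form: the
 * gldm_addrlen-byte MAC address immediately followed by the 2-byte SAP
 * (gldm_saplen is negative, meaning the SAP follows the address).  A
 * hedged sketch of a client building such a request for Ethernet
 * (error handling omitted; dst_ether_addr and payload_mp are assumed
 * to exist):
 *
 *	uchar_t dest[ETHERADDRL + sizeof (ushort_t)];
 *	ushort_t sap = ETHERTYPE_IP;
 *	dl_unitdata_req_t *udp;
 *	mblk_t *mp;
 *
 *	bcopy(dst_ether_addr, dest, ETHERADDRL);
 *	bcopy(&sap, dest + ETHERADDRL, sizeof (sap));
 *
 *	mp = allocb(sizeof (*udp) + sizeof (dest), BPRI_MED);
 *	mp->b_datap->db_type = M_PROTO;
 *	udp = (dl_unitdata_req_t *)mp->b_wptr;
 *	udp->dl_primitive = DL_UNITDATA_REQ;
 *	udp->dl_dest_addr_length = sizeof (dest);
 *	udp->dl_dest_addr_offset = sizeof (*udp);
 *	udp->dl_priority.dl_min = udp->dl_priority.dl_max = 0;
 *	bcopy(dest, mp->b_wptr + sizeof (*udp), sizeof (dest));
 *	mp->b_wptr += sizeof (*udp) + sizeof (dest);
 *	mp->b_cont = payload_mp;
 */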
4286 
4287 /*
4288  * gldattach(q, mp)
4289  * DLPI DL_ATTACH_REQ
4290  * this attaches the stream to a PPA
4291  */
4292 static int
4293 gldattach(queue_t *q, mblk_t *mp)
4294 {
4295 	dl_attach_req_t *at;
4296 	gld_mac_info_t *macinfo;
4297 	gld_t  *gld = (gld_t *)q->q_ptr;
4298 	glddev_t *glddev;
4299 	gld_mac_pvt_t *mac_pvt;
4300 	uint32_t ppa;
4301 	uint32_t vid;
4302 	gld_vlan_t *vlan;
4303 
4304 	at = (dl_attach_req_t *)mp->b_rptr;
4305 
4306 	if (gld->gld_state != DL_UNATTACHED)
4307 		return (DL_OUTSTATE);
4308 
4309 	ASSERT(!gld->gld_mac_info);
4310 
4311 	ppa = at->dl_ppa % GLD_VLAN_SCALE;	/* 0 .. 999	*/
4312 	vid = at->dl_ppa / GLD_VLAN_SCALE;	/* 0 .. 4094	*/
4313 	if (vid > VLAN_VID_MAX)
4314 		return (DL_BADPPA);
4315 
4316 	glddev = gld->gld_device;
4317 	mutex_enter(&glddev->gld_devlock);
4318 	for (macinfo = glddev->gld_mac_next;
4319 	    macinfo != (gld_mac_info_t *)&glddev->gld_mac_next;
4320 	    macinfo = macinfo->gldm_next) {
4321 		int inst;
4322 
4323 		ASSERT(macinfo != NULL);
4324 		if (macinfo->gldm_ppa != ppa)
4325 			continue;
4326 
4327 		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
4328 			continue;	/* this one's not ready yet */
4329 
4330 		/*
4331 		 * VLAN sanity check
4332 		 */
4333 		if (vid != VLAN_VID_NONE && !VLAN_CAPABLE(macinfo)) {
4334 			mutex_exit(&glddev->gld_devlock);
4335 			return (DL_BADPPA);
4336 		}
4337 
4338 		/*
4339 		 * We found the correct PPA, hold the instance
4340 		 */
4341 		inst = ddi_get_instance(macinfo->gldm_devinfo);
4342 		if (inst == -1 || qassociate(q, inst) != 0) {
4343 			mutex_exit(&glddev->gld_devlock);
4344 			return (DL_BADPPA);
4345 		}
4346 
4347 		/* Take the stream off the per-driver-class list */
4348 		gldremque(gld);
4349 
4350 		/*
4351 		 * We must hold the lock to prevent multiple calls
4352 		 * to the reset and start routines.
4353 		 */
4354 		GLDM_LOCK(macinfo, RW_WRITER);
4355 
4356 		gld->gld_mac_info = macinfo;
4357 
4358 		if (macinfo->gldm_send_tagged != NULL)
4359 			gld->gld_send = macinfo->gldm_send_tagged;
4360 		else
4361 			gld->gld_send = macinfo->gldm_send;
4362 
4363 		if ((vlan = gld_get_vlan(macinfo, vid)) == NULL) {
4364 			GLDM_UNLOCK(macinfo);
4365 			gldinsque(gld, glddev->gld_str_prev);
4366 			mutex_exit(&glddev->gld_devlock);
4367 			(void) qassociate(q, -1);
4368 			return (DL_BADPPA);
4369 		}
4370 
4371 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4372 		if (!mac_pvt->started) {
4373 			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
4374 				gld_rem_vlan(vlan);
4375 				GLDM_UNLOCK(macinfo);
4376 				gldinsque(gld, glddev->gld_str_prev);
4377 				mutex_exit(&glddev->gld_devlock);
4378 				dlerrorack(q, mp, DL_ATTACH_REQ, DL_SYSERR,
4379 				    EIO);
4380 				(void) qassociate(q, -1);
4381 				return (GLDE_OK);
4382 			}
4383 		}
4384 
4385 		gld->gld_vlan = vlan;
4386 		vlan->gldv_nstreams++;
4387 		gldinsque(gld, vlan->gldv_str_prev);
4388 		gld->gld_state = DL_UNBOUND;
4389 		GLDM_UNLOCK(macinfo);
4390 
4391 #ifdef GLD_DEBUG
4392 		if (gld_debug & GLDPROT) {
4393 			cmn_err(CE_NOTE, "gldattach(%p, %p, PPA = %d)",
4394 			    (void *)q, (void *)mp, macinfo->gldm_ppa);
4395 		}
4396 #endif
4397 		mutex_exit(&glddev->gld_devlock);
4398 		dlokack(q, mp, DL_ATTACH_REQ);
4399 		return (GLDE_OK);
4400 	}
4401 	mutex_exit(&glddev->gld_devlock);
4402 	return (DL_BADPPA);
4403 }
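
/*
 * A worked example of the PPA decoding at the top of this routine
 * (GLD_VLAN_SCALE is 1000, matching the ranges noted above): a Style 2
 * client attaching to dl_ppa 2003 gets
 *
 *	ppa = 2003 % 1000 = 3		(instance 3 of the driver)
 *	vid = 2003 / 1000 = 2		(VLAN ID 2)
 *
 * whereas dl_ppa 3 attaches to instance 3 untagged (vid == VLAN_VID_NONE).
 */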
4404 
4405 /*
4406  * gldunattach(q, mp)
4407  * DLPI DL_DETACH_REQ
4408  * detaches the mac layer from the stream
4409  */
4410 int
4411 gldunattach(queue_t *q, mblk_t *mp)
4412 {
4413 	gld_t  *gld = (gld_t *)q->q_ptr;
4414 	glddev_t *glddev = gld->gld_device;
4415 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4416 	int	state = gld->gld_state;
4417 	int	i;
4418 	gld_mac_pvt_t *mac_pvt;
4419 	gld_vlan_t *vlan;
4420 	boolean_t phys_off;
4421 	boolean_t mult_off;
4422 	int op = GLD_MAC_PROMISC_NOOP;
4423 
4424 	if (state != DL_UNBOUND)
4425 		return (DL_OUTSTATE);
4426 
4427 	ASSERT(macinfo != NULL);
4428 	ASSERT(gld->gld_sap == 0);
4429 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4430 
4431 #ifdef GLD_DEBUG
4432 	if (gld_debug & GLDPROT) {
4433 		cmn_err(CE_NOTE, "gldunattach(%p, %p, PPA = %d)",
4434 		    (void *)q, (void *)mp, macinfo->gldm_ppa);
4435 	}
4436 #endif
4437 
4438 	GLDM_LOCK(macinfo, RW_WRITER);
4439 
4440 	if (gld->gld_mcast) {
4441 		for (i = 0; i < gld->gld_multicnt; i++) {
4442 			gld_mcast_t *mcast;
4443 
4444 			if ((mcast = gld->gld_mcast[i]) != NULL) {
4445 				ASSERT(mcast->gldm_refcnt);
4446 				gld_send_disable_multi(macinfo, mcast);
4447 			}
4448 		}
4449 		kmem_free(gld->gld_mcast,
4450 		    sizeof (gld_mcast_t *) * gld->gld_multicnt);
4451 		gld->gld_mcast = NULL;
4452 		gld->gld_multicnt = 0;
4453 	}
4454 
4455 	/* decide if we need to turn off any promiscuity */
4456 	phys_off = (gld->gld_flags & GLD_PROM_PHYS &&
4457 	    --mac_pvt->nprom == 0);
4458 	mult_off = (gld->gld_flags & GLD_PROM_MULT &&
4459 	    --mac_pvt->nprom_multi == 0);
4460 
4461 	gld->gld_flags &= ~(GLD_PROM_PHYS | GLD_PROM_SAP | GLD_PROM_MULT);
4462 
4463 	if (phys_off) {
4464 		op = (mac_pvt->nprom_multi == 0) ? GLD_MAC_PROMISC_NONE :
4465 		    GLD_MAC_PROMISC_MULTI;
4466 	} else if (mult_off) {
4467 		op = (mac_pvt->nprom == 0) ? GLD_MAC_PROMISC_NONE :
4468 		    GLD_MAC_PROMISC_NOOP;	/* phys overrides multi */
4469 	}
4470 
4471 	if (op != GLD_MAC_PROMISC_NOOP)
4472 		(void) (*macinfo->gldm_set_promiscuous)(macinfo, op);
4473 
4474 	GLDM_UNLOCK(macinfo);
4475 
4476 	if (phys_off)
4477 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL);
4478 
4479 	/*
4480 	 * We need to hold both locks when modifying the mac stream list
4481 	 * to protect findminor as well as everyone else.
4482 	 */
4483 	mutex_enter(&glddev->gld_devlock);
4484 	GLDM_LOCK(macinfo, RW_WRITER);
4485 
4486 	/* disassociate this stream with its vlan and underlying mac */
4487 	gldremque(gld);
4488 
4489 	vlan = (gld_vlan_t *)gld->gld_vlan;
4490 	if (--vlan->gldv_nstreams == 0) {
4491 		gld_rem_vlan(vlan);
4492 		gld->gld_vlan = NULL;
4493 	}
4494 
4495 	gld->gld_mac_info = NULL;
4496 	gld->gld_state = DL_UNATTACHED;
4497 
4498 	/* cleanup mac layer if last vlan */
4499 	if (mac_pvt->nvlan == 0) {
4500 		gld_stop_mac(macinfo);
4501 		macinfo->gldm_GLD_flags &= ~GLD_INTR_WAIT;
4502 	}
4503 
4504 	/* make sure no references to this gld for gld_v0_sched */
4505 	if (mac_pvt->last_sched == gld)
4506 		mac_pvt->last_sched = NULL;
4507 
4508 	GLDM_UNLOCK(macinfo);
4509 
4510 	/* put the stream on the unattached Style 2 list */
4511 	gldinsque(gld, glddev->gld_str_prev);
4512 
4513 	mutex_exit(&glddev->gld_devlock);
4514 
4515 	/* There will be no mp if we were called from close */
4516 	if (mp) {
4517 		dlokack(q, mp, DL_DETACH_REQ);
4518 	}
4519 	if (gld->gld_style == DL_STYLE2)
4520 		(void) qassociate(q, -1);
4521 	return (GLDE_OK);
4522 }
4523 
4524 /*
4525  * gld_enable_multi (q, mp)
4526  * Enable a multicast address on the stream.  If the mac layer isn't
4527  * already enabled for this address, enable it at that level as well.
4528  */
4529 static int
4530 gld_enable_multi(queue_t *q, mblk_t *mp)
4531 {
4532 	gld_t  *gld = (gld_t *)q->q_ptr;
4533 	glddev_t *glddev;
4534 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4535 	unsigned char *maddr;
4536 	dl_enabmulti_req_t *multi;
4537 	gld_mcast_t *mcast;
4538 	int	i, rc;
4539 	gld_mac_pvt_t *mac_pvt;
4540 
4541 #ifdef GLD_DEBUG
4542 	if (gld_debug & GLDPROT) {
4543 		cmn_err(CE_NOTE, "gld_enable_multi(%p, %p)", (void *)q,
4544 		    (void *)mp);
4545 	}
4546 #endif
4547 
4548 	if (gld->gld_state == DL_UNATTACHED)
4549 		return (DL_OUTSTATE);
4550 
4551 	ASSERT(macinfo != NULL);
4552 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4553 
4554 	if (macinfo->gldm_set_multicast == NULL) {
4555 		return (DL_UNSUPPORTED);
4556 	}
4557 
4558 	multi = (dl_enabmulti_req_t *)mp->b_rptr;
4559 
4560 	if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) ||
4561 	    multi->dl_addr_length != macinfo->gldm_addrlen)
4562 		return (DL_BADADDR);
4563 
4564 	/* request appears to be valid */
4565 
4566 	glddev = mac_pvt->major_dev;
4567 	ASSERT(glddev == gld->gld_device);
4568 
4569 	maddr = mp->b_rptr + multi->dl_addr_offset;
4570 
4571 	/*
4572 	 * The multicast addresses live in a per-device table, along
4573 	 * with a reference count.  Each stream has a table that
4574 	 * points to entries in the device table, with the reference
4575 	 * count reflecting the number of streams pointing at it.  If
4576 	 * this multicast address is already in the per-device table,
4577 	 * all we have to do is point at it.
4578 	 */
4579 	GLDM_LOCK(macinfo, RW_WRITER);
4580 
4581 	/* does this address appear in current table? */
4582 	if (gld->gld_mcast == NULL) {
4583 		/* no mcast addresses -- allocate table */
4584 		gld->gld_mcast = GETSTRUCT(gld_mcast_t *,
4585 		    glddev->gld_multisize);
4586 		if (gld->gld_mcast == NULL) {
4587 			GLDM_UNLOCK(macinfo);
4588 			dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR);
4589 			return (GLDE_OK);
4590 		}
4591 		gld->gld_multicnt = glddev->gld_multisize;
4592 	} else {
4593 		for (i = 0; i < gld->gld_multicnt; i++) {
4594 			if (gld->gld_mcast[i] &&
4595 			    mac_eq(gld->gld_mcast[i]->gldm_addr,
4596 			    maddr, macinfo->gldm_addrlen)) {
4597 				/* this is a match -- just succeed */
4598 				ASSERT(gld->gld_mcast[i]->gldm_refcnt);
4599 				GLDM_UNLOCK(macinfo);
4600 				dlokack(q, mp, DL_ENABMULTI_REQ);
4601 				return (GLDE_OK);
4602 			}
4603 		}
4604 	}
4605 
4606 	/*
4607 	 * It wasn't in the stream's table, so check whether the mac layer has it.
4608 	 */
4609 	mcast = NULL;
4610 	if (mac_pvt->mcast_table == NULL) {
4611 		mac_pvt->mcast_table = GETSTRUCT(gld_mcast_t,
4612 		    glddev->gld_multisize);
4613 		if (mac_pvt->mcast_table == NULL) {
4614 			GLDM_UNLOCK(macinfo);
4615 			dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR);
4616 			return (GLDE_OK);
4617 		}
4618 	} else {
4619 		for (i = 0; i < glddev->gld_multisize; i++) {
4620 			if (mac_pvt->mcast_table[i].gldm_refcnt &&
4621 			    mac_eq(mac_pvt->mcast_table[i].gldm_addr,
4622 			    maddr, macinfo->gldm_addrlen)) {
4623 				mcast = &mac_pvt->mcast_table[i];
4624 				break;
4625 			}
4626 		}
4627 	}
4628 	if (mcast == NULL) {
4629 		/* not in mac layer -- find an empty mac slot to fill in */
4630 		for (i = 0; i < glddev->gld_multisize; i++) {
4631 			if (mac_pvt->mcast_table[i].gldm_refcnt == 0) {
4632 				mcast = &mac_pvt->mcast_table[i];
4633 				mac_copy(maddr, mcast->gldm_addr,
4634 				    macinfo->gldm_addrlen);
4635 				break;
4636 			}
4637 		}
4638 	}
4639 	if (mcast == NULL) {
4640 		/* couldn't get a mac layer slot */
4641 		GLDM_UNLOCK(macinfo);
4642 		return (DL_TOOMANY);
4643 	}
4644 
4645 	/* now we have a mac layer slot in mcast -- get a stream slot */
4646 	for (i = 0; i < gld->gld_multicnt; i++) {
4647 		if (gld->gld_mcast[i] != NULL)
4648 			continue;
4649 		/* found an empty slot */
4650 		if (!mcast->gldm_refcnt) {
4651 			/* set mcast in hardware */
4652 			unsigned char cmaddr[GLD_MAX_ADDRLEN];
4653 
4654 			ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
4655 			cmac_copy(maddr, cmaddr,
4656 			    macinfo->gldm_addrlen, macinfo);
4657 
4658 			rc = (*macinfo->gldm_set_multicast)
4659 			    (macinfo, cmaddr, GLD_MULTI_ENABLE);
4660 			if (rc == GLD_NOTSUPPORTED) {
4661 				GLDM_UNLOCK(macinfo);
4662 				return (DL_NOTSUPPORTED);
4663 			} else if (rc == GLD_NORESOURCES) {
4664 				GLDM_UNLOCK(macinfo);
4665 				return (DL_TOOMANY);
4666 			} else if (rc == GLD_BADARG) {
4667 				GLDM_UNLOCK(macinfo);
4668 				return (DL_BADADDR);
4669 			} else if (rc == GLD_RETRY) {
4670 				/*
4671 				 * The putbq and gld_xwait must be
4672 				 * within the lock to prevent races
4673 				 * with gld_sched.
4674 				 */
4675 				(void) putbq(q, mp);
4676 				gld->gld_xwait = B_TRUE;
4677 				GLDM_UNLOCK(macinfo);
4678 				return (GLDE_RETRY);
4679 			} else if (rc != GLD_SUCCESS) {
4680 				GLDM_UNLOCK(macinfo);
4681 				dlerrorack(q, mp, DL_ENABMULTI_REQ,
4682 				    DL_SYSERR, EIO);
4683 				return (GLDE_OK);
4684 			}
4685 		}
4686 		gld->gld_mcast[i] = mcast;
4687 		mcast->gldm_refcnt++;
4688 		GLDM_UNLOCK(macinfo);
4689 		dlokack(q, mp, DL_ENABMULTI_REQ);
4690 		return (GLDE_OK);
4691 	}
4692 
4693 	/* couldn't get a stream slot */
4694 	GLDM_UNLOCK(macinfo);
4695 	return (DL_TOOMANY);
4696 }
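
/*
 * The driver-side half of this exchange is the gldm_set_multicast()
 * entry point; the values it returns map directly onto the cases handled
 * above (GLD_NOTSUPPORTED, GLD_NORESOURCES, GLD_BADARG, GLD_RETRY, or
 * anything else treated as failure).  A minimal, hedged sketch (the xx_*
 * names are hypothetical):
 *
 *	static int
 *	xx_set_multicast(gld_mac_info_t *macinfo, uchar_t *addr, int op)
 *	{
 *		struct xx_softc *sc =
 *		    (struct xx_softc *)macinfo->gldm_private;
 *
 *		if (op == GLD_MULTI_ENABLE) {
 *			if (xx_hash_add(sc, addr) != 0)
 *				return (GLD_NORESOURCES);
 *		} else {
 *			xx_hash_remove(sc, addr);
 *		}
 *		xx_program_filter(sc);
 *		return (GLD_SUCCESS);
 *	}
 */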
4697 
4698 
4699 /*
4700  * gld_disable_multi (q, mp)
4701  * Disable the multicast address on the stream.  If this is the last
4702  * reference at the mac layer, disable it there as well.
4703  */
4704 static int
4705 gld_disable_multi(queue_t *q, mblk_t *mp)
4706 {
4707 	gld_t  *gld;
4708 	gld_mac_info_t *macinfo;
4709 	unsigned char *maddr;
4710 	dl_disabmulti_req_t *multi;
4711 	int i;
4712 	gld_mcast_t *mcast;
4713 
4714 #ifdef GLD_DEBUG
4715 	if (gld_debug & GLDPROT) {
4716 		cmn_err(CE_NOTE, "gld_disable_multi(%p, %p)", (void *)q,
4717 		    (void *)mp);
4718 	}
4719 #endif
4720 
4721 	gld = (gld_t *)q->q_ptr;
4722 	if (gld->gld_state == DL_UNATTACHED)
4723 		return (DL_OUTSTATE);
4724 
4725 	macinfo = gld->gld_mac_info;
4726 	ASSERT(macinfo != NULL);
4727 	if (macinfo->gldm_set_multicast == NULL) {
4728 		return (DL_UNSUPPORTED);
4729 	}
4730 
4731 	multi = (dl_disabmulti_req_t *)mp->b_rptr;
4732 
4733 	if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) ||
4734 	    multi->dl_addr_length != macinfo->gldm_addrlen)
4735 		return (DL_BADADDR);
4736 
4737 	maddr = mp->b_rptr + multi->dl_addr_offset;
4738 
4739 	/* request appears to be valid */
4740 	/* does this address appear in current table? */
4741 	GLDM_LOCK(macinfo, RW_WRITER);
4742 	if (gld->gld_mcast != NULL) {
4743 		for (i = 0; i < gld->gld_multicnt; i++)
4744 			if (((mcast = gld->gld_mcast[i]) != NULL) &&
4745 			    mac_eq(mcast->gldm_addr,
4746 			    maddr, macinfo->gldm_addrlen)) {
4747 				ASSERT(mcast->gldm_refcnt);
4748 				gld_send_disable_multi(macinfo, mcast);
4749 				gld->gld_mcast[i] = NULL;
4750 				GLDM_UNLOCK(macinfo);
4751 				dlokack(q, mp, DL_DISABMULTI_REQ);
4752 				return (GLDE_OK);
4753 			}
4754 	}
4755 	GLDM_UNLOCK(macinfo);
4756 	return (DL_NOTENAB); /* not an enabled address */
4757 }
4758 
4759 /*
4760  * gld_send_disable_multi(macinfo, mcast)
4761  * Drop a reference to a multicast address; when the reference count
4762  * reaches zero, the disable request is forwarded to the device driver
4763  * via its gldm_set_multicast() entry point.
4764  */
4765 static void
4766 gld_send_disable_multi(gld_mac_info_t *macinfo, gld_mcast_t *mcast)
4767 {
4768 	ASSERT(macinfo != NULL);
4769 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
4770 	ASSERT(mcast != NULL);
4771 	ASSERT(mcast->gldm_refcnt);
4772 
4773 	if (!mcast->gldm_refcnt) {
4774 		return;			/* "cannot happen" */
4775 	}
4776 
4777 	if (--mcast->gldm_refcnt > 0) {
4778 		return;
4779 	}
4780 
4781 	/*
4782 	 * This must be converted from canonical form to device form.
4783 	 * The refcnt is now zero so we can trash the data.
4784 	 */
4785 	if (macinfo->gldm_options & GLDOPT_CANONICAL_ADDR)
4786 		gld_bitreverse(mcast->gldm_addr, macinfo->gldm_addrlen);
4787 
4788 	/* XXX Ought to check for GLD_NORESOURCES or GLD_FAILURE */
4789 	(void) (*macinfo->gldm_set_multicast)
4790 	    (macinfo, mcast->gldm_addr, GLD_MULTI_DISABLE);
4791 }
4792 
4793 /*
4794  * gld_promisc (q, mp, req, on)
4795  *	enable or disable the use of promiscuous mode with the hardware
4796  */
4797 static int
4798 gld_promisc(queue_t *q, mblk_t *mp, t_uscalar_t req, boolean_t on)
4799 {
4800 	gld_t *gld;
4801 	gld_mac_info_t *macinfo;
4802 	gld_mac_pvt_t *mac_pvt;
4803 	gld_vlan_t *vlan;
4804 	union DL_primitives *prim;
4805 	int macrc = GLD_SUCCESS;
4806 	int dlerr = GLDE_OK;
4807 	int op = GLD_MAC_PROMISC_NOOP;
4808 
4809 #ifdef GLD_DEBUG
4810 	if (gld_debug & GLDTRACE)
4811 		cmn_err(CE_NOTE, "gld_promisc(%p, %p, %d, %d)",
4812 		    (void *)q, (void *)mp, req, on);
4813 #endif
4814 
4815 	ASSERT(mp != NULL);
4816 	prim = (union DL_primitives *)mp->b_rptr;
4817 
4818 	/* XXX I think spec allows promisc in unattached state */
4819 	gld = (gld_t *)q->q_ptr;
4820 	if (gld->gld_state == DL_UNATTACHED)
4821 		return (DL_OUTSTATE);
4822 
4823 	macinfo = gld->gld_mac_info;
4824 	ASSERT(macinfo != NULL);
4825 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4826 
4827 	vlan = (gld_vlan_t *)gld->gld_vlan;
4828 	ASSERT(vlan != NULL);
4829 
4830 	GLDM_LOCK(macinfo, RW_WRITER);
4831 
4832 	/*
4833 	 * Work out what request (if any) has to be made to the MAC layer
4834 	 */
4835 	if (on) {
4836 		switch (prim->promiscon_req.dl_level) {
4837 		default:
4838 			dlerr = DL_UNSUPPORTED;	/* this is an error */
4839 			break;
4840 
4841 		case DL_PROMISC_PHYS:
4842 			if (mac_pvt->nprom == 0)
4843 				op = GLD_MAC_PROMISC_PHYS;
4844 			break;
4845 
4846 		case DL_PROMISC_MULTI:
4847 			if (mac_pvt->nprom_multi == 0)
4848 				if (mac_pvt->nprom == 0)
4849 					op = GLD_MAC_PROMISC_MULTI;
4850 			break;
4851 
4852 		case DL_PROMISC_SAP:
4853 			/* We can do this without reference to the MAC */
4854 			break;
4855 		}
4856 	} else {
4857 		switch (prim->promiscoff_req.dl_level) {
4858 		default:
4859 			dlerr = DL_UNSUPPORTED;	/* this is an error */
4860 			break;
4861 
4862 		case DL_PROMISC_PHYS:
4863 			if (!(gld->gld_flags & GLD_PROM_PHYS))
4864 				dlerr = DL_NOTENAB;
4865 			else if (mac_pvt->nprom == 1)
4866 				if (mac_pvt->nprom_multi)
4867 					op = GLD_MAC_PROMISC_MULTI;
4868 				else
4869 					op = GLD_MAC_PROMISC_NONE;
4870 			break;
4871 
4872 		case DL_PROMISC_MULTI:
4873 			if (!(gld->gld_flags & GLD_PROM_MULT))
4874 				dlerr = DL_NOTENAB;
4875 			else if (mac_pvt->nprom_multi == 1)
4876 				if (mac_pvt->nprom == 0)
4877 					op = GLD_MAC_PROMISC_NONE;
4878 			break;
4879 
4880 		case DL_PROMISC_SAP:
4881 			if (!(gld->gld_flags & GLD_PROM_SAP))
4882 				dlerr = DL_NOTENAB;
4883 
4884 			/* We can do this without reference to the MAC */
4885 			break;
4886 		}
4887 	}
4888 
4889 	/*
4890 	 * The request was invalid in some way so no need to continue.
4891 	 */
4892 	if (dlerr != GLDE_OK) {
4893 		GLDM_UNLOCK(macinfo);
4894 		return (dlerr);
4895 	}
4896 
4897 	/*
4898 	 * Issue the request to the MAC layer, if required
4899 	 */
4900 	if (op != GLD_MAC_PROMISC_NOOP) {
4901 		macrc = (*macinfo->gldm_set_promiscuous)(macinfo, op);
4902 	}
4903 
4904 	/*
4905 	 * On success, update the appropriate flags & refcounts
4906 	 */
4907 	if (macrc == GLD_SUCCESS) {
4908 		if (on) {
4909 			switch (prim->promiscon_req.dl_level) {
4910 			case DL_PROMISC_PHYS:
4911 				mac_pvt->nprom++;
4912 				gld->gld_flags |= GLD_PROM_PHYS;
4913 				break;
4914 
4915 			case DL_PROMISC_MULTI:
4916 				mac_pvt->nprom_multi++;
4917 				gld->gld_flags |= GLD_PROM_MULT;
4918 				break;
4919 
4920 			case DL_PROMISC_SAP:
4921 				gld->gld_flags |= GLD_PROM_SAP;
4922 				break;
4923 
4924 			default:
4925 				break;
4926 			}
4927 		} else {
4928 			switch (prim->promiscoff_req.dl_level) {
4929 			case DL_PROMISC_PHYS:
4930 				mac_pvt->nprom--;
4931 				gld->gld_flags &= ~GLD_PROM_PHYS;
4932 				break;
4933 
4934 			case DL_PROMISC_MULTI:
4935 				mac_pvt->nprom_multi--;
4936 				gld->gld_flags &= ~GLD_PROM_MULT;
4937 				break;
4938 
4939 			case DL_PROMISC_SAP:
4940 				gld->gld_flags &= ~GLD_PROM_SAP;
4941 				break;
4942 
4943 			default:
4944 				break;
4945 			}
4946 		}
4947 	} else if (macrc == GLD_RETRY) {
4948 		/*
4949 		 * The putbq and gld_xwait must be within the lock to
4950 		 * prevent races with gld_sched.
4951 		 */
4952 		(void) putbq(q, mp);
4953 		gld->gld_xwait = B_TRUE;
4954 	}
4955 
4956 	/*
4957 	 * Update VLAN IPQ status -- it may have changed
4958 	 */
4959 	if (gld->gld_flags & (GLD_PROM_SAP | GLD_PROM_MULT | GLD_PROM_PHYS))
4960 		vlan->gldv_ipq_flags |= IPQ_FORBIDDEN;
4961 	else
4962 		vlan->gldv_ipq_flags &= ~IPQ_FORBIDDEN;
4963 
4964 	GLDM_UNLOCK(macinfo);
4965 
4966 	/*
4967 	 * Finally, decide how to reply.
4968 	 *
4969 	 * If <macrc> is not GLD_SUCCESS, the request was put to the MAC
4970 	 * layer but failed.  In such cases, we can return a DL_* error
4971 	 * code and let the caller send an error-ack reply upstream, or
4972 	 * we can send a reply here and then return GLDE_OK so that the
4973 	 * caller doesn't also respond.
4974 	 *
4975 	 * If physical-promiscuous mode was (successfully) switched on or
4976 	 * off, send a notification (DL_NOTIFY_IND) to anyone interested.
4977 	 */
4978 	switch (macrc) {
4979 	case GLD_NOTSUPPORTED:
4980 		return (DL_NOTSUPPORTED);
4981 
4982 	case GLD_NORESOURCES:
4983 		dlerrorack(q, mp, req, DL_SYSERR, ENOSR);
4984 		return (GLDE_OK);
4985 
4986 	case GLD_RETRY:
4987 		return (GLDE_RETRY);
4988 
4989 	default:
4990 		dlerrorack(q, mp, req, DL_SYSERR, EIO);
4991 		return (GLDE_OK);
4992 
4993 	case GLD_SUCCESS:
4994 		dlokack(q, mp, req);
4995 		break;
4996 	}
4997 
4998 	switch (op) {
4999 	case GLD_MAC_PROMISC_NOOP:
5000 		break;
5001 
5002 	case GLD_MAC_PROMISC_PHYS:
5003 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_ON_PHYS, NULL);
5004 		break;
5005 
5006 	default:
5007 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL);
5008 		break;
5009 	}
5010 
5011 	return (GLDE_OK);
5012 }
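
/*
 * The GLD_MAC_PROMISC_* value computed above is handed to the driver's
 * gldm_set_promiscuous() entry point, which puts the hardware into the
 * requested receive mode.  Note that the mode is absolute, not
 * incremental: GLD does the reference counting.  A hedged sketch (the
 * xx_* names are hypothetical):
 *
 *	static int
 *	xx_set_promiscuous(gld_mac_info_t *macinfo, int mode)
 *	{
 *		struct xx_softc *sc =
 *		    (struct xx_softc *)macinfo->gldm_private;
 *
 *		switch (mode) {
 *		case GLD_MAC_PROMISC_PHYS:
 *			return (xx_rxmode(sc, XX_RX_PROMISC));
 *		case GLD_MAC_PROMISC_MULTI:
 *			return (xx_rxmode(sc, XX_RX_ALLMULTI));
 *		case GLD_MAC_PROMISC_NONE:
 *		default:
 *			return (xx_rxmode(sc, XX_RX_NORMAL));
 *		}
 *	}
 *
 * where xx_rxmode() returns GLD_SUCCESS or GLD_FAILURE.
 */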
5013 
5014 /*
5015  * gld_physaddr()
5016  *	get the current or factory physical address value
5017  */
5018 static int
5019 gld_physaddr(queue_t *q, mblk_t *mp)
5020 {
5021 	gld_t *gld = (gld_t *)q->q_ptr;
5022 	gld_mac_info_t *macinfo;
5023 	union DL_primitives *prim = (union DL_primitives *)mp->b_rptr;
5024 	unsigned char addr[GLD_MAX_ADDRLEN];
5025 
5026 	if (gld->gld_state == DL_UNATTACHED)
5027 		return (DL_OUTSTATE);
5028 
5029 	macinfo = (gld_mac_info_t *)gld->gld_mac_info;
5030 	ASSERT(macinfo != NULL);
5031 	ASSERT(macinfo->gldm_addrlen <= GLD_MAX_ADDRLEN);
5032 
5033 	switch (prim->physaddr_req.dl_addr_type) {
5034 	case DL_FACT_PHYS_ADDR:
5035 		mac_copy((caddr_t)macinfo->gldm_vendor_addr,
5036 		    (caddr_t)addr, macinfo->gldm_addrlen);
5037 		break;
5038 	case DL_CURR_PHYS_ADDR:
5039 		/* make a copy so we don't hold the lock across qreply */
5040 		GLDM_LOCK(macinfo, RW_WRITER);
5041 		mac_copy((caddr_t)
5042 		    ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
5043 		    (caddr_t)addr, macinfo->gldm_addrlen);
5044 		GLDM_UNLOCK(macinfo);
5045 		break;
5046 	default:
5047 		return (DL_BADPRIM);
5048 	}
5049 	dlphysaddrack(q, mp, (caddr_t)addr, macinfo->gldm_addrlen);
5050 	return (GLDE_OK);
5051 }
5052 
5053 /*
5054  * gld_setaddr()
5055  *	change the hardware's physical address to a user specified value
5056  */
5057 static int
5058 gld_setaddr(queue_t *q, mblk_t *mp)
5059 {
5060 	gld_t *gld = (gld_t *)q->q_ptr;
5061 	gld_mac_info_t *macinfo;
5062 	gld_mac_pvt_t *mac_pvt;
5063 	union DL_primitives *prim = (union DL_primitives *)mp->b_rptr;
5064 	unsigned char *addr;
5065 	unsigned char cmaddr[GLD_MAX_ADDRLEN];
5066 	int rc;
5067 	gld_vlan_t *vlan;
5068 
5069 	if (gld->gld_state == DL_UNATTACHED)
5070 		return (DL_OUTSTATE);
5071 
5072 	vlan = (gld_vlan_t *)gld->gld_vlan;
5073 	ASSERT(vlan != NULL);
5074 
5075 	if (vlan->gldv_id != VLAN_VID_NONE)
5076 		return (DL_NOTSUPPORTED);
5077 
5078 	macinfo = (gld_mac_info_t *)gld->gld_mac_info;
5079 	ASSERT(macinfo != NULL);
5080 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5081 
5082 	if (!MBLKIN(mp, prim->set_physaddr_req.dl_addr_offset,
5083 	    prim->set_physaddr_req.dl_addr_length) ||
5084 	    prim->set_physaddr_req.dl_addr_length != macinfo->gldm_addrlen)
5085 		return (DL_BADADDR);
5086 
5087 	GLDM_LOCK(macinfo, RW_WRITER);
5088 
5089 	/* now do the set at the hardware level */
5090 	addr = mp->b_rptr + prim->set_physaddr_req.dl_addr_offset;
5091 	ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
5092 	cmac_copy(addr, cmaddr, macinfo->gldm_addrlen, macinfo);
5093 
5094 	rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr);
5095 	if (rc == GLD_SUCCESS)
5096 		mac_copy(addr, mac_pvt->curr_macaddr,
5097 		    macinfo->gldm_addrlen);
5098 
5099 	GLDM_UNLOCK(macinfo);
5100 
5101 	switch (rc) {
5102 	case GLD_SUCCESS:
5103 		break;
5104 	case GLD_NOTSUPPORTED:
5105 		return (DL_NOTSUPPORTED);
5106 	case GLD_BADARG:
5107 		return (DL_BADADDR);
5108 	case GLD_NORESOURCES:
5109 		dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, ENOSR);
5110 		return (GLDE_OK);
5111 	default:
5112 		dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, EIO);
5113 		return (GLDE_OK);
5114 	}
5115 
5116 	gld_notify_ind(macinfo, DL_NOTE_PHYS_ADDR, NULL);
5117 
5118 	dlokack(q, mp, DL_SET_PHYS_ADDR_REQ);
5119 	return (GLDE_OK);
5120 }
5121 
5122 int
5123 gld_get_statistics(queue_t *q, mblk_t *mp)
5124 {
5125 	dl_get_statistics_ack_t *dlsp;
5126 	gld_t  *gld = (gld_t *)q->q_ptr;
5127 	gld_mac_info_t *macinfo = gld->gld_mac_info;
5128 	gld_mac_pvt_t *mac_pvt;
5129 
5130 	if (gld->gld_state == DL_UNATTACHED)
5131 		return (DL_OUTSTATE);
5132 
5133 	ASSERT(macinfo != NULL);
5134 
5135 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5136 	(void) gld_update_kstat(mac_pvt->kstatp, KSTAT_READ);
5137 
5138 	mp = mexchange(q, mp, DL_GET_STATISTICS_ACK_SIZE +
5139 	    sizeof (struct gldkstats), M_PCPROTO, DL_GET_STATISTICS_ACK);
5140 
5141 	if (mp == NULL)
5142 		return (GLDE_OK);	/* mexchange already sent merror */
5143 
5144 	dlsp = (dl_get_statistics_ack_t *)mp->b_rptr;
5145 	dlsp->dl_primitive = DL_GET_STATISTICS_ACK;
5146 	dlsp->dl_stat_length = sizeof (struct gldkstats);
5147 	dlsp->dl_stat_offset = DL_GET_STATISTICS_ACK_SIZE;
5148 
5149 	GLDM_LOCK(macinfo, RW_WRITER);
5150 	bcopy(mac_pvt->kstatp->ks_data,
5151 	    (mp->b_rptr + DL_GET_STATISTICS_ACK_SIZE),
5152 	    sizeof (struct gldkstats));
5153 	GLDM_UNLOCK(macinfo);
5154 
5155 	qreply(q, mp);
5156 	return (GLDE_OK);
5157 }
5158 
5159 /* =================================================== */
5160 /* misc utilities, some requiring various mutexes held */
5161 /* =================================================== */
5162 
5163 /*
5164  * Initialize and start the driver.
5165  */
5166 static int
5167 gld_start_mac(gld_mac_info_t *macinfo)
5168 {
5169 	int	rc;
5170 	unsigned char cmaddr[GLD_MAX_ADDRLEN];
5171 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5172 
5173 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5174 	ASSERT(!mac_pvt->started);
5175 
5176 	rc = (*macinfo->gldm_reset)(macinfo);
5177 	if (rc != GLD_SUCCESS)
5178 		return (GLD_FAILURE);
5179 
5180 	/* set the addr after we reset the device */
5181 	ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
5182 	cmac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)
5183 	    ->curr_macaddr, cmaddr, macinfo->gldm_addrlen, macinfo);
5184 
5185 	rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr);
5186 	ASSERT(rc != GLD_BADARG);  /* this address was good before */
5187 	if (rc != GLD_SUCCESS && rc != GLD_NOTSUPPORTED)
5188 		return (GLD_FAILURE);
5189 
5190 	rc = (*macinfo->gldm_start)(macinfo);
5191 	if (rc != GLD_SUCCESS)
5192 		return (GLD_FAILURE);
5193 
5194 	mac_pvt->started = B_TRUE;
5195 	return (GLD_SUCCESS);
5196 }
5197 
5198 /*
5199  * Stop the driver.
5200  */
5201 static void
5202 gld_stop_mac(gld_mac_info_t *macinfo)
5203 {
5204 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5205 
5206 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5207 	ASSERT(mac_pvt->started);
5208 
5209 	(void) (*macinfo->gldm_stop)(macinfo);
5210 
5211 	mac_pvt->started = B_FALSE;
5212 }
5213 
5214 
5215 /*
5216  * gld_set_ipq will set a pointer to the queue which is bound to the
5217  * IP sap if:
5218  * o the device type is ethernet or IPoIB.
5219  * o there is no stream in SAP promiscuous mode.
5220  * o there is exactly one stream bound to the IP sap.
5221  * o the stream is in "fastpath" mode.
5222  */
5223 static void
5224 gld_set_ipq(gld_t *gld)
5225 {
5226 	gld_vlan_t	*vlan;
5227 	gld_mac_info_t	*macinfo = gld->gld_mac_info;
5228 	gld_t		*ip_gld = NULL;
5229 	uint_t		ipq_candidates = 0;
5230 	gld_t		*ipv6_gld = NULL;
5231 	uint_t		ipv6q_candidates = 0;
5232 
5233 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5234 
5235 	/* The ipq code in gld_recv() is intimate with ethernet/IPoIB */
5236 	if (((macinfo->gldm_type != DL_ETHER) &&
5237 	    (macinfo->gldm_type != DL_IB)) ||
5238 	    (gld_global_options & GLD_OPT_NO_IPQ))
5239 		return;
5240 
5241 	vlan = (gld_vlan_t *)gld->gld_vlan;
5242 	ASSERT(vlan != NULL);
5243 
5244 	/* clear down any previously defined ipqs */
5245 	vlan->gldv_ipq = NULL;
5246 	vlan->gldv_ipv6q = NULL;
5247 
5248 	/* Try to find a single stream eligible to receive IP packets */
5249 	for (gld = vlan->gldv_str_next;
5250 	    gld != (gld_t *)&vlan->gldv_str_next; gld = gld->gld_next) {
5251 		if (gld->gld_state != DL_IDLE)
5252 			continue;	/* not eligible to receive */
5253 		if (gld->gld_flags & GLD_STR_CLOSING)
5254 			continue;	/* not eligible to receive */
5255 
5256 		if (gld->gld_sap == ETHERTYPE_IP) {
5257 			ip_gld = gld;
5258 			ipq_candidates++;
5259 		}
5260 
5261 		if (gld->gld_sap == ETHERTYPE_IPV6) {
5262 			ipv6_gld = gld;
5263 			ipv6q_candidates++;
5264 		}
5265 	}
5266 
5267 	if (ipq_candidates == 1) {
5268 		ASSERT(ip_gld != NULL);
5269 
5270 		if (ip_gld->gld_flags & GLD_FAST)	/* eligible for ipq */
5271 			vlan->gldv_ipq = ip_gld->gld_qptr;
5272 	}
5273 
5274 	if (ipv6q_candidates == 1) {
5275 		ASSERT(ipv6_gld != NULL);
5276 
5277 		if (ipv6_gld->gld_flags & GLD_FAST)	/* eligible for ipq */
5278 			vlan->gldv_ipv6q = ipv6_gld->gld_qptr;
5279 	}
5280 }
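
/*
 * Illustration (not part of gld.c): the shape of the optimization that
 * gld_set_ipq() enables.  When gldv_ipq is non-NULL, exactly one
 * fastpath stream is bound to the IP SAP, so the receive side can hand
 * the packet straight to that queue instead of matching it against
 * every stream on the VLAN.  gld_recv()'s real fastpath also strips
 * the MAC header and handles IPv6 via gldv_ipv6q; only the dispatch
 * decision is sketched here.
 */
static void
example_deliver_ip(gld_vlan_t *vlan, mblk_t *mp)
{
	queue_t *q = vlan->gldv_ipq;

	if (q != NULL && canputnext(q)) {
		/* single eligible IP stream: bypass per-stream matching */
		putnext(q, mp);
	} else {
		/*
		 * The real code falls back to the general per-stream
		 * delivery path; this sketch simply drops the packet.
		 */
		freemsg(mp);
	}
}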
5281 
5282 /*
5283  * gld_flushqueue (q)
5284  *	used by DLPI primitives that require flushing the queues.
5285  *	used by DLPI primitives that require flushing the queues;
5286  *	in practice this means DL_UNBIND_REQ.
5287 static void
5288 gld_flushqueue(queue_t *q)
5289 {
5290 	/* flush all data in both queues */
5291 	/* XXX Should these be FLUSHALL? */
5292 	flushq(q, FLUSHDATA);
5293 	flushq(WR(q), FLUSHDATA);
5294 	/* flush all the queues upstream */
5295 	(void) putctl1(q, M_FLUSH, FLUSHRW);
5296 }
5297 
5298 /*
5299  * gld_devlookup (major)
5300  * search the device table for the device with specified
5301  * major number and return a pointer to it if it exists
5302  */
5303 static glddev_t *
5304 gld_devlookup(int major)
5305 {
5306 	struct glddevice *dev;
5307 
5308 	ASSERT(mutex_owned(&gld_device_list.gld_devlock));
5309 
5310 	for (dev = gld_device_list.gld_next;
5311 	    dev != &gld_device_list;
5312 	    dev = dev->gld_next) {
5313 		ASSERT(dev);
5314 		if (dev->gld_major == major)
5315 			return (dev);
5316 	}
5317 	return (NULL);
5318 }
5319 
5320 /*
5321  * gld_findminor(device)
5322  * Returns a minor number currently unused by any stream in the current
5323  * device class (major) list.
5324  */
5325 static int
5326 gld_findminor(glddev_t *device)
5327 {
5328 	gld_t		*next;
5329 	gld_mac_info_t	*nextmac;
5330 	gld_vlan_t	*nextvlan;
5331 	int		minor;
5332 	int		i;
5333 
5334 	ASSERT(mutex_owned(&device->gld_devlock));
5335 
5336 	/* The fast way */
5337 	if (device->gld_nextminor >= GLD_MIN_CLONE_MINOR &&
5338 	    device->gld_nextminor <= GLD_MAX_CLONE_MINOR)
5339 		return (device->gld_nextminor++);
5340 
5341 	/* The steady way */
5342 	for (minor = GLD_MIN_CLONE_MINOR; minor <= GLD_MAX_CLONE_MINOR;
5343 	    minor++) {
5344 		/* Search all unattached streams */
5345 		for (next = device->gld_str_next;
5346 		    next != (gld_t *)&device->gld_str_next;
5347 		    next = next->gld_next) {
5348 			if (minor == next->gld_minor)
5349 				goto nextminor;
5350 		}
5351 		/* Search all attached streams; we don't need maclock because */
5352 		/* mac stream list is protected by devlock as well as maclock */
5353 		for (nextmac = device->gld_mac_next;
5354 		    nextmac != (gld_mac_info_t *)&device->gld_mac_next;
5355 		    nextmac = nextmac->gldm_next) {
5356 			gld_mac_pvt_t *pvt =
5357 			    (gld_mac_pvt_t *)nextmac->gldm_mac_pvt;
5358 
5359 			if (!(nextmac->gldm_GLD_flags & GLD_MAC_READY))
5360 				continue;	/* this one's not ready yet */
5361 
5362 			for (i = 0; i < VLAN_HASHSZ; i++) {
5363 				for (nextvlan = pvt->vlan_hash[i];
5364 				    nextvlan != NULL;
5365 				    nextvlan = nextvlan->gldv_next) {
5366 					for (next = nextvlan->gldv_str_next;
5367 					    next !=
5368 					    (gld_t *)&nextvlan->gldv_str_next;
5369 					    next = next->gld_next) {
5370 						if (minor == next->gld_minor)
5371 							goto nextminor;
5372 					}
5373 				}
5374 			}
5375 		}
5376 
5377 		return (minor);
5378 nextminor:
5379 		/* don't need to do anything */
5380 		;
5381 	}
5382 	cmn_err(CE_WARN, "GLD ran out of minor numbers for %s",
5383 	    device->gld_name);
5384 	return (0);
5385 }
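
/*
 * Illustration (not part of gld.c): what the returned minor is used
 * for.  In a clone-style open routine the freshly allocated minor is
 * folded back into *dev so that every open of the device node yields a
 * distinct stream.  This is only the shape of that use; the caller must
 * hold gld_devlock, as gld_findminor() asserts, and a return of 0 means
 * the clone minor space is exhausted.
 */
static int
example_clone_open(glddev_t *glddev, dev_t *dev)
{
	minor_t minor;

	ASSERT(mutex_owned(&glddev->gld_devlock));

	if ((minor = gld_findminor(glddev)) == 0)
		return (ENXIO);		/* no clone minors left */

	*dev = makedevice(getmajor(*dev), minor);
	return (0);
}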
5386 
5387 /*
5388  * version of insque/remque for use by this driver
5389  */
5390 struct qelem {
5391 	struct qelem *q_forw;
5392 	struct qelem *q_back;
5393 	/* rest of structure */
5394 };
5395 
5396 static void
5397 gldinsque(void *elem, void *pred)
5398 {
5399 	struct qelem *pelem = elem;
5400 	struct qelem *ppred = pred;
5401 	struct qelem *pnext = ppred->q_forw;
5402 
5403 	pelem->q_forw = pnext;
5404 	pelem->q_back = ppred;
5405 	ppred->q_forw = pelem;
5406 	pnext->q_back = pelem;
5407 }
5408 
5409 static void
5410 gldremque(void *arg)
5411 {
5412 	struct qelem *pelem = arg;
5413 	struct qelem *elem = arg;
5414 
5415 	pelem->q_forw->q_back = pelem->q_back;
5416 	pelem->q_back->q_forw = pelem->q_forw;
5417 	elem->q_back = elem->q_forw = NULL;
5418 }
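
/*
 * Illustration (not part of gld.c): how the driver uses gldinsque()/
 * gldremque() with a circular list whose "head" is the address of a
 * forward/back pointer pair embedded in a larger structure (compare
 * gld_add_vlan() initializing gldv_str_next/gldv_str_prev below).
 * The forward and back pointers must be the first two members of the
 * linked structure.  example_node_t and example_list_t are
 * hypothetical.
 */
typedef struct example_node {
	struct example_node	*en_next;	/* must be first */
	struct example_node	*en_prev;	/* must be second */
	int			en_value;
} example_node_t;

typedef struct example_list {
	example_node_t		*el_next;	/* sentinel "head" */
	example_node_t		*el_prev;
} example_list_t;

static int
example_list_demo(example_list_t *lp, example_node_t *np)
{
	example_node_t	*walk;
	int		count = 0;

	/* an empty list's head points back at itself */
	lp->el_next = lp->el_prev = (example_node_t *)&lp->el_next;

	/* insert np at the head; the head itself is the predecessor */
	gldinsque(np, &lp->el_next);

	/* walk the list; termination is arrival back at the head */
	for (walk = lp->el_next; walk != (example_node_t *)&lp->el_next;
	    walk = walk->en_next)
		count++;

	/* unlink np again, leaving the list empty */
	gldremque(np);

	return (count);		/* 1 */
}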
5419 
5420 static gld_vlan_t *
5421 gld_add_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5422 {
5423 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5424 	gld_vlan_t	**pp;
5425 	gld_vlan_t	*p;
5426 
5427 	pp = &(mac_pvt->vlan_hash[vid % VLAN_HASHSZ]);
5428 	while ((p = *pp) != NULL) {
5429 		ASSERT(p->gldv_id != vid);
5430 		pp = &(p->gldv_next);
5431 	}
5432 
5433 	if ((p = kmem_zalloc(sizeof (gld_vlan_t), KM_NOSLEEP)) == NULL)
5434 		return (NULL);
5435 
5436 	p->gldv_mac = macinfo;
5437 	p->gldv_id = vid;
5438 
5439 	if (vid == VLAN_VID_NONE) {
5440 		p->gldv_ptag = VLAN_VTAG_NONE;
5441 		p->gldv_stats = mac_pvt->statistics;
5442 		p->gldv_kstatp = NULL;
5443 	} else {
5444 		p->gldv_ptag = GLD_MK_PTAG(VLAN_CFI_ETHER, vid);
5445 		p->gldv_stats = kmem_zalloc(sizeof (struct gld_stats),
5446 		    KM_SLEEP);
5447 
5448 		if (gld_init_vlan_stats(p) != GLD_SUCCESS) {
5449 			kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5450 			kmem_free(p, sizeof (gld_vlan_t));
5451 			return (NULL);
5452 		}
5453 	}
5454 
5455 	p->gldv_str_next = p->gldv_str_prev = (gld_t *)&p->gldv_str_next;
5456 	mac_pvt->nvlan++;
5457 	*pp = p;
5458 
5459 	return (p);
5460 }
5461 
5462 static void
5463 gld_rem_vlan(gld_vlan_t *vlan)
5464 {
5465 	gld_mac_info_t	*macinfo = vlan->gldv_mac;
5466 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5467 	gld_vlan_t	**pp;
5468 	gld_vlan_t	*p;
5469 
5470 	pp = &(mac_pvt->vlan_hash[vlan->gldv_id % VLAN_HASHSZ]);
5471 	while ((p = *pp) != NULL) {
5472 		if (p->gldv_id == vlan->gldv_id)
5473 			break;
5474 		pp = &(p->gldv_next);
5475 	}
5476 	ASSERT(p != NULL);
5477 
5478 	*pp = p->gldv_next;
5479 	mac_pvt->nvlan--;
5480 	if (p->gldv_id != VLAN_VID_NONE) {
5481 		ASSERT(p->gldv_kstatp != NULL);
5482 		kstat_delete(p->gldv_kstatp);
5483 		kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5484 	}
5485 	kmem_free(p, sizeof (gld_vlan_t));
5486 }
5487 
5488 gld_vlan_t *
5489 gld_find_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5490 {
5491 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5492 	gld_vlan_t	*p;
5493 
5494 	p = mac_pvt->vlan_hash[vid % VLAN_HASHSZ];
5495 	while (p != NULL) {
5496 		if (p->gldv_id == vid)
5497 			return (p);
5498 		p = p->gldv_next;
5499 	}
5500 	return (NULL);
5501 }
5502 
5503 gld_vlan_t *
5504 gld_get_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5505 {
5506 	gld_vlan_t	*vlan;
5507 
5508 	if ((vlan = gld_find_vlan(macinfo, vid)) == NULL)
5509 		vlan = gld_add_vlan(macinfo, vid);
5510 
5511 	return (vlan);
5512 }
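
/*
 * Illustration (not part of gld.c): typical use of the lookup half of
 * this pair.  The receive path extracts the VLAN id from a tagged frame
 * and delivers it only if some stream has already caused that VLAN to
 * be instantiated, while gld_get_vlan() (create on demand) is used on
 * the bind path.  Tag parsing and locking are assumed to have been
 * handled by the caller.
 */
static boolean_t
example_vlan_wanted(gld_mac_info_t *macinfo, uint32_t vid)
{
	return (gld_find_vlan(macinfo, vid) != NULL ? B_TRUE : B_FALSE);
}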
5513 
5514 /*
5515  * gld_bitrevcopy()
5516  * This is essentially bcopy, with the ability to bit reverse the
5517  * the source bytes. The MAC addresses bytes as transmitted by FDDI
5518  * interfaces are bit reversed.
5519  */
5520 void
5521 gld_bitrevcopy(caddr_t src, caddr_t target, size_t n)
5522 {
5523 	while (n--)
5524 		*target++ = bit_rev[(uchar_t)*src++];
5525 }
5526 
5527 /*
5528  * gld_bitreverse()
5529  * Convert the bit order by swapping all the bits, using a
5530  * lookup table.
5531  */
5532 void
5533 gld_bitreverse(uchar_t *rptr, size_t n)
5534 {
5535 	while (n--) {
5536 		*rptr = bit_rev[*rptr];
5537 		rptr++;
5538 	}
5539 }
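
/*
 * Illustration (not part of gld.c): what each bit_rev[] table entry
 * encodes.  example_bitrev8() computes the reversal directly, so for
 * instance example_bitrev8(0x01) == 0x80 and example_bitrev8(0x0a) ==
 * 0x50, matching bit_rev[0x01] and bit_rev[0x0a].
 */
static uchar_t
example_bitrev8(uchar_t c)
{
	uchar_t	r = 0;
	int	i;

	for (i = 0; i < 8; i++) {
		r = (r << 1) | (c & 1);	/* shift out the LSB first */
		c >>= 1;
	}
	return (r);
}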
5540 
5541 char *
5542 gld_macaddr_sprintf(char *etherbuf, unsigned char *ap, int len)
5543 {
5544 	int i;
5545 	char *cp = etherbuf;
5546 	static char digits[] = "0123456789abcdef";
5547 
5548 	for (i = 0; i < len; i++) {
5549 		*cp++ = digits[*ap >> 4];
5550 		*cp++ = digits[*ap++ & 0xf];
5551 		*cp++ = ':';
5552 	}
5553 	*--cp = 0;
5554 	return (etherbuf);
5555 }
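
/*
 * Illustration (not part of gld.c): the caller supplies a buffer of at
 * least 3 * len bytes; each address byte expands to two hex digits plus
 * a ':', and the final ':' is overwritten with the terminating NUL.
 * For the (hypothetical) address below the result is
 * "00:a0:c9:12:34:56".
 */
static void
example_print_macaddr(void)
{
	unsigned char addr[ETHERADDRL] = {
		0x00, 0xa0, 0xc9, 0x12, 0x34, 0x56	/* hypothetical */
	};
	char buf[3 * ETHERADDRL];	/* 17 characters + NUL */

	cmn_err(CE_CONT, "address is %s\n",
	    gld_macaddr_sprintf(buf, addr, ETHERADDRL));
}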
5556 
5557 #ifdef GLD_DEBUG
5558 static void
5559 gld_check_assertions()
5560 {
5561 	glddev_t	*dev;
5562 	gld_mac_info_t	*mac;
5563 	gld_t		*str;
5564 	gld_vlan_t	*vlan;
5565 	int		i;
5566 
5567 	mutex_enter(&gld_device_list.gld_devlock);
5568 
5569 	for (dev = gld_device_list.gld_next;
5570 	    dev != (glddev_t *)&gld_device_list.gld_next;
5571 	    dev = dev->gld_next) {
5572 		mutex_enter(&dev->gld_devlock);
5573 		ASSERT(dev->gld_broadcast != NULL);
5574 		for (str = dev->gld_str_next;
5575 		    str != (gld_t *)&dev->gld_str_next;
5576 		    str = str->gld_next) {
5577 			ASSERT(str->gld_device == dev);
5578 			ASSERT(str->gld_mac_info == NULL);
5579 			ASSERT(str->gld_qptr != NULL);
5580 			ASSERT(str->gld_minor >= GLD_MIN_CLONE_MINOR);
5581 			ASSERT(str->gld_multicnt == 0);
5582 			ASSERT(str->gld_mcast == NULL);
5583 			ASSERT(!(str->gld_flags &
5584 			    (GLD_PROM_PHYS|GLD_PROM_MULT|GLD_PROM_SAP)));
5585 			ASSERT(str->gld_sap == 0);
5586 			ASSERT(str->gld_state == DL_UNATTACHED);
5587 		}
5588 		for (mac = dev->gld_mac_next;
5589 		    mac != (gld_mac_info_t *)&dev->gld_mac_next;
5590 		    mac = mac->gldm_next) {
5591 			int nvlan = 0;
5592 			gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
5593 
5594 			if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
5595 				continue;	/* this one's not ready yet */
5596 
5597 			GLDM_LOCK(mac, RW_WRITER);
5598 			ASSERT(mac->gldm_devinfo != NULL);
5599 			ASSERT(mac->gldm_mac_pvt != NULL);
5600 			ASSERT(pvt->interfacep != NULL);
5601 			ASSERT(pvt->kstatp != NULL);
5602 			ASSERT(pvt->statistics != NULL);
5603 			ASSERT(pvt->major_dev == dev);
5604 
5605 			for (i = 0; i < VLAN_HASHSZ; i++) {
5606 				for (vlan = pvt->vlan_hash[i];
5607 				    vlan != NULL; vlan = vlan->gldv_next) {
5608 					int nstr = 0;
5609 
5610 					ASSERT(vlan->gldv_mac == mac);
5611 
5612 					for (str = vlan->gldv_str_next;
5613 					    str !=
5614 					    (gld_t *)&vlan->gldv_str_next;
5615 					    str = str->gld_next) {
5616 						ASSERT(str->gld_device == dev);
5617 						ASSERT(str->gld_mac_info ==
5618 						    mac);
5619 						ASSERT(str->gld_qptr != NULL);
5620 						ASSERT(str->gld_minor >=
5621 						    GLD_MIN_CLONE_MINOR);
5622 						ASSERT(
5623 						    str->gld_multicnt == 0 ||
5624 						    str->gld_mcast);
5625 						nstr++;
5626 					}
5627 					ASSERT(vlan->gldv_nstreams == nstr);
5628 					nvlan++;
5629 				}
5630 			}
5631 			ASSERT(pvt->nvlan == nvlan);
5632 			GLDM_UNLOCK(mac);
5633 		}
5634 		mutex_exit(&dev->gld_devlock);
5635 	}
5636 	mutex_exit(&gld_device_list.gld_devlock);
5637 }
5638 #endif
5639