xref: /titanic_41/usr/src/uts/common/io/gld.c (revision ba2be53024c0b999e74ba9adcd7d80fec5df8c57)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * gld - Generic LAN Driver Version 2, PSARC/1997/382
30  *
31  * This is a utility module that provides generic facilities for
32  * LAN	drivers.  The DLPI protocol and most STREAMS interfaces
33  * are handled here.
34  *
35  * It no longer provides compatibility with drivers
36  * implemented according to the GLD v0 documentation published
37  * in 1993. (See PSARC 2003/728)
38  */
39 
40 
41 #include <sys/types.h>
42 #include <sys/errno.h>
43 #include <sys/stropts.h>
44 #include <sys/stream.h>
45 #include <sys/kmem.h>
46 #include <sys/stat.h>
47 #include <sys/modctl.h>
48 #include <sys/kstat.h>
49 #include <sys/debug.h>
50 #include <sys/note.h>
51 #include <sys/sysmacros.h>
52 
53 #include <sys/byteorder.h>
54 #include <sys/strsun.h>
55 #include <sys/strsubr.h>
56 #include <sys/dlpi.h>
57 #include <sys/pattr.h>
58 #include <sys/ethernet.h>
59 #include <sys/ib/clients/ibd/ibd.h>
60 #include <sys/policy.h>
61 #include <sys/atomic.h>
62 
63 #include <sys/multidata.h>
64 #include <sys/gld.h>
65 #include <sys/gldpriv.h>
66 
67 #include <sys/ddi.h>
68 #include <sys/sunddi.h>
69 
70 /*
71  * Macros to increment statistics.
72  */
73 
74 /*
75  * Increase kstats. Note this operation is not atomic. It can be used when
76  * GLDM_LOCK_HELD_WRITE(macinfo).
77  */
78 #define	BUMP(stats, vstats, stat, delta)	do {			\
79 	((stats)->stat) += (delta);					\
80 	_NOTE(CONSTANTCONDITION)					\
81 	if ((vstats) != NULL)						\
82 		((struct gld_stats *)(vstats))->stat += (delta);	\
83 	_NOTE(CONSTANTCONDITION)					\
84 } while (0)
85 
86 #define	ATOMIC_BUMP_STAT(stat, delta)	do {			\
87 	_NOTE(CONSTANTCONDITION)				\
88 	if (sizeof ((stat)) == sizeof (uint32_t)) {		\
89 		atomic_add_32((uint32_t *)&(stat), (delta));	\
90 	_NOTE(CONSTANTCONDITION)				\
91 	} else if (sizeof ((stat)) == sizeof (uint64_t)) {	\
92 		atomic_add_64((uint64_t *)&(stat), (delta));	\
93 	}							\
94 	_NOTE(CONSTANTCONDITION)				\
95 } while (0)
96 
97 #define	ATOMIC_BUMP(stats, vstats, stat, delta)	do {			\
98 	ATOMIC_BUMP_STAT((stats)->stat, (delta));			\
99 	_NOTE(CONSTANTCONDITION)					\
100 	if ((vstats) != NULL) {						\
101 		ATOMIC_BUMP_STAT(((struct gld_stats *)(vstats))->stat,	\
102 		    (delta));						\
103 	}								\
104 	_NOTE(CONSTANTCONDITION)					\
105 } while (0)
106 
107 #define	UPDATE_STATS(stats, vstats, pktinfo, delta) {			\
108 	if ((pktinfo).isBroadcast) {					\
109 		ATOMIC_BUMP((stats), (vstats),				\
110 		    glds_brdcstxmt, (delta));				\
111 	} else if ((pktinfo).isMulticast) {				\
112 		ATOMIC_BUMP((stats), (vstats), glds_multixmt, (delta));	\
113 	}								\
114 	ATOMIC_BUMP((stats), (vstats), glds_bytexmt64,			\
115 	    ((pktinfo).pktLen));					\
116 	ATOMIC_BUMP((stats), (vstats), glds_pktxmt64, (delta));		\
117 }
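/*
 * Illustrative sketch (not part of the original source): how a transmit
 * path might use these macros.  "stats" and "vstats" stand for the per-mac
 * and per-vlan gld_stats blocks and "pktinfo" for a filled-in pktinfo_t;
 * the variable names are hypothetical.
 *
 *	struct gld_stats *stats = mac_pvt->statistics;
 *	struct gld_stats *vstats = (vlan != NULL) ? vlan->gldv_stats : NULL;
 *
 *	UPDATE_STATS(stats, vstats, pktinfo, 1);	-- one packet sent
 *	ATOMIC_BUMP(stats, vstats, glds_errxmt, 1);	-- on a transmit error
 *
 * BUMP() itself is only safe while GLDM_LOCK is held as writer, since its
 * increments are not atomic.
 */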
118 
119 #ifdef GLD_DEBUG
120 int gld_debug = GLDERRS;
121 #endif
122 
123 /* called from gld_register */
124 static int gld_initstats(gld_mac_info_t *);
125 
126 /* called from kstat mechanism, and from wsrv's get_statistics */
127 static int gld_update_kstat(kstat_t *, int);
128 
129 /* statistics for additional vlans */
130 static int gld_init_vlan_stats(gld_vlan_t *);
131 static int gld_update_vlan_kstat(kstat_t *, int);
132 
133 /* called from gld_getinfo */
134 static dev_info_t *gld_finddevinfo(dev_t);
135 
136 /* called from wput, wsrv, unidata, and v0_sched to send a packet */
137 /* also from the source routing stuff for sending RDE protocol packets */
138 static int gld_start(queue_t *, mblk_t *, int, uint32_t);
139 static int gld_start_mdt(queue_t *, mblk_t *, int);
140 
141 /* called from gld_start[_mdt] to loopback packet(s) in promiscuous mode */
142 static void gld_precv(gld_mac_info_t *, mblk_t *, uint32_t, struct gld_stats *);
143 static void gld_precv_mdt(gld_mac_info_t *, gld_vlan_t *, mblk_t *,
144     pdesc_t *, pktinfo_t *);
145 
146 /* receive group: called from gld_recv and gld_precv* with maclock held */
147 static void gld_sendup(gld_mac_info_t *, pktinfo_t *, mblk_t *,
148     int (*)());
149 static int gld_accept(gld_t *, pktinfo_t *);
150 static int gld_mcmatch(gld_t *, pktinfo_t *);
151 static int gld_multicast(unsigned char *, gld_t *);
152 static int gld_paccept(gld_t *, pktinfo_t *);
153 static void gld_passon(gld_t *, mblk_t *, pktinfo_t *,
154     void (*)(queue_t *, mblk_t *));
155 static mblk_t *gld_addudind(gld_t *, mblk_t *, pktinfo_t *, boolean_t);
156 
157 /* wsrv group: called from wsrv, single threaded per queue */
158 static int gld_ioctl(queue_t *, mblk_t *);
159 static void gld_fastpath(gld_t *, queue_t *, mblk_t *);
160 static int gld_cmds(queue_t *, mblk_t *);
161 static mblk_t *gld_bindack(queue_t *, mblk_t *);
162 static int gld_notify_req(queue_t *, mblk_t *);
163 static int gld_udqos(queue_t *, mblk_t *);
164 static int gld_bind(queue_t *, mblk_t *);
165 static int gld_unbind(queue_t *, mblk_t *);
166 static int gld_inforeq(queue_t *, mblk_t *);
167 static int gld_unitdata(queue_t *, mblk_t *);
168 static int gldattach(queue_t *, mblk_t *);
169 static int gldunattach(queue_t *, mblk_t *);
170 static int gld_enable_multi(queue_t *, mblk_t *);
171 static int gld_disable_multi(queue_t *, mblk_t *);
172 static void gld_send_disable_multi(gld_mac_info_t *, gld_mcast_t *);
173 static int gld_promisc(queue_t *, mblk_t *, t_uscalar_t, boolean_t);
174 static int gld_physaddr(queue_t *, mblk_t *);
175 static int gld_setaddr(queue_t *, mblk_t *);
176 static int gld_get_statistics(queue_t *, mblk_t *);
177 static int gld_cap(queue_t *, mblk_t *);
178 static int gld_cap_ack(queue_t *, mblk_t *);
179 static int gld_cap_enable(queue_t *, mblk_t *);
180 
181 /* misc utilities, some requiring various mutexes held */
182 static int gld_start_mac(gld_mac_info_t *);
183 static void gld_stop_mac(gld_mac_info_t *);
184 static void gld_set_ipq(gld_t *);
185 static void gld_flushqueue(queue_t *);
186 static glddev_t *gld_devlookup(int);
187 static int gld_findminor(glddev_t *);
188 static void gldinsque(void *, void *);
189 static void gldremque(void *);
190 void gld_bitrevcopy(caddr_t, caddr_t, size_t);
191 void gld_bitreverse(uchar_t *, size_t);
192 char *gld_macaddr_sprintf(char *, unsigned char *, int);
193 static gld_vlan_t *gld_add_vlan(gld_mac_info_t *, uint32_t vid);
194 static void gld_rem_vlan(gld_vlan_t *);
195 gld_vlan_t *gld_find_vlan(gld_mac_info_t *, uint32_t);
196 gld_vlan_t *gld_get_vlan(gld_mac_info_t *, uint32_t);
197 
198 #ifdef GLD_DEBUG
199 static void gld_check_assertions(void);
200 extern void gld_sr_dump(gld_mac_info_t *);
201 #endif
202 
203 /*
204  * Allocate and zero-out "number" structures each of type "structure" in
205  * kernel memory.
206  */
207 #define	GLD_GETSTRUCT(structure, number)   \
208 	(kmem_zalloc((uint_t)(sizeof (structure) * (number)), KM_NOSLEEP))
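/*
 * Illustrative note (not part of the original source): the allocation uses
 * KM_NOSLEEP, so callers must check for NULL, as gld_register() does below:
 *
 *	glddev = GLD_GETSTRUCT(glddev_t, 1);
 *	if (glddev == NULL)
 *		return (DDI_FAILURE);
 *
 * and the memory is later released with kmem_free() of the same size.
 */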
209 
210 #define	abs(a) ((a) < 0 ? -(a) : (a))
211 
212 uint32_t gld_global_options = GLD_OPT_NO_ETHRXSNAP;
213 
214 /*
215  * The device is of DL_ETHER type and is able to support VLAN by itself.
216  */
217 #define	VLAN_CAPABLE(macinfo) \
218 	((macinfo)->gldm_type == DL_ETHER && \
219 	(macinfo)->gldm_send_tagged != NULL)
220 
221 /*
222  * The set of notifications generatable by GLD itself, the additional
223  * set that can be generated if the MAC driver provides the link-state
224  * tracking callback capability, and the set supported by the GLD
225  * notification code below.
226  *
227  * PLEASE keep these in sync with what the code actually does!
228  */
229 static const uint32_t gld_internal_notes =	DL_NOTE_PROMISC_ON_PHYS |
230 						DL_NOTE_PROMISC_OFF_PHYS |
231 						DL_NOTE_PHYS_ADDR;
232 static const uint32_t gld_linkstate_notes =	DL_NOTE_LINK_DOWN |
233 						DL_NOTE_LINK_UP |
234 						DL_NOTE_SPEED;
235 static const uint32_t gld_supported_notes =	DL_NOTE_PROMISC_ON_PHYS |
236 						DL_NOTE_PROMISC_OFF_PHYS |
237 						DL_NOTE_PHYS_ADDR |
238 						DL_NOTE_LINK_DOWN |
239 						DL_NOTE_LINK_UP |
240 						DL_NOTE_SPEED;
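/*
 * Illustrative sketch (not part of the original source) of how these sets
 * combine in gld_register() below: start with what GLD generates
 * internally, add the link-state set when the driver declares
 * GLD_CAP_LINKSTATE, then clip to what the notification code supports:
 *
 *	notifications = gld_internal_notes;
 *	if (macinfo->gldm_capabilities & GLD_CAP_LINKSTATE)
 *		notifications |= gld_linkstate_notes;
 *	notifications &= gld_supported_notes;
 */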
241 
242 /* Media must correspond to #defines in gld.h */
243 static char *gld_media[] = {
244 	"unknown",	/* GLDM_UNKNOWN - driver cannot determine media */
245 	"aui",		/* GLDM_AUI */
246 	"bnc",		/* GLDM_BNC */
247 	"twpair",	/* GLDM_TP */
248 	"fiber",	/* GLDM_FIBER */
249 	"100baseT",	/* GLDM_100BT */
250 	"100vgAnyLan",	/* GLDM_VGANYLAN */
251 	"10baseT",	/* GLDM_10BT */
252 	"ring4",	/* GLDM_RING4 */
253 	"ring16",	/* GLDM_RING16 */
254 	"PHY/MII",	/* GLDM_PHYMII */
255 	"100baseTX",	/* GLDM_100BTX */
256 	"100baseT4",	/* GLDM_100BT4 */
257 	"unknown",	/* skip */
258 	"ipib",		/* GLDM_IB */
259 };
260 
261 /* Must correspond to #defines in gld.h */
262 static char *gld_duplex[] = {
263 	"unknown",	/* GLD_DUPLEX_UNKNOWN - not known or not applicable */
264 	"half",		/* GLD_DUPLEX_HALF */
265 	"full"		/* GLD_DUPLEX_FULL */
266 };
267 
268 /*
269  * Interface types currently supported by GLD.
270  * If you add new types, you must check all "XXX" strings in the GLD source
271  * for implementation issues that may affect the support of your new type.
272  * In particular, any type with gldm_addrlen > 6, or gldm_saplen != -2, will
273  * require generalizing this GLD source to handle the new cases.  In other
274  * words there are assumptions built into the code in a few places that must
275  * be fixed.  Be sure to turn on DEBUG/ASSERT code when testing a new type.
276  */
277 static gld_interface_t interfaces[] = {
278 
279 	/* Ethernet Bus */
280 	{
281 		DL_ETHER,
282 		(uint_t)-1,
283 		sizeof (struct ether_header),
284 		gld_interpret_ether,
285 		NULL,
286 		gld_fastpath_ether,
287 		gld_unitdata_ether,
288 		gld_init_ether,
289 		gld_uninit_ether,
290 		"ether"
291 	},
292 
293 	/* Fiber Distributed Data Interface */
294 	{
295 		DL_FDDI,
296 		4352,
297 		sizeof (struct fddi_mac_frm),
298 		gld_interpret_fddi,
299 		NULL,
300 		gld_fastpath_fddi,
301 		gld_unitdata_fddi,
302 		gld_init_fddi,
303 		gld_uninit_fddi,
304 		"fddi"
305 	},
306 
307 	/* Token Ring interface */
308 	{
309 		DL_TPR,
310 		17914,
311 		-1,			/* variable header size */
312 		gld_interpret_tr,
313 		NULL,
314 		gld_fastpath_tr,
315 		gld_unitdata_tr,
316 		gld_init_tr,
317 		gld_uninit_tr,
318 		"tpr"
319 	},
320 
321 	/* Infiniband */
322 	{
323 		DL_IB,
324 		4092,
325 		sizeof (struct ipoib_header),
326 		gld_interpret_ib,
327 		gld_interpret_mdt_ib,
328 		gld_fastpath_ib,
329 		gld_unitdata_ib,
330 		gld_init_ib,
331 		gld_uninit_ib,
332 		"ipib"
333 	},
334 };
335 
336 /*
337  * bit reversal lookup table.
338  */
339 static	uchar_t bit_rev[] = {
340 	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0,
341 	0x30, 0xb0, 0x70, 0xf0, 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
342 	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 0x04, 0x84, 0x44, 0xc4,
343 	0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
344 	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc,
345 	0x3c, 0xbc, 0x7c, 0xfc, 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
346 	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, 0x0a, 0x8a, 0x4a, 0xca,
347 	0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
348 	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6,
349 	0x36, 0xb6, 0x76, 0xf6, 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
350 	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 0x01, 0x81, 0x41, 0xc1,
351 	0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
352 	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9,
353 	0x39, 0xb9, 0x79, 0xf9, 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
354 	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, 0x0d, 0x8d, 0x4d, 0xcd,
355 	0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
356 	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3,
357 	0x33, 0xb3, 0x73, 0xf3, 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
358 	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 0x07, 0x87, 0x47, 0xc7,
359 	0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
360 	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf,
361 	0x3f, 0xbf, 0x7f, 0xff,
362 };
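/*
 * Illustrative note (not part of the original source): bit_rev[i] is byte i
 * with its bit order mirrored, e.g. bit_rev[0x01] == 0x80, bit_rev[0x02] ==
 * 0x40 and bit_rev[0xff] == 0xff.  gld_bitreverse() and gld_bitrevcopy()
 * below apply the table one byte at a time, for example when converting
 * addresses between canonical and wire bit order.
 */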
363 
364 /*
365  * User priorities, mapped from b_band.
366  */
367 static uint32_t user_priority[] = {
368 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
369 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
370 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
371 	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
372 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
373 	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
374 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
375 	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
376 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
377 	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
378 	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
379 	5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
380 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
381 	6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
382 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
383 	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
384 };
385 
386 #define	UPRI(gld, band)	((band != 0) ? user_priority[(band)] : (gld)->gld_upri)
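/*
 * Illustrative note (not part of the original source): the table maps
 * b_band to a user priority in blocks of 32, i.e. priority = band / 32.
 * For example, UPRI(gld, 0) yields the stream's configured gld_upri,
 * while UPRI(gld, 40) yields user priority 1.
 */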
387 
388 static struct glddevice gld_device_list;  /* Per-system root of GLD tables */
389 
390 /*
391  * Module linkage information for the kernel.
392  */
393 
394 static struct modlmisc modlmisc = {
395 	&mod_miscops,		/* Type of module - a utility provider */
396 	"Generic LAN Driver (" GLD_VERSION_STRING ") %I%"
397 #ifdef GLD_DEBUG
398 	" DEBUG"
399 #endif
400 };
401 
402 static struct modlinkage modlinkage = {
403 	MODREV_1, &modlmisc, NULL
404 };
405 
406 int
407 _init(void)
408 {
409 	int e;
410 
411 	/* initialize gld_device_list mutex */
412 	mutex_init(&gld_device_list.gld_devlock, NULL, MUTEX_DRIVER, NULL);
413 
414 	/* initialize device driver (per-major) list */
415 	gld_device_list.gld_next =
416 	    gld_device_list.gld_prev = &gld_device_list;
417 
418 	if ((e = mod_install(&modlinkage)) != 0)
419 		mutex_destroy(&gld_device_list.gld_devlock);
420 
421 	return (e);
422 }
423 
424 int
425 _fini(void)
426 {
427 	int e;
428 
429 	if ((e = mod_remove(&modlinkage)) != 0)
430 		return (e);
431 
432 	ASSERT(gld_device_list.gld_next ==
433 	    (glddev_t *)&gld_device_list.gld_next);
434 	ASSERT(gld_device_list.gld_prev ==
435 	    (glddev_t *)&gld_device_list.gld_next);
436 	mutex_destroy(&gld_device_list.gld_devlock);
437 
438 	return (e);
439 }
440 
441 int
442 _info(struct modinfo *modinfop)
443 {
444 	return (mod_info(&modlinkage, modinfop));
445 }
446 
447 /*
448  * GLD service routines
449  */
450 
451 /* So this gld binary may remain forward compatible with future v2 drivers */
452 #define	GLD_MAC_RESERVED (16 * sizeof (caddr_t))
453 
454 /*ARGSUSED*/
455 gld_mac_info_t *
456 gld_mac_alloc(dev_info_t *devinfo)
457 {
458 	gld_mac_info_t *macinfo;
459 
460 	macinfo = kmem_zalloc(sizeof (gld_mac_info_t) + GLD_MAC_RESERVED,
461 	    KM_SLEEP);
462 
463 	/*
464 	 * The setting of gldm_driver_version will not be documented or allowed
465 	 * until a future release.
466 	 */
467 	macinfo->gldm_driver_version = GLD_VERSION_200;
468 
469 	/*
470 	 * GLD's version.  This also is undocumented for now, but will be
471 	 * available if needed in the future.
472 	 */
473 	macinfo->gldm_GLD_version = GLD_VERSION;
474 
475 	return (macinfo);
476 }
477 
478 /*
479  * gld_mac_free must be called after the driver has removed interrupts
480  * and completely stopped calling gld_recv() and gld_sched().  At that
481  * point the interrupt routine is guaranteed by the system to have been
482  * exited and the maclock is no longer needed.  Of course, it is
483  * expected (required) that (assuming gld_register() succeeded),
484  * gld_unregister() was called before gld_mac_free().
485  */
486 void
487 gld_mac_free(gld_mac_info_t *macinfo)
488 {
489 	ASSERT(macinfo);
490 	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
491 
492 	/*
493 	 * Assert that if we made it through gld_register, then we must
494 	 * have unregistered.
495 	 */
496 	ASSERT(!GLDM_LOCK_INITED(macinfo) ||
497 	    (macinfo->gldm_GLD_flags & GLD_UNREGISTERED));
498 
499 	GLDM_LOCK_DESTROY(macinfo);
500 
501 	kmem_free(macinfo, sizeof (gld_mac_info_t) + GLD_MAC_RESERVED);
502 }
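/*
 * Illustrative sketch (not part of the original source) of the ordering a
 * hypothetical driver's detach(9E) routine would follow, per the comment
 * above gld_mac_free(); the xx_* names are placeholders:
 *
 *	xx_stop_hw(softc);		-- interrupts off; no further calls
 *					   into gld_recv() or gld_sched()
 *	if (gld_unregister(macinfo) != DDI_SUCCESS)
 *		return (DDI_FAILURE);	-- still in use; leave it registered
 *	gld_mac_free(macinfo);		-- must come last
 *	return (DDI_SUCCESS);
 */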
503 
504 /*
505  * gld_register -- called once per device instance (PPA)
506  *
507  * During its attach routine, a real device driver will register with GLD
508  * so that later opens and dl_attach_reqs will work.  The arguments are the
509  * devinfo pointer, the device name, and a macinfo structure describing the
510  * physical device instance.
511  */
512 int
513 gld_register(dev_info_t *devinfo, char *devname, gld_mac_info_t *macinfo)
514 {
515 	int mediatype;
516 	int major = ddi_name_to_major(devname), i;
517 	glddev_t *glddev;
518 	gld_mac_pvt_t *mac_pvt;
519 	char minordev[32];
520 	char pbuf[3*GLD_MAX_ADDRLEN];
521 	gld_interface_t *ifp = NULL;
522 
523 	ASSERT(devinfo != NULL);
524 	ASSERT(macinfo != NULL);
525 
526 	if (macinfo->gldm_driver_version != GLD_VERSION)
527 		return (DDI_FAILURE);
528 
529 	mediatype = macinfo->gldm_type;
530 
531 	/*
532 	 * Entry points should be ready for us.
533 	 * ioctl is optional.
534 	 * set_multicast and get_stats are optional in v0.
535 	 * intr is only required if you add an interrupt.
536 	 */
537 	ASSERT(macinfo->gldm_reset != NULL);
538 	ASSERT(macinfo->gldm_start != NULL);
539 	ASSERT(macinfo->gldm_stop != NULL);
540 	ASSERT(macinfo->gldm_set_mac_addr != NULL);
541 	ASSERT(macinfo->gldm_set_promiscuous != NULL);
542 	ASSERT(macinfo->gldm_send != NULL);
543 
544 	ASSERT(macinfo->gldm_maxpkt >= macinfo->gldm_minpkt);
545 	ASSERT(macinfo->gldm_GLD_version == GLD_VERSION);
546 	ASSERT(macinfo->gldm_broadcast_addr != NULL);
547 	ASSERT(macinfo->gldm_vendor_addr != NULL);
548 	ASSERT(macinfo->gldm_ident != NULL);
549 
550 	if (macinfo->gldm_addrlen > GLD_MAX_ADDRLEN) {
551 		cmn_err(CE_WARN, "GLD: %s driver gldm_addrlen %d > %d not sup"
552 		    "ported", devname, macinfo->gldm_addrlen, GLD_MAX_ADDRLEN);
553 		return (DDI_FAILURE);
554 	}
555 
556 	/*
557 	 * GLD only functions properly with saplen == -2
558 	 */
559 	if (macinfo->gldm_saplen != -2) {
560 		cmn_err(CE_WARN, "GLD: %s driver gldm_saplen %d != -2 "
561 		    "not supported", devname, macinfo->gldm_saplen);
562 		return (DDI_FAILURE);
563 	}
564 
565 	/* see gld_rsrv() */
566 	if (ddi_getprop(DDI_DEV_T_NONE, devinfo, 0, "fast_recv", 0))
567 		macinfo->gldm_options |= GLDOPT_FAST_RECV;
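	/*
	 * Illustrative note (not part of the original source): one plausible
	 * way to set this property is a line such as
	 *
	 *	fast_recv=1;
	 *
	 * in the driver's .conf file, which makes gld_recv() hand packets
	 * upstream with putq() so that gld_rsrv() below runs.
	 */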
568 
569 	mutex_enter(&gld_device_list.gld_devlock);
570 	glddev = gld_devlookup(major);
571 
572 	/*
573 	 *  Allocate per-driver (major) data structure if necessary
574 	 */
575 	if (glddev == NULL) {
576 		/* first occurrence of this device name (major number) */
577 		glddev = GLD_GETSTRUCT(glddev_t, 1);
578 		if (glddev == NULL) {
579 			mutex_exit(&gld_device_list.gld_devlock);
580 			return (DDI_FAILURE);
581 		}
582 		(void) strncpy(glddev->gld_name, devname,
583 		    sizeof (glddev->gld_name) - 1);
584 		glddev->gld_major = major;
585 		glddev->gld_nextminor = GLD_MIN_CLONE_MINOR;
586 		glddev->gld_mac_next = glddev->gld_mac_prev =
587 		    (gld_mac_info_t *)&glddev->gld_mac_next;
588 		glddev->gld_str_next = glddev->gld_str_prev =
589 		    (gld_t *)&glddev->gld_str_next;
590 		mutex_init(&glddev->gld_devlock, NULL, MUTEX_DRIVER, NULL);
591 
592 		/* allow increase of number of supported multicast addrs */
593 		glddev->gld_multisize = ddi_getprop(DDI_DEV_T_NONE,
594 		    devinfo, 0, "multisize", GLD_MAX_MULTICAST);
595 
596 		/*
597 		 * Optionally restrict DLPI provider style
598 		 *
599 		 * -1 - don't create style 1 nodes
600 		 * -2 - don't create style 2 nodes
601 		 */
602 		glddev->gld_styles = ddi_getprop(DDI_DEV_T_NONE, devinfo, 0,
603 		    "gld-provider-styles", 0);
604 
605 		/* Stuff that's needed before any PPA gets attached */
606 		glddev->gld_type = macinfo->gldm_type;
607 		glddev->gld_minsdu = macinfo->gldm_minpkt;
608 		glddev->gld_saplen = macinfo->gldm_saplen;
609 		glddev->gld_addrlen = macinfo->gldm_addrlen;
610 		glddev->gld_broadcast = kmem_zalloc(macinfo->gldm_addrlen,
611 		    KM_SLEEP);
612 		bcopy(macinfo->gldm_broadcast_addr,
613 		    glddev->gld_broadcast, macinfo->gldm_addrlen);
614 		glddev->gld_maxsdu = macinfo->gldm_maxpkt;
615 		gldinsque(glddev, gld_device_list.gld_prev);
616 	}
617 	glddev->gld_ndevice++;
618 	/* Now glddev can't go away until we unregister this mac (or fail) */
619 	mutex_exit(&gld_device_list.gld_devlock);
620 
621 	/*
622 	 *  Per-instance initialization
623 	 */
624 
625 	/*
626 	 * Initialize per-mac structure that is private to GLD.
627 	 * Set up interface pointer. These are device class specific pointers
628 	 * used to handle FDDI/TR/ETHER/IPoIB specific packets.
629 	 */
630 	for (i = 0; i < sizeof (interfaces)/sizeof (*interfaces); i++) {
631 		if (mediatype != interfaces[i].mac_type)
632 			continue;
633 
634 		macinfo->gldm_mac_pvt = kmem_zalloc(sizeof (gld_mac_pvt_t),
635 		    KM_SLEEP);
636 		((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep = ifp =
637 		    &interfaces[i];
638 		break;
639 	}
640 
641 	if (ifp == NULL) {
642 		cmn_err(CE_WARN, "GLD: this version does not support %s driver "
643 		    "of type %d", devname, mediatype);
644 		goto failure;
645 	}
646 
647 	/*
648 	 * Driver can only register MTU within legal media range.
649 	 */
650 	if (macinfo->gldm_maxpkt > ifp->mtu_size) {
651 		cmn_err(CE_WARN, "GLD: oversize MTU is specified by driver %s",
652 		    devname);
653 		goto failure;
654 	}
655 
656 	/*
657 	 * Correct margin size if it is not set.
658 	 */
659 	if (VLAN_CAPABLE(macinfo) && (macinfo->gldm_margin == 0))
660 		macinfo->gldm_margin = VTAG_SIZE;
661 
662 	/*
663 	 * For now, only Infiniband drivers can use MDT. Do not add
664 	 * support for Ethernet, FDDI or TR.
665 	 */
666 	if (macinfo->gldm_mdt_pre != NULL) {
667 		if (mediatype != DL_IB) {
668 			cmn_err(CE_WARN, "GLD: MDT not supported for %s "
669 			    "driver of type %d", devname, mediatype);
670 			goto failure;
671 		}
672 
673 		/*
674 		 * Validate entry points.
675 		 */
676 		if ((macinfo->gldm_mdt_send == NULL) ||
677 		    (macinfo->gldm_mdt_post == NULL)) {
678 			cmn_err(CE_WARN, "GLD: invalid MDT entry points for "
679 			    "%s driver of type %d", devname, mediatype);
680 			goto failure;
681 		}
682 		macinfo->gldm_options |= GLDOPT_MDT;
683 	}
684 
685 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
686 	mac_pvt->major_dev = glddev;
687 
688 	mac_pvt->curr_macaddr = kmem_zalloc(macinfo->gldm_addrlen, KM_SLEEP);
689 	/*
690 	 * XXX Do bit-reversed devices store gldm_vendor in canonical
691 	 * format or in wire format?  Also gldm_broadcast.  For now
692 	 * we are assuming canonical, but I'm not sure that makes the
693 	 * most sense for ease of driver implementation.
694 	 */
695 	bcopy(macinfo->gldm_vendor_addr, mac_pvt->curr_macaddr,
696 	    macinfo->gldm_addrlen);
697 	mac_pvt->statistics = kmem_zalloc(sizeof (struct gld_stats), KM_SLEEP);
698 
699 	/*
700 	 * The available set of notifications is those generatable by GLD
701 	 * itself, plus those corresponding to the capabilities of the MAC
702 	 * driver, intersected with those supported by gld_notify_ind() above.
703 	 */
704 	mac_pvt->notifications = gld_internal_notes;
705 	if (macinfo->gldm_capabilities & GLD_CAP_LINKSTATE)
706 		mac_pvt->notifications |= gld_linkstate_notes;
707 	mac_pvt->notifications &= gld_supported_notes;
708 
709 	GLDM_LOCK_INIT(macinfo);
710 
711 	ddi_set_driver_private(devinfo, macinfo);
712 
713 	/*
714 	 * Now atomically get a PPA and put ourselves on the mac list.
715 	 */
716 	mutex_enter(&glddev->gld_devlock);
717 
718 #ifdef DEBUG
719 	if (macinfo->gldm_ppa != ddi_get_instance(devinfo))
720 		cmn_err(CE_WARN, "%s%d instance != ppa %d",
721 		    ddi_driver_name(devinfo), ddi_get_instance(devinfo),
722 		    macinfo->gldm_ppa);
723 #endif
724 
725 	/*
726 	 * Create style 2 node (gated by gld-provider-styles property).
727 	 *
728 	 * NOTE: When the CLONE_DEV flag is specified to
729 	 *	 ddi_create_minor_node() the minor number argument is
730 	 *	 immaterial. Opens of that node will go via the clone
731 	 *	 driver and gld_open() will always be passed a dev_t with
732 	 *	 minor of zero.
733 	 */
734 	if (glddev->gld_styles != -2) {
735 		if (ddi_create_minor_node(devinfo, glddev->gld_name, S_IFCHR,
736 		    0, DDI_NT_NET, CLONE_DEV) == DDI_FAILURE) {
737 			mutex_exit(&glddev->gld_devlock);
738 			goto late_failure;
739 		}
740 	}
741 
742 	/*
743 	 * Create style 1 node (gated by gld-provider-styles property)
744 	 */
745 	if (glddev->gld_styles != -1) {
746 		(void) sprintf(minordev, "%s%d", glddev->gld_name,
747 		    macinfo->gldm_ppa);
748 		if (ddi_create_minor_node(devinfo, minordev, S_IFCHR,
749 		    GLD_STYLE1_PPA_TO_MINOR(macinfo->gldm_ppa), DDI_NT_NET,
750 		    0) != DDI_SUCCESS) {
751 			mutex_exit(&glddev->gld_devlock);
752 			goto late_failure;
753 		}
754 	}
755 
756 	/* add ourselves to this major device's linked list of instances */
757 	gldinsque(macinfo, glddev->gld_mac_prev);
758 
759 	mutex_exit(&glddev->gld_devlock);
760 
761 	/*
762 	 * Unfortunately we need the ppa before we call gld_initstats();
763 	 * otherwise we would do this just above the mutex_enter above,
764 	 * in which case we could have set MAC_READY inside the mutex and
765 	 * wouldn't have needed to check it in open and DL_ATTACH.  We
766 	 * don't do the initstats/kstat_create inside the mutex because
767 	 * the mutex might be taken in our kstat_update routine and cause
768 	 * a deadlock with kstat_chain_lock.
769 	 */
770 
771 	/* gld_initstats() calls (*ifp->init)() */
772 	if (gld_initstats(macinfo) != GLD_SUCCESS) {
773 		mutex_enter(&glddev->gld_devlock);
774 		gldremque(macinfo);
775 		mutex_exit(&glddev->gld_devlock);
776 		goto late_failure;
777 	}
778 
779 	/*
780 	 * Need to indicate we are NOW ready to process interrupts;
781 	 * any interrupt before this is set is for someone else.
782 	 * This flag is also now used to tell open, et al. that this
783 	 * mac is now fully ready and available for use.
784 	 */
785 	GLDM_LOCK(macinfo, RW_WRITER);
786 	macinfo->gldm_GLD_flags |= GLD_MAC_READY;
787 	GLDM_UNLOCK(macinfo);
788 
789 	/* log local ethernet address -- XXX not DDI compliant */
790 	if (macinfo->gldm_addrlen == sizeof (struct ether_addr))
791 		(void) localetheraddr(
792 		    (struct ether_addr *)macinfo->gldm_vendor_addr, NULL);
793 
794 	/* now put announcement into the message buffer */
795 	cmn_err(CE_CONT, "!%s%d: %s: type \"%s\" mac address %s\n",
796 	    glddev->gld_name,
797 	    macinfo->gldm_ppa, macinfo->gldm_ident,
798 	    mac_pvt->interfacep->mac_string,
799 	    gld_macaddr_sprintf(pbuf, macinfo->gldm_vendor_addr,
800 	    macinfo->gldm_addrlen));
801 
802 	ddi_report_dev(devinfo);
803 	return (DDI_SUCCESS);
804 
805 late_failure:
806 	ddi_remove_minor_node(devinfo, NULL);
807 	GLDM_LOCK_DESTROY(macinfo);
808 	if (mac_pvt->curr_macaddr != NULL)
809 		kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
810 	if (mac_pvt->statistics != NULL)
811 		kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
812 	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
813 	macinfo->gldm_mac_pvt = NULL;
814 
815 failure:
816 	mutex_enter(&gld_device_list.gld_devlock);
817 	glddev->gld_ndevice--;
818 	/*
819 	 * Note that just because this goes to zero here does not necessarily
820 	 * mean that we were the one who added the glddev above.  It's
821 	 * possible that the first mac unattached while we were in here
822 	 * failing to attach the second mac.  But we're now the last.
823 	 */
824 	if (glddev->gld_ndevice == 0) {
825 		/* There should be no macinfos left */
826 		ASSERT(glddev->gld_mac_next ==
827 		    (gld_mac_info_t *)&glddev->gld_mac_next);
828 		ASSERT(glddev->gld_mac_prev ==
829 		    (gld_mac_info_t *)&glddev->gld_mac_next);
830 
831 		/*
832 		 * There should be no DL_UNATTACHED streams: the system
833 		 * should not have detached the "first" devinfo which has
834 		 * all the open style 2 streams.
835 		 *
836 		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
837 		 */
838 		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
839 		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);
840 
841 		gldremque(glddev);
842 		mutex_destroy(&glddev->gld_devlock);
843 		if (glddev->gld_broadcast != NULL)
844 			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
845 		kmem_free(glddev, sizeof (glddev_t));
846 	}
847 	mutex_exit(&gld_device_list.gld_devlock);
848 
849 	return (DDI_FAILURE);
850 }
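/*
 * Illustrative sketch (not part of the original source) of the attach-side
 * counterpart in a hypothetical driver: allocate the macinfo, fill in the
 * entry points and parameters checked above, then register.  The xx_*
 * entry points and data shown are placeholders.
 *
 *	macinfo = gld_mac_alloc(devinfo);
 *	macinfo->gldm_reset = xx_reset;
 *	macinfo->gldm_start = xx_start;
 *	macinfo->gldm_stop = xx_stop;
 *	macinfo->gldm_set_mac_addr = xx_set_mac_addr;
 *	macinfo->gldm_set_promiscuous = xx_set_promiscuous;
 *	macinfo->gldm_send = xx_send;
 *	macinfo->gldm_type = DL_ETHER;
 *	macinfo->gldm_addrlen = ETHERADDRL;
 *	macinfo->gldm_saplen = -2;
 *	macinfo->gldm_broadcast_addr = xx_broadcast;
 *	macinfo->gldm_vendor_addr = xx_factory_addr;
 *	macinfo->gldm_ident = "xx driver";
 *	macinfo->gldm_ppa = ddi_get_instance(devinfo);
 *	...
 *	if (gld_register(devinfo, "xx", macinfo) != DDI_SUCCESS)
 *		gld_mac_free(macinfo);
 */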
851 
852 /*
853  * gld_unregister (macinfo)
854  * Remove the macinfo structure from local structures; this is
855  * cleanup for a driver that is about to be unloaded.
856  */
857 int
858 gld_unregister(gld_mac_info_t *macinfo)
859 {
860 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
861 	glddev_t *glddev = mac_pvt->major_dev;
862 	gld_interface_t *ifp;
863 	int multisize = sizeof (gld_mcast_t) * glddev->gld_multisize;
864 
865 	mutex_enter(&glddev->gld_devlock);
866 	GLDM_LOCK(macinfo, RW_WRITER);
867 
868 	if (mac_pvt->nvlan > 0) {
869 		GLDM_UNLOCK(macinfo);
870 		mutex_exit(&glddev->gld_devlock);
871 		return (DDI_FAILURE);
872 	}
873 
874 #ifdef	GLD_DEBUG
875 	{
876 		int i;
877 
878 		for (i = 0; i < VLAN_HASHSZ; i++) {
879 			if ((mac_pvt->vlan_hash[i] != NULL))
880 				cmn_err(CE_PANIC,
881 				    "%s, line %d: "
882 				    "mac_pvt->vlan_hash[%d] != NULL",
883 				    __FILE__, __LINE__, i);
884 		}
885 	}
886 #endif
887 
888 	/* Delete this mac */
889 	gldremque(macinfo);
890 
891 	/* Disallow further entries to gld_recv() and gld_sched() */
892 	macinfo->gldm_GLD_flags |= GLD_UNREGISTERED;
893 
894 	GLDM_UNLOCK(macinfo);
895 	mutex_exit(&glddev->gld_devlock);
896 
897 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
898 	(*ifp->uninit)(macinfo);
899 
900 	ASSERT(mac_pvt->kstatp);
901 	kstat_delete(mac_pvt->kstatp);
902 
903 	ASSERT(GLDM_LOCK_INITED(macinfo));
904 	kmem_free(mac_pvt->curr_macaddr, macinfo->gldm_addrlen);
905 	kmem_free(mac_pvt->statistics, sizeof (struct gld_stats));
906 
907 	if (mac_pvt->mcast_table != NULL)
908 		kmem_free(mac_pvt->mcast_table, multisize);
909 	kmem_free(macinfo->gldm_mac_pvt, sizeof (gld_mac_pvt_t));
910 	macinfo->gldm_mac_pvt = (caddr_t)NULL;
911 
912 	/* We now have one fewer instance for this major device */
913 	mutex_enter(&gld_device_list.gld_devlock);
914 	glddev->gld_ndevice--;
915 	if (glddev->gld_ndevice == 0) {
916 		/* There should be no macinfos left */
917 		ASSERT(glddev->gld_mac_next ==
918 		    (gld_mac_info_t *)&glddev->gld_mac_next);
919 		ASSERT(glddev->gld_mac_prev ==
920 		    (gld_mac_info_t *)&glddev->gld_mac_next);
921 
922 		/*
923 		 * There should be no DL_UNATTACHED streams: the system
924 		 * should not have detached the "first" devinfo which has
925 		 * all the open style 2 streams.
926 		 *
927 		 * XXX This is not clear.  See gld_getinfo and Bug 1165519
928 		 */
929 		ASSERT(glddev->gld_str_next == (gld_t *)&glddev->gld_str_next);
930 		ASSERT(glddev->gld_str_prev == (gld_t *)&glddev->gld_str_next);
931 
932 		ddi_remove_minor_node(macinfo->gldm_devinfo, NULL);
933 		gldremque(glddev);
934 		mutex_destroy(&glddev->gld_devlock);
935 		if (glddev->gld_broadcast != NULL)
936 			kmem_free(glddev->gld_broadcast, glddev->gld_addrlen);
937 		kmem_free(glddev, sizeof (glddev_t));
938 	}
939 	mutex_exit(&gld_device_list.gld_devlock);
940 
941 	return (DDI_SUCCESS);
942 }
943 
944 /*
945  * gld_initstats
946  * called from gld_register
947  */
948 static int
949 gld_initstats(gld_mac_info_t *macinfo)
950 {
951 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
952 	struct gldkstats *sp;
953 	glddev_t *glddev;
954 	kstat_t *ksp;
955 	gld_interface_t *ifp;
956 
957 	glddev = mac_pvt->major_dev;
958 
959 	if ((ksp = kstat_create(glddev->gld_name, macinfo->gldm_ppa,
960 	    NULL, "net", KSTAT_TYPE_NAMED,
961 	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
962 		cmn_err(CE_WARN,
963 		    "GLD: failed to create kstat structure for %s%d",
964 		    glddev->gld_name, macinfo->gldm_ppa);
965 		return (GLD_FAILURE);
966 	}
967 	mac_pvt->kstatp = ksp;
968 
969 	ksp->ks_update = gld_update_kstat;
970 	ksp->ks_private = (void *)macinfo;
971 
972 	sp = ksp->ks_data;
973 	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
974 	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
975 	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
976 	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
977 	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
978 	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
979 	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
980 	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
981 	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
982 	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
983 	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
984 	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
985 	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
986 	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
987 	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
988 	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
989 	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
990 	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
991 	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
992 	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
993 	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
994 	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
995 	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);
996 
997 	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
998 	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
999 	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);
1000 
1001 	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
1002 	    KSTAT_DATA_UINT32);
1003 	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
1004 	    KSTAT_DATA_UINT32);
1005 
1006 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
1007 
1008 	(*ifp->init)(macinfo);
1009 
1010 	kstat_install(ksp);
1011 
1012 	return (GLD_SUCCESS);
1013 }
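/*
 * Illustrative note (not part of the original source): the counters above
 * appear in the "net" kstat class under the driver name and PPA, so for a
 * hypothetical driver "xx", instance 0, something like
 *
 *	# kstat -c net -m xx -i 0
 *
 * would show ipackets, opackets, obytes64, ifspeed and the rest.
 */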
1014 
1015 /* called from kstat mechanism, and from wsrv's get_statistics_req */
1016 static int
1017 gld_update_kstat(kstat_t *ksp, int rw)
1018 {
1019 	gld_mac_info_t	*macinfo;
1020 	gld_mac_pvt_t	*mac_pvt;
1021 	struct gldkstats *gsp;
1022 	struct gld_stats *stats;
1023 
1024 	if (rw == KSTAT_WRITE)
1025 		return (EACCES);
1026 
1027 	macinfo = (gld_mac_info_t *)ksp->ks_private;
1028 	ASSERT(macinfo != NULL);
1029 
1030 	GLDM_LOCK(macinfo, RW_WRITER);
1031 
1032 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
1033 		GLDM_UNLOCK(macinfo);
1034 		return (EIO);	/* this one's not ready yet */
1035 	}
1036 
1037 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
1038 		GLDM_UNLOCK(macinfo);
1039 		return (EIO);	/* this one's not ready any more */
1040 	}
1041 
1042 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1043 	gsp = mac_pvt->kstatp->ks_data;
1044 	ASSERT(gsp);
1045 	stats = mac_pvt->statistics;
1046 
1047 	if (macinfo->gldm_get_stats)
1048 		(void) (*macinfo->gldm_get_stats)(macinfo, stats);
1049 
1050 	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
1051 	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
1052 	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
1053 	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
1054 	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;	/* 0 for now */
1055 	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
1056 
1057 	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
1058 	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
1059 	gsp->glds_xmtbadinterp.value.ui32 = stats->glds_xmtbadinterp;
1060 
1061 	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
1062 	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
1063 	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
1064 	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
1065 	gsp->glds_multircv.value.ul = stats->glds_multircv;
1066 	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
1067 	gsp->glds_blocked.value.ul = stats->glds_blocked;
1068 	gsp->glds_overflow.value.ul = stats->glds_overflow;
1069 	gsp->glds_underflow.value.ul = stats->glds_underflow;
1070 	gsp->glds_missed.value.ul = stats->glds_missed;
1071 	gsp->glds_norcvbuf.value.ul = stats->glds_norcvbuf +
1072 	    stats->glds_gldnorcvbuf;
1073 	gsp->glds_intr.value.ul = stats->glds_intr;
1074 
1075 	gsp->glds_speed.value.ui64 = stats->glds_speed;
1076 	gsp->glds_unknowns.value.ul = stats->glds_unknowns;
1077 	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
1078 	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
1079 	gsp->glds_rcvbadinterp.value.ui32 = stats->glds_rcvbadinterp;
1080 
1081 	if (mac_pvt->nprom)
1082 		(void) strcpy(gsp->glds_prom.value.c, "phys");
1083 	else if (mac_pvt->nprom_multi)
1084 		(void) strcpy(gsp->glds_prom.value.c, "multi");
1085 	else
1086 		(void) strcpy(gsp->glds_prom.value.c, "off");
1087 
1088 	(void) strcpy(gsp->glds_media.value.c, gld_media[
1089 	    stats->glds_media < sizeof (gld_media) / sizeof (gld_media[0])
1090 	    ? stats->glds_media : 0]);
1091 
1092 	switch (macinfo->gldm_type) {
1093 	case DL_ETHER:
1094 		gsp->glds_frame.value.ul = stats->glds_frame;
1095 		gsp->glds_crc.value.ul = stats->glds_crc;
1096 		gsp->glds_collisions.value.ul = stats->glds_collisions;
1097 		gsp->glds_excoll.value.ul = stats->glds_excoll;
1098 		gsp->glds_defer.value.ul = stats->glds_defer;
1099 		gsp->glds_short.value.ul = stats->glds_short;
1100 		gsp->glds_xmtlatecoll.value.ul = stats->glds_xmtlatecoll;
1101 		gsp->glds_nocarrier.value.ul = stats->glds_nocarrier;
1102 		gsp->glds_dot3_first_coll.value.ui32 =
1103 		    stats->glds_dot3_first_coll;
1104 		gsp->glds_dot3_multi_coll.value.ui32 =
1105 		    stats->glds_dot3_multi_coll;
1106 		gsp->glds_dot3_sqe_error.value.ui32 =
1107 		    stats->glds_dot3_sqe_error;
1108 		gsp->glds_dot3_mac_xmt_error.value.ui32 =
1109 		    stats->glds_dot3_mac_xmt_error;
1110 		gsp->glds_dot3_mac_rcv_error.value.ui32 =
1111 		    stats->glds_dot3_mac_rcv_error;
1112 		gsp->glds_dot3_frame_too_long.value.ui32 =
1113 		    stats->glds_dot3_frame_too_long;
1114 		(void) strcpy(gsp->glds_duplex.value.c, gld_duplex[
1115 		    stats->glds_duplex <
1116 		    sizeof (gld_duplex) / sizeof (gld_duplex[0]) ?
1117 		    stats->glds_duplex : 0]);
1118 		break;
1119 	case DL_TPR:
1120 		gsp->glds_dot5_line_error.value.ui32 =
1121 		    stats->glds_dot5_line_error;
1122 		gsp->glds_dot5_burst_error.value.ui32 =
1123 		    stats->glds_dot5_burst_error;
1124 		gsp->glds_dot5_signal_loss.value.ui32 =
1125 		    stats->glds_dot5_signal_loss;
1126 		gsp->glds_dot5_ace_error.value.ui32 =
1127 		    stats->glds_dot5_ace_error;
1128 		gsp->glds_dot5_internal_error.value.ui32 =
1129 		    stats->glds_dot5_internal_error;
1130 		gsp->glds_dot5_lost_frame_error.value.ui32 =
1131 		    stats->glds_dot5_lost_frame_error;
1132 		gsp->glds_dot5_frame_copied_error.value.ui32 =
1133 		    stats->glds_dot5_frame_copied_error;
1134 		gsp->glds_dot5_token_error.value.ui32 =
1135 		    stats->glds_dot5_token_error;
1136 		gsp->glds_dot5_freq_error.value.ui32 =
1137 		    stats->glds_dot5_freq_error;
1138 		break;
1139 	case DL_FDDI:
1140 		gsp->glds_fddi_mac_error.value.ui32 =
1141 		    stats->glds_fddi_mac_error;
1142 		gsp->glds_fddi_mac_lost.value.ui32 =
1143 		    stats->glds_fddi_mac_lost;
1144 		gsp->glds_fddi_mac_token.value.ui32 =
1145 		    stats->glds_fddi_mac_token;
1146 		gsp->glds_fddi_mac_tvx_expired.value.ui32 =
1147 		    stats->glds_fddi_mac_tvx_expired;
1148 		gsp->glds_fddi_mac_late.value.ui32 =
1149 		    stats->glds_fddi_mac_late;
1150 		gsp->glds_fddi_mac_ring_op.value.ui32 =
1151 		    stats->glds_fddi_mac_ring_op;
1152 		break;
1153 	case DL_IB:
1154 		break;
1155 	default:
1156 		break;
1157 	}
1158 
1159 	GLDM_UNLOCK(macinfo);
1160 
1161 #ifdef GLD_DEBUG
1162 	gld_check_assertions();
1163 	if (gld_debug & GLDRDE)
1164 		gld_sr_dump(macinfo);
1165 #endif
1166 
1167 	return (0);
1168 }
1169 
1170 static int
1171 gld_init_vlan_stats(gld_vlan_t *vlan)
1172 {
1173 	gld_mac_info_t *mac = vlan->gldv_mac;
1174 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
1175 	struct gldkstats *sp;
1176 	glddev_t *glddev;
1177 	kstat_t *ksp;
1178 	char *name;
1179 	int instance;
1180 
1181 	glddev = mac_pvt->major_dev;
1182 	name = glddev->gld_name;
1183 	instance = (vlan->gldv_id * GLD_VLAN_SCALE) + mac->gldm_ppa;
1184 
1185 	if ((ksp = kstat_create(name, instance,
1186 	    NULL, "net", KSTAT_TYPE_NAMED,
1187 	    sizeof (struct gldkstats) / sizeof (kstat_named_t), 0)) == NULL) {
1188 		cmn_err(CE_WARN,
1189 		    "GLD: failed to create kstat structure for %s%d",
1190 		    name, instance);
1191 		return (GLD_FAILURE);
1192 	}
1193 
1194 	vlan->gldv_kstatp = ksp;
1195 
1196 	ksp->ks_update = gld_update_vlan_kstat;
1197 	ksp->ks_private = (void *)vlan;
1198 
1199 	sp = ksp->ks_data;
1200 	kstat_named_init(&sp->glds_pktrcv, "ipackets", KSTAT_DATA_UINT32);
1201 	kstat_named_init(&sp->glds_pktxmt, "opackets", KSTAT_DATA_UINT32);
1202 	kstat_named_init(&sp->glds_errrcv, "ierrors", KSTAT_DATA_ULONG);
1203 	kstat_named_init(&sp->glds_errxmt, "oerrors", KSTAT_DATA_ULONG);
1204 	kstat_named_init(&sp->glds_bytexmt, "obytes", KSTAT_DATA_UINT32);
1205 	kstat_named_init(&sp->glds_bytercv, "rbytes", KSTAT_DATA_UINT32);
1206 	kstat_named_init(&sp->glds_multixmt, "multixmt", KSTAT_DATA_ULONG);
1207 	kstat_named_init(&sp->glds_multircv, "multircv", KSTAT_DATA_ULONG);
1208 	kstat_named_init(&sp->glds_brdcstxmt, "brdcstxmt", KSTAT_DATA_ULONG);
1209 	kstat_named_init(&sp->glds_brdcstrcv, "brdcstrcv", KSTAT_DATA_ULONG);
1210 	kstat_named_init(&sp->glds_blocked, "blocked", KSTAT_DATA_ULONG);
1211 	kstat_named_init(&sp->glds_noxmtbuf, "noxmtbuf", KSTAT_DATA_ULONG);
1212 	kstat_named_init(&sp->glds_norcvbuf, "norcvbuf", KSTAT_DATA_ULONG);
1213 	kstat_named_init(&sp->glds_xmtretry, "xmtretry", KSTAT_DATA_ULONG);
1214 	kstat_named_init(&sp->glds_intr, "intr", KSTAT_DATA_ULONG);
1215 	kstat_named_init(&sp->glds_pktrcv64, "ipackets64", KSTAT_DATA_UINT64);
1216 	kstat_named_init(&sp->glds_pktxmt64, "opackets64", KSTAT_DATA_UINT64);
1217 	kstat_named_init(&sp->glds_bytexmt64, "obytes64", KSTAT_DATA_UINT64);
1218 	kstat_named_init(&sp->glds_bytercv64, "rbytes64", KSTAT_DATA_UINT64);
1219 	kstat_named_init(&sp->glds_unknowns, "unknowns", KSTAT_DATA_ULONG);
1220 	kstat_named_init(&sp->glds_speed, "ifspeed", KSTAT_DATA_UINT64);
1221 	kstat_named_init(&sp->glds_media, "media", KSTAT_DATA_CHAR);
1222 	kstat_named_init(&sp->glds_prom, "promisc", KSTAT_DATA_CHAR);
1223 
1224 	kstat_named_init(&sp->glds_overflow, "oflo", KSTAT_DATA_ULONG);
1225 	kstat_named_init(&sp->glds_underflow, "uflo", KSTAT_DATA_ULONG);
1226 	kstat_named_init(&sp->glds_missed, "missed", KSTAT_DATA_ULONG);
1227 
1228 	kstat_named_init(&sp->glds_xmtbadinterp, "xmt_badinterp",
1229 	    KSTAT_DATA_UINT32);
1230 	kstat_named_init(&sp->glds_rcvbadinterp, "rcv_badinterp",
1231 	    KSTAT_DATA_UINT32);
1232 
1233 	kstat_install(ksp);
1234 	return (GLD_SUCCESS);
1235 }
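/*
 * Illustrative note (not part of the original source): with the kstat
 * instance computed as (vid * GLD_VLAN_SCALE) + ppa above, and assuming
 * the conventional GLD_VLAN_SCALE of 1000, the statistics for VLAN 2 on
 * PPA 0 of a hypothetical "xx" driver would show up as kstat instance
 * 2000 (e.g. "kstat -c net -m xx -i 2000").
 */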
1236 
1237 static int
1238 gld_update_vlan_kstat(kstat_t *ksp, int rw)
1239 {
1240 	gld_vlan_t	*vlan;
1241 	gld_mac_info_t	*macinfo;
1242 	struct gldkstats *gsp;
1243 	struct gld_stats *stats;
1244 	gld_mac_pvt_t *mac_pvt;
1245 	uint32_t media;
1246 
1247 	if (rw == KSTAT_WRITE)
1248 		return (EACCES);
1249 
1250 	vlan = (gld_vlan_t *)ksp->ks_private;
1251 	ASSERT(vlan != NULL);
1252 
1253 	macinfo = vlan->gldv_mac;
1254 	GLDM_LOCK(macinfo, RW_WRITER);
1255 
1256 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1257 
1258 	gsp = vlan->gldv_kstatp->ks_data;
1259 	ASSERT(gsp);
1260 	stats = vlan->gldv_stats;
1261 
1262 	gsp->glds_pktxmt.value.ui32 = stats->glds_pktxmt64 & 0xffffffff;
1263 	gsp->glds_bytexmt.value.ui32 = stats->glds_bytexmt64 & 0xffffffff;
1264 	gsp->glds_errxmt.value.ul = stats->glds_errxmt;
1265 	gsp->glds_multixmt.value.ul = stats->glds_multixmt;
1266 	gsp->glds_brdcstxmt.value.ul = stats->glds_brdcstxmt;
1267 	gsp->glds_noxmtbuf.value.ul = stats->glds_noxmtbuf;
1268 	gsp->glds_xmtretry.value.ul = stats->glds_xmtretry;
1269 	gsp->glds_pktxmt64.value.ui64 = stats->glds_pktxmt64;
1270 	gsp->glds_bytexmt64.value.ui64 = stats->glds_bytexmt64;
1271 
1272 	gsp->glds_pktrcv.value.ui32 = stats->glds_pktrcv64 & 0xffffffff;
1273 	gsp->glds_bytercv.value.ui32 = stats->glds_bytercv64 & 0xffffffff;
1274 	gsp->glds_errrcv.value.ul = stats->glds_errrcv;
1275 	gsp->glds_multircv.value.ul = stats->glds_multircv;
1276 	gsp->glds_brdcstrcv.value.ul = stats->glds_brdcstrcv;
1277 	gsp->glds_blocked.value.ul = stats->glds_blocked;
1278 	gsp->glds_pktrcv64.value.ui64 = stats->glds_pktrcv64;
1279 	gsp->glds_bytercv64.value.ui64 = stats->glds_bytercv64;
1280 	gsp->glds_unknowns.value.ul = stats->glds_unknowns;
1281 	gsp->glds_xmtbadinterp.value.ui32 = stats->glds_xmtbadinterp;
1282 	gsp->glds_rcvbadinterp.value.ui32 = stats->glds_rcvbadinterp;
1283 
1284 	gsp->glds_speed.value.ui64 = mac_pvt->statistics->glds_speed;
1285 	media = mac_pvt->statistics->glds_media;
1286 	(void) strcpy(gsp->glds_media.value.c,
1287 	    gld_media[media < sizeof (gld_media) / sizeof (gld_media[0]) ?
1288 	    media : 0]);
1289 
1290 	GLDM_UNLOCK(macinfo);
1291 	return (0);
1292 }
1293 
1294 /*
1295  * The device dependent driver specifies gld_getinfo as its getinfo routine.
1296  */
1297 /*ARGSUSED*/
1298 int
1299 gld_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
1300 {
1301 	dev_info_t	*devinfo;
1302 	minor_t		minor = getminor((dev_t)arg);
1303 	int		rc = DDI_FAILURE;
1304 
1305 	switch (cmd) {
1306 	case DDI_INFO_DEVT2DEVINFO:
1307 		if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
1308 			*(dev_info_t **)resultp = devinfo;
1309 			rc = DDI_SUCCESS;
1310 		}
1311 		break;
1312 	case DDI_INFO_DEVT2INSTANCE:
1313 		/* Need static mapping for deferred attach */
1314 		if (minor == GLD_USE_STYLE2) {
1315 			/*
1316 			 * Style 2:  this minor number does not correspond to
1317 			 * any particular instance number.
1318 			 */
1319 			rc = DDI_FAILURE;
1320 		} else if (minor <= GLD_MAX_STYLE1_MINOR) {
1321 			/* Style 1:  calculate the PPA from the minor */
1322 			*resultp = (void *)(uintptr_t)
1323 			    GLD_STYLE1_MINOR_TO_PPA(minor);
1324 			rc = DDI_SUCCESS;
1325 		} else {
1326 			/* Clone:  look for it.  Not a static mapping */
1327 			if ((devinfo = gld_finddevinfo((dev_t)arg)) != NULL) {
1328 				*resultp = (void *)(uintptr_t)
1329 				    ddi_get_instance(devinfo);
1330 				rc = DDI_SUCCESS;
1331 			}
1332 		}
1333 		break;
1334 	}
1335 
1336 	return (rc);
1337 }
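/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * GLD-based driver plugs gld_getinfo directly into its dev_ops, along the
 * lines of the traditional template (xx_* names are placeholders):
 *
 *	static struct dev_ops xx_dev_ops = {
 *		DEVO_REV,	-- devo_rev
 *		0,		-- devo_refcnt
 *		gld_getinfo,	-- devo_getinfo
 *		nulldev,	-- devo_identify
 *		nulldev,	-- devo_probe
 *		xx_attach,	-- devo_attach
 *		xx_detach,	-- devo_detach
 *		nodev,		-- devo_reset
 *		&xx_cb_ops,	-- devo_cb_ops
 *		NULL		-- devo_bus_ops; later members omitted
 *	};
 */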
1338 
1339 /* called from gld_getinfo */
1340 dev_info_t *
1341 gld_finddevinfo(dev_t dev)
1342 {
1343 	minor_t		minor = getminor(dev);
1344 	glddev_t	*device;
1345 	gld_mac_info_t	*mac;
1346 	gld_vlan_t	*vlan;
1347 	gld_t		*str;
1348 	dev_info_t	*devinfo = NULL;
1349 	int		i;
1350 
1351 	if (minor == GLD_USE_STYLE2) {
1352 		/*
1353 		 * Style 2:  this minor number does not correspond to
1354 		 * any particular instance number.
1355 		 *
1356 		 * XXX We don't know what to say.  See Bug 1165519.
1357 		 */
1358 		return (NULL);
1359 	}
1360 
1361 	mutex_enter(&gld_device_list.gld_devlock);	/* hold the device */
1362 
1363 	device = gld_devlookup(getmajor(dev));
1364 	if (device == NULL) {
1365 		/* There are no attached instances of this device */
1366 		mutex_exit(&gld_device_list.gld_devlock);
1367 		return (NULL);
1368 	}
1369 
1370 	/*
1371 	 * Search all attached macs and streams.
1372 	 *
1373 	 * XXX We don't bother checking the DL_UNATTACHED streams since
1374 	 * we don't know what devinfo we should report back even if we
1375 	 * found the minor.  Maybe we should associate streams that are
1376 	 * not currently attached to a PPA with the "first" devinfo node
1377 	 * of the major device to attach -- the one that created the
1378 	 * minor node for the generic device.
1379 	 */
1380 	mutex_enter(&device->gld_devlock);
1381 
1382 	for (mac = device->gld_mac_next;
1383 	    mac != (gld_mac_info_t *)&device->gld_mac_next;
1384 	    mac = mac->gldm_next) {
1385 		gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
1386 
1387 		if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
1388 			continue;	/* this one's not ready yet */
1389 		if (minor <= GLD_MAX_STYLE1_MINOR) {
1390 			/* Style 1 -- look for the corresponding PPA */
1391 			if (minor == GLD_STYLE1_PPA_TO_MINOR(mac->gldm_ppa)) {
1392 				devinfo = mac->gldm_devinfo;
1393 				goto out;	/* found it! */
1394 			} else
1395 				continue;	/* not this PPA */
1396 		}
1397 
1398 		/* We are looking for a clone */
1399 		for (i = 0; i < VLAN_HASHSZ; i++) {
1400 			for (vlan = pvt->vlan_hash[i];
1401 			    vlan != NULL; vlan = vlan->gldv_next) {
1402 				for (str = vlan->gldv_str_next;
1403 				    str != (gld_t *)&vlan->gldv_str_next;
1404 				    str = str->gld_next) {
1405 					ASSERT(str->gld_mac_info == mac);
1406 					if (minor == str->gld_minor) {
1407 						devinfo = mac->gldm_devinfo;
1408 						goto out;
1409 					}
1410 				}
1411 			}
1412 		}
1413 	}
1414 out:
1415 	mutex_exit(&device->gld_devlock);
1416 	mutex_exit(&gld_device_list.gld_devlock);
1417 	return (devinfo);
1418 }
1419 
1420 /*
1421  * STREAMS open routine.  The device dependent driver specifies this as its
1422  * open entry point.
1423  */
1424 /*ARGSUSED2*/
1425 int
1426 gld_open(queue_t *q, dev_t *dev, int flag, int sflag, cred_t *cred)
1427 {
1428 	gld_mac_pvt_t *mac_pvt;
1429 	gld_t *gld;
1430 	glddev_t *glddev;
1431 	gld_mac_info_t *macinfo;
1432 	minor_t minor = getminor(*dev);
1433 	gld_vlan_t *vlan;
1434 	t_uscalar_t ppa;
1435 
1436 	ASSERT(q != NULL);
1437 
1438 	if (minor > GLD_MAX_STYLE1_MINOR)
1439 		return (ENXIO);
1440 
1441 	ASSERT(q->q_ptr == NULL);	/* Clone device gives us a fresh Q */
1442 
1443 	/* Find our per-major glddev_t structure */
1444 	mutex_enter(&gld_device_list.gld_devlock);
1445 	glddev = gld_devlookup(getmajor(*dev));
1446 
1447 	/*
1448 	 * This glddev will hang around since detach (and therefore
1449 	 * gld_unregister) can't run while we're here in the open routine.
1450 	 */
1451 	mutex_exit(&gld_device_list.gld_devlock);
1452 
1453 	if (glddev == NULL)
1454 		return (ENXIO);
1455 
1456 #ifdef GLD_DEBUG
1457 	if (gld_debug & GLDPROT) {
1458 		if (minor == GLD_USE_STYLE2)
1459 			cmn_err(CE_NOTE, "gld_open(%p, Style 2)", (void *)q);
1460 		else
1461 			cmn_err(CE_NOTE, "gld_open(%p, Style 1, minor = %d)",
1462 			    (void *)q, minor);
1463 	}
1464 #endif
1465 
1466 	/*
1467 	 * get a per-stream structure and link things together so we
1468 	 * can easily find them later.
1469 	 */
1470 	gld = kmem_zalloc(sizeof (gld_t), KM_SLEEP);
1471 
1472 	/*
1473 	 * fill in the structure and state info
1474 	 */
1475 	gld->gld_qptr = q;
1476 	gld->gld_device = glddev;
1477 	gld->gld_state = DL_UNATTACHED;
1478 
1479 	/*
1480 	 * we must atomically find a free minor number and add the stream
1481 	 * to a list, because gld_findminor has to traverse the lists to
1482 	 * determine which minor numbers are free.
1483 	 */
1484 	mutex_enter(&glddev->gld_devlock);
1485 
1486 	/* find a free minor device number for the clone */
1487 	gld->gld_minor = gld_findminor(glddev);
1488 	if (gld->gld_minor == 0) {
1489 		mutex_exit(&glddev->gld_devlock);
1490 		kmem_free(gld, sizeof (gld_t));
1491 		return (ENOSR);
1492 	}
1493 
1494 #ifdef GLD_VERBOSE_DEBUG
1495 	if (gld_debug & GLDPROT)
1496 		cmn_err(CE_NOTE, "gld_open() gld ptr: %p minor: %d",
1497 		    (void *)gld, gld->gld_minor);
1498 #endif
1499 
1500 	if (minor == GLD_USE_STYLE2) {
1501 		gld->gld_style = DL_STYLE2;
1502 		*dev = makedevice(getmajor(*dev), gld->gld_minor);
1503 		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
1504 		gldinsque(gld, glddev->gld_str_prev);
1505 #ifdef GLD_VERBOSE_DEBUG
1506 		if (gld_debug & GLDPROT)
1507 			cmn_err(CE_NOTE, "GLDstruct added to device list");
1508 #endif
1509 		(void) qassociate(q, -1);
1510 		goto done;
1511 	}
1512 
1513 	gld->gld_style = DL_STYLE1;
1514 
1515 	/* the PPA is actually 1 less than the minordev */
1516 	ppa = GLD_STYLE1_MINOR_TO_PPA(minor);
1517 
1518 	for (macinfo = glddev->gld_mac_next;
1519 	    macinfo != (gld_mac_info_t *)(&glddev->gld_mac_next);
1520 	    macinfo = macinfo->gldm_next) {
1521 		ASSERT(macinfo != NULL);
1522 		if (macinfo->gldm_ppa != ppa)
1523 			continue;
1524 
1525 		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
1526 			continue;	/* this one's not ready yet */
1527 
1528 		/*
1529 		 * we found the correct PPA
1530 		 */
1531 		GLDM_LOCK(macinfo, RW_WRITER);
1532 
1533 		gld->gld_mac_info = macinfo;
1534 
1535 		if (macinfo->gldm_send_tagged != NULL)
1536 			gld->gld_send = macinfo->gldm_send_tagged;
1537 		else
1538 			gld->gld_send = macinfo->gldm_send;
1539 
1540 		/* now ready for action */
1541 		gld->gld_state = DL_UNBOUND;
1542 
1543 		if ((vlan = gld_get_vlan(macinfo, VLAN_VID_NONE)) == NULL) {
1544 			GLDM_UNLOCK(macinfo);
1545 			mutex_exit(&glddev->gld_devlock);
1546 			kmem_free(gld, sizeof (gld_t));
1547 			return (EIO);
1548 		}
1549 
1550 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
1551 		if (!mac_pvt->started) {
1552 			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
1553 				gld_rem_vlan(vlan);
1554 				GLDM_UNLOCK(macinfo);
1555 				mutex_exit(&glddev->gld_devlock);
1556 				kmem_free(gld, sizeof (gld_t));
1557 				return (EIO);
1558 			}
1559 		}
1560 
1561 		gld->gld_vlan = vlan;
1562 		vlan->gldv_nstreams++;
1563 		gldinsque(gld, vlan->gldv_str_prev);
1564 		*dev = makedevice(getmajor(*dev), gld->gld_minor);
1565 		WR(q)->q_ptr = q->q_ptr = (caddr_t)gld;
1566 
1567 		GLDM_UNLOCK(macinfo);
1568 #ifdef GLD_VERBOSE_DEBUG
1569 		if (gld_debug & GLDPROT)
1570 			cmn_err(CE_NOTE,
1571 			    "GLDstruct added to instance list");
1572 #endif
1573 		break;
1574 	}
1575 
1576 	if (gld->gld_state == DL_UNATTACHED) {
1577 		mutex_exit(&glddev->gld_devlock);
1578 		kmem_free(gld, sizeof (gld_t));
1579 		return (ENXIO);
1580 	}
1581 
1582 done:
1583 	mutex_exit(&glddev->gld_devlock);
1584 	noenable(WR(q));	/* We'll do the qenables manually */
1585 	qprocson(q);		/* start the queues running */
1586 	qenable(WR(q));
1587 	return (0);
1588 }
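/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * GLD-based driver points its STREAMS entry points at the routines in
 * this file rather than supplying its own, roughly:
 *
 *	static struct module_info xx_minfo = {
 *		0, "xx", 0, INFPSZ, XX_HIWAT, XX_LOWAT
 *	};
 *	static struct qinit xx_rinit = {
 *		NULL, gld_rsrv, gld_open, gld_close, NULL, &xx_minfo
 *	};
 *	static struct qinit xx_winit = {
 *		gld_wput, gld_wsrv, NULL, NULL, NULL, &xx_minfo
 *	};
 *	static struct streamtab xx_info = {
 *		&xx_rinit, &xx_winit
 *	};
 *
 * gld_wsrv() is the write service routine provided elsewhere in this
 * module; XX_HIWAT and XX_LOWAT are placeholder flow-control limits.
 */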
1589 
1590 /*
1591  * normal stream close call checks current status and cleans up
1592  * data structures that were dynamically allocated
1593  */
1594 /*ARGSUSED1*/
1595 int
1596 gld_close(queue_t *q, int flag, cred_t *cred)
1597 {
1598 	gld_t	*gld = (gld_t *)q->q_ptr;
1599 	glddev_t *glddev = gld->gld_device;
1600 
1601 	ASSERT(q);
1602 	ASSERT(gld);
1603 
1604 #ifdef GLD_DEBUG
1605 	if (gld_debug & GLDPROT) {
1606 		cmn_err(CE_NOTE, "gld_close(%p, Style %d)",
1607 		    (void *)q, (gld->gld_style & 0x1) + 1);
1608 	}
1609 #endif
1610 
1611 	/* Hold all device streams lists still while we check for a macinfo */
1612 	mutex_enter(&glddev->gld_devlock);
1613 
1614 	if (gld->gld_mac_info != NULL) {
1615 		/* If there's a macinfo, block recv while we change state */
1616 		GLDM_LOCK(gld->gld_mac_info, RW_WRITER);
1617 		gld->gld_flags |= GLD_STR_CLOSING; /* no more rcv putnexts */
1618 		GLDM_UNLOCK(gld->gld_mac_info);
1619 	} else {
1620 		/* no mac DL_ATTACHED right now */
1621 		gld->gld_flags |= GLD_STR_CLOSING;
1622 	}
1623 
1624 	mutex_exit(&glddev->gld_devlock);
1625 
1626 	/*
1627 	 * qprocsoff before we call gld_unbind/gldunattach, so that
1628 	 * we know wsrv isn't in there trying to undo what we're doing.
1629 	 */
1630 	qprocsoff(q);
1631 
1632 	ASSERT(gld->gld_wput_count == 0);
1633 	gld->gld_wput_count = 0;	/* just in case */
1634 
1635 	if (gld->gld_state == DL_IDLE) {
1636 		/* Need to unbind */
1637 		ASSERT(gld->gld_mac_info != NULL);
1638 		(void) gld_unbind(WR(q), NULL);
1639 	}
1640 
1641 	if (gld->gld_state == DL_UNBOUND) {
1642 		/*
1643 		 * Need to unattach
1644 		 * For a Style 2 stream, gldunattach also
1645 		 * associates the queue with a NULL dip
1646 		 */
1647 		ASSERT(gld->gld_mac_info != NULL);
1648 		(void) gldunattach(WR(q), NULL);
1649 	}
1650 
1651 	/* disassociate the stream from the device */
1652 	q->q_ptr = WR(q)->q_ptr = NULL;
1653 
1654 	/*
1655 	 * Since we unattached above (if necessary), we know that we're
1656 	 * on the per-major list of unattached streams, rather than a
1657 	 * per-PPA list.  So we know we should hold the devlock.
1658 	 */
1659 	mutex_enter(&glddev->gld_devlock);
1660 	gldremque(gld);			/* remove from Style 2 list */
1661 	mutex_exit(&glddev->gld_devlock);
1662 
1663 	kmem_free(gld, sizeof (gld_t));
1664 
1665 	return (0);
1666 }
1667 
1668 /*
1669  * gld_rsrv (q)
1670  *	simple read service procedure
1671  *	purpose is to avoid the time it takes for packets
1672  *	to move through IP so we can get them off the board
1673  *	as fast as possible due to limited PC resources.
1674  *
1675  *	This is not normally used in the current implementation.  It
1676  *	can be selected with the undocumented property "fast_recv".
1677  *	If that property is set, gld_recv will send the packet
1678  *	upstream with a putq() rather than a putnext(), thus causing
1679  *	this routine to be scheduled.
1680  */
1681 int
1682 gld_rsrv(queue_t *q)
1683 {
1684 	mblk_t *mp;
1685 
1686 	while ((mp = getq(q)) != NULL) {
1687 		if (canputnext(q)) {
1688 			putnext(q, mp);
1689 		} else {
1690 			freemsg(mp);
1691 		}
1692 	}
1693 	return (0);
1694 }
1695 
1696 /*
1697  * gld_wput (q, mp)
1698  * general gld stream write put routine. Receives fastpath data from upper
1699  * modules and processes it immediately.  ioctl and M_PROTO/M_PCPROTO are
1700  * queued for later processing by the service procedure.
1701  */
1702 
1703 int
1704 gld_wput(queue_t *q, mblk_t *mp)
1705 {
1706 	gld_t  *gld = (gld_t *)(q->q_ptr);
1707 	int	rc;
1708 	boolean_t multidata = B_TRUE;
1709 	uint32_t upri;
1710 
1711 #ifdef GLD_DEBUG
1712 	if (gld_debug & GLDTRACE)
1713 		cmn_err(CE_NOTE, "gld_wput(%p %p): type %x",
1714 		    (void *)q, (void *)mp, DB_TYPE(mp));
1715 #endif
1716 	switch (DB_TYPE(mp)) {
1717 
1718 	case M_DATA:
1719 		/* fast data / raw support */
1720 		/* we must be DL_ATTACHED and DL_BOUND to do this */
1721 		/* Tricky to access memory without taking the mutex */
1722 		if ((gld->gld_flags & (GLD_RAW | GLD_FAST)) == 0 ||
1723 		    gld->gld_state != DL_IDLE) {
1724 			merror(q, mp, EPROTO);
1725 			break;
1726 		}
1727 		/*
1728 		 * Cleanup MBLK_VTAG in case it is set by other
1729 		 * modules. MBLK_VTAG is used to save the vtag information.
1730 		 */
1731 		GLD_CLEAR_MBLK_VTAG(mp);
1732 		multidata = B_FALSE;
1733 		/* LINTED: E_CASE_FALLTHRU */
1734 	case M_MULTIDATA:
1735 		/* Only call gld_start() directly if nothing queued ahead */
1736 		/* No guarantees about ordering with different threads */
1737 		if (q->q_first)
1738 			goto use_wsrv;
1739 
1740 		/*
1741 		 * This can happen if wsrv has taken off the last mblk but
1742 		 * is still processing it.
1743 		 */
1744 		membar_consumer();
1745 		if (gld->gld_in_wsrv)
1746 			goto use_wsrv;
1747 
1748 		/*
1749 		 * Keep a count of current wput calls to start.
1750 		 * Nonzero count delays any attempted DL_UNBIND.
1751 		 * See comments above gld_start().
1752 		 */
1753 		atomic_add_32((uint32_t *)&gld->gld_wput_count, 1);
1754 		membar_enter();
1755 
1756 		/* Recheck state now wput_count is set to prevent DL_UNBIND */
1757 		/* If this Q is in process of DL_UNBIND, don't call start */
1758 		if (gld->gld_state != DL_IDLE || gld->gld_in_unbind) {
1759 			/* Extremely unlikely */
1760 			atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
1761 			goto use_wsrv;
1762 		}
1763 
1764 		/*
1765 		 * Get the priority value. Note that in raw mode, the
1766 		 * per-packet priority value kept in b_band is ignored.
1767 		 */
1768 		upri = (gld->gld_flags & GLD_RAW) ? gld->gld_upri :
1769 		    UPRI(gld, mp->b_band);
1770 
1771 		rc = (multidata) ? gld_start_mdt(q, mp, GLD_WPUT) :
1772 		    gld_start(q, mp, GLD_WPUT, upri);
1773 
1774 		/* Allow DL_UNBIND again */
1775 		membar_exit();
1776 		atomic_add_32((uint32_t *)&gld->gld_wput_count, -1);
1777 
1778 		if (rc == GLD_NORESOURCES)
1779 			qenable(q);
1780 		break;	/*  Done with this packet */
1781 
1782 use_wsrv:
1783 		/* Q not empty, in DL_DETACH, or start gave NORESOURCES */
1784 		(void) putq(q, mp);
1785 		qenable(q);
1786 		break;
1787 
1788 	case M_IOCTL:
1789 		/* ioctl relies on wsrv single threading per queue */
1790 		(void) putq(q, mp);
1791 		qenable(q);
1792 		break;
1793 
1794 	case M_CTL:
1795 		(void) putq(q, mp);
1796 		qenable(q);
1797 		break;
1798 
1799 	case M_FLUSH:		/* canonical flush handling */
1800 		/* XXX Should these be FLUSHALL? */
1801 		if (*mp->b_rptr & FLUSHW)
1802 			flushq(q, 0);
1803 		if (*mp->b_rptr & FLUSHR) {
1804 			flushq(RD(q), 0);
1805 			*mp->b_rptr &= ~FLUSHW;
1806 			qreply(q, mp);
1807 		} else
1808 			freemsg(mp);
1809 		break;
1810 
1811 	case M_PROTO:
1812 	case M_PCPROTO:
1813 		/* these rely on wsrv single threading per queue */
1814 		(void) putq(q, mp);
1815 		qenable(q);
1816 		break;
1817 
1818 	default:
1819 #ifdef GLD_DEBUG
1820 		if (gld_debug & GLDETRACE)
1821 			cmn_err(CE_WARN,
1822 			    "gld: Unexpected packet type from queue: 0x%x",
1823 			    DB_TYPE(mp));
1824 #endif
1825 		freemsg(mp);
1826 	}
1827 	return (0);
1828 }
1829 
1830 /*
1831  * gld_wsrv - Incoming messages are processed according to the DLPI protocol
1832  * specification.
1833  *
1834  * wsrv is single-threaded per Q.  We make use of this to avoid taking the
1835  * lock for reading data items that are only ever written by us.
1836  */
1837 
1838 int
1839 gld_wsrv(queue_t *q)
1840 {
1841 	mblk_t *mp;
1842 	gld_t *gld = (gld_t *)q->q_ptr;
1843 	gld_mac_info_t *macinfo;
1844 	union DL_primitives *prim;
1845 	int err;
1846 	boolean_t multidata;
1847 	uint32_t upri;
1848 
1849 #ifdef GLD_DEBUG
1850 	if (gld_debug & GLDTRACE)
1851 		cmn_err(CE_NOTE, "gld_wsrv(%p)", (void *)q);
1852 #endif
1853 
1854 	ASSERT(!gld->gld_in_wsrv);
1855 
1856 	gld->gld_xwait = B_FALSE; /* We are now going to process this Q */
1857 
1858 	if (q->q_first == NULL)
1859 		return (0);
1860 
1861 	macinfo = gld->gld_mac_info;
1862 
1863 	/*
1864 	 * Help wput avoid a call to gld_start if there might be a message
1865 	 * previously queued by that thread being processed here.
1866 	 */
1867 	gld->gld_in_wsrv = B_TRUE;
1868 	membar_enter();
1869 
1870 	while ((mp = getq(q)) != NULL) {
1871 		switch (DB_TYPE(mp)) {
1872 		case M_DATA:
1873 		case M_MULTIDATA:
1874 			multidata = (DB_TYPE(mp) == M_MULTIDATA);
1875 
1876 			/*
1877 			 * This is either a retry of a previously processed
1878 			 * UNITDATA_REQ, or a RAW or FAST message from above.
1879 			 */
1880 			if (macinfo == NULL) {
1881 				/* No longer attached to a PPA, drop packet */
1882 				freemsg(mp);
1883 				break;
1884 			}
1885 
1886 			gld->gld_sched_ran = B_FALSE;
1887 			membar_enter();
1888 
1889 			/*
1890 			 * Get the priority value. Note that in raw mode, the
1891 			 * per-packet priority value kept in b_band is ignored.
1892 			 */
1893 			upri = (gld->gld_flags & GLD_RAW) ? gld->gld_upri :
1894 			    UPRI(gld, mp->b_band);
1895 
1896 			err = (multidata) ? gld_start_mdt(q, mp, GLD_WSRV) :
1897 			    gld_start(q, mp, GLD_WSRV, upri);
1898 			if (err == GLD_NORESOURCES) {
1899 				/* gld_sched will qenable us later */
1900 				gld->gld_xwait = B_TRUE; /* want qenable */
1901 				membar_enter();
1902 				/*
1903 				 * v2:  we're not holding the lock; it's
1904 				 * possible that the driver could have already
1905 				 * called gld_sched (following up on its
1906 				 * return of GLD_NORESOURCES), before we got a
1907 				 * chance to do the putbq() and set gld_xwait.
1908 				 * So if we saw a call to gld_sched that
1909 				 * examined this queue, since our call to
1910 				 * gld_start() above, then it's possible we've
1911 				 * already seen the only call to gld_sched()
1912 				 * we're ever going to see.  So we better retry
1913 				 * transmitting this packet right now.
1914 				 */
1915 				if (gld->gld_sched_ran) {
1916 #ifdef GLD_DEBUG
1917 					if (gld_debug & GLDTRACE)
1918 						cmn_err(CE_NOTE, "gld_wsrv: "
1919 						    "sched was called");
1920 #endif
1921 					break;	/* try again right now */
1922 				}
1923 				gld->gld_in_wsrv = B_FALSE;
1924 				return (0);
1925 			}
1926 			break;
1927 
1928 		case M_IOCTL:
1929 			(void) gld_ioctl(q, mp);
1930 			break;
1931 
1932 		case M_CTL:
1933 			if (macinfo == NULL) {
1934 				freemsg(mp);
1935 				break;
1936 			}
1937 
1938 			if (macinfo->gldm_mctl != NULL) {
1939 				GLDM_LOCK(macinfo, RW_WRITER);
1940 				(void) (*macinfo->gldm_mctl) (macinfo, q, mp);
1941 				GLDM_UNLOCK(macinfo);
1942 			} else {
1943 				/* This driver doesn't handle M_CTL; just drop it */
1944 				freemsg(mp);
1945 			}
1946 			break;
1947 
1948 		case M_PROTO:	/* Will be a DLPI message of some type */
1949 		case M_PCPROTO:
1950 			if ((err = gld_cmds(q, mp)) != GLDE_OK) {
1951 				if (err == GLDE_RETRY) {
1952 					gld->gld_in_wsrv = B_FALSE;
1953 					return (0); /* quit while we're ahead */
1954 				}
1955 				prim = (union DL_primitives *)mp->b_rptr;
1956 				dlerrorack(q, mp, prim->dl_primitive, err, 0);
1957 			}
1958 			break;
1959 
1960 		default:
1961 			/* This should never happen */
1962 #ifdef GLD_DEBUG
1963 			if (gld_debug & GLDERRS)
1964 				cmn_err(CE_WARN,
1965 				    "gld_wsrv: db_type(%x) not supported",
1966 				    mp->b_datap->db_type);
1967 #endif
1968 			freemsg(mp);	/* unknown types are discarded */
1969 			break;
1970 		}
1971 	}
1972 
1973 	membar_exit();
1974 	gld->gld_in_wsrv = B_FALSE;
1975 	return (0);
1976 }
1977 
1978 /*
1979  * gld_start() can get called from gld_wput(), gld_wsrv(), or gld_unitdata().
1980  *
1981  * We only come directly from wput() in the GLD_FAST (fastpath) or RAW case.
1982  *
1983  * In particular, we must avoid calling gld_precv*() if we came from wput().
1984  * gld_precv*() is where we, on the transmit side, loop back our outgoing
1985  * packets to the receive side if we are in physical promiscuous mode.
1986  * Since the receive side holds a lock across its call to the upstream
1987  * putnext, and that upstream module could well have looped back to our
1988  * wput() routine on the same thread, we cannot call gld_precv* from here
1989  * for fear of causing a recursive lock entry in our receive code.
1990  *
1991  * There is a problem here when coming from gld_wput().  While wput
1992  * only comes here if the queue is attached to a PPA and bound to a SAP
1993  * and there are no messages on the queue ahead of the M_DATA that could
1994  * change that, it is theoretically possible that another thread could
1995  * now wput a DL_UNBIND and a DL_DETACH message, and the wsrv() routine
1996  * could wake up and process them, before we finish processing this
1997  * send of the M_DATA.  This can only possibly happen on a Style 2 RAW or
1998  * FAST (fastpath) stream:  non RAW/FAST streams always go through wsrv(),
1999  * and Style 1 streams only DL_DETACH in the close routine, where
2000  * qprocsoff() protects us.  If this happens we could end up calling
2001  * gldm_send() after we have detached the stream and possibly called
2002  * gldm_stop().  Worse, once the number of attached streams goes to zero,
2003  * detach/unregister could be called, and the macinfo could go away entirely.
2004  *
2005  * No one has ever seen this happen.
2006  *
2007  * It is some trouble to fix this, and we would rather not add any mutex
2008  * logic into the wput() routine, which is supposed to be a "fast"
2009  * path.
2010  *
2011  * What I've done is use an atomic counter to keep a count of the number
2012  * of threads currently calling gld_start() from wput() on this stream.
2013  * If DL_DETACH sees this as nonzero, it putbqs the request back onto
2014  * the queue and qenables, hoping to have better luck next time.  Since
2015  * people shouldn't be trying to send after they've asked to DL_DETACH,
2016  * hopefully very soon all the wput=>start threads should have returned
2017  * and the DL_DETACH will succeed.  It's hard to test this since the odds
2018  * of the failure even trying to happen are so small.  I probably could
2019  * have ignored the whole issue and never been the worse for it.
2020  *
2021  * Because some GLDv2 Ethernet drivers do not allow the size of a transmitted
2022  * packet to be greater than ETHERMAX, we must first strip the VLAN tag
2023  * from a tagged packet before passing it to the driver's gld_send() entry
2024  * point function, and pass the VLAN tag as a separate argument. The
2025  * gld_send() function may fail. In that case, the packet needs to be
2026  * queued so that it can be processed again in GLD's service routine. As the
2027  * VTAG has already been stripped at that time, we save the VTAG information
2028  * in (the unused fields of) the dblk using GLD_SAVE_MBLK_VTAG(), so that the
2029  * VTAG is queued along with the packet and can be retrieved the next time
2030  * gld_start() is called from gld_wsrv().
2031  *
2032  * Some rules for using the GLD_{CLEAR|SAVE}_MBLK_VTAG macros:
2033  *
2034  * - GLD_SAVE_MBLK_VTAG() must be called to save the VTAG information each time
2035  *   the message is queued by putbq().
2036  *
2037  * - GLD_CLEAR_MBLK_VTAG() must be called to clear the bogus VTAG information
2038  *   (if any) in dblk before the message is passed to the gld_start() function.
2039  */
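
/*
 * The following is a condensed, illustrative sketch of the save/clear
 * sequence described above, as implemented by gld_start() below (see the
 * function body for the complete logic):
 *
 *	raw_vtag = GLD_GET_MBLK_VTAG(mp);	   read any saved vtag
 *	GLD_CLEAR_MBLK_VTAG(mp);		   clear the info in the dblk
 *	rc = (*gld->gld_send)(macinfo, mp, vtag);
 *	if (rc == GLD_NORESOURCES) {
 *		GLD_SAVE_MBLK_VTAG(mp, raw_vtag);  keep the vtag with the mblk
 *		(void) putbq(q, mp);		   retried later from gld_wsrv()
 *	}
 */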
2040 static int
2041 gld_start(queue_t *q, mblk_t *mp, int caller, uint32_t upri)
2042 {
2043 	mblk_t *nmp;
2044 	gld_t *gld = (gld_t *)q->q_ptr;
2045 	gld_mac_info_t *macinfo;
2046 	gld_mac_pvt_t *mac_pvt;
2047 	int rc;
2048 	gld_interface_t *ifp;
2049 	pktinfo_t pktinfo;
2050 	uint32_t vtag, vid;
2051 	uint32_t raw_vtag = 0;
2052 	gld_vlan_t *vlan;
2053 	struct gld_stats *stats0, *stats = NULL;
2054 
2055 	ASSERT(DB_TYPE(mp) == M_DATA);
2056 	macinfo = gld->gld_mac_info;
2057 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2058 	ifp = mac_pvt->interfacep;
2059 	vlan = (gld_vlan_t *)gld->gld_vlan;
2060 	vid = vlan->gldv_id;
2061 
2062 	/*
2063 	 * If this interface is a VLAN, the kstats of corresponding
2064 	 * "VLAN 0" should also be updated. Note that the gld_vlan_t
2065 	 * structure for VLAN 0 might not exist if there are no DLPI
2066 	 * consumers attaching on VLAN 0. Fortunately we can directly
2067 	 * access VLAN 0's kstats from macinfo.
2068 	 *
2069 	 * Therefore, stats0 (VLAN 0's kstats) must always be
2070 	 * updated, and stats must be updated if it is not NULL.
2071 	 */
2072 	stats0 = mac_pvt->statistics;
2073 	if (vid != VLAN_VID_NONE)
2074 		stats = vlan->gldv_stats;
2075 
2076 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_TX) != 0) {
2077 #ifdef GLD_DEBUG
2078 		if (gld_debug & GLDERRS)
2079 			cmn_err(CE_WARN,
2080 			    "gld_start: failed to interpret outbound packet");
2081 #endif
2082 		goto badarg;
2083 	}
2084 
2085 	vtag = VLAN_VID_NONE;
2086 	raw_vtag = GLD_GET_MBLK_VTAG(mp);
2087 	if (GLD_VTAG_TCI(raw_vtag) != 0) {
2088 		uint16_t raw_pri, raw_vid, evid;
2089 
2090 		/*
2091 		 * Tagged packet.
2092 		 */
2093 		raw_pri = GLD_VTAG_PRI(raw_vtag);
2094 		raw_vid = GLD_VTAG_VID(raw_vtag);
2095 		GLD_CLEAR_MBLK_VTAG(mp);
2096 
2097 		if (gld->gld_flags & GLD_RAW) {
2098 			/*
2099 			 * In raw mode, we only expect untagged packets or
2100 			 * special priority-tagged packets on a VLAN stream.
2101 			 * Drop the packet if its VID is not zero.
2102 			 */
2103 			if (vid != VLAN_VID_NONE && raw_vid != VLAN_VID_NONE)
2104 				goto badarg;
2105 
2106 			/*
2107 			 * In raw mode, use the priority specified in the
2108 			 * packet if there is one; otherwise fall back to the
2109 			 * per-stream priority.
2110 			 */
2111 			upri = (raw_pri != 0) ? raw_pri : upri;
2112 		}
2113 
2114 		if (vid == VLAN_VID_NONE && vid != raw_vid) {
2115 			gld_vlan_t *tmp_vlan;
2116 
2117 			/*
2118 			 * This link is a physical link, but the packet is
2119 			 * a VLAN-tagged packet; the kstats of the corresponding
2120 			 * VLAN (if any) should also be updated.
2121 			 */
2122 			tmp_vlan = gld_find_vlan(macinfo, raw_vid);
2123 			if (tmp_vlan != NULL)
2124 				stats = tmp_vlan->gldv_stats;
2125 		}
2126 
2127 		evid = (vid == VLAN_VID_NONE) ? raw_vid : vid;
2128 		if (evid != VLAN_VID_NONE || upri != 0)
2129 			vtag = GLD_MAKE_VTAG(upri, VLAN_CFI_ETHER, evid);
2130 	} else {
2131 		/*
2132 		 * Untagged packet:
2133 		 * Get vtag from the attached PPA of this stream.
2134 		 */
2135 		if ((vid != VLAN_VID_NONE) ||
2136 		    ((macinfo->gldm_type == DL_ETHER) && (upri != 0))) {
2137 			vtag = GLD_MAKE_VTAG(upri, VLAN_CFI_ETHER, vid);
2138 		}
2139 	}
2140 
2141 	/*
2142 	 * We're not holding the lock for this check.  If the promiscuous
2143 	 * state is in flux it doesn't matter much if we get this wrong.
2144 	 */
2145 	if (mac_pvt->nprom > 0) {
2146 		/*
2147 		 * We want to loopback to the receive side, but to avoid
2148 		 * recursive lock entry:  if we came from wput(), which
2149 		 * could have looped back via IP from our own receive
2150 		 * interrupt thread, we decline this request.  wput()
2151 		 * will then queue the packet for wsrv().  This means
2152 		 * that when snoop is running we don't get the advantage
2153 		 * of the wput() multithreaded direct entry to the
2154 		 * driver's send routine.
2155 		 */
2156 		if (caller == GLD_WPUT) {
2157 			GLD_SAVE_MBLK_VTAG(mp, raw_vtag);
2158 			(void) putbq(q, mp);
2159 			return (GLD_NORESOURCES);
2160 		}
2161 		if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
2162 			nmp = dupmsg_noloan(mp);
2163 		else
2164 			nmp = dupmsg(mp);
2165 	} else
2166 		nmp = NULL;		/* we need no loopback */
2167 
2168 	if (ifp->hdr_size > 0 &&
2169 	    pktinfo.pktLen > ifp->hdr_size + (vtag == 0 ? 0 : VTAG_SIZE) +
2170 	    macinfo->gldm_maxpkt) {
2171 		if (nmp)
2172 			freemsg(nmp);	/* free the duped message */
2173 #ifdef GLD_DEBUG
2174 		if (gld_debug & GLDERRS)
2175 			cmn_err(CE_WARN,
2176 			    "gld_start: oversize outbound packet, size %d,"
2177 			    " max %d", pktinfo.pktLen,
2178 			    ifp->hdr_size + (vtag == 0 ? 0 : VTAG_SIZE) +
2179 			    macinfo->gldm_maxpkt);
2180 #endif
2181 		goto badarg;
2182 	}
2183 
2184 	rc = (*gld->gld_send)(macinfo, mp, vtag);
2185 
2186 	if (rc != GLD_SUCCESS) {
2187 		if (rc == GLD_NORESOURCES) {
2188 			ATOMIC_BUMP(stats0, stats, glds_xmtretry, 1);
2189 			GLD_SAVE_MBLK_VTAG(mp, raw_vtag);
2190 			(void) putbq(q, mp);
2191 		} else {
2192 			/* transmit error; drop the packet */
2193 			freemsg(mp);
2194 			/* We're supposed to count failed attempts as well */
2195 			UPDATE_STATS(stats0, stats, pktinfo, 1);
2196 #ifdef GLD_DEBUG
2197 			if (gld_debug & GLDERRS)
2198 				cmn_err(CE_WARN,
2199 				    "gld_start: gldm_send failed %d", rc);
2200 #endif
2201 		}
2202 		if (nmp)
2203 			freemsg(nmp);	/* free the dupped message */
2204 		return (rc);
2205 	}
2206 
2207 	UPDATE_STATS(stats0, stats, pktinfo, 1);
2208 
2209 	/*
2210 	 * Loopback case. The message needs to be sent back up on
2211 	 * the read side. This silently fails if the dupmsg above
2212 	 * failed; that is probably OK, since if there is no memory
2213 	 * to dup the block, there isn't much we could do anyway.
2214 	 */
2215 	if (nmp) {
2216 		GLDM_LOCK(macinfo, RW_WRITER);
2217 		gld_precv(macinfo, nmp, vtag, stats);
2218 		GLDM_UNLOCK(macinfo);
2219 	}
2220 
2221 	return (GLD_SUCCESS);
2222 badarg:
2223 	freemsg(mp);
2224 
2225 	ATOMIC_BUMP(stats0, stats, glds_xmtbadinterp, 1);
2226 	return (GLD_BADARG);
2227 }
2228 
2229 /*
2230  * With MDT V.2 a single message mp can have one header area and multiple
2231  * payload areas. A packet is described by dl_pkt_info, and each packet can
2232  * span multiple payload areas (currently with TCP, each packet will have one
2233  * header and at the most two payload areas). MACs might have a limit on the
2234  * number of payload segments (i.e. per packet scatter-gather limit), and
2235  * MDT V.2 has a way of specifying that with mdt_span_limit; the MAC driver
2236  * might also have a limit on the total number of payloads in a message, and
2237  * that is specified by mdt_max_pld.
2238  */
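
/*
 * For illustration, the driver entry-point sequence used below for each
 * multidata message is roughly (this mirrors the code in gld_start_mdt()
 * rather than adding anything new):
 *
 *	numpacks = (*macinfo->gldm_mdt_pre)(macinfo, mp, &cookie);
 *	for each of the first numpacks packet descriptors:
 *		(*macinfo->gldm_mdt_send)(macinfo, cookie, &pinfo);
 *	(*macinfo->gldm_mdt_post)(macinfo, mp, cookie);
 *
 * A return of 0 from gldm_mdt_pre() means "retry later" (the message is
 * putbq'd), and -1 indicates a transmit error (the message is dropped).
 */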
2239 static int
2240 gld_start_mdt(queue_t *q, mblk_t *mp, int caller)
2241 {
2242 	mblk_t *nextmp;
2243 	gld_t *gld = (gld_t *)q->q_ptr;
2244 	gld_mac_info_t *macinfo = gld->gld_mac_info;
2245 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2246 	int numpacks, mdtpacks;
2247 	gld_interface_t *ifp = mac_pvt->interfacep;
2248 	pktinfo_t pktinfo;
2249 	gld_vlan_t *vlan = (gld_vlan_t *)gld->gld_vlan;
2250 	boolean_t doloop = B_FALSE;
2251 	multidata_t *dlmdp;
2252 	pdescinfo_t pinfo;
2253 	pdesc_t *dl_pkt;
2254 	void *cookie;
2255 	uint_t totLen = 0;
2256 
2257 	ASSERT(DB_TYPE(mp) == M_MULTIDATA);
2258 
2259 	/*
2260 	 * We're not holding the lock for this check.  If the promiscuous
2261 	 * state is in flux it doesn't matter much if we get this wrong.
2262 	 */
2263 	if (mac_pvt->nprom > 0) {
2264 		/*
2265 		 * We want to loopback to the receive side, but to avoid
2266 		 * recursive lock entry:  if we came from wput(), which
2267 		 * could have looped back via IP from our own receive
2268 		 * interrupt thread, we decline this request.  wput()
2269 		 * will then queue the packet for wsrv().  This means
2270 		 * that when snoop is running we don't get the advantage
2271 		 * of the wput() multithreaded direct entry to the
2272 		 * driver's send routine.
2273 		 */
2274 		if (caller == GLD_WPUT) {
2275 			(void) putbq(q, mp);
2276 			return (GLD_NORESOURCES);
2277 		}
2278 		doloop = B_TRUE;
2279 
2280 		/*
2281 		 * unlike the M_DATA case, we don't have to call
2282 		 * dupmsg_noloan here because mmd_transform
2283 		 * (called by gld_precv_mdt) will make a copy of
2284 		 * each dblk.
2285 		 */
2286 	}
2287 
2288 	while (mp != NULL) {
2289 		/*
2290 		 * The lower layer driver only gets a single multidata
2291 		 * message; this also makes it easier to handle noresources.
2292 		 */
2293 		nextmp = mp->b_cont;
2294 		mp->b_cont = NULL;
2295 
2296 		/*
2297 		 * Get number of packets in this message; if nothing
2298 		 * to transmit, go to next message.
2299 		 */
2300 		dlmdp = mmd_getmultidata(mp);
2301 		if ((mdtpacks = (int)mmd_getcnt(dlmdp, NULL, NULL)) == 0) {
2302 			freemsg(mp);
2303 			mp = nextmp;
2304 			continue;
2305 		}
2306 
2307 		/*
2308 		 * Run interpreter to populate media specific pktinfo fields.
2309 		 * This collects per MDT message information like sap,
2310 		 * broad/multicast etc.
2311 		 */
2312 		(void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, &pktinfo,
2313 		    GLD_MDT_TX);
2314 
2315 		numpacks = (*macinfo->gldm_mdt_pre)(macinfo, mp, &cookie);
2316 
2317 		if (numpacks > 0) {
2318 			/*
2319 			 * Driver indicates it can transmit at least 1, and
2320 			 * possibly all, packets in MDT message.
2321 			 */
2322 			int count = numpacks;
2323 
2324 			for (dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo);
2325 			    (dl_pkt != NULL);
2326 			    dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo)) {
2327 				/*
2328 				 * Format this packet by adding link header and
2329 				 * adjusting pdescinfo to include it; get
2330 				 * packet length.
2331 				 */
2332 				(void) (*ifp->interpreter_mdt)(macinfo, NULL,
2333 				    &pinfo, &pktinfo, GLD_MDT_TXPKT);
2334 
2335 				totLen += pktinfo.pktLen;
2336 
2337 				/*
2338 				 * Loop back packet before handing to the
2339 				 * driver.
2340 				 */
2341 				if (doloop &&
2342 				    mmd_adjpdesc(dl_pkt, &pinfo) != NULL) {
2343 					GLDM_LOCK(macinfo, RW_WRITER);
2344 					gld_precv_mdt(macinfo, vlan, mp,
2345 					    dl_pkt, &pktinfo);
2346 					GLDM_UNLOCK(macinfo);
2347 				}
2348 
2349 				/*
2350 				 * And send off to driver.
2351 				 */
2352 				(*macinfo->gldm_mdt_send)(macinfo, cookie,
2353 				    &pinfo);
2354 
2355 				/*
2356 				 * Be careful not to invoke getnextpdesc if we
2357 				 * already sent the last packet, since driver
2358 				 * might have posted it to hardware causing a
2359 				 * completion and freemsg() so the MDT data
2360 				 * structures might not be valid anymore.
2361 				 */
2362 				if (--count == 0)
2363 					break;
2364 			}
2365 			(*macinfo->gldm_mdt_post)(macinfo, mp, cookie);
2366 			pktinfo.pktLen = totLen;
2367 			UPDATE_STATS(vlan->gldv_stats, NULL, pktinfo, numpacks);
2368 
2369 			/*
2370 			 * In the noresources case (when the driver indicates
2371 			 * it cannot transmit all packets in the MDT message),
2372 			 * adjust to skip the packets already sent on retry.
2373 			 */
2374 			if (numpacks != mdtpacks) {
2375 				/*
2376 				 * Release already processed packet descriptors.
2377 				 */
2378 				for (count = 0; count < numpacks; count++) {
2379 					dl_pkt = mmd_getfirstpdesc(dlmdp,
2380 					    &pinfo);
2381 					mmd_rempdesc(dl_pkt);
2382 				}
2383 				ATOMIC_BUMP(vlan->gldv_stats, NULL,
2384 				    glds_xmtretry, 1);
2385 				mp->b_cont = nextmp;
2386 				(void) putbq(q, mp);
2387 				return (GLD_NORESOURCES);
2388 			}
2389 		} else if (numpacks == 0) {
2390 			/*
2391 			 * The driver indicates it cannot transmit any packets
2392 			 * right now and will request a retry later.
2393 			 */
2394 			ATOMIC_BUMP(vlan->gldv_stats, NULL, glds_xmtretry, 1);
2395 			mp->b_cont = nextmp;
2396 			(void) putbq(q, mp);
2397 			return (GLD_NORESOURCES);
2398 		} else {
2399 			ASSERT(numpacks == -1);
2400 			/*
2401 			 * We're supposed to count failed attempts as well.
2402 			 */
2403 			dl_pkt = mmd_getfirstpdesc(dlmdp, &pinfo);
2404 			while (dl_pkt != NULL) {
2405 				/*
2406 				 * Call interpreter to determine total packet
2407 				 * bytes that are being dropped.
2408 				 */
2409 				(void) (*ifp->interpreter_mdt)(macinfo, NULL,
2410 				    &pinfo, &pktinfo, GLD_MDT_TXPKT);
2411 
2412 				totLen += pktinfo.pktLen;
2413 
2414 				dl_pkt = mmd_getnextpdesc(dl_pkt, &pinfo);
2415 			}
2416 			pktinfo.pktLen = totLen;
2417 			UPDATE_STATS(vlan->gldv_stats, NULL, pktinfo, mdtpacks);
2418 
2419 			/*
2420 			 * Transmit error; drop the message, move on
2421 			 * to the next one.
2422 			 */
2423 			freemsg(mp);
2424 		}
2425 
2426 		/*
2427 		 * Process the next multidata block, if there is one.
2428 		 */
2429 		mp = nextmp;
2430 	}
2431 
2432 	return (GLD_SUCCESS);
2433 }
2434 
2435 /*
2436  * gld_intr (macinfo)
2437  */
2438 uint_t
2439 gld_intr(gld_mac_info_t *macinfo)
2440 {
2441 	ASSERT(macinfo != NULL);
2442 
2443 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
2444 		return (DDI_INTR_UNCLAIMED);
2445 
2446 	return ((*macinfo->gldm_intr)(macinfo));
2447 }
2448 
2449 /*
2450  * gld_sched (macinfo)
2451  *
2452  * This routine scans the streams that refer to a specific macinfo
2453  * structure and causes the STREAMS scheduler to try to run them if
2454  * they are marked as waiting for the transmit buffer.
2455  */
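
/*
 * Typical driver-side usage (a sketch, not code from this file; the helper
 * name is hypothetical): a driver that returns GLD_NORESOURCES from its
 * gldm_send() entry point is expected to call gld_sched() once transmit
 * resources become available again, for example from its transmit-complete
 * interrupt:
 *
 *	if (tx_descriptors_free(softc) == 0)
 *		return (GLD_NORESOURCES);	in gldm_send()
 *	...
 *	gld_sched(macinfo);			when descriptors free up again
 *
 * gld_sched() then qenables any stream whose service routine is waiting
 * (gld_xwait) for transmit resources.
 */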
2456 void
2457 gld_sched(gld_mac_info_t *macinfo)
2458 {
2459 	gld_mac_pvt_t *mac_pvt;
2460 	gld_t *gld;
2461 	gld_vlan_t *vlan;
2462 	int i;
2463 
2464 	ASSERT(macinfo != NULL);
2465 
2466 	GLDM_LOCK(macinfo, RW_WRITER);
2467 
2468 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
2469 		/* We're probably being called from a leftover interrupt */
2470 		GLDM_UNLOCK(macinfo);
2471 		return;
2472 	}
2473 
2474 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2475 
2476 	for (i = 0; i < VLAN_HASHSZ; i++) {
2477 		for (vlan = mac_pvt->vlan_hash[i];
2478 		    vlan != NULL; vlan = vlan->gldv_next) {
2479 			for (gld = vlan->gldv_str_next;
2480 			    gld != (gld_t *)&vlan->gldv_str_next;
2481 			    gld = gld->gld_next) {
2482 				ASSERT(gld->gld_mac_info == macinfo);
2483 				gld->gld_sched_ran = B_TRUE;
2484 				membar_enter();
2485 				if (gld->gld_xwait) {
2486 					gld->gld_xwait = B_FALSE;
2487 					qenable(WR(gld->gld_qptr));
2488 				}
2489 			}
2490 		}
2491 	}
2492 
2493 	GLDM_UNLOCK(macinfo);
2494 }
2495 
2496 /*
2497  * gld_precv (macinfo, mp, vtag, stats)
2498  * called from gld_start to loopback a packet when in promiscuous mode
2499  *
2500  * VLAN 0's statistics need to be updated. If stats is not NULL,
2501  * it needs to be updated as well.
2502  */
2503 static void
2504 gld_precv(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t vtag,
2505     struct gld_stats *stats)
2506 {
2507 	gld_mac_pvt_t *mac_pvt;
2508 	gld_interface_t *ifp;
2509 	pktinfo_t pktinfo;
2510 
2511 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
2512 
2513 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2514 	ifp = mac_pvt->interfacep;
2515 
2516 	/*
2517 	 * call the media specific packet interpreter routine
2518 	 */
2519 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RXLOOP) != 0) {
2520 		freemsg(mp);
2521 		BUMP(mac_pvt->statistics, stats, glds_rcvbadinterp, 1);
2522 #ifdef GLD_DEBUG
2523 		if (gld_debug & GLDERRS)
2524 			cmn_err(CE_WARN,
2525 			    "gld_precv: interpreter failed");
2526 #endif
2527 		return;
2528 	}
2529 
2530 	/*
2531 	 * Update the vtag information.
2532 	 */
2533 	pktinfo.isTagged = (vtag != VLAN_VID_NONE);
2534 	pktinfo.vid = GLD_VTAG_VID(vtag);
2535 	pktinfo.cfi = GLD_VTAG_CFI(vtag);
2536 	pktinfo.user_pri = GLD_VTAG_PRI(vtag);
2537 
2538 	gld_sendup(macinfo, &pktinfo, mp, gld_paccept);
2539 }
2540 
2541 /*
2542  * Called from gld_start_mdt to loopback packet(s) when in promiscuous mode.
2543  * Note that 'vlan' is always a physical link, because MDT can only be
2544  * enabled on non-VLAN streams.
2545  */
2546 /*ARGSUSED*/
2547 static void
2548 gld_precv_mdt(gld_mac_info_t *macinfo, gld_vlan_t *vlan, mblk_t *mp,
2549     pdesc_t *dl_pkt, pktinfo_t *pktinfo)
2550 {
2551 	mblk_t *adjmp;
2552 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2553 	gld_interface_t *ifp = mac_pvt->interfacep;
2554 
2555 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
2556 
2557 	/*
2558 	 * Get source/destination.
2559 	 */
2560 	(void) (*ifp->interpreter_mdt)(macinfo, mp, NULL, pktinfo,
2561 	    GLD_MDT_RXLOOP);
2562 	if ((adjmp = mmd_transform(dl_pkt)) != NULL)
2563 		gld_sendup(macinfo, pktinfo, adjmp, gld_paccept);
2564 }
2565 
2566 /*
2567  * gld_recv (macinfo, mp)
2568  * called with a MAC-level packet in an mblk; take the maclock,
2569  * try the ip4q and ip6q hack, and otherwise call gld_sendup.
2570  *
2571  * V0 drivers are already holding the mutex when they call us.
2572  */
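
/*
 * Illustrative driver-side usage (a sketch; buffer and length names are
 * hypothetical): a GLDv2 driver normally calls gld_recv() from its
 * interrupt handler once it has a complete MAC frame in an mblk:
 *
 *	if ((mp = allocb(len, BPRI_MED)) != NULL) {
 *		bcopy(rx_buffer, mp->b_wptr, len);
 *		mp->b_wptr += len;
 *		gld_recv(macinfo, mp);
 *	}
 *
 * A driver whose hardware strips the VLAN tag would instead reconstruct
 * the vtag and call gld_recv_tagged(macinfo, mp, vtag).
 */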
2573 void
2574 gld_recv(gld_mac_info_t *macinfo, mblk_t *mp)
2575 {
2576 	gld_recv_tagged(macinfo, mp, VLAN_VTAG_NONE);
2577 }
2578 
2579 void
2580 gld_recv_tagged(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t vtag)
2581 {
2582 	gld_mac_pvt_t *mac_pvt;
2583 	char pbuf[3*GLD_MAX_ADDRLEN];
2584 	pktinfo_t pktinfo;
2585 	gld_interface_t *ifp;
2586 	queue_t *ipq = NULL;
2587 	gld_vlan_t *vlan = NULL, *vlan0 = NULL, *vlann = NULL;
2588 	struct gld_stats *stats0, *stats = NULL;
2589 	uint32_t vid;
2590 	int err;
2591 
2592 	ASSERT(macinfo != NULL);
2593 	ASSERT(mp->b_datap->db_ref);
2594 
2595 	GLDM_LOCK(macinfo, RW_READER);
2596 
2597 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
2598 		/* We're probably being called from a leftover interrupt */
2599 		freemsg(mp);
2600 		goto done;
2601 	}
2602 
2603 	/*
2604 	 * If this packet is a VLAN-tagged packet, the kstats of the
2605 	 * corresponding "VLAN 0" should also be updated. We can directly
2606 	 * access VLAN 0's kstats from macinfo.
2607 	 *
2608 	 * Further, the packet needs to be passed to VLAN 0 if there is any
2609 	 * DLPI consumer on VLAN 0 interested in tagged packets (either
2610 	 * DL_PROMISC_SAP is on, or it is bound to the ETHERTYPE_VLAN SAP).
2611 	 */
2612 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
2613 	stats0 = mac_pvt->statistics;
2614 
2615 	vid = GLD_VTAG_VID(vtag);
2616 	vlan0 = gld_find_vlan(macinfo, VLAN_VID_NONE);
2617 	if (vid != VLAN_VID_NONE) {
2618 		/*
2619 		 * If there are no physical DLPI consumers interested in the
2620 		 * VLAN packet, clear vlan0.
2621 		 */
2622 		if ((vlan0 != NULL) && (vlan0->gldv_nvlan_sap == 0))
2623 			vlan0 = NULL;
2624 		/*
2625 		 * vlann is the VLAN with the same VID as the VLAN packet.
2626 		 */
2627 		vlann = gld_find_vlan(macinfo, vid);
2628 		if (vlann != NULL)
2629 			stats = vlann->gldv_stats;
2630 	}
2631 
2632 	vlan = (vid == VLAN_VID_NONE) ? vlan0 : vlann;
2633 
2634 	ifp = mac_pvt->interfacep;
2635 	err = (*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RXQUICK);
2636 
2637 	BUMP(stats0, stats, glds_bytercv64, pktinfo.pktLen);
2638 	BUMP(stats0, stats, glds_pktrcv64, 1);
2639 
2640 	if ((vlann == NULL) && (vlan0 == NULL)) {
2641 		freemsg(mp);
2642 		goto done;
2643 	}
2644 
2645 	/*
2646 	 * Check whether underlying media code supports the IPQ hack:
2647 	 *
2648 	 * - the interpreter could quickly parse the packet
2649 	 * - the device type supports IPQ (ethernet and IPoIB)
2650 	 * - there is one, and only one, IP stream bound (to this VLAN)
2651 	 * - that stream is a "fastpath" stream
2652 	 * - the packet is of type ETHERTYPE_IP or ETHERTYPE_IPV6
2653 	 * - there are no streams in promiscuous mode (on this VLAN)
2654 	 * - if this packet is tagged, there is no need to send this
2655 	 *   packet to physical streams
2656 	 */
2657 	if ((err != 0) && ((vlan != NULL) && (vlan->gldv_nprom == 0)) &&
2658 	    (vlan == vlan0 || vlan0 == NULL)) {
2659 		switch (pktinfo.ethertype) {
2660 		case ETHERTYPE_IP:
2661 			ipq = vlan->gldv_ipq;
2662 			break;
2663 		case ETHERTYPE_IPV6:
2664 			ipq = vlan->gldv_ipv6q;
2665 			break;
2666 		}
2667 	}
2668 
2669 	/*
2670 	 * Special case for IP; we can simply do the putnext here, if:
2671 	 * o The IPQ hack is possible (ipq != NULL).
2672 	 * o the packet is specifically for me, and therefore:
2673 	 * - the packet is not multicast or broadcast (fastpath only
2674 	 *   wants unicast packets).
2675 	 *
2676 	 * o the stream is not asserting flow control.
2677 	 */
2678 	if (ipq != NULL &&
2679 	    pktinfo.isForMe &&
2680 	    canputnext(ipq)) {
2681 		/*
2682 		 * Skip the mac header. We know there is no LLC1/SNAP header
2683 		 * in this packet
2684 		 */
2685 		mp->b_rptr += pktinfo.macLen;
2686 		putnext(ipq, mp);
2687 		goto done;
2688 	}
2689 
2690 	/*
2691 	 * call the media specific packet interpreter routine
2692 	 */
2693 	if ((*ifp->interpreter)(macinfo, mp, &pktinfo, GLD_RX) != 0) {
2694 		BUMP(stats0, stats, glds_rcvbadinterp, 1);
2695 #ifdef GLD_DEBUG
2696 		if (gld_debug & GLDERRS)
2697 			cmn_err(CE_WARN,
2698 			    "gld_recv_tagged: interpreter failed");
2699 #endif
2700 		freemsg(mp);
2701 		goto done;
2702 	}
2703 
2704 	/*
2705 	 * This is safe even if vtag is VLAN_VTAG_NONE
2706 	 */
2707 	pktinfo.vid = vid;
2708 	pktinfo.cfi = GLD_VTAG_CFI(vtag);
2709 #ifdef GLD_DEBUG
2710 	if (pktinfo.cfi != VLAN_CFI_ETHER)
2711 		cmn_err(CE_WARN, "gld_recv_tagged: non-ETHER CFI");
2712 #endif
2713 	pktinfo.user_pri = GLD_VTAG_PRI(vtag);
2714 	pktinfo.isTagged = (vtag != VLAN_VID_NONE);
2715 
2716 #ifdef GLD_DEBUG
2717 	if ((gld_debug & GLDRECV) &&
2718 	    (!(gld_debug & GLDNOBR) ||
2719 	    (!pktinfo.isBroadcast && !pktinfo.isMulticast))) {
2720 		char pbuf2[3*GLD_MAX_ADDRLEN];
2721 
2722 		cmn_err(CE_CONT, "gld_recv_tagged: machdr=<%s -> %s>\n",
2723 		    gld_macaddr_sprintf(pbuf, pktinfo.shost,
2724 		    macinfo->gldm_addrlen), gld_macaddr_sprintf(pbuf2,
2725 		    pktinfo.dhost, macinfo->gldm_addrlen));
2726 		cmn_err(CE_CONT, "gld_recv_tagged: VlanId %d UserPri %d\n",
2727 		    pktinfo.vid,
2728 		    pktinfo.user_pri);
2729 		cmn_err(CE_CONT, "gld_recv_tagged: ethertype: %4x Len: %4d "
2730 		    "Hdr: %d,%d isMulticast: %s\n",
2731 		    pktinfo.ethertype,
2732 		    pktinfo.pktLen,
2733 		    pktinfo.macLen,
2734 		    pktinfo.hdrLen,
2735 		    pktinfo.isMulticast ? "Y" : "N");
2736 	}
2737 #endif
2738 
2739 	gld_sendup(macinfo, &pktinfo, mp, gld_accept);
2740 
2741 done:
2742 	GLDM_UNLOCK(macinfo);
2743 }
2744 
2745 /* =================================================================== */
2746 /* receive group: called from gld_recv and gld_precv* with maclock held */
2747 /* =================================================================== */
2748 
2749 /*
2750  * Search all the streams attached to the specified VLAN looking for
2751  * those eligible to receive the packet.
2752  * Note that in order to avoid an extra dupmsg(), if this is the first
2753  * eligible stream, remember it (in fgldp) so that we can send up the
2754  * message after this function.
2755  *
2756  * Returns an errno on failure; currently the only error is ENOMEM.
2757  */
2758 static int
2759 gld_sendup_vlan(gld_vlan_t *vlan, pktinfo_t *pktinfo, mblk_t *mp,
2760     int (*acceptfunc)(), void (*send)(), int (*cansend)(), gld_t **fgldp)
2761 {
2762 	mblk_t *nmp;
2763 	gld_t *gld;
2764 	int err = 0;
2765 
2766 	ASSERT(vlan != NULL);
2767 	for (gld = vlan->gldv_str_next; gld != (gld_t *)&vlan->gldv_str_next;
2768 	    gld = gld->gld_next) {
2769 #ifdef GLD_VERBOSE_DEBUG
2770 		cmn_err(CE_NOTE, "gld_sendup: SAP: %4x QPTR: %p QSTATE: %s",
2771 		    gld->gld_sap, (void *)gld->gld_qptr,
2772 		    gld->gld_state == DL_IDLE ? "IDLE": "NOT IDLE");
2773 #endif
2774 		ASSERT(gld->gld_qptr != NULL);
2775 		ASSERT(gld->gld_state == DL_IDLE ||
2776 		    gld->gld_state == DL_UNBOUND);
2777 		ASSERT(gld->gld_vlan == vlan);
2778 
2779 		if (gld->gld_state != DL_IDLE)
2780 			continue;	/* not eligible to receive */
2781 		if (gld->gld_flags & GLD_STR_CLOSING)
2782 			continue;	/* not eligible to receive */
2783 
2784 #ifdef GLD_DEBUG
2785 		if ((gld_debug & GLDRECV) &&
2786 		    (!(gld_debug & GLDNOBR) ||
2787 		    (!pktinfo->isBroadcast && !pktinfo->isMulticast)))
2788 			cmn_err(CE_NOTE,
2789 			    "gld_sendup: queue sap: %4x promis: %s %s %s",
2790 			    gld->gld_sap,
2791 			    gld->gld_flags & GLD_PROM_PHYS ? "phys " : "     ",
2792 			    gld->gld_flags & GLD_PROM_SAP  ? "sap  " : "     ",
2793 			    gld->gld_flags & GLD_PROM_MULT ? "multi" : "     ");
2794 #endif
2795 
2796 		/*
2797 		 * The accept function differs depending on whether this is
2798 		 * a packet that we received from the wire or a loopback.
2799 		 */
2800 		if ((*acceptfunc)(gld, pktinfo)) {
2801 			/* sap matches */
2802 			pktinfo->wasAccepted = 1; /* known protocol */
2803 
2804 			if (!(*cansend)(gld->gld_qptr)) {
2805 				/*
2806 				 * The upper stream is not accepting messages
2807 				 * (it is flow controlled), so we forgo sending
2808 				 * the message up this stream.
2809 				 */
2810 #ifdef GLD_DEBUG
2811 				if (gld_debug & GLDETRACE)
2812 					cmn_err(CE_WARN,
2813 					    "gld_sendup: canput failed");
2814 #endif
2815 				BUMP(vlan->gldv_stats, NULL, glds_blocked, 1);
2816 				qenable(gld->gld_qptr);
2817 				continue;
2818 			}
2819 
2820 			/*
2821 			 * In order to avoid an extra dupmsg(), remember this
2822 			 * gld if this is the first eligible stream.
2823 			 */
2824 			if (*fgldp == NULL) {
2825 				*fgldp = gld;
2826 				continue;
2827 			}
2828 
2829 			/* duplicate the packet for this stream */
2830 			nmp = dupmsg(mp);
2831 			if (nmp == NULL) {
2832 				BUMP(vlan->gldv_stats, NULL,
2833 				    glds_gldnorcvbuf, 1);
2834 #ifdef GLD_DEBUG
2835 				if (gld_debug & GLDERRS)
2836 					cmn_err(CE_WARN,
2837 					    "gld_sendup: dupmsg failed");
2838 #endif
2839 				/* couldn't get resources; drop it */
2840 				err = ENOMEM;
2841 				break;
2842 			}
2843 			/* pass the message up the stream */
2844 			gld_passon(gld, nmp, pktinfo, send);
2845 		}
2846 	}
2847 	return (err);
2848 }
2849 
2850 /*
2851  * gld_sendup (macinfo, pktinfo, mp, acceptfunc)
2852  * called with an ethernet packet in an mblk; must decide whether
2853  * packet is for us and which streams to queue it to.
2854  */
2855 static void
2856 gld_sendup(gld_mac_info_t *macinfo, pktinfo_t *pktinfo,
2857     mblk_t *mp, int (*acceptfunc)())
2858 {
2859 	gld_t *fgld = NULL;
2860 	void (*send)(queue_t *qp, mblk_t *mp);
2861 	int (*cansend)(queue_t *qp);
2862 	gld_vlan_t *vlan0, *vlann = NULL;
2863 	struct gld_stats *stats0, *stats = NULL;
2864 	int err = 0;
2865 
2866 #ifdef GLD_DEBUG
2867 	if (gld_debug & GLDTRACE)
2868 		cmn_err(CE_NOTE, "gld_sendup(%p, %p)", (void *)mp,
2869 		    (void *)macinfo);
2870 #endif
2871 
2872 	ASSERT(mp != NULL);
2873 	ASSERT(macinfo != NULL);
2874 	ASSERT(pktinfo != NULL);
2875 	ASSERT(GLDM_LOCK_HELD(macinfo));
2876 
2877 	/*
2878 	 * Tagged packets should also be looped back (transmit side)
2879 	 * or sent up (receive side) to VLAN 0 if VLAN 0 is set to
2880 	 * DL_PROMISC_SAP or there is any DLPI consumer bound to the
2881 	 * ETHERTYPE_VLAN SAP. The kstats of VLAN 0 need to be updated
2882 	 * as well.
2883 	 */
2884 	stats0 = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->statistics;
2885 	vlan0 = gld_find_vlan(macinfo, VLAN_VID_NONE);
2886 	if (pktinfo->vid != VLAN_VID_NONE) {
2887 		if ((vlan0 != NULL) && (vlan0->gldv_nvlan_sap == 0))
2888 			vlan0 = NULL;
2889 		vlann = gld_find_vlan(macinfo, pktinfo->vid);
2890 		if (vlann != NULL)
2891 			stats = vlann->gldv_stats;
2892 	}
2893 
2894 	ASSERT((vlan0 != NULL) || (vlann != NULL));
2895 
2896 	/*
2897 	 * The "fast" in "GLDOPT_FAST_RECV" refers to the speed at which
2898 	 * gld_recv returns to the caller's interrupt routine.  The total
2899 	 * network throughput would normally be lower when selecting this
2900 	 * option, because we putq the messages and process them later,
2901 	 * instead of sending them with putnext now.  Some time-critical
2902 	 * devices might need this, so it's here but undocumented.
2903 	 */
2904 	if (macinfo->gldm_options & GLDOPT_FAST_RECV) {
2905 		send = (void (*)(queue_t *, mblk_t *))putq;
2906 		cansend = canput;
2907 	} else {
2908 		send = (void (*)(queue_t *, mblk_t *))putnext;
2909 		cansend = canputnext;
2910 	}
2911 
2912 	/*
2913 	 * Send the packets for all eligible streams.
2914 	 */
2915 	if (vlan0 != NULL) {
2916 		err = gld_sendup_vlan(vlan0, pktinfo, mp, acceptfunc, send,
2917 		    cansend, &fgld);
2918 	}
2919 	if ((err == 0) && (vlann != NULL)) {
2920 		err = gld_sendup_vlan(vlann, pktinfo, mp, acceptfunc, send,
2921 		    cansend, &fgld);
2922 	}
2923 
2924 	ASSERT(mp);
2925 	/* send the original dup of the packet up the first stream found */
2926 	if (fgld)
2927 		gld_passon(fgld, mp, pktinfo, send);
2928 	else
2929 		freemsg(mp);	/* no streams matched */
2930 
2931 	/* We do not count looped back packets */
2932 	if (acceptfunc == gld_paccept)
2933 		return;		/* transmit loopback case */
2934 
2935 	if (pktinfo->isBroadcast)
2936 		BUMP(stats0, stats, glds_brdcstrcv, 1);
2937 	else if (pktinfo->isMulticast)
2938 		BUMP(stats0, stats, glds_multircv, 1);
2939 
2940 	/* No stream accepted this packet */
2941 	if (!pktinfo->wasAccepted)
2942 		BUMP(stats0, stats, glds_unknowns, 1);
2943 }
2944 
2945 #define	GLD_IS_PHYS(gld)	\
2946 	(((gld_vlan_t *)gld->gld_vlan)->gldv_id == VLAN_VID_NONE)
2947 
2948 /*
2949  * A packet matches a stream if:
2950  *      The stream's VLAN id is the same as the one in the packet.
2951  *  and the stream accepts EtherType encoded packets and the type matches
2952  *  or  the stream accepts LLC packets and the packet is an LLC packet
2953  */
2954 #define	MATCH(stream, pktinfo) \
2955 	((((gld_vlan_t *)stream->gld_vlan)->gldv_id == pktinfo->vid) && \
2956 	((stream->gld_ethertype && stream->gld_sap == pktinfo->ethertype) || \
2957 	(!stream->gld_ethertype && pktinfo->isLLC)))
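
/*
 * Worked example (illustrative): a stream bound to SAP 0x0800 with
 * gld_ethertype set matches an IPv4 packet (pktinfo->ethertype == 0x0800)
 * carrying the stream's VLAN id, while a stream bound with gld_ethertype
 * clear (an LLC listener) instead matches packets the interpreter marked
 * as isLLC.
 */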
2958 
2959 /*
2960  * This function validates a packet for sending up a particular
2961  * stream. The message header has been parsed and its characteristics
2962  * are recorded in the pktinfo data structure. The STREAMS stack info
2963  * is presented in the gld data structures.
2964  */
2965 static int
2966 gld_accept(gld_t *gld, pktinfo_t *pktinfo)
2967 {
2968 	/*
2969 	 * if there is no match do not bother checking further.
2970 	 * Note that it is okay to examine gld_vlan because
2971 	 * macinfo->gldm_lock is held.
2972 	 *
2973 	 * Because all tagged packets have SAP value ETHERTYPE_VLAN,
2974 	 * these packets will pass the SAP filter check if the stream
2975 	 * is an ETHERTYPE_VLAN listener.
2976 	 */
2977 	if ((!MATCH(gld, pktinfo) && !(gld->gld_flags & GLD_PROM_SAP) &&
2978 	    !(GLD_IS_PHYS(gld) && gld->gld_sap == ETHERTYPE_VLAN &&
2979 	    pktinfo->isTagged)))
2980 		return (0);
2981 
2982 	/*
2983 	 * We don't accept any packet from the hardware if we originated it.
2984 	 * (Contrast gld_paccept, the send-loopback accept function.)
2985 	 */
2986 	if (pktinfo->isLooped)
2987 		return (0);
2988 
2989 	/*
2990 	 * If the packet is broadcast or sent to us directly we will accept it.
2991 	 * Also we will accept multicast packets requested by the stream.
2992 	 */
2993 	if (pktinfo->isForMe || pktinfo->isBroadcast ||
2994 	    gld_mcmatch(gld, pktinfo))
2995 		return (1);
2996 
2997 	/*
2998 	 * Finally, accept anything else if we're in promiscuous mode
2999 	 */
3000 	if (gld->gld_flags & GLD_PROM_PHYS)
3001 		return (1);
3002 
3003 	return (0);
3004 }
3005 
3006 /*
3007  * Return TRUE if the given multicast address is one
3008  * of those that this particular Stream is interested in.
3009  */
3010 static int
3011 gld_mcmatch(gld_t *gld, pktinfo_t *pktinfo)
3012 {
3013 	/*
3014 	 * Return FALSE if not a multicast address.
3015 	 */
3016 	if (!pktinfo->isMulticast)
3017 		return (0);
3018 
3019 	/*
3020 	 * Check if all multicasts have been enabled for this Stream
3021 	 */
3022 	if (gld->gld_flags & GLD_PROM_MULT)
3023 		return (1);
3024 
3025 	/*
3026 	 * Return FALSE if no multicast addresses enabled for this Stream.
3027 	 */
3028 	if (!gld->gld_mcast)
3029 		return (0);
3030 
3031 	/*
3032 	 * Otherwise, look for it in the table.
3033 	 */
3034 	return (gld_multicast(pktinfo->dhost, gld));
3035 }
3036 
3037 /*
3038  * gld_multicast determines if the address is a multicast address for
3039  * this stream.
3040  */
3041 static int
3042 gld_multicast(unsigned char *macaddr, gld_t *gld)
3043 {
3044 	int i;
3045 
3046 	ASSERT(GLDM_LOCK_HELD(gld->gld_mac_info));
3047 
3048 	if (!gld->gld_mcast)
3049 		return (0);
3050 
3051 	for (i = 0; i < gld->gld_multicnt; i++) {
3052 		if (gld->gld_mcast[i]) {
3053 			ASSERT(gld->gld_mcast[i]->gldm_refcnt);
3054 			if (mac_eq(gld->gld_mcast[i]->gldm_addr, macaddr,
3055 			    gld->gld_mac_info->gldm_addrlen))
3056 				return (1);
3057 		}
3058 	}
3059 
3060 	return (0);
3061 }
3062 
3063 /*
3064  * accept function for looped back packets
3065  */
3066 static int
3067 gld_paccept(gld_t *gld, pktinfo_t *pktinfo)
3068 {
3069 	/*
3070 	 * Note that it is okay to examine gld_vlan because macinfo->gldm_lock
3071 	 * is held.
3072 	 *
3073 	 * If a stream is an ETHERTYPE_VLAN listener, it must
3074 	 * accept all tagged packets as those packets have SAP value
3075 	 * ETHERTYPE_VLAN.
3076 	 */
3077 	return (gld->gld_flags & GLD_PROM_PHYS &&
3078 	    (MATCH(gld, pktinfo) || gld->gld_flags & GLD_PROM_SAP ||
3079 	    (GLD_IS_PHYS(gld) && gld->gld_sap == ETHERTYPE_VLAN &&
3080 	    pktinfo->isTagged)));
3081 
3082 }
3083 
3084 static void
3085 gld_passon(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo,
3086 	void (*send)(queue_t *qp, mblk_t *mp))
3087 {
3088 	boolean_t is_phys = GLD_IS_PHYS(gld);
3089 	int skiplen;
3090 	boolean_t addtag = B_FALSE;
3091 	uint32_t vtag = 0;
3092 
3093 #ifdef GLD_DEBUG
3094 	if (gld_debug & GLDTRACE)
3095 		cmn_err(CE_NOTE, "gld_passon(%p, %p, %p)", (void *)gld,
3096 		    (void *)mp, (void *)pktinfo);
3097 
3098 	if ((gld_debug & GLDRECV) && (!(gld_debug & GLDNOBR) ||
3099 	    (!pktinfo->isBroadcast && !pktinfo->isMulticast)))
3100 		cmn_err(CE_NOTE, "gld_passon: q: %p mblk: %p minor: %d sap: %x",
3101 		    (void *)gld->gld_qptr->q_next, (void *)mp, gld->gld_minor,
3102 		    gld->gld_sap);
3103 #endif
3104 	/*
3105 	 * Figure out how much of the packet header to throw away.
3106 	 *
3107 	 * Normal DLPI (non RAW/FAST) streams also want the
3108 	 * DL_UNITDATA_IND M_PROTO message block prepended to the M_DATA.
3109 	 */
3110 	if (gld->gld_flags & GLD_RAW) {
3111 		/*
3112 		 * The packet will be tagged in the following cases:
3113 		 *   - the priority is not 0
3114 		 *   - it is a tagged packet being passed up a physical stream
3115 		 */
3116 		if ((pktinfo->isTagged && is_phys) || (pktinfo->user_pri != 0))
3117 			addtag = B_TRUE;
3118 		skiplen = 0;
3119 	} else {
3120 		/*
3121 		 * The packet will be tagged if all of the following hold:
3122 		 *   -  this is a physical stream
3123 		 *   -  this packet is a tagged packet
3124 		 *   -  the stream is either a DL_PROMISC_SAP listener or an
3125 		 *	ETHERTYPE_VLAN listener
3126 		 */
3127 		if (is_phys && pktinfo->isTagged &&
3128 		    ((gld->gld_sap == ETHERTYPE_VLAN) ||
3129 		    (gld->gld_flags & GLD_PROM_SAP))) {
3130 			addtag = B_TRUE;
3131 		}
3132 
3133 		skiplen = pktinfo->macLen;		/* skip mac header */
3134 		if (gld->gld_ethertype)
3135 			skiplen += pktinfo->hdrLen;	/* skip any extra */
3136 	}
3137 	if (skiplen >= pktinfo->pktLen) {
3138 		/*
3139 		 * If the interpreter did its job right, then it cannot be
3140 		 * asking us to skip more bytes than are in the packet!
3141 		 * However, there could be zero data bytes left after the
3142 		 * amount to skip.  DLPI specifies that passed M_DATA blocks
3143 		 * should contain at least one byte of data, so if we have
3144 		 * none we just drop it.
3145 		 */
3146 		ASSERT(!(skiplen > pktinfo->pktLen));
3147 		freemsg(mp);
3148 		return;
3149 	}
3150 
3151 	if (addtag) {
3152 		mblk_t *savemp = mp;
3153 
3154 		vtag = GLD_MAKE_VTAG(pktinfo->user_pri, pktinfo->cfi,
3155 		    is_phys ? pktinfo->vid : VLAN_VID_NONE);
3156 		if ((mp = gld_insert_vtag_ether(mp, vtag)) == NULL) {
3157 			freemsg(savemp);
3158 			return;
3159 		}
3160 	}
3161 
3162 	/*
3163 	 * Skip over the header(s), taking care to possibly handle message
3164 	 * fragments shorter than the amount we need to skip.  Hopefully
3165 	 * the driver will put the entire packet, or at least the entire
3166 	 * header, into a single message block.  But we handle it if not.
3167 	 */
3168 	while (skiplen >= MBLKL(mp)) {
3169 		mblk_t *savemp = mp;
3170 		skiplen -= MBLKL(mp);
3171 		mp = mp->b_cont;
3172 		ASSERT(mp != NULL);	/* because skiplen < pktinfo->pktLen */
3173 		freeb(savemp);
3174 	}
3175 	mp->b_rptr += skiplen;
3176 
3177 	/* Add M_PROTO if necessary, and pass upstream */
3178 	if (((gld->gld_flags & GLD_FAST) && !pktinfo->isMulticast &&
3179 	    !pktinfo->isBroadcast) || (gld->gld_flags & GLD_RAW)) {
3180 		/* RAW/FAST: just send up the M_DATA */
3181 		(*send)(gld->gld_qptr, mp);
3182 	} else {
3183 		/* everybody else wants to see a unitdata_ind structure */
3184 		mp = gld_addudind(gld, mp, pktinfo, addtag);
3185 		if (mp)
3186 			(*send)(gld->gld_qptr, mp);
3187 		/* if it failed, gld_addudind already bumped statistic */
3188 	}
3189 }
3190 
3191 /*
3192  * gld_addudind(gld, mp, pktinfo)
3193  * format a DL_UNITDATA_IND message to be sent upstream to the user
3194  */
3195 static mblk_t *
3196 gld_addudind(gld_t *gld, mblk_t *mp, pktinfo_t *pktinfo, boolean_t tagged)
3197 {
3198 	gld_mac_info_t		*macinfo = gld->gld_mac_info;
3199 	gld_vlan_t		*vlan = (gld_vlan_t *)gld->gld_vlan;
3200 	dl_unitdata_ind_t	*dludindp;
3201 	mblk_t			*nmp;
3202 	int			size;
3203 	int			type;
3204 
3205 #ifdef GLD_DEBUG
3206 	if (gld_debug & GLDTRACE)
3207 		cmn_err(CE_NOTE, "gld_addudind(%p, %p, %p)", (void *)gld,
3208 		    (void *)mp, (void *)pktinfo);
3209 #endif
3210 	ASSERT(macinfo != NULL);
3211 
3212 	/*
3213 	 * Allocate the DL_UNITDATA_IND M_PROTO header; if allocation fails
3214 	 * we might as well discard the packet since we can't go further.
3215 	 */
3216 	size = sizeof (dl_unitdata_ind_t) +
3217 	    2 * (macinfo->gldm_addrlen + abs(macinfo->gldm_saplen));
3218 	if ((nmp = allocb(size, BPRI_MED)) == NULL) {
3219 		freemsg(mp);
3220 		BUMP(vlan->gldv_stats, NULL, glds_gldnorcvbuf, 1);
3221 #ifdef GLD_DEBUG
3222 		if (gld_debug & GLDERRS)
3223 			cmn_err(CE_WARN,
3224 			    "gld_addudind: allocb failed");
3225 #endif
3226 		return ((mblk_t *)NULL);
3227 	}
3228 	DB_TYPE(nmp) = M_PROTO;
3229 	nmp->b_rptr = nmp->b_datap->db_lim - size;
3230 
3231 	if (tagged)
3232 		type = ETHERTYPE_VLAN;
3233 	else
3234 		type = (gld->gld_ethertype) ? pktinfo->ethertype : 0;
3235 
3236 
3237 	/*
3238 	 * now setup the DL_UNITDATA_IND header
3239 	 *
3240 	 * XXX This looks broken if the saps aren't two bytes.
3241 	 */
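	/*
	 * For illustration, with Ethernet-style values (gldm_addrlen == 6,
	 * gldm_saplen == -2) the M_PROTO block built below is laid out as
	 *
	 *	[dl_unitdata_ind_t][dhost(6)][sap(2)][shost(6)][sap(2)]
	 *
	 * so dl_dest_addr_length == dl_src_addr_length == 8, the destination
	 * address starts at dl_dest_addr_offset (== sizeof (dl_unitdata_ind_t))
	 * and the source address immediately follows it; the M_DATA block(s)
	 * holding the payload are then linked on with linkb().
	 */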
3242 	dludindp = (dl_unitdata_ind_t *)nmp->b_rptr;
3243 	dludindp->dl_primitive = DL_UNITDATA_IND;
3244 	dludindp->dl_src_addr_length =
3245 	    dludindp->dl_dest_addr_length = macinfo->gldm_addrlen +
3246 	    abs(macinfo->gldm_saplen);
3247 	dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t);
3248 	dludindp->dl_src_addr_offset = dludindp->dl_dest_addr_offset +
3249 	    dludindp->dl_dest_addr_length;
3250 
3251 	dludindp->dl_group_address = (pktinfo->isMulticast ||
3252 	    pktinfo->isBroadcast);
3253 
3254 	nmp->b_wptr = nmp->b_rptr + dludindp->dl_dest_addr_offset;
3255 
3256 	mac_copy(pktinfo->dhost, nmp->b_wptr, macinfo->gldm_addrlen);
3257 	nmp->b_wptr += macinfo->gldm_addrlen;
3258 
3259 	ASSERT(macinfo->gldm_saplen == -2);	/* XXX following code assumes */
3260 	*(ushort_t *)(nmp->b_wptr) = type;
3261 	nmp->b_wptr += abs(macinfo->gldm_saplen);
3262 
3263 	ASSERT(nmp->b_wptr == nmp->b_rptr + dludindp->dl_src_addr_offset);
3264 
3265 	mac_copy(pktinfo->shost, nmp->b_wptr, macinfo->gldm_addrlen);
3266 	nmp->b_wptr += macinfo->gldm_addrlen;
3267 
3268 	*(ushort_t *)(nmp->b_wptr) = type;
3269 	nmp->b_wptr += abs(macinfo->gldm_saplen);
3270 
3271 	if (pktinfo->nosource)
3272 		dludindp->dl_src_addr_offset = dludindp->dl_src_addr_length = 0;
3273 	linkb(nmp, mp);
3274 	return (nmp);
3275 }
3276 
3277 /* ======================================================= */
3278 /* wsrv group: called from wsrv, single threaded per queue */
3279 /* ======================================================= */
3280 
3281 /*
3282  * We go to some trouble to avoid taking the same lock during normal
3283  * transmit processing as we do during normal receive processing.
3284  *
3285  * Elements of the per-instance macinfo and per-stream gld_t structures
3286  * are for the most part protected by the GLDM_LOCK rwlock/mutex.
3287  * (Elements of the gld_mac_pvt_t structure are considered part of the
3288  * macinfo structure for purposes of this discussion).
3289  *
3290  * However, it is more complicated than that:
3291  *
3292  *	Elements of the macinfo structure that are set before the macinfo
3293  *	structure is added to its device list by gld_register(), and never
3294  *	thereafter modified, are accessed without requiring taking the lock.
3295  *	A similar rule applies to those elements of the gld_t structure that
3296  *	are written by gld_open() before the stream is added to any list.
3297  *
3298  *	Most other elements of the macinfo structure may only be read or
3299  *	written while holding the maclock.
3300  *
3301  *	Most writable elements of the gld_t structure are written only
3302  *	within the single-threaded domain of wsrv() and subsidiaries.
3303  *	(This domain includes open/close while qprocs are not on.)
3304  *	The maclock need not be taken while within that domain
3305  *	simply to read those elements.  Writing to them, even within
3306  *	that domain, or reading from it outside that domain, requires
3307  *	holding the maclock.  Exception:  if the stream is not
3308  *	presently attached to a PPA, there is no associated macinfo,
3309  *	and no maclock need be taken.
3310  *
3311  *	The curr_macaddr element of the mac private structure is also
3312  *      protected by the GLDM_LOCK rwlock/mutex, like most other members
3313  *      of that structure. However, there are a few instances in the
3314  *      transmit path where we choose to forgo lock protection when
3315  *      reading this variable. This is to avoid lock contention between
3316  *      threads executing the DL_UNITDATA_REQ case and receive threads.
3317  *      In doing so we take a small risk of a few corrupted packets
3318  *      during the short and rare times when someone is changing the interface's
3319  *      physical address. We consider the small cost in this rare case to be
3320  *      worth the benefit of reduced lock contention under normal operating
3321  *      conditions. The risk/cost is small because:
3322  *          1. there is no guarantee at this layer of uncorrupted delivery.
3323  *          2. the physaddr doesn't change very often - no performance hit.
3324  *          3. if the physaddr changes, other stuff is going to be screwed
3325  *             up for a while anyway, while other sites refigure ARP, etc.,
3326  *             so losing a couple of packets is the least of our worries.
3327  *
3328  *	The list of streams associated with a macinfo is protected by
3329  *	two locks:  the per-macinfo maclock, and the per-major-device
3330  *	gld_devlock.  Both must be held to modify the list, but either
3331  *	may be held to protect the list during reading/traversing.  This
3332  *	allows independent locking for multiple instances in the receive
3333  *	path (using macinfo), while facilitating routines that must search
3334  *	the entire set of streams associated with a major device, such as
3335  *	gld_findminor(), gld_finddevinfo(), close().  The "nstreams"
3336  *	macinfo	element, and the gld_mac_info gld_t element, are similarly
3337  *	macinfo element, and the gld_mac_info gld_t element, are similarly
3338  *	protected, since they change at exactly the same time the macinfo
3339  *	streams list does.
3340  *	The list of macinfo structures associated with a major device
3341  *	structure is protected by the gld_devlock, as is the per-major
3342  *	list of Style 2 streams in the DL_UNATTACHED state.
3343  *
3344  *	The list of major devices is kept on a module-global list
3345  *	gld_device_list, which has its own lock to protect the list.
3346  *
3347  *	When it is necessary to hold more than one lock at a time, they
3348  *	are acquired in this "outside in" order:
3349  *		gld_device_list.gld_devlock
3350  *		glddev->gld_devlock
3351  *		GLDM_LOCK(macinfo)
3352  *
3353  *	Finally, there are some "volatile" elements of the gld_t structure
3354  *	used for synchronization between various routines that don't share
3355  *	the same mutexes.  See the routines for details.  These are:
3356  *		gld_xwait	between gld_wsrv() and gld_sched()
3357  *		gld_sched_ran	between gld_wsrv() and gld_sched()
3358  *		gld_in_unbind	between gld_wput() and wsrv's gld_unbind()
3359  *		gld_wput_count	between gld_wput() and wsrv's gld_unbind()
3360  *		gld_in_wsrv	between gld_wput() and gld_wsrv()
3361  *				(used in conjunction with q->q_first)
3362  */
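
/*
 * Illustrative sketch (not an actual GLD routine): the "outside in"
 * lock ordering described above, as it might look in a routine that
 * walks from the module-global device list down to one macinfo.
 * Everything other than the lock names themselves is hypothetical.
 *
 *	mutex_enter(&gld_device_list.gld_devlock);
 *	mutex_enter(&glddev->gld_devlock);
 *	GLDM_LOCK(macinfo, RW_READER);
 *	... examine the stream list, counts, etc. ...
 *	GLDM_UNLOCK(macinfo);
 *	mutex_exit(&glddev->gld_devlock);
 *	mutex_exit(&gld_device_list.gld_devlock);
 */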
3363 
3364 /*
3365  * gld_ioctl (q, mp)
3366  * handles all ioctl requests passed downstream. This routine is
3367  * passed a pointer to the message block with the ioctl request in it, and a
3368  * pointer to the queue so it can respond to the ioctl request with an ack.
3369  */
3370 int
3371 gld_ioctl(queue_t *q, mblk_t *mp)
3372 {
3373 	struct iocblk *iocp;
3374 	gld_t *gld;
3375 	gld_mac_info_t *macinfo;
3376 
3377 #ifdef GLD_DEBUG
3378 	if (gld_debug & GLDTRACE)
3379 		cmn_err(CE_NOTE, "gld_ioctl(%p %p)", (void *)q, (void *)mp);
3380 #endif
3381 	gld = (gld_t *)q->q_ptr;
3382 	iocp = (struct iocblk *)mp->b_rptr;
3383 	switch (iocp->ioc_cmd) {
3384 	case DLIOCRAW:		/* raw M_DATA mode */
3385 		gld->gld_flags |= GLD_RAW;
3386 		DB_TYPE(mp) = M_IOCACK;
3387 		qreply(q, mp);
3388 		break;
3389 
3390 	case DL_IOC_HDR_INFO:	/* fastpath */
3391 		/*
3392 		 * DL_IOC_HDR_INFO should only come from IP. The one
3393 		 * initiated from user-land should not be allowed.
3394 		 */
3395 		if ((gld_global_options & GLD_OPT_NO_FASTPATH) ||
3396 		    (iocp->ioc_cr != kcred)) {
3397 			miocnak(q, mp, 0, EINVAL);
3398 			break;
3399 		}
3400 		gld_fastpath(gld, q, mp);
3401 		break;
3402 
3403 	case DLIOCMARGININFO: {	/* margin size */
3404 		int err;
3405 
3406 		if ((macinfo = gld->gld_mac_info) == NULL) {
3407 			miocnak(q, mp, 0, EINVAL);
3408 			break;
3409 		}
3410 
3411 		if ((err = miocpullup(mp, sizeof (uint32_t))) != 0) {
3412 			miocnak(q, mp, 0, err);
3413 			break;
3414 		}
3415 
3416 		*((uint32_t *)mp->b_cont->b_rptr) = macinfo->gldm_margin;
3417 		miocack(q, mp, sizeof (uint32_t), 0);
3418 		break;
3419 	}
3420 	default:
3421 		macinfo = gld->gld_mac_info;
3422 		if (macinfo == NULL || macinfo->gldm_ioctl == NULL) {
3423 			miocnak(q, mp, 0, EINVAL);
3424 			break;
3425 		}
3426 
3427 		GLDM_LOCK(macinfo, RW_WRITER);
3428 		(void) (*macinfo->gldm_ioctl) (macinfo, q, mp);
3429 		GLDM_UNLOCK(macinfo);
3430 		break;
3431 	}
3432 	return (0);
3433 }
3434 
3435 /*
3436  * Since the rules for "fastpath" mode don't seem to be documented
3437  * anywhere, I will describe GLD's rules for fastpath users here:
3438  *
3439  * Once in this mode you remain there until close.
3440  * If you unbind/rebind you should get a new header using DL_IOC_HDR_INFO.
3441  * You must be bound (DL_IDLE) to transmit.
3442  * There are other rules not listed above.
3443  */
3444 static void
3445 gld_fastpath(gld_t *gld, queue_t *q, mblk_t *mp)
3446 {
3447 	gld_interface_t *ifp;
3448 	gld_mac_info_t *macinfo;
3449 	dl_unitdata_req_t *dludp;
3450 	mblk_t *nmp;
3451 	t_scalar_t off, len;
3452 	uint_t maclen;
3453 	int error;
3454 
3455 	if (gld->gld_state != DL_IDLE) {
3456 		miocnak(q, mp, 0, EINVAL);
3457 		return;
3458 	}
3459 
3460 	macinfo = gld->gld_mac_info;
3461 	ASSERT(macinfo != NULL);
3462 	maclen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen);
3463 
3464 	error = miocpullup(mp, sizeof (dl_unitdata_req_t) + maclen);
3465 	if (error != 0) {
3466 		miocnak(q, mp, 0, error);
3467 		return;
3468 	}
3469 
3470 	dludp = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
3471 	off = dludp->dl_dest_addr_offset;
3472 	len = dludp->dl_dest_addr_length;
3473 	if (dludp->dl_primitive != DL_UNITDATA_REQ ||
3474 	    !MBLKIN(mp->b_cont, off, len) || len != maclen) {
3475 		miocnak(q, mp, 0, EINVAL);
3476 		return;
3477 	}
3478 
3479 	/*
3480 	 * We take his fastpath request as a declaration that he will accept
3481 	 * M_DATA messages from us, whether or not we are willing to accept
3482 	 * them from him.  This allows us to have fastpath in one direction
3483 	 * (flow upstream) even on media with Source Routing, where we are
3484 	 * unable to provide a fixed MAC header to be prepended to downstream
3485 	 * flowing packets.  So we set GLD_FAST whether or not we decide to
3486 	 * allow him to send M_DATA down to us.
3487 	 */
3488 	GLDM_LOCK(macinfo, RW_WRITER);
3489 	gld->gld_flags |= GLD_FAST;
3490 	GLDM_UNLOCK(macinfo);
3491 
3492 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
3493 
3494 	/* This will fail for Source Routing media */
3495 	/* Also on Ethernet on 802.2 SAPs */
3496 	if ((nmp = (*ifp->mkfastpath)(gld, mp)) == NULL) {
3497 		miocnak(q, mp, 0, ENOMEM);
3498 		return;
3499 	}
3500 
3501 	/*
3502 	 * Link new mblk in after the "request" mblks.
3503 	 */
3504 	linkb(mp, nmp);
3505 	miocack(q, mp, msgdsize(mp->b_cont), 0);
3506 }
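
/*
 * Illustrative sketch of the exchange handled above (layout only; the
 * sizes assume Ethernet-style addressing with gldm_saplen == -2):
 *
 *   IP sends down M_IOCTL(DL_IOC_HDR_INFO) with b_cont holding a
 *   dl_unitdata_req_t whose destination address consists of
 *   gldm_addrlen bytes of MAC address followed by a 2-byte SAP.
 *
 *   GLD replies with M_IOCACK, b_cont now being the chain
 *	[ original dl_unitdata_req_t mblk ] -> [ template MAC header mblk ]
 *
 *   The caller may thereafter prepend a copy of the template header to
 *   each outbound packet and send it down as M_DATA, bypassing
 *   DL_UNITDATA_REQ processing.
 */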
3507 
3508 /*
3509  * gld_cmds (q, mp)
3510  *	process the DL commands as defined in dlpi.h
3511  *	note that the primitives return status which is passed back
3512  *	to the service procedure.  If the value is GLDE_RETRY, then
3513  *	it is assumed that processing must stop and the primitive has
3514  *	been put back onto the queue.  If the value is any other error,
3515  *	then an error ack is generated by the service procedure.
3516  */
3517 static int
3518 gld_cmds(queue_t *q, mblk_t *mp)
3519 {
3520 	union DL_primitives *dlp = (union DL_primitives *)mp->b_rptr;
3521 	gld_t *gld = (gld_t *)(q->q_ptr);
3522 	int result = DL_BADPRIM;
3523 	int mblkl = MBLKL(mp);
3524 	t_uscalar_t dlreq;
3525 
3526 	/* Make sure we have at least dlp->dl_primitive */
3527 	if (mblkl < sizeof (dlp->dl_primitive))
3528 		return (DL_BADPRIM);
3529 
3530 	dlreq = dlp->dl_primitive;
3531 #ifdef	GLD_DEBUG
3532 	if (gld_debug & GLDTRACE)
3533 		cmn_err(CE_NOTE,
3534 		    "gld_cmds(%p, %p):dlp=%p, dlp->dl_primitive=%d",
3535 		    (void *)q, (void *)mp, (void *)dlp, dlreq);
3536 #endif
3537 
3538 	switch (dlreq) {
3539 	case DL_UDQOS_REQ:
3540 		if (mblkl < DL_UDQOS_REQ_SIZE)
3541 			break;
3542 		result = gld_udqos(q, mp);
3543 		break;
3544 
3545 	case DL_BIND_REQ:
3546 		if (mblkl < DL_BIND_REQ_SIZE)
3547 			break;
3548 		result = gld_bind(q, mp);
3549 		break;
3550 
3551 	case DL_UNBIND_REQ:
3552 		if (mblkl < DL_UNBIND_REQ_SIZE)
3553 			break;
3554 		result = gld_unbind(q, mp);
3555 		break;
3556 
3557 	case DL_UNITDATA_REQ:
3558 		if (mblkl < DL_UNITDATA_REQ_SIZE)
3559 			break;
3560 		result = gld_unitdata(q, mp);
3561 		break;
3562 
3563 	case DL_INFO_REQ:
3564 		if (mblkl < DL_INFO_REQ_SIZE)
3565 			break;
3566 		result = gld_inforeq(q, mp);
3567 		break;
3568 
3569 	case DL_ATTACH_REQ:
3570 		if (mblkl < DL_ATTACH_REQ_SIZE)
3571 			break;
3572 		if (gld->gld_style == DL_STYLE2)
3573 			result = gldattach(q, mp);
3574 		else
3575 			result = DL_NOTSUPPORTED;
3576 		break;
3577 
3578 	case DL_DETACH_REQ:
3579 		if (mblkl < DL_DETACH_REQ_SIZE)
3580 			break;
3581 		if (gld->gld_style == DL_STYLE2)
3582 			result = gldunattach(q, mp);
3583 		else
3584 			result = DL_NOTSUPPORTED;
3585 		break;
3586 
3587 	case DL_ENABMULTI_REQ:
3588 		if (mblkl < DL_ENABMULTI_REQ_SIZE)
3589 			break;
3590 		result = gld_enable_multi(q, mp);
3591 		break;
3592 
3593 	case DL_DISABMULTI_REQ:
3594 		if (mblkl < DL_DISABMULTI_REQ_SIZE)
3595 			break;
3596 		result = gld_disable_multi(q, mp);
3597 		break;
3598 
3599 	case DL_PHYS_ADDR_REQ:
3600 		if (mblkl < DL_PHYS_ADDR_REQ_SIZE)
3601 			break;
3602 		result = gld_physaddr(q, mp);
3603 		break;
3604 
3605 	case DL_SET_PHYS_ADDR_REQ:
3606 		if (mblkl < DL_SET_PHYS_ADDR_REQ_SIZE)
3607 			break;
3608 		result = gld_setaddr(q, mp);
3609 		break;
3610 
3611 	case DL_PROMISCON_REQ:
3612 		if (mblkl < DL_PROMISCON_REQ_SIZE)
3613 			break;
3614 		result = gld_promisc(q, mp, dlreq, B_TRUE);
3615 		break;
3616 
3617 	case DL_PROMISCOFF_REQ:
3618 		if (mblkl < DL_PROMISCOFF_REQ_SIZE)
3619 			break;
3620 		result = gld_promisc(q, mp, dlreq, B_FALSE);
3621 		break;
3622 
3623 	case DL_GET_STATISTICS_REQ:
3624 		if (mblkl < DL_GET_STATISTICS_REQ_SIZE)
3625 			break;
3626 		result = gld_get_statistics(q, mp);
3627 		break;
3628 
3629 	case DL_CAPABILITY_REQ:
3630 		if (mblkl < DL_CAPABILITY_REQ_SIZE)
3631 			break;
3632 		result = gld_cap(q, mp);
3633 		break;
3634 
3635 	case DL_NOTIFY_REQ:
3636 		if (mblkl < DL_NOTIFY_REQ_SIZE)
3637 			break;
3638 		result = gld_notify_req(q, mp);
3639 		break;
3640 
3641 	case DL_XID_REQ:
3642 	case DL_XID_RES:
3643 	case DL_TEST_REQ:
3644 	case DL_TEST_RES:
3645 	case DL_CONTROL_REQ:
3646 	case DL_PASSIVE_REQ:
3647 		result = DL_NOTSUPPORTED;
3648 		break;
3649 
3650 	default:
3651 #ifdef	GLD_DEBUG
3652 		if (gld_debug & GLDERRS)
3653 			cmn_err(CE_WARN,
3654 			    "gld_cmds: unknown M_PROTO message: %d",
3655 			    dlreq);
3656 #endif
3657 		result = DL_BADPRIM;
3658 	}
3659 
3660 	return (result);
3661 }
3662 
3663 static int
3664 gld_cap(queue_t *q, mblk_t *mp)
3665 {
3666 	gld_t *gld = (gld_t *)q->q_ptr;
3667 	dl_capability_req_t *dlp = (dl_capability_req_t *)mp->b_rptr;
3668 
3669 	if (gld->gld_state == DL_UNATTACHED)
3670 		return (DL_OUTSTATE);
3671 
3672 	if (dlp->dl_sub_length == 0)
3673 		return (gld_cap_ack(q, mp));
3674 
3675 	return (gld_cap_enable(q, mp));
3676 }
3677 
3678 static int
3679 gld_cap_ack(queue_t *q, mblk_t *mp)
3680 {
3681 	gld_t *gld = (gld_t *)q->q_ptr;
3682 	gld_mac_info_t *macinfo = gld->gld_mac_info;
3683 	gld_interface_t *ifp;
3684 	dl_capability_ack_t *dlap;
3685 	dl_capability_sub_t *dlsp;
3686 	size_t size = sizeof (dl_capability_ack_t);
3687 	size_t subsize = 0;
3688 
3689 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
3690 
3691 	if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY)
3692 		subsize += sizeof (dl_capability_sub_t) +
3693 		    sizeof (dl_capab_hcksum_t);
3694 	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY)
3695 		subsize += sizeof (dl_capability_sub_t) +
3696 		    sizeof (dl_capab_zerocopy_t);
3697 	if (macinfo->gldm_options & GLDOPT_MDT)
3698 		subsize += (sizeof (dl_capability_sub_t) +
3699 		    sizeof (dl_capab_mdt_t));
3700 
3701 	if ((mp = mexchange(q, mp, size + subsize, M_PROTO,
3702 	    DL_CAPABILITY_ACK)) == NULL)
3703 		return (GLDE_OK);
3704 
3705 	dlap = (dl_capability_ack_t *)mp->b_rptr;
3706 	dlap->dl_sub_offset = 0;
3707 	if ((dlap->dl_sub_length = subsize) != 0)
3708 		dlap->dl_sub_offset = sizeof (dl_capability_ack_t);
3709 	dlsp = (dl_capability_sub_t *)&dlap[1];
3710 
3711 	if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_ANY) {
3712 		dl_capab_hcksum_t *dlhp = (dl_capab_hcksum_t *)&dlsp[1];
3713 
3714 		dlsp->dl_cap = DL_CAPAB_HCKSUM;
3715 		dlsp->dl_length = sizeof (dl_capab_hcksum_t);
3716 
3717 		dlhp->hcksum_version = HCKSUM_VERSION_1;
3718 
3719 		dlhp->hcksum_txflags = 0;
3720 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_PARTIAL)
3721 			dlhp->hcksum_txflags |= HCKSUM_INET_PARTIAL;
3722 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V4)
3723 			dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V4;
3724 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_FULL_V6)
3725 			dlhp->hcksum_txflags |= HCKSUM_INET_FULL_V6;
3726 		if (macinfo->gldm_capabilities & GLD_CAP_CKSUM_IPHDR)
3727 			dlhp->hcksum_txflags |= HCKSUM_IPHDRCKSUM;
3728 
3729 		dlcapabsetqid(&(dlhp->hcksum_mid), RD(q));
3730 		dlsp = (dl_capability_sub_t *)&dlhp[1];
3731 	}
3732 
3733 	if (macinfo->gldm_capabilities & GLD_CAP_ZEROCOPY) {
3734 		dl_capab_zerocopy_t *dlzp = (dl_capab_zerocopy_t *)&dlsp[1];
3735 
3736 		dlsp->dl_cap = DL_CAPAB_ZEROCOPY;
3737 		dlsp->dl_length = sizeof (dl_capab_zerocopy_t);
3738 		dlzp->zerocopy_version = ZEROCOPY_VERSION_1;
3739 		dlzp->zerocopy_flags = DL_CAPAB_VMSAFE_MEM;
3740 
3741 		dlcapabsetqid(&(dlzp->zerocopy_mid), RD(q));
3742 		dlsp = (dl_capability_sub_t *)&dlzp[1];
3743 	}
3744 
3745 	if (macinfo->gldm_options & GLDOPT_MDT) {
3746 		dl_capab_mdt_t *dlmp = (dl_capab_mdt_t *)&dlsp[1];
3747 
3748 		dlsp->dl_cap = DL_CAPAB_MDT;
3749 		dlsp->dl_length = sizeof (dl_capab_mdt_t);
3750 
3751 		dlmp->mdt_version = MDT_VERSION_2;
3752 		dlmp->mdt_max_pld = macinfo->gldm_mdt_segs;
3753 		dlmp->mdt_span_limit = macinfo->gldm_mdt_sgl;
3754 		dlcapabsetqid(&dlmp->mdt_mid, OTHERQ(q));
3755 		dlmp->mdt_flags = DL_CAPAB_MDT_ENABLE;
3756 		dlmp->mdt_hdr_head = ifp->hdr_size;
3757 		dlmp->mdt_hdr_tail = 0;
3758 	}
3759 
3760 	qreply(q, mp);
3761 	return (GLDE_OK);
3762 }
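
/*
 * Layout of the DL_CAPABILITY_ACK built above when all three optional
 * capabilities are advertised (each subcapability appears only if the
 * corresponding GLD_CAP_xxx / GLDOPT_MDT flag is set):
 *
 *	dl_capability_ack_t
 *	dl_capability_sub_t { DL_CAPAB_HCKSUM }   + dl_capab_hcksum_t
 *	dl_capability_sub_t { DL_CAPAB_ZEROCOPY } + dl_capab_zerocopy_t
 *	dl_capability_sub_t { DL_CAPAB_MDT }      + dl_capab_mdt_t
 */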
3763 
3764 static int
3765 gld_cap_enable(queue_t *q, mblk_t *mp)
3766 {
3767 	dl_capability_req_t *dlp;
3768 	dl_capability_sub_t *dlsp;
3769 	dl_capab_hcksum_t *dlhp;
3770 	offset_t off;
3771 	size_t len;
3772 	size_t size;
3773 	offset_t end;
3774 
3775 	dlp = (dl_capability_req_t *)mp->b_rptr;
3776 	dlp->dl_primitive = DL_CAPABILITY_ACK;
3777 
3778 	off = dlp->dl_sub_offset;
3779 	len = dlp->dl_sub_length;
3780 
3781 	if (!MBLKIN(mp, off, len))
3782 		return (DL_BADPRIM);
3783 
3784 	end = off + len;
3785 	while (off < end) {
3786 		dlsp = (dl_capability_sub_t *)(mp->b_rptr + off);
3787 		size = sizeof (dl_capability_sub_t) + dlsp->dl_length;
3788 		if (off + size > end)
3789 			return (DL_BADPRIM);
3790 
3791 		switch (dlsp->dl_cap) {
3792 		case DL_CAPAB_HCKSUM:
3793 			dlhp = (dl_capab_hcksum_t *)&dlsp[1];
3794 			/* nothing useful we can do with the contents */
3795 			dlcapabsetqid(&(dlhp->hcksum_mid), RD(q));
3796 			break;
3797 		default:
3798 			break;
3799 		}
3800 
3801 		off += size;
3802 	}
3803 
3804 	qreply(q, mp);
3805 	return (GLDE_OK);
3806 }
3807 
3808 /*
3809  * Send a copy of the DL_NOTIFY_IND message <mp> to each stream that has
3810  * requested the specific <notification> that the message carries AND is
3811  * eligible and ready to receive the notification immediately.
3812  *
3813  * This routine ignores flow control. Notifications will be sent regardless.
3814  *
3815  * In all cases, the original message passed in is freed at the end of
3816  * the routine.
3817  */
3818 static void
3819 gld_notify_qs(gld_mac_info_t *macinfo, mblk_t *mp, uint32_t notification)
3820 {
3821 	gld_mac_pvt_t *mac_pvt;
3822 	gld_vlan_t *vlan;
3823 	gld_t *gld;
3824 	mblk_t *nmp;
3825 	int i;
3826 
3827 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
3828 
3829 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3830 
3831 	/*
3832 	 * Search all the streams attached to this macinfo looking
3833 	 * for those eligible to receive the present notification.
3834 	 */
3835 	for (i = 0; i < VLAN_HASHSZ; i++) {
3836 		for (vlan = mac_pvt->vlan_hash[i];
3837 		    vlan != NULL; vlan = vlan->gldv_next) {
3838 			for (gld = vlan->gldv_str_next;
3839 			    gld != (gld_t *)&vlan->gldv_str_next;
3840 			    gld = gld->gld_next) {
3841 				ASSERT(gld->gld_qptr != NULL);
3842 				ASSERT(gld->gld_state == DL_IDLE ||
3843 				    gld->gld_state == DL_UNBOUND);
3844 				ASSERT(gld->gld_mac_info == macinfo);
3845 
3846 				if (gld->gld_flags & GLD_STR_CLOSING)
3847 					continue; /* not eligible - skip */
3848 				if (!(notification & gld->gld_notifications))
3849 					continue; /* not wanted - skip */
3850 				if ((nmp = dupmsg(mp)) == NULL)
3851 					continue; /* can't copy - skip */
3852 
3853 				/*
3854 				 * All OK; send dup'd notification up this
3855 				 * stream
3856 				 */
3857 				qreply(WR(gld->gld_qptr), nmp);
3858 			}
3859 		}
3860 	}
3861 
3862 	/*
3863 	 * Drop the original message block now
3864 	 */
3865 	freemsg(mp);
3866 }
3867 
3868 /*
3869  * For each (understood) bit in the <notifications> argument, construct
3870  * a DL_NOTIFY_IND message and send it to the specified <q>, or to all
3871  * eligible queues if <q> is NULL.
3872  */
3873 static void
3874 gld_notify_ind(gld_mac_info_t *macinfo, uint32_t notifications, queue_t *q)
3875 {
3876 	gld_mac_pvt_t *mac_pvt;
3877 	dl_notify_ind_t *dlnip;
3878 	struct gld_stats *stats;
3879 	mblk_t *mp;
3880 	size_t size;
3881 	uint32_t bit;
3882 
3883 	GLDM_LOCK(macinfo, RW_WRITER);
3884 
3885 	/*
3886 	 * The following cases shouldn't happen, but just in case the
3887 	 * MAC driver calls gld_linkstate() at an inappropriate time, we
3888 	 * check anyway ...
3889 	 */
3890 	if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY)) {
3891 		GLDM_UNLOCK(macinfo);
3892 		return;				/* not ready yet	*/
3893 	}
3894 
3895 	if (macinfo->gldm_GLD_flags & GLD_UNREGISTERED) {
3896 		GLDM_UNLOCK(macinfo);
3897 		return;				/* not ready anymore	*/
3898 	}
3899 
3900 	/*
3901 	 * Make sure the kstats are up to date, 'cos we use some of
3902 	 * the kstat values below, specifically the link speed ...
3903 	 */
3904 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3905 	stats = mac_pvt->statistics;
3906 	if (macinfo->gldm_get_stats)
3907 		(void) (*macinfo->gldm_get_stats)(macinfo, stats);
3908 
3909 	for (bit = 1; notifications != 0; bit <<= 1) {
3910 		if ((notifications & bit) == 0)
3911 			continue;
3912 		notifications &= ~bit;
3913 
3914 		size = DL_NOTIFY_IND_SIZE;
3915 		if (bit == DL_NOTE_PHYS_ADDR)
3916 			size += macinfo->gldm_addrlen;
3917 		if ((mp = allocb(size, BPRI_MED)) == NULL)
3918 			continue;
3919 
3920 		mp->b_datap->db_type = M_PROTO;
3921 		mp->b_wptr = mp->b_rptr + size;
3922 		dlnip = (dl_notify_ind_t *)mp->b_rptr;
3923 		dlnip->dl_primitive = DL_NOTIFY_IND;
3924 		dlnip->dl_notification = 0;
3925 		dlnip->dl_data = 0;
3926 		dlnip->dl_addr_length = 0;
3927 		dlnip->dl_addr_offset = 0;
3928 
3929 		switch (bit) {
3930 		case DL_NOTE_PROMISC_ON_PHYS:
3931 		case DL_NOTE_PROMISC_OFF_PHYS:
3932 			if (mac_pvt->nprom != 0)
3933 				dlnip->dl_notification = bit;
3934 			break;
3935 
3936 		case DL_NOTE_LINK_DOWN:
3937 			if (macinfo->gldm_linkstate == GLD_LINKSTATE_DOWN)
3938 				dlnip->dl_notification = bit;
3939 			break;
3940 
3941 		case DL_NOTE_LINK_UP:
3942 			if (macinfo->gldm_linkstate == GLD_LINKSTATE_UP)
3943 				dlnip->dl_notification = bit;
3944 			break;
3945 
3946 		case DL_NOTE_SPEED:
3947 			/*
3948 			 * Conversion required here:
3949 			 *	GLD keeps the speed in bit/s in a uint64
3950 			 *	DLPI wants it in kb/s in a uint32
3951 			 * Fortunately this is still big enough for 10Gb/s!
3952 			 */
3953 			dlnip->dl_notification = bit;
3954 			dlnip->dl_data = stats->glds_speed/1000ULL;
3955 			break;
3956 
3957 		case DL_NOTE_PHYS_ADDR:
3958 			dlnip->dl_notification = bit;
3959 			dlnip->dl_data = DL_CURR_PHYS_ADDR;
3960 			dlnip->dl_addr_offset = sizeof (dl_notify_ind_t);
3961 			dlnip->dl_addr_length = macinfo->gldm_addrlen +
3962 			    abs(macinfo->gldm_saplen);
3963 			mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
3964 			mac_copy(mac_pvt->curr_macaddr,
3965 			    mp->b_rptr + sizeof (dl_notify_ind_t),
3966 			    macinfo->gldm_addrlen);
3967 			break;
3968 
3969 		default:
3970 			break;
3971 		}
3972 
3973 		if (dlnip->dl_notification == 0)
3974 			freemsg(mp);
3975 		else if (q != NULL)
3976 			qreply(q, mp);
3977 		else
3978 			gld_notify_qs(macinfo, mp, bit);
3979 	}
3980 
3981 	GLDM_UNLOCK(macinfo);
3982 }
3983 
3984 /*
3985  * gld_notify_req - handle a DL_NOTIFY_REQ message
3986  */
3987 static int
3988 gld_notify_req(queue_t *q, mblk_t *mp)
3989 {
3990 	gld_t *gld = (gld_t *)q->q_ptr;
3991 	gld_mac_info_t *macinfo;
3992 	gld_mac_pvt_t *pvt;
3993 	dl_notify_req_t *dlnrp;
3994 	dl_notify_ack_t *dlnap;
3995 
3996 	ASSERT(gld != NULL);
3997 	ASSERT(gld->gld_qptr == RD(q));
3998 
3999 	dlnrp = (dl_notify_req_t *)mp->b_rptr;
4000 
4001 #ifdef GLD_DEBUG
4002 	if (gld_debug & GLDTRACE)
4003 		cmn_err(CE_NOTE, "gld_notify_req(%p %p)",
4004 		    (void *)q, (void *)mp);
4005 #endif
4006 
4007 	if (gld->gld_state == DL_UNATTACHED) {
4008 #ifdef GLD_DEBUG
4009 		if (gld_debug & GLDERRS)
4010 			cmn_err(CE_NOTE, "gld_notify_req: wrong state (%d)",
4011 			    gld->gld_state);
4012 #endif
4013 		return (DL_OUTSTATE);
4014 	}
4015 
4016 	/*
4017 	 * Remember what notifications are required by this stream
4018 	 */
4019 	macinfo = gld->gld_mac_info;
4020 	pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4021 
4022 	gld->gld_notifications = dlnrp->dl_notifications & pvt->notifications;
4023 
4024 	/*
4025 	 * The return DL_NOTIFY_ACK carries the bitset of notifications
4026 	 * that this driver can provide, independently of which ones have
4027 	 * previously been or are now being requested.
4028 	 */
4029 	if ((mp = mexchange(q, mp, sizeof (dl_notify_ack_t), M_PCPROTO,
4030 	    DL_NOTIFY_ACK)) == NULL)
4031 		return (DL_SYSERR);
4032 
4033 	dlnap = (dl_notify_ack_t *)mp->b_rptr;
4034 	dlnap->dl_notifications = pvt->notifications;
4035 	qreply(q, mp);
4036 
4037 	/*
4038 	 * A side effect of a DL_NOTIFY_REQ is that after the DL_NOTIFY_ACK
4039  * reply, the requestor gets zero or more DL_NOTIFY_IND messages
4040 	 * that provide the current status.
4041 	 */
4042 	gld_notify_ind(macinfo, gld->gld_notifications, q);
4043 
4044 	return (GLDE_OK);
4045 }
4046 
4047 /*
4048  * gld_linkstate()
4049  *	Called by driver to tell GLD the state of the physical link.
4050  *	As a side effect, sends a DL_NOTE_LINK_UP or DL_NOTE_LINK_DOWN
4051  *	notification to each client that has previously requested such
4052  *	notifications
4053  */
4054 void
4055 gld_linkstate(gld_mac_info_t *macinfo, int32_t newstate)
4056 {
4057 	uint32_t notification;
4058 
4059 	switch (newstate) {
4060 	default:
4061 		return;
4062 
4063 	case GLD_LINKSTATE_DOWN:
4064 		notification = DL_NOTE_LINK_DOWN;
4065 		break;
4066 
4067 	case GLD_LINKSTATE_UP:
4068 		notification = DL_NOTE_LINK_UP | DL_NOTE_SPEED;
4069 		break;
4070 
4071 	case GLD_LINKSTATE_UNKNOWN:
4072 		notification = 0;
4073 		break;
4074 	}
4075 
4076 	GLDM_LOCK(macinfo, RW_WRITER);
4077 	if (macinfo->gldm_linkstate == newstate)
4078 		notification = 0;
4079 	else
4080 		macinfo->gldm_linkstate = newstate;
4081 	GLDM_UNLOCK(macinfo);
4082 
4083 	if (notification)
4084 		gld_notify_ind(macinfo, notification, NULL);
4085 }
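
/*
 * Sketch of expected driver usage (the softc and link_is_up() names are
 * hypothetical, not part of GLD): a driver's link-status interrupt or
 * polling routine simply reports transitions; GLD suppresses duplicates
 * and fans out DL_NOTE_LINK_UP/DL_NOTE_LINK_DOWN (and DL_NOTE_SPEED)
 * indications to interested streams.
 *
 *	if (link_is_up(softc))
 *		gld_linkstate(softc->gldp, GLD_LINKSTATE_UP);
 *	else
 *		gld_linkstate(softc->gldp, GLD_LINKSTATE_DOWN);
 */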
4086 
4087 /*
4088  * gld_udqos - set the current QoS parameters (priority only at the moment).
4089  */
4090 static int
4091 gld_udqos(queue_t *q, mblk_t *mp)
4092 {
4093 	dl_udqos_req_t *dlp;
4094 	gld_t  *gld = (gld_t *)q->q_ptr;
4095 	int off;
4096 	int len;
4097 	dl_qos_cl_sel1_t *selp;
4098 
4099 	ASSERT(gld);
4100 	ASSERT(gld->gld_qptr == RD(q));
4101 
4102 #ifdef GLD_DEBUG
4103 	if (gld_debug & GLDTRACE)
4104 		cmn_err(CE_NOTE, "gld_udqos(%p %p)", (void *)q, (void *)mp);
4105 #endif
4106 
4107 	if (gld->gld_state != DL_IDLE) {
4108 #ifdef GLD_DEBUG
4109 		if (gld_debug & GLDERRS)
4110 			cmn_err(CE_NOTE, "gld_udqos: wrong state (%d)",
4111 			    gld->gld_state);
4112 #endif
4113 		return (DL_OUTSTATE);
4114 	}
4115 
4116 	dlp = (dl_udqos_req_t *)mp->b_rptr;
4117 	off = dlp->dl_qos_offset;
4118 	len = dlp->dl_qos_length;
4119 
4120 	if (len != sizeof (dl_qos_cl_sel1_t) || !MBLKIN(mp, off, len))
4121 		return (DL_BADQOSTYPE);
4122 
4123 	selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + off);
4124 	if (selp->dl_qos_type != DL_QOS_CL_SEL1)
4125 		return (DL_BADQOSTYPE);
4126 
4127 	if (selp->dl_trans_delay != 0 &&
4128 	    selp->dl_trans_delay != DL_QOS_DONT_CARE)
4129 		return (DL_BADQOSPARAM);
4130 	if (selp->dl_protection != 0 &&
4131 	    selp->dl_protection != DL_QOS_DONT_CARE)
4132 		return (DL_BADQOSPARAM);
4133 	if (selp->dl_residual_error != 0 &&
4134 	    selp->dl_residual_error != DL_QOS_DONT_CARE)
4135 		return (DL_BADQOSPARAM);
4136 	if (selp->dl_priority < 0 || selp->dl_priority > 7)
4137 		return (DL_BADQOSPARAM);
4138 
4139 	gld->gld_upri = selp->dl_priority;
4140 
4141 	dlokack(q, mp, DL_UDQOS_REQ);
4142 	return (GLDE_OK);
4143 }
4144 
4145 static mblk_t *
4146 gld_bindack(queue_t *q, mblk_t *mp)
4147 {
4148 	gld_t *gld = (gld_t *)q->q_ptr;
4149 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4150 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4151 	dl_bind_ack_t *dlp;
4152 	size_t size;
4153 	t_uscalar_t addrlen;
4154 	uchar_t *sapp;
4155 
4156 	addrlen = macinfo->gldm_addrlen + abs(macinfo->gldm_saplen);
4157 	size = sizeof (dl_bind_ack_t) + addrlen;
4158 	if ((mp = mexchange(q, mp, size, M_PCPROTO, DL_BIND_ACK)) == NULL)
4159 		return (NULL);
4160 
4161 	dlp = (dl_bind_ack_t *)mp->b_rptr;
4162 	dlp->dl_sap = gld->gld_sap;
4163 	dlp->dl_addr_length = addrlen;
4164 	dlp->dl_addr_offset = sizeof (dl_bind_ack_t);
4165 	dlp->dl_max_conind = 0;
4166 	dlp->dl_xidtest_flg = 0;
4167 
4168 	mac_copy(mac_pvt->curr_macaddr, (uchar_t *)&dlp[1],
4169 	    macinfo->gldm_addrlen);
4170 	sapp = mp->b_rptr + dlp->dl_addr_offset + macinfo->gldm_addrlen;
4171 	*(ushort_t *)sapp = gld->gld_sap;
4172 
4173 	return (mp);
4174 }
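
/*
 * Layout of the DL_BIND_ACK built above:
 *
 *	dl_bind_ack_t	dl_addr_length = gldm_addrlen + abs(gldm_saplen)
 *			dl_addr_offset -> <current MAC address><2-byte SAP>
 */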
4175 
4176 /*
4177  * gld_bind - determine if a SAP is already allocated and whether it is legal
4178  * to do the bind at this time
4179  */
4180 static int
4181 gld_bind(queue_t *q, mblk_t *mp)
4182 {
4183 	ulong_t	sap;
4184 	dl_bind_req_t *dlp;
4185 	gld_t *gld = (gld_t *)q->q_ptr;
4186 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4187 
4188 	ASSERT(gld);
4189 	ASSERT(gld->gld_qptr == RD(q));
4190 
4191 #ifdef GLD_DEBUG
4192 	if (gld_debug & GLDTRACE)
4193 		cmn_err(CE_NOTE, "gld_bind(%p %p)", (void *)q, (void *)mp);
4194 #endif
4195 
4196 	dlp = (dl_bind_req_t *)mp->b_rptr;
4197 	sap = dlp->dl_sap;
4198 
4199 #ifdef GLD_DEBUG
4200 	if (gld_debug & GLDPROT)
4201 		cmn_err(CE_NOTE, "gld_bind: lsap=%lx", sap);
4202 #endif
4203 
4204 	if (gld->gld_state != DL_UNBOUND) {
4205 #ifdef GLD_DEBUG
4206 		if (gld_debug & GLDERRS)
4207 			cmn_err(CE_NOTE, "gld_bind: bound or not attached (%d)",
4208 			    gld->gld_state);
4209 #endif
4210 		return (DL_OUTSTATE);
4211 	}
4212 	ASSERT(macinfo);
4213 
4214 	if (dlp->dl_service_mode != DL_CLDLS) {
4215 		return (DL_UNSUPPORTED);
4216 	}
4217 	if (dlp->dl_xidtest_flg & (DL_AUTO_XID | DL_AUTO_TEST)) {
4218 		return (DL_NOAUTO);
4219 	}
4220 
4221 	/*
4222 	 * Check sap validity and decide whether this stream accepts
4223 	 * IEEE 802.2 (LLC) packets.
4224 	 */
4225 	if (sap > ETHERTYPE_MAX)
4226 		return (DL_BADSAP);
4227 
4228 	/*
4229 	 * Decide whether the SAP value selects EtherType encoding/decoding.
4230 	 * For compatibility with monolithic ethernet drivers, the range of
4231 	 * SAP values is different for DL_ETHER media.
4232 	 */
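	/*
	 * For example, on DL_ETHER a bind to SAP 0x0800 (IPv4, 2048 >
	 * ETHERMTU) selects EtherType encoding, while a bind to SAP 0xAA
	 * (170) selects IEEE 802.2 (LLC) service.
	 */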
4233 	switch (macinfo->gldm_type) {
4234 	case DL_ETHER:
4235 		gld->gld_ethertype = (sap > ETHERMTU);
4236 		break;
4237 	default:
4238 		gld->gld_ethertype = (sap > GLD_MAX_802_SAP);
4239 		break;
4240 	}
4241 
4242 	/* if we get to here, then the SAP is legal enough */
4243 	GLDM_LOCK(macinfo, RW_WRITER);
4244 	gld->gld_state = DL_IDLE;	/* bound and ready */
4245 	gld->gld_sap = sap;
4246 	if ((macinfo->gldm_type == DL_ETHER) && (sap == ETHERTYPE_VLAN))
4247 		((gld_vlan_t *)gld->gld_vlan)->gldv_nvlan_sap++;
4248 	gld_set_ipq(gld);
4249 
4250 #ifdef GLD_DEBUG
4251 	if (gld_debug & GLDPROT)
4252 		cmn_err(CE_NOTE, "gld_bind: ok - sap = %d", gld->gld_sap);
4253 #endif
4254 
4255 	/* ACK the BIND */
4256 	mp = gld_bindack(q, mp);
4257 	GLDM_UNLOCK(macinfo);
4258 
4259 	if (mp != NULL) {
4260 		qreply(q, mp);
4261 		return (GLDE_OK);
4262 	}
4263 
4264 	return (DL_SYSERR);
4265 }
4266 
4267 /*
4268  * gld_unbind - perform an unbind of an LSAP or ether type on the stream.
4269  * The stream is still open and can be re-bound.
4270  */
4271 static int
4272 gld_unbind(queue_t *q, mblk_t *mp)
4273 {
4274 	gld_t *gld = (gld_t *)q->q_ptr;
4275 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4276 
4277 	ASSERT(gld);
4278 
4279 #ifdef GLD_DEBUG
4280 	if (gld_debug & GLDTRACE)
4281 		cmn_err(CE_NOTE, "gld_unbind(%p %p)", (void *)q, (void *)mp);
4282 #endif
4283 
4284 	if (gld->gld_state != DL_IDLE) {
4285 #ifdef GLD_DEBUG
4286 		if (gld_debug & GLDERRS)
4287 			cmn_err(CE_NOTE, "gld_unbind: wrong state (%d)",
4288 			    gld->gld_state);
4289 #endif
4290 		return (DL_OUTSTATE);
4291 	}
4292 	ASSERT(macinfo);
4293 
4294 	/*
4295 	 * Avoid unbinding (DL_UNBIND_REQ) while FAST/RAW is inside wput.
4296 	 * See comments above gld_start().
4297 	 */
4298 	gld->gld_in_unbind = B_TRUE;	/* disallow wput=>start */
4299 	membar_enter();
4300 	if (gld->gld_wput_count != 0) {
4301 		gld->gld_in_unbind = B_FALSE;
4302 		ASSERT(mp);		/* we didn't come from close */
4303 #ifdef GLD_DEBUG
4304 		if (gld_debug & GLDETRACE)
4305 			cmn_err(CE_NOTE, "gld_unbind: defer for wput");
4306 #endif
4307 		(void) putbq(q, mp);
4308 		qenable(q);		/* try again soon */
4309 		return (GLDE_RETRY);
4310 	}
4311 
4312 	GLDM_LOCK(macinfo, RW_WRITER);
4313 	if ((macinfo->gldm_type == DL_ETHER) &&
4314 	    (gld->gld_sap == ETHERTYPE_VLAN)) {
4315 		((gld_vlan_t *)gld->gld_vlan)->gldv_nvlan_sap--;
4316 	}
4317 	gld->gld_state = DL_UNBOUND;
4318 	gld->gld_sap = 0;
4319 	gld_set_ipq(gld);
4320 	GLDM_UNLOCK(macinfo);
4321 
4322 	membar_exit();
4323 	gld->gld_in_unbind = B_FALSE;
4324 
4325 	/* mp is NULL if we came from close */
4326 	if (mp) {
4327 		gld_flushqueue(q);	/* flush the queues */
4328 		dlokack(q, mp, DL_UNBIND_REQ);
4329 	}
4330 	return (GLDE_OK);
4331 }
4332 
4333 /*
4334  * gld_inforeq - generate the response to an info request
4335  */
4336 static int
4337 gld_inforeq(queue_t *q, mblk_t *mp)
4338 {
4339 	gld_t		*gld;
4340 	dl_info_ack_t	*dlp;
4341 	int		bufsize;
4342 	glddev_t	*glddev;
4343 	gld_mac_info_t	*macinfo;
4344 	gld_mac_pvt_t	*mac_pvt;
4345 	int		sel_offset = 0;
4346 	int		range_offset = 0;
4347 	int		addr_offset;
4348 	int		addr_length;
4349 	int		sap_length;
4350 	int		brdcst_offset;
4351 	int		brdcst_length;
4352 	uchar_t		*sapp;
4353 
4354 #ifdef GLD_DEBUG
4355 	if (gld_debug & GLDTRACE)
4356 		cmn_err(CE_NOTE, "gld_inforeq(%p %p)", (void *)q, (void *)mp);
4357 #endif
4358 	gld = (gld_t *)q->q_ptr;
4359 	ASSERT(gld);
4360 	glddev = gld->gld_device;
4361 	ASSERT(glddev);
4362 
4363 	if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) {
4364 		macinfo = gld->gld_mac_info;
4365 		ASSERT(macinfo != NULL);
4366 
4367 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4368 
4369 		addr_length = macinfo->gldm_addrlen;
4370 		sap_length = macinfo->gldm_saplen;
4371 		brdcst_length = macinfo->gldm_addrlen;
4372 	} else {
4373 		addr_length = glddev->gld_addrlen;
4374 		sap_length = glddev->gld_saplen;
4375 		brdcst_length = glddev->gld_addrlen;
4376 	}
4377 
4378 	bufsize = sizeof (dl_info_ack_t);
4379 
4380 	addr_offset = bufsize;
4381 	bufsize += addr_length;
4382 	bufsize += abs(sap_length);
4383 
4384 	brdcst_offset = bufsize;
4385 	bufsize += brdcst_length;
4386 
4387 	if (((gld_vlan_t *)gld->gld_vlan) != NULL) {
4388 		sel_offset = P2ROUNDUP(bufsize, sizeof (int64_t));
4389 		bufsize = sel_offset + sizeof (dl_qos_cl_sel1_t);
4390 
4391 		range_offset = P2ROUNDUP(bufsize, sizeof (int64_t));
4392 		bufsize = range_offset + sizeof (dl_qos_cl_range1_t);
4393 	}
4394 
4395 	if ((mp = mexchange(q, mp, bufsize, M_PCPROTO, DL_INFO_ACK)) == NULL)
4396 		return (GLDE_OK);	/* nothing more to be done */
4397 
4398 	bzero(mp->b_rptr, bufsize);
4399 
4400 	dlp = (dl_info_ack_t *)mp->b_rptr;
4401 	dlp->dl_primitive = DL_INFO_ACK;
4402 	dlp->dl_version = DL_VERSION_2;
4403 	dlp->dl_service_mode = DL_CLDLS;
4404 	dlp->dl_current_state = gld->gld_state;
4405 	dlp->dl_provider_style = gld->gld_style;
4406 
4407 	if (sel_offset != 0) {
4408 		dl_qos_cl_sel1_t	*selp;
4409 		dl_qos_cl_range1_t	*rangep;
4410 
4411 		ASSERT(range_offset != 0);
4412 
4413 		dlp->dl_qos_offset = sel_offset;
4414 		dlp->dl_qos_length = sizeof (dl_qos_cl_sel1_t);
4415 		dlp->dl_qos_range_offset = range_offset;
4416 		dlp->dl_qos_range_length = sizeof (dl_qos_cl_range1_t);
4417 
4418 		selp = (dl_qos_cl_sel1_t *)(mp->b_rptr + sel_offset);
4419 		selp->dl_qos_type = DL_QOS_CL_SEL1;
4420 		selp->dl_priority = gld->gld_upri;
4421 
4422 		rangep = (dl_qos_cl_range1_t *)(mp->b_rptr + range_offset);
4423 		rangep->dl_qos_type = DL_QOS_CL_RANGE1;
4424 		rangep->dl_priority.dl_min = 0;
4425 		rangep->dl_priority.dl_max = 7;
4426 	}
4427 
4428 	if (gld->gld_state == DL_IDLE || gld->gld_state == DL_UNBOUND) {
4429 		dlp->dl_min_sdu = macinfo->gldm_minpkt;
4430 		dlp->dl_max_sdu = macinfo->gldm_maxpkt;
4431 		dlp->dl_mac_type = macinfo->gldm_type;
4432 		dlp->dl_addr_length = addr_length + abs(sap_length);
4433 		dlp->dl_sap_length = sap_length;
4434 
4435 		if (gld->gld_state == DL_IDLE) {
4436 			/*
4437 			 * If we are bound to a non-LLC SAP on any medium
4438 			 * other than Ethernet, then we need room for a
4439 			 * SNAP header.  So we have to adjust the MTU size
4440 			 * accordingly.  XXX I suppose this should be done
4441 			 * in gldutil.c, but it seems likely that this will
4442 			 * always be true for everything GLD supports but
4443 			 * Ethernet.  Check this if you add another medium.
4444 			 */
4445 			if ((macinfo->gldm_type == DL_TPR ||
4446 			    macinfo->gldm_type == DL_FDDI) &&
4447 			    gld->gld_ethertype)
4448 				dlp->dl_max_sdu -= LLC_SNAP_HDR_LEN;
4449 
4450 			/* copy macaddr and sap */
4451 			dlp->dl_addr_offset = addr_offset;
4452 
4453 			mac_copy(mac_pvt->curr_macaddr, mp->b_rptr +
4454 			    addr_offset, macinfo->gldm_addrlen);
4455 			sapp = mp->b_rptr + addr_offset +
4456 			    macinfo->gldm_addrlen;
4457 			*(ushort_t *)sapp = gld->gld_sap;
4458 		} else {
4459 			dlp->dl_addr_offset = 0;
4460 		}
4461 
4462 		/* copy broadcast addr */
4463 		dlp->dl_brdcst_addr_length = macinfo->gldm_addrlen;
4464 		dlp->dl_brdcst_addr_offset = brdcst_offset;
4465 		mac_copy((caddr_t)macinfo->gldm_broadcast_addr,
4466 		    mp->b_rptr + brdcst_offset, brdcst_length);
4467 	} else {
4468 		/*
4469 		 * No PPA is attached.
4470 		 * The best we can do is use the values provided
4471 		 * by the first mac that called gld_register.
4472 		 */
4473 		dlp->dl_min_sdu = glddev->gld_minsdu;
4474 		dlp->dl_max_sdu = glddev->gld_maxsdu;
4475 		dlp->dl_mac_type = glddev->gld_type;
4476 		dlp->dl_addr_length = addr_length + abs(sap_length);
4477 		dlp->dl_sap_length = sap_length;
4478 		dlp->dl_addr_offset = 0;
4479 		dlp->dl_brdcst_addr_offset = brdcst_offset;
4480 		dlp->dl_brdcst_addr_length = brdcst_length;
4481 		mac_copy((caddr_t)glddev->gld_broadcast,
4482 		    mp->b_rptr + brdcst_offset, brdcst_length);
4483 	}
4484 	qreply(q, mp);
4485 	return (GLDE_OK);
4486 }
4487 
4488 /*
4489  * gld_unitdata (q, mp)
4490  * send a datagram.  Destination address/lsap is in M_PROTO
4491  * message (first mblock), data is in remainder of message.
4492  *
4493  */
4494 static int
4495 gld_unitdata(queue_t *q, mblk_t *mp)
4496 {
4497 	gld_t *gld = (gld_t *)q->q_ptr;
4498 	dl_unitdata_req_t *dlp = (dl_unitdata_req_t *)mp->b_rptr;
4499 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4500 	size_t	msglen;
4501 	mblk_t	*nmp;
4502 	gld_interface_t *ifp;
4503 	uint32_t start;
4504 	uint32_t stuff;
4505 	uint32_t end;
4506 	uint32_t value;
4507 	uint32_t flags;
4508 	uint32_t upri;
4509 
4510 #ifdef GLD_DEBUG
4511 	if (gld_debug & GLDTRACE)
4512 		cmn_err(CE_NOTE, "gld_unitdata(%p %p)", (void *)q, (void *)mp);
4513 #endif
4514 
4515 	if (gld->gld_state != DL_IDLE) {
4516 #ifdef GLD_DEBUG
4517 		if (gld_debug & GLDERRS)
4518 			cmn_err(CE_NOTE, "gld_unitdata: wrong state (%d)",
4519 			    gld->gld_state);
4520 #endif
4521 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4522 		    dlp->dl_dest_addr_length, DL_OUTSTATE, 0);
4523 		return (GLDE_OK);
4524 	}
4525 	ASSERT(macinfo != NULL);
4526 
4527 	if (!MBLKIN(mp, dlp->dl_dest_addr_offset, dlp->dl_dest_addr_length) ||
4528 	    dlp->dl_dest_addr_length !=
4529 	    macinfo->gldm_addrlen + abs(macinfo->gldm_saplen)) {
4530 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4531 		    dlp->dl_dest_addr_length, DL_BADADDR, 0);
4532 		return (GLDE_OK);
4533 	}
4534 
4535 	upri = dlp->dl_priority.dl_max;
4536 
4537 	msglen = msgdsize(mp);
4538 	if (msglen == 0 || msglen > macinfo->gldm_maxpkt) {
4539 #ifdef GLD_DEBUG
4540 		if (gld_debug & GLDERRS)
4541 			cmn_err(CE_NOTE, "gld_unitdata: bad msglen (%d)",
4542 			    (int)msglen);
4543 #endif
4544 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4545 		    dlp->dl_dest_addr_length, DL_BADDATA, 0);
4546 		return (GLDE_OK);
4547 	}
4548 
4549 	ASSERT(mp->b_cont != NULL);	/* because msgdsize(mp) is nonzero */
4550 
4551 	ifp = ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->interfacep;
4552 
4553 	/* grab any checksum information that may be present */
4554 	hcksum_retrieve(mp->b_cont, NULL, NULL, &start, &stuff, &end,
4555 	    &value, &flags);
4556 
4557 	/*
4558 	 * Prepend a valid header for transmission
4559 	 */
4560 	if ((nmp = (*ifp->mkunitdata)(gld, mp)) == NULL) {
4561 #ifdef GLD_DEBUG
4562 		if (gld_debug & GLDERRS)
4563 			cmn_err(CE_NOTE, "gld_unitdata: mkunitdata failed.");
4564 #endif
4565 		dluderrorind(q, mp, mp->b_rptr + dlp->dl_dest_addr_offset,
4566 		    dlp->dl_dest_addr_length, DL_SYSERR, ENOSR);
4567 		return (GLDE_OK);
4568 	}
4569 
4570 	/* apply any checksum information to the first block in the chain */
4571 	(void) hcksum_assoc(nmp, NULL, NULL, start, stuff, end, value,
4572 	    flags, 0);
4573 
4574 	GLD_CLEAR_MBLK_VTAG(nmp);
4575 	if (gld_start(q, nmp, GLD_WSRV, upri) == GLD_NORESOURCES) {
4576 		qenable(q);
4577 		return (GLDE_RETRY);
4578 	}
4579 
4580 	return (GLDE_OK);
4581 }
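
/*
 * Shape of the message handled above (sizes illustrative, assuming
 * gldm_addrlen == 6 and gldm_saplen == -2, as for Ethernet):
 *
 *	mp (M_PROTO):	dl_unitdata_req_t with
 *			dl_dest_addr_length = 8 (6-byte MAC + 2-byte SAP)
 *			dl_dest_addr_offset -> <dest MAC><dest SAP>
 *	mp->b_cont (M_DATA): payload of 1 .. gldm_maxpkt bytes
 *
 * mkunitdata() uses the destination address and SAP to prepend a MAC
 * header, and the resulting chain is handed to gld_start() for transmit.
 */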
4582 
4583 /*
4584  * gldattach(q, mp)
4585  * DLPI DL_ATTACH_REQ
4586  * this attaches the stream to a PPA
4587  */
4588 static int
4589 gldattach(queue_t *q, mblk_t *mp)
4590 {
4591 	dl_attach_req_t *at;
4592 	gld_mac_info_t *macinfo;
4593 	gld_t  *gld = (gld_t *)q->q_ptr;
4594 	glddev_t *glddev;
4595 	gld_mac_pvt_t *mac_pvt;
4596 	uint32_t ppa;
4597 	uint32_t vid;
4598 	gld_vlan_t *vlan;
4599 
4600 	at = (dl_attach_req_t *)mp->b_rptr;
4601 
4602 	if (gld->gld_state != DL_UNATTACHED)
4603 		return (DL_OUTSTATE);
4604 
4605 	ASSERT(!gld->gld_mac_info);
4606 
4607 	ppa = at->dl_ppa % GLD_VLAN_SCALE;	/* 0 .. 999	*/
4608 	vid = at->dl_ppa / GLD_VLAN_SCALE;	/* 0 .. 4094	*/
4609 	if (vid > VLAN_VID_MAX)
4610 		return (DL_BADPPA);
4611 
4612 	glddev = gld->gld_device;
4613 	mutex_enter(&glddev->gld_devlock);
4614 	for (macinfo = glddev->gld_mac_next;
4615 	    macinfo != (gld_mac_info_t *)&glddev->gld_mac_next;
4616 	    macinfo = macinfo->gldm_next) {
4617 		int inst;
4618 
4619 		ASSERT(macinfo != NULL);
4620 		if (macinfo->gldm_ppa != ppa)
4621 			continue;
4622 
4623 		if (!(macinfo->gldm_GLD_flags & GLD_MAC_READY))
4624 			continue;	/* this one's not ready yet */
4625 
4626 		/*
4627 		 * VLAN sanity check
4628 		 */
4629 		if (vid != VLAN_VID_NONE && !VLAN_CAPABLE(macinfo)) {
4630 			mutex_exit(&glddev->gld_devlock);
4631 			return (DL_BADPPA);
4632 		}
4633 
4634 		/*
4635 		 * We found the correct PPA, hold the instance
4636 		 */
4637 		inst = ddi_get_instance(macinfo->gldm_devinfo);
4638 		if (inst == -1 || qassociate(q, inst) != 0) {
4639 			mutex_exit(&glddev->gld_devlock);
4640 			return (DL_BADPPA);
4641 		}
4642 
4643 		/* Take the stream off the per-driver-class list */
4644 		gldremque(gld);
4645 
4646 		/*
4647 		 * We must hold the lock to prevent multiple calls
4648 		 * to the reset and start routines.
4649 		 */
4650 		GLDM_LOCK(macinfo, RW_WRITER);
4651 
4652 		gld->gld_mac_info = macinfo;
4653 
4654 		if (macinfo->gldm_send_tagged != NULL)
4655 			gld->gld_send = macinfo->gldm_send_tagged;
4656 		else
4657 			gld->gld_send = macinfo->gldm_send;
4658 
4659 		if ((vlan = gld_get_vlan(macinfo, vid)) == NULL) {
4660 			GLDM_UNLOCK(macinfo);
4661 			gldinsque(gld, glddev->gld_str_prev);
4662 			mutex_exit(&glddev->gld_devlock);
4663 			(void) qassociate(q, -1);
4664 			return (DL_BADPPA);
4665 		}
4666 
4667 		mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4668 		if (!mac_pvt->started) {
4669 			if (gld_start_mac(macinfo) != GLD_SUCCESS) {
4670 				gld_rem_vlan(vlan);
4671 				GLDM_UNLOCK(macinfo);
4672 				gldinsque(gld, glddev->gld_str_prev);
4673 				mutex_exit(&glddev->gld_devlock);
4674 				dlerrorack(q, mp, DL_ATTACH_REQ, DL_SYSERR,
4675 				    EIO);
4676 				(void) qassociate(q, -1);
4677 				return (GLDE_OK);
4678 			}
4679 		}
4680 
4681 		gld->gld_vlan = vlan;
4682 		vlan->gldv_nstreams++;
4683 		gldinsque(gld, vlan->gldv_str_prev);
4684 		gld->gld_state = DL_UNBOUND;
4685 		GLDM_UNLOCK(macinfo);
4686 
4687 #ifdef GLD_DEBUG
4688 		if (gld_debug & GLDPROT) {
4689 			cmn_err(CE_NOTE, "gldattach(%p, %p, PPA = %d)",
4690 			    (void *)q, (void *)mp, macinfo->gldm_ppa);
4691 		}
4692 #endif
4693 		mutex_exit(&glddev->gld_devlock);
4694 		dlokack(q, mp, DL_ATTACH_REQ);
4695 		return (GLDE_OK);
4696 	}
4697 	mutex_exit(&glddev->gld_devlock);
4698 	return (DL_BADPPA);
4699 }
4700 
4701 /*
4702  * gldunattach(q, mp)
4703  * DLPI DL_DETACH_REQ
4704  * detaches the mac layer from the stream
4705  */
4706 int
4707 gldunattach(queue_t *q, mblk_t *mp)
4708 {
4709 	gld_t  *gld = (gld_t *)q->q_ptr;
4710 	glddev_t *glddev = gld->gld_device;
4711 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4712 	int	state = gld->gld_state;
4713 	int	i;
4714 	gld_mac_pvt_t *mac_pvt;
4715 	gld_vlan_t *vlan;
4716 	boolean_t phys_off;
4717 	boolean_t mult_off;
4718 	int op = GLD_MAC_PROMISC_NOOP;
4719 
4720 	if (state != DL_UNBOUND)
4721 		return (DL_OUTSTATE);
4722 
4723 	ASSERT(macinfo != NULL);
4724 	ASSERT(gld->gld_sap == 0);
4725 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4726 
4727 #ifdef GLD_DEBUG
4728 	if (gld_debug & GLDPROT) {
4729 		cmn_err(CE_NOTE, "gldunattach(%p, %p, PPA = %d)",
4730 		    (void *)q, (void *)mp, macinfo->gldm_ppa);
4731 	}
4732 #endif
4733 
4734 	GLDM_LOCK(macinfo, RW_WRITER);
4735 
4736 	if (gld->gld_mcast) {
4737 		for (i = 0; i < gld->gld_multicnt; i++) {
4738 			gld_mcast_t *mcast;
4739 
4740 			if ((mcast = gld->gld_mcast[i]) != NULL) {
4741 				ASSERT(mcast->gldm_refcnt);
4742 				gld_send_disable_multi(macinfo, mcast);
4743 			}
4744 		}
4745 		kmem_free(gld->gld_mcast,
4746 		    sizeof (gld_mcast_t *) * gld->gld_multicnt);
4747 		gld->gld_mcast = NULL;
4748 		gld->gld_multicnt = 0;
4749 	}
4750 
4751 	/* decide if we need to turn off any promiscuity */
4752 	phys_off = (gld->gld_flags & GLD_PROM_PHYS &&
4753 	    --mac_pvt->nprom == 0);
4754 	mult_off = (gld->gld_flags & GLD_PROM_MULT &&
4755 	    --mac_pvt->nprom_multi == 0);
4756 
4757 	if (phys_off) {
4758 		op = (mac_pvt->nprom_multi == 0) ? GLD_MAC_PROMISC_NONE :
4759 		    GLD_MAC_PROMISC_MULTI;
4760 	} else if (mult_off) {
4761 		op = (mac_pvt->nprom == 0) ? GLD_MAC_PROMISC_NONE :
4762 		    GLD_MAC_PROMISC_NOOP;	/* phys overrides multi */
4763 	}
4764 
4765 	if (op != GLD_MAC_PROMISC_NOOP)
4766 		(void) (*macinfo->gldm_set_promiscuous)(macinfo, op);
4767 
4768 	vlan = (gld_vlan_t *)gld->gld_vlan;
4769 	if (gld->gld_flags & GLD_PROM_PHYS)
4770 		vlan->gldv_nprom--;
4771 	if (gld->gld_flags & GLD_PROM_MULT)
4772 		vlan->gldv_nprom--;
4773 	if (gld->gld_flags & GLD_PROM_SAP) {
4774 		vlan->gldv_nprom--;
4775 		vlan->gldv_nvlan_sap--;
4776 	}
4777 
4778 	gld->gld_flags &= ~(GLD_PROM_PHYS | GLD_PROM_SAP | GLD_PROM_MULT);
4779 
4780 	GLDM_UNLOCK(macinfo);
4781 
4782 	if (phys_off)
4783 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL);
4784 
4785 	/*
4786 	 * We need to hold both locks when modifying the mac stream list
4787 	 * to protect findminor as well as everyone else.
4788 	 */
4789 	mutex_enter(&glddev->gld_devlock);
4790 	GLDM_LOCK(macinfo, RW_WRITER);
4791 
4792 	/* disassociate this stream with its vlan and underlying mac */
4793 	gldremque(gld);
4794 
4795 	if (--vlan->gldv_nstreams == 0) {
4796 		gld_rem_vlan(vlan);
4797 		gld->gld_vlan = NULL;
4798 	}
4799 
4800 	gld->gld_mac_info = NULL;
4801 	gld->gld_state = DL_UNATTACHED;
4802 
4803 	/* cleanup mac layer if last vlan */
4804 	if (mac_pvt->nvlan == 0) {
4805 		gld_stop_mac(macinfo);
4806 		macinfo->gldm_GLD_flags &= ~GLD_INTR_WAIT;
4807 	}
4808 
4809 	/* make sure no references to this gld for gld_v0_sched */
4810 	if (mac_pvt->last_sched == gld)
4811 		mac_pvt->last_sched = NULL;
4812 
4813 	GLDM_UNLOCK(macinfo);
4814 
4815 	/* put the stream on the unattached Style 2 list */
4816 	gldinsque(gld, glddev->gld_str_prev);
4817 
4818 	mutex_exit(&glddev->gld_devlock);
4819 
4820 	/* There will be no mp if we were called from close */
4821 	if (mp) {
4822 		dlokack(q, mp, DL_DETACH_REQ);
4823 	}
4824 	if (gld->gld_style == DL_STYLE2)
4825 		(void) qassociate(q, -1);
4826 	return (GLDE_OK);
4827 }
4828 
4829 /*
4830  * gld_enable_multi (q, mp)
4831  * Enables multicast address on the stream.  If the mac layer
4832  * isn't enabled for this address, enable at that level as well.
4833  */
4834 static int
4835 gld_enable_multi(queue_t *q, mblk_t *mp)
4836 {
4837 	gld_t  *gld = (gld_t *)q->q_ptr;
4838 	glddev_t *glddev;
4839 	gld_mac_info_t *macinfo = gld->gld_mac_info;
4840 	unsigned char *maddr;
4841 	dl_enabmulti_req_t *multi;
4842 	gld_mcast_t *mcast;
4843 	int	i, rc;
4844 	gld_mac_pvt_t *mac_pvt;
4845 
4846 #ifdef GLD_DEBUG
4847 	if (gld_debug & GLDPROT) {
4848 		cmn_err(CE_NOTE, "gld_enable_multi(%p, %p)", (void *)q,
4849 		    (void *)mp);
4850 	}
4851 #endif
4852 
4853 	if (gld->gld_state == DL_UNATTACHED)
4854 		return (DL_OUTSTATE);
4855 
4856 	ASSERT(macinfo != NULL);
4857 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
4858 
4859 	if (macinfo->gldm_set_multicast == NULL) {
4860 		return (DL_UNSUPPORTED);
4861 	}
4862 
4863 	multi = (dl_enabmulti_req_t *)mp->b_rptr;
4864 
4865 	if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) ||
4866 	    multi->dl_addr_length != macinfo->gldm_addrlen)
4867 		return (DL_BADADDR);
4868 
4869 	/* request appears to be valid */
4870 
4871 	glddev = mac_pvt->major_dev;
4872 	ASSERT(glddev == gld->gld_device);
4873 
4874 	maddr = mp->b_rptr + multi->dl_addr_offset;
4875 
4876 	/*
4877 	 * The multicast addresses live in a per-device table, along
4878 	 * with a reference count.  Each stream has a table that
4879 	 * points to entries in the device table, with the reference
4880 	 * count reflecting the number of streams pointing at it.  If
4881 	 * this multicast address is already in the per-device table,
4882 	 * all we have to do is point at it.
4883 	 */
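	/*
	 * Example: if two streams each enable 01:00:5E:00:00:01, they end
	 * up pointing at the same mcast_table entry with gldm_refcnt == 2.
	 * The hardware is programmed only on the 0 -> 1 transition (below)
	 * and deprogrammed when the count returns to 0 (see
	 * gld_send_disable_multi()).
	 */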
4884 	GLDM_LOCK(macinfo, RW_WRITER);
4885 
4886 	/* does this address appear in current table? */
4887 	if (gld->gld_mcast == NULL) {
4888 		/* no mcast addresses -- allocate table */
4889 		gld->gld_mcast = GLD_GETSTRUCT(gld_mcast_t *,
4890 		    glddev->gld_multisize);
4891 		if (gld->gld_mcast == NULL) {
4892 			GLDM_UNLOCK(macinfo);
4893 			dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR);
4894 			return (GLDE_OK);
4895 		}
4896 		gld->gld_multicnt = glddev->gld_multisize;
4897 	} else {
4898 		for (i = 0; i < gld->gld_multicnt; i++) {
4899 			if (gld->gld_mcast[i] &&
4900 			    mac_eq(gld->gld_mcast[i]->gldm_addr,
4901 			    maddr, macinfo->gldm_addrlen)) {
4902 				/* this is a match -- just succeed */
4903 				ASSERT(gld->gld_mcast[i]->gldm_refcnt);
4904 				GLDM_UNLOCK(macinfo);
4905 				dlokack(q, mp, DL_ENABMULTI_REQ);
4906 				return (GLDE_OK);
4907 			}
4908 		}
4909 	}
4910 
4911 	/*
4912 	 * it wasn't in the stream so check to see if the mac layer has it
4913 	 */
4914 	mcast = NULL;
4915 	if (mac_pvt->mcast_table == NULL) {
4916 		mac_pvt->mcast_table = GLD_GETSTRUCT(gld_mcast_t,
4917 		    glddev->gld_multisize);
4918 		if (mac_pvt->mcast_table == NULL) {
4919 			GLDM_UNLOCK(macinfo);
4920 			dlerrorack(q, mp, DL_ENABMULTI_REQ, DL_SYSERR, ENOSR);
4921 			return (GLDE_OK);
4922 		}
4923 	} else {
4924 		for (i = 0; i < glddev->gld_multisize; i++) {
4925 			if (mac_pvt->mcast_table[i].gldm_refcnt &&
4926 			    mac_eq(mac_pvt->mcast_table[i].gldm_addr,
4927 			    maddr, macinfo->gldm_addrlen)) {
4928 				mcast = &mac_pvt->mcast_table[i];
4929 				break;
4930 			}
4931 		}
4932 	}
4933 	if (mcast == NULL) {
4934 		/* not in mac layer -- find an empty mac slot to fill in */
4935 		for (i = 0; i < glddev->gld_multisize; i++) {
4936 			if (mac_pvt->mcast_table[i].gldm_refcnt == 0) {
4937 				mcast = &mac_pvt->mcast_table[i];
4938 				mac_copy(maddr, mcast->gldm_addr,
4939 				    macinfo->gldm_addrlen);
4940 				break;
4941 			}
4942 		}
4943 	}
4944 	if (mcast == NULL) {
4945 		/* couldn't get a mac layer slot */
4946 		GLDM_UNLOCK(macinfo);
4947 		return (DL_TOOMANY);
4948 	}
4949 
4950 	/* now we have a mac layer slot in mcast -- get a stream slot */
4951 	for (i = 0; i < gld->gld_multicnt; i++) {
4952 		if (gld->gld_mcast[i] != NULL)
4953 			continue;
4954 		/* found an empty slot */
4955 		if (!mcast->gldm_refcnt) {
4956 			/* set mcast in hardware */
4957 			unsigned char cmaddr[GLD_MAX_ADDRLEN];
4958 
4959 			ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
4960 			cmac_copy(maddr, cmaddr,
4961 			    macinfo->gldm_addrlen, macinfo);
4962 
4963 			rc = (*macinfo->gldm_set_multicast)
4964 			    (macinfo, cmaddr, GLD_MULTI_ENABLE);
4965 			if (rc == GLD_NOTSUPPORTED) {
4966 				GLDM_UNLOCK(macinfo);
4967 				return (DL_NOTSUPPORTED);
4968 			} else if (rc == GLD_NORESOURCES) {
4969 				GLDM_UNLOCK(macinfo);
4970 				return (DL_TOOMANY);
4971 			} else if (rc == GLD_BADARG) {
4972 				GLDM_UNLOCK(macinfo);
4973 				return (DL_BADADDR);
4974 			} else if (rc == GLD_RETRY) {
4975 				/*
4976 				 * The putbq and gld_xwait must be
4977 				 * within the lock to prevent races
4978 				 * with gld_sched.
4979 				 */
4980 				(void) putbq(q, mp);
4981 				gld->gld_xwait = B_TRUE;
4982 				GLDM_UNLOCK(macinfo);
4983 				return (GLDE_RETRY);
4984 			} else if (rc != GLD_SUCCESS) {
4985 				GLDM_UNLOCK(macinfo);
4986 				dlerrorack(q, mp, DL_ENABMULTI_REQ,
4987 				    DL_SYSERR, EIO);
4988 				return (GLDE_OK);
4989 			}
4990 		}
4991 		gld->gld_mcast[i] = mcast;
4992 		mcast->gldm_refcnt++;
4993 		GLDM_UNLOCK(macinfo);
4994 		dlokack(q, mp, DL_ENABMULTI_REQ);
4995 		return (GLDE_OK);
4996 	}
4997 
4998 	/* couldn't get a stream slot */
4999 	GLDM_UNLOCK(macinfo);
5000 	return (DL_TOOMANY);
5001 }
5002 
5003 
5004 /*
5005  * gld_disable_multi (q, mp)
5006  * Disable the multicast address on the stream.  If last
5007  * reference for the mac layer, disable there as well.
5008  */
5009 static int
5010 gld_disable_multi(queue_t *q, mblk_t *mp)
5011 {
5012 	gld_t  *gld;
5013 	gld_mac_info_t *macinfo;
5014 	unsigned char *maddr;
5015 	dl_disabmulti_req_t *multi;
5016 	int i;
5017 	gld_mcast_t *mcast;
5018 
5019 #ifdef GLD_DEBUG
5020 	if (gld_debug & GLDPROT) {
5021 		cmn_err(CE_NOTE, "gld_disable_multi(%p, %p)", (void *)q,
5022 		    (void *)mp);
5023 	}
5024 #endif
5025 
5026 	gld = (gld_t *)q->q_ptr;
5027 	if (gld->gld_state == DL_UNATTACHED)
5028 		return (DL_OUTSTATE);
5029 
5030 	macinfo = gld->gld_mac_info;
5031 	ASSERT(macinfo != NULL);
5032 	if (macinfo->gldm_set_multicast == NULL) {
5033 		return (DL_UNSUPPORTED);
5034 	}
5035 
5036 	multi = (dl_disabmulti_req_t *)mp->b_rptr;
5037 
5038 	if (!MBLKIN(mp, multi->dl_addr_offset, multi->dl_addr_length) ||
5039 	    multi->dl_addr_length != macinfo->gldm_addrlen)
5040 		return (DL_BADADDR);
5041 
5042 	maddr = mp->b_rptr + multi->dl_addr_offset;
5043 
5044 	/* request appears to be valid */
5045 	/* does this address appear in current table? */
5046 	GLDM_LOCK(macinfo, RW_WRITER);
5047 	if (gld->gld_mcast != NULL) {
5048 		for (i = 0; i < gld->gld_multicnt; i++)
5049 			if (((mcast = gld->gld_mcast[i]) != NULL) &&
5050 			    mac_eq(mcast->gldm_addr,
5051 			    maddr, macinfo->gldm_addrlen)) {
5052 				ASSERT(mcast->gldm_refcnt);
5053 				gld_send_disable_multi(macinfo, mcast);
5054 				gld->gld_mcast[i] = NULL;
5055 				GLDM_UNLOCK(macinfo);
5056 				dlokack(q, mp, DL_DISABMULTI_REQ);
5057 				return (GLDE_OK);
5058 			}
5059 	}
5060 	GLDM_UNLOCK(macinfo);
5061 	return (DL_NOTENAB); /* not an enabled address */
5062 }
5063 
5064 /*
5065  * gld_send_disable_multi(macinfo, mcast)
5066  * this function is used to disable a multicast address if the reference
5067  * count goes to zero. The disable request will then be forwarded to the
5068  * lower stream.
5069  */
5070 static void
5071 gld_send_disable_multi(gld_mac_info_t *macinfo, gld_mcast_t *mcast)
5072 {
5073 	ASSERT(macinfo != NULL);
5074 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5075 	ASSERT(mcast != NULL);
5076 	ASSERT(mcast->gldm_refcnt);
5077 
5078 	if (!mcast->gldm_refcnt) {
5079 		return;			/* "cannot happen" */
5080 	}
5081 
5082 	if (--mcast->gldm_refcnt > 0) {
5083 		return;
5084 	}
5085 
5086 	/*
5087 	 * This must be converted from canonical form to device form.
5088 	 * The refcnt is now zero so we can trash the data.
5089 	 */
5090 	if (macinfo->gldm_options & GLDOPT_CANONICAL_ADDR)
5091 		gld_bitreverse(mcast->gldm_addr, macinfo->gldm_addrlen);
5092 
5093 	/* XXX Ought to check for GLD_NORESOURCES or GLD_FAILURE */
5094 	(void) (*macinfo->gldm_set_multicast)
5095 	    (macinfo, mcast->gldm_addr, GLD_MULTI_DISABLE);
5096 }
5097 
5098 /*
5099  * gld_promisc (q, mp, req, on)
5100  *	enable or disable the use of promiscuous mode with the hardware
5101  */
5102 static int
5103 gld_promisc(queue_t *q, mblk_t *mp, t_uscalar_t req, boolean_t on)
5104 {
5105 	gld_t *gld;
5106 	gld_mac_info_t *macinfo;
5107 	gld_mac_pvt_t *mac_pvt;
5108 	gld_vlan_t *vlan;
5109 	union DL_primitives *prim;
5110 	int macrc = GLD_SUCCESS;
5111 	int dlerr = GLDE_OK;
5112 	int op = GLD_MAC_PROMISC_NOOP;
5113 
5114 #ifdef GLD_DEBUG
5115 	if (gld_debug & GLDTRACE)
5116 		cmn_err(CE_NOTE, "gld_promisc(%p, %p, %d, %d)",
5117 		    (void *)q, (void *)mp, req, on);
5118 #endif
5119 
5120 	ASSERT(mp != NULL);
5121 	prim = (union DL_primitives *)mp->b_rptr;
5122 
5123 	/* XXX I think spec allows promisc in unattached state */
5124 	gld = (gld_t *)q->q_ptr;
5125 	if (gld->gld_state == DL_UNATTACHED)
5126 		return (DL_OUTSTATE);
5127 
5128 	macinfo = gld->gld_mac_info;
5129 	ASSERT(macinfo != NULL);
5130 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5131 
5132 	vlan = (gld_vlan_t *)gld->gld_vlan;
5133 	ASSERT(vlan != NULL);
5134 
5135 	GLDM_LOCK(macinfo, RW_WRITER);
5136 
5137 	/*
5138 	 * Work out what request (if any) has to be made to the MAC layer
5139 	 */
5140 	if (on) {
5141 		switch (prim->promiscon_req.dl_level) {
5142 		default:
5143 			dlerr = DL_UNSUPPORTED;	/* this is an error */
5144 			break;
5145 
5146 		case DL_PROMISC_PHYS:
5147 			if (mac_pvt->nprom == 0)
5148 				op = GLD_MAC_PROMISC_PHYS;
5149 			break;
5150 
5151 		case DL_PROMISC_MULTI:
5152 			if (mac_pvt->nprom_multi == 0)
5153 				if (mac_pvt->nprom == 0)
5154 					op = GLD_MAC_PROMISC_MULTI;
5155 			break;
5156 
5157 		case DL_PROMISC_SAP:
5158 			/* We can do this without reference to the MAC */
5159 			break;
5160 		}
5161 	} else {
5162 		switch (prim->promiscoff_req.dl_level) {
5163 		default:
5164 			dlerr = DL_UNSUPPORTED;	/* this is an error */
5165 			break;
5166 
5167 		case DL_PROMISC_PHYS:
5168 			if (!(gld->gld_flags & GLD_PROM_PHYS))
5169 				dlerr = DL_NOTENAB;
5170 			else if (mac_pvt->nprom == 1)
5171 				if (mac_pvt->nprom_multi)
5172 					op = GLD_MAC_PROMISC_MULTI;
5173 				else
5174 					op = GLD_MAC_PROMISC_NONE;
5175 			break;
5176 
5177 		case DL_PROMISC_MULTI:
5178 			if (!(gld->gld_flags & GLD_PROM_MULT))
5179 				dlerr = DL_NOTENAB;
5180 			else if (mac_pvt->nprom_multi == 1)
5181 				if (mac_pvt->nprom == 0)
5182 					op = GLD_MAC_PROMISC_NONE;
5183 			break;
5184 
5185 		case DL_PROMISC_SAP:
5186 			if (!(gld->gld_flags & GLD_PROM_SAP))
5187 				dlerr = DL_NOTENAB;
5188 
5189 			/* We can do this without reference to the MAC */
5190 			break;
5191 		}
5192 	}
5193 
5194 	/*
5195 	 * The request was invalid in some way so no need to continue.
5196 	 */
5197 	if (dlerr != GLDE_OK) {
5198 		GLDM_UNLOCK(macinfo);
5199 		return (dlerr);
5200 	}
5201 
5202 	/*
5203 	 * Issue the request to the MAC layer, if required
5204 	 */
5205 	if (op != GLD_MAC_PROMISC_NOOP) {
5206 		macrc = (*macinfo->gldm_set_promiscuous)(macinfo, op);
5207 	}
5208 
5209 	/*
5210 	 * On success, update the appropriate flags & refcounts
5211 	 */
5212 	if (macrc == GLD_SUCCESS) {
5213 		if (on) {
5214 			switch (prim->promiscon_req.dl_level) {
5215 			case DL_PROMISC_PHYS:
5216 				mac_pvt->nprom++;
5217 				vlan->gldv_nprom++;
5218 				gld->gld_flags |= GLD_PROM_PHYS;
5219 				break;
5220 
5221 			case DL_PROMISC_MULTI:
5222 				mac_pvt->nprom_multi++;
5223 				vlan->gldv_nprom++;
5224 				gld->gld_flags |= GLD_PROM_MULT;
5225 				break;
5226 
5227 			case DL_PROMISC_SAP:
5228 				gld->gld_flags |= GLD_PROM_SAP;
5229 				vlan->gldv_nprom++;
5230 				vlan->gldv_nvlan_sap++;
5231 				break;
5232 
5233 			default:
5234 				break;
5235 			}
5236 		} else {
5237 			switch (prim->promiscoff_req.dl_level) {
5238 			case DL_PROMISC_PHYS:
5239 				mac_pvt->nprom--;
5240 				vlan->gldv_nprom--;
5241 				gld->gld_flags &= ~GLD_PROM_PHYS;
5242 				break;
5243 
5244 			case DL_PROMISC_MULTI:
5245 				mac_pvt->nprom_multi--;
5246 				vlan->gldv_nprom--;
5247 				gld->gld_flags &= ~GLD_PROM_MULT;
5248 				break;
5249 
5250 			case DL_PROMISC_SAP:
5251 				gld->gld_flags &= ~GLD_PROM_SAP;
5252 				vlan->gldv_nvlan_sap--;
5253 				vlan->gldv_nprom--;
5254 				break;
5255 
5256 			default:
5257 				break;
5258 			}
5259 		}
5260 	} else if (macrc == GLD_RETRY) {
5261 		/*
5262 		 * The putbq and gld_xwait must be within the lock to
5263 		 * prevent races with gld_sched.
5264 		 */
5265 		(void) putbq(q, mp);
5266 		gld->gld_xwait = B_TRUE;
5267 	}
5268 
5269 	GLDM_UNLOCK(macinfo);
5270 
5271 	/*
5272 	 * Finally, decide how to reply.
5273 	 *
5274 	 * If <macrc> is not GLD_SUCCESS, the request was put to the MAC
5275 	 * layer but failed.  In such cases, we can return a DL_* error
5276 	 * code and let the caller send an error-ack reply upstream, or
5277 	 * we can send a reply here and then return GLDE_OK so that the
5278 	 * caller doesn't also respond.
5279 	 *
5280 	 * If physical-promiscuous mode was (successfully) switched on or
5281 	 * off, send a notification (DL_NOTIFY_IND) to anyone interested.
5282 	 */
5283 	switch (macrc) {
5284 	case GLD_NOTSUPPORTED:
5285 		return (DL_NOTSUPPORTED);
5286 
5287 	case GLD_NORESOURCES:
5288 		dlerrorack(q, mp, req, DL_SYSERR, ENOSR);
5289 		return (GLDE_OK);
5290 
5291 	case GLD_RETRY:
5292 		return (GLDE_RETRY);
5293 
5294 	default:
5295 		dlerrorack(q, mp, req, DL_SYSERR, EIO);
5296 		return (GLDE_OK);
5297 
5298 	case GLD_SUCCESS:
5299 		dlokack(q, mp, req);
5300 		break;
5301 	}
5302 
5303 	switch (op) {
5304 	case GLD_MAC_PROMISC_NOOP:
5305 		break;
5306 
5307 	case GLD_MAC_PROMISC_PHYS:
5308 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_ON_PHYS, NULL);
5309 		break;
5310 
5311 	default:
5312 		gld_notify_ind(macinfo, DL_NOTE_PROMISC_OFF_PHYS, NULL);
5313 		break;
5314 	}
5315 
5316 	return (GLDE_OK);
5317 }
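
/*
 * Editor's note: the following fragment is an illustrative sketch, not part
 * of the original source.  It shows the userland side of the request that
 * gld_promisc() services: a DLPI consumer sending DL_PROMISCON_REQ with
 * dl_level set to DL_PROMISC_PHYS.  The GLD_EXAMPLE_SKETCH guard and the
 * function name are hypothetical; the DLPI structures and putmsg() usage
 * are standard.
 */
#ifdef GLD_EXAMPLE_SKETCH
#include <sys/dlpi.h>
#include <stropts.h>

static int
example_promisc_phys_on(int fd)
{
        dl_promiscon_req_t req;
        struct strbuf ctl;

        req.dl_primitive = DL_PROMISCON_REQ;
        req.dl_level = DL_PROMISC_PHYS;

        ctl.maxlen = 0;
        ctl.len = sizeof (req);
        ctl.buf = (char *)&req;

        /* The DL_OK_ACK or DL_ERROR_ACK reply is read back with getmsg(). */
        return (putmsg(fd, &ctl, NULL, 0));
}
#endif /* GLD_EXAMPLE_SKETCH */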
5318 
5319 /*
5320  * gld_physaddr()
5321  *	get the current or factory physical address value
5322  */
5323 static int
5324 gld_physaddr(queue_t *q, mblk_t *mp)
5325 {
5326 	gld_t *gld = (gld_t *)q->q_ptr;
5327 	gld_mac_info_t *macinfo;
5328 	union DL_primitives *prim = (union DL_primitives *)mp->b_rptr;
5329 	unsigned char addr[GLD_MAX_ADDRLEN];
5330 
5331 	if (gld->gld_state == DL_UNATTACHED)
5332 		return (DL_OUTSTATE);
5333 
5334 	macinfo = (gld_mac_info_t *)gld->gld_mac_info;
5335 	ASSERT(macinfo != NULL);
5336 	ASSERT(macinfo->gldm_addrlen <= GLD_MAX_ADDRLEN);
5337 
5338 	switch (prim->physaddr_req.dl_addr_type) {
5339 	case DL_FACT_PHYS_ADDR:
5340 		mac_copy((caddr_t)macinfo->gldm_vendor_addr,
5341 		    (caddr_t)addr, macinfo->gldm_addrlen);
5342 		break;
5343 	case DL_CURR_PHYS_ADDR:
5344 		/* make a copy so we don't hold the lock across qreply */
5345 		GLDM_LOCK(macinfo, RW_WRITER);
5346 		mac_copy((caddr_t)
5347 		    ((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)->curr_macaddr,
5348 		    (caddr_t)addr, macinfo->gldm_addrlen);
5349 		GLDM_UNLOCK(macinfo);
5350 		break;
5351 	default:
5352 		return (DL_BADPRIM);
5353 	}
5354 	dlphysaddrack(q, mp, (caddr_t)addr, macinfo->gldm_addrlen);
5355 	return (GLDE_OK);
5356 }
5357 
5358 /*
5359  * gld_setaddr()
5360  *	change the hardware's physical address to a user-specified value
5361  */
5362 static int
5363 gld_setaddr(queue_t *q, mblk_t *mp)
5364 {
5365 	gld_t *gld = (gld_t *)q->q_ptr;
5366 	gld_mac_info_t *macinfo;
5367 	gld_mac_pvt_t *mac_pvt;
5368 	union DL_primitives *prim = (union DL_primitives *)mp->b_rptr;
5369 	unsigned char *addr;
5370 	unsigned char cmaddr[GLD_MAX_ADDRLEN];
5371 	int rc;
5372 	gld_vlan_t *vlan;
5373 
5374 	if (gld->gld_state == DL_UNATTACHED)
5375 		return (DL_OUTSTATE);
5376 
5377 	vlan = (gld_vlan_t *)gld->gld_vlan;
5378 	ASSERT(vlan != NULL);
5379 
5380 	if (vlan->gldv_id != VLAN_VID_NONE)
5381 		return (DL_NOTSUPPORTED);
5382 
5383 	macinfo = (gld_mac_info_t *)gld->gld_mac_info;
5384 	ASSERT(macinfo != NULL);
5385 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5386 
5387 	if (!MBLKIN(mp, prim->set_physaddr_req.dl_addr_offset,
5388 	    prim->set_physaddr_req.dl_addr_length) ||
5389 	    prim->set_physaddr_req.dl_addr_length != macinfo->gldm_addrlen)
5390 		return (DL_BADADDR);
5391 
5392 	GLDM_LOCK(macinfo, RW_WRITER);
5393 
5394 	/* now do the set at the hardware level */
5395 	addr = mp->b_rptr + prim->set_physaddr_req.dl_addr_offset;
5396 	ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
5397 	cmac_copy(addr, cmaddr, macinfo->gldm_addrlen, macinfo);
5398 
5399 	rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr);
5400 	if (rc == GLD_SUCCESS)
5401 		mac_copy(addr, mac_pvt->curr_macaddr,
5402 		    macinfo->gldm_addrlen);
5403 
5404 	GLDM_UNLOCK(macinfo);
5405 
5406 	switch (rc) {
5407 	case GLD_SUCCESS:
5408 		break;
5409 	case GLD_NOTSUPPORTED:
5410 		return (DL_NOTSUPPORTED);
5411 	case GLD_BADARG:
5412 		return (DL_BADADDR);
5413 	case GLD_NORESOURCES:
5414 		dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, ENOSR);
5415 		return (GLDE_OK);
5416 	default:
5417 		dlerrorack(q, mp, DL_SET_PHYS_ADDR_REQ, DL_SYSERR, EIO);
5418 		return (GLDE_OK);
5419 	}
5420 
5421 	gld_notify_ind(macinfo, DL_NOTE_PHYS_ADDR, NULL);
5422 
5423 	dlokack(q, mp, DL_SET_PHYS_ADDR_REQ);
5424 	return (GLDE_OK);
5425 }
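
/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * It shows how a userland DLPI consumer lays out the DL_SET_PHYS_ADDR_REQ
 * message that gld_setaddr() validates above: dl_addr_length bytes of
 * address placed at dl_addr_offset.  EXAMPLE_MAX_ADDRLEN and the function
 * name are hypothetical.
 */
#ifdef GLD_EXAMPLE_SKETCH
#include <sys/dlpi.h>
#include <stropts.h>
#include <string.h>

#define	EXAMPLE_MAX_ADDRLEN	16

static int
example_set_physaddr(int fd, const unsigned char *addr, size_t addrlen)
{
        union {
                dl_set_phys_addr_req_t req;
                char buf[DL_SET_PHYS_ADDR_REQ_SIZE + EXAMPLE_MAX_ADDRLEN];
        } msg;
        struct strbuf ctl;

        if (addrlen > EXAMPLE_MAX_ADDRLEN)
                return (-1);

        msg.req.dl_primitive = DL_SET_PHYS_ADDR_REQ;
        msg.req.dl_addr_length = (t_uscalar_t)addrlen;
        msg.req.dl_addr_offset = DL_SET_PHYS_ADDR_REQ_SIZE;
        (void) memcpy(msg.buf + DL_SET_PHYS_ADDR_REQ_SIZE, addr, addrlen);

        ctl.maxlen = 0;
        ctl.len = (int)(DL_SET_PHYS_ADDR_REQ_SIZE + addrlen);
        ctl.buf = msg.buf;

        /* A DL_OK_ACK indicates the driver accepted the new address. */
        return (putmsg(fd, &ctl, NULL, 0));
}
#endif /* GLD_EXAMPLE_SKETCH */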
5426 
5427 int
5428 gld_get_statistics(queue_t *q, mblk_t *mp)
5429 {
5430 	dl_get_statistics_ack_t *dlsp;
5431 	gld_t  *gld = (gld_t *)q->q_ptr;
5432 	gld_mac_info_t *macinfo = gld->gld_mac_info;
5433 	gld_mac_pvt_t *mac_pvt;
5434 
5435 	if (gld->gld_state == DL_UNATTACHED)
5436 		return (DL_OUTSTATE);
5437 
5438 	ASSERT(macinfo != NULL);
5439 
5440 	mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5441 	(void) gld_update_kstat(mac_pvt->kstatp, KSTAT_READ);
5442 
5443 	mp = mexchange(q, mp, DL_GET_STATISTICS_ACK_SIZE +
5444 	    sizeof (struct gldkstats), M_PCPROTO, DL_GET_STATISTICS_ACK);
5445 
5446 	if (mp == NULL)
5447 		return (GLDE_OK);	/* mexchange already sent merror */
5448 
5449 	dlsp = (dl_get_statistics_ack_t *)mp->b_rptr;
5450 	dlsp->dl_primitive = DL_GET_STATISTICS_ACK;
5451 	dlsp->dl_stat_length = sizeof (struct gldkstats);
5452 	dlsp->dl_stat_offset = DL_GET_STATISTICS_ACK_SIZE;
5453 
5454 	GLDM_LOCK(macinfo, RW_WRITER);
5455 	bcopy(mac_pvt->kstatp->ks_data,
5456 	    (mp->b_rptr + DL_GET_STATISTICS_ACK_SIZE),
5457 	    sizeof (struct gldkstats));
5458 	GLDM_UNLOCK(macinfo);
5459 
5460 	qreply(q, mp);
5461 	return (GLDE_OK);
5462 }
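
/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * It shows how a userland DLPI consumer would locate the statistics block
 * that gld_get_statistics() appends to the DL_GET_STATISTICS_ACK above
 * (dl_stat_length bytes at dl_stat_offset).  The function name is
 * hypothetical.
 */
#ifdef GLD_EXAMPLE_SKETCH
#include <sys/dlpi.h>
#include <string.h>

static const void *
example_stats_payload(const char *ctlbuf, size_t ctllen, size_t *lenp)
{
        dl_get_statistics_ack_t ack;

        if (ctllen < DL_GET_STATISTICS_ACK_SIZE)
                return (NULL);
        (void) memcpy(&ack, ctlbuf, sizeof (ack));
        if (ack.dl_primitive != DL_GET_STATISTICS_ACK ||
            ack.dl_stat_offset > ctllen ||
            ack.dl_stat_length > ctllen - ack.dl_stat_offset)
                return (NULL);

        /* For a GLD device this is a copy of the driver's gld kstat data. */
        *lenp = ack.dl_stat_length;
        return (ctlbuf + ack.dl_stat_offset);
}
#endif /* GLD_EXAMPLE_SKETCH */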
5463 
5464 /* =================================================== */
5465 /* misc utilities, some requiring various mutexes held */
5466 /* =================================================== */
5467 
5468 /*
5469  * Initialize and start the driver.
5470  */
5471 static int
5472 gld_start_mac(gld_mac_info_t *macinfo)
5473 {
5474 	int	rc;
5475 	unsigned char cmaddr[GLD_MAX_ADDRLEN];
5476 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5477 
5478 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5479 	ASSERT(!mac_pvt->started);
5480 
5481 	rc = (*macinfo->gldm_reset)(macinfo);
5482 	if (rc != GLD_SUCCESS)
5483 		return (GLD_FAILURE);
5484 
5485 	/* set the addr after we reset the device */
5486 	ASSERT(sizeof (cmaddr) >= macinfo->gldm_addrlen);
5487 	cmac_copy(((gld_mac_pvt_t *)macinfo->gldm_mac_pvt)
5488 	    ->curr_macaddr, cmaddr, macinfo->gldm_addrlen, macinfo);
5489 
5490 	rc = (*macinfo->gldm_set_mac_addr)(macinfo, cmaddr);
5491 	ASSERT(rc != GLD_BADARG);  /* this address was good before */
5492 	if (rc != GLD_SUCCESS && rc != GLD_NOTSUPPORTED)
5493 		return (GLD_FAILURE);
5494 
5495 	rc = (*macinfo->gldm_start)(macinfo);
5496 	if (rc != GLD_SUCCESS)
5497 		return (GLD_FAILURE);
5498 
5499 	mac_pvt->started = B_TRUE;
5500 	return (GLD_SUCCESS);
5501 }
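
/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * gld_start_mac() above drives the driver's gldm_reset/gldm_set_mac_addr/
 * gldm_start entry points; this is a minimal driver-side gldm_start() of
 * the shape it expects.  "xx_softc" and "xx_enable_rxtx" are hypothetical
 * driver-private names; gldm_private and the GLD_* return codes are real.
 */
#ifdef GLD_EXAMPLE_SKETCH
struct xx_softc;				/* hypothetical soft state */
extern int xx_enable_rxtx(struct xx_softc *);	/* hypothetical */

static int
xx_start(gld_mac_info_t *macinfo)
{
        struct xx_softc *sc = (struct xx_softc *)macinfo->gldm_private;

        if (xx_enable_rxtx(sc) != 0)
                return (GLD_FAILURE);

        return (GLD_SUCCESS);
}
#endif /* GLD_EXAMPLE_SKETCH */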
5502 
5503 /*
5504  * Stop the driver.
5505  */
5506 static void
5507 gld_stop_mac(gld_mac_info_t *macinfo)
5508 {
5509 	gld_mac_pvt_t *mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5510 
5511 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5512 	ASSERT(mac_pvt->started);
5513 
5514 	(void) (*macinfo->gldm_stop)(macinfo);
5515 
5516 	mac_pvt->started = B_FALSE;
5517 }
5518 
5519 
5520 /*
5521  * gld_set_ipq will set a pointer to the queue which is bound to the
5522  * IP sap if:
5523  * o the device type is ethernet or IPoIB.
5524  * o there is no stream in SAP promiscuous mode.
5525  * o there is exactly one stream bound to the IP sap.
5526  * o the stream is in "fastpath" mode.
5527  */
5528 static void
5529 gld_set_ipq(gld_t *gld)
5530 {
5531 	gld_vlan_t	*vlan;
5532 	gld_mac_info_t	*macinfo = gld->gld_mac_info;
5533 	gld_t		*ip_gld = NULL;
5534 	uint_t		ipq_candidates = 0;
5535 	gld_t		*ipv6_gld = NULL;
5536 	uint_t		ipv6q_candidates = 0;
5537 
5538 	ASSERT(GLDM_LOCK_HELD_WRITE(macinfo));
5539 
5540 	/* The ipq code in gld_recv() is intimate with ethernet/IPoIB */
5541 	if (((macinfo->gldm_type != DL_ETHER) &&
5542 	    (macinfo->gldm_type != DL_IB)) ||
5543 	    (gld_global_options & GLD_OPT_NO_IPQ))
5544 		return;
5545 
5546 	vlan = (gld_vlan_t *)gld->gld_vlan;
5547 	ASSERT(vlan != NULL);
5548 
5549 	/* clear down any previously defined ipqs */
5550 	vlan->gldv_ipq = NULL;
5551 	vlan->gldv_ipv6q = NULL;
5552 
5553 	/* Try to find a single stream eligible to receive IP packets */
5554 	for (gld = vlan->gldv_str_next;
5555 	    gld != (gld_t *)&vlan->gldv_str_next; gld = gld->gld_next) {
5556 		if (gld->gld_state != DL_IDLE)
5557 			continue;	/* not eligible to receive */
5558 		if (gld->gld_flags & GLD_STR_CLOSING)
5559 			continue;	/* not eligible to receive */
5560 
5561 		if (gld->gld_sap == ETHERTYPE_IP) {
5562 			ip_gld = gld;
5563 			ipq_candidates++;
5564 		}
5565 
5566 		if (gld->gld_sap == ETHERTYPE_IPV6) {
5567 			ipv6_gld = gld;
5568 			ipv6q_candidates++;
5569 		}
5570 	}
5571 
5572 	if (ipq_candidates == 1) {
5573 		ASSERT(ip_gld != NULL);
5574 
5575 		if (ip_gld->gld_flags & GLD_FAST)	/* eligible for ipq */
5576 			vlan->gldv_ipq = ip_gld->gld_qptr;
5577 	}
5578 
5579 	if (ipv6q_candidates == 1) {
5580 		ASSERT(ipv6_gld != NULL);
5581 
5582 		if (ipv6_gld->gld_flags & GLD_FAST)	/* eligible for ipq */
5583 			vlan->gldv_ipv6q = ipv6_gld->gld_qptr;
5584 	}
5585 }
5586 
5587 /*
5588  * gld_flushqueue (q)
5589  *	used by DLPI primitives that require flushing the queues.
5590  *	essentially, this is DL_UNBIND_REQ.
5591  */
5592 static void
5593 gld_flushqueue(queue_t *q)
5594 {
5595 	/* flush all data in both queues */
5596 	/* XXX Should these be FLUSHALL? */
5597 	flushq(q, FLUSHDATA);
5598 	flushq(WR(q), FLUSHDATA);
5599 	/* flush all the queues upstream */
5600 	(void) putctl1(q, M_FLUSH, FLUSHRW);
5601 }
5602 
5603 /*
5604  * gld_devlookup (major)
5605  * Search the device table for the device with the specified
5606  * major number and return a pointer to it if it exists.
5607  */
5608 static glddev_t *
5609 gld_devlookup(int major)
5610 {
5611 	struct glddevice *dev;
5612 
5613 	ASSERT(mutex_owned(&gld_device_list.gld_devlock));
5614 
5615 	for (dev = gld_device_list.gld_next;
5616 	    dev != &gld_device_list;
5617 	    dev = dev->gld_next) {
5618 		ASSERT(dev);
5619 		if (dev->gld_major == major)
5620 			return (dev);
5621 	}
5622 	return (NULL);
5623 }
5624 
5625 /*
5626  * gld_findminor(device)
5627  * Returns a minor number currently unused by any stream in the current
5628  * device class (major) list.
5629  */
5630 static int
5631 gld_findminor(glddev_t *device)
5632 {
5633 	gld_t		*next;
5634 	gld_mac_info_t	*nextmac;
5635 	gld_vlan_t	*nextvlan;
5636 	int		minor;
5637 	int		i;
5638 
5639 	ASSERT(mutex_owned(&device->gld_devlock));
5640 
5641 	/* The fast way */
5642 	if (device->gld_nextminor >= GLD_MIN_CLONE_MINOR &&
5643 	    device->gld_nextminor <= GLD_MAX_CLONE_MINOR)
5644 		return (device->gld_nextminor++);
5645 
5646 	/* The steady way */
5647 	for (minor = GLD_MIN_CLONE_MINOR; minor <= GLD_MAX_CLONE_MINOR;
5648 	    minor++) {
5649 		/* Search all unattached streams */
5650 		for (next = device->gld_str_next;
5651 		    next != (gld_t *)&device->gld_str_next;
5652 		    next = next->gld_next) {
5653 			if (minor == next->gld_minor)
5654 				goto nextminor;
5655 		}
5656 		/* Search all attached streams; we don't need maclock because */
5657 		/* mac stream list is protected by devlock as well as maclock */
5658 		for (nextmac = device->gld_mac_next;
5659 		    nextmac != (gld_mac_info_t *)&device->gld_mac_next;
5660 		    nextmac = nextmac->gldm_next) {
5661 			gld_mac_pvt_t *pvt =
5662 			    (gld_mac_pvt_t *)nextmac->gldm_mac_pvt;
5663 
5664 			if (!(nextmac->gldm_GLD_flags & GLD_MAC_READY))
5665 				continue;	/* this one's not ready yet */
5666 
5667 			for (i = 0; i < VLAN_HASHSZ; i++) {
5668 				for (nextvlan = pvt->vlan_hash[i];
5669 				    nextvlan != NULL;
5670 				    nextvlan = nextvlan->gldv_next) {
5671 					for (next = nextvlan->gldv_str_next;
5672 					    next !=
5673 					    (gld_t *)&nextvlan->gldv_str_next;
5674 					    next = next->gld_next) {
5675 						if (minor == next->gld_minor)
5676 							goto nextminor;
5677 					}
5678 				}
5679 			}
5680 		}
5681 
5682 		return (minor);
5683 nextminor:
5684 		/* don't need to do anything */
5685 		;
5686 	}
5687 	cmn_err(CE_WARN, "GLD ran out of minor numbers for %s",
5688 	    device->gld_name);
5689 	return (0);
5690 }
5691 
5692 /*
5693  * Local versions of insque/remque for use by this driver.
5694  */
5695 struct qelem {
5696 	struct qelem *q_forw;
5697 	struct qelem *q_back;
5698 	/* rest of structure */
5699 };
5700 
5701 static void
5702 gldinsque(void *elem, void *pred)
5703 {
5704 	struct qelem *pelem = elem;
5705 	struct qelem *ppred = pred;
5706 	struct qelem *pnext = ppred->q_forw;
5707 
5708 	pelem->q_forw = pnext;
5709 	pelem->q_back = ppred;
5710 	ppred->q_forw = pelem;
5711 	pnext->q_back = pelem;
5712 }
5713 
5714 static void
5715 gldremque(void *arg)
5716 {
5717 	struct qelem *pelem = arg;
5718 	struct qelem *elem = arg;
5719 
5720 	pelem->q_forw->q_back = pelem->q_back;
5721 	pelem->q_back->q_forw = pelem->q_forw;
5722 	elem->q_back = elem->q_forw = NULL;
5723 }
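
/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * It shows the dummy-head idiom used with gldinsque()/gldremque() throughout
 * this file (e.g. gldv_str_next above): the head is a self-referencing pair
 * of pointers whose address is cast to the element type, and the element's
 * linkage pointers must be its first two members so the qelem overlay works.
 * "example_node" is a hypothetical element type.
 */
#ifdef GLD_EXAMPLE_SKETCH
struct example_node {
        struct example_node *ex_next;	/* overlays q_forw */
        struct example_node *ex_prev;	/* overlays q_back */
        int ex_data;
};

static void
example_list_usage(void)
{
        struct example_node head, a, b, *p;

        /* an empty circular list points back at its own head */
        head.ex_next = head.ex_prev = &head;

        gldinsque(&a, &head);		/* head -> a */
        gldinsque(&b, &a);		/* head -> a -> b */

        for (p = head.ex_next; p != &head; p = p->ex_next)
                p->ex_data = 0;		/* visits a, then b */

        gldremque(&a);			/* head -> b */
}
#endif /* GLD_EXAMPLE_SKETCH */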
5724 
5725 static gld_vlan_t *
5726 gld_add_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5727 {
5728 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5729 	gld_vlan_t	**pp;
5730 	gld_vlan_t	*p;
5731 
5732 	pp = &(mac_pvt->vlan_hash[vid % VLAN_HASHSZ]);
5733 	while ((p = *pp) != NULL) {
5734 		ASSERT(p->gldv_id != vid);
5735 		pp = &(p->gldv_next);
5736 	}
5737 
5738 	if ((p = kmem_zalloc(sizeof (gld_vlan_t), KM_NOSLEEP)) == NULL)
5739 		return (NULL);
5740 
5741 	p->gldv_mac = macinfo;
5742 	p->gldv_id = vid;
5743 
5744 	if (vid == VLAN_VID_NONE) {
5745 		p->gldv_ptag = VLAN_VTAG_NONE;
5746 		p->gldv_stats = mac_pvt->statistics;
5747 		p->gldv_kstatp = NULL;
5748 	} else {
5749 		p->gldv_ptag = GLD_MK_PTAG(VLAN_CFI_ETHER, vid);
5750 		p->gldv_stats = kmem_zalloc(sizeof (struct gld_stats),
5751 		    KM_SLEEP);
5752 
5753 		if (gld_init_vlan_stats(p) != GLD_SUCCESS) {
5754 			kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5755 			kmem_free(p, sizeof (gld_vlan_t));
5756 			return (NULL);
5757 		}
5758 	}
5759 
5760 	p->gldv_str_next = p->gldv_str_prev = (gld_t *)&p->gldv_str_next;
5761 	mac_pvt->nvlan++;
5762 	*pp = p;
5763 
5764 	return (p);
5765 }
5766 
5767 static void
5768 gld_rem_vlan(gld_vlan_t *vlan)
5769 {
5770 	gld_mac_info_t	*macinfo = vlan->gldv_mac;
5771 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5772 	gld_vlan_t	**pp;
5773 	gld_vlan_t	*p;
5774 
5775 	pp = &(mac_pvt->vlan_hash[vlan->gldv_id % VLAN_HASHSZ]);
5776 	while ((p = *pp) != NULL) {
5777 		if (p->gldv_id == vlan->gldv_id)
5778 			break;
5779 		pp = &(p->gldv_next);
5780 	}
5781 	ASSERT(p != NULL);
5782 
5783 	*pp = p->gldv_next;
5784 	mac_pvt->nvlan--;
5785 	if (p->gldv_id != VLAN_VID_NONE) {
5786 		ASSERT(p->gldv_kstatp != NULL);
5787 		kstat_delete(p->gldv_kstatp);
5788 		kmem_free(p->gldv_stats, sizeof (struct gld_stats));
5789 	}
5790 	kmem_free(p, sizeof (gld_vlan_t));
5791 }
5792 
5793 gld_vlan_t *
5794 gld_find_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5795 {
5796 	gld_mac_pvt_t	*mac_pvt = (gld_mac_pvt_t *)macinfo->gldm_mac_pvt;
5797 	gld_vlan_t	*p;
5798 
5799 	p = mac_pvt->vlan_hash[vid % VLAN_HASHSZ];
5800 	while (p != NULL) {
5801 		if (p->gldv_id == vid)
5802 			return (p);
5803 		p = p->gldv_next;
5804 	}
5805 	return (NULL);
5806 }
5807 
5808 gld_vlan_t *
5809 gld_get_vlan(gld_mac_info_t *macinfo, uint32_t vid)
5810 {
5811 	gld_vlan_t	*vlan;
5812 
5813 	if ((vlan = gld_find_vlan(macinfo, vid)) == NULL)
5814 		vlan = gld_add_vlan(macinfo, vid);
5815 
5816 	return (vlan);
5817 }
5818 
5819 /*
5820  * gld_bitrevcopy()
5821  * This is essentially bcopy, with the ability to bit reverse the
5822  * the source bytes. The MAC addresses bytes as transmitted by FDDI
5823  * source bytes. The MAC address bytes as transmitted by FDDI
5824  */
5825 void
5826 gld_bitrevcopy(caddr_t src, caddr_t target, size_t n)
5827 {
5828 	while (n--)
5829 		*target++ = bit_rev[(uchar_t)*src++];
5830 }
5831 
5832 /*
5833  * gld_bitreverse()
5834  * Reverse the bit order of each byte in place, using a
5835  * lookup table.
5836  */
5837 void
5838 gld_bitreverse(uchar_t *rptr, size_t n)
5839 {
5840 	while (n--) {
5841 		*rptr = bit_rev[*rptr];
5842 		rptr++;
5843 	}
5844 }
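
/*
 * Editor's note: illustrative sketch only, not part of the original source.
 * The bit_rev lookup table used above is defined elsewhere in GLD; this
 * shows how an equivalent 256-entry table could be generated, to make the
 * transformation explicit.  The names are hypothetical.
 */
#ifdef GLD_EXAMPLE_SKETCH
static uchar_t example_bit_rev[256];

static void
example_build_bit_rev(void)
{
        int i, b;

        for (i = 0; i < 256; i++) {
                uchar_t r = 0;

                for (b = 0; b < 8; b++) {
                        if (i & (1 << b))
                                r |= (uchar_t)(1 << (7 - b));
                }
                example_bit_rev[i] = r;	/* e.g. 0x01 -> 0x80, 0x0f -> 0xf0 */
        }
}
#endif /* GLD_EXAMPLE_SKETCH */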
5845 
5846 char *
5847 gld_macaddr_sprintf(char *etherbuf, unsigned char *ap, int len)
5848 {
5849 	int i;
5850 	char *cp = etherbuf;
5851 	static char digits[] = "0123456789abcdef";
5852 
5853 	for (i = 0; i < len; i++) {
5854 		*cp++ = digits[*ap >> 4];
5855 		*cp++ = digits[*ap++ & 0xf];
5856 		*cp++ = ':';
5857 	}
5858 	*--cp = 0;
5859 	return (etherbuf);
5860 }
5861 
5862 #ifdef GLD_DEBUG
5863 static void
5864 gld_check_assertions()
5865 {
5866 	glddev_t	*dev;
5867 	gld_mac_info_t	*mac;
5868 	gld_t		*str;
5869 	gld_vlan_t	*vlan;
5870 	int		i;
5871 
5872 	mutex_enter(&gld_device_list.gld_devlock);
5873 
5874 	for (dev = gld_device_list.gld_next;
5875 	    dev != (glddev_t *)&gld_device_list.gld_next;
5876 	    dev = dev->gld_next) {
5877 		mutex_enter(&dev->gld_devlock);
5878 		ASSERT(dev->gld_broadcast != NULL);
5879 		for (str = dev->gld_str_next;
5880 		    str != (gld_t *)&dev->gld_str_next;
5881 		    str = str->gld_next) {
5882 			ASSERT(str->gld_device == dev);
5883 			ASSERT(str->gld_mac_info == NULL);
5884 			ASSERT(str->gld_qptr != NULL);
5885 			ASSERT(str->gld_minor >= GLD_MIN_CLONE_MINOR);
5886 			ASSERT(str->gld_multicnt == 0);
5887 			ASSERT(str->gld_mcast == NULL);
5888 			ASSERT(!(str->gld_flags &
5889 			    (GLD_PROM_PHYS|GLD_PROM_MULT|GLD_PROM_SAP)));
5890 			ASSERT(str->gld_sap == 0);
5891 			ASSERT(str->gld_state == DL_UNATTACHED);
5892 		}
5893 		for (mac = dev->gld_mac_next;
5894 		    mac != (gld_mac_info_t *)&dev->gld_mac_next;
5895 		    mac = mac->gldm_next) {
5896 			int nvlan = 0;
5897 			gld_mac_pvt_t *pvt = (gld_mac_pvt_t *)mac->gldm_mac_pvt;
5898 
5899 			if (!(mac->gldm_GLD_flags & GLD_MAC_READY))
5900 				continue;	/* this one's not ready yet */
5901 
5902 			GLDM_LOCK(mac, RW_WRITER);
5903 			ASSERT(mac->gldm_devinfo != NULL);
5904 			ASSERT(mac->gldm_mac_pvt != NULL);
5905 			ASSERT(pvt->interfacep != NULL);
5906 			ASSERT(pvt->kstatp != NULL);
5907 			ASSERT(pvt->statistics != NULL);
5908 			ASSERT(pvt->major_dev == dev);
5909 
5910 			for (i = 0; i < VLAN_HASHSZ; i++) {
5911 				for (vlan = pvt->vlan_hash[i];
5912 				    vlan != NULL; vlan = vlan->gldv_next) {
5913 					int nstr = 0;
5914 
5915 					ASSERT(vlan->gldv_mac == mac);
5916 
5917 					for (str = vlan->gldv_str_next;
5918 					    str !=
5919 					    (gld_t *)&vlan->gldv_str_next;
5920 					    str = str->gld_next) {
5921 						ASSERT(str->gld_device == dev);
5922 						ASSERT(str->gld_mac_info ==
5923 						    mac);
5924 						ASSERT(str->gld_qptr != NULL);
5925 						ASSERT(str->gld_minor >=
5926 						    GLD_MIN_CLONE_MINOR);
5927 						ASSERT(
5928 						    str->gld_multicnt == 0 ||
5929 						    str->gld_mcast);
5930 						nstr++;
5931 					}
5932 					ASSERT(vlan->gldv_nstreams == nstr);
5933 					nvlan++;
5934 				}
5935 			}
5936 			ASSERT(pvt->nvlan == nvlan);
5937 			GLDM_UNLOCK(mac);
5938 		}
5939 		mutex_exit(&dev->gld_devlock);
5940 	}
5941 	mutex_exit(&gld_device_list.gld_devlock);
5942 }
5943 #endif
5944